text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
# Value-at-Risk for Stocks: Delta-Normal Approach, EWMA
### Lecture Notes by Jakov Ivan S. Dumbrique (jdumbrique@ateneo.edu)
MATH 100.2: Topics in Financial Mathematics II \
First Semester, S.Y. 2021-2022 \
Ateneo de Manila University
```
import numpy as np # Numerical Computing
import pandas as pd # Data wrangling
import matplotlib.pyplot as plt # Plotting
from scipy.stats import norm # statistical analysis
from statistics import NormalDist # statistical analysis
%matplotlib inline
def get_return(df):
    """Return the one-period log return log(close / previous) for a row."""
    price_ratio = df["close"] / df["previous"]
    return np.log(price_ratio)
def one_day_alpha_percent_VaR_single_stock_delta_normal(df, N, alpha):
    """Returns the one-day 100(alpha)% VaR of a single stock using the Delta-Normal Approach.

    Parameters
    ----------
    df : pandas.DataFrame
        has two columns: (1) dt [str] and (2) closing price "close" [float];
        assumes the dates are arranged from newest to oldest, and the date
        today is the date on the first row.
        NOTE: mutated in place — 'previous' and 'return' columns are added.
    N : int
        number of shares for the sole stock
    alpha : float
        the value used in calculating the 100(alpha)% VaR (e.g. 0.99, 0.95)

    Returns
    -------
    float (2 decimal places)
        one-day 100(alpha)% VaR of a single stock using the Delta-Normal Approach
    """
    # most recent stock price (newest row is first)
    S_0 = df.loc[0, "close"]
    # shift prices one row up so each row pairs today's close with yesterday's
    df["previous"] = df["close"].shift(-1)
    # vectorized daily log return — identical to the row-wise apply(get_return)
    # it replaces, but much faster
    df["return"] = np.log(df["close"] / df["previous"])
    # sample standard deviation (ddof=1) of the stock returns
    sigma = df["return"].std()
    # standard-normal quantile Phi^{-1}(alpha)
    quantile = NormalDist().inv_cdf(alpha)
    var = N * S_0 * sigma * quantile
    return round(var, 2)
```
# EWMA for a single-asset portfolio
The one-day 99\% VaR on the portfolio is given by
\begin{equation}
\text{VaR}_{0.99} = |V| = NS_0\sigma_n\Phi^{-1}(0.99).
\end{equation}
The variance estimate $\sigma_n^2$ from the EWMA can be used as the variance of the random variable $R$, and so we assume that $R\sim N(0,\sigma_n^2)$.
\begin{equation}
\sigma_n^2 \approx \sum_{i=1}^{n-1}(1-\lambda)\lambda^{i-1}R_{n-i}^2,
\end{equation}
giving us $\alpha_i = (1-\lambda)\lambda^{i-1}$ for $i=1,2,\dots,n-1$.
Equivalently, re-indexing with $j = i-1$, the weights are $\alpha_{j+1} = (1-\lambda)\lambda^{j}$ for $j=0,1,2,\dots,n-2$.
## Question:
Today is February 23, 2018. You are a portfolio risk manager who is assigned to analyze the market risk of a portfolio of 700 PLDT (TEL) shares. Assume that the one-day volatility of the stock follows an exponentially-weighted moving average model with parameter $\lambda$=0.65
. Determine the portfolio's one-day 99% VaR.
```
def get_weights_df(df, d, ewma_par):
    """Append the EWMA weight series (1-lambda)*lambda**j, j=0..n_returns-1, to df."""
    # number of non-NaN d-day returns available in the frame
    n_returns = df["close"].size - d
    weights = []
    for j in range(n_returns):
        weights.append((1 - ewma_par) * ewma_par ** j)
    # shorter series than df -> trailing rows get NaN weights
    df["weight"] = pd.Series(weights)
    return df
def d_day_alpha_percent_VaR_single_stock_delta_normal(df, d, N, alpha, ewma_par=None):
    """Returns the d-day 100(alpha)% VaR of a single stock using the Delta-Normal Approach.

    Parameters
    ----------
    df : pandas.DataFrame
        has two columns: (1) dt [str] and (2) closing price "close" [float];
        assumes the dates are arranged from newest to oldest, and the date
        today is the date on the first row.
        NOTE: mutated in place — 'previous', 'return' (and possibly 'weight')
        columns are added.
    d : int
        value of d for the d-day VaR
    N : int
        number of shares for the sole stock
    alpha : float
        the value used in calculating the 100(alpha)% VaR (e.g. 0.99, 0.95)
    ewma_par : float, optional
        lambda parameter of the EWMA volatility model; must lie in (0, 1).
        If None (or out of range), the plain sample standard deviation is used.

    Returns
    -------
    float (2 decimal places)
        d-day 100(alpha)% VaR of a single stock using the Delta-Normal Approach
    """
    # most recent stock price
    S_0 = df.loc[0, "close"]
    # shift prices d rows up so each row pairs its close with the close d days earlier
    df["previous"] = df["close"].shift(-d)
    # vectorized d-day log returns — identical to the row-wise apply(get_return)
    df["return"] = np.log(df["close"] / df["previous"])
    # Case 1: EWMA volatility estimate
    # ('is not None' instead of '!= None' — identity test is the correct idiom)
    if ewma_par is not None and 0 < ewma_par < 1:
        df = get_weights_df(df, d, ewma_par)
        # weighted sum of squared returns; nansum ignores the NaN tail rows
        variance = np.nansum(df["weight"] * (df["return"] ** 2))
        sigma = np.sqrt(variance)
    # Case 2: equally-weighted sample standard deviation
    else:
        sigma = df["return"].std()
    quantile = NormalDist().inv_cdf(alpha)
    var = N * S_0 * sigma * quantile
    return round(var, 2)
# read the data
# Daily TEL (PLDT) closing prices for 2018, newest row first, as the VaR
# functions above assume.  NOTE(review): the calls below mutate TEL_df in
# place (they add 'previous', 'return' and 'weight' columns).
TEL_df = pd.read_csv("https://raw.githubusercontent.com/ateneomathdept/math100.2_2021Sem1/main/data/lectures/TEL_2018.csv")
TEL_df
# Preview the EWMA weights for one-day returns with lambda = 0.65.
get_weights_df(TEL_df, d=1, ewma_par=0.65)
# One-day 99% VaR of 700 shares using the EWMA volatility estimate.
d_day_alpha_percent_VaR_single_stock_delta_normal(df=TEL_df, d=1, N=700, alpha=0.99, ewma_par=0.65)
# Same VaR using the plain sample standard deviation (no EWMA).
d_day_alpha_percent_VaR_single_stock_delta_normal(df=TEL_df, d=1, N=700, alpha=0.99)
# Ten-day 99% VaR with EWMA volatility.
d_day_alpha_percent_VaR_single_stock_delta_normal(df=TEL_df, d=10, N=700, alpha=0.99, ewma_par=0.65)
```
| github_jupyter |
```
# Deep-learning stack for the CTC plate-OCR notebook (TensorFlow 1.x / old
# standalone Keras API — tf.Session and keras.layers.merge exist only there).
import keras
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('Keras version:', keras.__version__)
import os
from os.path import join
import json
import random
import itertools
import re
import datetime
import cairocffi as cairo
import editdistance
import numpy as np
from scipy import ndimage
import pylab
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from keras import backend as K
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Activation
from keras.layers import Reshape, Lambda
from keras.layers.merge import add, concatenate
from keras.models import Model, load_model
from keras.layers.recurrent import GRU
from keras.optimizers import SGD
from keras.utils.data_utils import get_file
from keras.preprocessing import image
import keras.callbacks
import cv2
# Create a TF1-style session and register it with Keras so both share
# one graph; `sess` is reused later for raw tensor evaluation.
sess = tf.Session()
K.set_session(sess)
```
# Get alphabet
```
from collections import Counter
def get_counter(dirpath, tag):
    """Count character frequencies over all annotations carrying `tag`.

    Parameters
    ----------
    dirpath : str
        dataset root; annotations are read from <dirpath>/ann/*.json
    tag : str
        only annotations whose 'tags' list contains this value are counted

    Returns
    -------
    collections.Counter
        per-character counts over the concatenated 'description' fields.
        Also prints the maximum description length seen for `tag`.
    """
    dirname = os.path.basename(dirpath)
    ann_dirpath = join(dirpath, 'ann')
    letters = ''
    lens = []
    for filename in os.listdir(ann_dirpath):
        json_filepath = join(ann_dirpath, filename)
        # use a context manager — the original json.load(open(...)) leaked
        # one file handle per annotation
        with open(json_filepath, 'r') as f:
            ann = json.load(f)
        tags = ann['tags']
        if tag in tags:
            description = ann['description']
            lens.append(len(description))
            letters += description
    print('Max plate length in "%s":' % dirname, max(Counter(lens).keys()))
    return Counter(letters)
# Build per-character counters for both splits and require that train and
# validation use exactly the same alphabet.
c_val = get_counter('/data/', 'val')
c_train = get_counter('/data/', 'train')
letters_train = set(c_train.keys())
letters_val = set(c_val.keys())
if letters_train == letters_val:
    print('Letters in train and val do match')
else:
    # NOTE(review): raised without a message — consider describing the mismatch.
    raise Exception()
# print(len(letters_train), len(letters_val), len(letters_val | letters_train))
# Global alphabet used by the label encode/decode helpers below.
letters = sorted(list(letters_train))
print('Letters:', ' '.join(letters))
```
# Input data generator
```
def labels_to_text(labels):
    """Map a sequence of class indices back to a string via the global `letters`."""
    return ''.join(letters[int(label)] for label in labels)
def text_to_labels(text):
    """Map a string to its list of class indices in the global `letters`."""
    return [letters.index(ch) for ch in text]
def is_valid_str(s):
    """Return True iff every character of `s` belongs to the global `letters`."""
    return all(ch in letters for ch in s)
class TextImageGenerator:
    """Loads plate images + JSON annotations and yields CTC training batches.

    Images come from <dirpath>/img, annotations from <dirpath>/ann; only
    samples whose annotation carries `tag` and whose description uses only
    characters from the global `letters` alphabet are kept.
    """

    def __init__(self,
                 dirpath,
                 tag,
                 img_w, img_h,
                 batch_size,
                 downsample_factor,
                 max_text_len=8):
        # Target geometry and batching parameters.
        self.img_h = img_h
        self.img_w = img_w
        self.batch_size = batch_size
        self.max_text_len = max_text_len
        self.downsample_factor = downsample_factor
        img_dirpath = join(dirpath, 'img')
        ann_dirpath = join(dirpath, 'ann')
        # List of [image_path, description] pairs accepted for this split.
        self.samples = []
        for filename in os.listdir(img_dirpath):
            name, ext = os.path.splitext(filename)
            if ext in ['.png', '.jpg']:
                img_filepath = join(img_dirpath, filename)
                json_filepath = join(ann_dirpath, name + '.json')
                # NOTE(review): the open() handle is never closed explicitly.
                ann = json.load(open(json_filepath, 'r'))
                description = ann['description']
                tags = ann['tags']
                print(filename, tag, tags)  # NOTE(review): debug print left in; noisy.
                if tag not in tags:
                    print('LOL????????')  # NOTE(review): leftover debug output.
                    continue
                if is_valid_str(description):
                    self.samples.append([img_filepath, description])
                print('or this?', is_valid_str(description))  # NOTE(review): debug output.
        self.n = len(self.samples)
        self.indexes = list(range(self.n))
        self.cur_index = 0

    def build_data(self):
        """Load all sample images into memory as normalized grayscale arrays."""
        self.imgs = np.zeros((self.n, self.img_h, self.img_w))
        self.texts = []
        for i, (img_filepath, text) in enumerate(self.samples):
            img = cv2.imread(img_filepath)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, (self.img_w, self.img_h))
            img = img.astype(np.float32)
            img /= 255
            # width and height are backwards from typical Keras convention
            # because width is the time dimension when it gets fed into the RNN
            self.imgs[i, :, :] = img
            self.texts.append(text)

    def get_output_size(self):
        """Number of output classes: alphabet size plus one CTC 'blank' label."""
        return len(letters) + 1

    def next_sample(self):
        """Return the next (image, text) pair, reshuffling after each pass."""
        self.cur_index += 1
        if self.cur_index >= self.n:
            self.cur_index = 0
            random.shuffle(self.indexes)
        return self.imgs[self.indexes[self.cur_index]], self.texts[self.indexes[self.cur_index]]

    def next_batch(self):
        """Yield (inputs, outputs) dicts shaped for Keras CTC training, forever."""
        while True:
            # width and height are backwards from typical Keras convention
            # because width is the time dimension when it gets fed into the RNN
            if K.image_data_format() == 'channels_first':
                X_data = np.ones([self.batch_size, 1, self.img_w, self.img_h])
            else:
                X_data = np.ones([self.batch_size, self.img_w, self.img_h, 1])
            Y_data = np.ones([self.batch_size, self.max_text_len])
            # Time steps fed to CTC: downsampled width minus the 2 frames
            # dropped later in ctc_lambda_func.
            input_length = np.ones((self.batch_size, 1)) * (self.img_w // self.downsample_factor - 2)
            label_length = np.zeros((self.batch_size, 1))
            source_str = []
            for i in range(self.batch_size):
                img, text = self.next_sample()
                print(img, text)  # NOTE(review): prints the full image array per sample; very slow.
                img = img.T
                if K.image_data_format() == 'channels_first':
                    img = np.expand_dims(img, 0)
                else:
                    img = np.expand_dims(img, -1)
                X_data[i] = img
                # NOTE(review): assumes len(text) == max_text_len, otherwise
                # this row assignment fails — confirm against the dataset.
                Y_data[i] = text_to_labels(text)
                source_str.append(text)
                label_length[i] = len(text)
            inputs = {
                'the_input': X_data,
                'the_labels': Y_data,
                'input_length': input_length,
                'label_length': label_length,
                #'source_str': source_str
            }
            # Dummy target; the real CTC loss is computed inside the model.
            outputs = {'ctc': np.zeros([self.batch_size])}
            yield (inputs, outputs)
# Build a small validation generator and sanity-check one batch of the
# data that will be fed to the network.
tiger = TextImageGenerator('/data/', 'val', 128, 64, 8, 4)
tiger.build_data()
for inp, out in tiger.next_batch():
    # Fixed typo: 'neutral network' -> 'neural network'.
    print('Text generator output (data which will be fed into the neural network):')
    print('1) the_input (image)')
    if K.image_data_format() == 'channels_first':
        img = inp['the_input'][0, 0, :, :]
    else:
        img = inp['the_input'][0, :, :, 0]
    plt.imshow(img.T, cmap='gray')
    plt.show()
    print('2) the_labels (plate number): %s is encoded as %s' %
          (labels_to_text(inp['the_labels'][0]), list(map(int, inp['the_labels'][0]))))
    print('3) input_length (width of image that is fed to the loss function): %d == %d / 4 - 2' %
          (inp['input_length'][0], tiger.img_w))
    print('4) label_length (length of plate number): %d' % inp['label_length'][0])
    break
```
# Loss and train functions, network architecture
```
def ctc_lambda_func(args):
    """Compute the CTC batch cost from (y_pred, labels, input_length, label_length).

    The first two time steps of y_pred are dropped because the initial RNN
    outputs tend to be garbage.
    """
    y_pred, labels, input_length, label_length = args
    trimmed_pred = y_pred[:, 2:, :]
    return K.ctc_batch_cost(labels, trimmed_pred, input_length, label_length)
def train(img_w, load=False):
    """Build (or load) and train the CNN + bidirectional-GRU + CTC OCR model.

    Parameters
    ----------
    img_w : int
        input image width in pixels (height is fixed at 64)
    load : bool
        if True, load './tmp_model.h5' instead of building and training

    Returns
    -------
    keras.models.Model
        the compiled training model (inputs include labels/lengths for CTC)
    """
    # Input Parameters
    img_h = 64
    # Network parameters
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512
    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)
    batch_size = 32
    # Two 2x2 max-poolings shrink the width (time axis) by pool_size**2.
    downsample_factor = pool_size ** 2
    tiger_train = TextImageGenerator('/data/', 'train', img_w, img_h, batch_size, downsample_factor)
    tiger_train.build_data()
    tiger_val = TextImageGenerator('/data/', 'val', img_w, img_h, batch_size, downsample_factor)
    tiger_val.build_data()
    act = 'relu'
    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)
    # Collapse (width, height, filters) into (time steps, features) for the RNN.
    conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)
    # cuts down input size going into RNN:
    inner = Dense(time_dense_size, activation=act, name='dense1')(inner)
    # Two layers of bidirecitonal GRUs
    # GRU seems to work as well, if not better than LSTM:
    gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)
    gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
    # transforms RNN output to character activations:
    inner = Dense(tiger_train.get_output_size(), kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(inputs=input_data, outputs=y_pred).summary()
    labels = Input(name='the_labels', shape=[tiger_train.max_text_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
    # clipnorm seems to speeds up convergence
    sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
    if load:
        model = load_model('./tmp_model.h5', compile=False)
    else:
        model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
    if not load:
        # captures output of softmax so we can decode the output during visualization
        test_func = K.function([input_data], [y_pred])
        # NOTE(review): steps_per_epoch is the sample count, not
        # samples // batch_size — confirm this is intended.
        model.fit_generator(generator=tiger_train.next_batch(),
                            steps_per_epoch=tiger_train.n,
                            epochs=1,
                            validation_data=tiger_val.next_batch(),
                            validation_steps=tiger_val.n)
    return model
```
# Model description and training
Next block will take about 30 minutes.
```
# Train the CTC model on 128-pixel-wide images (takes about 30 minutes).
model = train(128, load=False)
```
# Function to decode neural network output
```
# For a real OCR application, this should be beam search with a dictionary
# and language model. For this example, best path is sufficient.
# For a real OCR application, this should be beam search with a dictionary
# and language model. For this example, best path is sufficient.
def decode_batch(out):
    """Greedy (best-path) CTC decoding of a batch of softmax outputs."""
    decoded = []
    for j in range(out.shape[0]):
        # skip the first 2 time steps (dropped in the loss as well)
        best_path = list(np.argmax(out[j, 2:], 1))
        # collapse repeated labels, then drop the CTC blank (index == len(letters))
        collapsed = [label for label, _ in itertools.groupby(best_path)]
        text = ''.join(letters[c] for c in collapsed if c < len(letters))
        decoded.append(text)
    return decoded
```
# Test on validation images
```
# Evaluate the trained model on the 'test' split and visualize each
# prediction next to its softmax activation map.
tiger_test = TextImageGenerator('/data/', 'test', 128, 64, 8, 4)
tiger_test.build_data()
print(len(tiger_test.texts))
# Grab the graph tensors so the softmax output can be run directly in `sess`.
net_inp = model.get_layer(name='the_input').input
net_out = model.get_layer(name='softmax').output
for inp_value, _ in tiger_test.next_batch():
    bs = inp_value['the_input'].shape[0]
    X_data = inp_value['the_input']
    net_out_value = sess.run(net_out, feed_dict={net_inp: X_data})
    pred_texts = decode_batch(net_out_value)
    labels = inp_value['the_labels']
    texts = []
    for label in labels:
        text = ''.join(list(map(lambda x: letters[int(x)], label)))
        texts.append(text)
    for i in range(bs):
        fig = plt.figure(figsize=(10, 10))
        outer = gridspec.GridSpec(2, 1, wspace=10, hspace=0.1)
        ax1 = plt.Subplot(fig, outer[0])
        fig.add_subplot(ax1)
        ax2 = plt.Subplot(fig, outer[1])
        fig.add_subplot(ax2)
        print('Predicted: %s\nTrue: %s' % (pred_texts[i], texts[i]))
        img = X_data[i][:, :, 0].T
        ax1.set_title('Input img')
        ax1.imshow(img, cmap='gray')
        ax1.set_xticks([])
        ax1.set_yticks([])
        # Fixed typo in the user-facing title: 'Acrtivations' -> 'Activations'.
        ax2.set_title('Activations')
        ax2.imshow(net_out_value[i].T, cmap='binary', interpolation='nearest')
        ax2.set_yticks(list(range(len(letters) + 1)))
        ax2.set_yticklabels(letters + ['blank'])
        ax2.grid(False)
        for h in np.arange(-0.5, len(letters) + 1 + 0.5, 1):
            ax2.axhline(h, linestyle='-', color='k', alpha=0.5, linewidth=1)
        #ax.axvline(x, linestyle='--', color='k')
        plt.show()
    break
```
| github_jupyter |
### Imports
```
from datetime import datetime
import time
from contracts_lib_py.account import Account
from common_utils_py.agreements.service_types import ServiceTypesIndices
from nevermined_sdk_py import Config, Nevermined
from nevermined_sdk_py.nevermined.keeper import NeverminedKeeper as Keeper

# Demo consumer account (address, keyfile password, keyfile path) and the
# SDK configuration file.
CONSUMER_ADDRESS = "0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e"
CONSUMER_PASSWORD = "node0"
CONSUMER_KEYFILE = "../resources/accounts/consumer.json"
CONFIG_FILE = "../config.ini"
# Address of the provider that will serve/execute the published assets.
PROVIDER_ADDRESS = "0x068Ed00cF0441e4829D9784fCBe7b9e26D4BD8d0"
# DIDs of the two pre-published compute-to-data assets and the
# federated-learning coordinator asset used by this demo.
ASSET_COMPUTE_DID_1 = "did:nv:4639d23189e2871c654a3d192a19be9a17e47378b48523a3f8fd783afa1f7c35"
ASSET_COMPUTE_DID_2 = "did:nv:e1397649dd8a437b45b60d00b089442c35560425d27df60d4112cea2e83cf983"
ASSET_COORDINATOR_DID = "did:nv:7d698cffb2f41a4420c4a3057883a5f77645fb4564716053404cdec57c33a71b"
```
### Setup Nevermined and accounts
```
# Instantiate the Nevermined client, load the demo consumer account, and
# fund it with 10 tokens to pay for the agreements created below.
nevermined = Nevermined(Config(CONFIG_FILE))
consumer_account = Account(CONSUMER_ADDRESS, CONSUMER_PASSWORD, CONSUMER_KEYFILE)
nevermined.accounts.request_tokens(consumer_account, 10)
```
### Publish algorithm
```
# Metadata describing the algorithm asset: where to fetch the notebook,
# how to containerize it, and the entrypoint that runs it via papermill.
metadata_algorithm = {
    "main": {
        "name": "Fraud Demo Algorithm",
        "dateCreated": datetime.utcnow().isoformat(timespec="seconds") + "Z",
        "author": "Nevermined Consumer",
        "license": "",
        "price": "0",
        "files": [
            {
                "index": 0,
                "contentType": "text/text",
                "checksum": "0x52b5c93b82dd9e7ecc3d9fdf4755f7f69a54484941897dc517b4adfe3bbc3377",
                "checksumType": "MD5",
                "contentLength": "12057507",
                "url": "https://raw.githubusercontent.com/nevermined-io/fl-demo/master/fraud-detection-xain/notebooks/federated_fraud_demo.ipynb"
            }
        ],
        "type": "algorithm",
        "algorithm": {
            "language": "python",
            "format": "py",
            "version": "0.1.0",
            # Install dependencies, then execute the notebook and write the
            # result into the Nevermined outputs folder.
            "entrypoint": (
                "pip install jupyter pandas papermill scikit-learn xain-sdk && "
                "papermill --stdout-file - federated_fraud_demo.ipynb $NEVERMINED_OUTPUTS_PATH/federated_fraud_demo_output.ipynb"
            ),
            "requirements": {
                "container": {
                    "image": "python",
                    "tag": "3.8-slim-buster",
                    "checksum": "sha256:53ad3a03b2fb240b6c494339821e6638cd44c989bcf26ec4d51a6a52f7518c1d"
                }
            }
        }
    }
}
# Register the algorithm on-chain with the given provider.
ddo_algorithm = nevermined.assets.create(metadata_algorithm, consumer_account, providers=[PROVIDER_ADDRESS])
print(f"Algorithm DID: {ddo_algorithm.did}")
```
### Publish the workflows:
- Two for the compute to the data assets
- One for the coordinator service
```
# Compute asset 1
# Workflow asset pairing data asset 1 with the algorithm published above.
metadata_workflow1 = {
    "main": {
        "name": "Fraud Demo Workflow 1",
        "dateCreated": datetime.utcnow().isoformat(timespec="seconds") + "Z",
        "author": "Nevermined Consumer",
        "license": "",
        "price": "0",
        "type": "workflow",
        "workflow": {
            "stages": [
                {
                    "index": 0,
                    "input": [
                        {
                            "index": 0,
                            "id": ASSET_COMPUTE_DID_1
                        }
                    ],
                    "transformation": {
                        "id": ddo_algorithm.did
                    }
                }
            ]
        }
    }
}
ddo_workflow1 = nevermined.assets.create(metadata_workflow1, consumer_account, providers=[PROVIDER_ADDRESS])
print(f"Workflow DID: {ddo_workflow1.did}")
# Compute asset 2
# Same workflow shape as workflow 1, pointing at the second data asset.
metadata_workflow2 = {
    "main": {
        "name": "Fraud Demo Workflow 2",
        "dateCreated": datetime.utcnow().isoformat(timespec="seconds") + "Z",
        "author": "Nevermined Consumer",
        "license": "",
        "price": "0",
        "type": "workflow",
        "workflow": {
            "stages": [
                {
                    "index": 0,
                    "input": [
                        {
                            "index": 0,
                            "id": ASSET_COMPUTE_DID_2
                        }
                    ],
                    "transformation": {
                        "id": ddo_algorithm.did
                    }
                }
            ]
        }
    }
}
ddo_workflow2 = nevermined.assets.create(metadata_workflow2, consumer_account, providers=[PROVIDER_ADDRESS])
print(f"Workflow DID: {ddo_workflow2.did}")
# Coordinator service
# Workflow for the federated-learning coordinator: no data inputs, one
# server instance running the xain-fl container for 10 training rounds
# with at least 2 participants.
metadata_workflow_coordinator = {
    "main": {
        "author": "Nevermined Consumer",
        "checksum": "0x52b5c93b82dd9e7ecc3d9fdf4755f7f69a54484941897dc517b4adfe3bbc3377",
        "dateCreated": datetime.utcnow().isoformat(timespec="seconds") + "Z",
        "datePublished": datetime.utcnow().isoformat(timespec="seconds") + "Z",
        "license": "",
        "name": "My workflow",
        "price": "0",
        "type": "fl-coordinator",
        "parameters": {
            "minParticipants": 2,
            "participantsRatio": 1,
            "rounds": 10
        },
        "workflow": {
            "stages": [
                {
                    "input": [],
                    "index": 0,
                    "requirements": {
                        "serverInstances": 1,
                        "container": {
                            "image": "rmarques/xain-fl",
                            "tag": "latest",
                            "checksum": "sha256:cb57ecfa6ebbefd8ffc7f75c0f00e57a7fa739578a429b6f72a0df19315deadc"
                        }
                    }
                }
            ]
        }
    }
}
ddo_workflow_coordinator = nevermined.assets.create(metadata_workflow_coordinator, consumer_account, providers=[PROVIDER_ADDRESS])
print(f"Workflow DID: {ddo_workflow_coordinator.did}")
```
### Order computations:
- One for each data asset
- One for the coordinator service
In between, we wait for the blockchain to process each request.
```
# Order one compute job per data asset plus one for the coordinator.
# After each order, block (up to 60 s) until the on-chain payment-lock
# condition for that agreement is fulfilled.
keeper = Keeper.get_instance()
service_agreement_id1 = nevermined.assets.order(ASSET_COMPUTE_DID_1, ServiceTypesIndices.DEFAULT_COMPUTING_INDEX, consumer_account, consumer_account)
print(f"Service Agreement ID: {service_agreement_id1}")
event = keeper.lock_payment_condition.subscribe_condition_fulfilled(
    service_agreement_id1, 60, None, (), wait=True
)
assert event is not None, "Payment condition is not found"
service_agreement_id2 = nevermined.assets.order(ASSET_COMPUTE_DID_2, ServiceTypesIndices.DEFAULT_COMPUTING_INDEX, consumer_account, consumer_account)
print(f"Service Agreement ID: {service_agreement_id2}")
event = keeper.lock_payment_condition.subscribe_condition_fulfilled(
    service_agreement_id2, 60, None, (), wait=True
)
assert event is not None, "Payment condition is not found"
service_agreement_id_coordinator = nevermined.assets.order(ASSET_COORDINATOR_DID, ServiceTypesIndices.DEFAULT_COMPUTING_INDEX, consumer_account, consumer_account)
print(f"Service Agreement ID: {service_agreement_id_coordinator}")
event = keeper.lock_payment_condition.subscribe_condition_fulfilled(
    service_agreement_id_coordinator, 60, None, (), wait=True
)
assert event is not None, "Payment condition is not found"
```
### Execute workflows
This orders the Nevermined to start the execution of the workflows:
- One for each data asset
- One for the coordinator service
```
# Start execution of each workflow under its corresponding service
# agreement; each call returns a job/execution identifier to poll later.
execution_id1 = nevermined.assets.execute(
    service_agreement_id1,
    ASSET_COMPUTE_DID_1,
    ServiceTypesIndices.DEFAULT_COMPUTING_INDEX,
    consumer_account,
    ddo_workflow1.did
)
print(f"Execution ID: {execution_id1}")
execution_id2 = nevermined.assets.execute(
    service_agreement_id2,
    ASSET_COMPUTE_DID_2,
    ServiceTypesIndices.DEFAULT_COMPUTING_INDEX,
    consumer_account,
    ddo_workflow2.did
)
print(f"Execution ID: {execution_id2}")
execution_id_coordinator = nevermined.assets.execute(
    service_agreement_id_coordinator,
    ASSET_COORDINATOR_DID,
    ServiceTypesIndices.DEFAULT_COMPUTING_INDEX,
    consumer_account,
    ddo_workflow_coordinator.did
)
print(f"Execution ID: {execution_id_coordinator}")
```
### Wait for compute jobs to finish
```
# Poll all three jobs every 10 s until they have all succeeded, collecting
# the DIDs of their output assets; abort on the first failure.
succeeded = set()
failed = set()  # NOTE(review): never written to — appears vestigial.
outputs = set()
while len(succeeded) < 3:
    for job_id in [execution_id1, execution_id2, execution_id_coordinator]:
        # NOTE(review): the coordinator's agreement id is used to query the
        # status of all three jobs — confirm this is intended.
        result = nevermined.assets.compute_status(service_agreement_id_coordinator, job_id, consumer_account)
        status = result["status"]
        print(f"{job_id}: {status}")
        if status == "Succeeded":
            succeeded.add(job_id)
            outputs.add(result["did"])
        elif status == "Failed":
            raise Exception("Some jobs failed")
    print()
    time.sleep(10)
for did in outputs:
    print(f"Output DID: {did}")
```
### Download the results
```
# Download every output asset produced by the compute jobs into the
# current working directory.
for did in outputs:
    print(f"Downloading: {did}")
    nevermined.assets.download(did, ServiceTypesIndices.DEFAULT_ACCESS_INDEX, consumer_account, "./")
```
| github_jupyter |
# ODYM Example no. 5. Estimating the material content of the global vehicle fleet
ODYM was designed to handle extensive MFA systems by covering multiple aspects (time, age-cohort, region, material, chemical elements, processes, goods, components, ...) in a systematic manner. Its data format is used to structure and store input data, its software structure determines how the information is organised in the computer, and its application scripts provide a working environment for conducting reproducible dynamic MFA research with comprehensive and multi-aspect systems.
This example shows a fully-fledged application of ODYM to estimate the material composition of the global passenger vehicle fleet in 2017, covering 130 countries, 25 age-cohorts, and 25 materials. The application is controlled by a config file, reads the model parameters in standard format, performs the model computations and a Monte-Carlo simulation of the uncertainties stemming from vehicle lifetime and material composition, performs automatic mass balance checks, and stores the model procedures in a log file.
The research questions asked are: __How big is the material stock currently embodied in the global passenger vehicle fleet, and when will this material become available for recycling?__
To answer these questions a dynamic material flow analysis of the global passenger vehicle fleet and the waste management industries is performed.
The dynamic MFA model has the following indices:
+ t: time (1990-2017)
+ c: age-cohort (1990-2017)
+ r: region (130 countries accounting for most of the global vehicle fleet)
+ g: good (passenger vehicle)
+ p: process (vehicle markets, use phase, waste management industries, scrap markets)
+ m: engineering materials (25)
+ e: chemical elements (all)
+ w: waste types (steel, Al, Cu scrap, plastics, glass, and other waste)
The system definition of the model is given in the figure below. The data availability limits the temporal scope to 2017. The figure also shows the aspects of the different system variables. The total registration of vehicles, for example, is broken down into individual materials, whereas the flow of deregistered vehicles is broken down into regions, age-cohorts, and materials.
<img src="Images/ODYM_Tutorial5_SysDef.png" width="850" height="290" alt="ODYM_Tutorial5 System Definition">
The model equations are as follows:
1) inflow-driven dynamic stock model, where _F12_ is the historic inflow, _Sf_ is the survival function of the age-cohort (1-sum(pdf of discard)), and _S2_ is the stock:
$$S_2(t,c,r,g) = F_{1-2}(c,r,g)\cdot Sf(t,c,r,g)$$
2) Calculation of difference between inflow-driven stock (covering only the age-cohorts 2005-2017 due to data availability) and the 2015 reported stock and distribution of the difference to the years 1990-2005 (constant inflow assumed for these years)
3) Calculation of material composition of the fleet _S2_ with
$$S_2(t,c,r,g,m) = \mu(c,r,g,m)\cdot S_2(t,c,r,g)$$
4) Estimation of available future end-of-life vehicle scrap _F34_ with
$$F_{3-4}(r,g,w,m) = \sum_{t,c}\mathit{EoL}_{\mathit{eff}}(r,g,m,w)\cdot M(t,c,r,g,m)$$
The remaining system variables are calculated by mass balance.
__Model choice:__ Because the system covers region, material, waste, and chemical element aspects and a dynamic stock model, a simple calculation in Excel would be too laborious and the potential for testing and quick re-use of data and programming would be limited. Instead, the ODYM framework is chosen to structure the model data and calculations.
### 1) Load ODYM
```
# Load a local copy of the current ODYM branch:
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import xlrd, xlwt
import pylab
from copy import deepcopy
import logging as log
# For Ipython Notebook only
%matplotlib inline
# add ODYM module directory to system path, relative
MainPath = os.path.join('..', 'odym', 'modules')
sys.path.insert(0, MainPath)
# add ODYM module directory to system path, absolute
sys.path.insert(0, os.path.join(os.getcwd(),'..', 'odym', 'modules'))
# Specify path to dynamic stock model and to datafile, relative
DataPath = os.path.join('..', 'docs', 'files')
# Specify path to dynamic stock model and to datafile, absolute
# NOTE(review): this overwrites the relative DataPath assigned just above.
DataPath = os.path.join(os.getcwd(),'..', 'docs', 'files')
import ODYM_Classes as msc # import the ODYM class file
import ODYM_Functions as msf # import the ODYM function file
import dynamic_stock_model as dsm # import the dynamic stock model library
# Initialize loggin routine
# NOTE(review): eval of a constant string — plain log.DEBUG would suffice.
log_verbosity = eval("log.DEBUG")
log_filename = 'LogFileTest.md'
# Route log output to both the console and a markdown log file.
[Mylog, console_log, file_log] = msf.function_logger(log_filename, os.getcwd(),
                                                     log_verbosity, log_verbosity)
Mylog.info('### 1. - Initialize.')
```
### 2) Load Config file and read model control parameters
```
Mylog.info('### 2 - Load Config file and read model control parameters')
#Read main script parameters
#Load project-specific config file
ProjectSpecs_Name_ConFile = 'ODYM_Config_Tutorial5.xlsx'
Model_Configfile = xlrd.open_workbook(os.path.join(DataPath, ProjectSpecs_Name_ConFile))
# The 'Config' sheet names the active model setting; the matching
# 'Setting_<name>' sheet holds all parameters for this run.
ScriptConfig = {'Model Setting': Model_Configfile.sheet_by_name('Config').cell_value(3,3)}
Model_Configsheet = Model_Configfile.sheet_by_name('Setting_' + ScriptConfig['Model Setting'])
Name_Scenario = Model_Configsheet.cell_value(3,3)
print(Name_Scenario)
### 1.2) Read model control parameters
#Read control and selection parameters into dictionary
# Scan down column 1 for the 'General Info' section, then copy its
# key/value rows (columns 2/3) into ScriptConfig until a blank value row.
SCix = 0
# search for script config list entry
while Model_Configsheet.cell_value(SCix, 1) != 'General Info':
    SCix += 1
SCix += 2 # start on first data row
while len(Model_Configsheet.cell_value(SCix, 3)) > 0:
    ScriptConfig[Model_Configsheet.cell_value(SCix, 2)] = Model_Configsheet.cell_value(SCix,3)
    SCix += 1
# Same scan for the 'Software version selection' section.
SCix = 0
# search for script config list entry
while Model_Configsheet.cell_value(SCix, 1) != 'Software version selection':
    SCix += 1
SCix += 2 # start on first data row
while len(Model_Configsheet.cell_value(SCix, 3)) > 0:
    ScriptConfig[Model_Configsheet.cell_value(SCix, 2)] = Model_Configsheet.cell_value(SCix,3)
    SCix += 1
print(ScriptConfig)
```
### 3) Read classification and data
```
Mylog.info('### 3 - Read classification and data')
# This is standard for each ODYM model run.
# Read model run config data
# Parse the master classification workbook: each column of 'MAIN_Table'
# describes one classification (name, dimension, ID, UUID, items from row 10).
Classfile = xlrd.open_workbook(os.path.join(DataPath,
                                            str(ScriptConfig['Version of master classification']) \
                                            + '.xlsx'))
Classsheet = Classfile.sheet_by_name('MAIN_Table')
ci = 1  # column index to start with
MasterClassification = {}  # Dict of master classifications
while True:
    TheseItems = []
    ri = 10  # row index to start with
    try:
        ThisName = Classsheet.cell_value(0, ci)
        ThisDim = Classsheet.cell_value(1, ci)
        ThisID = Classsheet.cell_value(3, ci)
        ThisUUID = Classsheet.cell_value(4, ci)
        TheseItems.append(Classsheet.cell_value(ri, ci))  # read the first classification item
    # narrowed from a bare 'except:', which would also trap SystemExit /
    # KeyboardInterrupt
    except Exception:
        print('End of file or formatting error while reading the classification file in column ' + str(ci) + '.')
        break
    while True:
        ri += 1
        try:
            ThisItem = Classsheet.cell_value(ri, ci)
        except Exception:  # past the last row of this column
            break
        # '!=' instead of 'is not': identity comparison with a string literal
        # only worked by CPython interning accident
        if ThisItem != '':
            TheseItems.append(ThisItem)
    MasterClassification[ThisName] = msc.Classification(Name=ThisName, Dimension=ThisDim,
                                                        ID=ThisID, UUID=ThisUUID, Items=TheseItems)
    ci += 1
print('Read index table from model config sheet.')
# Locate the 'Index Table' section header in column 1 of the config sheet.
ITix = 0
while True: # search for index table entry
    if Model_Configsheet.cell_value(ITix,1) == 'Index Table':
        break
    else:
        ITix += 1
# One list per index-table column (aspect name, description, dimension,
# classification, item selector, index letter).
IT_Aspects = []
IT_Description = []
IT_Dimension = []
IT_Classification = []
IT_Selector = []
IT_IndexLetter = []
ITix += 2 # start on first data row
while True:
    if len(Model_Configsheet.cell_value(ITix,2)) > 0:
        IT_Aspects.append(Model_Configsheet.cell_value(ITix,2))
        IT_Description.append(Model_Configsheet.cell_value(ITix,3))
        IT_Dimension.append(Model_Configsheet.cell_value(ITix,4))
        IT_Classification.append(Model_Configsheet.cell_value(ITix,5))
        IT_Selector.append(Model_Configsheet.cell_value(ITix,6))
        IT_IndexLetter.append(Model_Configsheet.cell_value(ITix,7))
        ITix += 1
    else:
        break
print('Read parameter list from model config sheet.')
# Locate the 'Model Parameters' section header in column 1.
PLix = 0
while True: # search for parameter list entry
    if Model_Configsheet.cell_value(PLix,1) == 'Model Parameters':
        break
    else:
        PLix += 1
# One list per parameter-table column.
PL_Names = []
PL_Description = []
PL_Version = []
PL_IndexStructure = []
PL_IndexMatch = []
PL_IndexLayer = []
PLix += 2 # start on first data row
while True:
    if len(Model_Configsheet.cell_value(PLix,2)) > 0:
        PL_Names.append(Model_Configsheet.cell_value(PLix,2))
        PL_Description.append(Model_Configsheet.cell_value(PLix,3))
        PL_Version.append(Model_Configsheet.cell_value(PLix,4))
        PL_IndexStructure.append(Model_Configsheet.cell_value(PLix,5))
        PL_IndexMatch.append(Model_Configsheet.cell_value(PLix,6))
        # strip numbers out of list string
        PL_IndexLayer.append(msf.ListStringToListNumbers(Model_Configsheet.cell_value(PLix,7)))
        PLix += 1
    else:
        break
print('Read process list from model config sheet.')
# Locate the 'Process Group List' section header in column 1.
PrLix = 0
while True: # search for process list entry
    if Model_Configsheet.cell_value(PrLix,1) == 'Process Group List':
        break
    else:
        PrLix += 1
PrL_Number = []
PrL_Name = []
PrL_Code = []
PrL_Type = []
PrLix += 2 # start on first data row
while True:
    if Model_Configsheet.cell_value(PrLix,2) != '':
        # Process numbers are stored as floats by Excel; fall back to the
        # raw cell value when the int() conversion fails.
        try:
            PrL_Number.append(int(Model_Configsheet.cell_value(PrLix,2)))
        except:
            PrL_Number.append(Model_Configsheet.cell_value(PrLix,2))
        PrL_Name.append(Model_Configsheet.cell_value(PrLix,3))
        PrL_Code.append(Model_Configsheet.cell_value(PrLix,4))
        PrL_Type.append(Model_Configsheet.cell_value(PrLix,5))
        PrLix += 1
    else:
        break
print('Read model run control from model config sheet.')
# Locate the 'Model flow control' section and copy its key/value rows
# (columns 2/3) into ScriptConfig until a blank key row is reached.
PrLix = 0
while True:  # search for model flow control entry
    if Model_Configsheet.cell_value(PrLix, 1) == 'Model flow control':
        break
    else:
        PrLix += 1
PrLix += 2  # start on first data row
while True:
    if Model_Configsheet.cell_value(PrLix, 2) != '':
        try:
            ScriptConfig[Model_Configsheet.cell_value(PrLix, 2)] = Model_Configsheet.cell_value(PrLix, 3)
        except Exception:
            # Skip malformed rows.  The original bare 'except: None' would
            # also have swallowed SystemExit / KeyboardInterrupt.
            pass
        PrLix += 1
    else:
        break
print('Define model classifications and select items for model classifications according to information provided by config file.')
ModelClassification = {} # Dict of model classifications
for m in range(0,len(IT_Aspects)):
    # Start from a deep copy of the master classification, then narrow its
    # items according to the selector string from the config sheet.
    ModelClassification[IT_Aspects[m]] = deepcopy(MasterClassification[IT_Classification[m]])
    EvalString = msf.EvalItemSelectString(IT_Selector[m],len(ModelClassification[IT_Aspects[m]].Items))
    if EvalString.find(':') > -1: # range of items is taken
        RangeStart = int(EvalString[0:EvalString.find(':')])
        RangeStop = int(EvalString[EvalString.find(':')+1::])
        ModelClassification[IT_Aspects[m]].Items = ModelClassification[IT_Aspects[m]].Items[RangeStart:RangeStop]
    elif EvalString.find('[') > -1: # selected items are taken
        # NOTE(review): eval of spreadsheet-derived text — assumes config
        # files are trusted input; confirm.
        ModelClassification[IT_Aspects[m]].Items = \
            [ModelClassification[IT_Aspects[m]].Items[i] for i in eval(EvalString)]
    elif EvalString == 'all':
        None
    else:
        Mylog.info('ITEM SELECT ERROR for aspect ' + IT_Aspects[m] + ' were found in datafile.</br>')
        break
```
The following code block creates the index table for the MFA system and parses all parameter files specified into the parameter dictionary ParameterDict.
```
# Define model index table and parameter dictionary
# Model time span derived from the selected 'Time' classification items.
Model_Time_Start = int(min(ModelClassification['Time'].Items))
Model_Time_End = int(max(ModelClassification['Time'].Items))
Model_Duration = Model_Time_End - Model_Time_Start
print('Define index table dataframe.')
IndexTable = pd.DataFrame({'Aspect'        : IT_Aspects, # 'Time' and 'Element' must be present!
                           'Description'   : IT_Description,
                           'Dimension'     : IT_Dimension,
                           'Classification': [ModelClassification[Aspect] for Aspect in IT_Aspects],
                           # Unique one letter (upper or lower case) indices to be used later for calculations.
                           'IndexLetter'   : IT_IndexLetter})
# Default indexing of IndexTable, other indices are produced on the fly
IndexTable.set_index('Aspect', inplace = True)
# Add indexSize to IndexTable:
IndexTable['IndexSize'] = \
    pd.Series([len(IndexTable.Classification[i].Items) for i in range(0,len(IndexTable.IndexLetter))], index=IndexTable.index)
# list of the classifications used for each indexletter
IndexTable_ClassificationNames = [IndexTable.Classification[i].Name for i in range(0,len(IndexTable.IndexLetter))]
#Define shortcuts for the most important index sizes:
Nt = len(IndexTable.Classification[IndexTable.index.get_loc('Time')].Items)
NR = len(IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('r')].Items)
NG = len(IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('g')].Items)
NM = len(IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('m')].Items)
print('Read model data and parameters.')
# Parse every parameter file listed in the config sheet into ParameterDict,
# including uncertainty information (ParseUncertainty = True).
ParameterDict = {}
for mo in range(0,len(PL_Names)):
    ParPath = os.path.join(DataPath,PL_Version[mo])
    print('Reading parameter ' + PL_Names[mo])
    # Do not change order of parameters handed over to function!
    MetaData, Values, Uncertainty = msf.ReadParameterV2(ParPath, PL_Names[mo], PL_IndexStructure[mo],
                                                        PL_IndexMatch[mo], PL_IndexLayer[mo],
                                                        MasterClassification, IndexTable,
                                                        IndexTable_ClassificationNames, ScriptConfig, Mylog, ParseUncertainty = True)
    ParameterDict[PL_Names[mo]] = msc.Parameter(Name = MetaData['Dataset_Name'],
                                                ID = MetaData['Dataset_ID'],
                                                UUID = MetaData['Dataset_UUID'],
                                                P_Res = None,
                                                MetaData = MetaData,
                                                Indices = PL_IndexStructure[mo],
                                                Values=Values,
                                                Uncert=Uncertainty,
                                                Unit = MetaData['Dataset_Unit'])
```
The classifications for the different parameter aspects specified in all the parameter files must be the same as specified in the config file for the model run. The resolution can differ, however: The parameter files can contain classification items not selected for the current model run, and the model run can contain classification items for which no data are present, in which case the value remains at zero.
In the report returned by msf.ReadParameterV2 above, the function lists the number of values read from each parameter file and the number of values assigned to the parameter arrays in the parameter dictionary.
Not all values are assigned (e.g., the vehicle stock in the parameter file is given for 2015 only) and more values than needed for the parameter files may be present (e.g. vehicle registration and vehicle stock data are present for more countries than selected for the model run).
Next, we define proxies to fill data gaps. This proxy-filling can be done in the parameter files, if general, but should be done in the model here, if model-specific.
```
# Replicate lifetime, given for 2010 age-cohort, for all age-cohorts
# (index 20 = offset of 2010 on the 1990-based time axis — TODO confirm axis origin)
ParameterDict['ODYM_Tutorial5_VehicleLifetime'].Values[0,0,:,:] = np.einsum('r,c->rc',ParameterDict['ODYM_Tutorial5_VehicleLifetime'].Values[0,0,:,20],np.ones(Nt))
for m in range(0,NR): # Replicate uncertainty (not very elegantly); m runs over regions here
    for n in range(0,Nt):
        ParameterDict['ODYM_Tutorial5_VehicleLifetime'].Uncert[m*Nt +n] = ParameterDict['ODYM_Tutorial5_VehicleLifetime'].Uncert[m*Nt+20]
# Replicate vehicle material content for all countries and age-cohorts
ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values[:,0,:,:] = np.einsum('m,rc->mrc',ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values[:,0,0,20],np.ones((NR,Nt)))
for m in range(0,NM): # Replicate uncertainty (not very elegantly); m runs over materials here
    for r in range(0,NR):
        for t in range(0,Nt):
            ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Uncert[m*Nt*NR + r*Nt +t] = ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Uncert[m*Nt*NR +20]
# The missing years (prior to 2005) for the vehicle registration are estimated later.
# The stock is only given for 2015 as a reference.
```
### 4) Define MFA system
With the index table and parameter dictionary defined, we can now define the MFA system.
```
Mylog.info('### 4 - Define MFA system')
print('Define MFA system and processes.')
# Instantiate the MFA system shell; processes, flows, and stocks are added
# below, the parameter dictionary was parsed above.
PassengerVehicleFleet_MFA_System = msc.MFAsystem(Name = 'Materials_GlobalPassengerVehicleFleet',
                                                 Geogr_Scope = 'World',
                                                 Unit = 'Mt',
                                                 ProcessList = [],
                                                 FlowDict = {},
                                                 StockDict = {},
                                                 ParameterDict = ParameterDict,
                                                 Time_Start = Model_Time_Start,
                                                 Time_End = Model_Time_End,
                                                 IndexTable = IndexTable,
                                                 Elements = IndexTable.loc['Element'].Classification.Items,
                                                 Graphical = None) # Initialize MFA system
# Check Validity of index tables:
# returns true if dimensions are OK and time index is present and element list is not empty
PassengerVehicleFleet_MFA_System.IndexTableCheck()
# Add processes to system
for m in range(0, len(PrL_Number)):
    PassengerVehicleFleet_MFA_System.ProcessList.append(msc.Process(Name = PrL_Name[m], ID = PrL_Number[m]))
# Define system variables: 6 flows.
# Index letters per flow follow the index table above (t: time, c: age-cohort,
# g: good, r: region, m: material, w: waste group, e: chemical element).
PassengerVehicleFleet_MFA_System.FlowDict['F_0_1'] = msc.Flow(Name = 'Passenger vehicle production', P_Start = 0,
                                                              P_End = 1, Indices = 't,g,m,e',
                                                              Values=None, Uncert=None, Color = None,
                                                              ID = None, UUID = None)
PassengerVehicleFleet_MFA_System.FlowDict['F_1_2'] = msc.Flow(Name = 'New registration of vehicles', P_Start = 1,
                                                              P_End = 2, Indices = 't,g,r,m,e',
                                                              Values=None, Uncert=None, Color = None,
                                                              ID = None, UUID = None)
PassengerVehicleFleet_MFA_System.FlowDict['F_2_3'] = msc.Flow(Name = 'Scrapping of vehicles', P_Start = 2,
                                                              P_End = 3, Indices = 't,c,g,r,m,e',
                                                              Values=None, Uncert=None, Color = None,
                                                              ID = None, UUID = None)
PassengerVehicleFleet_MFA_System.FlowDict['F_3_4'] = msc.Flow(Name = 'Vehicle scrap', P_Start = 3,
                                                              P_End = 4, Indices = 't,w,m,e',
                                                              Values=None, Uncert=None, Color = None,
                                                              ID = None, UUID = None)
PassengerVehicleFleet_MFA_System.FlowDict['F_3_0'] = msc.Flow(Name = 'Recovery losses', P_Start = 3,
                                                              P_End = 0, Indices = 't,m,e',
                                                              Values=None, Uncert=None, Color = None,
                                                              ID = None, UUID = None)
PassengerVehicleFleet_MFA_System.FlowDict['F_4_0'] = msc.Flow(Name = 'Vehicle scrap supply', P_Start = 4,
                                                              P_End = 0, Indices = 't,w,m,e',
                                                              Values=None, Uncert=None, Color = None,
                                                              ID = None, UUID = None)
# Define system variables: 1 stock and 1 stock change:
PassengerVehicleFleet_MFA_System.StockDict['S_2'] = msc.Stock(Name = 'In-use stock', P_Res = 2, Type = 0,
                                                              Indices = 't,c,g,r,m,e', Values=None, Uncert=None,
                                                              ID = None, UUID = None)
PassengerVehicleFleet_MFA_System.StockDict['dS_2'] = msc.Stock(Name = 'Net in-use stock change', P_Res = 2, Type = 1,
                                                               Indices = 't,g,r,m,e', Values=None, Uncert=None,
                                                               ID = None, UUID = None)
PassengerVehicleFleet_MFA_System.Initialize_StockValues() # Assign empty arrays to stocks according to dimensions.
PassengerVehicleFleet_MFA_System.Initialize_FlowValues() # Assign empty arrays to flows according to dimensions.
# Quick inspection of the system that was just built:
PassengerVehicleFleet_MFA_System.IndexTable
PassengerVehicleFleet_MFA_System.ParameterDict
print(PassengerVehicleFleet_MFA_System.ProcessList)
print([i.Name for i in PassengerVehicleFleet_MFA_System.ProcessList])
print([i.ID for i in PassengerVehicleFleet_MFA_System.ProcessList])
```
### 5) Building and solving the MFA model, without consideration of parameter uncertainty
With the parameter loaded into the MFA system structure the system model can now be built and solved as shown in the previous tutorials.
In a first step, the dynamic stock model for the passenger vehicle fleet is computed:
```
Mylog.info('### 5 - Building and solving the MFA model, without consideration of parameter uncertainty')
# 1) Determine vehicle stock and outflow by age-cohort from registration data. These calculations are done outside of the MFA system,
# as we are not yet on the material level but at the product level.
# The 2015 stock is calculated and compared to the actual reported stock.
Dyn_MFA_EstimatedVehicleStock2015 = np.zeros((NR)) # determined for calibration purposes.
GlobalVehicleStock_TimeSeries = np.zeros((Nt,Nt,NG,NR)) # determined for visualisation and verification purposes and for the Monte-Carlo simulation.
GlobalEoL_Vehicles_TimeSeries = np.zeros((Nt,Nt,NG,NR)) # determined for visualisation and verification purposes and for the Monte-Carlo simulation.
print('Solving dynamic stock model of the passenger vehicle fleet for: ')
for region in np.arange(0,NR):
    # Determine lifetime distribution from uncertainty string (of first age-cohort, as all age-cohorts have the same lifetime here)
    UncParts = ParameterDict['ODYM_Tutorial5_VehicleLifetime'].Uncert[region*Nt].split(';') # parse uncertainty string
    # Fix: use the builtin float() instead of np.float. np.float was a plain
    # alias of the builtin; it was deprecated in NumPy 1.20 and removed in 1.24.
    if int(UncParts[0]) == 3: # Define dictionary for normally distributed lifetime
        LT = {'Type': 'Normal', 'Mean': [float(UncParts[1])],'StdDev': [float(UncParts[2])]}
    if int(UncParts[0]) == 8: # Define dictionary for Weibull-distributed lifetime
        LT = {'Type': 'Weibull', 'Scale': [1/float(UncParts[2])],'Shape': [float(UncParts[3])]}
    # 1a) Loop over all regions to determine inflow-driven stock of vehicles, with pre 2005 age-cohorts absent
    print(IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('r')].Items[region])
    # Create helper DSM for computing the dynamic stock model:
    DSM_Inflow = dsm.DynamicStockModel(t = np.array(IndexTable.Classification[IndexTable.index.get_loc('Time')].Items),
                                       i = PassengerVehicleFleet_MFA_System.ParameterDict['ODYM_Tutorial5_VehicleNewRegistration'].Values[0,region,:,0],
                                       lt = LT)
    Stock_by_cohort = DSM_Inflow.compute_s_c_inflow_driven()
    #print(Stock_by_cohort.shape)
    O_C = DSM_Inflow.compute_o_c_from_s_c()
    #print(O_C.shape)
    S = DSM_Inflow.compute_stock_total()
    #print(S.shape)
    DS = DSM_Inflow.compute_stock_change()
    #print(DS.shape)
    Dyn_MFA_EstimatedVehicleStock2015[region] = S[25] # index 25 = 2015 on the 1990-based time axis
    # Calibration: the following countries got their lifetime adjusted so that the stock estimate from
    # the DSM for inflow from 2005-2015 is not much bigger than the total stock,
    # which also comprises pre-2005 inflow data, which we don't have.
    # Luxembourg: 16->12 years, Singapore and New Caledonia: 16->12 years, Saudi-Arabia: 12 years,
    # Oman (stock unreasonably small), Cambodia (no stock data at all)
    # These countries have either too small stocks reported or the lifetime for some of the 2005-2015 age-cohorts
    # was much shorter than indicated in the parameter file, which is possible, especially in rich countries, where
    # vehicles get exported at young age.
    # Apart from changing the lifetime distribution, no calibration was performed, and registration data were used instead.
    # 1b) Estimate pre-2005 vehicle registration.
    StockDiff_r = 1000 * ParameterDict['ODYM_Tutorial5_VehicleStock2015'].Values[0,region,25,0] - Dyn_MFA_EstimatedVehicleStock2015[region]
    print('2015 stock difference before estimation of pre-2005 age-cohorts: ',StockDiff_r)
    # estimate size of original inflow for countries where pre-2005 age-cohorts are >0 (applies to 124 out of 130 countries)
    # Assumption: constant inflow for 15 years (1990-2004)
    if StockDiff_r > 0:
        Inflow_original = [StockDiff_r / 15 / DSM_Inflow.sf[25,i] for i in range(0,15)] # divide by value of survival function in 2015
        PassengerVehicleFleet_MFA_System.ParameterDict['ODYM_Tutorial5_VehicleNewRegistration'].Values[0,region,0:15,0] = Inflow_original
    # 1c) re-calculate dynamic stock model with pre 2005 age-cohorts
    DSM_Inflow = dsm.DynamicStockModel(t = np.array(IndexTable.Classification[IndexTable.index.get_loc('Time')].Items),
                                       i = PassengerVehicleFleet_MFA_System.ParameterDict['ODYM_Tutorial5_VehicleNewRegistration'].Values[0,region,:,0],
                                       lt = LT)
    Stock_by_cohort = DSM_Inflow.compute_s_c_inflow_driven()
    #print(Stock_by_cohort.shape)
    O_C = DSM_Inflow.compute_o_c_from_s_c()
    #print(O_C.shape)
    S = DSM_Inflow.compute_stock_total()
    #print(S.shape)
    DS = DSM_Inflow.compute_stock_change()
    #print(DS.shape)
    Dyn_MFA_EstimatedVehicleStock2015[region] = S[25]
    StockDiff_r = 1000 * ParameterDict['ODYM_Tutorial5_VehicleStock2015'].Values[0,region,25,0] - Dyn_MFA_EstimatedVehicleStock2015[region]
    print('2015 stock difference after estimation of pre-2005 age-cohorts: ',StockDiff_r)
    print('')
    # 1d) Multiply results with vehicle material content and assign to MFA system:
    GlobalVehicleStock_TimeSeries[:,:,0,region] = Stock_by_cohort
    GlobalEoL_Vehicles_TimeSeries[:,:,0,region] = O_C
    PassengerVehicleFleet_MFA_System.FlowDict['F_1_2'].Values[:,:,region,:,0] = \
    np.einsum('mgc,cg->cgm',ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values[:,:,region,:], \
    PassengerVehicleFleet_MFA_System.ParameterDict['ODYM_Tutorial5_VehicleNewRegistration'].Values[0,region,:,:]) / 1e9
    # For the inflow, the age-cohort c is recorded in year t, and c = t.
    # Here, region is not an index, as it is fixed, and the aspects process (U) and chemical element (e) are fixed
    # because they are not listed explicitly in the material composition (e) and the flow dictionary (U).
    PassengerVehicleFleet_MFA_System.FlowDict['F_2_3'].Values[:,:,:,region,:,0] = \
    np.einsum('mgc,tc->tcgm',ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values[:,:,region,:],O_C) / 1e9
    PassengerVehicleFleet_MFA_System.StockDict['S_2'].Values[:,:,:,region,:,0] = \
    np.einsum('mgc,tc->tcgm',ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values[:,:,region,:],Stock_by_cohort) / 1e9
    PassengerVehicleFleet_MFA_System.StockDict['dS_2'].Values[:,:,region,:,0] = \
    np.einsum('mgc,c->cgm',ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values[:,:,region,:],DS) / 1e9
    # For the net stock change, the age-cohort c is recorded in year t, and c = t.
    # Compare stock estimated by DSM with reported stock for 2015
    # and plot stock difference for all countries, after calibration:
    #StockDiff = 1000 * ParameterDict['ODYM_Tutorial5_VehicleStock2015'].Values[0,:,25,0] - Dyn_MFA_EstimatedVehicleStock2015
    #print('')
    #print('Difference between reported stock and estimated remainder from 2005-2015 age-cohorts: ')
    #print(list(zip(IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('r')].Items,StockDiff.tolist())))
```
In a second step, we compute the available scrap and the remaining flows:
```
# Production equals total new registration, summed over regions (r).
PassengerVehicleFleet_MFA_System.FlowDict['F_0_1'].Values = np.einsum('tgrme->tgme',PassengerVehicleFleet_MFA_System.FlowDict['F_1_2'].Values)
# Sort scrapped-vehicle material (summed over cohorts c and regions r) into waste groups w via the EoL recovery rate.
PassengerVehicleFleet_MFA_System.FlowDict['F_3_4'].Values = np.einsum('gmw,tcgrme->twme',ParameterDict['ODYM_Tutorial5_EoLRecoveryRate'].Values,PassengerVehicleFleet_MFA_System.FlowDict['F_2_3'].Values)
# All recovered scrap is passed on as supply (process 4 -> 0).
PassengerVehicleFleet_MFA_System.FlowDict['F_4_0'].Values = PassengerVehicleFleet_MFA_System.FlowDict['F_3_4'].Values
# Recovery losses = total EoL vehicle material minus recovered scrap.
PassengerVehicleFleet_MFA_System.FlowDict['F_3_0'].Values = np.einsum('tcgrme->tme',PassengerVehicleFleet_MFA_System.FlowDict['F_2_3'].Values) - np.einsum('twme->tme',PassengerVehicleFleet_MFA_System.FlowDict['F_3_4'].Values)
# Mass balance check:
Bal = PassengerVehicleFleet_MFA_System.MassBalance()
print(Bal.shape) # dimensions of balance are: time step x process x chemical element
print(np.abs(Bal).sum(axis = 0)) # reports the sum of all absolute balancing errors by process.
```
Evaluation of results, short, because we will focus on the Monte-Carlo Simulation later:
```
# Aggregate the in-use material stock over age-cohorts (c), goods (g), and elements (e).
# Fix: S_2 is indexed t,c,g,r,m,e (see its definition), but the original einsum
# used the label string 'tcrgme', which swaps the g and r axes and mislabels
# the region dimension of the intermediate result. Corrected to 'tcgrme'.
MatStock_trm = np.einsum('tcgrme->trm',PassengerVehicleFleet_MFA_System.StockDict['S_2'].Values)
MatStock_2017_global = np.einsum('rm->m',MatStock_trm[27,:,:]) # index 27 = 2017 on the 1990-based time axis
print('Material stock in Mt, 2017, global')
[print(i) for i in list(zip(IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('m')].Items,MatStock_2017_global))]
```
The figures above provide a preliminary answer to the first research question: _How big is the material stock currently embodied in the global passenger vehicle fleet?_
The answer is preliminary, because the variability of the metal composition of the individual car and thus the uncertainty of aggregate total metal content is quite high for some metals, and we need to quantify the uncertainty ranges using a Monte-Carlo-Simulation in the next section.
Here, we will answer the second question: _When will this material become available for recycling?_
The time frame of the considered historic vehicle registration is 1990-2017, but we extended the model time frame until 2050, with zero new registrations after 2017. That means that the calculated future scrap flows result from the 2017 stock only and can be extracted directly to answer the second question:
```
# Future material outflow by year (t), region (r), and material (m):
# (time index 28 onward, i.e. years after 2017 on the 1990-based axis; good and element fixed to index 0)
Future_Material_Outflow_trm = np.einsum('tcrm->trm',PassengerVehicleFleet_MFA_System.FlowDict['F_2_3'].Values[28::,:,0,:,:,0])
# Future scrap flow by year (t) and scrap type (w):
# NOTE(review): despite the '_tm' suffix, the result is indexed (t, w) — scrap groups, not materials.
Future_Scrap_Availability_tm = np.einsum('twme->tw',PassengerVehicleFleet_MFA_System.FlowDict['F_4_0'].Values[28::,:,:,:])
```
Future_Material_Outflow_trm can now be analysed for certain regions, e.g. for industries and policy makers to understand which materials will be available in which quantities and when. Since the model tracks different age-cohorts, one can in principle also quantify the outflow of individual age-cohorts, e.g., when they contain certain alloys or contaminants. Here, we focus on the estimation of the scrap flows from the 2017 vehicle fleet.
```
width = 35
height = 25
# Display labels for the waste/scrap groups.
# NOTE(review): .Items is the classification's own list, so the renamings below
# also rename these items wherever the classification is used later (e.g. the
# Excel export labels) — confirm this aliasing is intended.
WasteGroups = IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('w')].Items
WasteGroups[1] = 'Al extrusion scrap'
WasteGroups[5] = 'Shredder light fraction'
MyColorCycle = pylab.cm.Paired(np.arange(0,1,0.1)) # select 10 colors from the 'Paired' color map.
# plot at linear scale
# Fix: pass figsize directly to subplots(). The original called plt.figure(figsize=...)
# AFTER plt.subplots(), which opened a second, empty figure and left the plotted
# figure at its default size.
fig, ax = plt.subplots(figsize=(width, height))
for m in range(0,len(WasteGroups)):
    ax.plot(PassengerVehicleFleet_MFA_System.IndexTable['Classification']['Time'].Items[28::],
            Future_Scrap_Availability_tm[:,m],
            color = MyColorCycle[m,:], linewidth = 2)
ax.set_ylabel('Scrap flows, Mt/yr,',fontsize =16)
ax.legend(WasteGroups, loc='upper right',prop={'size':8})
fig.savefig('ScrapFlows_2017', dpi = 400)
# plot at log scale (same fix as above)
fig, ax = plt.subplots(figsize=(width, height))
for m in range(0,len(WasteGroups)):
    ax.semilogy(PassengerVehicleFleet_MFA_System.IndexTable['Classification']['Time'].Items[28::],
                Future_Scrap_Availability_tm[:,m],
                color = MyColorCycle[m,:], linewidth = 2)
ax.set_ylabel('Scrap flows, Mt/yr,',fontsize =16)
ax.legend(WasteGroups, loc='upper right',prop={'size':8})
```
One can see that the pre-2020 outflows will decline sharply by about 25%, before they reach a plateau between ca. 2021 and 2029. This plateau is probably the consequence of two peaks overlapping, one pre-2017 peak (from 2002 +/- 5 years cars) and one ca. 2027 (from 2012 +/- 5 years cars). To find out which countries are causing this behaviour, let's zoom into the region-specific results! The value for electronic waste is very low because, due to data limitations, it was assumed that all copper goes into the copper wire scrap fraction and all plastic into the shredder light fraction.
```
width = 35
height = 25
# We want to plot the ten countries with the largest EoL vehicle flows in 2020:
# (index 2 of the future time axis, which starts in 2018)
Sort = np.argsort(Future_Material_Outflow_trm[2,:,:].sum(axis =1))
Top10 = [i for i in reversed(Sort[-10::])] # argsort is ascending; reverse for largest-first
Top10Regions = [IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('r')].Items[i] for i in Top10]
MyColorCycle = pylab.cm.Paired(np.arange(0,1,0.1)) # select 10 colors from the 'Paired' color map.
# Fix: pass figsize directly to subplots(). The original called plt.figure(figsize=...)
# AFTER plt.subplots(), which opened a second, empty figure and left the plotted
# figure at its default size.
fig, ax = plt.subplots(figsize=(width, height))
for m in range(0,len(Top10)):
    ax.plot(PassengerVehicleFleet_MFA_System.IndexTable['Classification']['Time'].Items[28::],
            Future_Material_Outflow_trm[:,Top10[m],:].sum(axis =1),
            color = MyColorCycle[m,:], linewidth = 2)
ax.set_ylabel('Scrap flows, Mt/yr,',fontsize =16)
ax.legend(Top10Regions, loc='upper right',prop={'size':8})
fig.savefig('ScrapFlows_2017_Top10Regions', dpi = 400)
```
The total outflow from the 2017 stock is indeed the consequence of two overlapping peaks, one resulting from the constantly high car sales in the richest countries, and the later one resulting from the recent sharp increase of vehicle registration in China.
### 6) Performing a Monte-Carlo-Simulation
ODYM has no built-in MC tool yet, as practices still need to evolve and the different application cases vary a lot.
Re-sampling an entire parameter from the Uncertainty information is easy to implement but often very inefficient, as, like in this case, few actually known parameter values are replicated to span all countries and age-cohorts.
Here, we therefore sample only the 25 original material content values, and replicate the sampled values to cover all regions and age-cohorts. Of course, this can be changed when more data are available. We sample the material content per vehicle NMC times from its defined distribution and re-calculate the stock and outflow variables.
The result can be visualized in box plots, for example, as shown below.
```
Mylog.info('### 6 - Performing a Monte-Carlo-Simulation')
NMC = 2000 # Number of Monte-Carlo runs
MatContent_Samples = np.zeros((NMC,NM)) # define sample array for material content
MatStock_2017_MC = np.zeros((NMC,NM)) # define sample array for material stock
MatOFlow_2017_MC = np.zeros((NMC,NM)) # define sample array for material in EoL vehicles
# 1) re-sample the material composition
for m in range(0,NM):
    UncParts = ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Uncert[m*Nt*NR +20].split(';') # parse uncertainty string for material m
    print(UncParts)
    # Fix: the fields of the uncertainty string are str; cast the min/max bounds
    # to float explicitly — np.random.uniform requires numeric low/high arguments
    # and fails on strings.
    MatContent_Samples[:,m] = np.random.uniform(float(UncParts[1]),float(UncParts[2]),NMC)
# 2) re-calculate the material stock and the outflow
print('')
for s in range(0,NMC):
    if s % 100 == 0: # progress indicator
        print(s)
    # 2a) Replicate vehicle material content for all countries and age-cohorts
    ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values[:,0,:,:] = np.einsum('m,rc->mrc',MatContent_Samples[s,:],np.ones((NR,Nt)))
    # 2b) Assign aggregated result to result array (index 27 = 2017):
    MatStock_2017_MC[s,:] = np.einsum('mgrc,cgr->m',ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values,GlobalVehicleStock_TimeSeries[27,:,:,:]) / 1e9
    MatOFlow_2017_MC[s,:] = np.einsum('mgrc,cgr->m',ParameterDict['ODYM_Tutorial5_VehicleMaterialContent'].Values,GlobalEoL_Vehicles_TimeSeries[27,:,:,:]) / 1e9
# Steel only (largest by far)
fig, ax = plt.subplots()
ax.boxplot(MatStock_2017_MC[:,0], notch=True)
ax.set_title('Variation of material content, steel, in Mt, for 2017 stock.')
ax.set_xticklabels([IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('m')].Items[0]], fontsize =14)
fig.savefig('BoxPlot_Steel_2017', dpi = 400)
# Selected materials (largest second to steel)
fig, ax = plt.subplots()
ax.boxplot(MatStock_2017_MC[:,[1,2,4,11,13,19,20,21,24]], notch=True)
ax.set_title('Variation of material content, in Mt, for 2017 stock.')
PLabels = [IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('m')].Items[i] for i in[1,2,4,11,13,19,20,21,24]]
PLabels[1] = 'Copper' # shorter display label
ax.set_xticklabels(PLabels, fontsize =12, rotation =90)
fig.savefig('BoxPlot_OtherMaterials_2017_Sel', dpi = 400, bbox_inches='tight')
# All materials (other than steel)
fig, ax = plt.subplots()
ax.boxplot(MatStock_2017_MC[:,1::], notch=True)
ax.set_title('Variation of material content, in Mt, for 2017 stock.')
fig.savefig('BoxPlot_OtherMaterials_2017', dpi = 400)
```
As the material content array comes with min/max uncertainty/variability ranges, from which uniformly distributed samples were drawn, the box plot shows no outliers and the 1./3. quartiles span exactly half the space between min and max values. It does show how the variability of the different material content estimates impacts the variation of the material stock estimate. While the estimates for materials 20-22 (rubber, glass, and ceramics) are rather certain, the one for material 11 (Aluminium) varies substantially, which reflects the high variability of the Al content of passenger vehicles.
If the data are well structured and the model is set up accordingly, performing an uncertainty analysis can be quickly done, as the brevity of the Monte-Carlo-code above shows.
### 7) Exporting results and close model calculation
It is good practice to export the data behind all figures plotted and those shown in large tables so that other researchers can easily look them up and re-use them.
```
# Define excel export function
def ExcelSheetFill(Workbook, Sheetname, values, topcornerlabel=None,
                   rowlabels=None, collabels=None, Style=None,
                   rowselect=None, colselect=None):
    """Write a 2D value array to a new sheet of an xlwt Workbook.

    Parameters
    ----------
    Workbook : xlwt.Workbook (or compatible)
        Workbook to which a new sheet is added.
    Sheetname : str
        Name of the sheet to create.
    values : 2D array-like with a .shape attribute
        Values written to the sheet body, offset by one row and one column
        to leave room for the labels.
    topcornerlabel : optional
        Label for cell (0, 0).
    rowlabels, collabels : sequence, optional
        Labels for the first column / the first row.
    Style : xlwt.XFStyle, optional
        Style applied to the labels (values are written without style).
    rowselect, colselect : sequence of 0/1 flags, optional
        Masks selecting which rows/columns of `values` (and their labels)
        are written; by default everything is written.
    """
    Sheet = Workbook.add_sheet(Sheetname)
    if topcornerlabel is not None:
        if Style is not None:
            Sheet.write(0, 0, label=topcornerlabel, style=Style) # write top corner label
        else:
            Sheet.write(0, 0, label=topcornerlabel) # write top corner label
    # Default masks include every row / column.
    if rowselect is None:
        rowselect = np.ones((values.shape[0]))
    if colselect is None:
        colselect = np.ones((values.shape[1]))
    if rowlabels is not None: # write row labels for the selected rows only
        rowindexcount = 0
        for m, lab in enumerate(rowlabels):
            if rowselect[m] == 1: # True if True or 1
                if Style is None:
                    Sheet.write(rowindexcount + 1, 0, label=lab)
                else:
                    Sheet.write(rowindexcount + 1, 0, label=lab, style=Style)
                rowindexcount += 1
    if collabels is not None: # write column labels for the selected columns only
        colindexcount = 0
        for m, lab in enumerate(collabels):
            if colselect[m] == 1: # True if True or 1
                if Style is None:
                    Sheet.write(0, colindexcount + 1, label=lab)
                else:
                    Sheet.write(0, colindexcount + 1, label=lab, style=Style)
                colindexcount += 1
    # write values, compacting skipped rows/columns:
    rowindexcount = 0
    for m in range(0, values.shape[0]): # for all rows
        if rowselect[m] == 1:
            colindexcount = 0
            for n in range(0, values.shape[1]): # for all columns
                if colselect[n] == 1:
                    Sheet.write(rowindexcount + 1, colindexcount + 1, label=values[m, n])
                    colindexcount += 1
            rowindexcount += 1
Mylog.info('### 7 - Exporting results and close model calculation')
#Export to Excel
# Bold font style used for all row/column labels in the exported sheets:
myfont = xlwt.Font()
myfont.bold = True
mystyle = xlwt.XFStyle()
mystyle.font = myfont
Result_workbook = xlwt.Workbook(encoding = 'ascii') # Export element stock by region
#scrap supply, global total, by scrap type:
ExcelSheetFill(Result_workbook, 'F_4_0_Scrap_Supply_types', Future_Scrap_Availability_tm, topcornerlabel = 'F_4_0: Future scrap availability, by year and scrap group, Mt/yr', rowlabels = IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('t')].Items[28::], collabels = IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('w')].Items, Style = mystyle, rowselect = None, colselect = None)
#material outflow, top 10 countries, all scrap types:
ExcelSheetFill(Result_workbook, 'F_2_3_EoL_Material_region', Future_Material_Outflow_trm[:,Top10,:].sum(axis =2), topcornerlabel = 'F_2_3: Future EoL vehicle material flows, by year and region, Mt/yr', rowlabels = IndexTable.Classification[IndexTable.set_index('IndexLetter').index.get_loc('t')].Items[28::], collabels = Top10Regions, Style = mystyle, rowselect = None, colselect = None)
# The Monte-Carlo simulation result can be exported as well but is not exported here to keep the resulting Excel file small.
Result_workbook.save('ODYM_GlobalVehicleFleet_Results.xls')
# Close log file
Mylog.info('Script is finished. Terminating logging process and closing all log files.')
# remove all handlers from logger
root = log.getLogger()
root.handlers = [] # required if you don't want to exit the shell
log.shutdown()
#The end.
```
| github_jupyter |
```
# for use in tutorial and development; do not include this `sys.path` change in production:
import sys ; sys.path.insert(0, "../") # make the local library checkout importable
```
# Statistical Relational Learning with `pslpython`
In this section we'll explore one form of
[*statistical relational learning*](../glossary/#statistical-relational-learning)
called
[*probabilistic soft logic*](../glossary/#probabilistic-soft-logic) (PSL).
One of the examples given for PSL is called [*simple acquaintances*](https://github.com/linqs/psl-examples/tree/master/simple-acquaintances), which uses a graph of some friends, where they live, what interests they share, and then infers who probably knows whom.
Some people explicitly do or do not know each other, while other "knows" relations can be inferred based on whether two people have lived in the same place or share common interest.
The objective is to build a PSL model for [*link prediction*](https://en.wikipedia.org/wiki/Link_prediction), to evaluate the annotations in the friend graph.
In this case, we'll assume that the "knows" relations have been added from a questionable source (e.g., some third-party dataset) so we'll measure a subset of these relations and determine their likelihood.
NB: this is really useful for cleaning up annotations in a large graph!
Now let's load a KG which is an RDF representation of this "simple acquaintances" example, based on using the [`foaf`](http://www.foaf-project.org/) vocabulary:
```
import kglab

# Namespace prefixes used by the RDF data: `acq` for this example's own
# vocabulary, `foaf` ("friend of a friend") for people and their relations.
namespaces = {
    "acq": "http://example.org/stuff/",
    "foaf": "http://xmlns.com/foaf/0.1/",
    }

# Build an empty knowledge graph, then load the "simple acquaintances" triples.
kg = kglab.KnowledgeGraph(
    name = "LINQS simple acquaintance example for PSL",
    base_uri = "http://example.org/stuff/",
    namespaces = namespaces,
    )

kg.load_rdf("../dat/acq.ttl")
```
Take a look at the `dat/acq.ttl` file to see the people and their relations.
Here's a quick visualization of the graph:
```
# Node styling per namespace prefix: foaf-vocabulary nodes small/orange,
# example-data (acq) nodes large/blue.
VIS_STYLE = {
    "foaf": {
        "color": "orange",
        "size": 5,
    },
    "acq":{
        "color": "blue",
        "size": 30,
    },
}

# Leave schema-level triples (rdf:type, rdfs:domain, rdfs:range) out of the picture.
excludes = [
    kg.get_ns("rdf").type,
    kg.get_ns("rdfs").domain,
    kg.get_ns("rdfs").range,
]

subgraph = kglab.SubgraphTensor(kg, excludes=excludes)
pyvis_graph = subgraph.build_pyvis_graph(notebook=True, style=VIS_STYLE)
pyvis_graph.force_atlas_2based() # force-directed layout
pyvis_graph.show("tmp.fig04.html") # render as an interactive HTML file
```
## Loading a PSL model
Next, we'll use the [`pslpython`](https://pypi.org/project/pslpython/) library implemented in Python (atop its core library running in Java) to define three *predicates* (i.e., relations – similar as in RDF) which are: `Neighbors`, `Likes`, `Knows`
```
# Create an empty PSL model; predicates, rules, and data are added below.
psl = kglab.PSLModel(
    name = "simple acquaintances",
)
```
Then add each of the predicates:
```
# All three predicates are binary (size=2). "Knows" is declared open
# (closed=False): its truth values are what PSL will infer.
psl.add_predicate("Neighbors", size=2)
psl.add_predicate("Likes", size=2)
psl.add_predicate("Knows", size=2, closed=False)
```
Next, we'll add a set of probabilistic [*rules*](https://psl.linqs.org/wiki/2.2.1/Rule-Specification.html), all with different weights applied:
1. "Two people who live in the same place are **more** likely to know each other"
2. "Two people who don't live in the same place are **less** likely to know each other"
3. "Two people who share a common interest are **more** likely to know each other"
4. "Two people who both know a third person are **more** likely to know each other"
5. "Otherwise, any pair of people are **less** likely to know each other"
```
# Weighted soft rules 1-5 from the list above; a larger weight makes a rule
# harder to violate. squared=True squares each rule's penalty term
# (see the PSL rule-specification page linked above).
psl.add_rule("Neighbors(P1, L) & Neighbors(P2, L) & (P1 != P2) -> Knows(P1, P2)", weight=20.0, squared=True)
psl.add_rule("Neighbors(P1, L1) & Neighbors(P2, L2) & (P1 != P2) & (L1 != L2) -> !Knows(P1, P2)", weight=5.0, squared=True)
psl.add_rule("Likes(P1, L) & Likes(P2, L) & (P1 != P2) -> Knows(P1, P2)", weight=10.0, squared=True)
psl.add_rule("Knows(P1, P2) & Knows(P2, P3) & (P1 != P3) -> Knows(P1, P3)", weight=5.0, squared=True)
psl.add_rule("!Knows(P1, P2)", weight=5.0, squared=True)
```
Finally we'll add a [*commutative*](https://mathworld.wolfram.com/Commutative.html) rule such that:
> "If Person 1 knows Person 2, then Person 2 also knows Person 1."
```
# Unweighted (hard) constraint: the "knows" relation is symmetric.
psl.add_rule("Knows(P1, P2) = Knows(P2, P1)", weighted=False)
```
To initialize the model, we'll clear any pre-existing data from each of the predicates:
```
# Drop any data left over in the predicates from previous runs.
psl.clear_model()
```
Next we'll create a specific `Subgraph` to transform the names of `foaf:Person` in the graph, since the PSL rules in this example focus on relations among the people:
```
# Collect all foaf:Person nodes (sorted by IRI for a stable ordering) and
# build a Subgraph that maps each person node to an integer id for PSL.
people_iter = kg.rdf_graph().subjects(kg.get_ns("rdf").type, kg.get_ns("foaf").Person)
people_nodes = [ p for p in sorted(people_iter, key=lambda p: str(p)) ]
subgraph = kglab.Subgraph(kg, preload=people_nodes)
```
Now let's query our KG to populate data into the `Neighbors` predicate in the PSL model, based on `foaf:based_near` which represents people who live nearby each other:
```
# Populate the "Neighbors" predicate: distinct pairs of people based near the
# same location (the FILTER only removes self-pairs, so both orderings appear).
sparql = """
SELECT DISTINCT ?p1 ?p2
WHERE {
?p1 foaf:based_near ?l .
?p2 foaf:based_near ?l .
FILTER( ?p1 != ?p2 )
}
"""

# Fix: the loop body lost its indentation in this export (IndentationError as
# written); restored so all three statements run once per query result.
for row in kg.query(sparql):
    p1 = subgraph.transform(row.p1)
    p2 = subgraph.transform(row.p2)
    psl.add_data_row("Neighbors", [p1, p2])
```
Note: these data points are *observations*, i.e., empirical support for the probabilistic model.
Then let's query our KG to populate data into the `Likes` predicate in the PSL model, based on shared interests in `foaf:topic_interest` topics:
```
# Populate the "Likes" predicate: distinct pairs of people who share at least
# one foaf:topic_interest topic, in both orderings.
sparql = """
SELECT DISTINCT ?p1 ?p2
WHERE {
?p1 foaf:topic_interest ?t .
?p2 foaf:topic_interest ?t .
FILTER( ?p1 != ?p2 )
}
"""

# Fix: the loop body lost its indentation in this export (IndentationError as
# written); restored so all three statements run once per query result.
for row in kg.query(sparql):
    p1 = subgraph.transform(row.p1)
    p2 = subgraph.transform(row.p2)
    psl.add_data_row("Likes", [p1, p2])
```
Just for kicks, let's take a look at the internal representation of a PSL predicate, which is a `pandas.DataFrame`:
```
# Inspect the predicate object's attributes; per the note above, its data is
# held internally as a pandas.DataFrame.
predicate = psl.model.get_predicate("Likes")
predicate.__dict__
```
Now we'll load data from the `dat/psl/knows_targets.txt` CSV file, which is a list of `foaf:knows` relations in our graph that we want to analyze.
Each of these has an assumed value of `1.0` (true) or `0.0` (false).
Our PSL analysis will assign probabilities for each so that we can compare which annotations appear to be suspect and require further review:
```
import csv
import pandas as pd
targets = []
rows_list = []
with open("../dat/psl/knows_targets.txt", "r") as f:
reader = csv.reader(f, delimiter="\t")
for i, row in enumerate(reader):
p1 = int(row[0])
p2 = int(row[1])
targets.append((p1, p2))
p1_node = subgraph.inverse_transform(p1)
p2_node = subgraph.inverse_transform(p2)
if (p1_node, kg.get_ns("foaf").knows, p2_node) in kg.rdf_graph():
truth = 1.0
rows_list.append({ 0: p1, 1: p2, "truth": truth})
psl.add_data_row("Knows", [p1, p2], partition="truth", truth_value=truth)
psl.add_data_row("Knows", [p1, p2], partition="targets")
elif (p1_node, kg.get_ns("acq").wantsIntro, p2_node) in kg.rdf_graph():
truth = 0.0
rows_list.append({ 0: p1, 1: p2, "truth": truth})
psl.add_data_row("Knows", [p1, p2], partition="truth", truth_value=truth)
psl.add_data_row("Knows", [p1, p2], partition="targets")
else:
print("UNKNOWN", p1, p2)
```
These data points are considered to be *ground atoms*, each with a *truth* value set initially.
These are also our *targets* for which nodes in the graph to analyze based on the rules.
We'll keep a dataframe called `df_dat` to preserve these values for later use:
```
df_dat = pd.DataFrame(rows_list)
df_dat.head()
```
Next, we'll add `foaf:knows` observations which are in the graph, although not among our set of targets.
This provides more evidence for the probabilistic inference.
Note that since RDF does not allow for representing probabilities on relations, we're using the `acq:wantsIntro` to represent a `foaf:knows` with a `0.0` probability:
```
sparql = """
SELECT ?p1 ?p2
WHERE {
?p1 foaf:knows ?p2 .
}
ORDER BY ?p1 ?p2
"""
for row in kg.query(sparql):
p1 = subgraph.transform(row.p1)
p2 = subgraph.transform(row.p2)
if (p1, p2) not in targets:
psl.add_data_row("Knows", [p1, p2], truth_value=1.0)
sparql = """
SELECT ?p1 ?p2
WHERE {
?p1 acq:wantsIntro ?p2 .
}
ORDER BY ?p1 ?p2
"""
for row in kg.query(sparql):
p1 = subgraph.transform(row.p1)
p2 = subgraph.transform(row.p2)
if (p1, p2) not in targets:
psl.add_data_row("Knows", [p1, p2], truth_value=0.0)
```
Now we're ready to run optimization on the PSL model and infer the *grounded atoms*.
This may take a few minutes to run:
```
psl.infer()
```
Let's examine the results.
We'll get a `pandas.DataFrame` describing the targets in the `Knows` predicate:
```
df = psl.get_results("Knows")
df.head()
```
Now we can compare the "truth" values from our targets, with their probabilities from the inference provided by the PSL model.
Let's build a dataframe to show that:
```
dat_val = {}
df.insert(1, "p1", "")
df.insert(2, "p2", "")
for index, row in df_dat.iterrows():
p1 = int(row[0])
p2 = int(row[1])
key = (p1, p2)
dat_val[key] = row["truth"]
for index, row in df.iterrows():
p1 = int(row[0])
p2 = int(row[1])
key = (p1, p2)
df.at[index, "diff"] = row["truth"] - dat_val[key]
df.at[index, "p1"] = str(subgraph.inverse_transform(p1))
df.at[index, "p2"] = str(subgraph.inverse_transform(p2))
df = df.drop(df.columns[[3, 4]], axis=1)
pd.set_option("max_rows", None)
df.head()
```
In other words, which of these "knows" relations in the graph appears to be suspect, based on our rules plus the other evidence in the graph?
Let's visualize a histogram of how the inferred probabilities are distributed:
```
df["diff"].hist();
```
In most cases there is little or no difference (`0.0 <= d <= 0.2`) in the probabilities for the target relations.
However, some appear to be off by a substantial (`-0.8`) amount, which indicates problems in that part of our graph data.
The following rows show where these `foaf:knows` annotations in the graph differs significantly from their truth values predicted by PSL:
```
df[df["diff"] < -0.2]
```
Speaking of [*human-in-the-loop*](https://derwen.ai/d/human-in-the-loop) practices for AI, using PSL along with a KG seems like a great way to leverage machine learning, so that the people can focus on parts of the graph that have the most uncertainty.
And, therefore, probably provide the best ROI for investing time+cost into curation.
---
## Exercises
**Exercise 1:**
Build a PSL model that tests the "noodle vs. pancake" rules used in an earlier example with our recipe KG.
Which recipes should be annotated differently?
**Exercise 2:**
Try representing one of the other [PSL examples](https://github.com/linqs/psl-examples/) using RDF and `kglab`.
| github_jupyter |
```
from utils import *
from defense import *
from skimage.measure import compare_ssim
import argparse
import imutils
import cv2
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
sess = tf.Session(config=config)
def ssim_score(cleandata, data):
    """Mean per-image SSIM between two batches of images.

    Both arrays are indexed along axis 0 (one image per row); images are
    compared channel-wise (multichannel=True) assuming pixel values lie in
    [0, 1] (data_range=1.0). Returns the average SSIM over the batch.
    """
    scores = [
        compare_ssim(cleandata[i], data[i], multichannel=True, data_range=1.0)
        for i in range(cleandata.shape[0])
    ]
    return np.asarray(scores).mean()
# the seleted data from the imagenet validation set
cleandata = np.load("./data/clean100data.npy")
cleanlabel = np.load("./data/clean100label.npy")
data_FD = defend_FD(cleandata)
data_fdgd = defended('GD',data_FD)
data_fdrd = defended('onlyrand',data_FD)
data_gd = defended('GD',cleandata)
data_rd = defended('onlyrand',cleandata)
data_pd = defended('pixel_deflection',cleandata)
data_shield = defended('SHIELD',cleandata)
data_br = defended('BitReduct',cleandata)
data_tv = defended('TotalVarience',cleandata)
#FD
print(l2_distortion(cleandata,data_FD))
print(ssim_score(cleandata,data_FD))
#FD+GD
print(l2_distortion(cleandata,data_fdgd))
print(ssim_score(cleandata,data_fdgd))
#FD+RD
print(l2_distortion(cleandata,data_fdrd))
print(ssim_score(cleandata,data_fdrd))
#GD
print(l2_distortion(cleandata,data_gd))
print(ssim_score(cleandata,data_gd))
#RD
print(l2_distortion(cleandata,data_rd))
print(ssim_score(cleandata,data_rd))
#PD
print(l2_distortion(cleandata,data_pd))
print(ssim_score(cleandata,data_pd))
#shield
print(l2_distortion(cleandata,data_shield))
print(ssim_score(cleandata,data_shield))
#BR
print(l2_distortion(cleandata,data_br))
print(ssim_score(cleandata,data_br))
data_tv = defended('TotalVarience',cleandata)
print(l2_distortion(cleandata,data_tv))
print(ssim_score(cleandata,data_tv))
np.save('./data/'+'FD'+'_def_data.npy',data_FD)
np.save('./data/'+'fdgd'+'_def_data.npy',data_fdgd)
np.save('./data/'+'fdrd'+'_def_data.npy',data_fdrd)
np.save('./data/'+'rd'+'_def_data.npy',data_rd)
np.save('./data/'+'gd'+'_def_data.npy',data_gd)
np.save('./data/'+'pd'+'_def_data.npy',data_pd)
np.save('./data/'+'shield'+'_def_data.npy',data_shield)
np.save('./data/'+'br'+'_def_data.npy',data_br)
np.save('./data/'+'tv'+'_def_data.npy',data_tv)
data_FD = np.load('./data/'+'FD'+'_def_data.npy')
data_fdgd = np.load('./data/'+'fdgd'+'_def_data.npy')
data_fdrd = np.load('./data/'+'fdrd'+'_def_data.npy')
data_rd = np.load('./data/'+'rd'+'_def_data.npy')
data_gd = np.load('./data/'+'gd'+'_def_data.npy')
data_pd = np.load('./data/'+'pd'+'_def_data.npy')
data_shield = np.load('./data/'+'shield'+'_def_data.npy')
data_br = np.load('./data/'+'br'+'_def_data.npy')
data_tv = np.load('./data/'+'tv'+'_def_data.npy')
plt.imshow(data_FD[0])
(score, diff) = compare_ssim(cleandata[0], data_FD[0], full=True, multichannel=True)
diff = (diff * 255).astype("uint8")
print("SSIM: {}".format(score))
plt.imshow(diff)
```
| github_jupyter |
# Black Scholes Model
The Black Scholes model is considered to be one of the best ways of determining fair prices of options. It requires five variables: the strike price of an option, the current stock price, the time to expiration, the risk-free rate, and the volatility.
## Black and Scholes components
- C = call option price
- N = CDF of the normal distribution
- St= spot price of an asset
- K = strike price
- r = risk-free interest rate
- t = time to maturity
- σ = volatility of the asset
# Black and Scholes assumptions:
- It works on European options that can only be exercised at expiration.
- No dividends paid out during the option’s life.
- No transaction and commissions costs in buying the option.
- The returns on the underlying are normally distributed.
```
from math import log, sqrt, pi, exp
from scipy.stats import norm
from datetime import datetime, date
import numpy as np
import pandas as pd
from pandas import DataFrame
#Define d1 and d2 funcion of Black and Scholes
def d1(S,K,T,r,sigma):
    """Black-Scholes d1 term: (ln(S/K) + (r + sigma^2/2) T) / (sigma sqrt(T))."""
    drift = (r + 0.5 * sigma ** 2) * T
    return (log(S / K) + drift) / (sigma * sqrt(T))
def d2(S,K,T,r,sigma):
    """Black-Scholes d2 term: d1 minus the total volatility sigma*sqrt(T)."""
    total_vol = sigma * sqrt(T)
    return d1(S, K, T, r, sigma) - total_vol
#calculating call option
def bs_call(S, K, T, r, sigma):
    """Black-Scholes price of a European call: S N(d1) - K e^{-rT} N(d2)."""
    discounted_strike = K * exp(-r * T)
    return S * norm.cdf(d1(S, K, T, r, sigma)) - discounted_strike * norm.cdf(d2(S, K, T, r, sigma))
#calculating put option
def bs_put(S,K,T,r,sigma):
    """European put price via put-call parity: P = K e^{-rT} - S + C."""
    call_price = bs_call(S, K, T, r, sigma)
    return K * exp(-r * T) - S + call_price
```
# Collecting the Data
For us to test the formula on a stock we need to get the historical data for that specific stock and the other inputs related to the stock. We will be using Yahoo Finance and the pandas library to get this data.
```
from datetime import datetime, date, timedelta
import numpy as np
import pandas as pd
import pandas_datareader.data as web

stock = 'SPY'
expiry = '12-18-2022'
strike_price = 370

# BUG FIX: use timedelta arithmetic for relative dates.
# today.replace(year=today.year-1) raises ValueError on Feb 29, and
# today.replace(day=today.day-1) raises ValueError on the 1st of any month.
today = datetime.now()
one_year_ago = today - timedelta(days=365)

df = web.DataReader(stock, 'yahoo', one_year_ago, today)
df = df.sort_values(by="Date")
df = df.dropna()
df = df.assign(close_day_before=df.Close.shift(1))
df['returns'] = ((df.Close - df.close_day_before)/df.close_day_before)

# Annualized historical volatility from daily returns (252 trading days/year).
sigma = np.sqrt(252) * df['returns'].std()
# 10-year Treasury yield (^TNX) as the risk-free rate; quoted in percent.
uty = (web.DataReader(
    "^TNX", 'yahoo', today - timedelta(days=1), today)['Close'].iloc[-1])/100
lcp = df['Close'].iloc[-1]  # last closing price (spot)
t = (datetime.strptime(expiry, "%m-%d-%Y") - datetime.utcnow()).days / 365

print('The Option Price is: ', bs_call(lcp, strike_price, t, uty, sigma))
def call_implied_volatility(Price, S, K, T, r):
    """Implied volatility of a European call by brute-force search.

    Scans sigma upward in steps of 0.001. Because the Black-Scholes call
    price is increasing in sigma, the signed test below fires at the first
    sigma whose model price reaches (within 0.001 of) the observed market
    Price. Returns the string "Not Found" if no sigma < 1 matches.
    """
    sigma = 0.001
    while sigma < 1:
        # Black-Scholes call price at the current sigma guess.
        Price_implied = S * \
            norm.cdf(d1(S, K, T, r, sigma))-K*exp(-r*T) * \
            norm.cdf(d2(S, K, T, r, sigma))
        # Signed difference: triggers once the model price comes within
        # 0.001 below the market price (or first exceeds it).
        if Price-(Price_implied) < 0.001:
            return sigma
        sigma += 0.001
    return "Not Found"
def put_implied_volatility(Price, S, K, T, r):
    """Implied volatility of a European put by brute-force search.

    Same upward scan as call_implied_volatility, but the model price comes
    from put-call parity: P = K e^{-rT} - S + C(sigma). Returns the string
    "Not Found" if no sigma < 1 matches.
    """
    sigma = 0.001
    while sigma < 1:
        # Put price via put-call parity at the current sigma guess.
        Price_implied = K*exp(-r*T)-S+bs_call(S, K, T, r, sigma)
        if Price-(Price_implied) < 0.001:
            return sigma
        sigma += 0.001
    return "Not Found"
print("Implied Volatility: " +
str(100 * call_implied_volatility(bs_call(lcp, strike_price, t, uty, sigma,), lcp, strike_price, t, uty,)) + " %")
```
## Implied volatility
The implied volatility is key when measuring whether options prices are cheap or expensive. It allows traders to determine what they think the future volatility is likely to be. It is recommended to buy when the implied volatility is at its lowest as that generally means that their prices are discounted. This is because options that have high levels of implied volatility will result in high-priced option premiums. On the opposite side, when implied volatility is low, this means that the market’s expectations and demand for the option is decreasing, therefore causing prices to decrease.
## Greeks
- Delta: the sensitivity of an option’s price changes relative to the changes in the underlying asset’s price.
- Gamma: the delta’s change relative to the changes in the price of the underlying asset.
- Vega: the sensitivity of an option price relative to the volatility of the underlying asset.
- Theta: the sensitivity of the option price relative to the option’s time to maturity.
- Rho: the sensitivity of the option price relative to interest rates.
## Implied Volatility and the Greeks
Implied Volatility is defined as the expected future volatility of the stock over the life of the option. It is directly influenced by the supply and demand of the underlying option and the market’s expectation of the stock price’s direction. It could be calculated by solving the Black Scholes equation backwards for the volatility starting with the option trading price.
```
from math import log, sqrt, pi, exp
from scipy.stats import norm
from datetime import datetime, date
import numpy as np
import pandas as pd
import pandas_datareader.data as web
#calculating greeks for call options:
def call_delta(S,K,T,r,sigma):
    """Call delta N(d1): sensitivity of the call price to the spot price."""
    d_1 = d1(S, K, T, r, sigma)
    return norm.cdf(d_1)
def call_gamma(S,K,T,r,sigma):
    """Gamma N'(d1) / (S sigma sqrt(T)): identical for calls and puts."""
    d_1 = d1(S, K, T, r, sigma)
    return norm.pdf(d_1) / (S * sigma * sqrt(T))
def call_vega(S,K,T,r,sigma):
    """Vega per 1% volatility move: 0.01 * S N'(d1) sqrt(T)."""
    d_1 = d1(S, K, T, r, sigma)
    return 0.01 * (S * norm.pdf(d_1) * sqrt(T))
def call_theta(S,K,T,r,sigma):
    """Call theta scaled by 0.01: time decay of the call price."""
    d_1 = d1(S, K, T, r, sigma)
    d_2 = d2(S, K, T, r, sigma)
    decay = -(S * norm.pdf(d_1) * sigma) / (2 * sqrt(T))
    carry = r * K * exp(-r * T) * norm.cdf(d_2)
    return 0.01 * (decay - carry)
def call_rho(S,K,T,r,sigma):
    """Call rho per 1% rate move: 0.01 * K T e^{-rT} N(d2)."""
    d_2 = d2(S, K, T, r, sigma)
    return 0.01 * (K * T * exp(-r * T) * norm.cdf(d_2))
#creating greeks for put optios
def put_delta(S,K,T,r,sigma):
    """Put delta: -N(-d1), always in [-1, 0]."""
    d_1 = d1(S, K, T, r, sigma)
    return -norm.cdf(-d_1)
def put_gamma(S,K,T,r,sigma):
    """Gamma N'(d1) / (S sigma sqrt(T)); same formula as call_gamma."""
    d_1 = d1(S, K, T, r, sigma)
    return norm.pdf(d_1) / (S * sigma * sqrt(T))
def put_vega(S,K,T,r,sigma):
    """Vega per 1% volatility move; same formula as call_vega."""
    d_1 = d1(S, K, T, r, sigma)
    return 0.01 * (S * norm.pdf(d_1) * sqrt(T))
def put_theta(S,K,T,r,sigma):
    """Put theta scaled by 0.01: time decay of the put price."""
    d_1 = d1(S, K, T, r, sigma)
    d_2 = d2(S, K, T, r, sigma)
    decay = -(S * norm.pdf(d_1) * sigma) / (2 * sqrt(T))
    carry = r * K * exp(-r * T) * norm.cdf(-d_2)
    return 0.01 * (decay + carry)
def put_rho(S,K,T,r,sigma):
    """Put rho per 1% rate move: -0.01 * K T e^{-rT} N(-d2)."""
    d_2 = d2(S, K, T, r, sigma)
    return 0.01 * (-K * T * exp(-r * T) * norm.cdf(-d_2))
```
| github_jupyter |
# Tratamento de Dados Radioativos
Importação das bibliotecas utilizadas
```
import re
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
```
Leitura de arquivos contendo os dados
```
enviroment = open('enviroment.txt')
radioactive_source = open('radioactive_source.txt')
uranite = open('uranite.txt')
```
split dos dados
```
# Parse each whitespace-separated line of counts into a list of ints.
# BUG FIX: materialize with list(...) — a bare map() iterator is single-use
# and has no len(), which broke the DataFrame construction and the
# np.linspace sizing in the cells below.
for text in enviroment:
    data_env = list(map(int, text.split()))
for text in radioactive_source:
    data_rad = list(map(int, text.split()))
for text in uranite:
    data_ura = list(map(int, text.split()))
```
Transformando dados em um único dataframe
```
data_dose = {'enviroment':data_env, 'radioactive_source':data_rad, 'uranite':data_ura}
dataf_dose = pd.DataFrame(data_dose)
dataf_dose.head()
```
Tempo de execução de cada dose
```
t_enviroment = 1200 #seconds
t_radioactive_source = 120 #seconds
t_uranite = 600 #seconds
```
Transformação dos tempos em um dataframe
```
# Build a matching time axis for each spectrum (one timestamp per sample).
# BUG FIX: the lengths were previously read through the undefined name
# `data` (NameError); the parsed counts live in data_dose.
data_time = {'enviroment': np.linspace(0, t_enviroment, len(data_dose['enviroment'])),
             'radioactive_source': np.linspace(0, t_radioactive_source, len(data_dose['radioactive_source'])),
             'uranite': np.linspace(0, t_uranite, len(data_dose['uranite']))}
dataf_time = pd.DataFrame(data_time)
dataf_time.head()
```
Gráficos de doses para cada espectro
```
# Plot each spectrum's dose series against its time axis, one figure each.
row_names = dataf_dose.columns
for row_name in row_names:
    plt.plot(dataf_time[row_name],dataf_dose[row_name])
    plt.title('Spectrum ' + row_name)
    plt.xlabel('time (s)')
    plt.ylabel('Dose')
    plt.show()
    # NOTE(review): np.array over a map iterator yields a 0-d object array in
    # Python 3 — confirm data_dose values are lists before this conversion.
    data_dose[row_name]=np.array(data_dose[row_name])
```
# Encontrado Intervalos de Máximo
## Taxa de Dose Ambiente
Intervalos para encontrar máximo
```
t_analyse = [0, 200, 250, 400, 600, 1200]
dose_max = []
for i in range(len(t_analyse)-1):
dose_max+=[np.amax(data_dose['enviroment'][np.logical_and(t_analyse[i]<data_dose['enviroment'], \
data_dose['enviroment']<=t_analyse[i+1])])]
dose_max
```
## Taxa de Dose da Fonte Radioativa
Intervalos para encontrar máximo
```
t_analyse = [0,20, 25, 40, 60, 100, 200, 250, 350, 600]
dose_max = []
for i in range(len(t_analyse)-1):
dose_max+=[np.amax(data_dose['radioactive_source'][np.logical_and(t_analyse[i]<data_dose['radioactive_source'], \
data_dose['radioactive_source']<=t_analyse[i+1])])]
dose_max
```
## Taxa de Dose da Uranita
Intervalos para encontrar máximo
```
t_analyse = [0,20, 25, 40, 60, 100, 200, 250, 350, 600]
dose_max = []
for i in range(len(t_analyse)-1):
dose_max+=[np.amax(data_dose['uranite'][np.logical_and(t_analyse[i]<data_dose['uranite'], \
data_dose['uranite']<=t_analyse[i+1])])]
dose_max
```
| github_jupyter |
# SSD
This is to go through each important step of SSD.
First, load the model. You only need to do this once.
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
import numpy as np
import os
os.chdir('..')
caffe_root = './'
import sys
sys.path.insert(0, caffe_root + 'python')
import time
import caffe
from caffe.proto import caffe_pb2
caffe.set_device(0)
caffe.set_mode_gpu()
# caffe.set_mode_cpu()
# We create a solver that fine-tunes from a previously trained network.
solver = caffe.SGDSolver(caffe_root + 'models/VGGNet/VOC0712/SSD_300x300/solver.prototxt')
solver.net.copy_from(caffe_root + 'models/VGGNet/VGG_ILSVRC_16_layers_fc_reduced.caffemodel')
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': solver.net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', np.array([104,117,123])) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
net = solver.net
from google.protobuf import text_format
from caffe.proto import caffe_pb2
# load PASCAL VOC labels
labelmap_file = 'data/VOC0712/labelmap_voc.prototxt'
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
def get_labelname(labelmap, labels):
    """Translate numeric label id(s) into display names via the labelmap.

    Accepts either a single label or a list of labels; asserts that every
    requested label exists in the map. Returns a list of display names.
    """
    if type(labels) is not list:
        labels = [labels]
    labelnames = []
    for label in labels:
        # First labelmap entry whose id matches; None when the label is unknown.
        match = next((item.display_name
                      for item in labelmap.item if item.label == label), None)
        assert match is not None
        labelnames.append(match)
    return labelnames
# Forward one step.
solver.step(1)
```
Now let's see the annotated datum after one forward-backward step.
```
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
img_blob = net.blobs['data'].data
num_imgs = img_blob.shape[0]
img_height = img_blob.shape[2]
img_width = img_blob.shape[3]
label_blob = net.blobs['label'].data[0,0,:,:]
num_labels = label_blob.shape[0]
for i in xrange(num_imgs):
img = transformer.deprocess('data', img_blob[i])
plt.subplot(1, num_imgs, i + 1)
plt.imshow(img)
currentAxis = plt.gca()
for j in xrange(num_labels):
gt_bbox = label_blob[j, :]
if gt_bbox[0] == i:
xmin = gt_bbox[3] * img_width
ymin = gt_bbox[4] * img_height
xmax = gt_bbox[5] * img_width
ymax = gt_bbox[6] * img_height
gt_label = int(gt_bbox[1])
coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
color = colors[gt_label]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
label = get_labelname(labelmap, gt_bbox[1])[0]
currentAxis.text(xmin, ymin, label, bbox={'facecolor':color, 'alpha':0.5})
# take an array of shape (n, height, width) or (n, height, width, channels)
# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
def vis_square(data, padsize=1, padval=0):
    """Tile an (n, height, width[, channels]) array into a near-square grid
    of sqrt(n) x sqrt(n) images and display it with plt.imshow.

    Values are min-max normalized to [0, 1] for display.
    BUG FIX: operate on a float copy — the previous in-place -= and /=
    mutated the caller's array and raised on integer dtypes; also guard
    against division by zero for a constant-valued input.
    """
    data = np.array(data, dtype=float)  # copy + float so /= is well-defined
    data -= data.min()
    peak = data.max()
    if peak > 0:  # a constant array would otherwise divide by zero
        data /= peak

    # force the number of filters to be square
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))

    # tile the filters into an image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data)
```
Let's visualize the conv1_1's filters to make sure we have loaded a good pretrained model. Otherwise, it should plot random noise squares.
```
# the parameters are a list of [weights, biases]
filters = net.params['conv1_1'][0].data
vis_square(filters.transpose(0, 2, 3, 1))
```
And let's visualize the conv5_3 layer responses. You should see nice patterns.
```
feat = net.blobs['conv5_3'].data[0, :]
vis_square(feat, padval=1)
```
Make sure that the PermuteLayer is doing the right thing.
```
fc7_mbox_loc = net.blobs['fc7_mbox_loc'].data
print fc7_mbox_loc[0,:,1,2]
fc7_mbox_loc_perm = net.blobs['fc7_mbox_loc_perm'].data
print fc7_mbox_loc_perm[0,1,2,:]
```
Make sure the PriorBoxLayer generates the correct priors.
```
img_blob = net.blobs['data'].data
num_imgs = img_blob.shape[0]
img_height = img_blob.shape[2]
img_width = img_blob.shape[3]
priorbox = net.blobs['mbox_priorbox'].data[0,0,:]
num_priors = priorbox.shape[0]
colors='rgbcmy'
for i in xrange(num_imgs):
img = transformer.deprocess('data', img_blob[i])
plt.subplot(1, num_imgs, i + 1)
plt.imshow(img)
currentAxis = plt.gca()
for j in xrange(240,243):
prior_bbox = priorbox[j*4:(j+1)*4]
xmin = prior_bbox[0] * img_width
ymin = prior_bbox[1] * img_height
xmax = prior_bbox[2] * img_width
ymax = prior_bbox[3] * img_height
coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=colors[j%4], linewidth=2))
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics
from rocket import Rocket
r = Rocket()
r
def og_dynamics(prob, obj, section):
    """OpenGoddard dynamics hook: delegate to the rocket's vector field.

    Gathers the 5 states and 2 controls for this section, asks the rocket
    object for their time derivatives, and loads them into a Dynamics
    container which is then evaluated.
    """
    state_vec = tuple(prob.states(i, section) for i in range(5))
    control_vec = tuple(prob.controls(i, section) for i in range(2))
    dx = Dynamics(prob, section)
    derivatives = obj.vec_dynamics(state_vec, control_vec)
    for i in range(5):
        dx[i] = derivatives[i]
    return dx()
def og_equality(prob, obj):
    """Boundary conditions: start at the initial state, end at rest.

    All five states must equal obj.s_0 at t=0; the first four (position
    and velocity components) must be zero at the final time. The final
    mass is left free (it is the objective).
    """
    states = [prob.states_all_section(i) for i in range(5)]
    controls = [prob.controls_all_section(i) for i in range(2)]  # fetched for symmetry; unused here
    result = Condition()
    for i in range(5):
        result.equal(states[i][0], obj.s_0[i])
    for i in range(4):
        result.equal(states[i][-1], 0.)
    return result
def og_inequality(prob, obj):
    """Path constraints: bounded controls and physically sensible mass.

    BUG FIX: the Condition object was built but never returned, so the
    optimizer previously saw no inequality constraints at all.
    """
    s = [prob.states_all_section(i) for i in range(5)]
    u = [prob.controls_all_section(i) for i in range(2)]
    result = Condition()
    # throttle u[0] in [0, 1]
    result.lower_bound(u[0], 0.)
    result.upper_bound(u[0], 1.)
    # thrust angle u[1] in [-pi, pi]
    result.lower_bound(u[1], -np.pi)
    result.upper_bound(u[1], +np.pi)
    # mass stays non-negative and never exceeds the initial mass
    result.lower_bound(s[4], 0.)
    result.upper_bound(s[4], obj.m_0)
    return result
def og_cost(prob, obj):
    """Objective: maximize final mass by minimizing its negative, scaled by m_0."""
    states = [prob.states_all_section(i) for i in range(5)]
    controls = [prob.controls_all_section(i) for i in range(2)]  # fetched for symmetry; unused here
    tf = prob.time_final(-1)  # final time; not part of this cost
    mass_history = states[-1]
    return -mass_history[-1] / obj.m_0
time_init = [0.0, (2*r.z_0/r.g)]
n = [20]
num_states = [5]
num_controls = [2]
max_iteration = 20
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
prob.dynamics = [og_dynamics]
prob.cost = og_cost
prob.equality = og_equality
prob.inequality = og_inequality
# Linear initial guesses: fly from the initial state down to rest while
# burning roughly half the propellant; full throttle, zero thrust angle.
x_guess = Guess.linear(prob.time_all_section, r.x_0, 0)
z_guess = Guess.linear(prob.time_all_section, r.z_0, 0)
vx_guess = Guess.linear(prob.time_all_section, r.vx_0, 0)
vz_guess = Guess.linear(prob.time_all_section, r.vz_0, 0)
m_guess = Guess.linear(prob.time_all_section, r.m_0, r.m_0/2)
u1_guess = Guess.linear(prob.time_all_section, 1, 1)
u2_guess = Guess.linear(prob.time_all_section, 0, 0)
prob.set_states_all_section(0, x_guess)
prob.set_states_all_section(1, z_guess)
prob.set_states_all_section(2, vx_guess)
prob.set_states_all_section(3, vz_guess)
prob.set_states_all_section(4, m_guess)
prob.set_controls_all_section(0, u1_guess)
# BUG FIX: u2_guess was written to control index 0, overwriting u1_guess
# and leaving control 1 with no initial guess.
prob.set_controls_all_section(1, u2_guess)
prob.solve(r)
```
| github_jupyter |
```
%pylab inline
import pandas as pd
import os
# Just use 1 GPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import pandas as pd
from pyvirchow.io import WSIReader
from pyvirchow.morphology import TissuePatch
from matplotlib.patches import Polygon
from shapely.geometry import Point as shapelyPoint
from shapely.geometry import box as shapelyRectangle
from pyvirchow.io.operations import get_annotation_bounding_boxes, get_annotation_polygons, translate_and_scale_object
from pyvirchow.io.operations import translate_and_scale_polygon
from openslide.deepzoom import DeepZoomGenerator
import os
import glob
from skimage.filters import threshold_otsu
from skimage.color import rgb2gray, gray2rgb
from shapely.geometry import Polygon as shapelyPolygon
import openslide
from joblib import Parallel, delayed
from tqdm import tqdm_notebook, tqdm
import cv2
from pyvirchow.io.operations import get_annotation_bounding_boxes, get_annotation_polygons, \
poly2mask, translate_and_scale_polygon, read_as_rgb
from pyvirchow.morphology.patch_extractor import TissuePatch
from pyvirchow.morphology.mask import mpl_polygon_to_shapely_scaled, get_common_interior_polygons
from keras.models import Sequential
from keras.layers import Lambda, Dropout
from keras.layers.convolutional import Convolution2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import StratifiedShuffleSplit
from keras.callbacks import ModelCheckpoint
from pyvirchow.io.tiling import generate_tiles, get_all_patches_from_slide
import matplotlib.gridspec as gridspec
from sklearn.metrics import confusion_matrix
from tqdm import tqdm_notebook
from matplotlib import cm
from multiprocessing import Pool
NUM_CLASSES = 2 # not_tumor, tumor
BATCH_SIZE = 32
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(256, 256, 3)))
model.add(Convolution2D(100, (5, 5), strides=(2, 2), activation='elu', padding='same'))
model.add(MaxPooling2D())
model.add(Convolution2D(200, (5, 5), strides=(2, 2), activation='elu', padding='same'))
model.add(MaxPooling2D())
model.add(Convolution2D(300, (3, 3), activation='elu', padding='same'))
model.add(Convolution2D(400, (3, 3), activation='elu', padding='same'))
model.add(Dropout(0.1))
model.add(Convolution2D(400, (3, 3), activation='elu', padding='same'))
model.add(Convolution2D(300, (3, 3), activation='elu', padding='same'))
model.add(Dropout(0.1))
model.add(Convolution2D(2, (1, 1))) # this is called upscore layer for some reason?
model.add(Conv2DTranspose(2, (31, 31), strides=(16, 16), activation='softmax', padding='same'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.load_weights('weights-improvement-12-0.98.hdf')
def predict_from_model(patch, model):
    """Predict which pixels are tumor for a single patch.

    input: patch: 256x256x3, rgb image
    input: model: keras model
    output: prediction: 256x256, per-pixel tumor probability
    """
    batch = patch.reshape(1, 256, 256, 3)
    tumor_channel = model.predict(batch)[:, :, :, 1]
    return tumor_channel.reshape(256, 256)
def plot_blend(patch, prediction, ax, alpha=0.75):
    """Alpha-blend a grayscale version of the patch with its tumor heatmap.

    https://matplotlib.org/examples/pylab_examples/layer_images.html
    input: patch: 256x256x3, rgb image
    input: prediction: 256x256x1, per-pixel tumor probability
    input: ax: matplotlib Axes object
    input: alpha: blend factor for the heatmap layer
    """
    # Common extent so both layers are drawn in the same coordinate frame.
    dx, dy = 0.05, 0.05
    x = np.arange(0, patch.shape[1] - 1, dx)
    y = np.arange(0, patch.shape[0] - 1, dy)
    xmin, xmax, ymin, ymax = np.amin(x), np.amax(x), np.amin(y), np.amax(y)
    extent = xmin, xmax, ymin, ymax
    # fig = plt.figure(frameon=False, figsize=(10, 5))
    Z1 = rgb2gray(patch)  # bottom layer: grayscale tissue
    Z2 = prediction       # top layer: probability heatmap
    im1 = ax.imshow(Z1, cmap='gray', extent=extent)
    # Fixed vmin/vmax keep heatmap colors comparable across patches.
    im2 = ax.imshow(Z2, cmap='coolwarm', alpha=alpha, vmin=0.0, vmax=1.0,
                    extent=extent)
    ax.axis('off');
def plot_patch_with_pred(patch, truth, prediction, title_str='', alpha=0.6):
    """Multi-panel comparison of a patch, its truth mask, and the prediction.

    input: patch: 256x256x3, rgb image
    input: truth: 256x256x2, onehot output classes (not_tumor, tumor)
    input: prediction: 256x256x1, per-pixel tumor probability
    input: title_str: figure super-title
    input: alpha: blend factor passed through to plot_blend
    """
    # Layout: a 2x2 grid of small panels, one wide blend panel, a colorbar.
    gs = gridspec.GridSpec(2, 4, width_ratios=[10, 10, 19, 1])
    ax0 = plt.subplot(gs[0, 0])
    ax1 = plt.subplot(gs[0, 1])
    ax2 = plt.subplot(gs[1, 0])
    ax3 = plt.subplot(gs[1, 1])
    ax4 = plt.subplot(gs[:, 2])
    axc = plt.subplot(gs[:, 3])
    ax0.imshow(patch);
    ax0.set_title('Original')
    # argmax over the onehot axis collapses (not_tumor, tumor) to {0, 1}.
    ax1.imshow(truth.argmax(axis=2), cmap='gray', vmin=0, vmax=1);
    ax1.set_title('Truth mask (white=tumor, black=not_tumor)')
    p = ax2.imshow(prediction, cmap='coolwarm', vmin=0, vmax=1);
    ax2.set_title('Prediction heatmap')
    # Hard mask at the 0.5 decision threshold.
    ax3.imshow((prediction > 0.5).astype(np.int), cmap='gray', vmin=0, vmax=1);
    ax3.set_title('Prediction mask (white=tumor, black=not_tumor)')
    plot_blend(patch, prediction, ax4, alpha)
    ax4.set_title('Original+Prediction blend')
    fig = plt.gcf()
    fig.set_size_inches(20, 10)
    fig.suptitle(title_str)
    fig.colorbar(p, cax=axc, orientation="vertical")
    axc.set_title('Probability pixel is tumor')
def predict_batch_from_model(patches, model):
    """Predict which pixels are tumor for a whole batch of patches.

    input: patches: batch_size x 256 x 256 x 3, rgb images
    input: model: keras model
    output: predictions: batch_size x 256 x 256, per-pixel tumor probability
    """
    raw = model.predict(patches)
    return raw[:, :, :, 1]
tumor_df = pd.read_table('/Z/personal-folders/interns/saket/histopath_data/patches_dataframe/training/tumor/master_df.tsv')
tumor_076 = tumor_df[tumor_df.uid=='tumor_076']
tumor_082 = tumor_df[tumor_df.uid=='tumor_082']
tumor_002 = tumor_df[tumor_df.uid=='tumor_002']
tumor_002.tile_loc = [eval(x) for x in tumor_002.tile_loc]
tumor_002
sample_gen = generate_tiles(tumor_002.sample(32, random_state=4), 32, shuffle=True)
example_X, example_y = next(sample_gen)
example_patch = example_X[-9]
example_truth = example_y[-9]
prediction = predict_from_model(example_patch, model)
fig = plt.figure(figsize=(10,10))
plot_patch_with_pred(example_patch, example_truth, prediction, title_str='Example Tumor Patch')
fig.savefig('plots_exploratory_analysis/blended_output.pdf')
```
# Generate entire tumor pics
```
output_dir = '/Z/personal-folders/interns/saket/histopath_data/prediction_heatmaps/tumor_038/'
os.makedirs(output_dir, exist_ok=True)
alpha = 0.5
slide_path = '/Z/personal-folders/interns/saket/histopath_data/CAMELYON16/training/tumor/tumor_038.tif'
json_filepath = '/Z/personal-folders/interns/saket/histopath_data/CAMELYON16/training/lesion_annotations_json/tumor_038.json'
all_samples = get_all_patches_from_slide(slide_path, json_filepath, False, 256)
slide = WSIReader(slide_path, 40)
n_samples = len(all_samples)
n_cols = int(slide.dimensions[0] / 256)
n_rows = int(slide.dimensions[1] / 256)
#assert n_cols * n_rows == n_samples
thumbnail = slide.get_thumbnail((n_cols, n_rows))
thumbnail = np.array(thumbnail)
# batch_size = n_cols
batch_size = 32
output_thumbnail_preds = list()
from joblib import Parallel, delayed
#Parallel(n_jobs=16)(
def process_batch(args):
    """Predict tumor heatmaps for one batch of tiles and write blended PNGs.

    args: (idx, batch_samples, X) — idx is the batch index, batch_samples a
    DataFrame of tile metadata (tile_loc, is_tissue), X the image batch.
    Returns (idx, per-tile mean prediction array).
    NOTE(review): relies on module-level globals output_dir, model,
    batch_size and alpha being defined before the pool workers run.
    """
    idx, batch_samples, X = args
    # One PNG per tile, named "<col>_<row>.png" from tile_loc = (row, col).
    png_fnames = batch_samples.tile_loc.apply(lambda coord: os.path.join(output_dir,
                                                    '{}_{}.png'.format(coord[1], coord[0])))
    output_thumbnail_pred = None
    if batch_samples.is_tissue.nunique() == 1 and batch_samples.iloc[0].is_tissue == False:
        # all patches in this row do not have tissue, skip them all
        output_thumbnail_pred = np.zeros(batch_size, dtype=np.float32)
        # output pngs
        for i, png_fname in enumerate(png_fnames):
            plt.imsave(png_fname, X[i])
    else:
        # make predictions
        preds = predict_batch_from_model(X, model)
        # mean over the spatial axes gives one summary probability per tile
        output_thumbnail_pred = preds.mean(axis=(1,2))
        # overlay preds
        # save blended imgs
        for i, png_fname in enumerate(png_fnames):
            pred_i = preds[i]
            X_i = X[i]
            #output_img = rgb2gray(X_i)
            #output_img2 = gray2rgb(output_img.copy())
            #overlay = np.uint8(cm.viridis(pred_i) * 255)[:,:,:3]
            #blended = overlay*alpha + output_img2 *(1-alpha) + 0
            # grayscale base + viridis-colored heatmap, alpha-blended via OpenCV
            output_img = cv2.cvtColor(X_i, cv2.COLOR_RGB2GRAY)
            output_img2 = cv2.cvtColor(output_img.copy(), cv2.COLOR_GRAY2RGB)
            overlay = np.uint8(cm.viridis(pred_i) * 255)[:,:,:3]
            blended = cv2.addWeighted(overlay, alpha, output_img2, 1-alpha, 0, output_img)
            #blended = overlay*alpha + output_img2 *(1-alpha) + 0
            #blended = np.clip(blended, 0, 255)
            plt.imsave(png_fname, blended)
    return idx, output_thumbnail_pred
# Slice the sample table into batches of batch_size rows each.
all_batch_samples = []
for offset in tqdm(list(range(0, n_samples, batch_size))):
    all_batch_samples.append(all_samples.iloc[offset:offset+batch_size])
total = len(list(range(0, n_samples, batch_size)))
output_thumbnail_preds = []
output_thumbnail_idx = []
# NOTE(review): `samples` (the (idx, batch_samples, X) tuples consumed below)
# is only built in a LATER cell — running this cell first raises NameError;
# the notebook cells appear to be out of order.
with Pool(processes=32) as p:
    with tqdm(total=total) as pbar:
        #output_thumbnail_pred = list(tqdm_notebook(p.imap(process_batch, all_batch_samples), total=total))
        results = p.imap_unordered(process_batch, samples)
        for idx, result in results:
            output_thumbnail_preds.append(result)
            output_thumbnail_idx.append(idx)
            pbar.update()
#for i, output_thumbnail_pred in enumerate(p.map(process_batch, all_batch_samples)):
#    output_thumbnail_preds.append(output_thumbnail_pred)
#    pbar.update()
output_thumbnail_preds = np.array(output_thumbnail_preds)
samples = []
for index, batch_samples in enumerate(tqdm_notebook(all_batch_samples)):
X, _ = next(generate_tiles(batch_samples, batch_size, shuffle=False))
samples.append((index, batch_samples, X))
output_thumbnail_preds = Parallel(n_jobs=16,
verbose=1)(map(delayed(process_batch),
enumerate(all_batch_samples)))
len(output_thumbnail_preds)
```
| github_jupyter |
# Spark Learning Note - MLlib
Jia Geng | gjia0214@gmail.com
<a id='directory'></a>
## Directory
- [Data Source](https://github.com/databricks/Spark-The-Definitive-Guide/tree/master/data/)
- [1. Some Machine Learning Examples](#sec1)
- [2. Classic ML Developmental Stages](#sec2)
- [3. Spark MLlib Overview](#sec3)
- [4. Simple Example Walk Through](#sec4)
- [4.1 Load the data](#sec4-1)
- [4.2 Transformer - RFomula](#sec4-2)
- [4.3 Estimator](#sec4-3)
- [4.4 Pipeline and GridSearch](#sec4-4)
- [4.5 Tuning (Evaluator and GridSearch)](#sec4-5)
## 1. Some Machine Learning Examples <a id='sec1'></a>
Supervised Learning
- classification
- predicting disease
- clasifying image
- regression
- predicting sales
- predicting number of viewer of a show
Recommendation
- movie recommendation
- product recommendation
Unsupervised Learning
- anormaly detection
- user segmentation
- topic modeling
Graph Analysis
- fraud prediction
- interesting - account within two hops of fraudulent number might be considered as suspicious
- anormaly detection
- e.g. if typically in the data each vertex has ten edges associated with it. given a vertex only has one edge -> possible anormaly
- classification
- influencer's network has similar structure
- recommendation
- PageRank is a graph algorithm!
[back to top](#directory)
## 2. Classic ML Developmental Stages <a id='sec2'></a>
- collect data
- clean data
- feature engineering
- modeling
- evaluating and tuning
- leveraging model/insights
[back to top](#directory)
## 3. Spark MLlib Overview <a id='sec3'></a>
Spark MLlib provide two core packages for machine learning;
- `pyspark.ml`: provide high level DataFrames APIs for building machine learning piplines
- `pyspark.mllib`: provide low level RDD APIs
**Spark MLlib vs Other ML packages**
- most of other ml packages are **single machine tools**
- when to use MLlib?
- when data is large, use MLlib for feature engineering then use single machine tool for modeling
- when data and model are both large and can not fit on one machine, MLlib makes distributed machine learning very simple
- potential disadvantage of MLlib
- When deploying the model, MLlib does not provide a built-in way to serve low-latency predictions from a model
- Might want to export the model to another serving system or custom application to do it
**Spark Structual Types**
- Transformers: functions convert raw data in some way
- Estimators
- can be seen as a kind of transformer that is initialized with data, e.g. normalizing data requires the mean and std computed from the data
- algorithms that allow users to train a model from data
- Evaluator: provide insight about how a model performs according to some criteria we specified such as AUC.
- Pipeline: a container that pipelines the process, like the scikit-learn pipeline
- **The transformer, estimator and evaluater object classes usually can be initiated as a 'blank' object. Then set up the attribute and configuration later. This makes these classes support the Pipeline construction and grid search.**
**Spark Low Level Data Types**
- `from pyspark.ml.linalg import Vectors`
- Dense Vector: `Vector.dense(1.0, 2.0, 3.0)`
- Spark Vector: `Vector.sparse(size, idx, values)` idx for positions that is not zero
[back to top](#directory)
## 4. Simple Example Walk Through <a id='sec4'></a>
### 4.1 Load the data <a id='sec4-1'></a>
Initialize the spark session, load the data, set up partitions, cahce if needed, and do some exploration such as count, check nulls, summary, etc.
[back to top](#directory)
```
from pyspark.sql.session import SparkSession
data_example_path = '/home/jgeng/Documents/Git/SparkLearning/data/simple-ml'
spark = SparkSession.builder.appName('MLexample').getOrCreate()
spark
# load the data
df = spark.read.json(data_example_path)
from pyspark.sql.functions import col, max, min, avg, stddev_samp
# check on schema
df.show(3)
df.printSchema()
# check nulls
for col_name in df.columns:
print(df.where('{} is null'.format(col_name)).count())
df.select(col('color')).distinct().show(3)
df.select(col('lab')).distinct().show(3)
df.select('value1', 'value2').summary().show()
```
### 4.2 Transformer - RFomula <a id='sec4-2'></a>
Most of the machine learning algorithms in MLlib needs the input to be transformed into:
- Double for labels
- Vector[Double] for features
**Use R-liked operator to build a `RFomula` as transformer**
- under `pyspark.ml.feature`
- `~` separates the target and the terms
- `+` to concat/include a feature.
- `+0` to remove the intercept
- `-` to remove a term
- `-0` to remove the intercept (same as `+0`)
- `:` as the interaction between two feature, i.e. multiplication for numeric values or binarized categorical values
- `.` all columns except for the target
E.g.
`lab~.+color:value1+color:value2` means
- label is the target
- model takes all columns except lab column as input
- model also takes interaction terms between color:value1, color:value2 as input
To transform data into usable features:
- build a `RFormula object`
- use `RFormula.fit(data_df)` to set up the transform configuration. `fit` return a `RFormulaModel` object
- Transform the data via `RFormulaModel` by calling `.transform(data)`
[back to top](#directory)
```
from pyspark.ml.feature import RFormula
# specify the transformer using RFormula
rfm = RFormula()
rfm.setFormula('lab~.+color:value1+color:value2')
# fit the rformula object with data to create the transformer
transformer = rfm.fit(df)
print(type(transformer))
print(transformer.explainParams())
# transform - it will concat a feature column to the original df
preparedDF = transformer.transform(df)
preparedDF.show(3)
preparedDF.printSchema()
preparedDF.select('features').show(3, False)
# split the data into train an test
train, test = preparedDF.randomSplit([0.7, 0.3])
print(train.count())
print(test.count())
```
### 4.3 Estimator <a id='sec4-3'></a>
Most of the algorithms are under `pyspark.ml`. E.g logistic regression under `pyspark.ml.classification.LogisticRegression`
The classifier constructor usually takes in parameters that specify the feature column and label column along with some hyperparameters. **Most classifier objects have a function `explainParams()` that can provide info regarding the hyperparameters**
Estimator
- the class object only contains the params configuration for the model, e.g. `LogisticRegression`
- use `.fit()` to fit the training data
- `fit` returns a trained classifier, e.g. `LogisticRegressionModel`. this is the classifier object that contains weights etc. for making predictions!
- use `.transform()` to make predictions since logically, prediction is just transform the input to labels!
[back to top](#directory)
```
from pyspark.ml.classification import LogisticRegression
logit = LogisticRegression(labelCol='label', featuresCol='features')
print(type(logit))
print(logit.explainParams())
# fit the model with training data
clf = logit.fit(train)
print(type(clf))
print(clf.coefficientMatrix)
# making predictions
clf.transform(test).show(3) # probably want to select the probability and prediction column only
```
### 4.4 Pipeline and GridSearch <a id='sec4-4'></a>
Spark also have a pipeline class: `pyspark.ml.Pipeline`. `Pipeline` is essentially a compact estimator that can do feature transformation, model fitting and prediction. `Pipeline` have a `.stages` attribute that keeps the configurations of the transformer and estimator.
[back to top](#directory)
### 4.5 Tuning (Evaluator and GridSearch) <a id='sec4-5'></a>
Spark provide a very compact way to do model selection.
Steps:
- initialize the transformer, estimator and pipeline
- set up the `ParamGridBuilder` under `pyspark.ml.tuning` for the grid search
- `ParamGridBuilder` can be used to configure the searching space for transformer (feature subsets) and the estimator (model hyperparam)
- use `ParamGridBuilder.addGrid(attr, candidates)` to configure the grid search
- create a evaluator. `pyspark.ml.evaluation` host different types of evaluators for different task, that can be used for evaluating the model performance. When constructing the evaluator, you usually need to:
- `setMetricName()`
- `setRawPredictionCol()`
- `setLabelCol()`
- create a verifier, e.g. `TrainValidationSplit`. this is a compact class that takes in the pipeline and evaluator and do tuning,
After training.
- use `evaluator.evaluate(tvsFitted.bestModel.transform(test))` for the performance
- to check the training record on the best model `summary= tvsFitted.bestModel.stages[1].summary`
- use stages to get the classifer if pipeline estimator was used
- `summary.objectiveHistory` is the loss history during training
- `summary.roc.show()` gives the roc curve data
To load/write a model, just use the `load`/`write` methods
[back to top](#directory)
```
from pyspark.ml.feature import RFormula
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
# prepare the transformer and the estimator
# do not specify any hyperparameters here
rfm = RFormula()
logit = LogisticRegression().setLabelCol('label').setFeaturesCol('features')
# construct the pipeline
ppBuilder = Pipeline()
# set up the stage
stages = [rfm, logit]
pp = ppBuilder.setStages(stages) # does not configure inplace!
print(type(ppBuilder), type(pp))
from pyspark.ml.evaluation import BinaryClassificationEvaluator
# build the evaluator
evaluator = BinaryClassificationEvaluator().setMetricName('areaUnderROC')\
.setRawPredictionCol('prediction')\
.setLabelCol('label')
from pyspark.ml.tuning import ParamGridBuilder
# building the grid search space
rfm_can = ['lab~.', 'lab~.+color:value1+color:value2'] # feature space
enet_can = [0, 0.5, 1] # 0 for l1 0.5 for l1 l2, 1 for l2
reg_can = [0, 1e-3, 1e-2, 1e-1, 1, 10] # 0 for no regularization
params = ParamGridBuilder().addGrid(rfm.formula, rfm_can)\
.addGrid(logit.elasticNetParam, enet_can)\
.addGrid(logit.regParam, reg_can)\
.build() # dont forget to call build!
from pyspark.ml.tuning import TrainValidationSplit
# build the train validation machine
# tvs will takes 0.25 of the training data as the holdout set for validation
tvs = TrainValidationSplit().setTrainRatio(0.75)\
.setEstimator(pp)\
.setEvaluator(evaluator)\
.setEstimatorParamMaps(params)
train, test = df.randomSplit([0.7, 0.3])
tvsFitted = tvs.fit(train) # train on the train data
evaluator.evaluate(tvsFitted.bestModel.transform(test))
# get the valdiation results via the validation metrics
# get associated model params via getEstimatorParamMaps
print(len(tvsFitted.getEstimatorParamMaps()))
print(len(tvsFitted.validationMetrics))
print(tvsFitted.validationMetrics)
# get the est logistic model summary
# since the model is pipeline
# need to first get the classifier via stage
# then get the summary
summary = tvsFitted.bestModel.stages[1].summary
loss_history = summary.objectiveHistory
print(loss_history, len(loss_history))
summary.roc.show() # get the roc curve detail
```
[back to top](#directory)
| github_jupyter |
## Aplicando Pipeline na base de dados adult.data disponivel em: https://archive.ics.uci.edu/ml/datasets/Adult
#### Resumo : Preveja se a renda excede US $ 50 mil / ano com base nos dados do censo. Também conhecido como conjunto de dados "Renda do Censo".
* Informações sobre atributos:
* Listagem de atributos:
* renda: > 50K, <= 50K.
* idade: contínua.
* classe: Privado, Auto-emp-não-inc, Auto-emp-inc, Federal-gov, Local-gov, Estado-gov, Sem salário, Nunca trabalhou.
* fnlwgt: contínuo.
* educação: Bacharelado, Ensino Médio, 11º, Ensino Médio, Prof-escola, Assoc-acdm, Assoc-voc, 9º, 7º-8º, 12º, Mestrado, 1º-4º, 10º, Doutorado, 5º-6º, Pré-Escola.
* número da educação: contínuo.
* Estado civil: cônjuge civil, divorciado, nunca casado, separado, viúvo, cônjuge ausente, cônjuge afetivo.
* ocupação: Suporte técnico, Reparação artesanal, Outro serviço, Vendas, Gerenciamento executivo, Especialidade prof, Limpadores de manipuladores, Inspeção de máquinas, Adm-administrativo, Agricultura, Pesca de pesca, Transporte de mudanças, Casa particular serviço, serviço protetor, forças armadas.
* Relacionamento: Esposa, Filho próprio, Marido, Não família, Outro parente, Solteiro.
* Raça: Branco, Asiático-Pac-Island, Amer-Indian-Eskimo, Outro, Preto.
* sexo: Feminino, Masculino.
* ganho de capital: contínuo.
* perda de capital: contínua.
* horas por semana: contínua.
* país de origem: Estados Unidos, Camboja, Inglaterra, Porto Rico, Canadá, Alemanha, Estados Unidos (Guam-USVI-etc), Índia, Japão, Grécia, Sul, China, Cuba, Irã, Honduras, Filipinas, Itália , Polônia, Jamaica, Vietnã, México, Portugal, Irlanda, França, República Dominicana, Laos, Equador, Taiwan, Haiti, Colômbia, Hungria, Guatemala, Nicarágua, Escócia, Tailândia, Iugoslávia, El Salvador, Trinadad e Tobago, Peru, Hong , Holanda - Holanda.
```
#!pip install category_encoders
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from category_encoders import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
df = pd.read_csv('adult.data')
df.head()
#removendo colunas desnecessarias
df.drop('education', axis=1, inplace=True)
df.head()
df.describe()
df.isnull().sum()
y = df.income
x = df.drop('income', axis=1, inplace=False)
X_train, X_test, Y_train, Y_test = train_test_split(x, y)
```
### Aplicando One Hot Enconder
```
ohe = OneHotEncoder(use_cat_names=True)
X_train = ohe.fit_transform(X_train)
X_train.head()
```
### Aplicando um pré-processador
```
scaler = StandardScaler().fit(X_train)
scaler
X_train = scaler.transform(X_train)
X_train[:10]
```
### Gerando o modelo
```
#criando uma instancia da arvore
clf_tree = tree.DecisionTreeClassifier()
#treinando os dados
clf_tree = clf_tree.fit(X_train, Y_train)
```
### Aplicando OHE nos dados de teste
```
# Apply the encoder fitted on the TRAINING data to the test split.
X_test = ohe.transform(X_test)
# BUG FIX: scale the test set with the scaler fitted on the training data.
# Fitting a fresh StandardScaler on X_test leaks test-set statistics and
# produces features on a different scale than the model was trained with.
X_test = scaler.transform(X_test)
X_test[:10]
clf_tree.predict(X_test)
```
### Validando o Modelo
```
acuracia = clf_tree.score(X_test, Y_test)
acuracia
```
## Criando Pipelines
```
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
#pipeline para calculo da mediana
mediana = Pipeline(steps=[
('mediana', SimpleImputer(strategy='median'))
])
#pipelipe para preecher uma coluna com valores mais frequentes
frequente = Pipeline(steps=[
('frequente', SimpleImputer(strategy='most_frequent'))
])
#encadeando pipelines
data_cleaning = ColumnTransformer(transformers=[
('mediana', mediana, ['education-num']),
('frequente', frequente, ['race'])
])
```
#### Aplicando pipeline com DecisionTreeClassifier
```
pip_1 = Pipeline([
('ohe', OneHotEncoder()),
('scaler', StandardScaler()),
('clf', tree.DecisionTreeClassifier())
])
pip_1.steps
#dividindo os dados em treino e teste
x_train, x_test, y_train, y_test = train_test_split(x, y)
pip_1.fit(x_train, y_train)
#medindo a acuracia do pipeline 1
acuracia = pip_1.score(x_test,y_test)
acuracia
```
#### Definindo um pipeline com OneHotEncoder, MinMaxScaler, DecisionTreeClassifier(max_depth=3)
```
pip_2 = Pipeline([
('ohe',OneHotEncoder()),
('min_max_scaler', MinMaxScaler()),
('clf',tree.DecisionTreeClassifier(max_depth=3))
])
pip_2.fit(x_train, y_train)
#medindo a acuracia do pipeline 2
acuracia_pip2 = pip_2.score(x_test,y_test)
acuracia_pip2
```
#### Aplicando um pipeline com RandomForestClassifier
```
pip_3 = Pipeline([
('data_cleaning', data_cleaning),
('ohe',OneHotEncoder()),
('scale', StandardScaler()),
('tree', RandomForestClassifier(n_estimators=100,max_depth=3, random_state=242))
])
pip_3.fit(x_train,y_train)
#medindo a acuracia do pipeline 3
pip_3.score(x_test, y_test)
```
| github_jupyter |
# TORCHVISION.TRANSFORMS
```
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch
from matplotlib.pyplot import imshow
from torchvision.transforms import ToPILImage
def get_transform(centercrop, resize, totensor, normalize, normalize2):
    """Build a torchvision transform pipeline from boolean switches.

    Each flag enables one stage, applied in this fixed order: center-crop
    to 160, resize to 80x80, PIL->tensor conversion, [0,1] -> [-1,1]
    normalization, and the inverse mapping from [-1,1] back to [0,1].
    """
    # (enabled?, lazy constructor) pairs, in pipeline order
    stage_specs = [
        (centercrop, lambda: transforms.CenterCrop(160)),
        (resize, lambda: transforms.Resize((80, 80))),
        (totensor, lambda: transforms.ToTensor()),
        (normalize, lambda: transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                                 std=[0.5, 0.5, 0.5])),
        (normalize2, lambda: transforms.Normalize(mean=[-1, -1, -1],
                                                  std=[2, 2, 2])),
    ]
    return transforms.Compose([build() for enabled, build in stage_specs if enabled])
path = './UCSD_Anomaly_Dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train'
```
- resize
```
transforming = get_transform(False, True, True, False, False)
dataset = datasets.ImageFolder(path, transform=transforming)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=2,
shuffle=True,
num_workers=8)
data_iter = iter(data_loader)
images, _ = next(data_iter)
print(images.size())
to_img = ToPILImage()
imshow(to_img(images[0]))
```
- center crop
```
transforming = get_transform(True, False, True, False, False)
dataset = datasets.ImageFolder(path, transform=transforming)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=2,
shuffle=True,
num_workers=8)
data_iter = iter(data_loader)
images, _ = next(data_iter)
print(images.size())
to_img = ToPILImage()
imshow(to_img(images[0]))
```
- normalize (-1 to 1)
```
transforming = get_transform(False, False, True, True, False)
dataset = datasets.ImageFolder(path, transform=transforming)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=2,
shuffle=True,
num_workers=8)
data_iter = iter(data_loader)
images, _ = next(data_iter)
print(images.size())
images[0]
to_img = ToPILImage()
imshow(to_img(images[0]))
```
- normalize2 (0 to 1)
```
transforming = get_transform(False, False, True, False, True)
dataset = datasets.ImageFolder(path, transform=transforming)
data_loader = torch.utils.data.DataLoader(dataset,
batch_size=2,
shuffle=True,
num_workers=8)
data_iter = iter(data_loader)
images, _ = next(data_iter)
print(images.size())
images[0]
to_img = ToPILImage()
imshow(to_img(images[0]))
```
| github_jupyter |
## 01. Object-oriented programming
In __procedural programming__ paradigm, the focus is on writing functions or procedures which operate on data. While in __object-oriented programming__ the focus is on the creation of objects which contain both data and functionality together.
## 02. User Defined Classes
If the first line after the class header is a string, it becomes the docstring of the class, and will be recognized by various tools. (This is also the way docstrings work in functions.)
Every class should have a method with the special name `__init__`. This initializer method, often referred to as the __constructor__, is automatically called whenever a new instance of class is created. It gives the programmer the opportunity to set up the attributes required within the new instance by giving them their initial state values. The self parameter (you could choose any other name, but nobody ever does!) is automatically set to reference the newly created object that needs to be initialized.
```
class Point:
    """Represent and manipulate x,y coordinates."""

    def __init__(self):
        """Create a new point anchored at the origin (0, 0)."""
        self.x, self.y = 0, 0
p = Point() # Instantiate an object of type Point
q = Point() # and make a second point
print(p)
print(q)
print(p is q)
```
Now when we create new points, we supply the x and y coordinates as parameters. When the point is created, the values of initX and initY are assigned to the state of the object, in the __instance variables__ x and y.
```
class Point:
    """Represent and manipulate x,y coordinates."""

    def __init__(self, initX, initY):
        """Store the supplied coordinates on the new instance."""
        self.x, self.y = initX, initY
p = Point(7,6)
class Point:
    """Represent and manipulate x,y coordinates."""

    def __init__(self, initX, initY):
        """Store the supplied coordinates on the new instance."""
        self.x, self.y = initX, initY

    def getX(self):
        """Return the x coordinate."""
        return self.x

    def getY(self):
        """Return the y coordinate."""
        return self.y
p = Point(7,6)
print(p.getX())
print(p.getY())
class Point:
    """Represent and manipulate x,y coordinates."""

    def __init__(self, initX, initY):
        self.x, self.y = initX, initY

    def getX(self):
        """Return the x coordinate."""
        return self.x

    def getY(self):
        """Return the y coordinate."""
        return self.y

    def distanceFromOrigin(self):
        """Return the Euclidean distance from (0, 0)."""
        return (self.x ** 2 + self.y ** 2) ** 0.5
p = Point(7,6)
print(p.distanceFromOrigin())
```
You can pass an object as an argument to a function, in the usual way.
```
import math
class Point:
    """Represent and manipulate x,y coordinates."""

    def __init__(self, initX, initY):
        self.x, self.y = initX, initY

    def getX(self):
        """Return the x coordinate."""
        return self.x

    def getY(self):
        """Return the y coordinate."""
        return self.y

    def distanceFromOrigin(self):
        """Return the Euclidean distance from (0, 0)."""
        return (self.x ** 2 + self.y ** 2) ** 0.5
def distance(point1, point2):
    """Return the Euclidean distance between two point-like objects.

    Both arguments only need getX()/getY() accessors.
    """
    dx = point2.getX() - point1.getX()
    dy = point2.getY() - point1.getY()
    return math.sqrt(dx ** 2 + dy ** 2)
p = Point(4,3)
q = Point(0,0)
print(distance(p,q))
```
We could have made distance be a method of the Point class.
```
import math
class Point:
    """Represent and manipulate x,y coordinates."""

    def __init__(self, initX, initY):
        self.x, self.y = initX, initY

    def getX(self):
        """Return the x coordinate."""
        return self.x

    def getY(self):
        """Return the y coordinate."""
        return self.y

    def distanceFromOrigin(self):
        """Return the Euclidean distance from (0, 0)."""
        return (self.x ** 2 + self.y ** 2) ** 0.5

    def distance(self, point2):
        """Return the Euclidean distance from this point to point2."""
        dx = point2.getX() - self.getX()
        dy = point2.getY() - self.getY()
        return math.sqrt(dx ** 2 + dy ** 2)
p = Point(4,3)
q = Point(0,0)
print(p.distance(q))
```
When we’re working with classes and objects, it is often necessary to print an object (that is, to print the state of an object). Consider the example below.
```
class Point:
    """A 2-D point that can describe itself as a string."""

    def __init__(self, initX, initY):
        self.x, self.y = initX, initY

    def getX(self):
        """Return the x coordinate."""
        return self.x

    def getY(self):
        """Return the y coordinate."""
        return self.y

    def distanceFromOrigin(self):
        """Return the Euclidean distance from (0, 0)."""
        return (self.x ** 2 + self.y ** 2) ** 0.5

    def __str__(self):
        # human-readable state, e.g. "x = 7, y = 6"
        return "x = {}, y = {}".format(self.x, self.y)
p = Point(7,6)
print(p)
```
We could also use instances as return values
```
class Point:
    """A 2-D point that can locate the midpoint toward another point."""

    def __init__(self, initX, initY):
        self.x, self.y = initX, initY

    def getX(self):
        """Return the x coordinate."""
        return self.x

    def getY(self):
        """Return the y coordinate."""
        return self.y

    def distanceFromOrigin(self):
        """Return the Euclidean distance from (0, 0)."""
        return (self.x ** 2 + self.y ** 2) ** 0.5

    def __str__(self):
        return "x = {}, y = {}".format(self.x, self.y)

    def halfway(self, target):
        """Return a new Point midway between this point and target."""
        return Point((self.x + target.x) / 2, (self.y + target.y) / 2)
p = Point(3,4)
q = Point(5,12)
mid = p.halfway(q)
# note that you would have exactly the same result if you instead wrote
# mid = q.halfway(p)
# because they are both Point objects, and the middle is the same no matter what
print(mid)
print(mid.getX())
print(mid.getY())
```
## 03. Sorting List of Instance
Sometimes you will find it convenient to define a method for the class that does some computation on the data in an instance.
```
L = ["Cherry", "Apple", "Blueberry"]
print(sorted(L, key=len))
#alternative form using lambda, if you find that easier to understand
print(sorted(L, key= lambda x: len(x)))
class Fruit():
    """A fruit with a display name and a unit price."""

    def __init__(self, name, price):
        self.name = name
        self.price = price
L = [Fruit("Cherry", 10), Fruit("Apple", 5), Fruit("Blueberry", 20)]
for f in sorted(L, key=lambda x: x.price):
print(f.name)
class Fruit():
    """A fruit with a name and a price, sortable by price."""

    def __init__(self, name, price):
        self.name = name
        self.price = price

    def sort_priority(self):
        """Return the sort key for this fruit (its price)."""
        return self.price
L = [Fruit("Cherry", 10), Fruit("Apple", 5), Fruit("Blueberry", 20)]
print("-----sorted by price, referencing a class method-----")
for f in sorted(L, key=Fruit.sort_priority):
print(f.name)
print("---- one more way to do the same thing-----")
for f in sorted(L, key=lambda x: x.sort_priority()):
print(f.name)
```
## 04. Class Variables and Instance Variables
You have already seen that each instance of a class has its own namespace with its own instance variables. Two instances of the Point class each have their own instance variable x. Setting x in one instance doesn’t affect the other instance.
A class can also have class variables. A class variable is set as part of the class definition.
```
class Point:
    """ Point class for representing and manipulating x,y coordinates. """
    # Class variable: the character used to draw the point on the graph.
    # Assigning instance.printed_rep shadows it for that one instance only
    # (demonstrated by p1.printed_rep="+" in the cell below).
    printed_rep = "*"
    def __init__(self, initX, initY):
        self.x = initX
        self.y = initY
    def graph(self):
        # Render a crude ASCII plot of this point in the first quadrant.
        # Each row is labelled with its y value modulo 10; the row whose
        # label equals int(self.y) also carries printed_rep at column x.
        rows = []
        # grid extends slightly past the larger coordinate
        size = max(int(self.x), int(self.y)) + 2
        for j in range(size-1) :
            if (j+1) == int(self.y):
                # this row holds the point: label, padding spaces, marker
                special_row = str((j+1) % 10) + (" "*(int(self.x) -1)) + self.printed_rep
                rows.append(special_row)
            else:
                rows.append(str((j+1) % 10))
        rows.reverse() # put higher values of y first
        # x axis as the last row: digits 0..size-1, each taken modulo 10
        x_axis = ""
        for i in range(size):
            x_axis += str(i % 10)
        rows.append(x_axis)
        return "\n".join(rows)
p1 = Point(2, 3)
p2 = Point(3, 12)
print(p1.graph())
print()
print(p2.graph())
p1.printed_rep="+"
print(p1.graph())
print(p2.graph())
```
## 04. Thinking About Classes and Instances
Before you decide to define a new class, there are a few things to keep in mind, and questions you should ask yourself:
- __What is the data that you want to deal with?__ (Data about a bunch of songs from iTunes? Data about a bunch of tweets from Twitter? Data about a bunch of hashtag searches on Twitter? Two numbers that represent coordinates of a point on a 2-dimensional plane?)
- __What will one instance of your class represent?__ In other words, which sort of new thing in your program should have fancy functionality? One song? One hashtag? One tweet? One point? The answer to this question should help you decide what to call the class you define.
- __What information should each instance have as instance variables?__ This is related to what an instance represents. See if you can make it into a sentence. “Each instance represents one < song > and each < song > has an < artist > and a < title > as instance variables.” Or, “Each instance represents a < Tweet > and each < Tweet > has a < user (who posted it) > and < a message content string > as instance variables.”
- __What instance methods should each instance have?__ What should each instance be able to do? To continue using the same examples: Maybe each song has a method that uses a lyrics API to get a long string of its lyrics. Maybe each song has a method that returns a string of its artist’s name. Or for a tweet, maybe each tweet has a method that returns the length of the tweet’s message. (Go wild!)
- __What should the printed version of an instance look like?__ (This question will help you determine how to write the __str__ method.) Maybe, “Each song printed out will show the song title and the artist’s name.” or “Each Tweet printed out will show the username of the person who posted it and the message content of the tweet.”
## 05. A Tamagotchi Game
```
from random import randrange
class Pet():
    """A virtual pet that grows hungrier and more bored as time passes."""

    # class-level tuning knobs shared by every pet
    boredom_decrement = 4    # boredom relief per fun interaction
    hunger_decrement = 6     # hunger relief per feeding
    boredom_threshold = 5    # above this the pet reports being bored
    hunger_threshold = 10    # above this the pet reports being hungry
    sounds = ['Mrrp']

    def __init__(self, name = "Kitty"):
        self.name = name
        # start with random, below-threshold hunger and boredom
        self.hunger = randrange(self.hunger_threshold)
        self.boredom = randrange(self.boredom_threshold)
        # per-instance copy so teach() never mutates the class-level list
        self.sounds = self.sounds[:]

    def clock_tick(self):
        """Advance time one step: the pet gets hungrier and more bored."""
        self.hunger += 1
        self.boredom += 1

    def mood(self):
        """Describe how the pet feels; hunger outranks boredom."""
        if self.hunger > self.hunger_threshold:
            return "hungry"
        if self.boredom > self.boredom_threshold:
            return "bored"
        return "happy"

    def __str__(self):
        return " I'm {}. ".format(self.name) + " I feel {}. ".format(self.mood())

    def hi(self):
        """Say one randomly chosen known sound and relieve some boredom."""
        print(self.sounds[randrange(len(self.sounds))])
        self.reduce_boredom()

    def teach(self, word):
        """Learn a new sound; learning counts as entertainment."""
        self.sounds.append(word)
        self.reduce_boredom()

    def feed(self):
        """Feed the pet, lowering its hunger."""
        self.reduce_hunger()

    def reduce_hunger(self):
        # hunger never goes below zero
        self.hunger = max(0, self.hunger - self.hunger_decrement)

    def reduce_boredom(self):
        # boredom never goes below zero
        self.boredom = max(0, self.boredom - self.boredom_decrement)
p1 = Pet("Fido")
print(p1)
for i in range(10):
p1.clock_tick()
print(p1)
p1.feed()
p1.hi()
p1.teach("Boo")
for i in range(10):
p1.hi()
print(p1)
import sys
#sys.setExecutionLimit(60000)
def whichone(petlist, name):
    """Return the first pet in petlist whose name matches, or None."""
    return next((pet for pet in petlist if pet.name == name), None)
def play():
    """Run the interactive Tamagotchi game loop (reads commands from stdin)."""
    animals = []  # all adopted Pet instances
    option = ""  # NOTE(review): unused; kept as-is
    base_prompt = """
        Quit
        Adopt <petname_with_no_spaces_please>
        Greet <petname>
        Teach <petname> <word>
        Feed <petname>
        Choice: """
    feedback = ""  # status text accumulated during the turn, shown next prompt
    while True:
        action = input(feedback + "\n" + base_prompt)
        feedback = ""
        words = action.split()
        # first whitespace-separated token is the command, if any
        if len(words) > 0:
            command = words[0]
        else:
            command = None
        if command == "Quit":
            print("Exiting...")
            return
        elif command == "Adopt" and len(words) > 1:
            # adopt a new pet unless that name is already taken
            if whichone(animals, words[1]):
                feedback += "You already have a pet with that name\n"
            else:
                animals.append(Pet(words[1]))
        elif command == "Greet" and len(words) > 1:
            pet = whichone(animals, words[1])
            if not pet:
                feedback += "I didn't recognize that pet name. Please try again.\n"
                print()
            else:
                pet.hi()
        elif command == "Teach" and len(words) > 2:
            # Teach needs both a pet name and a word
            pet = whichone(animals, words[1])
            if not pet:
                feedback += "I didn't recognize that pet name. Please try again."
            else:
                pet.teach(words[2])
        elif command == "Feed" and len(words) > 1:
            pet = whichone(animals, words[1])
            if not pet:
                feedback += "I didn't recognize that pet name. Please try again."
            else:
                pet.feed()
        else:
            feedback+= "I didn't understand that. Please try again."
        # time passes for every pet each turn; report everyone's state
        for pet in animals:
            pet.clock_tick()
            feedback += "\n" + pet.__str__()
play()
```
## Practice
1. Create a class called `NumberSet` that accepts 2 integers as input, and defines two instance variables: `num1` and `num2`, which hold each of the input integers. Then, create an instance of NumberSet where its num1 is 6 and its num2 is 10. Save this instance to a variable `t`.
```
class NumberSet:
    """Hold a pair of integers as num1 and num2."""

    def __init__(self, num1, num2):
        self.num1 = num1
        self.num2 = num2
t=NumberSet(6,10)
```
2. Create a class called `Animal` that accepts two numbers as inputs and assigns them respectively to two instance variables: `arms` and `legs`. Create an instance method called `limbs` that, when called, returns the total number of limbs the animal has. To the variable name spider, assign an instance of Animal that has 4 arms and 4 legs. Call the limbs method on the spider instance and save the result to the variable name spidlimbs.
```
class Animal:
    """An animal described by how many arms and legs it has."""

    def __init__(self, arms, legs):
        self.arms = arms
        self.legs = legs

    def limbs(self):
        """Return the total number of limbs (arms plus legs)."""
        return self.legs + self.arms
spider = Animal(4,4)
spidlimbs = spider.limbs()
print(spidlimbs)
```
3. Create a class called `Cereal` that accepts three inputs: 2 strings and 1 integer, and assigns them to 3 instance variables in the constructor: `name`, `brand`, and `fiber`. When an instance of Cereal is printed, the user should see the following: "[name] cereal is produced by [brand] and has [fiber integer] grams of fiber in every serving!" To the variable name c1, assign an instance of Cereal whose name is "Corn Flakes", brand is "Kellogg's", and fiber is 2. To the variable name c2, assign an instance of Cereal whose name is "Honey Nut Cheerios", brand is "General Mills", and fiber is 3. Practice printing both!
```
class Cereal:
    """A cereal with a name, a producing brand, and grams of fiber per serving."""

    def __init__(self, name, brand, fiber):
        self.name = name
        self.brand = brand
        self.fiber = fiber

    def __str__(self):
        # e.g. "Corn Flakes cereal is produced by Kellogg's and has 2 grams
        # of fiber in every serving!"
        return "{} cereal is produced by {} and has {} grams of fiber in every serving!".format(
            self.name, self.brand, self.fiber)
c1=Cereal("Corn Flakes","Kellogg's",2)
c2=Cereal("Honey Nut Cheerios","General Mills",3)
print(c1)
print(c2)
```
4. Add a method `reflect_x` to Point which returns a new Point, one which is the reflection of the point about the x-axis. For example, Point(3, 5).reflect_x() is (3, -5)
```
class Point:
    """Point class for representing and manipulating x,y coordinates."""

    def __init__(self, initX, initY):
        self.x = initX
        self.y = initY

    def getX(self):
        return self.x

    def getY(self):
        return self.y

    def distanceFromOrigin(self):
        """Euclidean distance from (0, 0)."""
        return (self.x * self.x + self.y * self.y) ** 0.5

    def move(self, dx, dy):
        """Shift this point in place by (dx, dy)."""
        self.x += dx
        self.y += dy

    def reflect_x(self):
        """Return a NEW Point mirrored across the x-axis."""
        return Point(self.x, -self.y)

    def __str__(self):
        return str(self.x) + "," + str(self.y)


print(Point(3, 5).reflect_x())
```
5. Add a method called `move` that will take two parameters, call them dx and dy. The method will cause the point to move in the x and y direction the number of units given. (Hint: you will change the values of the state of the point)
```
class Point:
    """Point class for representing and manipulating x,y coordinates."""

    def __init__(self, initX, initY):
        self.x = initX
        self.y = initY

    def getX(self):
        return self.x

    def getY(self):
        return self.y

    def distanceFromOrigin(self):
        """Euclidean distance from (0, 0)."""
        return (self.x * self.x + self.y * self.y) ** 0.5

    def move(self, dx, dy):
        """Translate the point in place: dx along x, dy along y."""
        self.x += dx
        self.y += dy

    def __str__(self):
        return str(self.x) + "," + str(self.y)
```
6. Define a class called `Bike` that accepts a string and a float as input, and assigns those inputs respectively to two instance variables, `color` and `price`. Assign to the variable testOne an instance of Bike whose color is blue and whose price is 89.99. Assign to the variable testTwo an instance of Bike whose color is purple and whose price is 25.0.
```
class Bike:
    """A bicycle with a color (str) and a price (float)."""

    def __init__(self, color, price):
        self.color = color
        self.price = price


testOne = Bike('blue', 89.99)
testTwo = Bike('purple', 25.0)
```
7. Create a class called `AppleBasket` whose constructor accepts two inputs: a string representing a color, and a number representing a quantity of apples. The constructor should initialize two instance variables: apple_color and apple_quantity. Write a class method called increase that increases the quantity by 1 each time it is invoked. You should also write a __str__ method for this class that returns a string of the format: "A basket of [quantity goes here] [color goes here] apples." e.g. "A basket of 4 red apples." or "A basket of 50 blue apples." (Writing some test code that creates instances and assigns values to variables may help you solve this problem!)
```
class AppleBasket:
    """A basket holding a quantity of apples of one color."""

    def __init__(self, apple_color, apple_quantity):
        self.apple_color = apple_color
        self.apple_quantity = apple_quantity

    def increase(self):
        """Add exactly one apple to the basket."""
        self.apple_quantity += 1

    def __str__(self):
        return "A basket of {} {} apples.".format(
            self.apple_quantity, self.apple_color
        )
```
8. Define a class called `BankAccount` that accepts the name you want associated with your bank account in a string, and an integer that represents the amount of money in the account. The constructor should initialize two instance variables from those inputs: `name` and `amt`. Add a string method so that when you print an instance of BankAccount, you see "Your account, [name goes here], has [start_amt goes here] dollars." Create an instance of this class with "Bob" as the name and 100 as the amount. Save this to the variable t1.
```
class BankAccount:
    """A bank account with an owner name and a dollar balance."""

    def __init__(self, name, amt):
        # name: account owner; amt: amount of money in the account
        self.name = name
        self.amt = amt

    def __str__(self):
        return "Your account, {}, has {} dollars.".format(self.name, self.amt)


# The exercise requires an instance named t1 for "Bob" with 100 dollars;
# the original cell omitted this line.
t1 = BankAccount("Bob", 100)
```
| github_jupyter |
# Ensembles and Predictions Clipping
The combination of predictions from several methods to one forecast often leads to great performance improvements.
## Simple Ensembles
The most common strategy just takes an average of all the forecast, which often leads to surprisingly good results, for more on this topic, see forecast combination chapter from [Forecasting: Principles and Practice](https://otexts.com/fpp2/combinations.html). hcrystalball implements `SimpleEnsemble` which provides a simple interface for putting together very diverse models. `SimpleEnsemble` model takes a list of any hcrystalball model wrapper instance(s) as base learners and aggregates their prediction using `ensemble_func`.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
plt.rcParams['figure.figsize'] = [12, 6]
from hcrystalball.utils import get_sales_data
# Small sample: 100 days of sales for a single assortment/state/store.
df = get_sales_data(n_dates=100,
                    n_assortments=1,
                    n_states=1,
                    n_stores=1)
# X carries only the datetime index (no exogenous features); y is the target.
X, y = pd.DataFrame(index=df.index), df['Sales']
from hcrystalball.ensemble import SimpleEnsemble
from hcrystalball.wrappers import ProphetWrapper
from hcrystalball.wrappers import ExponentialSmoothingWrapper
from hcrystalball.wrappers import get_sklearn_wrapper
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
# Prophet base learner with an extra 14-day seasonality on top of its defaults.
prophet = ProphetWrapper(
    extra_seasonalities = [{
        'name':'bi-weekly',
        'period': 14.,
        'fourier_order': 5,
        'prior_scale': 15.0,
        'mode': None
    }]
)
# Combine three very different base learners by taking the median forecast.
simple_ensemble = SimpleEnsemble(
    base_learners=[
        prophet,
        ExponentialSmoothingWrapper(),
        get_sklearn_wrapper(RandomForestRegressor, random_state=42)
    ],
    ensemble_func = 'median')
# Fit on all but the last 10 days, forecast those 10 days, and join the
# actuals back in so the plot shows forecast vs. observed sales.
preds = (simple_ensemble.fit(X[:-10], y[:-10])
         .predict(X[-10:])
         .merge(y, left_index=True, right_index=True, how='outer')
         .tail(50)
        )
preds.plot(title=f"MAE:{(preds['Sales']-preds['simple_ensemble']).abs().mean().round(3)}");
```
## Stacking Ensembles
hcrystalball `StackingEnsemble` model is very similar to sklearn [StackingRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.StackingRegressor.html), nice explanation of the concept could be also found [here](http://rasbt.github.io/mlxtend/user_guide/regressor/StackingCVRegressor/). Unfortunately, the sklearn version of the `StackingEnsemble` doesn't allow us to enrich the `meta_training_data` by other features than predictions of base learners.
In the case of time-series forecasts, the additional features are the key to enabling the `meta_model` to successfully learn when each model performs best. It's often the case that some model performs better during specific days (i.e. weekends/holidays/spikes) and other is better during more stable periods. The goal is to combine them appropriately.
To do that you can specify `weekdays_as_features` as `True`, which will enrich the `meta_training_data` with the day of the week features. Another way how to improve the performance of the stacking ensemble is to take into account that some models perform better on short horizons, and some on longer ones. To take this effect into account the `horizons_as_features` can be set to `True`, which creates an additional column per each horizon and enables the meta_model to learn this representation. Another important aspect of `StackingRegressor` is that to obtain `meta_model` which can generalize well - it needs to be fitted on out-of-sample predictions - `train_horizon` and `train_n_splits`. If you set `train_horizon`=5 and`train_n_splits`=4 then the training set for meta_model will be 20 observations. It's advisable to have
`train_horizon` * `train_n_splits` = training set for `meta_model` as big as possible and have `train_horizon` which will match the horizon of the `StackingEnsemble` itself.
### Predictions Clipping
Meta model's predictions can be clipped to certain range given `clip_predictions_lower` and `clip_predictions_upper` parameters and/or we can in the same way also restrict `base_learners` (see/try commented lines)
```
from hcrystalball.ensemble import StackingEnsemble
# Stack the base learners and let a linear meta model combine their
# out-of-sample forecasts. The commented-out clip_predictions_* lines show
# where predictions could be restricted to a fixed range.
stacking_ensemble = StackingEnsemble(
    base_learners=[
        prophet,
        ExponentialSmoothingWrapper(
            # prediction bounds for ExponentialSmoothingWrapper base_learner predictions
            # clip_predictions_lower=0.
            # clip_predictions_upper=50_000.
        ),
        get_sklearn_wrapper(
            RandomForestRegressor,
            random_state=42,
            # prediction bounds for RandomForestRegressor base_learner predictions
            # clip_predictions_lower=0.
            # clip_predictions_upper=50_000.
        )
    ],
    # 10-step horizon x 3 splits -> 30 out-of-sample rows to train the meta model.
    train_horizon=10,
    train_n_splits=3,
    meta_model=LinearRegression(),
    # prediction bounds for meta_model predictions
    # clip_predictions_lower=0.
    # clip_predictions_upper=50_000.
)
# Same evaluation scheme as the simple ensemble above: hold out the last
# 10 days, predict them, and merge the actuals for plotting.
preds = (stacking_ensemble.fit(X[:-10], y[:-10])
         .predict(X[-10:])
         .merge(y, left_index=True, right_index=True, how='outer')
         .tail(50)
        )
preds.plot(title=f"MAE:{(preds['Sales']-preds['stacking_ensemble']).abs().mean().round(3)}");
```
| github_jupyter |
# `Практикум по программированию на языке Python`
<br>
## `Занятие 4: Основы ООП, особенности ООП в Python`
<br><br>
### `Мурат Апишев (mel-lain@yandex.ru)`
#### `Москва, 2020`
### `Парадигмы проектирования кода`
Императивное программирование (язык ассемблера)
`mov ecx, 7`
Декларативное программирование (SQL)
`select * from table where index % 10 == 0`
Подвид: функциональное программирование (Haskell)
`filter even [1..10]`
Объектно-ориентированное программирование (С++)
`auto car = new Car(); car->fill_up(10);`
### `Объектно-ориентированное программирование`
- Программа, как и окружающий мир, состоит из сущностей<br><br>
- Сущности имеют какое-то внутренее состояние<br><br>
- Также сущности взаимодействуют друг с другом<br><br>
- ООП нужно для описания программы в виде сущностей и их взаимоотношений<br><br>
- При этом и на сущности, и на отношения накладываются ограничения<br><br>
- Это позволяет писать более короткий, простой и переиспользуемый код
### `Базовые понятия: класс и объект`
- __Класс__ представляет собой тип данных (как int или str)<br><br>
- Это способ описания некоторой сущности, её состояния и возможного поведения<br><br>
- Поведение при этом зависит от состояния и может его изменять<br><br>
- __Объект__ - это конретный представитель класса (как переменная этого типа)<br><br>
- У объекта своё состояние, изменяемое поведением<br><br>
- Поведение полностью определяется правилами, описанными в классе
### `Базовые понятия: интерфейс`
- __Интерфейс__ - это класс, описывающий только поведение<br><br>
- У интерфейса нет состояния<br><br>
- Как следствие, создать объект типа интерфейса невозможно<br><br>
- Вместо этого описываются классы, которые реализуют этот интерфейс и, в то же время, имеют состояние<br><br>
- С помощью интерфейсов реализуется полиморфизм (будет далее)<br><br>
- Программирование на уровне интерфейсов делает код читаемее и проще<br><br>
- Интерфейсы в некоторых языках (например, Java) решают проблему отсутствия множественного наследования
### `Интерфейс: пример`
`interface SomeCar {`
$\quad$`fill_up(gas_volume)`
$\quad$`turn_on()`
$\quad$`turn_off()`
`}`
Интерфейс
- не содержит информации о состоянии автомобиля<br><br>
- не содержит информации о том, как выполнять описанные команды<br><br>
- он только описывает то, какие операции должны быть доступны над объектом, который претендует на то, чтобы быть автомобилем
### `Реализация интерфейса`
`class ConcreteCar {`
$\quad$`fill_up(gas_volume) { tank += gas_volume }`
$\quad$`turn_on() { is_turned_on = true }`
$\quad$`turn_off() { is_turned_on = false }`
$\quad$`tank = 0`
$\quad$`is_turned_on = false`
`}`
- Обычно данные класса называют _полями_ (или _атрибутами_), а функции - _методами_ <br><br>
- **Абстрактный класс** - промежуточный вариант между интерфейсом и обычным классом
### `Принципы ООП`
- **Абстракция** - выделение важных свойств объекта и игнорирование прочих<br><br>
- **Инкапсуляция** - хранение данных и методов работы с ними внутри одного класса с доступом к данным только через методы<br><br>
- **Наследование** - возможность создания наследников, получающих все свойства родителей с возможностью их переопределения и расширения<br><br>
- **Полиморфизм** - возможность использования объектов разных типов с общим интерфейсом без информации об их внутреннем устройстве
### `ООП в Python`
- Python - это полностью объектно-ориентированный язык<br><br>
- В Python абсолютно всё является объектами, включая классы<br><br>
- Полностью поддерживаются все принципы ООП, кроме инкапсуляции<br><br>
- Инкапсуляция поддерживается частично: нет ограничения на доступ к полям класса<br><br>
- Поэтому для инкапсуляции используют договорные соглашения
### `Так выглядят классы в Python`
```
class ConcreteCar:
    """A car with a fuel tank and an ignition state."""

    def __init__(self):
        # A fresh car starts with an empty tank and the engine off.
        self.tank = 0
        self.is_turned_on = False

    def fill_up(self, gas_volume):
        """Add gas_volume units of fuel to the tank."""
        self.tank = self.tank + gas_volume

    def turn_on(self):
        """Start the engine."""
        self.is_turned_on = True

    def turn_off(self):
        """Stop the engine."""
        self.is_turned_on = False


car = ConcreteCar()
print(type(car), car.__class__)
car.fill_up(10)
print(car.tank)
```
### `Функция __init__`
- Главное: `__init__` - не конструктор! Она ничего не создаёт и не возвращает
- Созданием объекта занимается функция `__new__`, переопределять которую без необходимости не надо
- `__init__` получает на вход готовый объект и инициализирует его атрибуты<br>
В отличие от C++, атрибуты можно добавлять/удалять на ходу:
```
# Attributes can be added to and removed from an instance at runtime,
# unlike in statically declared languages such as C++.
class Cls:
    pass
cls = Cls()
cls.field = 'field'  # create a brand-new attribute on the fly
print(cls.field)
del cls.field  # ...and remove it again
print(cls.field) # AttributeError: 'Cls' object has no attribute 'field'
```
### `Параметр self`
- Метод класса отличается от обычной функции только наличием объекта `self` в качестве первого аргумента <br><br>
- Это то же самое, что происходит в C++/Java (там аналогом `self` является указатель/ссылка `this`) <br><br>
- Название `self` является общим соглашением, но можно использовать и другое (не надо!)<br><br>
- Метод класса, не получающий на вход `self` является _статическим_, то есть применяется вне зависимости от существования объектов данного класса<br><br>
- Статические методы часто используются для специализированного создания объектов класса<br><br>
- В Python `__new__` является статическим методом
### `Как быть с инкапсуляцией`
- Приватное поле прежде всего должно быть обозначено таковым
- В Python для этого есть соглашения:<br>
```
# Encapsulation by convention: one leading underscore means "private by
# agreement"; two leading underscores trigger name mangling, so the
# attribute is stored as _ClassName__attr.
class Cls:
    def __init__(self):
        self.public_field = 'Ok'
        self._private_field = "You're shouldn't see it"
        self.__very_private_field = "YOU REALLY SHOULDN'T SEE IT!!!"
cls = Cls()
print(cls.public_field)
print(cls._private_field)  # accessible, but marked private by convention
print(cls.__very_private_field)  # AttributeError: name was mangled
print(cls._Cls__very_private_field)  # the mangled name is still reachable
```
### `Атрибуты объекта и класса`
```
# dir() lists an object's attributes. Assigning on the instance adds an
# instance-level attribute; assigning on the class adds a class-level one
# that all instances see.
class Cls:
    pass
cls = Cls()
print([e for e in dir(cls) if not e.startswith('__')])
cls.some_obj_attr = '1'  # instance-level attribute
print([e for e in dir(cls) if not e.startswith('__')])
print([e for e in dir(Cls) if not e.startswith('__')])
Cls.some_cls_attr = '1'  # class-level attribute
print([e for e in dir(Cls) if not e.startswith('__')])
```
### `Переменная __dict__`
- Для большого числа типов в Python пределена переменная-словарь `__dict__`
- Она содержит атрибуты, специфичные для данного объекта (не его класса и не его родителей)
- Множество элементов `__dict__` является подмножеством элементов, возвращаемых функцией `dir()`
```
# __dict__ holds only the attributes specific to the object itself, so its
# keys are a subset of what dir() reports.
class A: pass
print(set(A.__dict__.keys()).issubset(set(dir(A))))
[].__dict__  # AttributeError: list instances have no __dict__
```
### `Доступ к атрибутам`
- Для работы с атрибутами есть функции `getattr`, `setattr` и `delattr`
- Их основное преимущество - оперирование именами атрибутов в виде строк
```
# getattr/setattr/delattr manipulate attributes given their names as strings.
cls = Cls()
setattr(cls, 'some_attr', 'some')
print(getattr(cls, 'some_attr'))
delattr(cls, 'some_attr')
print(getattr(cls, 'some_attr'))  # AttributeError: attribute was just removed
```
### `Class magic methods`
- Магические методы придают объекту класса определённые свойства
- Такие методы получают `self` и вызываются интерпретатором неявно
- Например, операторы - это магические методы<br>
Рассмотрим несколько примеров:
```
# Magic (dunder) methods are called implicitly by the interpreter.
class Cls:
    def __init__(self): # initialize object
        self.name = 'Some class'
    def __repr__(self): # str for printing object
        return 'Class: {}'.format(self.name)
    def __call__(self, counter): # call == operator() in C++
        return self.name * counter
cls = Cls()
print(cls.__repr__()) # == print(cls)
print(cls(2))  # __call__ makes the instance itself callable
```
### `Class magic methods`
Ещё примеры магических методов:
```
# More magic methods (empty stubs shown for reference):
def __lt__(self, other): pass  # obj < other
def __eq__(self, other): pass  # obj == other
def __add__(self, other): pass  # obj + other
def __mul__(self, value): pass  # obj * value
def __int__(self): pass  # int(obj)
def __bool__(self): pass  # bool(obj), truth testing
def __hash__(self): pass  # hash(obj)
def __getitem__(self, index): pass  # obj[index]
def __setitem__(self, index, value): pass  # obj[index] = value
```
### `Как на самом деле устроен доступ к атрибутам`
При работе с атрибутами вызываются магические методы `__getattr__`, `__getattribute__`, `__setattr__` и `__delattr__`:
```
# Attribute access hooks: __getattribute__ runs on EVERY lookup,
# __getattr__ only after a normal lookup fails, __setattr__ on every
# assignment, and __delattr__ on every deletion.
class Cls:
    def __setattr__(self, attr, value):
        print(f'Create attr with name "{attr}" and value "{value}"')
        self.__dict__[attr] = value
    def __getattr__(self, attr):
        print(f'WE WILL ENTER IT ONLY IN CASE OF ERROR!')
        return self.__dict__[attr]
    def __getattribute__(self, attr):
        # Dunder lookups are skipped here to avoid noisy output/recursion.
        if not attr.startswith('__'):
            print(f'Get value of attr with name "{attr}"')
        return super().__getattribute__(attr) # call parent method implementation
    def __delattr__(self, attr):
        # Deliberately does NOT delete: demonstrates intercepting `del`.
        print(f'Remove attr "{attr}" is impossible!')
```
### `Как на самом деле устроен доступ к атрибутам`
```
# Each line triggers one of the hooks defined above; the last lookup
# fails inside __getattr__ because the attribute never existed.
cls = Cls()
cls.some_attr = 'some'  # __setattr__
a = cls.some_attr  # __getattribute__
del cls.some_attr  # __delattr__ (deletion is blocked above)
b = cls.some_attr  # still present, since deletion was blocked
cls.non_exists_attr  # normal lookup fails -> __getattr__ -> KeyError
```
### `Магические методы и менеджер контекста`
Менеджер контекста (оператор `with`) работает с двумя магическими методами:
- `__enter__` - код, который нужно выполнить над объектом при входе в блок менеджера
- `__exit__` - код, который нужно в любом случае выполнить при выходе из блока
```
# Context-manager protocol: `with` calls __enter__ on entry and __exit__
# on exit (even if an exception occurred inside the block). Here the
# "database" is writable (a list) only inside the `with` block and
# read-only (a tuple) outside it.
class SomeDataBaseDao:
    def __init__(self): self._db = ()
    def append(self, value): self._db.append(value)
    def __enter__(self):
        # Switch to a mutable list for the duration of the block.
        self._db = list(self._db)
        print('Set DB to read-write mode')
        return self
    def __exit__(self, exception_type, exception_val, trace):
        # Freeze back to a tuple; returning True suppresses exceptions.
        self._db = tuple(self._db)
        print('Set DB to read-only mode')
        return True
dao = SomeDataBaseDao()
#dao.append(1) # AttributeError: 'tuple' object has no attribute 'append'
with dao:
    dao.append(1)
print(dao._db)
```
### `Наследование в Python`
```
# A child inherits the parent's methods. Name mangling of __value happens
# inside Parent, so the inherited get_value resolves to _Parent__value for
# Child instances as well.
class Parent:
    def __init__(self):
        self._value = 10
        self.__value = 20  # stored on the instance as _Parent__value
    def get_value(self):
        return self.__value
class Child(Parent):
    pass
print(Parent().get_value(), Child().get_value())
print(Child().__dict__)  # both attributes set by Parent.__init__ live on the instance
```
- `__dict__` содержит информацию об атрибутах объекта, атрибутов класса этого объекта (или родителей этого класса) там нет<br><br>
- При конструировании объекта класса-наследника создаётся один объект, в котором выставляют атрибуты все вызовы `__init__` в иерархии наследования (если они вызывались снизу вверх с помощью `super()`)<br><br>
- Поэтому нет разницы, добавлены были атрибуты в `__init__` родительского класса или класса-наследника - это всё равно атрибуты этого объекта, они будут содержаться в его `__dict__`
### `Перегрузка родительских методов`
```
# Overriding parent methods: a child may call the parent implementation
# explicitly, either via the class name or via super().
class Parent:
    def __init__(self, value):
        self._value = value
    def get_value(self):
        return self._value
    def __str__(self):
        return f'Value: {self._value}'
class Child(Parent):
    def __init__(self, value):
        Parent.__init__(self, value) # == super().__init__(value)
    def get_value(self):
        return Parent.get_value(self) * 2 # == super().get_value() * 2
print(Parent(10).get_value())
print(Child(10).get_value())  # overridden: doubles the parent's result
print(Child(10)._value)  # attribute set by the parent initializer
print(Child(10))  # __str__ is inherited from Parent
```
### `Интерфейсы`
- На уровне языка интерфейсов нет
- Это некритично в силу наличия множественного наследования
- При этом эмулировать интерфейсы - хорошая практика
```
# Python has no `interface` keyword; the common emulation is a base class
# whose methods raise NotImplementedError until overridden.
class Interface:
    def get_value(self):
        raise NotImplementedError
class Cls(Interface):
    def __init__(self, value):
        self.value = value
    def get_value(self):
        return self.value
print(Cls(10).get_value())
print(Interface().get_value()) # NotImplementedError
```
### `Полезная функция isinstance`
```
# isinstance respects inheritance: bool is a subclass of int, and every
# class ultimately derives from object.
print('isinstance(1, int) == {}'.format(isinstance(1, int)))
print('isinstance(1.0, int) == {}'.format(isinstance(1.0, int)))
print('isinstance(True, int) == {}'.format(isinstance(True, int)))
class Interface:
    def get_value(self):
        raise NotImplementedError
class Cls1(Interface):
    pass
class Cls2(Interface):
    pass
print('isinstance(Cls1(), Cls1) == {}'.format(isinstance(Cls1(), Cls1)))
print('isinstance(Cls1(), Interface) == {}'.format(isinstance(Cls1(), Interface)))
print('isinstance(Cls1(), object) == {}'.format(isinstance(Cls1(), object)))
print('isinstance(Cls2(), Cls1) == {}'.format(isinstance(Cls2(), Cls1)))  # siblings: False
```
### `Полиморфизм`
- Полиморфизм позволяет работать с объектами, основываясь только на их интерфейсе, без знания типа<br><br>
- В C++ требуется, чтобы объекты полиморфных классов имели общего предка<br><br>
- В Python это не обязательно, достаточно, чтобы объекты поддерживали один интерфейс<br><br>
- Такое поведение называется duck-typing<br><br>
- Общий интерфейс в данной ситуации фиксирует протокол взаимодействия
### `Полиморфизм: пример`
```
class Figure:
    """Common interface: every concrete figure must provide area()."""

    def area(self):
        raise NotImplementedError


class Square(Figure):
    """A square described by the length of one side."""

    def __init__(self, side):
        self.side = side

    def area(self):
        return self.side ** 2


import math


class Circle(Figure):
    """A circle described by its radius."""

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        return math.pi * self.radius ** 2


class Triangle(Figure):
    """A triangle described by its three side lengths."""

    def __init__(self, a, b, c):
        self.a, self.b, self.c = a, b, c

    def area(self):
        # Heron's formula; semi denotes the semi-perimeter.
        semi = (self.a + self.b + self.c) / 2.0
        product = semi * (semi - self.a) * (semi - self.b) * (semi - self.c)
        return product ** 0.5
```
### `Полиморфизм: пример`
Теперь опишем функцию, которая ожидает объекты, реализующие `Figure`:
```
def compute_areas(figures):
    """Print the area of every figure in the iterable (duck-typed)."""
    for shape in figures:
        print(shape.area())
```
Можем запускать, не беспокоясь о том, что именно представляют собой входные объекты:
```
# Three unrelated figure types, processed uniformly through the shared
# area() interface (polymorphism / duck typing).
s = Square(10)
c = Circle(5)
t = Triangle(1, 3, 3)
compute_areas([s, c, t])
```
В Python можно обойтись и без наследования Figure, достаточно наличия метода `area` с нужным поведением
### `Сохранение объектов: модуль pickle`
```
# pickle serializes whole object graphs (here a Cls nested inside another
# Cls) to a byte stream on disk and restores an equivalent copy later.
class Cls:
    def __init__(self, value):
        self.__value = value
    def get_value(self):
        return self.__value
import pickle
cls = Cls(Cls(10))
with open('cls.pkl', 'wb') as fout:
    pickle.dump(cls, fout)  # write the object graph to disk
with open('cls.pkl', 'rb') as fin:
    cls_2 = pickle.load(fin)  # read an equivalent copy back
cls_2.get_value().get_value()  # the nested structure survived the round trip
```
### `Исключения`
- Исключение - механизм, который был придуман для штатной обработки ошибочных ситуаций<br><br>
- Часто ошибочно относится к ООП, на самом деле это иная концепция<br><br>
- Python поддерживает исключения, и ими надо пользоваться<br><br>
- В языке есть большая иерархия классов исключений на все случаи жизни<br><br>
- Если нужен свой класс, то можно наследовать от какого-то из существующих<br><br>
- Оптимальный вариант - класс `Exception`
### `Базовый синтаксис`
```
# An uncaught exception aborts execution; try/except intercepts both
# implicitly raised errors (1 / 0) and explicit `raise` statements.
1 / 0
try:
    1 / 0
except:
    print('Zero division!')
try:
    raise ZeroDivisionError
except:
    print('Zero division!')
```
### `Полный синтаксис`
```
# Full try statement: several except clauses from most to least specific,
# an else branch for the success path, and finally for unconditional cleanup.
try:
    # some code
    pass
except ValueError: # catch value errors
    print('ERROR') # do something
    raise # continue rising of this exception (or can skip it)
except RuntimeError as error: # catch runtume errors and store object
    print(error) # inspect exception content
    raise error # continue rising
except: # try not to use except without class specification
    print('Unknown error')
    pass
else: # if there's no exception, execute this branch
    print('OK')
finally: # actions that chould be done in any case
    #some actions (closing files for instance)
    print('finally')
    pass
```
### `Спасибо за внимание!`
| github_jupyter |
```
# Colab bootstrap: install the fast.ai course dependencies and mount
# Google Drive so the notebook can persist files between sessions.
!curl -s https://course.fast.ai/setup/colab | bash
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
root_dir = "/content/gdrive/My Drive/"
base_dir = root_dir + 'fastai-v3/'  # course working directory on Drive
```
**Important note:** You should <mark>always work on a duplicate of the course notebook</mark>. On the page you used to open this, tick the box next to the name of the notebook and click duplicate to easily create a new version of this notebook.
<mark>You will get errors each time you try to update your course repository if you don't do this, and your changes will end up being erased by the original course version.</mark>
# Welcome to Jupyter Notebooks!
If you want to learn how to use this tool you've come to the right place. This article will teach you all you need to know to use Jupyter Notebooks effectively. You only need to go through Section 1 to learn the basics and you can go into Section 2 if you want to further increase your productivity.
You might be reading this tutorial in a web page (maybe Github or the course's webpage). <mark>We strongly suggest to read this tutorial in a (yes, you guessed it) Jupyter Notebook</mark>. <mark>This way you will be able to actually *try* the different commands we will introduce here</mark>.
## Section 1: Need to Know
### Introduction
Let's build up from the basics, what is a Jupyter Notebook? Well, you are reading one. It is a document made of cells. You can write like I am writing now (markdown cells) or you can perform calculations in Python (code cells) and run them like this:
```
1+1
```
Cool huh? This combination of prose and code makes <mark>Jupyter Notebook ideal for experimentation</mark>: we can see the rationale for each experiment, the code and the results in one comprehensive document. <mark>In fast.ai, each lesson is documented in a notebook and you can later use that notebook to experiment yourself.
Other <mark>renowned institutions in academy and industry use Jupyter Notebook</mark>: Google, Microsoft, IBM, Bloomberg, Berkeley and NASA among others. <mark>Even Nobel-winning economists [use Jupyter Notebooks](https://paulromer.net/jupyter-mathematica-and-the-future-of-the-research-paper/) for their experiments</mark> and <mark>some suggest that Jupyter Notebooks will be the [new format for research papers](https://www.theatlantic.com/science/archive/2018/04/the-scientific-paper-is-obsolete/556676/)</mark>.
### Writing
A type of cell in which you can write like this is called _Markdown_. [_Markdown_](https://en.wikipedia.org/wiki/Markdown) is a very popular markup language. To specify that a cell is _Markdown_ you need to click in the drop-down menu in the toolbar and select _Markdown_.
Click on the the '+' button on the left and select _Markdown_ from the toolbar.
Now you can type your first _Markdown_ cell. Write 'My first markdown cell' and press run.

You should see something like this:
My first markdown cell
Now try making your first _Code_ cell: follow the same steps as before but don't change the cell type (when you add a cell its default type is _Code_). Type something like 3/2. You should see '1.5' as output.
```
3/2
```
### Modes
If you made a mistake in your *Markdown* cell and you have already ran it, you will notice that you cannot edit it just by clicking on it. This is because you are in **Command Mode**. Jupyter Notebooks have two distinct modes:
1. <mark>**Edit Mode**</mark>: Allows you to edit a cell's content.
2. <mark>**Command Mode**</mark>: Allows you to edit the notebook as a whole and use keyboard shortcuts but not edit a cell's content.
You can <mark>toggle between these two by either pressing <kbd>ESC</kbd> and <kbd>Enter</kbd> or clicking outside a cell or inside it</mark> (you need to double click if its a Markdown cell). You can always know which mode you're on since the current cell has a green border if in **Edit Mode** and a blue border in **Command Mode**. Try it!
### Other Important Considerations
1. Your notebook is autosaved every 120 seconds. If you want to manually save it you can just press the save button on the upper left corner or press <kbd>s</kbd> in **Command Mode**.

2. To know if your kernel is computing or not you can check the dot in your upper right corner. If the dot is full, it means that the kernel is working. If not, it is idle. You can place the mouse on it and see the state of the kernel be displayed.

3. There are a couple of shortcuts you must know about which we use **all** the time (always in **Command Mode**). These are:
<kbd>Shift</kbd>+<kbd>Enter</kbd>: Runs the code or markdown on a cell
<kbd>Up Arrow</kbd>+<kbd>Down Arrow</kbd>: Toggle across cells
<kbd>b</kbd>: Create new cell
<kbd>0</kbd>+<kbd>0</kbd>: Reset Kernel
You can find more shortcuts in the Shortcuts section below.
4. You may need to use a terminal in a Jupyter Notebook environment (for example to git pull on a repository). That is very easy to do, just press 'New' in your Home directory and 'Terminal'. Don't know how to use the Terminal? We made a tutorial for that as well. You can find it [here](https://course.fast.ai/terminal_tutorial.html).

That's it. This is all you need to know to use Jupyter Notebooks. That said, we have more tips and tricks below ↓↓↓
## Section 2: Going deeper
### Markdown formatting
#### Italics, Bold, Strikethrough, Inline, Blockquotes and Links
The five most important concepts to format your code appropriately when using markdown are:
1. *Italics*: Surround your text with '\_' or '\*'
2. **Bold**: Surround your text with '\__' or '\**'
3. `inline`: Surround your text with '\`'
4. > blockquote: Place '\>' before your text.
5. [Links](https://course.fast.ai/): Surround the text you want to link with '\[\]' and place the link adjacent to the text, surrounded with '()'
#### Headings
Notice that including a hashtag before the text in a markdown cell makes the text a heading. The number of hashtags you include will determine the priority of the header ('#' is level one, '##' is level two, '###' is level three and '####' is level four). We will add three new cells with the '+' button on the left to see how every level of heading looks.
Double click on some headings and find out what level they are!
#### Lists
There are three types of lists in markdown.
Ordered list:
1. Step 1
2. Step 1B
3. Step 3
Unordered list
* learning rate
* cycle length
* weight decay
Task list
- [x] Learn Jupyter Notebooks
- [x] Writing
- [x] Modes
- [x] Other Considerations
- [ ] Change the world
Double click on each to see how they are built!
### Code Capabilities
**Code** cells are different than **Markdown** cells in that they have an output cell. This means that we can _keep_ the results of our code within the notebook and share them. Let's say we want to show a graph that explains the result of an experiment. We can just run the necessary cells and save the notebook. The output will be there when we open it again! Try it out by running the next four cells.
```
# Import necessary libraries
from fastai.vision import *
import matplotlib.pyplot as plt
from PIL import Image
# Build a small arithmetic sequence and plot it; the plot output is stored
# in the notebook when it is saved.
a = 1
b = a + 1
c = b + a + 1
d = c + b + a + 1
a, b, c ,d
plt.plot([a,b,c,d])
plt.show()
```
We can also print images while experimenting. I am watching you.
```
Image.open('images/notebook_tutorial/cat_example.jpg')
```
### Running the app locally
You may be running Jupyter Notebook from an interactive coding environment like Gradient, Sagemaker or Salamander. You can also run a Jupyter Notebook server from your local computer. What's more, if you have installed Anaconda you don't even need to install Jupyter (if not, just `pip install jupyter`).
You just need to run `jupyter notebook` in your terminal. Remember to run it from a folder that contains all the folders/files you will want to access. You will be able to open, view and edit files located within the directory in which you run this command but not files in parent directories.
If a browser tab does not open automatically once you run the command, you should CTRL+CLICK the link starting with 'https://localhost:' and this will open a new tab in your default browser.
### Creating a notebook
Click on 'New' in the upper right corner and 'Python 3' in the drop-down list (we are going to use a [Python kernel](https://github.com/ipython/ipython) for all our experiments).

Note: You will sometimes hear people talking about the Notebook 'kernel'. The 'kernel' is just the Python engine that performs the computations for you.
### Shortcuts and tricks
#### Command Mode Shortcuts
There are a couple of useful keyboard shortcuts in `Command Mode` that you can leverage to make Jupyter Notebook faster to use. Remember that to switch back and forth between `Command Mode` and `Edit Mode` with <kbd>Esc</kbd> and <kbd>Enter</kbd>.
<kbd>m</kbd>: Convert cell to Markdown
<kbd>y</kbd>: Convert cell to Code
<kbd>D</kbd>+<kbd>D</kbd>: Delete the cell(if it's not the only cell) or delete the content of the cell and reset cell to Code(if only one cell left)
<kbd>o</kbd>: Toggle between hide or show output
<kbd>Shift</kbd>+<kbd>Arrow up/Arrow down</kbd>: Selects multiple cells. Once you have selected them you can operate on them like a batch (run, copy, paste etc).
<kbd>Shift</kbd>+<kbd>M</kbd>: Merge selected cells.
<kbd>Shift</kbd>+<kbd>Tab</kbd>: [press these two buttons at the same time, once] Tells you which parameters to pass on a function
<kbd>Shift</kbd>+<kbd>Tab</kbd>: [press these two buttons at the same time, three times] Gives additional information on the method
#### Cell Tricks
```
from fastai import*
from fastai.vision import *
```
There are also some tricks that you can code into a cell.
`?function-name`: Shows the definition and docstring for that function
```
?ImageDataBunch
```
`??function-name`: Shows the source code for that function
```
??ImageDataBunch
```
`doc(function-name)`: Shows the definition, docstring **and links to the documentation** of the function
(only works with fastai library imported)
```
doc(ImageDataBunch)
```
#### Line Magics
Line magics are functions that you can run on cells and take as an argument the rest of the line from where they are called. You call them by placing a '%' sign before the command. The most useful ones are:
`%matplotlib inline`: This command ensures that all matplotlib plots will be plotted in the output cell within the notebook and will be kept in the notebook when saved.
`%reload_ext autoreload`, `%autoreload 2`: Reload all modules before executing a new line. If a module is edited, it is not necessary to rerun the import commands, the modules will be reloaded automatically.
These three commands are always called together at the beginning of every notebook.
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
```
`%timeit`: Runs a line many times (the number of loops is chosen automatically) and displays the average time it took to run it.
```
%timeit [i+1 for i in range(1000)]
```
`%debug`: Allows to inspect a function which is showing an error using the [Python debugger](https://docs.python.org/3/library/pdb.html).
```
for i in range(1000):
a = i+1
b = 'string'
c = b+1
%debug
c
```
| github_jupyter |
# Load Packages
```
import sys
sys.path.append('..')
from numpy_fracdiff import fracdiff
import numpy as np
import scipy.special
import matplotlib.pyplot as plt
%matplotlib inline
#!pip install memory_profiler
import memory_profiler
%load_ext memory_profiler
```
# Load Demo Data
```
with np.load('data/demo1.npz') as data:
t = data['t']
px = data['px']
x = px[:, 0]
del t, px
```
# Modeling
## (a) First Order Differentiation
Let $x_t$ a time series,
$t\in\mathbb{N}$ the time step,
$\Delta^1$ the difference operator for first order $d=1$
$$
(\Delta^1 x)_t = x_t - x_{t-1}
$$
Example
```
z = x[1:] - x[:-1]
z
```
Wrap it into a function
```
def firstdiff(x: np.ndarray) -> np.ndarray:
    """First-order difference: (Delta^1 x)_t = x_t - x_{t-1}.

    The first entry has no predecessor, so it is NaN; the output keeps
    the same length as the input.
    """
    return np.concatenate(([np.nan], np.diff(x)))
%time z = firstdiff(x)
print(z[:10])
```
## (b) Integral Differentiation
Let $x_t$ a time series,
$t\in\mathbb{N}$ the time step,
$\Delta^d$ the difference operator of integral order $d\in\mathbb{N}^+$
$$
(\Delta^d x)_t = \sum_{k=0}^d (-1)^k {d\choose k} x_{t-k}
$$
Example with `d=1`
```
# generate weights
d = 1
w = np.empty((d+1,))
for k in range(d+1):
w[k] = np.power(-1, k) * scipy.special.binom(d, k)
print(w)
# apply weights
z = w[0] * x
z[:d] = np.nan
for k in range(1, d+1):
z[k:] += w[k] * x[:-k]
print(z[:10])
```
Wrap it in a function
```
def intdiff(x: np.ndarray, d: int) -> "tuple[np.ndarray, np.ndarray]":
    """Integral-order differencing: (Delta^d x)_t = sum_{k=0}^d (-1)^k C(d,k) x_{t-k}.

    Parameters
    ----------
    x : np.ndarray
        Input time series.
    d : int
        Non-negative integral differencing order.

    Returns
    -------
    z : np.ndarray
        Differenced series; the first `d` entries are NaN because they
        lack enough lagged observations.
    w : np.ndarray
        The d+1 alternating binomial weights applied to the lags.
    """
    # Vectorized weight generation: (-1)^k * binom(d, k) for k = 0..d
    # (replaces the original element-by-element loop).
    ks = np.arange(d + 1)
    w = np.power(-1.0, ks) * scipy.special.binom(d, ks)
    # Apply weights lag by lag; z starts as the k=0 term.
    z = w[0] * x
    z[:d] = np.nan
    for k in range(1, d + 1):
        z[k:] += w[k] * x[:-k]
    return z, w
%time z, _ = intdiff(x, 1)
print(z[:10])
z, _ = intdiff(x, 2)
print(z[:10])
z, _ = intdiff(x, 3)
print(z[:10])
```
## (c) Fractional Differentiation
Let $x_t$ a time series,
$t\in\mathbb{N}$ the time step,
$\Delta^d$ the difference operator of fractional order $d\in\mathbb{R}^+$,
and $m$ the truncation order
$$
(\Delta^d x)_t = x_t + \sum_{k=1}^\infty \left(\prod_{i=1}^k \frac{d - i + 1}{i} \right) x_{t-k} \\
(\Delta^d x)_t \approx x_t + \sum_{k=1}^m \left(\prod_{i=1}^k \frac{d - i + 1}{i} \right) x_{t-k}
$$
(see Lopez, 2018, p.78, from the 'iterative estimation' formula; Jensen and Nielsen, 2014)
```
%time z = fracdiff(x, order=1)
print(z[:10])
#plt.plot(z);
%time z = fracdiff(x, order=0.5)
plt.plot(z);
%time z = fracdiff(x, order=1.5)
plt.plot(z);
```
# Speed
```
%timeit z = firstdiff(x)
%timeit z, _ = intdiff(x, 1)
%timeit z = fracdiff(x, order=1)
```
# Memory
```
%memit z = firstdiff(x)
%memit z, _ = intdiff(x, 1)
%memit z = fracdiff(x, order=1)
```
# Line-by-Line
```
%mprun -f fracdiff fracdiff(x, order=1)
```
# References
* Jensen, A.N., Nielsen, M.Ø., 2014. A Fast Fractional Difference Algorithm. Journal of Time Series Analysis 35, 428–436. https://doi.org/10.1111/jtsa.12074
* Prado, M.L. de, 2018. Advances in Financial Machine Learning, 1st ed. Wiley.
| github_jupyter |
```
import pandas as pd
from ast import literal_eval
import matplotlib.pyplot as plt
import matplotlib
plt.style.use('fivethirtyeight')
%matplotlib inline
!ls
#Importing the data
df = pd.read_csv('readable_cleaned.csv')
del df['Date.1']
df.index = pd.to_datetime(df['Date'], format='%Y-%m-%d %H:%M:%S')
df.info()
```
# Distribution over time
## Weekly
```
# Distribution
df[df['Date']!= '2001-01-01 01:01:01'].resample('W')['Number'].count().plot()
df[df['Date']!= '2001-01-01 01:01:01'].resample('W')['Number'].count().sort_values(ascending=False).head()
```
## Daily
```
df[df['Date']!= '2001-01-01 01:01:01'].resample('D')['Number'].count().plot()
df[df['Date']!= '2001-01-01 01:01:01'].resample('D')['Number'].count().sort_values(ascending=False).head()
```
## Most active Day
```
df['2016-05-12'].sort_values(by='Date')
df['Number'].groupby(df['Number'].index.weekday).count()
```
## Most active time of day
```
df['Number'].groupby(df['Number'].index.hour).count()
```
# Ad Click Count
```
df['Ad Clicks'].sum()
```
# Ad click Distribution
```
df['Ad Clicks'].describe()
```
# Ad Click top 5
```
df['Ad Clicks'].sort_values(ascending=False).head()
pd.set_option('max_colwidth',400)
df[df['Ad Clicks']>40200]
```
# Ad Impressions
```
df['Ad Impressions'].sum()
df['Ad Impressions'].describe()
df['Ad Impressions'].sort_values(ascending=False).head()
# Fix: the filter line was missing its closing bracket, which made this cell
# a syntax error and swallowed the "Ad Spend" heading into the code fence.
df[df['Ad Impressions'] > 5112241]
# Ad Spend
df['Ad Spend'].sum()
df['Ad Spend'].sort_values(ascending=False).head()
```
# Age
## Percentage were targetted at under age
```
df[df['AgeMin']< 18]['Ad Impressions'].sum()
df[df['AgeMin']< 18]['Ad Impressions'].sum() / df['Ad Impressions'].sum() * 100
```
## Interests
```
lst = list(df['Interests'])
lst[0]
new_lst = []
for elem in lst:
new_lst.append(literal_eval(elem))
flat_list = [item for sublist in new_lst for item in sublist]
fl = []
for elem in flat_list:
elem = elem.upper().replace(' OR', '').strip()
fl.append(elem)
df_tags = pd.DataFrame(fl)
df_tags.columns = ['Tags']
df_tags['Tags'].count()
df_tags['Tags'].value_counts().head(40)
df_tags = df_tags[df_tags['Tags']!='N/A']
df_tags = df_tags[df_tags['Tags']!='JR.']
df_tags = df_tags[df_tags['Tags']!='']
df_tags['Tags'].value_counts().head(15).sort_values(ascending=True).plot(kind='barh')
#After election
lst = list(df['2017-05-01':]['Interests'])
new_lst = []
for elem in lst:
new_lst.append(literal_eval(elem))
flat_list = [item for sublist in new_lst for item in sublist]
fl = []
for elem in flat_list:
elem = elem.upper().replace(' OR', '').strip()
fl.append(elem)
df_tags = pd.DataFrame(fl)
df_tags.columns = ['Tags']
df_tags['Tags'].value_counts().count()
df_tags['Tags'].value_counts().head(40)
```
# Location
```
!ls
df_usstates = pd.read_csv('US.csv')
states = list(df_usstates['name'])
statecounts = []
for state in states:
new = pd.DataFrame(df['Location'].str.contains(state))
count = new[new['Location']==True].count()[0]
mini_dict = {'State':state,
'Count':count}
statecounts.append(mini_dict)
pd.DataFrame(statecounts).to_csv('state_count.csv')
df_usstates = pd.DataFrame(statecounts)
df_usstates.sort_values(by='Count', ascending=False).head(10)
poly = pd.read_csv('cartodb_query.csv')
merged = df_usstates.merge(poly, left_on='State', right_on='name')
merged.to_csv('state_count_with_polygon.csv')
```
## Finding white nationalism
```
def unlist(elem):
    """Coerce a cell value (possibly a list of interests) to its string form."""
    return str(elem)
df['Interests'] = df['Interests'].apply(unlist)
df['White'] = df['Interests'].str.contains('illegal')
df[df['Interests']=="['Confederate Flag', 'American Civil War reenactment']"]
df['Interests']
```
| github_jupyter |
# Churn Prediction
This notebook will introduce the use of the churn dataset to create churn prediction model using deep kernel learning.
The ingested dataset comes from the SIGKDD (KDD Cup) 2009 competition.
The pipeline is composed using Azure ML pipeline and trained on Azure ML compute with hyper parameters of the gaussian process and the neural network jointly tuned through hyperdrive.
```
%load_ext autoreload
%autoreload 2
import os
import urllib
from azureml.core import (Workspace,Run,VERSION,
Experiment,Datastore)
from azureml.core.runconfig import (RunConfiguration,
DEFAULT_GPU_IMAGE)
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.compute import (AmlCompute, ComputeTarget)
from azureml.exceptions import ComputeTargetException
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import (Pipeline,
PipelineData)
from azureml.pipeline.steps import (HyperDriveStep,PythonScriptStep)
from azureml.train.dnn import PyTorch
from azureml.train.hyperdrive import *
from azureml.widgets import RunDetails
print('SDK verison', VERSION)
```
## Variables declaration
Declare variables to be used through out, please fill in the Azure subscription ID, resource-group and workspace name to connect to your Azure ML workspace.
```
SUBSCRIPTION_ID = ''
RESOURCE_GROUP = ''
WORKSPACE_NAME = ''
PROJECT_DIR = os.getcwd()
EXPERIMENT_NAME = "customer_churn"
CLUSTER_NAME = "gpu-cluster"
DATA_DIR = os.path.join(PROJECT_DIR,'data')
TRAIN_DIR = os.path.join(PROJECT_DIR,'code','train')
PREPROCESS_DIR = os.path.join(PROJECT_DIR,'code','preprocess')
SOURCE_URL ='https://amlgitsamples.blob.core.windows.net/churn'
FILE_NAME = 'CATelcoCustomerChurnTrainingSample.csv'
PYTORCH_SUPPORTED_VERSION = '1.1'
```
## Initialize workspace
Initialize a workspace object
```
ws = Workspace(workspace_name = WORKSPACE_NAME,
subscription_id = SUBSCRIPTION_ID ,
resource_group = RESOURCE_GROUP
)
ws.write_config()
print('Workspace loaded:', ws.name)
```
## Upload dataset to blob datastore
Upload dataset to workspace default blob storage which will be mounted on AzureML compute during pipeline execution.
```
# Fix: the original chained assignment also bound an unused
# `default_datastore` variable; a single assignment is all that's needed.
default_store = ws.datastores["workspaceblobstore"]
default_store.upload(src_dir=DATA_DIR, target_path='churn', overwrite=True, show_progress=True)
```
## Retrieve or create a Azure Machine Learning compute
Here we create a new Azure Machine Learning Compute in the current workspace, if it doesn't already exist. We will then run the training script on this compute target.
If you have already created an Azure ML compute in your workspace, just provide its name in the cell below to have it used for Azure ML pipeline execution.
```
cluster_name = "cluster"
try:
cluster = ComputeTarget(ws, cluster_name)
print(cluster_name, "found")
except ComputeTargetException:
print(cluster_name, "not found, provisioning....")
provisioning_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',max_nodes=4)
cluster = ComputeTarget.create(ws, cluster_name, provisioning_config)
cluster.wait_for_completion(show_output=True)
```
## Pipeline definition
The Azure ML pipeline is composed of two steps:
- Data pre-processing which consist of one-hot encoding categorical features, normalization of the features set, spliting of dataset into training/testing sets and finally writing out the output to storage.
- Hyperdrive step that tune and train the deep kernel learning model using GPytorch and Pytorch estimator
## Pipeline data input/output
Here, we define the input and intermediary dataset that will be used by the pipeline steps.
```
input_dir = DataReference(datastore=default_store,
data_reference_name="input_data",
path_on_datastore="churn"
)
processed_dir = PipelineData(name = 'processed_data',
datastore=default_store
)
```
## Pipeline 1st step: Data Preprocessing
We start by defining the run configuration with the needed dependencies by the preprocessing step.
In the cell that follow, we compose the first step of the pipeline.
```
cd = CondaDependencies()
cd.add_conda_package('pandas')
cd.add_conda_package('matplotlib')
cd.add_conda_package('numpy')
cd.add_conda_package('scikit-learn')
run_config = RunConfiguration(framework="python",
conda_dependencies= cd)
run_config.target = cluster
run_config.environment.docker.enabled = True
run_config.environment.docker.base_image = DEFAULT_GPU_IMAGE
run_config.environment.python.user_managed_dependencies = False
pre_processing = PythonScriptStep(
name='preprocess dataset',
script_name='preprocess.py',
arguments=['--input_path', input_dir,\
'--output_path', processed_dir],
inputs=[input_dir],
outputs=[processed_dir],
compute_target=cluster_name,
runconfig=run_config,
source_directory=PREPROCESS_DIR
)
```
## Pipeline second step: training
For the second step, we start by defining the PyTorch estimator that will be used to train the stochastic variational deep kernel learning model using GPyTorch.
```
estimator = PyTorch(source_directory=TRAIN_DIR,
conda_packages=['pandas', 'numpy', 'scikit-learn'],
pip_packages=['gpytorch'],
compute_target=cluster,
entry_script='svdkl_entry.py',
use_gpu=True,
framework_version=PYTORCH_SUPPORTED_VERSION
)
```
Here, we configure Hyperdrive by defining the hyperparameter space and choosing area under the curve (AUC) as the metric to optimize for.
```
ps = RandomParameterSampling(
{
'--batch-size': choice(4096,8192),
'--epochs': choice(500),
'--neural-net-lr': loguniform(-4,-2),
'--likelihood-lr': loguniform(-4,-2),
'--grid-size': choice(32,64),
'--grid-bounds': choice(-1,0),
'--latent-dim': choice(2),
'--num-mixtures': choice(4,6,8)
}
)
early_termination_policy = BanditPolicy(evaluation_interval=10, slack_factor=0.1)
hd_config = HyperDriveConfig(estimator=estimator,
hyperparameter_sampling=ps,
policy=early_termination_policy,
primary_metric_name='auc',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=12,
max_concurrent_runs=4)
```
Last, we define the hyperdrive step of the pipeline.
```
hd_step = HyperDriveStep(
name="hyper parameters tunning",
hyperdrive_config=hd_config,
estimator_entry_script_arguments=['--data-folder', processed_dir],
inputs=[processed_dir])
```
## Build & Execute pipeline
```
pipeline = Pipeline(workspace=ws, steps=[hd_step],
default_datastore=default_store
)
pipeline_run = Experiment(ws, 'Customer_churn').submit(pipeline,
regenerate_outputs=True)
RunDetails(pipeline_run).show()
from azureml.widgets import RunDetails
RunDetails(pipeline_run).show()
```
| github_jupyter |
```
from tensorflow.keras.preprocessing.text import Tokenizer
sentences = [
'i love my dog',
'I, love my cat',
'You love my dog!'
]
tokenizer = Tokenizer(num_words = 100)
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
print(word_index)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
sentences = [
'I love my dog',
'I love my cat',
'You love my dog!',
'Do you think my dog is amazing?'
]
tokenizer = Tokenizer(num_words = 100, oov_token="<OOV>")
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(sentences)
padded = pad_sequences(sequences, maxlen=5)
print("\nWord Index = " , word_index)
print("\nSequences = " , sequences)
print("\nPadded Sequences:")
print(padded)
# Try with words that the tokenizer wasn't fit to
test_data = [
'i really love my dog',
'my dog loves my manatee'
]
test_seq = tokenizer.texts_to_sequences(test_data)
print("\nTest Sequence = ", test_seq)
padded = pad_sequences(test_seq, maxlen=10)
print("\nPadded Test Sequence: ")
print(padded)
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import numpy as np
tokenizer = Tokenizer()
data = open('../../../../../../Desktop/irish-lyrics-eof.txt').read()
corpus = data.lower().split("\n")
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
print(tokenizer.word_index)
print(total_words)
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i+1]
input_sequences.append(n_gram_sequence)
# pad sequences
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# create predictors and label
xs, labels = input_sequences[:,:-1],input_sequences[:,-1]
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
print(tokenizer.word_index['in'])
print(tokenizer.word_index['the'])
print(tokenizer.word_index['town'])
print(tokenizer.word_index['of'])
print(tokenizer.word_index['athy'])
print(tokenizer.word_index['one'])
print(tokenizer.word_index['jeremy'])
print(tokenizer.word_index['lanigan'])
print(xs[6])
print(ys[6])
print(xs[5])
print(ys[5])
print(tokenizer.word_index)
model = Sequential()
model.add(Embedding(total_words, 100, input_length=max_sequence_len-1))
model.add(Bidirectional(LSTM(150)))
model.add(Dense(total_words, activation='softmax'))
adam = Adam(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
#earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
history = model.fit(xs, ys, epochs=10, verbose=1)
#print model.summary()
print(model)
import matplotlib.pyplot as plt
def plot_graphs(history, string):
    """Plot one training metric (named by `string`) from a Keras History."""
    values = history.history[string]
    plt.plot(values)
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.show()
history.history
plot_graphs(history, 'acc')
seed_text = "I've got a bad feeling about this"
next_words = 100
for _ in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
predicted = model.predict_classes(token_list, verbose=0)
output_word = ""
for word, index in tokenizer.word_index.items():
if index == predicted:
output_word = word
break
seed_text += " " + output_word
print(seed_text)
list(range(10))
seed_text = "I want to write some poetry"
next_words = 100
for i in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
predicted = model.predict_classes(token_list, verbose=0)
output_word = ""
for word, index in tokenizer.word_index.items():
if index == predicted:
output_word = word
break
seed_text += " " + output_word
print(seed_text)
```
| github_jupyter |
```
!pip install autokeras
!pip install git+https://github.com/keras-team/keras-tuner.git@1.0.2rc4
```
In this tutorial we are making use of the
[AutoModel](/auto_model/#automodel-class)
API to show how to handle multi-modal data and multi-task.
## What is multi-modal?
Multi-modal data means each data instance has multiple forms of information. For example, a photo can be saved as a image. Besides the image, it may also have when and where it was taken as its attributes, which can be represented as structured data.
## What is multi-task?
By multi-task we mean predicting multiple targets from the same input features. For example, we not only want to classify an image according to its content, but we also want to regress its quality as a float number between 0 and 1.
The following diagram shows an example of multi-modal and multi-task neural network model.
<div class="mermaid">
graph TD
id1(ImageInput) --> id3(Some Neural Network Model)
id2(StructuredDataInput) --> id3
id3 --> id4(ClassificationHead)
id3 --> id5(RegressionHead)
</div>
It has two inputs the images and the structured data. Each image is associated with a set of attributes in the structured data. From these data, we are trying to predict the classification label and the regression value at the same time.
## Data Preparation
To illustrate our idea, we generate some random image and structured data as the multi-modal data.
```
import numpy as np
num_instances = 100
# Generate image data.
image_data = np.random.rand(num_instances, 32, 32, 3).astype(np.float32)
# Generate structured data.
structured_data = np.random.rand(num_instances, 20).astype(np.float32)
```
We also generate some multi-task targets for classification and regression.
```
# Generate regression targets.
regression_target = np.random.rand(num_instances, 1).astype(np.float32)
# Generate classification labels of five classes.
classification_target = np.random.randint(5, size=num_instances)
```
## Build and Train the Model
Then we initialize the multi-modal and multi-task model with
[AutoModel](/auto_model/#automodel-class).
Since this is just a demo, we use small amount of `max_trials` and `epochs`.
```
import autokeras as ak
# Initialize the multi with multiple inputs and outputs.
model = ak.AutoModel(
inputs=[ak.ImageInput(), ak.StructuredDataInput()],
outputs=[
ak.RegressionHead(metrics=['mae']),
ak.ClassificationHead(loss='categorical_crossentropy', metrics=['accuracy'])
],
overwrite=True,
max_trials=2)
# Fit the model with prepared data.
model.fit(
[image_data, structured_data],
[regression_target, classification_target],
epochs=3)
```
## Validation Data
By default, AutoKeras use the last 20% of training data as validation data.
As shown in the example below, you can use `validation_split` to specify the percentage.
```
model.fit(
[image_data, structured_data],
[regression_target, classification_target],
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=2)
```
You can also use your own validation set
instead of splitting it from the training data with `validation_data`.
```
split = 20
image_val = image_data[split:]
structured_val = structured_data[split:]
regression_val = regression_target[split:]
classification_val = classification_target[split:]
image_data = image_data[:split]
structured_data = structured_data[:split]
regression_target = regression_target[:split]
classification_target = classification_target[:split]
model.fit(
[image_data, structured_data],
[regression_target, classification_target],
# Use your own validation set.
validation_data=(
[image_val, structured_val],
[regression_val, classification_val]),
epochs=2)
```
## Customized Search Space
You can customize your search space.
The following figure shows the search space we want to define.
<div class="mermaid">
graph LR
id1(ImageInput) --> id2(Normalization)
id2 --> id3(Image Augmentation)
id3 --> id4(Convolutional)
id3 --> id5(ResNet V2)
id4 --> id6(Merge)
id5 --> id6
id7(StructuredDataInput) --> id8(CategoricalToNumerical)
id8 --> id9(DenseBlock)
id6 --> id10(Merge)
id9 --> id10
id10 --> id11(Classification Head)
id10 --> id12(Regression Head)
</div>
```
import autokeras as ak
input_node1 = ak.ImageInput()
output_node = ak.Normalization()(input_node1)
output_node = ak.ImageAugmentation()(output_node)
output_node1 = ak.ConvBlock()(output_node)
output_node2 = ak.ResNetBlock(version='v2')(output_node)
output_node1 = ak.Merge()([output_node1, output_node2])
input_node2 = ak.StructuredDataInput()
output_node = ak.CategoricalToNumerical()(input_node2)
output_node2 = ak.DenseBlock()(output_node)
output_node = ak.Merge()([output_node1, output_node2])
output_node1 = ak.ClassificationHead()(output_node)
output_node2 = ak.RegressionHead()(output_node)
auto_model = ak.AutoModel(
inputs=[input_node1, input_node2],
outputs=[output_node1, output_node2],
overwrite=True,
max_trials=2)
image_data = np.random.rand(num_instances, 32, 32, 3).astype(np.float32)
structured_data = np.random.rand(num_instances, 20).astype(np.float32)
regression_target = np.random.rand(num_instances, 1).astype(np.float32)
classification_target = np.random.randint(5, size=num_instances)
auto_model.fit(
[image_data, structured_data],
[classification_target, regression_target],
batch_size=32,
epochs=3)
```
## Data Format
You can refer to the documentation of
[ImageInput](/node/#imageinput-class),
[StructuredDataInput](/node/#structureddatainput-class),
[TextInput](/node/#textinput-class),
[RegressionHead](/block/#regressionhead-class),
[ClassificationHead](/block/#classificationhead-class),
for the format of different types of data.
You can also refer to the Data Format section of the tutorials of
[Image Classification](/tutorial/image_classification/#data-format),
[Text Classification](/tutorial/text_classification/#data-format),
[Structured Data Classification](/tutorial/structured_data_classification/#data-format).
## Reference
[AutoModel](/auto_model/#automodel-class),
[ImageInput](/node/#imageinput-class),
[StructuredDataInput](/node/#structureddatainput-class),
[DenseBlock](/block/#denseblock-class),
[RegressionHead](/block/#regressionhead-class),
[ClassificationHead](/block/#classificationhead-class),
[CategoricalToNumerical](/block/#categoricaltonumerical-class).
| github_jupyter |
```
#hide
#skip
! [ -e /content ] && pip install -Uqq mrl-pypi # upgrade mrl on colab
# default_exp core
```
# Core
> Core functions for MRL, mostly low level plumbing and parallel processing
```
#hide
from nbdev.showdoc import *
%load_ext autoreload
%autoreload 2
# export
from mrl.imports import *
from multiprocessing import get_context
import requests
import zipfile
```
## Miscellaneous Functions
Low level helper functions
```
# export
def is_container(x):
    "check if `x` is a container (used for parallel processing)"
    # Strings are deliberately NOT containers here: they should be passed
    # to the worker function whole, not character by character.
    return isinstance(x, (list, tuple, np.ndarray))
def flatten_recursive(list_of_lists):
    """Recursively flatten arbitrarily nested lists into a single flat list.

    Only `list` instances are descended into; tuples, arrays and other
    iterables are kept as leaf items.
    """
    flat_list = []
    for item in list_of_lists:
        # isinstance (not `type(item) == list`) so list subclasses flatten too.
        if isinstance(item, list):
            flat_list.extend(flatten_recursive(item))
        else:
            flat_list.append(item)
    return flat_list
def flatten_list_of_lists(list_of_lists):
    "Flattens list of lists (not recursive)"
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
def deduplicate_list(l):
    """Deduplicate `l`, keeping the first occurrence of each element.

    Fix: `list(set(l))` returned the unique items in arbitrary hash order;
    `dict.fromkeys` is deterministic and preserves input order.
    """
    return list(dict.fromkeys(l))
def chunk_list(input_list, chunksize):
    'Breaks `input_list` into chunks of size `chunksize`, ragged on last list'
    starts = range(0, len(input_list), chunksize)
    return [input_list[s:s + chunksize] for s in starts]
def filter_passing(inputs, bools):
    'Subsets `inputs` (list) by `bools` (list of bools)'
    assert len(inputs)==len(bools), '`inputs` and `bools` must have the same length'
    return [item for item, keep in zip(inputs, bools) if keep]
assert flatten_list_of_lists([[1],[2],[3]]) == [1,2,3]
assert flatten_recursive([[1],[2],[3, [4,5, [6,7,8]]]]) == [1,2,3,4,5,6,7,8]
```
## Parallel Processing
MRL tries to build in parallel processing at every level. This can make a huge difference when you're processing millions of molecules
`new_pool_parallel` and `maybe_parallel` are convenient wrappers for parallel processing. The given `func` is wrapped with `**kwargs` and used to process the `iterable`. If `iterable` is a `list` or `np.ndarray`, the elements in `iterable` are run in parallel by `func`.
### Parallel processing tradeoffs
Parallel processing can significantly speed up a process. There are however some trade-offs.
In Python, parallel processing is creating using a `Pool`. A pool maps instances of a function over an iterable.
```
# uses 5 processes to map `my_iterable` to `my_func`
with Pool(processes=5) as p:
outputs = p.map(my_func, my_iterable)
```
Using the above code creates a new `Pool` with 5 processes, and uses those 5 processes to process the function arguments. The code incurs some i/o overhead creating the `Pool`. This means that if the time required to process the function calls is less than the Pool overhead, using parallel processing will actually be slower than serial processing. There are two ways around this:
1. Use process pools for bulk processing (ie function time much greater than i/o time)
2. Maintain an open process pool to avoid repeated pool creation overhead
MRL uses different functions to control the method of parallel processing used.
The `new_pool_parallel` function implements parallel processing using a new pool for every function call, similar to the above code. This function is best used to process large numbers of inputs infrequently. Parallel processing is controlled by the `cpus` argument. If `cpus=None`, the `ncpus` environment variable is used (ie `os.environ['ncpus'] = '8'`)
The `maybe_parallel` function allows for repeated use of a stateful process Pool, defined by the `GLOBAL_POOL` variable. By default, `GLOBAL_POOL=None`. To create a global pool, use the `set_global_pool` function.
```
set_global_pool(cpus=8)
```
If the `cpus=None`, `maybe_parallel` will run processes using `GLOBAL_POOL` if it exists, or serial processing if it does not. If `cpus` is not None, `maybe_parallel` defaults back to using `new_pool_parallel`
If you need to frequently use parallel processing on small batches of inputs (ie batches from a model), set a global pool and use `maybe_parallel`
### Global Pool Gotchas
Using a global pool allows us to take advantage of parallel processing on small batches without having the overhead of creating process pools over and over again. However, process pools left open accumulate memory. If memory usage builds up, use `refresh_global_pool` to release the memory and create a new global pool, or use `close_global_pool` to delete the global pool and reset it to `None`
```
# export
GLOBAL_POOL = None
os.environ['max_global_threads'] = '2000'
def set_global_pool(cpus=None):
    """Replace the module-level GLOBAL_POOL with a fresh pool of `cpus` workers.

    Passing `cpus=None` simply clears the global pool. Any existing pool is
    closed first so its workers are not leaked.
    """
    global GLOBAL_POOL
    if GLOBAL_POOL is not None:
        close_global_pool()
    if cpus is None:
        GLOBAL_POOL = None
        return
    GLOBAL_POOL = Pool(processes=cpus)
    # `uses` tracks how many maps the pool served, for periodic refreshing.
    GLOBAL_POOL.uses = 0
def close_global_pool():
    """Shut down the module-level GLOBAL_POOL (if any) and release its memory."""
    global GLOBAL_POOL
    pool = GLOBAL_POOL
    if pool is not None:
        pool.close()
        GLOBAL_POOL = None
    # Collect immediately so the worker processes' memory is reclaimed now.
    gc.collect()
def refresh_global_pool():
    """Tear down and recreate GLOBAL_POOL with the same worker count.

    Used to release memory accumulated by a long-lived pool.
    """
    global GLOBAL_POOL
    if GLOBAL_POOL is None:
        return
    # NOTE(review): `_processes` is a private multiprocessing.Pool attribute
    # holding the worker count — confirm it survives library upgrades.
    n_workers = GLOBAL_POOL._processes
    close_global_pool()
    set_global_pool(cpus=n_workers)
def new_pool_parallel(func, iterable, cpus=None, **kwargs):
    """Map `func` over `iterable` using a freshly created process pool.

    Parameters
    ----------
    func : callable
        Function applied to each element; `kwargs` are bound via `partial`.
    iterable : object
        If a container (per `is_container`), its elements are processed,
        possibly in parallel; otherwise `func` is applied to it directly.
    cpus : int, optional
        Worker count. Falls back to the `ncpus` environment variable,
        then to serial processing.

    Returns
    -------
    list (container input) or single result (scalar input)
    """
    p_func = partial(func, **kwargs)
    if not is_container(iterable):
        return p_func(iterable)

    if cpus is None:
        cpus = int(os.environ.get('ncpus', '0'))
    # Never spawn more workers than there are items.
    processes = min(cpus, len(iterable))
    if processes == 1:
        # spinning up a single pool has more overhead
        processes = 0
    if processes == 0:
        return [p_func(i) for i in iterable]
    # Bug fix: the pool was previously created with `cpus` workers, ignoring
    # the capped `processes` count computed just above.
    with Pool(processes=processes) as p:
        return p.map(p_func, iterable)
def maybe_parallel(func, iterable, cpus=None, **kwargs):
    # Map `func` over `iterable`, preferring the long-lived GLOBAL_POOL to
    # avoid per-call pool-creation overhead.
    #
    # Dispatch order for container inputs:
    #   * `cpus` given explicitly -> delegate to `new_pool_parallel`
    #   * GLOBAL_POOL exists      -> reuse it (and refresh it periodically)
    #   * otherwise               -> serial list comprehension
    # Non-container inputs are passed to `func` directly.
    global GLOBAL_POOL
    p_func = partial(func, **kwargs)  # bind kwargs once so the pool maps a single callable
    if is_container(iterable):
        if cpus is not None:
            output = new_pool_parallel(func, iterable, cpus, **kwargs)
        elif GLOBAL_POOL is not None:
            output = GLOBAL_POOL.map(p_func, iterable)
            GLOBAL_POOL.uses += 1
            # Long-lived pools accumulate memory; recycle the pool after a
            # capped number of uses (see the `max_global_threads` env var).
            if GLOBAL_POOL.uses > int(os.environ['max_global_threads']):
                refresh_global_pool()
            gc.collect()
        else:
            output = [p_func(i) for i in iterable]
    else:
        output = p_func(iterable)
    return output
def test_func(x):
time.sleep(0.5)
return x
# --- Benchmark: serial loop vs. throwaway pools ---
start = time.time()
_ = [test_func(i) for i in range(10)]
t1 = time.time()
_ = new_pool_parallel(test_func, list(range(10))) # serial processing
t2 = time.time()
_ = new_pool_parallel(test_func, list(range(10)), cpus=4) # 4 cpus manually defined
t3 = time.time()
os.environ['ncpus'] = '4'
_ = new_pool_parallel(test_func, list(range(10))) # 4 cpus defined by environ variable
t4 = time.time()
print_str = f'''
Serial time: {t1-start:.2f}\n
new_pool_parallel, 0 cpus time: {t2-t1:.2f}\n
new_pool_parallel, 4 cpus (arg defined) time: {t3-t2:.2f}\n
new_pool_parallel, 4 cpus (environ defined) time: {t4-t3:.2f}\n
'''
print(print_str)
# --- Benchmark: long-lived global pool via maybe_parallel ---
print(type(GLOBAL_POOL))
set_global_pool(5)
print(type(GLOBAL_POOL))
start = time.time()
_ = maybe_parallel(test_func, list(range(10)))
t1 = time.time()
_ = maybe_parallel(test_func, list(range(10)), cpus=2)
t2 = time.time()
print_str = f'''
maybe_parallel Global Pool (5 cpus) time: {t1-start:.2f}\n
maybe_parallel arg override 2 cpus time: {t2-t1:.2f}\n
'''
print(print_str)
# repeat to show the global pool is reused (no pool-creation overhead)
start = time.time()
_ = maybe_parallel(test_func, list(range(10)))
t1 = time.time()
_ = maybe_parallel(test_func, list(range(10)), cpus=2)
t2 = time.time()
print_str = f'''
maybe_parallel Global Pool (5 cpus) time: {t1-start:.2f}\n
maybe_parallel arg override 2 cpus time: {t2-t1:.2f}\n
'''
print(print_str)
```
## Examples of different pools
```
start = time.time()
for i in range(10):
_ = new_pool_parallel(test_func, list(range(10)))
end = time.time() - start
print(f'{end:.2f} elapsed')
start = time.time()
for i in range(10):
_ = maybe_parallel(test_func, list(range(10)))
end = time.time() - start
print(f'{end:.2f} elapsed')
```
In the above example, `new_pool_parallel` takes 5 seconds longer to execute compared to `maybe_parallel`. The time difference is driven by the overhead generated by creating new pools
## Debugging Parallel Processing
Errors in parallel processing can be difficult to debug because the true error and stack trace are obscured by the parallel processing stack trace. If you have errors in parallel processing, first try setting `os.environ['ncpus'] = '0'` and running `close_global_pool` to disable python multiprocessing. This should reveal the true error.
If everything works fine when multiprocessing is disabled, it is likely one of your functions is failing to pickle.
## Download Files
Download the `files` directory from the repo. Used for Collab notebooks
```
# export
def download_files():
    '''Download and unpack the repo's `files` directory (used for Colab).

    Fetches files.zip from S3, extracts it into the current directory and
    removes the archive. Skipped entirely when a `files` directory already
    exists.
    '''
    if os.path.exists('files'):
        return
    r = requests.get('https://dmai-mrl.s3.us-west-2.amazonaws.com/mrl_public/files.zip')
    # fail loudly on a bad download instead of writing and unzipping an error page
    r.raise_for_status()
    with open('files.zip', 'wb') as f:
        f.write(r.content)
    with zipfile.ZipFile('files.zip', 'r') as zip_ref:
        zip_ref.extractall('.')
    os.remove('files.zip')
# hide
from nbdev.export import notebook2script; notebook2script()
```
| github_jupyter |
## Creating a Convolutional Neural Network-Dogs-v-Cats
### Imports
```
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
```
### Creating a NN
```
class Net(nn.Module):
    """First draft of the CNN: three stacked conv layers, no dense head yet."""
    def __init__(self):
        super().__init__()
        # 1 input channel (grayscale) -> 32 -> 64 -> 64 feature maps, 3x3 kernels
        self.conv1 = nn.Conv2d(1, 32, (3,3))
        self.conv2 = nn.Conv2d(32, 64, (3,3))
        self.conv3 = nn.Conv2d(64, 64, (3,3))
```
> Each layer specifies its input and output sizes, and a kernel size of `3x3`, also known as the kernel window. The first layer `conv1` outputs `32` channels, which become the input of `conv2`, which has a kernel size of `3x3` and outputs `64`.
> Now we need to flatten the output at some point to a `dense` layer with an activation function, because now if we look at the last layer it is not flat. So we need to flatten the output of the last layer before passing it to the dense layer. **How do we do that?**
> We are going to solve this by creating a fake torch tensor and getting the shape of the last `conv` output after applying some max pooling in the `conv` function.
```
class Net(nn.Module):
    """CNN for 200x200 grayscale cat/dog images: 3 conv blocks + 2 dense layers.

    The flattened size of the conv stack is discovered at construction time
    by pushing a dummy tensor through `conv`.
    """
    def __init__(self):
        super().__init__()
        # 1 input channel -> 32 -> 64 -> 128 feature maps, all 3x3 kernels
        self.conv1 = nn.Conv2d(1, 32, (3,3))
        self.conv2 = nn.Conv2d(32, 64, (3,3))
        self.conv3 = nn.Conv2d(64, 128, (3,3))
        # dummy batch (N=1, C=1, H=200, W=200) used only to infer the
        # flattened conv output size
        self.x = torch.randn(200,200).view(-1, 1,200,200)
        self._to_linear = None  # set by the first call to `conv` below
        self.conv(self.x)
        self.fc1 = nn.Linear(self._to_linear, 32)
        self.fc2 = nn.Linear(32, 2)  # two output classes: cat / dog
    def conv(self, x):
        # each block: conv -> ReLU -> 2x2 max pool
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, kernel_size=(2, 2))
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, kernel_size =(2, 2))
        x = F.relu(self.conv3(x))
        x = F.max_pool2d(x, kernel_size =(2, 2))
        if self._to_linear is None:
            # cache C*H*W of the conv output for sizing the dense head
            self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
        return x
    def forward(self, x):
        x = self.conv(x)
        x = x.view(-1, self._to_linear)  # flatten for the dense layers
        x = F.relu(self.fc1(x))
        # class probabilities over (cat, dog)
        return F.softmax(self.fc2(x), dim=1)
```
> **What is going on here?**
* first we created a dummy torch tensor `x` with a shape of `1x1x200x200`
* we created a `_to_linear` attribute that will help us keep track of our shape.
* The idea here is to say, when we get the shape we want to `multiply` all dimensions and assign the value to `_to_linear`
* We created our `conv` function that will help us to update the value of the `_to_linear` when it is called.
* First we applied a `relu` activation function to our first `conv1` layer
* We then apply `max_pool2d` with a kernel_size of `2x2`
* We do this for all `layers`
* Since we have assigned `_to_linear` to **None** we want to check if it is still none. If that's the case we will multiply all shapes of the element `x`.
* We will create our forward function as usual and immediately call the conv function to get the shape. We will reshape `x` to the new `_to_linear` value, that is, flattening it
* Apply a `relu` activation function
* return the output with softmax activation function applied
```
net = Net()
net
128*23*23
```
### Model Training
```
from torch import optim
optimizer = optim.Adam(net.parameters(), lr=1e-3)
loss_function = nn.MSELoss()
```
> We are going to use the `Adam` optimizer
* Since we have `one_hot` vectors, we're going to use `MSELoss` metric.
### Data
> We are now going to load our data and split it into `train` and `test`
```
data = np.load('cats_v_dogs.npy', allow_pickle=True)
len(data)
type(data)
```
> We want to convert this data to `torch.Tensor`
```
X = torch.Tensor([(i[0]) for i in data]).view(-1, 200, 200)
y = torch.Tensor([i[1] for i in data])
X.view((-1, 200, 200)).shape
```
> Split the data into `train` and `test`.
```
X_train = X[:150]
y_train = y[:150]
X_test = X[-150:]
y_test = y[-150:]
# --- Training ---
EPOCHS = 10
BATCH_SIZE = 5
for epoch in range(EPOCHS):
    print(f'Epochs: {epoch+1}/{EPOCHS}')
    # iterate the training set in mini-batches
    for i in range(0, len(y_train), BATCH_SIZE):
        # reshape to (N, C=1, H=200, W=200) as expected by the conv layers
        X_batch = X_train[i: i+BATCH_SIZE].view(-1, 1, 200, 200)
        y_batch = y_train[i: i+BATCH_SIZE]
        net.zero_grad() ## or you can say optimizer.zero_grad()
        outputs = net(X_batch)
        loss = loss_function(outputs, y_batch)
        loss.backward()
        optimizer.step()
    # loss of the last batch of the epoch
    print("Loss", loss)
# --- Evaluation: accuracy over the held-out set ---
total, correct = 0, 0
with torch.no_grad():
    for i in range(len(X_test)):
        # labels are one-hot, so argmax recovers the class index
        correct_label = torch.argmax(y_test[i])
        prediction = torch.argmax(net(X_test[i].view(-1, 1, 200, 200))[0])
        if prediction == correct_label:
            correct+=1
        total +=1
print(f"Accuracy: {correct/total}")
```
#### Making predictions
> We are going to predict one of the images in our `test` set and visualise it. Remember `cat=0` and `dog=1`.
```
class_names = ["cat", "dog"]
def plotImages(images, labels, cmap="gray"):
    """Show each image in a 2x2 grid, titled with its class name.

    Note: the 2x2 subplot layout supports at most four images per call.
    """
    plt.figure()
    for i, label in enumerate(labels):
        plt.subplot(221 + i)
        plt.title(class_names[label])
        plt.imshow(images[i], cmap=cmap)
    plt.show()
images = X_test[4:6]
predictions = []
for image in images:
prediction_index = torch.argmax(net(image.view(-1,1, 200, 200))[0])
predictions.append(prediction_index)
plotImages(images, predictions)
```
> Read More **[Docs](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html)**
| github_jupyter |
# Proglearn: Scene Segmentation of ISIC using Scikit-Image
*Neuro Data Design II: Spring 2022*
This tutorial provides a walkthrough to applying a Random Forest model to perform scene segmentation on images taken from the International Skin Imaging Collaboration (ISIC) dataset from 2016 using Scikit-Image.
**Contributor**: Amy van Ee (avanee1@jhu.edu)
## 0. Environment Setup
To start this tutorial, we will first import the necessary packages and functions.
```
# ========================================================#
# import packages
# for handling the dataset
import cv2
import os
# for processing the dataset and visualization
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
# for scene segmentation
from skimage import segmentation, feature, future
from sklearn.ensemble import RandomForestClassifier
from functools import partial
# original functions for scene segmentation
from functions.scene_segmentation_rf_isic_exp_functions import (
get_dice,
perform_scene_seg,
)
# for analyzing scene segmentation performance
from skimage.metrics import adapted_rand_error, variation_of_information
```
## I. Preprocessing of Images
**Loading the Dataset**
Now, we will retrieve images from the ISIC dataset.
```
# ========================================================#
# retrieve data
# input location of data
dataloc = "C:/Users/Amy/Documents/Python/Neuro Data Design/"
# extract images
datalbl = dataloc + "NDD II/ISIC/ISBI2016_ISIC_Part1_Training_GroundTruth/"
dataimg = dataloc + "NDD II/ISIC/ISBI2016_ISIC_Part1_Training_Data/"
lblpaths = [datalbl + im for im in os.listdir(datalbl)]
imgpaths = [dataimg + im for im in os.listdir(dataimg)]
# sort and print information
imgpaths.sort()
lblpaths.sort()
print("Total # of images =", len(imgpaths))
print("Total # of labels =", len(lblpaths))
```
Next, we will load all of our images into the workspace. Each image has true annotations where its component pixels are labeled as being part of one of two categories -- normal or cancerous tissue. The labels are converted to greyscale as required by the classifier in the subsequent parts of this tutorial.
```
# ========================================================#
# load all images
images = [cv2.imread(img) for img in imgpaths]
labels_grey = np.array(
[(rgb2gray(cv2.imread(lblpath)) * 1000).astype(int) for lblpath in lblpaths]
)
```
**Visualize an Example Image**
Having loaded in our dataset, we will now try to familiarize ourself with it. We will choose a sample image from the ISIC dataset to see the original image adjacent to the annotated image.
```
# ========================================================#
# Plot the original image alongside the annotated image
# Prepare plot
fig, ax = plt.subplots(1, 2)
fig.set_size_inches(10, 10)
# import image 1
image = cv2.imread(imgpaths[1])
# import annotation for image 1 and convert to greyscale
label_grey = (rgb2gray(cv2.imread(lblpaths[1])) * 1000).astype(int)
# plot data
ax[0].imshow(image)
ax[0].set_title("Original Image")
ax[1].imshow(label_grey, cmap=plt.cm.gray)
ax[1].set_title("True Annotations")
plt.show()
```
## II. Scene Segmentation using Scikit
Having familiarized ourself with the images after some analysis, we will now proceed to perform scene segmentation using Scikit-Image by first training our classifier.
```
# ========================================================#
# Use scikit-image to perform Image Segmentation
# prepare training labels to train the model, where
# 1 indiciates normal tissue and
# 1000 is cancerous tissue
training_labels = np.zeros(image.shape[:2], dtype=np.uint8)
training_labels = np.add(label_grey, training_labels)
training_labels[training_labels == 0] = 1
# perform training
sigma_min = 1
sigma_max = 16
features_func = partial(
feature.multiscale_basic_features,
intensity=True,
edges=False,
texture=True,
sigma_min=sigma_min,
sigma_max=sigma_max,
channel_axis=-1,
)
# obtain features from image
features = features_func(image)
# define random forest
clf = RandomForestClassifier(n_estimators=50, n_jobs=-1, max_depth=10, max_samples=0.05)
# fit forest to features from original image and labeled image
clf = future.fit_segmenter(training_labels, features, clf)
# predict labels after training
# result will be array of 1's (normal) and 1000's (lesion)
result = future.predict_segmenter(features, clf)
# plot results
fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(9, 4))
ax[0].imshow(segmentation.mark_boundaries(image, result, mode="thick"))
ax[0].contour(training_labels)
ax[0].set_title("Image, mask and segmentation boundaries")
ax[1].imshow(result, cmap=plt.cm.gray)
ax[1].set_title("Segmentation")
fig.tight_layout()
```
We can see on the left our original image with the predicted segmentation between the normal and cancerous tissue in green. On the right we can see the segmentation with black indicating normal tissue and white cancerous tissue.
**Analyzing Accuracy**
We will next analyze the performance of scikit-image by computing the accuracy. We will do so by comparing the result from scene segmentation to the true annotated image.
Our metrics for measuring accuracy are as follows.
*Precision-Recall Curve*
- Precision: also known as positive predictive value, the number of true positives (TP) divided by the total number of positives (true positive (TP) + false positive (FP)) for a given class, or
$$Precision = \frac{TP}{TP + FP}$$
- Recall: the number of true positives (TP) divided by the total number of predicted results (true positive (TP) + false negative (FN)) for a given class
$$Recall = \frac{TP}{TP + FN}$$
Precision and recall are often presented together as a Precision-Recall curve, as will be done in this tutorial. It is desirable that both values are close to 1.
*False Splits and Merges*
- False Splits: the fraction of times the classifier incorrectly segments two regions of the image to be normal and lesion when actually it is just one category
- False Merges: the fraction of times the classifier incorrectly identifies a region of the image to be only normal or cancerous when actually two separate categories occupy this region
False splits and false merges are often presented together in one graph, as we will do momentarily. It is desirable for these values to be close to 0.
*Dice Score*
- Sorensen-Dice Coefficient: also known as the dice score, this is a measure of how similar two images are, defined as
$$Dice = \frac{2|X \cap Y|}{|X| + |Y|}$$
where X and Y are the sets of pixels in the true annotated image and the predicted labels. The numerator counts the pixels where they intersect and have the same pixel labels, and the denominator counts the total number of pixels. It is desirable for this value to be close to 1.
```
# ========================================================#
# Analyze the accuracy by looking at
# precision, recall, false splits, false merges, dice score
# correction so that the "normal" label for the predicted
# array matches that of the true array (both "0")
result[result == 1] = 0
# calculate error, precision, recall, splits, merges, dice
error, precision, recall = adapted_rand_error(label_grey, result)
splits, merges = variation_of_information(label_grey, result)
dice = get_dice(label_grey, result)
# print results
print(f"Adapted Rand error: {error}")
print(f"Adapted Rand precision: {precision}")
print(f"Adapted Rand recall: {recall}")
print(f"False Splits: {splits}")
print(f"False Merges: {merges}")
print(f"Dice Coefficient: {dice}")
```
Evidently, based on these numerical results, it appears that scikit-image did a good job of scene segmentation, and so we next test it on other images in the dataset.
**Testing the Model**
We will now use this trained classifier to perform scene segmentation on a few other images, looking at the visual output and our measures of accuracy.
```
# ========================================================#
# perform scene segmentation on a third image
perform_scene_seg(images[2], labels_grey[2], clf)
# ========================================================#
# perform scene segmentation on a third image
perform_scene_seg(images[3], labels_grey[3], clf)
# ========================================================#
# perform scene segmentation on a third image
perform_scene_seg(images[8], labels_grey[8], clf)
```
**Testing the Model on 100 Images**
We will now look at the accuracy of the model after performing scene segmentation on 100 images.
```
# ========================================================#
# perform scene segmentation on 100 images
n = 100
# initialize arrays (one metric slot per image)
error_list = np.zeros(n)
precision_list = np.zeros(n)
recall_list = np.zeros(n)
splits_list = np.zeros(n)
merges_list = np.zeros(n)
dice_list = np.zeros(n)
result_list = np.zeros(n, dtype=object)
# loop through each image and determine values
# NOTE(review): len(images[1:n]) == n - 1, so this scores images[0..n-2]
# (99 images) and leaves the last slot of every array at 0 -- confirm
# whether range(n) was intended.
for i in np.arange(len(images[1:n])):
    # use classifier
    features = features_func(images[i])
    result = future.predict_segmenter(features, clf)
    result[result == 1] = 0 # correction for when compare to true
    # assess segmentation quality against the ground-truth labels
    error, precision, recall = adapted_rand_error(labels_grey[i], result)
    splits, merges = variation_of_information(labels_grey[i], result)
    dice = get_dice(labels_grey[i], result)
    # add to list
    error_list[i] = error
    precision_list[i] = precision
    recall_list[i] = recall
    splits_list[i] = splits
    merges_list[i] = merges
    dice_list[i] = dice
    result_list[i] = result
```
Having obtained the predicted scene segmentations on our 100 images, we will now analyze the results in a graphical form.
```
# ========================================================#
# analyze results
# create figure
fig, axes = plt.subplots(3, 1, figsize=(6, 6), constrained_layout=True)
ax = axes.ravel()
# plot merges, splits
ax[0].scatter(merges_list, splits_list)
ax[0].set_xlabel("False Merges (bits)")
ax[0].set_ylabel("False Splits (bits)")
ax[0].set_title("Split Variation of Information")
# plot precision, recall
ax[1].plot(precision_list, recall_list)
ax[1].set_xlabel("Precision")
ax[1].set_ylabel("Recall")
ax[1].set_title("Adapted Random precision vs. recall")
ax[1].set_xlim(0, 1)
ax[1].set_ylim(0, 1)
# plot dice coefficients
ax[2].hist(dice_list)
ax[2].set_xlabel("Dice Coefficient")
ax[2].set_ylabel("Frequency")
ax[2].set_title("Histogram of Dice Coefficients")
```
For the first graph, we can see the fraction of false merges plotted against false splits. For the second graph, we can see precision plotted against recall. For the third graph, we can see a histogram of dice coefficients. We can see that this last graph has a left skew with most images having very high dice scores.
## III. Conclusion
Evidently, we can see that Scikit-Image works well to perform scene-segmentation of images, but it is not perfect, and there is still great room for improvement in applying machine learning to perform scene segmentation.
| github_jupyter |
<a href="https://colab.research.google.com/github/lucianogaldino/ENEM-2019-SP/blob/main/Enem_2019_SP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **PROJETO ENEM 2019**
## Este projeto analisa os resultados do ENEM no estado de São Paulo no ano de 2019.
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
import seaborn as sns
```
## Os dados foram extraídos do site do INEP:
http://inep.gov.br/microdados
```
dados = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Indicadores Educacionais/ENEM_SP_2019.csv', sep=',', encoding='iso-8859-1')
```
## **MINERAÇÃO DOS DADOS**
```
dados.head()
#O número total de inscritos em São Paulo foi de 813772, sendo de 15,97% de todo o Brasil que foi de 5095308 inscritos.
dados.shape
# Análise dos tipos de atributos.
dados.dtypes
dados['NU_IDADE'].value_counts()
dados['NU_IDADE'].hist(bins=30, figsize=(8,6));
dados['NU_IDADE'].value_counts().sort_index()
```
Alguns valores estranhos e curiosos: alguns inscritos com idades menores que 10 anos e maiores de 80 anos.
```
menores_12 = dados.query('NU_IDADE <= 12')['NO_MUNICIPIO_RESIDENCIA'].value_counts()
# Crianças abaixo de 11 anos, algumas abaixo de 6 anos aparecem como inscritas. Esse dados serão excluídos devido a alta probabilidade de erros.
menores_12
dados_maiores_11 = dados.loc[dados.NU_IDADE > 11]
dados_maiores_11.head()
dados_maiores_11['NU_IDADE'].value_counts().sort_index()
# Quantidade de Treineiros
dados_maiores_11.query('IN_TREINEIRO == 1')['IN_TREINEIRO'].value_counts()
treineiros = dados_maiores_11.loc[dados_maiores_11.IN_TREINEIRO == 1]
treineiros.head()
# Separação entre os treineiros e os vestibulandos
vestibulandos = dados_maiores_11.loc[dados_maiores_11.IN_TREINEIRO == 0]
vestibulandos.head()
vestibulandos.shape
# Análise da quantidade de presentes (1), faltantes(0) e eliminados(2) nas provas.
presenca = vestibulandos.loc[vestibulandos.TP_PRESENCA_MT == 2]
presenca.shape
```
RESUMO DA PRESENÇA NAS PROVAS:
Ciências da Natureza: 500805 presentes, 218693 faltaram e 270 eliminados.
Ciências Humanas: 533142 presentes, 185969 faltaram e 657 eliminados.
Linguagens e Códigos: 533142 presentes, 185969 faltaram e 657 eliminados.
Matemática: 500805 presentes, 218693 faltaram e 270 eliminados.
```
provas = ['NU_NOTA_REDACAO','NU_NOTA_CN','NU_NOTA_MT','NU_NOTA_LC','NU_NOTA_CH']
# Registros NAN nas notas. Faltaram ou tiraram zero.
dados[provas].isnull().sum()
presenca_total = ['TP_PRESENCA_CN','TP_PRESENCA_CH','TP_PRESENCA_MT','TP_PRESENCA_LC']
#Criado uma coluna que soma as presenças nas provas, se a soma for 0 faltou nos dois dias, se a soma for 2, foi em um dia e se a soma for 4, foi nos dois dias.
vestibulandos['presenca_total'] = vestibulandos[presenca_total].sum(axis=1)
vestibulandos.head()
# Siglas: 0 faltou nos dois dias, 2 faltou em um dia e 4 compareceu nos dois dias.
verificacao_presenca = vestibulandos.loc[vestibulandos.presenca_total == 4]
verificacao_presenca.shape
```
COMPARECIMENTO NOS DIAS DAS PROVAS:
500526 inscritos compareceram nos dois dias.
185175 inscritos faltaram nos dois dias.
33726 inscritos faltaram em um dos dias.
```
#Eliminando todos que faltaram nos dois dias
vestibulandos_presentes = vestibulandos.loc[vestibulandos.presenca_total != 0]
#Quantidade de valores NAN dos alunos presentes em pelo menos um dos dois dias de prova.
vestibulandos_presentes[provas].isnull().sum()
```
Análise das faltas, já tendo excluídos os inscritos que faltaram nos dois dias e também os treineiros:
1451 faltas no dia de prova de Redação, Linguagem e Códigos e Ciências Humanas.
33788 faltas no dia da prova de Matemática e Ciências da Natureza.
A avaliação de Matemática e Ciências da natureza ocorreu no segundo dia e, portanto, muitos dos que perderam a prova no primeiro dia não foram no segundo dia, pois já estavam eliminados, muitos desanimaram depois de fazer a prova do primeiro dia e muitos acabaram perdendo a prova no segundo dia.
```
vestibulandos_presentes.query('NU_NOTA_COMP5 == 0')['NU_NOTA_COMP5'].value_counts()
```
NOTAS ZEROS EM REDAÇÃO:
REDAÇÃO (final): 13379 notas zeros devido a não cumprimento das regras ou tirou zero em todos os cinco componentes.
COMPONENTE 1(Demonstrar domínio da modalidade escrita formal da Língua Portuguesa.): 9 notas zeros somente neste item.
COMPONENTE 2 (Compreender a proposta de redação e aplicar conceitos das várias áreas de conhecimento para desenvolver o tema, dentro dos limites estruturais do texto dissertativo-argumentativo em prosa): nenhuma nota zero neste item.
COMPONENTE 3 (Selecionar, relacionar, organizar e interpretar informações, fatos, opiniões e argumentos em defesa de um ponto de vista): 18 notas zeros somente neste item.
COMPONENTE 4 (Demonstrar conhecimento dos mecanismos linguísticos necessários para a construção da argumentação.): 16 notas zeros somente neste item.
COMPONENTE 5 (Elaborar proposta de intervenção para o problema abordado, respeitando os direitos humanos): 71871 notas zeros somente neste item.
Além disso, teve mais 1451 pessoas que faltaram no dia da redação, mas compareceram no outro dia.
```
#Eliminando os estudantes que foram desclassificados por terem comparecido em apenas um dos dias, isto é ficando apenas com os que vão concorrer no vestibular.
vestibulandos_classificados = vestibulandos_presentes.loc[vestibulandos_presentes.presenca_total == 4]
# Substituindo NAN por 0 nas notas de redação, para ser possível realizar análises estatísticas e de Machine Learning.
vestibulandos_classificados['NU_NOTA_REDACAO'].fillna(0, inplace=True)
vestibulandos_classificados['NU_NOTA_COMP1'].fillna(0, inplace=True)
vestibulandos_classificados['NU_NOTA_COMP2'].fillna(0, inplace=True)
vestibulandos_classificados['NU_NOTA_COMP3'].fillna(0, inplace=True)
vestibulandos_classificados['NU_NOTA_COMP4'].fillna(0, inplace=True)
vestibulandos_classificados['NU_NOTA_COMP5'].fillna(0, inplace=True)
vestibulandos_classificados.head()
# O dataframe já possui colunas com todas as notas, exceto para redação, com zeros ao invés de NAN, essas colunas não tem o NU_ antes da palavra nota.
vestibulandos_classificados[provas].isnull().sum()
notas_redacao = ['NU_NOTA_COMP1','NU_NOTA_COMP2','NU_NOTA_COMP3','NU_NOTA_COMP4','NU_NOTA_COMP5','NU_NOTA_REDACAO']
vestibulandos_classificados[notas_redacao].head()
vestibulandos_classificados[notas_redacao].isnull().sum()
# Notas dos cinco componentes sem NAN
notas_componentes = ['NU_NOTA_REDACAO','NOTA_CN','NOTA_MT','NOTA_LC','NOTA_CH']
vestibulandos_classificados[notas_componentes].isnull().sum()
vestibulandos_classificados.query('NOTA_CH == 0')['NOTA_CH'].value_counts()
```
NOTAS ZEROS DOS INSCRITOS CONCORRENTES AO VESTIBULAR:
Matemática = 642
Ciências da Natureza = 622
Linguagens e Códigos = 721
Ciências Humanas = 929
Redação = 9767
## **ANÁLISES ESTATÍSTICAS**
```
# relação entre homens e mulheres inscritos.
dados['TP_SEXO'].value_counts()
dados.TP_SEXO.value_counts().plot(figsize=(6,6),kind='pie', autopct='%.1f%%', fontsize=15)
plt.axis('equal')
plt.title('RELAÇÃO ENTRE HOMENS(M) E MULHERES (F)', loc = 'right', fontsize=20);
#RELAÇÃO ENTRE HOMENS E MULHERES QUE FIZERAM A PROVA
vestibulandos_presentes['TP_SEXO'].value_counts()
vestibulandos_presentes.TP_SEXO.value_counts().plot(figsize=(6,6),kind='pie', autopct='%.1f%%', fontsize=15)
plt.axis('equal')
plt.title('RELAÇÃO ENTRE HOMENS(M) E MULHERES (F)', loc = 'right', fontsize=20);
```
| github_jupyter |
```
%reload_ext autoreload
%autoreload 2
import logging
import numpy as np
# Make analysis reproducible
np.random.seed(0)
# Enable logging
logging.basicConfig(level=logging.INFO)
from replay_trajectory_classification import make_track_graph, plot_track_graph
import matplotlib.pyplot as plt
node_positions = [(40, 80), # 0
(40, 0), # 1
(0, 0), # 2
(0, 80), # 3
(80, 0), # 4
(80, 80), # 5
]
edges = [(0, 1),
(1, 2),
(2, 3),
(1, 4),
(4, 5)
]
track_graph1 = make_track_graph(node_positions, edges)
fig, ax = plt.subplots()
plot_track_graph(track_graph1, ax=ax, draw_edge_labels=True)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_xlabel("x-position")
ax.set_ylabel("y-position")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
node_positions = [(80, 40),
(0, 40),
(0, 80),
(80, 80),
(0, 0),
(80, 0),
]
edges = [(0, 1),
(1, 2),
(2, 3),
(1, 4),
(4, 5)
]
track_graph2 = make_track_graph(node_positions, edges)
fig, ax = plt.subplots()
plot_track_graph(track_graph2, ax=ax, draw_edge_labels=True)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_xlabel("x-position")
ax.set_ylabel("y-position")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
track_graph1.edges
track_graph2.edges
traversal_path = [(0, 1), (1, 2), (2, 3), (3, 2), (2, 1), (1, 0), (0, 1), (1, 4), (4, 5), (5, 4), (4, 1), (1, 0)]
def generate_position(traversal_path, track_graph, step_size=0.020, n_traversals=10):
    """Simulate 2D positions by walking `traversal_path` over `track_graph`.

    Each edge of the path is sampled at roughly `step_size` spacing by
    linear interpolation between its endpoint coordinates, and the whole
    path is repeated `n_traversals` times.

    Returns
    -------
    np.ndarray, shape (n_samples, 2)
        (x, y) positions in traversal order.
    """
    segments = []
    for _ in range(n_traversals):
        for start_node, end_node in traversal_path:
            start_x, start_y = track_graph.nodes[start_node]['pos']
            end_x, end_y = track_graph.nodes[end_node]['pos']
            delta_x = end_x - start_x
            delta_y = end_y - start_y
            edge_length = np.sqrt(delta_x**2 + delta_y**2)
            n_samples = int(edge_length // step_size)
            fraction = np.linspace(0, 1, n_samples)
            segments.append((start_x + delta_x * fraction,
                             start_y + delta_y * fraction))
    return np.concatenate(segments, axis=1).T
position1 = generate_position(traversal_path, track_graph1)
fig, ax = plt.subplots()
plot_track_graph(track_graph1, ax=ax, draw_edge_labels=True)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_xlabel("x-position")
ax.set_ylabel("y-position")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.scatter(position1[:, 0], position1[:, 1], zorder=10, color='orange')
position2 = generate_position(traversal_path, track_graph2)
fig, ax = plt.subplots()
plot_track_graph(track_graph2, ax=ax, draw_edge_labels=True)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_xlabel("x-position")
ax.set_ylabel("y-position")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.scatter(position2[:, 0], position2[:, 1], zorder=10, color='orange')
from replay_trajectory_classification import plot_graph_as_1D
edge_order = [(0, 1),
(1, 2),
(2, 3),
(1, 4),
(4, 5)
]
edge_spacing = [30, 0, 30, 0, 0, 0]
plot_graph_as_1D(track_graph1, edge_order, edge_spacing)
from track_linearization import get_linearized_position
position_df1 = get_linearized_position(position1,
track_graph1,
edge_order=edge_order,
edge_spacing=edge_spacing,
use_HMM=False)
plt.figure(figsize=(10, 5))
plt.scatter(np.arange(position_df1.shape[0]), position_df1.linear_position)
position_df2 = get_linearized_position(position2,
track_graph2,
edge_order=edge_order,
edge_spacing=edge_spacing,
use_HMM=False)
plt.figure(figsize=(10, 5))
plt.scatter(np.arange(position_df2.shape[0]), position_df2.linear_position)
from replay_trajectory_classification.simulate import simulate_neuron_with_place_field
sampling_frequency = 1000
place_field_centers = generate_position(traversal_path, track_graph1, step_size=10, n_traversals=1)
place_field_centers = np.unique(place_field_centers, axis=0)
fig, ax = plt.subplots()
plot_track_graph(track_graph1, ax=ax, draw_edge_labels=True)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_xlabel("x-position")
ax.set_ylabel("y-position")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.scatter(place_field_centers[:, 0], place_field_centers[:, 1], zorder=10, color='red')
spikes1 = np.stack([simulate_neuron_with_place_field(center, position1,
sampling_frequency=sampling_frequency,
variance=6.0**2)
for center in place_field_centers], axis=1)
fig, ax = plt.subplots()
plot_track_graph(track_graph1, ax=ax, draw_edge_labels=True)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_xlabel("x-position")
ax.set_ylabel("y-position")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
for spike in spikes1.T:
spike_ind = np.nonzero(spike)[0]
ax.scatter(position1[spike_ind, 0], position1[spike_ind, 1])
from replay_trajectory_classification.simulate import simulate_neuron_with_place_field
sampling_frequency = 1000
place_field_centers = generate_position(traversal_path, track_graph2, step_size=10, n_traversals=1)
place_field_centers = np.unique(place_field_centers, axis=0)
fig, ax = plt.subplots()
plot_track_graph(track_graph2, ax=ax, draw_edge_labels=True)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_xlabel("x-position")
ax.set_ylabel("y-position")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.scatter(place_field_centers[:, 0], place_field_centers[:, 1], zorder=10, color='red')
spikes2 = np.stack([simulate_neuron_with_place_field(center, position2,
sampling_frequency=sampling_frequency,
variance=6.0**2)
for center in place_field_centers], axis=1)
fig, ax = plt.subplots()
plot_track_graph(track_graph2, ax=ax, draw_edge_labels=True)
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_xlabel("x-position")
ax.set_ylabel("y-position")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
for spike in spikes2.T:
spike_ind = np.nonzero(spike)[0]
ax.scatter(position2[spike_ind, 0], position2[spike_ind, 1])
from replay_trajectory_classification.simulate import simulate_neuron_with_place_field
from track_linearization import get_linearized_position
from replay_trajectory_classification import make_track_graph
import numpy as np
def generate_position(traversal_path, track_graph, step_size=0.020, n_traversals=10):
    """Simulate a 2D trajectory by walking each edge of *traversal_path*.

    Each edge is sampled with points spaced roughly *step_size* apart
    (inclusive of both endpoints), and the full path is repeated
    *n_traversals* times.

    Parameters
    ----------
    traversal_path : sequence of (node, node) pairs
    track_graph : graph with ``nodes[node]['pos']`` giving (x, y)
    step_size : float, spacing between consecutive samples on an edge
    n_traversals : int, number of times the path is repeated

    Returns
    -------
    np.ndarray, shape (n_samples, 2)
    """
    segments = []
    for _ in range(n_traversals):
        for start_node, end_node in traversal_path:
            start = np.asarray(track_graph.nodes[start_node]['pos'], dtype=float)
            end = np.asarray(track_graph.nodes[end_node]['pos'], dtype=float)
            delta = end - start
            edge_length = np.sqrt(delta @ delta)
            n_points = int(edge_length // step_size)
            fractions = np.linspace(0, 1, n_points)
            # (2, n_points): linear interpolation between the two endpoints.
            segments.append(start[:, np.newaxis] + delta[:, np.newaxis] * fractions)
    return np.concatenate(segments, axis=1).T
def make_two_environment_data():
    """Build a paired simulated dataset for two different track environments.

    Creates two track graphs with the same topology (same edge list) but
    different node layouts, simulates a traversal trajectory on each,
    linearizes the 2D positions to a 1D track coordinate, and generates
    place-cell spike trains along each track.

    Returns
    -------
    tuple
        (spikes1, spikes2, position_df1, position_df2, track_graph1,
         track_graph2, place_field_centers1, place_field_centers2,
         position1, position2, edge_order, edge_spacing)
    """
    # Node layouts differ between the two environments; node indices and
    # edges are shared so the same traversal path applies to both.
    node_positions1 = [(40, 80),  # 0
                       (40, 0),   # 1
                       (0, 0),    # 2
                       (0, 80),   # 3
                       (80, 0),   # 4
                       (80, 80),  # 5
                       ]
    node_positions2 = [(80, 40),
                       (0, 40),
                       (0, 80),
                       (80, 80),
                       (0, 0),
                       (80, 0),
                       ]
    edges = [(0, 1),
             (1, 2),
             (2, 3),
             (1, 4),
             (4, 5)
             ]
    track_graph1 = make_track_graph(node_positions1, edges)
    track_graph2 = make_track_graph(node_positions2, edges)
    # Out-and-back traversal that visits both arms of the track.
    traversal_path = [(0, 1), (1, 2), (2, 3), (3, 2), (2, 1), (1, 0), (0, 1), (1, 4), (4, 5), (5, 4), (4, 1), (1, 0)]
    position1 = generate_position(traversal_path, track_graph1)
    position2 = generate_position(traversal_path, track_graph2)
    # Edge ordering/spacing used to lay the 2D track out on a 1D axis.
    edge_order = ((0, 1),
                  (1, 2),
                  (2, 3),
                  (1, 4),
                  (4, 5)
                  )
    edge_spacing = (30, 0, 30, 0)
    position_df1 = get_linearized_position(position1,
                                           track_graph1,
                                           edge_order=edge_order,
                                           edge_spacing=edge_spacing,
                                           use_HMM=False)
    position_df2 = get_linearized_position(position2,
                                           track_graph2,
                                           edge_order=edge_order,
                                           edge_spacing=edge_spacing,
                                           use_HMM=False)
    sampling_frequency = 1000  # assumed Hz — confirm against simulate_neuron_with_place_field
    # Candidate place-field centers: one every ~10 units along one traversal.
    place_field_centers1 = generate_position(traversal_path, track_graph1, step_size=10, n_traversals=1)
    place_field_centers1 = np.unique(place_field_centers1, axis=0)
    spikes1 = np.stack([simulate_neuron_with_place_field(center, position1,
                                                         sampling_frequency=sampling_frequency,
                                                         variance=6.0**2)
                        for center in place_field_centers1], axis=1)
    place_field_centers2 = generate_position(traversal_path, track_graph2, step_size=10, n_traversals=1)
    place_field_centers2 = np.unique(place_field_centers2, axis=0)
    spikes2 = np.stack([simulate_neuron_with_place_field(center, position2,
                                                         sampling_frequency=sampling_frequency,
                                                         variance=6.0**2)
                        for center in place_field_centers2], axis=1)
    return (spikes1, spikes2,
            position_df1, position_df2,
            track_graph1, track_graph2,
            place_field_centers1, place_field_centers2,
            position1, position2,
            edge_order, edge_spacing
            )
# Build the full two-environment dataset once and unpack it into the
# module-level variables used by the rest of the notebook.
(spikes1, spikes2,
 position_df1, position_df2,
 track_graph1, track_graph2,
 place_field_centers1, place_field_centers2,
 position1, position2,
 edge_order, edge_spacing
 ) = make_two_environment_data()
```
| Likelihood |
|------------------|
| env1, continuous |
| env2, continuous |
| | env1, continuous | env2, continuous |
|-----------------------|-------------------|-------------------|
| **env1, continuous** | random walk, env1 | uniform |
| **env2, continuous** | uniform | random walk, env2 |
+ Change text inputs for continuous_transition_types to classes
```
from replay_trajectory_classification.bins import (atleast_2d, get_centers,
get_grid, get_track_grid,
get_track_interior)
from dataclasses import dataclass
import networkx as nx
import matplotlib.pyplot as plt
@dataclass
class Environment:
    """A spatial environment and its discretized place-bin grid.

    If ``track_graph`` is provided, bins are laid out along the linearized
    track; otherwise a regular n-D grid is built from the position data.
    """
    environment_name: str = None
    place_bin_size: float = 2.0          # spatial extent of one bin
    track_graph: nx.Graph = None         # optional 1D track topology
    edge_order: tuple = None             # edge ordering for linearization
    edge_spacing: tuple = None           # gaps inserted between edges
    is_track_interior: np.ndarray = None # user-supplied interior mask (optional)
    position_range: np.ndarray = None    # optional explicit grid extent
    infer_track_interior: bool = True

    def fit_place_grid(self, position=None, infer_track_interior=True):
        """Discretize the environment into place bins.

        Sets the fitted ``*_`` attributes (bin edges/centers, interior mask,
        etc.) and returns ``self`` for chaining.
        """
        if self.track_graph is None:
            # No track graph: build a regular grid directly from positions.
            (self.edges_,
             self.place_bin_edges_,
             self.place_bin_centers_,
             self.centers_shape_
             ) = get_grid(position, self.place_bin_size, self.position_range,
                          self.infer_track_interior)
            self.infer_track_interior = infer_track_interior
            if self.is_track_interior is None and self.infer_track_interior:
                self.is_track_interior_ = get_track_interior(
                    position, self.edges_)
            elif self.is_track_interior is None and not self.infer_track_interior:
                # FIX: the original referenced the bare name `is_track_interior`
                # (a NameError) — the dataclass field `self.is_track_interior`
                # was intended. Also `np.bool` was removed in NumPy 1.24;
                # the builtin `bool` is the correct dtype.
                self.is_track_interior_ = np.ones(
                    self.centers_shape_, dtype=bool)
            # NOTE(review): when `is_track_interior` IS supplied, the fitted
            # `is_track_interior_` attribute is never set — confirm intended.
        else:
            # Track graph given: bins follow the 1D linearized track.
            (
                self.place_bin_centers_,
                self.place_bin_edges_,
                self.is_track_interior_,
                self.distance_between_nodes_,
                self.centers_shape_,
                self.edges_,
                self.track_graph_with_bin_centers_edges_,
                self.original_nodes_df_,
                self.place_bin_edges_nodes_df_,
                self.place_bin_centers_nodes_df_,
                self.nodes_df_
            ) = get_track_grid(self.track_graph, self.edge_order,
                               self.edge_spacing, self.place_bin_size)
        return self

    def plot_grid(self, ax=None):
        """Plot the fitted bin grid: 1D layout if a track graph was given,
        otherwise the 2D interior mask."""
        if self.track_graph is not None:
            if ax is None:
                fig, ax = plt.subplots(figsize=(15, 2))
            # NOTE(review): plot_graph_as_1D is not imported in this cell —
            # presumably available in the notebook namespace; confirm.
            plot_graph_as_1D(self.track_graph, self.edge_order,
                             self.edge_spacing, ax=ax)
            for edge in self.edges_[0]:
                ax.axvline(edge.squeeze(), linewidth=0.5, color='black')
            ax.set_ylim((0, 0.1))
        else:
            if ax is None:
                fig, ax = plt.subplots(figsize=(6, 7))
            ax.pcolormesh(self.edges_[0], self.edges_[1], self.is_track_interior_.T,
                          cmap='bone_r')
            ax.set_xticks(self.edges_[0], minor=True)
            ax.set_yticks(self.edges_[1], minor=True)
            ax.grid(True, which='both')
# Build one Environment per track and fit its 1D place-bin grid from the
# linearized positions.
env1 = Environment(environment_name='w_track1',
                   place_bin_size=2.0,
                   track_graph=track_graph1,
                   edge_spacing=edge_spacing,
                   edge_order=edge_order)
env1.fit_place_grid(position_df1.linear_position)
env2 = Environment(environment_name='w_track2',
                   place_bin_size=2.0,
                   track_graph=track_graph2,
                   edge_spacing=edge_spacing,
                   edge_order=edge_order)
env2.fit_place_grid(position_df2.linear_position)
env1, env2  # notebook display of the fitted environments
from replay_trajectory_classification.state_transition import _normalize_row_probability, atleast_2d
from scipy.stats import multivariate_normal
import numpy as np
def _random_walk_on_track_graph(
place_bin_centers, movement_var,
place_bin_center_ind_to_node,
distance_between_nodes
):
state_transition = np.zeros(
(place_bin_centers.size, place_bin_centers.size))
gaussian = multivariate_normal(mean=0, cov=movement_var)
for bin_ind1, node1 in enumerate(place_bin_center_ind_to_node):
for bin_ind2, node2 in enumerate(place_bin_center_ind_to_node):
try:
state_transition[bin_ind1, bin_ind2] = gaussian.pdf(
distance_between_nodes[node1][node2]
)
except KeyError:
# bins not on track interior will be -1 and not in distance
# between nodes
continue
return state_transition
@dataclass
class RandomWalk:
    """Gaussian random-walk transition over one environment's place bins."""
    environment_name: str = None
    movement_var: float = 6.0  # variance of the per-step Gaussian kernel

    def make_state_transition(self, environments: dict):
        """Return a row-normalized (n_bins, n_bins) transition matrix.

        Parameters
        ----------
        environments : dict mapping environment name -> fitted Environment
        """
        self.environment = environments[self.environment_name]
        if self.environment.track_graph is None:
            # Euclidean grid: evaluate each bin center's Gaussian at all bins.
            # FIX: the original referenced an undefined name
            # `place_bin_centers` (NameError); the environment's fitted
            # bin centers were intended.
            transition_matrix = np.stack(
                [multivariate_normal(
                    mean=center, cov=self.movement_var).pdf(
                        self.environment.place_bin_centers_)
                 for center in self.environment.place_bin_centers_], axis=1)
        else:
            # Track graph: distances are measured along the graph.
            place_bin_center_ind_to_node = np.asarray(
                self.environment.place_bin_centers_nodes_df_.node_id)
            transition_matrix = _random_walk_on_track_graph(
                self.environment.place_bin_centers_,
                self.movement_var,
                place_bin_center_ind_to_node,
                self.environment.distance_between_nodes_
            )
        # Zero transitions into/out of off-track bins, then renormalize rows.
        is_track_interior = self.environment.is_track_interior_.ravel(order='F')
        transition_matrix[~is_track_interior] = 0.0
        transition_matrix[:, ~is_track_interior] = 0.0
        self.state_transition_ = _normalize_row_probability(transition_matrix)
        return self.state_transition_
@dataclass
class Uniform:
    """Uniform transition between the interior bins of one or two environments."""
    environment_name: str
    environment2_name: str = None  # destination environment; defaults to source

    def make_state_transition(self, environments):
        """Return a row-normalized uniform matrix of shape (n_bins1, n_bins2)."""
        self.environment1 = environments[self.environment_name]
        interior1 = self.environment1.is_track_interior_.ravel(order='F')
        if self.environment2_name is None:
            # Single environment: uniform jumps within the same bin set.
            interior2 = interior1.copy()
        else:
            self.environment2 = environments[self.environment2_name]
            interior2 = self.environment2.is_track_interior_.ravel(order='F')
        # Ones on interior-to-interior pairs, zeros elsewhere — exactly the
        # outer product of the two interior masks; rows are then normalized.
        uniform = np.outer(interior1, interior2).astype(float)
        return _normalize_row_probability(uniform)
@dataclass
class Identity:
    """Stay-in-place transition: each interior bin maps only to itself."""
    environment_name: str = None

    def make_state_transition(self, environments: dict):
        """Return a row-normalized identity restricted to interior bins."""
        self.environment = environments[self.environment_name]
        interior = self.environment.is_track_interior_.ravel(order='F')
        # Diagonal of the interior mask == identity with off-track
        # rows/columns zeroed, which is what the normalizer expects.
        stay_put = np.diag(interior.astype(float))
        return _normalize_row_probability(stay_put)
@dataclass
class EmpiricalMovement:
    """Transition estimated from the animal's observed movement.

    A 2D histogram of (current, previous) positions is row-normalized and
    raised to the ``speedup`` power so replay can move faster than behavior.
    """
    environment_name: str = None
    speedup: int = 20  # exponent applied to the one-step transition matrix

    def make_state_transition(self, environments, position, is_training=None):
        """Return the (n_bins, n_bins) empirical transition matrix."""
        self.environment = environments[self.environment_name]
        if is_training is None:
            # FIX: `np.bool` was removed in NumPy 1.24; use the builtin bool.
            is_training = np.ones((position.shape[0],), dtype=bool)
        position = atleast_2d(position)[is_training]
        # Joint histogram over (position at t, position at t-1).
        state_transition, _ = np.histogramdd(
            np.concatenate((position[1:], position[:-1]), axis=1),
            bins=self.environment.edges_ * 2,
            range=self.environment.position_range)
        original_shape = state_transition.shape
        n_position_dims = position.shape[1]
        # FIX: `np.product` is deprecated (removed in NumPy 2.0); use np.prod.
        shape_2d = np.prod(original_shape[:n_position_dims])
        state_transition = _normalize_row_probability(
            state_transition.reshape((shape_2d, shape_2d), order='F'))
        return np.linalg.matrix_power(state_transition, self.speedup)
@dataclass
class RandomWalkDirection1:
    """Random walk restricted to one travel direction (upper triangle)."""
    environment_name: str = None
    movement_var: float = 6.0

    def make_state_transition(self, environments: dict):
        """Return the upper-triangular part of a RandomWalk, renormalized."""
        walk = RandomWalk(self.environment_name,
                          self.movement_var).make_state_transition(environments)
        # Keep only transitions toward equal-or-higher bin indices.
        return _normalize_row_probability(np.triu(walk))
@dataclass
class RandomWalkDirection2:
    """Random walk restricted to the opposite travel direction (lower triangle)."""
    environment_name: str = None
    movement_var: float = 6.0

    def make_state_transition(self, environments: dict):
        """Return the lower-triangular part of a RandomWalk, renormalized."""
        walk = RandomWalk(self.environment_name,
                          self.movement_var).make_state_transition(environments)
        # Keep only transitions toward equal-or-lower bin indices.
        return _normalize_row_probability(np.tril(walk))
# Smoke-test each transition type against the two fitted environments.
environments = {'w_track1': env1, 'w_track2': env2}
RandomWalk(environment_name='w_track2').make_state_transition(environments)
Uniform(environment_name='w_track1').make_state_transition(environments)
Identity(environment_name='w_track2').make_state_transition(environments)
EmpiricalMovement(environment_name='w_track2').make_state_transition(environments, position_df2.linear_position)
RandomWalkDirection1(environment_name='w_track2').make_state_transition(environments)
# NOTE(review): environment_name defaults to None, so this lookup should
# raise a KeyError — presumably exploratory; confirm intent.
RandomWalkDirection1().make_state_transition(environments)
None == None  # scratch check: comparing default names; evaluates to True
[Identity(environment_name='w_track2'), Identity(environment_name='w_track2'), RandomWalk(environment_name='w_track2')]
from dataclasses import dataclass
import numpy as np
@dataclass
class DiagonalDiscrete:
    """Discrete-state transition with a strong diagonal.

    Each state stays with probability ``diagonal_value``; the remaining
    probability mass is split evenly among the other states.
    """
    diagonal_value: float = 0.98

    def make_state_transition(self, n_states):
        """Return an (n_states, n_states) row-stochastic matrix."""
        if n_states == 1:
            # FIX: original divided by (n_states - 1) and raised
            # ZeroDivisionError; a single state must self-transition.
            self.state_transition_ = np.ones((1, 1))
            return self.state_transition_
        off_diagonal_value = (1 - self.diagonal_value) / (n_states - 1)
        transition = np.full((n_states, n_states), off_diagonal_value)
        np.fill_diagonal(transition, self.diagonal_value)
        self.state_transition_ = transition
        return self.state_transition_
@dataclass
class UniformDiscrete:
    """All discrete-state transitions are equally likely."""

    def make_state_transition(self, n_states):
        """Return an (n_states, n_states) matrix with every entry 1/n_states."""
        self.state_transition_ = np.full((n_states, n_states), 1.0 / n_states)
        return self.state_transition_
@dataclass
class RandomDiscrete:
    """Row-stochastic matrix with uniformly random entries (for testing)."""

    def make_state_transition(self, n_states):
        """Return an (n_states, n_states) random matrix whose rows sum to 1."""
        raw = np.random.rand(n_states, n_states)
        # Normalizing each row turns the raw draws into probabilities.
        self.state_transition_ = raw / raw.sum(axis=1, keepdims=True)
        return self.state_transition_
# Quick visual check of the discrete transition builders.
UniformDiscrete().make_state_transition(3), DiagonalDiscrete(0.4).make_state_transition(3), RandomDiscrete().make_state_transition(3)
DiagonalDiscrete(1/5).make_state_transition(5)
# Switch to the library versions of the transition classes (these shadow
# the prototypes defined above in this notebook).
from replay_trajectory_classification.continuous_state_transitions import RandomWalk, Uniform
from replay_trajectory_classification.discrete_state_transitions import DiagonalDiscrete
# 2x2 grid of continuous transitions: random walk within each environment
# on the diagonal, uniform jumps between environments off the diagonal.
continuous_state_transition_types = [[RandomWalk('w_track1', 6.0), Uniform('w_track1', 'w_track2')],
                                     [Uniform('w_track2', 'w_track1'), RandomWalk('w_track2', 6.0)]]
environments = {'w_track1': env1, 'w_track2': env2}
continuous_state_transition = [[ct.make_state_transition(environments) for ct in ct1]
                               for ct1 in continuous_state_transition_types]
discrete_state_transition = DiagonalDiscrete(0.98).make_state_transition(n_states=2)
discrete_state_transition
@dataclass
class UniformInitialConditions:
    """Uniform initial distribution over the interior bins of several environments."""
    environment_names: tuple

    def make_initial_conditions(self, environments):
        """Return one array per environment; entries sum to 1 across all of them.

        Each on-track bin receives equal probability mass, pooled over
        every listed environment; off-track bins get 0.
        """
        interiors = [environments[name].is_track_interior_
                     for name in self.environment_names]
        total_interior_bins = sum(interior.sum() for interior in interiors)
        return [interior / total_interior_bins for interior in interiors]
# Uniform prior over all interior bins, pooled across both environments.
initial_conditions = UniformInitialConditions(('w_track1', 'w_track2')).make_initial_conditions(environments)
initial_conditions
from replay_trajectory_classification.spiking_likelihood import estimate_place_fields
# Map each latent state to an (environment, encoding group) pair.
environment_names_to_state = ('w_track1', 'w_track1', 'w_track1', 'w_track1')
encoding_groups_to_state = ('Inbound', 'Inbound', 'Outbound', 'Outbound')
states = tuple(zip(environment_names_to_state, encoding_groups_to_state))
# Label the first half of session 1 'Outbound' and the second half 'Inbound'.
n_time = position_df1.linear_position.values.shape[0]
is_training1 = np.ones((n_time,), dtype=bool)
encoding_group_labels1 = np.concatenate((np.asarray(['Outbound'] * (n_time//2)), np.asarray(['Inbound'] * (n_time - (n_time//2)))))
n_time = position_df2.linear_position.values.shape[0]
is_training2 = np.ones((n_time,), dtype=bool)
encoding_group_labels2 = np.asarray(['Inbound'] * n_time)
# Per-environment training tuples: (position, spikes, training mask, labels).
training_data = {
    'w_track1': (position_df1.linear_position.values[:, np.newaxis], spikes1, is_training1, encoding_group_labels1),
    'w_track2': (position_df2.linear_position.values[:, np.newaxis], spikes2, is_training2, encoding_group_labels2),
}
# Fit one set of place fields per unique (environment, encoding group) pair.
place_fields = {}
for environment_name, encoding_group in set(states):
    print(environment_name, encoding_group)
    environment = environments[environment_name]
    position, spikes, is_training, encoding_group_labels = training_data[environment_name]
    place_fields[(environment_name, encoding_group)] = estimate_place_fields(
        position=position[is_training & (encoding_group_labels == encoding_group)],
        spikes=spikes[is_training & (encoding_group_labels == encoding_group)],
        place_bin_centers=environment.place_bin_centers_,
        place_bin_edges=environment.place_bin_edges_,
        penalty=1E1,
        knot_spacing=10)
place_fields
from replay_trajectory_classification.spiking_likelihood import estimate_place_fields
# Alternative bookkeeping: one long concatenated recording with explicit
# per-sample environment labels instead of a per-environment dict.
environment_names_to_state = ('w_track1', 'w_track2', 'w_track1', 'w_track2')
encoding_groups_to_state = ('Inbound', 'Inbound', 'Outbound', 'Outbound')
states = tuple(zip(environment_names_to_state, encoding_groups_to_state))
n_time = position_df1.linear_position.values.shape[0]
is_training1 = np.ones((n_time,), dtype=bool)
encoding_group_labels1 = np.concatenate((np.asarray(['Outbound'] * (n_time//2)), np.asarray(['Inbound'] * (n_time - (n_time//2)))))
environment_labels1 = np.asarray(['w_track1'] * n_time)
n_time = position_df2.linear_position.values.shape[0]
is_training2 = np.ones((n_time,), dtype=bool)
encoding_group_labels2 = np.concatenate((np.asarray(['Outbound'] * (n_time//2)), np.asarray(['Inbound'] * (n_time - (n_time//2)))))
environment_labels2 = np.asarray(['w_track2'] * n_time)
# Concatenate both sessions along the time axis.
position = np.concatenate((position_df1.linear_position.values[:, np.newaxis],
                           (position_df2.linear_position.values[:, np.newaxis])))
spikes = np.concatenate((spikes1, spikes2))
is_training = np.concatenate((is_training1, is_training2))
encoding_group_labels = np.concatenate((encoding_group_labels1, encoding_group_labels2))
environment_labels = np.concatenate((environment_labels1, environment_labels2))
# Fit place fields by masking the concatenated arrays per (env, group).
place_fields = {}
for environment_name, encoding_group in set(states):
    print(environment_name, encoding_group)
    environment = environments[environment_name]
    is_encoding = (encoding_group_labels == encoding_group)
    is_environment = (environment_labels == environment_name)
    place_fields[(environment_name, encoding_group)] = estimate_place_fields(
        position=position[is_training & is_encoding & is_environment],
        spikes=spikes[is_training & is_encoding & is_environment],
        place_bin_centers=environment.place_bin_centers_,
        place_bin_edges=environment.place_bin_edges_,
        penalty=1E1,
        knot_spacing=10)
place_fields
@dataclass
class TrainingData:
    """Bundle of everything needed to fit encoding models for one environment."""
    environment_name: str = None
    position: np.ndarray = None  # (n_time, n_position_dims), e.g. linearized position
    spikes: np.ndarray = None  # (n_time, n_neurons) binary spike indicators
    multiunit: np.ndarray = None  # clusterless features — unused in this notebook
    is_training: np.ndarray = None  # boolean mask over time samples
    encoding_group_labels: np.ndarray = None  # per-sample group label (e.g. 'Inbound')
@dataclass
class PredictData:
    """Data needed at decode time (no labels or training mask)."""
    position: np.ndarray = None
    spikes: np.ndarray = None
    multiunit: np.ndarray = None  # unused in this notebook
# Example use of the TrainingData container, one instance per environment.
training_data1 = TrainingData(
    environment_name='w_track1',
    position=position_df1.linear_position.values[:, np.newaxis],
    spikes=spikes1,
    is_training=is_training1,
    encoding_group_labels=encoding_group_labels1
)
training_data2 = TrainingData(
    environment_name='w_track2',
    position=position_df2.linear_position.values[:, np.newaxis],
    spikes=spikes2,
    is_training=is_training2,
    encoding_group_labels=encoding_group_labels2
)
# or just one long vector of environment, labels, position, spikes
# Compare inbound vs. outbound place fields for environment 1.
fig, axes = plt.subplots(2, 1, sharex=True, sharey=True)
place_fields['w_track1', 'Inbound'].plot(x='position', hue='neuron', add_legend=False, ax=axes[0]);
place_fields['w_track1', 'Outbound'].plot(x='position', hue='neuron', add_legend=False, ax=axes[1]);
encoding_group_labels
def fit_place_fields(self, position, spikes, is_training=None,
                     encoding_group_labels=None,
                     encoding_group_to_state=None):
    """Fit one set of place fields per encoding group present in training.

    Sketch of a classifier method: defaults the training mask to all-True
    and the group labels to a single group, records the group-to-state
    mapping, and appends one fitted place-field set per unique label.

    NOTE(review): relies on `logger`, `estimate_place_fields`, and several
    `self.*` attributes defined outside this cell — confirm against the
    enclosing class.
    """
    logger.info('Fitting place fields...')
    if is_training is None:
        n_time = position.shape[0]
        # FIX: `np.bool` was removed in NumPy 1.24; use the builtin bool.
        is_training = np.ones((n_time,), dtype=bool)
    if encoding_group_labels is None:
        # Default: every sample belongs to a single encoding group (0).
        n_time = position.shape[0]
        encoding_group_labels = np.zeros((n_time,), dtype=np.int32)
    if encoding_group_to_state is None:
        # Default: every latent state uses encoding group 0.
        n_states = len(self.continuous_transition_types)
        self.encoding_group_to_state_ = np.zeros(
            (n_states,), dtype=np.int32)
    else:
        self.encoding_group_to_state_ = np.asarray(encoding_group_to_state)
    is_training = np.asarray(is_training).squeeze()
    self.place_fields_ = []
    # Only fit groups that actually appear in the training mask.
    unique_labels = np.unique(encoding_group_labels[is_training])
    for encoding_group in unique_labels:
        self.place_fields_.append(estimate_place_fields(
            position=position[is_training & (
                encoding_group_labels == encoding_group)],
            spikes=spikes[is_training & (
                encoding_group_labels == encoding_group)],
            place_bin_centers=self.place_bin_centers_,
            place_bin_edges=self.place_bin_edges_,
            penalty=self.spike_model_penalty,
            knot_spacing=self.knot_spacing))
# how specify training data
# how to incorporate local states
# adding information by multiplying likelihoods (LFPs)
argh = [[1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]]
# Row-by-row copy of the nested list: each inner list is a new object.
blah = [[column for column in row] for row in argh]
blah
```
| github_jupyter |
# Plagiarism Detection Model
Now that you've created training and test data, you are ready to define and train a model. Your goal in this notebook, will be to train a binary classification model that learns to label an answer file as either plagiarized or not, based on the features you provide the model.
This task will be broken down into a few discrete steps:
* Upload your data to S3.
* Define a binary classification model and a training script.
* Train your model and deploy it.
* Evaluate your deployed classifier and answer some questions about your approach.
To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook.
> All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**.
It will be up to you to explore different classification models and decide on a model that gives you the best performance for this dataset.
---
## Load Data to S3
In the last notebook, you should have created two files: a `training.csv` and `test.csv` file with the features and class labels for the given corpus of plagiarized/non-plagiarized text data.
>The below cells load in some AWS SageMaker libraries and creates a default bucket. After creating this bucket, you can upload your locally stored data to S3.
Save your train and test `.csv` feature files, locally. To do this you can run the second notebook "2_Plagiarism_Feature_Engineering" in SageMaker or you can manually upload your files to this notebook using the upload icon in Jupyter Lab. Then you can upload local files to S3 by using `sagemaker_session.upload_data` and pointing directly to where the training data is saved.
```
import pandas as pd
import boto3
import sagemaker
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# session and role
# The session wraps the boto3 client; the execution role grants this
# notebook permission to launch training jobs and read/write S3.
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
# create an S3 bucket
bucket = sagemaker_session.default_bucket()
```
## EXERCISE: Upload your training data to S3
Specify the `data_dir` where you've saved your `train.csv` file. Decide on a descriptive `prefix` that defines where your data will be uploaded in the default S3 bucket. Finally, create a pointer to your training data by calling `sagemaker_session.upload_data` and passing in the required parameters. It may help to look at the [Session documentation](https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.Session.upload_data) or previous SageMaker code examples.
You are expected to upload your entire directory. Later, the training script will only access the `train.csv` file.
```
# should be the name of directory you created to save your features data
data_dir = "plagiarism_data"
# set prefix, a descriptive name for a directory
prefix = "plagiarism-data"
# upload all data to S3
# Uploads the entire local directory under s3://<bucket>/<prefix> and
# returns the S3 URI of the uploaded data.
input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)
print(input_data)
```
### Test cell
Test that your data has been successfully uploaded. The below cell prints out the items in your S3 bucket and will throw an error if it is empty. You should see the contents of your `data_dir` and perhaps some checkpoints. If you see any other files listed, then you may have some old model files that you can delete via the S3 console (though, additional files shouldn't affect the performance of model developed in this notebook).
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# confirm that data is in S3 bucket
# List every object key in the default bucket; the assert fails if the
# upload in the previous cell produced nothing.
empty_check = []
for obj in boto3.resource('s3').Bucket(bucket).objects.all():
    empty_check.append(obj.key)
    print(obj.key)
assert len(empty_check) !=0, 'S3 bucket is empty.'
print('Test passed!')
```
---
# Modeling
Now that you've uploaded your training data, it's time to define and train a model!
The type of model you create is up to you. For a binary classification task, you can choose to go one of three routes:
* Use a built-in classification algorithm, like LinearLearner.
* Define a custom Scikit-learn classifier, a comparison of models can be found [here](https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html).
* Define a custom PyTorch neural network classifier.
It will be up to you to test out a variety of models and choose the best one. Your project will be graded on the accuracy of your final model.
---
## EXERCISE: Complete a training script
To implement a custom classifier, you'll need to complete a `train.py` script. You've been given the folders `source_sklearn` and `source_pytorch` which hold starting code for a custom Scikit-learn model and a PyTorch model, respectively. Each directory has a `train.py` training script. To complete this project **you only need to complete one of these scripts**; the script that is responsible for training your final model.
A typical training script:
* Loads training data from a specified directory
* Parses any training & model hyperparameters (ex. nodes in a neural network, training epochs, etc.)
* Instantiates a model of your design, with any specified hyperparams
* Trains that model
* Finally, saves the model so that it can be hosted/deployed, later
### Defining and training a model
Much of the training script code is provided for you. Almost all of your work will be done in the `if __name__ == '__main__':` section. To complete a `train.py` file, you will:
1. Import any extra libraries you need
2. Define any additional model training hyperparameters using `parser.add_argument`
2. Define a model in the `if __name__ == '__main__':` section
3. Train the model in that same section
Below, you can use `!pygmentize` to display an existing `train.py` file. Read through the code; all of your tasks are marked with `TODO` comments.
**Note: If you choose to create a custom PyTorch model, you will be responsible for defining the model in the `model.py` file,** and a `predict.py` file is provided. If you choose to use Scikit-learn, you only need a `train.py` file; you may import a classifier from the `sklearn` library.
```
# directory can be changed to: source_sklearn or source_pytorch
!pygmentize source_pytorch/train.py
```
### Provided code
If you read the code above, you can see that the starter code includes a few things:
* Model loading (`model_fn`) and saving code
* Getting SageMaker's default hyperparameters
* Loading the training data by name, `train.csv` and extracting the features and labels, `train_x`, and `train_y`
If you'd like to read more about model saving with [joblib for sklearn](https://scikit-learn.org/stable/modules/model_persistence.html) or with [torch.save](https://pytorch.org/tutorials/beginner/saving_loading_models.html), click on the provided links.
---
# Create an Estimator
When a custom model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained; the `train.py` function you specified above. To run a custom training script in SageMaker, construct an estimator, and fill in the appropriate constructor arguments:
* **entry_point**: The path to the Python script SageMaker runs for training and prediction.
* **source_dir**: The path to the training script directory `source_sklearn` OR `source_pytorch`.
* **role**: Role ARN, which was specified, above.
* **train_instance_count**: The number of training instances (should be left at 1).
* **train_instance_type**: The type of SageMaker instance for training. Note: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
* **sagemaker_session**: The session used to train on Sagemaker.
* **hyperparameters** (optional): A dictionary `{'name':value, ..}` passed to the train function as hyperparameters.
Note: For a PyTorch model, there is another optional argument **framework_version**, which you can set to the latest version of PyTorch, `1.0`.
## EXERCISE: Define a Scikit-learn or PyTorch estimator
To import your desired estimator, use one of the following lines:
```
from sagemaker.sklearn.estimator import SKLearn
```
```
from sagemaker.pytorch import PyTorch
```
```
# your import and estimator code, here
from sagemaker.pytorch import PyTorch
# Where SageMaker will write the trained model artifacts.
output_path = 's3://{}/{}'.format(bucket, prefix)
estimator = PyTorch(entry_point='train.py',
                    source_dir='source_pytorch',
                    role=role,
                    framework_version='1.0',
                    py_version='py3',
                    instance_count=1,
                    instance_type='ml.c4.xlarge',
                    output_path=output_path,
                    sagemaker_session=sagemaker_session,
                    hyperparameters={
                        'input_dim': 4,   # assumes 4 feature columns in train.csv — confirm
                        'hidden_dim': 20,
                        'output_dim': 1,  # one output for the binary label
                        'epochs': 1000
                    })
```
## EXERCISE: Train the estimator
Train your estimator on the training data stored in S3. This should create a training job that you can monitor in your SageMaker console.
```
%%time
# Train your estimator on S3 training data
# Launches a SageMaker training job; 'train' is the input channel name
# that the training script reads its data from.
estimator.fit({'train': input_data})
```
## EXERCISE: Deploy the trained model
After training, deploy your model to create a `predictor`. If you're using a PyTorch model, you'll need to create a trained `PyTorchModel` that accepts the trained `<model>.model_data` as an input parameter and points to the provided `source_pytorch/predict.py` file as an entry point.
To deploy a trained model, you'll use `<model>.deploy`, which takes in two arguments:
* **initial_instance_count**: The number of deployed instances (1).
* **instance_type**: The type of SageMaker instance for deployment.
Note: If you run into an instance error, it may be because you chose the wrong training or deployment instance_type. It may help to refer to your previous exercise code to see which types of instances we used.
```
%%time
# uncomment, if needed
from sagemaker.pytorch import PyTorchModel
# Wrap the trained artifacts in a PyTorchModel so that inference uses the
# custom predict.py entry point.
model = PyTorchModel(model_data=estimator.model_data,
                     role = role,
                     framework_version='1.0',
                     py_version='py3',
                     entry_point='predict.py',
                     source_dir='source_pytorch')
# deploy your model to create a predictor
# Provisions a hosted endpoint; this accrues cost until deleted.
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge') #'ml.m4.medium')
```
---
# Evaluating Your Model
Once your model is deployed, you can see how it performs when applied to our test data.
The provided cell below, reads in the test data, assuming it is stored locally in `data_dir` and named `test.csv`. The labels and features are extracted from the `.csv` file.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import os
# read in test data, assuming it is stored locally
test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None)
# labels are in the first column
test_y = test_data.iloc[:,0]
# remaining columns are the similarity features
test_x = test_data.iloc[:,1:]
```
## EXERCISE: Determine the accuracy of your model
Use your deployed `predictor` to generate predicted, class labels for the test data. Compare those to the *true* labels, `test_y`, and calculate the accuracy as a value between 0 and 1.0 that indicates the fraction of test data that your model classified correctly. You may use [sklearn.metrics](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) for this calculation.
**To pass this project, your model should get at least 90% test accuracy.**
```
# First: generate predicted, class labels
test_y_preds = predictor.predict(test_x)
print (test_y_preds)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# test that your model generates the correct number of labels
assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.'
print('Test passed!')
import sklearn
from sklearn import metrics
# Second: calculate the test accuracy
# FIX: sklearn's signature is accuracy_score(y_true, y_pred, ...). The
# value is unchanged here because accuracy is symmetric, but the correct
# order matters if this is later swapped for a non-symmetric metric
# (precision, recall, confusion matrix, ...).
accuracy = metrics.accuracy_score(test_y, test_y_preds, normalize=True)
print(accuracy)
## print out the array of predicted and true labels, if you want
print('\nPredicted class labels: ')
print(test_y_preds)
print('\nTrue class labels: ')
print(test_y.values)
```
### Question 1: How many false positives and false negatives did your model produce, if any? And why do you think this is?
**Answer**: After 1000 training epochs, the model predicted with an accuracy of 100%. That means there were no false positives and no false negatives, because accuracy = (TP + TN) / (TP + TN + FP + FN). The reason for this is probably the small size of the data sets, especially for testing. With a larger validation set, one would expect less than perfect accuracy, even if still >98%.
### Question 2: How did you decide on the type of model to use?
**Answer**: By experimentation. This model with these params reached 100% accuracy very quickly.
----
## EXERCISE: Clean up Resources
After you're done evaluating your model, **delete your model endpoint**. You can do this with a call to `.delete_endpoint()`. You need to show, in this notebook, that the endpoint was deleted. Any other resources, you may delete from the AWS console, and you will find more instructions on cleaning up all your resources, below.
```
# uncomment and fill in the line below!
# Tear down the hosted endpoint so it stops incurring charges.
predictor.delete_endpoint()
```
### Deleting S3 bucket
When you are *completely* done with training and testing models, you can also delete your entire S3 bucket. If you do this before you are done training your model, you'll have to recreate your S3 bucket and upload your training data again.
```
# deleting bucket, uncomment lines below
# Empties the S3 bucket (delete() on the object collection removes every object).
bucket_to_delete = boto3.resource('s3').Bucket(bucket)
bucket_to_delete.objects.all().delete()
```
### Deleting all your models and instances
When you are _completely_ done with this project and do **not** ever want to revisit this notebook, you can choose to delete all of your SageMaker notebook instances and models by following [these instructions](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html). Before you delete this notebook instance, I recommend at least downloading a copy and saving it, locally.
---
## Further Directions
There are many ways to improve or add on to this project to expand your learning or make this more of a unique project for you. A few ideas are listed below:
* Train a classifier to predict the *category* (1-3) of plagiarism and not just plagiarized (1) or not (0).
* Utilize a different and larger dataset to see if this model can be extended to other types of plagiarism.
* Use language or character-level analysis to find different (and more) similarity features.
* Write a complete pipeline function that accepts a source text and submitted text file, and classifies the submitted text as plagiarized or not.
* Use API Gateway and a lambda function to deploy your model to a web application.
These are all just options for extending your work. If you've completed all the exercises in this notebook, you've completed a real-world application, and can proceed to submit your project. Great job!
| github_jupyter |
## 1. Introduction
We will reimplement the methodology of the paper in Python.
## 2. Preliminary Concepts
Initially, we will recreate the basic variables defined in the paper. To make calculations easier, we will use NaNs instead of zeros if a movie is not rated by a user.
```
import numpy as np
# MovieLens 1M dimensions: 6040 users x 3952 movies.
m = 6040 # users
n = 3952 # movies
# NaN marks "not rated" (rather than 0) so np.nanmean/np.nansum skip missing entries.
Rnan = np.full((m, n), np.nan) # Ratings matrix with nans instead of 0s
```
We read the data from the ratings file.
```
import io
# Read the data into the rating matrix
# ratings.dat lines look like "UserID::MovieID::Rating::Timestamp"; IDs are
# 1-based in the file, hence the -1 when indexing Rnan.
with open('ml-1m/ratings.dat', 'r') as fp:
    for line in iter(fp.readline, ''):
        l = line.split('::')
        Rnan[int(l[0])-1,int(l[1])-1] = int(l[2])
```
We continue defining functions as per the paper. $O_u$ is the item (movie) set of the user $u_i$ and $U_o$ is the user set of the item (movie) $o_j$.
```
def O_u(u_i, R = Rnan):
    """Indices of the items (movies) rated by user u_i — the set O_u from the paper."""
    rated_mask = ~np.isnan(R[u_i, :])
    return np.nonzero(rated_mask)

def U_o(o_j, R = Rnan):
    """Indices of the users that rated item (movie) o_j — the set U_o from the paper."""
    rater_mask = ~np.isnan(R[:, o_j])
    return np.nonzero(rater_mask)

print(O_u(0))
print(U_o(0))
def calc_r_bar_v(R = Rnan):
    """Per-user mean rating vector, ignoring missing (NaN) entries."""
    return np.nanmean(R, axis = 1)

def calc_r_bar(R = Rnan):
    """Global mean rating over all observed entries."""
    return np.nanmean(R)

r_bar_v = calc_r_bar_v()
r_bar = calc_r_bar()
print('Average rating:',r_bar)
print('Average rating for user 0:',r_bar_v[0])
```
We define the prediction function.
```
def calc_r_hat(u_t, o_j, c_t, R = Rnan, r_bar_v = r_bar_v, r_bar = r_bar):
    """Predict user u_t's rating of movie o_j.

    Baseline (user mean minus global mean) plus a similarity-weighted average
    of the ratings other users gave o_j.

    u_t -> target user
    o_j -> target movie
    c_t -> similarity vector of t to all users
    """
    U_oj = U_o(o_j, R = R)
    weighted_ratings = np.nansum(c_t[U_oj] * R[U_oj, o_j])
    total_similarity = np.nansum(c_t[U_oj])
    return r_bar_v[u_t] - r_bar + weighted_ratings / total_similarity

print('Rating of user 0 on movie 0:', Rnan[0,0])
print('Estimated rating (using uniform user similarity):', calc_r_hat(0,0, np.ones(m)))
```
## 3. Random Walk
Instead of defining a probability function from user $u_i$ to movie $o_j$, we calculate the probabilities beforehand and store them in a matrix.
```
def calc_P_uo(R = Rnan):
    """Type-1 transition matrix: user u_i jumps uniformly at random to one of
    the movies they rated (row-normalized rating-indicator matrix)."""
    rated = 1 - np.isnan(R)                                   # 1 where a rating exists
    counts = np.sum(rated, axis = 1).reshape(R.shape[0], 1)   # ratings per user
    return rated / counts

P_uo = calc_P_uo()
print(P_uo)
```
We also define the rating similarity matrix for the user $u_i$. The computed ratings are only numbers if $u_i$ and $u_k$ both have a rating for that item, where $k\in [0..m]$.
```
MAXSCORE = 5

def sim(u_i, R = Rnan):
    """Rating-similarity matrix of every user to u_i, per item: MAXSCORE minus
    the absolute rating difference. Entries are NaN unless both users rated the item."""
    rating_diff = np.absolute(R[u_i, :] - R)
    return MAXSCORE - rating_diff

sim(0)
```
Using the rating similarity matrix, we can quickly compute the total similarity score for each item $o_j$ over all users, given $u_i$. By not including NaNs, we are calculating the denominators of the Type 2 probabilities.
```
def sum_sim(u_i, R = Rnan):
    """Total similarity mass on each item o_j over all users, given base user u_i
    (the denominators of the Type-2 transition probabilities)."""
    similarities = sim(u_i, R = R)
    return np.nansum(similarities, axis = 0)

sum_sim(0)
```
We can now define the probability function from item (movie) $o_j$ to user $u_k$, given the previous transition was from $u_i$ to $o_j$. Again, we calculate a transition probability matrix to lessen the number of computations. Note that we actually return the transpose of the transition probability matrix, to ease further calculations.
```
def P_ou(u_i, R = Rnan):
    """
    Type-2 transition matrix (returned transposed): probability of stepping from
    movie o_j back to user u_k, given the walk previously left user u_i.
    Axis 0 is still the user and axis 1 the movie. Columns with zero total
    similarity divide to NaN; the fp warnings are deliberately silenced.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        similarities = sim(u_i, R = R)
        column_totals = np.nansum(similarities, axis = 0)
        return similarities / column_totals

P_ou(0)
```
We then calculate the transition probability from a user $u_i$ to any other user $u_k$.
```
def p(u_i, R = Rnan, P_uo = P_uo):
    """One-step user-to-user transition probabilities starting from u_i:
    composes the user->movie step (row u_i of P_uo) with the movie->user
    step (P_ou), summing over movies."""
    movie_to_user = P_ou(u_i, R = R)
    return np.nansum(P_uo[u_i] * movie_to_user, axis = 1)

p(0)
```
The calculated transition probabilities are stacked on top of each other to build the transition probability matrix $P$.
```
def construct_P(R = Rnan):
    """Build the m-by-m user-to-user transition probability matrix P.

    Row u_i holds the one-step transition probabilities from user u_i to every
    user, obtained by composing the user->movie and movie->user walks.

    Parameters
    ----------
    R : ndarray
        Ratings matrix (NaN = unrated).

    Returns
    -------
    ndarray of shape (R.shape[0], R.shape[0])
    """
    P_uo = calc_P_uo(R = R)
    # Fix: forward R to p() (was hard-coded to the global Rnan, which silently
    # ignored the held-out ratings when rebuilding P for cross-validation folds).
    l = [p(u_i, R = R, P_uo = P_uo) for u_i in range(R.shape[0])]
    return np.vstack(l)

P = construct_P()
print(P)
```
Since constructing this matrix takes time, we save it on the local machine, so that we don't have to calculate it again.
```
np.save('P.npy', P)
P = np.load('P.npy')
print(P)

def get_P(R = Rnan, ind = None):
    """Return the transition matrix P, loading it from its cache file
    ('P.npy' or 'P<ind>.npy'); on a cache miss, build it and save it first."""
    file_name = 'P.npy' if ind is None else 'P' + str(ind) + '.npy'
    try:
        return np.load(file_name)
    except FileNotFoundError:
        fresh_P = construct_P(R = R)
        np.save(file_name, fresh_P)
        return fresh_P
```
## 4. Sampling Algorithm
We create a random test set of 5 numbers for our initial walk.
```
size_ts = 5
# Draw 5 random user indices to act as the test set for the initial walk demo.
# (np.random.randint samples with replacement, so duplicates are possible.)
ts = np.random.randint(m, size=(size_ts,))
ts
```
We swap the test data to the beginning of the transition probability matrix.
```
def front_swap(M, inds, dim = 1):
    """Swap the entries of M selected by ``inds`` with its leading entries.

    For dim == 1 this exchanges elements 0..len(inds)-1 with elements inds[...].

    NOTE(review): for dim == 2 the paired index arrays form fancy indexing, which
    selects only the positions (i, i) and (inds[i], inds[i]) — i.e. element-wise
    pairs, not whole rows *and* columns. Confirm this matches the intended
    symmetric reordering of P (moving test users to the front of both axes).
    """
    M_new = np.copy(M)
    # Indices 0..k-1 along each of `dim` axes, paired against `inds`.
    swap_inds = [np.arange(inds.shape[0]) for i in range(dim)]
    orig_inds = [inds for i in range(dim)]
    # Three-step swap via a temporary copy.
    temp = M_new[swap_inds]
    M_new[swap_inds] = M_new[orig_inds]
    M_new[orig_inds] = temp
    return M_new

P_new = front_swap(P, ts, dim = 2)
P_new.shape
```
We can now calculate $P^*$ and $\pi_{\tau.}$.
```
# P*: transitions restricted to training users (test rows/columns removed).
P_star = P_new[size_ts:, size_ts:]
P_star.shape
# pi: transition probabilities from each test user into the training users.
pi = P_new[:size_ts, size_ts:]
pi.shape
```
### 1. Precomputing
```
def walk(N, n_users, P_star, alpha = 0.9):
    """Monte-Carlo estimate of average visit counts between training users.

    Runs N independent random walks starting from every training user and
    counts, per (start, visited) pair, how often each user is visited.

    Parameters
    ----------
    N : int
        Number of walk repetitions per user.
    n_users : int
        Number of training users (rows/cols of P_star).
    P_star : ndarray (n_users, n_users)
        Transition probabilities among training users (re-normalized here).
    alpha : float
        Per-step stopping parameter; see note below.

    Returns
    -------
    ndarray (n_users, n_users) — visit counts averaged over the N runs.
    """
    # Fix: np.float was removed in NumPy 1.24; the builtin float is the same dtype (float64).
    W = np.zeros((n_users, n_users), dtype = float)  # The weight matrix for training set
    norm_P_star = P_star / (np.sum(P_star, axis = 1).reshape((n_users), 1))  # Normalize the probabilities
    for r in range(N):  # Do N runs for each training user
        users = np.arange(n_users)  # Create the currently running users
        cur_users = np.copy(users)  # The current user after starting from the running user itself.
        while users.shape[0] > 0:  # While there are currently running users
            for u in users:  # Walk for each user
                u_new = np.random.choice(n_users, 1, p = norm_P_star[cur_users[u], :])[0]  # Jump to a new user
                cur_users[u] = u_new
                W[u, u_new] += 1  # Increment the total number of visits to u_new starting from u
            # NOTE(review): walks *continue* with probability 1 - alpha per step
            # (stop with probability alpha). If the paper's alpha is the
            # continuation probability, this comparison should be `< alpha` — confirm.
            cont = np.random.rand(users.shape[0]) > alpha  # Finish runs with alpha probability
            users = users[cont]
    return W / N  # Calculate the average # of visits
# Simulate m runs per training user (all users except the 5 test users) and cache W.
W = walk(m, m - size_ts, P_star)
np.save('W.npy',W)
def get_W(N, n_users, P_star, alpha = 0.9, ind = None):
    """Return the average-visit matrix W from its cache file ('W.npy' or
    'W<ind>.npy'); on a cache miss, run the random-walk simulation and save it."""
    file_name = 'W.npy' if ind is None else 'W' + str(ind) + '.npy'
    try:
        return np.load(file_name)
    except FileNotFoundError:
        fresh_W = walk(N, n_users, P_star, alpha = alpha)
        np.save(file_name, fresh_W)
        return fresh_W
ts[0]
# Similarity of the first test user to every training user: alpha * sum_k pi[0,k] * W[k,:].
c = 0.9*np.sum(pi[0] * W.T, axis = 1)
print(c)
print(c.shape)
# Prepend zeros for the test-user slots, then swap them back to their original positions.
c = np.hstack((np.zeros(size_ts), c))
print(c)
print(c.shape)
c_new = front_swap(c, ts, dim = 1)
print(c_new)
print(c_new.shape)
def get_C(W, pi, test_set, alpha, ind = None):
    """Return the test-user similarity matrix C from its cache file ('C.npy' or
    'C<ind>.npy'); on a cache miss, compute one row per test user and save it."""
    file_name = 'C.npy' if ind is None else 'C' + str(ind) + '.npy'
    try:
        return np.load(file_name)
    except FileNotFoundError:
        size_ts = test_set.shape[0]
        rows = []
        for k in range(size_ts):
            # similarity of test user k to every training user, scaled by alpha
            sims = alpha * np.sum(pi[k] * W.T, axis = 1)
            # pad the test-user slots with zeros and restore the original ordering
            padded = np.hstack((np.zeros(size_ts), sims))
            rows.append(front_swap(padded, test_set, dim = 1))
        C = np.vstack(rows)
        np.save(file_name, C)
        return C

C = get_C(W, pi, ts, 0.9)
C
def mean_absolute_error(C, test_set, held_out, R = Rnan):
    """Mean absolute error of predicted vs. actual ratings on the held-out items.

    Parameters
    ----------
    C : ndarray (len(test_set), m)
        Similarity rows for each test user.
    test_set : ndarray
        Indices of the test users.
    held_out : ndarray (m, n) of bool
        True where a rating is held out for evaluation.
    R : ndarray
        Ratings matrix used as ground truth.

    Returns
    -------
    ndarray — one MAE per test user.
    """
    maes = np.zeros(test_set.shape[0])
    r_bar_v = calc_r_bar_v(R = R)
    r_bar = calc_r_bar(R = R)
    for c_ind, u_i in enumerate(test_set):
        r_act = R[u_i, held_out[u_i]]
        # Fix: enumerate held-out *items* directly. held_out[u_i] is a length-n
        # item mask, so masking np.arange(held_out.shape[0]) (length m) was an
        # off-by-axis bug that modern NumPy rejects (mask/array length mismatch).
        ojs = np.nonzero(held_out[u_i])[0]
        # NOTE(review): calc_r_hat falls back to the global Rnan here rather than
        # this R — confirm that predictions are meant to read the full matrix.
        r_hat = np.array([calc_r_hat(u_i, o_j, C[c_ind], r_bar_v = r_bar_v, r_bar = r_bar) for o_j in ojs])
        maes[c_ind] = np.nanmean(np.absolute(r_act - r_hat))
    return maes

mean_absolute_error(C, ts, np.ones((m,n), dtype = np.bool_))
def get_MAE(C, test_set, held_out, R = Rnan, ind = None):
    """Return the per-user MAE array from its cache file ('MAE.npy' or
    'MAE<ind>.npy'); on a cache miss, compute it and save it."""
    file_name = 'MAE.npy' if ind is None else 'MAE' + str(ind) + '.npy'
    try:
        return np.load(file_name)
    except FileNotFoundError:
        fresh_MAE = mean_absolute_error(C, test_set, held_out, R = R)
        np.save(file_name, fresh_MAE)
        return fresh_MAE
```
## 5. Experiments
We do a 4-fold cross validation. We save the intermediary matrices to files, since the calculations take a long time and we want to be able to continue from where we left off even if we stop the program at a given time.
```
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20; on
# modern versions use sklearn.model_selection.KFold(n_splits=...).split(range(m)).
from sklearn.cross_validation import KFold
def generate_test_sets(n_folds = 4):
    """Split the m users into n_folds random folds; cache each fold's test
    indices as TS<i>.npy and return the list of test-index arrays."""
    # Fix: honor the n_folds argument (was hard-coded to 4, silently ignoring it).
    kfold = KFold(m, n_folds = n_folds, shuffle = True)
    i = 0
    l = []
    for train_set, test_set in kfold:
        np.save('TS'+str(i)+'.npy', test_set)
        l.append(test_set)
        i += 1
    return l
generate_test_sets(4)
def load_test_sets(n_folds = 4):
    """Load the cached per-fold test index arrays TS0..TS<n_folds-1>."""
    sets = []
    for i in range(n_folds):
        sets.append(np.load('TS' + str(i) + '.npy'))
    return sets

test_sets = load_test_sets(4)
test_sets

def get_test_sets(n_folds = 4):
    """Return cached test folds, generating (and caching) them on first use."""
    try:
        return load_test_sets(n_folds)
    except FileNotFoundError:
        return generate_test_sets(n_folds)
```
For each test user, we hold out 90% of their rated items.
```
def generate_held_outs(test_sets, R = Rnan, p = 0.9):
    """For every test user in each fold, randomly mark a fraction p of their
    rated items as held out. Caches one boolean (m, n) mask per fold as
    HO<i>.npy and returns the list of masks."""
    masks = []
    for i, test_set in enumerate(test_sets):
        ho = np.zeros((m, n), dtype = np.bool_)
        for test_user in test_set:
            items = O_u(test_user, R = R)[0]
            np.random.shuffle(items)
            n_held = int(p * items.shape[0])
            ho[test_user, items[:n_held]] = 1
        masks.append(ho)
        np.save('HO' + str(i) + '.npy', ho)
    return masks

np.sum(generate_held_outs(test_sets)[0])
def load_held_outs(n_folds = 4):
    """Load the cached held-out masks HO0..HO<n_folds-1>."""
    masks = []
    for i in range(n_folds):
        masks.append(np.load('HO' + str(i) + '.npy'))
    return masks

held_outs = load_held_outs(4)
held_outs

def get_held_outs(test_sets, R = Rnan, p = 0.9):
    """Return cached held-out masks, generating (and caching) them on first use."""
    try:
        return load_held_outs(len(test_sets))
    except FileNotFoundError:
        return generate_held_outs(test_sets, R = R, p = p)
def generate_maes(n_folds = 4, R = Rnan, held_ratio = 0.9, alpha = 0.9):
    """Run the full n_folds cross-validation pipeline and return one MAE array
    per fold. Every intermediate (P, W, C, MAE) is cached to disk per fold so
    an interrupted run can resume."""
    test_sets = get_test_sets(n_folds)
    held_outs = get_held_outs(test_sets, R = R, p = held_ratio)
    maes = []
    for i, (test_set, held_out) in enumerate(zip(test_sets, held_outs)):
        # Blank out the held-out ratings so they cannot leak into P.
        R_test = np.copy(R)
        R_test[held_out] = np.nan
        P = get_P(R = R_test, ind = i)
        size_ts = test_set.shape[0]
        # Move this fold's test users to the front of P, then split off P* and pi.
        P_new = front_swap(P, test_set, dim = 2)
        P_star = P_new[size_ts:, size_ts:]
        pi = P_new[:size_ts, size_ts:]
        W = get_W(m, m - size_ts, P_star, alpha = alpha, ind = i)
        C = get_C(W, pi, test_set, alpha, ind = i)
        # Evaluation reads the full R (held-out ratings serve as ground truth).
        MAE = get_MAE(C, test_set, held_out, R = R, ind = i)
        maes.append(MAE)
    return maes

# Raise on divide/invalid so any degenerate normalization surfaces immediately.
with np.errstate(divide='raise', invalid='raise'):
    maes = generate_maes()
C0 = np.load('C0.npy')
print(C0)
```
| github_jupyter |
# MNIST Image Classification with TensorFlow on Cloud ML Engine
This notebook demonstrates how to implement different image models on MNIST using Estimator.
Note the MODEL_TYPE; change it to try out different models
```
import os
# GCP configuration — replace the placeholders with your own project/bucket.
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
MODEL_TYPE='dnn' # 'linear', 'dnn', 'dnn_dropout', or 'cnn'
# do not change these
# Export the settings as environment variables so the %bash cells can read them.
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['MODEL_TYPE'] = MODEL_TYPE
os.environ['TFVERSION'] = '1.8' # Tensorflow version
%bash
# Point the gcloud CLI at the configured project and compute region.
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
```
## Run as a Python module
In the previous notebook (mnist_linear.ipynb) we ran our code directly from the notebook.
Now since we want to run our code on Cloud ML Engine, we've packaged it as a python module.
The `model.py` and `task.py` containing the model code is in <a href="mnistmodel/trainer">mnistmodel/trainer</a>
**Let's first run it locally for a few steps to test the code.**
```
%bash
# Clean previous artifacts, then smoke-test the trainer package locally for 100 steps.
rm -rf mnistmodel.tar.gz mnist_trained
gcloud ml-engine local train \
   --module-name=trainer.task \
   --package-path=${PWD}/mnistmodel/trainer \
   -- \
   --output_dir=${PWD}/mnist_trained \
   --train_steps=100 \
   --learning_rate=0.01 \
   --model=$MODEL_TYPE
```
**Now, let's do it on Cloud ML Engine so we can train on GPU (`--scale-tier=BASIC_GPU`)**
Note the GPU speed up depends on the model type. You'll notice the more complex CNN model trains significantly faster on GPU, however the speed up on the simpler models is not as pronounced.
```
%bash
# Submit the same trainer to Cloud ML Engine on a single GPU (BASIC_GPU tier).
# The output dir is wiped first so checkpoints from older runs don't interfere.
OUTDIR=gs://${BUCKET}/mnist/trained_${MODEL_TYPE}
JOBNAME=mnist_${MODEL_TYPE}_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
   --region=$REGION \
   --module-name=trainer.task \
   --package-path=${PWD}/mnistmodel/trainer \
   --job-dir=$OUTDIR \
   --staging-bucket=gs://$BUCKET \
   --scale-tier=BASIC_GPU \
   --runtime-version=$TFVERSION \
   -- \
   --output_dir=$OUTDIR \
   --train_steps=10000 --learning_rate=0.01 --train_batch_size=512 \
   --model=$MODEL_TYPE --batch_norm
```
## Monitoring training with TensorBoard
Use this cell to launch tensorboard
```
from google.datalab.ml import TensorBoard
# Launch TensorBoard against the training output dir on GCS.
TensorBoard().start('gs://{}/mnist/trained_{}'.format(BUCKET, MODEL_TYPE))
# Stop every running TensorBoard instance afterwards.
for pid in TensorBoard.list()['pid']:
    TensorBoard().stop(pid)
    # Fix: the Python-2-only `print` statement breaks under Python 3; the
    # parenthesized call below prints the same text on both versions.
    print('Stopped TensorBoard with pid {}'.format(pid))
```
Here's what it looks like with a linear model for 10,000 steps:
<img src="images/eval_linear_10000.png" width="60%"/>
Here are my results:
Model | Accuracy | Time taken | Model description | Run time parameters
--- | :---: | :---: | --- | ---
linear | 91.53 | 3 min | | 100 steps, LR=0.01, Batch=512
linear | 92.73 | 8 min | | 1000 steps, LR=0.01, Batch=512
linear | 92.29 | 18 min | | 10000 steps, LR=0.01, Batch=512
dnn | 98.14 | 15 min | 300-100-30 nodes fully connected | 10000 steps, LR=0.01, Batch=512
dnn | 97.99 | 48 min | 300-100-30 nodes fully connected | 100000 steps, LR=0.01, Batch=512
dnn_dropout | 97.84 | 29 min | 300-100-30-DL(0.1)- nodes | 20000 steps, LR=0.01, Batch=512
cnn | 98.97 | 35 min | maxpool(10 5x5 cnn, 2)-maxpool(20 5x5 cnn, 2)-300-DL(0.25) | 20000 steps, LR=0.01, Batch=512
cnn | 98.93 | 35 min | maxpool(10 11x11 cnn, 2)-maxpool(20 3x3 cnn, 2)-300-DL(0.25) | 20000 steps, LR=0.01, Batch=512
cnn | 99.17 | 35 min | maxpool(10 11x11 cnn, 2)-maxpool(20 3x3 cnn, 2)-300-DL(0.25), batch_norm (logits only) | 20000 steps, LR=0.01, Batch=512
cnn | 99.27 | 35 min | maxpool(10 11x11 cnn, 2)-maxpool(20 3x3 cnn, 2)-300-DL(0.25), batch_norm (logits, deep) | 10000 steps, LR=0.01, Batch=512
cnn | 99.48 | 12 hr | as-above but nfil1=20, nfil2=27, dprob=0.1, lr=0.001, batchsize=233 | (hyperparameter optimization)
## Deploying and predicting with model
Deploy the model:
```
%bash
# Create (or update) the serving model from the latest exported SavedModel.
# The commented delete lines are needed when re-deploying an existing version.
MODEL_NAME="mnist"
MODEL_VERSION=${MODEL_TYPE}
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/mnist/trained_${MODEL_TYPE}/export/exporter | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version=$TFVERSION
```
To predict with the model, let's take one of the example images.
```
import json, codecs
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
HEIGHT=28
WIDTH=28
mnist = input_data.read_data_sets('mnist/data', one_hot=True, reshape=False)
IMGNO=5 #CHANGE THIS to get different images
# Serialize one test image as JSON in the {'image': [[...]]} shape the
# prediction service expects, then display it for reference.
jsondata = {'image': mnist.test.images[IMGNO].reshape(HEIGHT, WIDTH).tolist()}
json.dump(jsondata, codecs.open('test.json', 'w', encoding='utf-8'))
plt.imshow(mnist.test.images[IMGNO].reshape(HEIGHT, WIDTH));
```
Send it to the prediction service
```
%bash
# Send the serialized test image to the deployed model version for prediction.
gcloud ml-engine predict \
   --model=mnist \
   --version=${MODEL_TYPE} \
   --json-instances=./test.json
```
## DO NOT RUN anything beyond this point
This shows you what I did, but trying to repeat this will take several hours.
<br/>
## Hyperparameter tuning
This is what `hyperparam.yaml` looked like:
```
# Cloud ML Engine hyperparameter-tuning config: 30 trials (2 in parallel)
# maximizing the exported 'accuracy' metric on a custom GPU machine.
trainingInput:
  scaleTier: CUSTOM
  masterType: complex_model_m_gpu
  hyperparameters:
    goal: MAXIMIZE
    maxTrials: 30
    maxParallelTrials: 2
    hyperparameterMetricTag: accuracy
    # Each parameterName below must match a flag accepted by trainer.task.
    params:
    - parameterName: train_batch_size
      type: INTEGER
      minValue: 32
      maxValue: 512
      scaleType: UNIT_LINEAR_SCALE
    - parameterName: learning_rate
      type: DOUBLE
      minValue: 0.001
      maxValue: 0.1
      scaleType: UNIT_LOG_SCALE
    - parameterName: nfil1
      type: INTEGER
      minValue: 5
      maxValue: 20
      scaleType: UNIT_LINEAR_SCALE
    - parameterName: nfil2
      type: INTEGER
      minValue: 10
      maxValue: 30
      scaleType: UNIT_LINEAR_SCALE
    - parameterName: dprob
      type: DOUBLE
      minValue: 0.1
      maxValue: 0.6
      scaleType: UNIT_LINEAR_SCALE
```
This takes <b>13 hours and 250 ML Units</b>, so don't try this at home :)
The key thing is here the --config parameter.
```
%bash
# Same training submission as before, but driven by hyperparam.yaml (--config),
# which makes Cloud ML Engine run the tuning trials instead of one fixed job.
OUTDIR=gs://${BUCKET}/mnist/trained_${MODEL_TYPE}_hparam
JOBNAME=mnist_${MODEL_TYPE}_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
   --region=$REGION \
   --module-name=trainer.task \
   --package-path=${PWD}/mnistmodel/trainer \
   --job-dir=$OUTDIR \
   --staging-bucket=gs://$BUCKET \
   --runtime-version=$TFVERSION \
   --config hyperparam.yaml \
   -- \
   --output_dir=$OUTDIR \
   --model=$MODEL_TYPE --batch_norm
```
<pre>
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
</pre>
| github_jupyter |
# Batch Normalization
One way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train.
One idea along these lines is batch normalization which was proposed by [1] in 2015.
The idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However, even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.
The authors of [1] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [1] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.
It is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.
[1] [Sergey Ioffe and Christian Szegedy, "Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift", ICML 2015.](https://arxiv.org/abs/1502.03167)
```
# As usual, a bit of setup
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Maximum elementwise relative error between x and y, with the denominator
    floored at 1e-8 to avoid division by zero."""
    denom = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(np.abs(x - y) / denom)
def print_mean_std(x,axis=0):
    """Print the means and standard deviations of x along `axis`, then a blank line."""
    mean_vals = x.mean(axis=axis)
    std_vals = x.std(axis=axis)
    print(' means: ', mean_vals)
    print(' stds: ', std_vals)
    print()
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
# Print the shape of each split (X_train/y_train/X_val/y_val/X_test/y_test).
for k, v in data.items():
    print('%s: ' % k, v.shape)
```
## Batch normalization: forward
In the file `cs231n/layers.py`, implement the batch normalization forward pass in the function `batchnorm_forward`. Once you have done so, run the following to test your implementation.
Referencing the paper linked to above in [1] may be helpful!
```
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
np.random.seed(231)
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
# Pre-batchnorm activations of a 2-layer ReLU net.
a = np.maximum(0, X.dot(W1)).dot(W2)
print('Before batch normalization:')
print_mean_std(a,axis=0)
# Identity scale/shift: output should be standardized.
gamma = np.ones((D3,))
beta = np.zeros((D3,))
# Means should be close to zero and stds close to one
print('After batch normalization (gamma=1, beta=0)')
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print_mean_std(a_norm,axis=0)
# Non-trivial scale/shift: output statistics should match gamma/beta exactly.
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
# Now means should be close to beta and stds close to gamma
print('After batch normalization (gamma=', gamma, ', beta=', beta, ')')
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print_mean_std(a_norm,axis=0)
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
np.random.seed(231)
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
# 50 training-mode passes to warm up the running mean/var estimates.
for t in range(50):
    X = np.random.randn(N, D1)
    a = np.maximum(0, X.dot(W1)).dot(W2)
    batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After batch normalization (test-time):')
print_mean_std(a_norm,axis=0)
```
## Batch normalization: backward
Now implement the backward pass for batch normalization in the function `batchnorm_backward`.
To derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.
Once you have finished, run the following to numerically check your backward pass.
```
# Gradient check batchnorm backward pass
np.random.seed(231)
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
# Forward pass as a function of each input in turn, for numeric differentiation.
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, a, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma.copy(), dout)
db_num = eval_numerical_gradient_array(fb, beta.copy(), dout)
# Analytic gradients from the implemented backward pass.
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
#You should expect to see relative errors between 1e-13 and 1e-8
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
```
## Batch normalization: alternative backward
In class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For example, you can derive a very simple formula for the sigmoid function's backward pass by simplifying gradients on paper.
Surprisingly, it turns out that you can do a similar simplification for the batch normalization backward pass too!
In the forward pass, given a set of inputs $X=\begin{bmatrix}x_1\\x_2\\...\\x_N\end{bmatrix}$,
we first calculate the mean $\mu$ and variance $v$.
With $\mu$ and $v$ calculated, we can calculate the standard deviation $\sigma$ and normalized data $Y$.
The equations and graph illustration below describe the computation ($y_i$ is the i-th element of the vector $Y$).
\begin{align}
& \mu=\frac{1}{N}\sum_{k=1}^N x_k & v=\frac{1}{N}\sum_{k=1}^N (x_k-\mu)^2 \\
& \sigma=\sqrt{v+\epsilon} & y_i=\frac{x_i-\mu}{\sigma}
\end{align}
<img src="notebook_images/batchnorm_graph.png" width=691 height=202>
The meat of our problem during backpropagation is to compute $\frac{\partial L}{\partial X}$, given the upstream gradient we receive, $\frac{\partial L}{\partial Y}.$ To do this, recall the chain rule in calculus gives us $\frac{\partial L}{\partial X} = \frac{\partial L}{\partial Y} \cdot \frac{\partial Y}{\partial X}$.
The unknown/hard part is $\frac{\partial Y}{\partial X}$. We can find this by first deriving step-by-step our local gradients at
$\frac{\partial v}{\partial X}$, $\frac{\partial \mu}{\partial X}$,
$\frac{\partial \sigma}{\partial v}$,
$\frac{\partial Y}{\partial \sigma}$, and $\frac{\partial Y}{\partial \mu}$,
and then use the chain rule to compose these gradients (which appear in the form of vectors!) appropriately to compute $\frac{\partial Y}{\partial X}$.
If it's challenging to directly reason about the gradients over $X$ and $Y$ which require matrix multiplication, try reasoning about the gradients in terms of individual elements $x_i$ and $y_i$ first: in that case, you will need to come up with the derivations for $\frac{\partial L}{\partial x_i}$, by relying on the Chain Rule to first calculate the intermediate $\frac{\partial \mu}{\partial x_i}, \frac{\partial v}{\partial x_i}, \frac{\partial \sigma}{\partial x_i},$ then assemble these pieces to calculate $\frac{\partial y_i}{\partial x_i}$.
You should make sure each of the intermediary gradient derivations are all as simplified as possible, for ease of implementation.
After doing so, implement the simplified batch normalization backward pass in the function `batchnorm_backward_alt` and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.
```
np.random.seed(231)
N, D = 100, 500
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
# Time the graph-based backward pass against the simplified analytic one;
# the gradients must agree while the alternative should be faster.
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
print('dx difference: ', rel_error(dx1, dx2))
print('dgamma difference: ', rel_error(dgamma1, dgamma2))
print('dbeta difference: ', rel_error(dbeta1, dbeta2))
print('speedup: %.2fx' % ((t2 - t1) / (t3 - t2)))
```
## Fully Connected Nets with Batch Normalization
Now that you have a working implementation for batch normalization, go back to your `FullyConnectedNet` in the file `cs231n/classifiers/fc_net.py`. Modify your implementation to add batch normalization.
Concretely, when the `normalization` flag is set to `"batchnorm"` in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.
HINT: You might find it useful to define an additional helper layer similar to those in the file `cs231n/layer_utils.py`. If you decide to do so, do it in the file `cs231n/classifiers/fc_net.py`.
```
np.random.seed(231)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
# You should expect losses between 1e-4~1e-10 for W,
# losses between 1e-08~1e-10 for b,
# and losses between 1e-08~1e-09 for beta and gammas.
# Check gradients with and without L2 regularization.
for reg in [0, 3.14]:
    print('Running check with reg = ', reg)
    model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                              reg=reg, weight_scale=5e-2, dtype=np.float64,
                              normalization='batchnorm')
    loss, grads = model.loss(X, y)
    print('Initial loss: ', loss)
    # Numerically check each parameter's analytic gradient.
    for name in sorted(grads):
        f = lambda _: model.loss(X, y)[0]
        grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
        print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
    if reg == 0: print()
```
# Batchnorm for deep networks
Run the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.
```
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
# Use only 1000 training examples so the effect of batchnorm shows quickly.
num_train = 1000
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}
weight_scale = 2e-2
# Identical architectures, differing only in whether batchnorm is enabled.
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, normalization='batchnorm')
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, normalization=None)
print('Solver with batch norm:')
bn_solver = Solver(bn_model, small_data,
                   num_epochs=10, batch_size=50,
                   update_rule='adam',
                   optim_config={
                       'learning_rate': 1e-3,
                   },
                   verbose=True,print_every=20)
bn_solver.train()
print('\nSolver without batch norm:')
solver = Solver(model, small_data,
                num_epochs=10, batch_size=50,
                update_rule='adam',
                optim_config={
                    'learning_rate': 1e-3,
                },
                verbose=True, print_every=20)
solver.train()
```
Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
```
def plot_training_history(title, label, baseline, bn_solvers, plot_fn, bl_marker='.', bn_marker='.', labels=None):
    """Plot one training metric for a baseline solver and several normalized solvers.

    title / label: figure title and x-axis label.
    baseline: solver trained without normalization.
    bn_solvers: list of solvers trained with normalization.
    plot_fn: extracts the metric history from a solver (e.g. ``lambda s: s.loss_history``).
    bl_marker / bn_marker: matplotlib format strings for the two curve families.
    labels: optional per-solver suffixes appended to the legend entries.
    """
    plt.title(title)
    plt.xlabel(label)
    norm_histories = [plot_fn(s) for s in bn_solvers]
    baseline_history = plot_fn(baseline)
    num_bn = len(norm_histories)
    # One curve per normalized solver.
    for idx, history in enumerate(norm_histories):
        legend_text = 'with_norm'
        if labels is not None:
            legend_text += str(labels[idx])
        plt.plot(history, bn_marker, label=legend_text)
    # Single baseline curve; its suffix reuses the first label, as originally.
    legend_text = 'baseline'
    if labels is not None:
        legend_text += str(labels[0])
    plt.plot(baseline_history, bl_marker, label=legend_text)
    plt.legend(loc='lower center', ncol=num_bn + 1)
# Stack loss / train accuracy / val accuracy for the two solvers trained above.
plt.subplot(3, 1, 1)
plot_training_history('Training loss','Iteration', solver, [bn_solver], \
lambda x: x.loss_history, bl_marker='o', bn_marker='o')
plt.subplot(3, 1, 2)
plot_training_history('Training accuracy','Epoch', solver, [bn_solver], \
lambda x: x.train_acc_history, bl_marker='-o', bn_marker='-o')
plt.subplot(3, 1, 3)
plot_training_history('Validation accuracy','Epoch', solver, [bn_solver], \
lambda x: x.val_acc_history, bl_marker='-o', bn_marker='-o')
plt.gcf().set_size_inches(15, 15)
plt.show()
```
# Batch normalization and initialization
We will now run a small experiment to study the interaction of batch normalization and weight initialization.
The first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second cell will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.
```
# Sweep 20 weight-initialization scales (1e-4 .. 1) for an 8-layer net,
# training a batchnorm and a plain model at each scale, then plot best
# accuracies and final loss against the scale.
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers_ws = {}
solvers_ws = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, normalization='batchnorm')
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, normalization=None)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers_ws[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers_ws[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
# Summarize each run: best accuracies over training, mean of last 100 losses.
for ws in weight_scales:
best_train_accs.append(max(solvers_ws[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers_ws[ws].train_acc_history))
best_val_accs.append(max(solvers_ws[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers_ws[ws].val_acc_history))
final_train_loss.append(np.mean(solvers_ws[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers_ws[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.legend()
plt.gca().set_ylim(1.0, 3.5)
plt.gcf().set_size_inches(15, 15)
plt.show()
```
## Inline Question 1:
Describe the results of this experiment. How does the scale of weight initialization affect models with/without batch normalization differently, and why?
## Answer:
With batch normalization, the network still reaches a reasonable result even when the weight initialization scale is poorly chosen; without batch normalization, training is very sensitive to the weight initialization scale.
# Batch normalization and batch size
We will now run a small experiment to study the interaction of batch normalization and batch size.
The first cell will train 6-layer networks both with and without batch normalization using different batch sizes. The second cell will plot training accuracy and validation set accuracy over time.
```
def run_batchsize_experiments(normalization_mode):
    """Train a deep net without normalization (at the smallest batch size) and
    with ``normalization_mode`` at several batch sizes.

    Returns (list of normalized solvers, baseline solver, batch sizes tried).
    Relies on the notebook globals ``data``, ``FullyConnectedNet`` and ``Solver``.
    """
    np.random.seed(231)
    # Try training a very deep net with batchnorm
    hidden_dims = [100, 100, 100, 100, 100]
    num_train = 1000
    small_data = {
        'X_train': data['X_train'][:num_train],
        'y_train': data['y_train'][:num_train],
        'X_val': data['X_val'],
        'y_val': data['y_val'],
    }
    n_epochs = 10
    weight_scale = 2e-2
    batch_sizes = [5, 10, 50]
    lr = 10 ** (-3.5)
    # Baseline: no normalization, trained at the smallest batch size only.
    baseline_bsize = batch_sizes[0]
    print('No normalization: batch size = ', baseline_bsize)
    baseline_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, normalization=None)
    baseline_solver = Solver(
        baseline_model, small_data,
        num_epochs=n_epochs, batch_size=baseline_bsize,
        update_rule='adam',
        optim_config={'learning_rate': lr},
        verbose=False,
    )
    baseline_solver.train()
    # Normalized models: one per batch size.
    bn_solvers = []
    for bsize in batch_sizes:
        print('Normalization: batch size = ', bsize)
        norm_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, normalization=normalization_mode)
        norm_solver = Solver(
            norm_model, small_data,
            num_epochs=n_epochs, batch_size=bsize,
            update_rule='adam',
            optim_config={'learning_rate': lr},
            verbose=False,
        )
        norm_solver.train()
        bn_solvers.append(norm_solver)
    return bn_solvers, baseline_solver, batch_sizes
# NOTE: this first assignment is immediately overwritten by the function's
# return value on the next line; it is redundant but harmless.
batch_sizes = [5,10,50]
bn_solvers_bsize, solver_bsize, batch_sizes = run_batchsize_experiments('batchnorm')
# Compare the baseline against each batchnorm run (one per batch size).
plt.subplot(2, 1, 1)
plot_training_history('Training accuracy (Batch Normalization)','Epoch', solver_bsize, bn_solvers_bsize, \
lambda x: x.train_acc_history, bl_marker='-^', bn_marker='-o', labels=batch_sizes)
plt.subplot(2, 1, 2)
plot_training_history('Validation accuracy (Batch Normalization)','Epoch', solver_bsize, bn_solvers_bsize, \
lambda x: x.val_acc_history, bl_marker='-^', bn_marker='-o', labels=batch_sizes)
plt.gcf().set_size_inches(15, 10)
plt.show()
```
## Inline Question 2:
Describe the results of this experiment. What does this imply about the relationship between batch normalization and batch size? Why is this relationship observed?
## Answer:
When using batch normalization, a larger batch size is preferable: with more samples per batch, the estimated mean and variance are more accurate.
# Layer Normalization
Batch normalization has proved to be effective in making networks easier to train, but the dependency on batch size makes it less useful in complex networks which have a cap on the input batch size due to hardware limitations.
Several alternatives to batch normalization have been proposed to mitigate this problem; one such technique is Layer Normalization [2]. Instead of normalizing over the batch, we normalize over the features. In other words, when using Layer Normalization, each feature vector corresponding to a single datapoint is normalized based on the sum of all terms within that feature vector.
[2] [Ba, Jimmy Lei, Jamie Ryan Kiros, and Geoffrey E. Hinton. "Layer Normalization." stat 1050 (2016): 21.](https://arxiv.org/pdf/1607.06450.pdf)
## Inline Question 3:
Which of these data preprocessing steps is analogous to batch normalization, and which is analogous to layer normalization?
1. Scaling each image in the dataset, so that the RGB channels for each row of pixels within an image sums up to 1.
2. Scaling each image in the dataset, so that the RGB channels for all pixels within an image sums up to 1.
3. Subtracting the mean image of the dataset from each image in the dataset.
4. Setting all RGB values to either 0 or 1 depending on a given threshold.
## Answer:
batch normalization 3
layer normalization 124
# Layer Normalization: Implementation
Now you'll implement layer normalization. This step should be relatively straightforward, as conceptually the implementation is almost identical to that of batch normalization. One significant difference though is that for layer normalization, we do not keep track of the moving moments, and the testing phase is identical to the training phase, where the mean and variance are directly calculated per datapoint.
Here's what you need to do:
* In `cs231n/layers.py`, implement the forward pass for layer normalization in the function `layernorm_forward`.
Run the cell below to check your results.
* In `cs231n/layers.py`, implement the backward pass for layer normalization in the function `layernorm_backward`.
Run the second cell below to check your results.
* Modify `cs231n/classifiers/fc_net.py` to add layer normalization to the `FullyConnectedNet`. When the `normalization` flag is set to `"layernorm"` in the constructor, you should insert a layer normalization layer before each ReLU nonlinearity.
Run the third cell below to run the batch size experiment on layer normalization.
```
# Check the training-time forward pass by checking means and variances
# of features both before and after layer normalization
# Simulate the forward pass for a two-layer network
np.random.seed(231)
N, D1, D2, D3 =4, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print('Before layer normalization:')
print_mean_std(a,axis=1)
gamma = np.ones(D3)
beta = np.zeros(D3)
# Means should be close to zero and stds close to one
print('After layer normalization (gamma=1, beta=0)')
a_norm, _ = layernorm_forward(a, gamma, beta, {'mode': 'train'})
print_mean_std(a_norm,axis=1)
gamma = np.asarray([3.0,3.0,3.0])
beta = np.asarray([5.0,5.0,5.0])
# Now means should be close to beta and stds close to gamma
print('After layer normalization (gamma=', gamma, ', beta=', beta, ')')
a_norm, _ = layernorm_forward(a, gamma, beta, {'mode': 'train'})
print_mean_std(a_norm,axis=1)
# Gradient check the layernorm backward pass (this comment previously said
# "batchnorm", but the functions exercised below are layernorm_*).
np.random.seed(231)
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
ln_param = {}
fx = lambda x: layernorm_forward(x, gamma, beta, ln_param)[0]
fg = lambda a: layernorm_forward(x, a, beta, ln_param)[0]
fb = lambda b: layernorm_forward(x, gamma, b, ln_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma.copy(), dout)
db_num = eval_numerical_gradient_array(fb, beta.copy(), dout)
_, cache = layernorm_forward(x, gamma, beta, ln_param)
dx, dgamma, dbeta = layernorm_backward(dout, cache)
#You should expect to see relative errors between 1e-12 and 1e-8
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
```
# Layer Normalization and batch size
We will now run the previous batch size experiment with layer normalization instead of batch normalization. Compared to the previous experiment, you should see a markedly smaller influence of batch size on the training history!
```
# Repeat the batch-size experiment with layer normalization instead of batchnorm.
ln_solvers_bsize, solver_bsize, batch_sizes = run_batchsize_experiments('layernorm')
plt.subplot(2, 1, 1)
plot_training_history('Training accuracy (Layer Normalization)','Epoch', solver_bsize, ln_solvers_bsize, \
lambda x: x.train_acc_history, bl_marker='-^', bn_marker='-o', labels=batch_sizes)
plt.subplot(2, 1, 2)
plot_training_history('Validation accuracy (Layer Normalization)','Epoch', solver_bsize, ln_solvers_bsize, \
lambda x: x.val_acc_history, bl_marker='-^', bn_marker='-o', labels=batch_sizes)
plt.gcf().set_size_inches(15, 10)
plt.show()
```
## Inline Question 4:
When is layer normalization likely to not work well, and why?
1. Using it in a very deep network
2. Having a very small dimension of features
3. Having a high regularization term
## Answer:
2. If the feature dimension is small, the per-datapoint mean and variance are computed over very few values, so they are noisy and fail to reflect the true feature statistics.
| github_jupyter |
There was some problem in building the previous .ttl file. I'll redo the steps and debug.
Now that the entities are on Wikidata, while there is no has_positive_marker property there, we can make a local RDF file using Wikidata IDs.
```
import pandas as pd
# Reconciliation tables mapping PanglaoDB gene symbols / cell-type names to
# Wikidata IDs, plus the raw PanglaoDB marker table.
gene_reference = pd.read_csv("../results/human_gene_reference_from_panglao_to_wikidata_04_11_2020.csv")
cell_type_reference = pd.read_csv("../results/cell_type_reference_from_panglao_to_wikidata_31_10_2020.csv")
markers = pd.read_csv("../data/PanglaoDB_markers_27_Mar_2020.tsv", sep="\t")
```
We want, though, the species specific cell types. Let's get them.
```
from wikidata2df import wikidata2df
# SPARQL: human cell types (P703 = found in taxon human, Q15978631) together
# with a parent cell type. NOTE: this first attempt uses wdt:P361 ("part of");
# the prose below identifies this as wrong -- it should be wdt:P279
# ("subclass of"), fixed in the next cell.
query = """
SELECT ?item ?itemLabel ?superclass
WHERE
{
?item wdt:P31 wd:Q189118.
?item wdt:P361 ?superclass.
?superclass wdt:P31 wd:Q189118. # cell type
?item wdt:P703 wd:Q15978631. # human cell type
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
"""
dataframe_to_join = wikidata2df(query)
dataframe_to_join
```
The query syntax is wrong! It is looking for "part of" while it should be looking for "subclass of"
I'll fix the 9 items and re-do the query.
```
from wikidata2df import wikidata2df
# Corrected SPARQL: same as the previous cell but with wdt:P279 ("subclass of")
# so each species-specific human cell type links to its general cell type.
query = """
SELECT ?item ?itemLabel ?superclass
WHERE
{
?item wdt:P31 wd:Q189118.
?item wdt:P279 ?superclass.
?superclass wdt:P31 wd:Q189118. # cell type
?item wdt:P703 wd:Q15978631. # human cell type
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
"""
dataframe_to_join = wikidata2df(query)
dataframe_to_join
cell_type_reference
# Inner merge: rows whose Wikidata ID has no P279 child are silently dropped
# (investigated in the following cells).
cell_type_reference.merge(dataframe_to_join, left_on="wikidata", right_on="superclass")
```
Some rows are still being lost in the process. Let's understand why.
Row 3 was lost: Pancreatic stellate cells Q1164962
It is present in wikidata, but without a "P279" statement to the general cell type.
Lets look for other cases.
```
import numpy as np
# Reference cell types whose Wikidata ID never appears as a superclass in the
# query result, i.e. the rows lost by the merge above.
main_list = np.setdiff1d(cell_type_reference["wikidata"].dropna(), dataframe_to_join["superclass"].dropna())
cell_type_reference[[a in main_list for a in cell_type_reference["wikidata"]]]
```
I will re-run the Quickstatements used for adding the P279 statements originally and hope for a magic fix.
(../quickstatements/P279_correction_08_11_2020.qs)
```
from wikidata2df import wikidata2df
# Re-run the corrected P279 query after re-applying the QuickStatements batch,
# hoping the previously missing subclass links now exist.
query = """
SELECT ?item ?itemLabel ?superclass
WHERE
{
?item wdt:P31 wd:Q189118.
?item wdt:P279 ?superclass.
?superclass wdt:P31 wd:Q189118. # cell type
?item wdt:P703 wd:Q15978631. # human cell type
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
"""
dataframe_to_join = wikidata2df(query)
dataframe_to_join
```
I had to restart the kernel so to clear the cache of the query.
```
# Reload the reference tables after the kernel restart, then repeat the merge
# and the "which rows were lost" check.
import pandas as pd
gene_reference = pd.read_csv("../results/human_gene_reference_from_panglao_to_wikidata_04_11_2020.csv")
cell_type_reference = pd.read_csv("../results/cell_type_reference_from_panglao_to_wikidata_31_10_2020.csv")
markers = pd.read_csv("../data/PanglaoDB_markers_27_Mar_2020.tsv", sep="\t")
cell_type_reference.merge(dataframe_to_join, left_on="wikidata", right_on="superclass")
import numpy as np
main_list = np.setdiff1d(cell_type_reference["wikidata"], dataframe_to_join["superclass"].dropna())
cell_type_reference[[a in main_list for a in cell_type_reference["wikidata"]]]
```
I am fixing some of those manually.
Satellite glial cells Q63710 has a typo, it should point to https://www.wikidata.org/wiki/Q637104
The query seems to be buggy, though.
I'll proceed and note down the problems.
```
# Build the marker table (gene symbol, cell type, both Wikidata IDs) and emit
# a Turtle file with one (cell type, P9 has-marker, gene) triple per row.
cell_type_reference = cell_type_reference.merge(dataframe_to_join, left_on="wikidata", right_on="superclass")
cell_type_reference.to_csv("../results/human_cell_type_reference_21_11_2020.csv")
cell_type_reference.head()
# Keep only human ("Hs") marker rows from PanglaoDB.
human_markers = markers[["Hs" in val for val in markers["species"]]]
human_markers_lean = human_markers[["official gene symbol", "cell type"]]
human_markers_lean.to_csv("../results/human_markers_reconciled_13_11_2020.csv")
human_markers_lean.head()
human_markers_lean = human_markers_lean.merge(cell_type_reference, left_on="cell type", right_on="panglao")[["official gene symbol", "cell type", "item"]]
human_markers_lean.columns = ["official gene symbol", "cell type", "cell type id"]
human_markers_lean = human_markers_lean.merge(gene_reference, left_on="official gene symbol", right_on="panglao")[["official gene symbol", "cell type", "cell type id", "wikidata"]]
human_markers_lean.columns = ["official gene symbol", "cell type", "cell type id", "gene id"]
human_markers_lean
import rdflib
g = rdflib.Graph()
# NOTE(review): the Namespace/bind calls below are loop-invariant and could be
# hoisted above the loop; the notebook export lost indentation, so the exact
# loop body boundary (and whether serialize() runs once or per row) should be
# confirmed against the original notebook.
for index, row in human_markers_lean.iterrows():
ctp = rdflib.Namespace("http://celltypes.wiki.opencura.com/entity/")
wd = rdflib.Namespace("http://www.wikidata.org/entity/")
wdt = rdflib.Namespace("http://www.wikidata.org/prop/direct/")
s = rdflib.term.URIRef(wd+row["cell type id"])
p1 = rdflib.term.URIRef(ctp+"P9")
o1 = rdflib.term.URIRef(wd+row["gene id"])
g.bind("ctp", ctp)
g.bind("wd", wd)
g.bind("wdt", wdt)
g.add((s, p1, o1))
g.serialize(destination='../results/human_cell_type_markers_21_11_2020.ttl', format='turtle')
```
There we go. Now we have an _almost_ complete .ttl file.
| github_jupyter |
## Import our modules. Remember it is always good to do this at the begining of a notebook.
If you don't have seaborn, you can install it with `conda install seaborn`.
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
### Use the notebook magic to render matplotlib figures inline with the notebook cells
```
%matplotlib inline
```
### Let's begin!
First, we need to load our data.
We'll use pandas `read_csv` function. Now, if you have trouble opening the file, remember how we solved the problem this morning.
```
df = pd.read_csv("HCEPDB_moldata.csv")
```
Let's take a look at the data to make sure it looks right with head and then look at the shape of the data frame.
```
# Quick sanity check: first rows and overall dimensions.
df.head()
df.shape
```
OK, that's a lot of data. Let's take a random subsampling of the full dataframe to make playing with the data faster. This is something you may consider doing when you have large data sets and want to do data exploration. Thankfully, pandas has a nice feature called sample that will take a random sample from our dataframe.
```
# Random 10% subsample to speed up exploratory plotting.
# NOTE(review): no random_state is passed, so the subsample differs between runs.
df_sample = df.sample(frac=.1)
df_sample.head()
df_sample.shape
```
Cool. Cool, cool, cool. Now we have a subset of data for some plotting fun. We saw some basic plots this AM with pandas, but let's do some nicer ones. Let's start with PCE vs HOMO energy.
```
df.plot.scatter('pce', 'e_homo_alpha')
```
Ooops! We used the wrong dataframe. That took a while, didn't it. We can use the magic %%timeit to see how long that took. By default `%%timeit` repeats the function call some number of times and averages it. For this purpose let's do one time. See the timeit docs [here](http://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-timeit).
```
%%timeit -n 1 -r 1
df.plot.scatter('pce', 'e_homo_alpha')
```
Now let's see for our subsampled dataframe.
```
%%timeit -n 1 -r 1
df_sample.plot.scatter('pce', 'e_homo_alpha')
```
Nice... A lot shorter! Notice about 10% of the data resulted in a 1/10 run time. Makes sense.
But this thing is UGLY! Let's see if we can't pretty it up. First thing is that `pd.plot.XXX` returns a plot object that we can modify before it gets rendered by calling certain methods on the object. Remember you can always use the Jupyter notebook tab completion after an object to find out what methods are available.
```
# plot.scatter returns an Axes object whose labels/title we can set before rendering.
p_vs_homo_plt = df_sample.plot.scatter('pce', 'e_homo_alpha')
p_vs_homo_plt.set_xlabel('PCE')
p_vs_homo_plt.set_ylabel('HOMO')
p_vs_homo_plt.set_title('Photoconversion Efficiency vs. HOMO energy')
```
That's a bit better, but there are still some things we can do to make it look nicer. Like put it on a grid and make the y-axis label more accurate and increase the size as well as setting the aspect ratio.
```
# Same scatter with a fixed figure size, LaTeX-style y label, and a grid.
p_vs_homo_plt = df_sample.plot.scatter('pce', 'e_homo_alpha', figsize=(6,6))
p_vs_homo_plt.set_xlabel('PCE')
p_vs_homo_plt.set_ylabel('$E_{HOMO}$')
p_vs_homo_plt.set_title('Photoconversion Efficiency vs. HOMO energy')
p_vs_homo_plt.grid()
```
Let's take a moment to figure something out. Let's figure out how to do the following:
* How to change the x range to be 2 to 10
* How to change the y range to be -6 to -2
* How to change the font size to 18
* How to change the colors and transparency
### The pandas visualization tools documentation is really good:
* [docs here](https://pandas.pydata.org/pandas-docs/stable/visualization.html)
One thing that is very useful is a scatterplot matrix to show the relationship between variables. Let's make one now. Be patient as this makes a lot of plots!
```
# `pandas.tools.plotting` was deprecated in pandas 0.20 and removed in 0.25;
# the supported import path is `pandas.plotting`.
from pandas.plotting import scatter_matrix
# Pairwise scatter plots of every column pair in the sampled frame (the
# diagonal shows each column's distribution) -- a quick relationship overview.
scatter_matrix(df_sample, figsize=(10,10), alpha=.2)
```
WOW! That is insane! But it does give us a quick overview of the relationship between all the variables in the data frame. That id column plot is goofy. The ids are the molecule ids and don't contain any molecular information. Let's turn that column into an index and move on.
```
# The molecule id carries no chemical information; move it out of the columns
# and into the index so it no longer shows up in plots.
df_sample.set_index('id', inplace=True)
df_sample.head()
```
OK, moving on, let's look at making density plots. These show the probability density of particular values for a variable. Notice how we used a different way of specifying the plot type.
```
df_sample['pce'].plot(kind='kde')
```
Let's plot the kde overtop of the histogram (remember the histogram from this AM?). The key here is to use a secondary axis. First we save the plot object to `ax` then pass that to the second plot.
```
# Overlay the KDE on the histogram by reusing the Axes with a secondary y scale
# (counts on the left, density on the right).
ax = df_sample['pce'].plot(kind='hist')
df_sample['pce'].plot(kind='kde', ax=ax, secondary_y=True)
```
## NEAT!
What about trying other plot styles? We can do this by calling `matplotlib.style.use(...)`. Let's try the `ggplot` style that looks like the ggplot2 default style from R.
```
import matplotlib
# Switch the global matplotlib style to mimic R's ggplot2 defaults.
matplotlib.style.use('ggplot')
df_sample['pce'].plot(kind='kde')
```
You can find the list of matplotlib styles [here](https://tonysyu.github.io/raw_content/matplotlib-style-gallery/gallery.html).
## Seaborn for fun and pretty pictures!
Matplotlib is great for basic scatter plots, bar plots, time series, etc. But if we want to do really fancy plots, we need to look to other tools like Seaborn. This is a super quick intro to seaborn.
We'll make three different contour / surface plots.
* Basic contour plot
* Density plot
Examples roughly taken from [here](https://python-graph-gallery.com/1136-2/).
```
# 2-D density of PCE vs HOMO energy: contour lines, then a filled version.
# NOTE(review): positional x/y and the `shade=`/`bw=` keywords are deprecated
# in recent seaborn (use `data=`, `fill=`, `bw_adjust=`) -- confirm the
# installed seaborn version before upgrading.
sns.set_style('white')
sns.kdeplot(df_sample['pce'], df_sample['e_homo_alpha'])
sns.kdeplot(df_sample['pce'], df_sample['e_homo_alpha'], cmap='Reds', shade=True, bw=.15)
```
### Super COOL!
Let's go back to pandas and matplotlib and look at subplots.
```
# Two stacked axes sharing one figure. This cell is INTENTIONALLY flawed --
# the prose below asks the class to fix it (e.g. these are line plots of
# unsorted data; `fix` is presumably a typo for `fig`).
fix, axes = plt.subplots(nrows=2, ncols=1, figsize=(6,6))
df_sample.plot(x='pce', y='e_homo_alpha', ax=axes[0])
df_sample.plot(x='pce', y='e_gap_alpha', ax=axes[1])
```
Ooops! That doesn't look at all right? What's wrong with this figure?
### In class exercise
Fix up the above subplots so that they look like what we might expect. Also, add titles, increase the font size, change colors and alpha, and finally figure out how to change the margins and layout so they are side by side.
| github_jupyter |
```
import os
import pandas as pd
import numpy as np
import json
import pickle
from pprint import pprint
from collections import defaultdict
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import os, sys
# Make the repository root importable so src/ and knowledge_tracing/ resolve.
parentPath = os.path.abspath("..")
if parentPath not in sys.path:
sys.path.insert(0, parentPath)
from src.data import load_source, slice_d
from src.config import Config, get_option_fallback
from knowledge_tracing.trainer import Trainer
projectdir = Path('/code')
assert projectdir.exists()
experiment_name = '20_0218_edm2020_assistments09'
expsoutdir = projectdir / 'output' / experiment_name
# For each experiment run, pair its newest best checkpoint with its newest
# report JSON (directories are sorted lexicographically; [-1] is the latest).
cp_rp_paths = []
for expdir in sorted(expsoutdir.iterdir()):
# Checkpoints
cpdir = expdir / 'checkpoints'
lastcpdir = sorted(cpdir.iterdir())[-1]
bestcp_path = sorted(lastcpdir.iterdir())[-1]
print(bestcp_path)
# Report
rpdir = expdir / 'report'
lastrpdir = sorted(rpdir.iterdir())[-1]
rp_path = next(lastrpdir.glob('*.json'))
print(rp_path)
cp_rp_paths.append((bestcp_path, rp_path))
# Asteria
# Rebuild each run's config from its report and point it at the saved
# checkpoint so the model can be reloaded for simulation.
configdics = []
for checkpoint_path, report_path in cp_rp_paths:
with open(report_path, 'r') as f:
report = json.load(f)
config_dic = report['config']
config_dic['load_model'] = str(checkpoint_path)
configdics.append(config_dic)
def get_simu_res(config_dic):
    """Probe a trained model's response to increasingly correct answer histories.

    For every skill, feed the model each "i wrong answers then all correct"
    sequence of length ``config.sequence_size`` and record the predicted
    probability after the final step.

    Returns a dict mapping skill id -> (list of correct-answer counts,
    list of final predicted probabilities). Uses the notebook-global
    ``projectdir``.
    """
    config = Config(config_dic, projectdir)
    config.batch_size = 1  # one simulated student at a time
    trainer = Trainer(config)
    trainer.evaluate_model()
    seq_size = config.sequence_size
    # Sequences 11...1, 011...1, ..., 00...0 (i leading wrong answers).
    scenarios = [[0] * i + [1] * (seq_size - i) for i in range(seq_size + 1)]
    simu_res = dict()
    for skill in range(config.n_skills):
        correct_counts = []
        final_probs = []
        for answers in scenarios:
            pairs = [(skill, a) for a in answers]
            inputs = torch.Tensor(pairs).unsqueeze(0)
            targets = torch.Tensor(pairs).unsqueeze(0)
            mask = torch.BoolTensor([True] * seq_size).unsqueeze(0)
            out = trainer.model.loss_batch(inputs, targets, mask)
            final_probs.append(out['pred_prob'][-1].item())
            correct_counts.append(sum(answers))
        simu_res[skill] = (correct_counts, final_probs)
    return simu_res
# Run the simulation for every reloaded run.
simures_list = []
for config_dic in configdics:
simu_res = get_simu_res(config_dic)
simures_list.append(simu_res)
# Treat the first run as the base ordering (skills sorted by how much the
# predicted probability drops across the simulated sequences); align the
# remaining runs' results to that key order.
base_res = None
descres_list = []
for simu_res in simures_list:
if base_res is None:
base_res = {k:v for k, v in sorted(simu_res.items(), key=lambda it: it[1][1][0] - it[1][1][-1])}
else:
desc_res = {k:simu_res[k] for k in base_res.keys()}
descres_list.append(desc_res)
# Grid large enough for 124 curves, 8 per row.
# NOTE(review): 124 is presumably the number of skills in this dataset -- confirm.
h, w = (124+7)//8, 8
figscale = 2.5
hspace = 0.35
fig, axs = plt.subplots(h, w, figsize=(w*figscale, h*figscale))
plt.subplots_adjust(hspace=hspace)
# One subplot per skill: base run labeled '0', subsequent runs numbered.
for i, (v, (xidx, sanity)) in enumerate(list(base_res.items())[:h*w]):
ax = axs[i//(w), i%(w)]
ax.set_ylim([0, 1])
ax.set_title('KC{} s{}0'.format(v, '>' if sanity[-1]<sanity[0] else '<'))
sns.lineplot(xidx, sanity, ax=ax, label='0', palette="ch:2.5,.25")
for i, desc_res in enumerate(descres_list):
sns.lineplot(xidx, desc_res[v][1], ax=ax, label=str(i+1), palette="ch:2.5,.25")
ax.legend()
plt.show()
# Test auc (manual)
```
# Manually collected test AUCs over 5 runs, with and without pre-training.
# pre straighten
pd.Series([0.796907,0.798236,0.79668,0.796917,0.795682]).describe()
# no pre
pd.Series([0.795603,0.797074,0.795955,0.795035,0.795182]).describe()
config_name = '20_0218_edm2020_assistments09'
expsoutdir = projectdir / 'output' / config_name
# config_path = sorted(projectdir / 'config' / experiment_name).iterdir())[0]
config_path = projectdir / 'config' / config_name / 'assistments09.json'
with open(config_path, 'r') as f:
config_dict = json.load(f)
# Tag the config with where it came from (used to locate outputs later).
config_dict['config_name'] = config_path.parent.name
config_dict['exp_name'] = config_path.stem
def check_prev_report(config_name, exp_name, projectdir):
    """Locate the newest report JSON and model checkpoint of a finished run.

    Returns a ``(report_path, checkpoint_path)`` tuple, or ``None`` when the
    run has not produced both a ``report`` and a ``checkpoints`` directory.
    NOTE(review): callers that unpack the result directly will raise a
    TypeError on the None case; raises IndexError if the directories exist
    but contain no matching files -- confirm that is acceptable upstream.
    """
    exp_root = projectdir / 'output' / config_name / exp_name
    report_root = exp_root / 'report'
    checkpoint_root = exp_root / 'checkpoints'
    if not (report_root.exists() and checkpoint_root.exists()):
        return None
    # Lexicographically last file in the last timestamped subdirectory.
    latest_report = sorted(report_root.glob('*/*.json'))[-1]
    latest_checkpoint = sorted(checkpoint_root.glob('*/*.model'))[-1]
    return latest_report, latest_checkpoint
# load previously trained model
# NOTE(review): check_prev_report returns None when a run is missing, which
# would make this tuple-unpacking raise a TypeError. The 8 repetitive
# load-a-report stanzas below could also be collapsed into a loop over the
# pre-training sizes.
report_path, checkpoint_path = check_prev_report(config_path.parent.name, config_path.stem, projectdir)
with open(report_path, 'r') as f:
report_dict = json.load(f)
config_dict = report_dict['config']
config_dict['load_model'] = str(checkpoint_path)
config = Config(config_dict, projectdir=projectdir)
trainer = Trainer(config)
# Locate the report of each "straighten" run, one per pre-training epoch count.
config_name = '20_0218_edm2020_asmt0910'
report_straighten_pre0_path, _ = check_prev_report(config_name, 'pre_dummy_epoch_size0.auto', projectdir)
report_straighten_pre5_path, _ = check_prev_report(config_name, 'pre_dummy_epoch_size5.auto', projectdir)
report_straighten_pre10_path, _ = check_prev_report(config_name, 'pre_dummy_epoch_size10.auto', projectdir)
report_straighten_pre20_path, _ = check_prev_report(config_name, 'pre_dummy_epoch_size20.auto', projectdir)
report_straighten_pre50_path, _ = check_prev_report(config_name, 'pre_dummy_epoch_size50.auto', projectdir)
report_straighten_pre100_path, _ = check_prev_report(config_name, 'pre_dummy_epoch_size100.auto', projectdir)
report_straighten_pre150_path, _ = check_prev_report(config_name, 'pre_dummy_epoch_size150.auto', projectdir)
report_straighten_pre200_path, _ = check_prev_report(config_name, 'pre_dummy_epoch_size200.auto', projectdir)
with open(report_straighten_pre0_path, 'r') as f:
report_straighten_pre0 = json.load(f)
with open(report_straighten_pre5_path, 'r') as f:
report_straighten_pre5 = json.load(f)
with open(report_straighten_pre10_path, 'r') as f:
report_straighten_pre10 = json.load(f)
with open(report_straighten_pre20_path, 'r') as f:
report_straighten_pre20 = json.load(f)
with open(report_straighten_pre50_path, 'r') as f:
report_straighten_pre50 = json.load(f)
with open(report_straighten_pre100_path, 'r') as f:
report_straighten_pre100 = json.load(f)
with open(report_straighten_pre150_path, 'r') as f:
report_straighten_pre150 = json.load(f)
with open(report_straighten_pre200_path, 'r') as f:
report_straighten_pre200 = json.load(f)
def show_learning_curve(name, jack_up, idclist_dic, config, idc='eval_auc'):
    """Draw one indicator's learning curve onto the current seaborn/matplotlib axes.

    The epoch list of run '0' is tiled 5 times (one copy per run) and shifted
    right by *jack_up* pre-training epochs; the indicator values of all runs are
    concatenated in the same order so seaborn aggregates across runs.
    `config` is accepted for interface compatibility but unused here.
    """
    epochs = [epoch + jack_up for epoch in idclist_dic['epoch']['0'] * 5]
    values = []
    for run_values in idclist_dic[idc].values():
        values.extend(run_values)
    sns.lineplot(x=epochs, y=values, label=name+':'+idc)
# Learning curves of eval AUC, each shifted right by its pre-training epochs.
# Bug fix: each 'preN' curve now plots its own report — pre150/pre50/pre20/pre5
# previously reused the pre100/pre10/pre10/pre0 data (copy-paste error).
show_learning_curve('pre200', 200, report_straighten_pre200['indicator'], config)
show_learning_curve('pre150', 150, report_straighten_pre150['indicator'], config)
show_learning_curve('pre100', 100, report_straighten_pre100['indicator'], config)
show_learning_curve('pre50', 50, report_straighten_pre50['indicator'], config)
show_learning_curve('pre20', 20, report_straighten_pre20['indicator'], config)
show_learning_curve('pre10', 10, report_straighten_pre10['indicator'], config)
show_learning_curve('pre5', 5, report_straighten_pre5['indicator'], config)
show_learning_curve('pre0', 0, report_straighten_pre0['indicator'], config)
plt.show()
# Same learning curves for eval loss.
# Bug fix: each 'preN' curve now plots its own report — pre150/pre50/pre20/pre5
# previously reused the pre100/pre10/pre10/pre0 data (copy-paste error).
show_learning_curve('pre200', 200, report_straighten_pre200['indicator'], config, idc='eval_loss')
show_learning_curve('pre150', 150, report_straighten_pre150['indicator'], config, idc='eval_loss')
show_learning_curve('pre100', 100, report_straighten_pre100['indicator'], config, idc='eval_loss')
show_learning_curve('pre50', 50, report_straighten_pre50['indicator'], config, idc='eval_loss')
show_learning_curve('pre20', 20, report_straighten_pre20['indicator'], config, idc='eval_loss')
show_learning_curve('pre10', 10, report_straighten_pre10['indicator'], config, idc='eval_loss')
show_learning_curve('pre5', 5, report_straighten_pre5['indicator'], config, idc='eval_loss')
show_learning_curve('pre0', 0, report_straighten_pre0['indicator'], config, idc='eval_loss')
plt.show()
# Eval AUC curves again, without the pre-training-epoch shift (all at 0).
# Bug fix: each 'preN' curve now plots its own report — pre5/pre20/pre50/pre150
# previously reused the pre0/pre10/pre10/pre100 data (copy-paste error).
show_learning_curve('pre0', 0, report_straighten_pre0['indicator'], config)
show_learning_curve('pre5', 0, report_straighten_pre5['indicator'], config)
show_learning_curve('pre10', 0, report_straighten_pre10['indicator'], config)
show_learning_curve('pre20', 0, report_straighten_pre20['indicator'], config)
show_learning_curve('pre50', 0, report_straighten_pre50['indicator'], config)
show_learning_curve('pre100', 0, report_straighten_pre100['indicator'], config)
show_learning_curve('pre150', 0, report_straighten_pre150['indicator'], config)
show_learning_curve('pre200', 0, report_straighten_pre200['indicator'], config)
plt.show()
```
# Test auc
```
# no pre (hard-coded test AUCs of the baseline runs)
s = pd.Series([0.795603, 0.797074, 0.795955, 0.795035, 0.795182])
print('no pre', end='\t')
print(f'{s.mean():.6f}±{s.std():.6f}')
# s.describe()
# pre 10 straighten (hard-coded test AUCs)
s = pd.Series([0.796907, 0.798236, 0.79668, 0.796917, 0.795682])
print('pre 10', end='\t')
print(f'{s.mean():.6f}±{s.std():.6f}')

def _report_summary(label, report):
    """Print mean±std of the best-AUC epoch and of the test AUC across runs."""
    best_epochs = pd.Series(list(report['best']['auc_epoch'].values()))
    print('best epoch', end='\t')
    print(f'{best_epochs.mean():.1f}±{best_epochs.std():.1f}')
    test_aucs = pd.Series([run[0] for run in report['indicator']['test_auc'].values()])
    print(label, end='\t')
    print(f'{test_aucs.mean():.6f}±{test_aucs.std():.6f}')

_report_summary('pre 0', report_straighten_pre0)
_report_summary('pre 5', report_straighten_pre5)
_report_summary('pre 10', report_straighten_pre10)
_report_summary('pre 20', report_straighten_pre20)
_report_summary('pre 50', report_straighten_pre50)
_report_summary('pre 100', report_straighten_pre100)
_report_summary('pre 150', report_straighten_pre150)
_report_summary('pre 200', report_straighten_pre200)
```
| github_jupyter |
# Load packages
```
%matplotlib inline
from ifis_tools import database_tools as db
from ifis_tools import asynch_manager as am
from ifis_tools import auxiliar as aux
from wmf import wmf
import pandas as pd
import numpy as np
import os
import pylab as pl
from string import Template
from param_ident import core
from aclayer import core as ac
```
# Hydrographs comparison
```
# Virtual-gauge water data per basin.
wat = {'south': pd.read_csv('data/for_hlm/south_skunk/water_virtual.csv', index_col=0)}
wat.update({'turkey' : pd.read_csv('data/for_hlm/turkey/water_virtual.csv', index_col=0)})
# Read the list of link IDs (one integer per line) from the control file.
# Fix: use a context manager so the file is closed even if parsing raises.
with open('data/for_hlm/south_skunk/control_vr5.sav', 'r') as f:
    L = [int(line) for line in f]
# Rows of the south basin for those links, largest upstream accumulation first.
a = wat['south'].loc[L]
a.sort_values('Acum', ascending=False).shape
def plot_sim(link, f1, f2, path = None):
    """Plot simulated streamflow for one link under products 4, 5 and 6
    between dates f1 and f2; optionally save the figure to `path`.

    NOTE(review): pd.read_msgpack was removed in pandas >= 1.0; this
    notebook relies on an older pandas version.
    """
    link = str(link)
    Q = {str(prod): pd.read_msgpack('data/hlm_outputs/south/'+link+'_vr'+str(prod)+'_.msg')
         for prod in [4, 5, 6]}
    fig = pl.figure(figsize=(15,5))
    ax = fig.add_subplot(111)
    # Use product 4's dates as the common time axis for all curves.
    idx = Q['4']['Q'][f1:f2].index
    for prod, frame in Q.items():
        pl.plot(idx, frame['Q'][f1:f2].values, lw = 4, label = prod)
    ax.legend(loc = 0 , fontsize = 'xx-large')
    ax.tick_params(labelsize = 22)
    ax.set_ylabel('Streamflow [$m^3 \cdot s^{-1}$]', size = 23)
    ax.grid()
    ax.set_xticks(idx[::20])
    ax.set_xlim(idx[0], idx[-1])
    if path is not None:
        pl.savefig(path, bbox_inches = 'tight')
# Hydrograph comparison for link 272677 over one event window
# (the savefig path argument is left commented out on purpose).
plot_sim(272677, '2018-09-20','2018-09-27',)# 'figures/272677_2018_6.png')
# NOTE(review): M is loaded in a later cell (pd.read_msgpack of the event
# metrics) — this cell assumes that cell was executed first.
_t = M.loc[((M['product'] == '6') & (M['link'] == '272677'))]
# 2-D histogram of peak-magnitude vs peak-timing differences for product 6.
h = pl.hist2d(_t.qpeakDiff, _t.tpeakDiff,
    bins = [np.arange(-2,2, 0.1), np.arange(-2,2,0.1)], cmap=pl.get_cmap('Spectral_r'),
    vmax = 100,vmin = -1, cmin = 1)
cbar = pl.colorbar()
pl.xlim(-1.0,1.0)
pl.ylim(-1.0,1.0)
# Event metrics for products 5 and 6 at the same link.
t5 = M.loc[((M['product'] == '5') & (M['link'] == '272677'))]
t6 = M.loc[((M['product'] == '6') & (M['link'] == '272677'))]
#_t['distance'] = np.sqrt(_t['qpeakDiff']**2 + _t['tpeakDiff']**2)
t5.qt_distance.std()
# Scatter of peak-distance vs KGE for both products.
fig = pl.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.tick_params(labelsize = 20)
pl.scatter(t5.qt_distance, t5.kge, s = 80, edgecolor = 'k')
pl.scatter(t6.qt_distance, t6.kge, s = 80, edgecolor = 'k')
pl.ylim(-0.6,1.0)
ax.set_xlabel('Distance', size = 22)
ax.set_ylabel('KGE', size = 22)
# Highlight events with peak flow above 50 (uses _t, i.e. product 6).
_a = _t.loc[_t['qpeak'] > 50]
pl.scatter(_a.qt_distance, _a.kge)
plot_sim(292255,'2018-06-10','2018-7-10', 'figures/292255_2018_9.png')
plot_sim(291344, '2018-03-10','2018-12-10',)#'figures/291344_2018_9.png')
```
# Peak flow comparison
```
# Event metrics table used by the cells above and below.
# NOTE(review): pd.read_msgpack was removed in pandas >= 1.0.
M = pd.read_msgpack('data/processed/south_event_met.msg')
```
## Qpeak by event comparison
```
def get_qpeak_acum(prod):
    """Mean peak-magnitude and peak-timing differences per link for one product,
    joined with the upstream accumulated area from the global table `a`.
    """
    diffs = M.loc[M['product'] == prod, ['qpeakDiff', 'tpeakDiff', 'link']]
    diffs = diffs.groupby('link').mean()
    # Link IDs are strings in M; cast so they align with `a`'s integer index.
    diffs.index = [int(link) for link in diffs.index]
    diffs['acum'] = a['Acum']
    return diffs
# Mean peak differences per link for products 5 and 6.
Qp = {'5' : get_qpeak_acum('5')}
Qp.update({'6': get_qpeak_acum('6')})
# Two-panel figure: (a) peak-magnitude difference vs upstream area,
# (b) time-to-peak difference vs upstream area.
fig = pl.figure(figsize=(20,8))
ax = fig.add_subplot(121)
for i,co in zip(['5','6'],[ '#ff7f0e','#2ca02c' ]):
    ax.scatter(Qp[i]['acum'], Qp[i]['qpeakDiff'], c = co,s = 250,edgecolor = 'k', label = i)
ax.tick_params(labelsize = 22)
ax.legend(loc = 0, fontsize = 'xx-large')
ax.grid()
ax.set_ylabel('Qpeak difference', size = 23)
ax.set_xlabel('Upstream Area [$km^2$]', size = 23)
ax.set_title('a) Mean peak magnitude difference vs Area', size = 23)
#ax.set_xscale('log')
ax = fig.add_subplot(122)
for i,co in zip(['5','6'],[ '#ff7f0e','#2ca02c' ]):
    ax.scatter(Qp[i]['acum'], Qp[i]['tpeakDiff'],c=co, s = 250,edgecolor = 'k', label = i)
ax.tick_params(labelsize = 20)
ax.legend(loc = 0, fontsize = 'xx-large')
ax.grid()
# ax.set_xlim(10,2000)
# ax.set_xscale('log')
ax.set_ylabel('Time to peak difference [$h$]', size = 23)
ax.set_xlabel('Upstream Area [$km^2$]', size = 23)
ax.set_title('b) Mean peak time difference vs Area', size = 23)
pl.savefig('figures/qpeak_mag_time_mean_diff_vs_area.png', bbox_inches = 'tight')
```
## Peak flow comparison
```
# Peak flows of each comparison product vs the reference product ('4'),
# both standardized by the reference annual maximum.
# NOTE(review): get_qmax_intersec is defined in a *later* cell; that cell
# must be executed before this one (cells are out of execution order here).
fig = pl.figure(figsize=(8,8))
ax = fig.add_subplot(111)
bins = np.linspace(10,1000,10)
for i,co in zip(['5','6'],[ '#ff7f0e','#2ca02c' ]):
    x1,y1 = get_qmax_intersec(i, '2002','2018')
    ax.scatter(x1,y1, c = co,s = 100, edgecolors='k', alpha = 0.7, label = i)
ax.set_xlim(0,7)
ax.set_ylim(0,7)
ax.legend(loc = 0 , fontsize = 'xx-large')
# 1:1 line — points above it mean the comparison product peaks higher.
ax.plot([0,1000],[0,1000],'k', lw = 2)
ax.set_xlabel('Reference peak flows', size = 22)
ax.set_ylabel('Comparison peak flows', size = 22)
ax.grid()
ax.tick_params(labelsize = 20)
pl.savefig('figures/peak_flow_comparison.png', bbox_inches = 'tight')
```
## Standarized qpeak difference
```
def get_qmax_intersec(product, y1, y2):
    """Standardized peak flows on the (date, link) pairs common to the
    reference product '4' and *product*, restricted to years y1..y2.

    Both series are divided by the reference product's annual maximum, so
    they are directly comparable. Returns (reference, comparison).
    """
    cols = ['qpeak', 'qmax_anual', 'link']
    reference = M.loc[M['product'] == '4', cols][y1:y2]
    reference.set_index([reference.index, 'link'], inplace=True)
    comparison = M.loc[M['product'] == product, cols][y1:y2]
    comparison.set_index([comparison.index, 'link'], inplace=True)
    common = comparison.index.intersection(reference.index)
    scale = reference.loc[common, 'qmax_anual']
    return reference.loc[common, 'qpeak'] / scale, comparison.loc[common, 'qpeak'] / scale
# Histogram of standardized peak-flow differences (Z_s - Z_o) per product.
fig = pl.figure(figsize = (20,5))
bins = np.arange(-0.05,0.05,0.005)
# NOTE(review): h1/b1 below are computed but never plotted (the bar call that
# used them is commented out) — leftover from an earlier figure version.
x1,y1 = get_qmax_intersec('5', '2002','2018')
er = y1 - x1
h1,b1 = ac.get_histogram(er, bins, inf = bins[0], sup =bins[-1])
# NOTE(review): cont/names are leftovers of a multi-subplot loop; only
# subplot 111 and the cont == 1 label branch are effective here.
cont = 1
names = ['a) Scenario 5','b) Scenario 6','c) Scenario 12']
ax = fig.add_subplot(111)
for prod,co in zip(['5','6'], [ '#ff7f0e','#2ca02c' ]):
    #Plot the ifc-hlm setup
    #pl.bar(b1[:-1],h1*100, 0.02, align = 'edge',alpha = 0.5, lw = 3, edgecolor = 'k')
    #plot one of the best setups
    x2,y2 = get_qmax_intersec(prod, '2002','2018')
    er2 = y2 - x2
    h2,b2 = ac.get_histogram(er2, bins, inf = bins[0], sup =bins[-1])
    pl.bar(b2[:-1],h2*100, 0.005, align = 'edge', color = co, alpha = 0.5, lw = 3, edgecolor = 'k', label = prod)
    ax.legend(loc = 0 , fontsize = 'xx-large')
    ax.tick_params(labelsize = 22)
    if cont == 1:
        ax.set_xlabel('$Z_s - Z_o$ [adim]', size = 23)
        ax.set_ylabel('Fraction [$\%$]', size = 23)
    else:
        ax.set_yticklabels([])
    ax.grid()
    ax.set_title(names[cont-1], size = 24)
    #ax.set_ylim(0,35)
    ax.set_yticks([0,10,20,30])
    #ax.set_xticks([-1,-0.5,0.0,0.5,1.0])
    cont +=1
pl.savefig('figures/peak_standarized_difference.png', bbox_inches = 'tight')
```
| github_jupyter |
## Herramientas
En este taller usamos pandas, sklearn y OpenCV; las siguientes celdas muestran algunos métodos que usaremos.
[Pandas](https://pandas.pydata.org/)
Es una librería muy útil para trabajar con datos tabulares.
Es muy común encontrarla en el análisis de datos y en procesos de Machine Learning.
En este taller la usaremos para cargar un archivo CSV y explorar los datos.
```
# https://pandas.pydata.org/
import pandas as pd
# Load a CSV file into a DataFrame.
data_frame = pd.read_csv('./data/hands_signs.csv')
# describe() summarizes the numeric columns; note that in a notebook cell only
# the LAST expression is displayed, so this result is discarded here.
data_frame.describe()
# Display a random 0.1% sample of the rows.
data_frame.sample(frac=0.001)
```
[sklearn](http://scikit-learn.org/stable/) es una de las librerías de Machine Learning más usadas.
En este notebook usaremos dos de sus funciones, `train_test_split` y `LabelEncoder` para procesar los
datos antes de entrenar redes neuronales en tensorflow.
```
from sklearn.model_selection import train_test_split
# Toy 80/20 split. NOTE(review): shuffling is on by default and no
# random_state is set, so the split differs on every run.
x = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
y = [1 , 2 , 3 , 3 , 4 , 5 , 7 , 8 ]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
print("x_train:", x_train)
print("y_train:", y_train)
print("x_test:", x_test)
print("y_test:", y_test)
from sklearn.preprocessing import LabelEncoder
# LabelEncoder maps each distinct label to an integer (sorted order).
y = ['a', 'a', 'a', 'b', 'b', 'd', 'e', 'f', 'h', 'h']
y_label = LabelEncoder().fit_transform(y)
print(y)
print(y_label)
```
[OpenCV](https://opencv.org/) es una librería de visión computarizada, en este taller la usaremos para cargar imágenes y realizar cambios de formato de color por ejemplo de RGB a escala de grises.
```
import cv2
# cv2.imread returns the image as a numpy array in BGR channel order.
image = cv2.imread("./data/sign-language-mnist/R/5570.jpg")
print(image.shape)
image
import matplotlib.pyplot as plt
%matplotlib inline
# NOTE(review): plt.imshow expects RGB, so the BGR array displays with
# red/blue swapped unless converted first.
plt.imshow(image)
# Convert BGR -> HSV (the markdown above mentions grayscale, but this cell
# actually demonstrates an HSV conversion).
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
plt.imshow(image_hsv)
image_hsv
```
## Tensorflow
[Tensorflow](https://www.tensorflow.org/) es una librería de machine learning que hace uso de estructuras de grafos para realizar sus cálculos y computar los resultados de sus operaciones. En las próximas celdas de ese notebook se muestra de forma general como se realizan operaciones con esta librería de aprendizaje.
```
# Importar NumPy y TensorFlow
import numpy as np
import tensorflow as tf
```
<table>
<tr>
<td><p>La imagen a continuación representan los nodos que genera tensorflow después de declarar, inicializar variables y accionar las operaciones de la siguiente celda.<p>
<p> \* En Tensorflow primero se declaran las variables, constantes, operaciones etc., o se llama un método que hace este trabajo y retorna una operación o variable que contiene el resultado.</p>
<p> \* Después de tener las operaciones definidas se crea una sesión, se inicializan las variables y se realiza el computo de las operaciones con el método `sess.run(op)` de la sesión.</p>
</td>
<td><img src="images/graph1.png" height="420px"></td>
</tr>
</table>
<table>
<tr>
<td>
<img src="images/graphnetwork.png" width="620px"></td>
<td>
</tr>
</table>
```
# TF1-style demo: declare variables/constants/ops first, then evaluate them
# inside a Session — nothing is computed until sess.run().
tf.reset_default_graph()
a_1 = tf.Variable(np.array([1,2,3], dtype=np.float32), name="variable_a1")
a_2 = tf.get_variable("variable_a2", initializer=3.32)
b = tf.constant([[0,1], [2, 3]], name="Matrix_B")
c = tf.zeros([2, 3], tf.int32) * 1
d = tf.ones_like(c)
e = tf.lin_space(10.0, 13.0, 4, name="linea")
op = tf.add(b, 25)
with tf.Session() as sess:
    # Variables must be initialized before they can be read.
    sess.run(tf.global_variables_initializer())
    print("variable a1, arreglo numpy\n", sess.run(a_1))
    print("variable a2, varible numérica\n", sess.run(a_2))
    print("constante matrix numpy\n", sess.run(b))
    print("genera matriz de ceros de dimensión 2x3\n", sess.run(c))
    print("genera matriz de unos con la dimensión de c\n", sess.run(d))
    print("genera secuencia de números\n", sess.run(e))
    print("corre operación op = b + 25\n", sess.run(op))
# Reset the computation graph.
tf.reset_default_graph()
# Create a variable result = 0.
result = 0
# Declare two TensorFlow constants with positive values and
# create an addition operation between them.
constante1 = tf.constant(7)
constante2 = tf.constant(2)
# Run the addition inside a session and store the outcome in `result`.
with tf.Session() as sess:
    result = sess.run(tf.add(constante1, constante2))
# Print the result.
print(result)
assert (result > 0), "El resultado debe ser mayor a cero"
```
#### Redes en TensorFlow
El las próximas celdas veremos cómo implementar la red que se muestra en la siguiente imagen usando tensorflow.
<img src="images/ANN-Diagram.png" width="500px">
fuente: [viasat.com](https://techblog.viasat.com/using-artificial-neural-networks-to-analyze-trends-of-a-global-aircraft-fleet/)
Layer 1
$$ X(input) = \begin{bmatrix}
. \\
. \\
. \\
.
\end{bmatrix}_{(4rows, 1columns)} \;\;\; W1 = \begin{bmatrix}
. . . . .\\
. . . . .\\
. . . . .\\
. . . . .
\end{bmatrix}_{(4rows, 5columns)}\;\;\; b1 =\begin{bmatrix}
. \\
. \\
. \\
. \\
.
\end{bmatrix}\;\;\; Z1 =\begin{bmatrix}
. \\
. \\
. \\
. \\
.
\end{bmatrix}\;\;\; A1 = ReLu(Z1) = \begin{bmatrix}
. \\
. \\
. \\
. \\
.
\end{bmatrix}\tag{layer 1}$$
$$ Z1 = W1^T X + b1 \; \; \; $$
$$ A1 = ReLu(Z1) $$
Layer 2
$$ A1(input) = \begin{bmatrix}
. \\
. \\
. \\
. \\
.
\end{bmatrix} \;\;\; W2 = \begin{bmatrix}
. . . . . . .\\
. . . . . . .\\
. . . . . . .\\
. . . . . . .\\
. . . . . . .
\end{bmatrix}_{(5rows, 7columns)}\;\;\; b2 =\begin{bmatrix}
. \\
. \\
. \\
. \\
. \\
. \\
.
\end{bmatrix}\;\;\; Z2 =\begin{bmatrix}
. \\
. \\
. \\
. \\
. \\
. \\
.
\end{bmatrix}\;\;\; A2 = ReLu(Z1) = \begin{bmatrix}
. \\
. \\
. \\
. \\
. \\
. \\
.
\end{bmatrix}\tag{layer 2}$$
$$ Z2 = W2^T A1 + b2 \; \; \; $$
$$ A2 = ReLu(Z2) $$
Layer Output
$$ A2(input) = \begin{bmatrix}
. \\
. \\
. \\
. \\
. \\
. \\
.
\end{bmatrix} \;\;\; \;\;\;\;\;\;\;W3 = \begin{bmatrix}
. . .\\
. . .\\
. . .\\
. . .\\
. . .\\
. . .\\
. . .
\end{bmatrix}_{(7rows, 3columns)}\;\;\;\;\;\;\;\; b3 =\begin{bmatrix}
. \\
. \\
.
\end{bmatrix}\;\;\; Z3 =\begin{bmatrix}
. \\
. \\
.
\end{bmatrix}\;\;\; Output = SoftMax(Z3) = \begin{bmatrix}
. \\
. \\
.
\end{bmatrix}\tag{layer Output}$$
$$ Z3 = W3^T A2 + b3 \; \; \; $$
$$ \hat{y} = Output = Softmax(Z3) $$
Red en una sola linea
$$\hat{y} = softmax(\;\; w_3^T \; ReLu(w_2^T \; ReLu(w_1^T x\;+\;b1)\;\; +\; b_2\;\;)\; + \;b_3\;\; )$$
## la variable *W*
En las redes neuronales, la variable *W* es parte de los parámetros principales de la red, también se la conoce como pesos de la red. Estos parámetros controlan qué conexiones de red se activan y desactivan determinando la salida producida por la red.
Cuando la red está siendo entrenada, los valores de estos parámetros cambian en cada iteración para que la red se ajuste y produzca los resultados deseados.
En la siguiente lista se muestra cómo una variable llamada W se inicializa como una matriz de 5x7 con valores aleatorios
```
# Initialize a 5x7 matrix W with normally-distributed random values.
tf.set_random_seed(100)
tf.reset_default_graph()
W = tf.get_variable("normal_dist_matrix", shape=(5, 7), initializer = tf.random_normal_initializer() )
_W = None
with tf.Session() as sess:
    sess.run(W.initializer)
    # Evaluate W into a plain numpy array.
    _W = sess.run(W)
print(_W)
print(type(_W))
```
## La variable b
Esta variable (b) junto con la variable (W) son la parámetros que una red aprenden en el proceso de entrenamiento. Este parámetro contiene números o arreglo de números que van a sumarse después de realizar una operación entre la entrada de la red (x) y el parámetro (W)
```
# Assignment ops: assign_add mutates the variable within a session;
# re-running the initializer resets it to its declared value.
tf.reset_default_graph()
b = tf.get_variable('b', initializer = tf.constant(10))
with tf.Session() as sess:
    sess.run(b.initializer)
    sess.run(b.assign_add(30))
    print("b = 10 + 30 = 40:", b.eval())
    sess.run(b.assign_add(30))
    print("b = 70:", b.eval())
with tf.Session() as sess:
    sess.run(b.initializer)
    print("After recovery session by definition b = 10: ", b.eval())
```
###### Intentaremos hacer en tensorflow la red que se mostró en la imagen anterior:
$$\hat{y} = softmax(\;\; w_3^T \; ReLu(w_2^T \; ReLu(w_1^T x\;+\;b1)\;\; +\; b_2\;\;)\; + \;b_3\;\; )$$
<img src="images/ANN-Diagram.png" width="300px">
```
# Build the 4 -> 5 -> 7 -> 3 network parameters from the diagram above.
tf.reset_default_graph()
tf.set_random_seed(100)
#input layer
X = tf.get_variable('X', shape=( 4), initializer = tf.truncated_normal_initializer())
# parameters layer 1
W_1 = tf.get_variable('W_1', shape=(4, 5), initializer = tf.ones_initializer() )
b_1 = tf.get_variable('B_1', initializer = np.ones(5, dtype=np.float32) )
# hidden layer 1 before activation: Z1 = W1^T X + b1
Z_1 = tf.add(tf.tensordot(tf.transpose(W_1), X, 1), b_1)
# parameters layer 2
b_2 = tf.get_variable("B_2", initializer = np.ones(7, dtype=np.float32))
W_2 = tf.get_variable("W_2", shape=(5,7), initializer = tf.truncated_normal_initializer())
# parameters output layer
b_3 = tf.get_variable("B_3", initializer = np.ones(3, dtype=np.float32))
W_3 = tf.get_variable("W_3", shape=(7,3), initializer = tf.truncated_normal_initializer())
with tf.Session() as sess:
    # Only the tensors required to evaluate Z_1 are initialized here.
    sess.run(tf.variables_initializer([b_1, W_1, X]))
    # NOTE(review): the chained assignment only keeps the evaluated Z_1;
    # the intermediate name `shape` is redundant.
    _z1 = shape = sess.run(Z_1)
    print("\n\nb1 array, shape:", b_1.get_shape() , "\n", b_1.eval())
    print("\n\nW1 matrix, shape:", W_1.get_shape(), "\n", W_1.eval())
    print("\n\nX matrix, shape:", X.get_shape(), "\n", X.eval())
    print("\n\nZ1 = (W * X + b) matrix, shape:", _z1.shape, "\n", _z1)
```
### ReLu = Max(0, x)
<img src="images/activation.png" width="150px">
```
# tf.reset_default_graph()
# The graph is NOT reset so the previous cell's tensors stay available.
# Apply ReLu to Z_1.
A_1 = tf.nn.relu(Z_1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(A_1)
    print("\n\nA_1 = apply relu(Z) matrix, shape:", A_1.get_shape(), "\n", A_1.eval())
# Layer 2 pre-activation: Z2 = W2^T A1 + b2
Z_2 = tf.add(tf.tensordot(tf.transpose(W_2), A_1, 1), b_2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(Z_2)
    print("Printing Z2:", Z_2.eval())
A_2 = tf.nn.relu(Z_2)
# Output layer: softmax over Z3 yields a probability distribution.
Z_3 = tf.add(tf.tensordot(tf.transpose(W_3), A_2,1), b_3)
Z_3_softmax = tf.nn.softmax(Z_3)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("Z3 values:", Z_3.eval())
    _z3_softmax= sess.run(Z_3_softmax)
    print("Softmax(Z3):", _z3_softmax)
    print("dist:", list(zip([round(x*100,3) for x in _z3_softmax], _z3_softmax)))
    # Softmax outputs sum to 1 (up to float rounding).
    it_should_sum_one = np.sum(_z3_softmax)
    print("1 = ", it_should_sum_one)
```
### Tic Tac Toe
Ahora tratemos de hacer un red que reciba un juego de Triqui como input, y
su output va a ser quien es el ganador del triqui, que puede ser O, X o
ninguno gana. vamos a representar la salida con un vector de tres posiciones
por ejemplo este vector de salida `[0.7, 0.2, 0.1]` indica que gano el jugador
con la `O` por que tiene la probabilidad mas alta en la posición 1. la posición 2
le corresponde a la `X` y la posición 3 a ninguna opción.
##### Representando la entrada (input) y la salida (output) de la red
La entrada (input) de la red es el tablero de triqui, para representarlo usaremos un
vector de 9 posiciones de la siguiente manera:
<table>
<tr>
<td width="60%">Este juego de triqui donde gana el jugador con la X se representara de la siguiente manera
<b>`[1, 1, 1,-1, 0, 0,-1, 0, 1]`</b> como se puede ver la `X` son el numero 1, la O son el numero
0 y los espacios sin jugar son -1.
</td>
<td><img src="images/triqui1.png" width="100px"></td>
</tr>
<tr>
<td><p>Este juego de triqui no gana ningún jugador y se representa con el siguiente vector</p>
<p><b>`[0, 1, 0, 0, 0, 1, 1, 0, 1]`</b></p></td>
<td><img src="images/triqui2.png" width="100px"></td>
</tr>
<tr>
<td><p>Este juego de triqui gana el jugador con la letra O y se representa con el siguiente vector</p>
<p><b>`[0, 1, 1, 0, 0,-1, 0, 1, 1]`</b></p></td>
<td><img src="images/triqui3.png" width="100px"></td>
</tr>
<tr>
<td>
<p>Este juego de triqui gana el jugador con la letra X y se representa con el siguiente vector</p>
<p><b>`[1, 0, 0, 0, 1,-1, 1,-1, 1]`</b></p>
</td>
<td><img src="images/triqui4.png" width="100px"></td>
</tr>
<tr>
<td><p>Este juego de triqui gana el jugador con la letra O y se representa con el siguiente vector</p>
<p><b>`[0,-1,-1, 1, 0, 1,-1, 1, 0]`</b></p></td>
<td><img src="images/triqui5.png" width="100px"></td>
</tr>
</table>
##### Lo que queremos de la red
Para nosotros es fácil ver quien gano un juego de triqui, pero para un programa no es evidente,
¿Cómo escribir un programa que nos diga quién ganó dado el tablero del juego?
Seguramente hay una solución muy sencilla con la programación convencional usando `for`, `loops`, `if`, etc.
Tal vez usar redes es como matar una hormiga con un cañón, pero puede ser entretenido implementar una red en tensorflow.
```
## https://playtictactoe.org/
## Tic-tac-toe examples: a board is a 9-vector (1 = X, 0 = O, -1 = empty);
## the label y follows the encoding used by the later cells:
## 0 = O wins, 1 = X wins, 2 = no winner.
x1 = np.array([1, 1, 0, 0, 1, 1, 1, 0, 0]) ; y1 = 2
x2 = np.array([1, 0, 0,-1, 0, 1, 1, 0, 1]) ; y2 = 2
x3 = np.array([1,-1, 0, 1, 0,-1, 1, 0, 1]) ; y3 = 1
x4 = np.array([1,-1, 1, 0, 0, 0, 1,-1, 0]) ; y4 = 0
x5 = np.array([1, 1, 1,-1, 0, 0,-1, 0, 1]) ; y5 = 1
x6 = np.array([0,-1,-1, 1, 0, 1,-1, 1, 0]) ; y6 = 0
## Held-out test examples
x7 = np.array([1, 0, 0, 0, 1,-1, 1,-1, 1]) ; y7 = 1
x8 = np.array([0, 1, 0, 0, 0, 1, 1, 0, 1]) ; y8 = 2
x9 = np.array([0, 1, 1, 0, 0,-1, 0, 1, 1]) ; y9 = 0
# Bug fix: x7/y7 were previously included in BOTH the training and the test
# lists, leaking a test example into training even though the demo below
# describes the test boards as "no vistos por la red".
X_train = [x1, x2, x3, x4, x5, x6]
y_train = [y1, y2, y3, y4, y5, y6]
X_test = [x7, x8, x9]
y_test = [y7, y8, y9]
```
<img src="images/ANN-Diagram.png" width="300px">
```
# Tic-tac-toe classifier graph: 9-unit board input -> 80 -> 100 -> 3 logits.
tf.reset_default_graph()
tf.set_random_seed(1)
# Placeholders: x is one board (9-vector), y its integer label (0/1/2),
# one-hot encoded for the softmax cross-entropy loss.
x = tf.placeholder(tf.float32, (9), name="X")
y = tf.placeholder(tf.int32, (None), name="Y")
y_one_hot = tf.one_hot(y, 3)
num_neurons_layer_1 = 80
num_neurons_layer_2 = 100
# parameters layer 1
b1 = tf.get_variable("b1", initializer = np.zeros(num_neurons_layer_1, dtype=np.float32))
W1 = tf.get_variable("W1", (num_neurons_layer_1, 9), initializer = tf.truncated_normal_initializer())
Z1 = tf.add(tf.tensordot(W1, x, 1), b1)
A1 = tf.nn.relu(Z1)
# parameters layer 2
b2 = tf.get_variable("b2", initializer = np.zeros(num_neurons_layer_2, dtype=np.float32))
W2 = tf.get_variable("W2", (num_neurons_layer_2, num_neurons_layer_1), initializer = tf.truncated_normal_initializer())
Z2 = tf.add(tf.tensordot(W2, A1, 1), b2)
A2 = tf.nn.relu(Z2)
# parameters output layer (raw logits; softmax is applied by the loss
# during training and explicitly at inference time)
W_l = tf.get_variable("W_l", (3, num_neurons_layer_2), initializer=tf.truncated_normal_initializer())
b_l = tf.get_variable("b_l", initializer=np.zeros(3, dtype=np.float32))
logits = tf.add(tf.tensordot(W_l, A2, 1), b_l)
```
##### Aprendiendo los parametros W y b
Hasta el momento hemos visto como se calcula la red hacia adelante, pero no hemos visto como la red recibe retroalimentación para saber como actualiza los parámetros W y b, de forma que la salida de la red nos de el resultado esperado. Esto se logra con un proceso llamado propagación hacia atrás, y en ese proceso los parámetros W y b se actualizan.
<img src="images/backpropagation-in-convolutional-neural-network-8-638.jpg" width="600px">
fuente: https://www.slideshare.net/kuwajima/cnnbp
```
# Learning: softmax cross-entropy loss minimized with Adam.
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_one_hot)
loss = tf.reduce_mean(entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
train = optimizer.minimize(loss)
# Fix: build the prediction op ONCE. The original called tf.nn.softmax(logits)
# inside the loop, adding a new node to the graph on every evaluation.
probs = tf.nn.softmax(logits)
# Training (one example at a time; the net has no batch dimension).
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(400):  # epochs
        train_predictions = []
        for i in range(len(X_train)):
            sess.run(train, feed_dict={x: X_train[i], y: y_train[i]})
        # Every 20 epochs, measure accuracy on the training set.
        if (epoch % 20) == 0:
            for j in range(len(X_train)):
                result = sess.run(probs, feed_dict={ x: X_train[j], y: y_train[j] })
                # Check whether the prediction matches the true label.
                predict_correct = np.argmax(result) == y_train[j]
                train_predictions.append(predict_correct)
            accuracy = sum(train_predictions)/float(len(train_predictions))
            print("Porcentaje de precisión:", round(accuracy*100, 3) )
            print("\n")
```
### Predictions
En las próximas celdas vamos a usar el modelo que fue entrenado para predecir quien gano el triqui con 3 ejemplos no vistos por la red.
<table>
<tr>
<td width="60%"><p>Este juego de triqui donde gana el jugador con la X</p>
<b>`[1, 0, 0, 0, 1,-1, 1,-1, 1]`</b>
</td>
<td><img src="images/triqui7.png" width="100px"></td>
</tr>
</table>
```
#prediction
# NOTE(review): tf.global_variables_initializer() re-initializes W/b to fresh
# random values — the weights trained in the previous cell are NOT reused,
# so this "prediction" actually runs on an untrained network.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # x7 is the board where X wins; X corresponds to index 1 of the 3-way output.
    result = sess.run(tf.nn.softmax(logits), feed_dict={ x: x7, y: y7})
    print("En este juego gana X:", result)
    print("Este resultado gana X ?", np.argmax(result) == 1,)
    print("\n")
```
<table>
<tr>
<td width="60%"><p>Este juego de triqui no gana ningun jugador</p>
<b>`[0, 1, 0, 0, 0, 1, 1, 0, 1]`</b>
</td>
<td><img src="images/triqui8.png" width="100px"></td>
</tr>
</table>
```
#prediction
# NOTE(review): as above, the session re-initializes all variables, so the
# trained weights are not used. Also, y8 == 2 is the "no winner" class under
# the training-data encoding, while the printed text says O wins — the
# message and the argmax check look inconsistent; confirm intent.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    result = sess.run(tf.nn.softmax(logits), feed_dict={ x: x8, y: y8})
    print("En este juego gana O:", result)
    print("Este resultado gana O ?", np.argmax(result) == 2,)
    print("\n")
```
<table>
<tr>
<td width="60%"><p>Este juego de triqui gana el jugador con la O</p>
<b>`[0, 1, 1, 0, 0,-1, 0, 1, 1]`</b>
</td>
<td><img src="images/triqui9.png" width="100px"></td>
</tr>
</table>
```
#prediction
# NOTE(review): the session re-initializes all variables, so the trained
# weights from the training cell are not used here.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # x9 is the board where O wins; O corresponds to index 0 under the
    # training-data label encoding (y9 == 0).
    result = sess.run(tf.nn.softmax(logits), feed_dict={ x: x9, y: y9})
    print("En este juego gana O:", result)
    print("Este resultado gana O ?", np.argmax(result) == 0,)
    print("\n")
```
| github_jupyter |
<a href="https://colab.research.google.com/github/wileyw/DeepLearningDemos/blob/master/handwriting_generator/IBM_Transformer%2BTimeEmbedding.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Notebook
Original code from here: [code](https://github.com/JanSchm/CapMarket/blob/master/bot_experiments/IBM_Transformer%2BTimeEmbedding.ipynb)
Data from here: [data](https://finance.yahoo.com/quote/IBM/history?p=IBM)
Original post from here: [post](https://towardsdatascience.com/stock-predictions-with-state-of-the-art-transformer-and-time-embeddings-3a4485237de6)
We're just running the original code in this notebook
```
# Core numeric / deep-learning stack for the transformer notebook.
import numpy as np
import pandas as pd
import os, datetime
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
print('Tensorflow version: {}'.format(tf.__version__))
import matplotlib.pyplot as plt
plt.style.use('seaborn')
import warnings
# Silence warnings for cleaner notebook output.
warnings.filterwarnings('ignore')
```
## Hyperparameters
```
batch_size = 32  # samples per training batch
seq_len = 128    # length of each input window in trading days
d_k = 256        # width of attention query/key projections
d_v = 256        # width of attention value projections
n_heads = 12     # attention heads per encoder layer
ff_dim = 256     # hidden filters of the position-wise feed-forward Conv1D
```
## Load IBM data
```
!git clone https://github.com/wileyw/DeepLearningDemos.git
!ls DeepLearningDemos/handwriting_generator/IBM.csv
IBM_path = 'DeepLearningDemos/handwriting_generator/IBM.csv'
df = pd.read_csv(IBM_path, delimiter=',', usecols=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
# Replace 0 to avoid dividing by 0 later on
df['Volume'].replace(to_replace=0, method='ffill', inplace=True)
df.sort_values('Date', inplace=True)
df.tail()
```
## Plot daily IBM closing prices and volume
```
fig = plt.figure(figsize=(15,10))
st = fig.suptitle("IBM Close Price and Volume", fontsize=20)
st.set_y(0.92)
ax1 = fig.add_subplot(211)
ax1.plot(df['Close'], label='IBM Close Price')
ax1.set_xticks(range(0, df.shape[0], 1464))
ax1.set_xticklabels(df['Date'].loc[::1464])
ax1.set_ylabel('Close Price', fontsize=18)
ax1.legend(loc="upper left", fontsize=12)
ax2 = fig.add_subplot(212)
ax2.plot(df['Volume'], label='IBM Volume')
ax2.set_xticks(range(0, df.shape[0], 1464))
ax2.set_xticklabels(df['Date'].loc[::1464])
ax2.set_ylabel('Volume', fontsize=18)
ax2.legend(loc="upper left", fontsize=12)
```
## Calculate normalized percentage change of all columns
```
'''Calculate percentage change'''
df['Open'] = df['Open'].pct_change() # Create arithmetic returns column
df['High'] = df['High'].pct_change() # Create arithmetic returns column
df['Low'] = df['Low'].pct_change() # Create arithmetic returns column
df['Close'] = df['Close'].pct_change() # Create arithmetic returns column
df['Volume'] = df['Volume'].pct_change()
df.dropna(how='any', axis=0, inplace=True) # Drop all rows with NaN values
###############################################################################
'''Normalize price columns'''
# NOTE(review): min/max below (and min_volume/max_volume) are computed over the
# FULL series before the train/val/test split performed later, so the scaling
# leaks information from the test period into training. Consider fitting the
# normalization on the training split only.
min_return = min(df[['Open', 'High', 'Low', 'Close']].min(axis=0))
max_return = max(df[['Open', 'High', 'Low', 'Close']].max(axis=0))
# Min-max normalize price columns (0-1 range)
df['Open'] = (df['Open'] - min_return) / (max_return - min_return)
df['High'] = (df['High'] - min_return) / (max_return - min_return)
df['Low'] = (df['Low'] - min_return) / (max_return - min_return)
df['Close'] = (df['Close'] - min_return) / (max_return - min_return)
###############################################################################
'''Normalize volume column'''
min_volume = df['Volume'].min(axis=0)
max_volume = df['Volume'].max(axis=0)
# Min-max normalize volume columns (0-1 range)
df['Volume'] = (df['Volume'] - min_volume) / (max_volume - min_volume)
###############################################################################
'''Create training, validation and test split'''
times = sorted(df.index.values)
last_10pct = sorted(df.index.values)[-int(0.1*len(times))] # Last 10% of series
last_20pct = sorted(df.index.values)[-int(0.2*len(times))] # Last 20% of series
df_train = df[(df.index < last_20pct)] # Training data are 80% of total data
df_val = df[(df.index >= last_20pct) & (df.index < last_10pct)]
df_test = df[(df.index >= last_10pct)]
# Remove date column
df_train.drop(columns=['Date'], inplace=True)
df_val.drop(columns=['Date'], inplace=True)
df_test.drop(columns=['Date'], inplace=True)
# Convert pandas columns into arrays
train_data = df_train.values
val_data = df_val.values
test_data = df_test.values
print('Training data shape: {}'.format(train_data.shape))
print('Validation data shape: {}'.format(val_data.shape))
print('Test data shape: {}'.format(test_data.shape))
df_train.head()
```
## Plot daily changes of close prices and volume
```
fig = plt.figure(figsize=(15,12))
st = fig.suptitle("Data Separation", fontsize=20)
st.set_y(0.95)
###############################################################################
ax1 = fig.add_subplot(211)
ax1.plot(np.arange(train_data.shape[0]), df_train['Close'], label='Training data')
ax1.plot(np.arange(train_data.shape[0],
train_data.shape[0]+val_data.shape[0]), df_val['Close'], label='Validation data')
ax1.plot(np.arange(train_data.shape[0]+val_data.shape[0],
train_data.shape[0]+val_data.shape[0]+test_data.shape[0]), df_test['Close'], label='Test data')
ax1.set_xlabel('Date')
ax1.set_ylabel('Normalized Closing Returns')
ax1.set_title("Close Price", fontsize=18)
ax1.legend(loc="best", fontsize=12)
###############################################################################
ax2 = fig.add_subplot(212)
ax2.plot(np.arange(train_data.shape[0]), df_train['Volume'], label='Training data')
ax2.plot(np.arange(train_data.shape[0],
train_data.shape[0]+val_data.shape[0]), df_val['Volume'], label='Validation data')
ax2.plot(np.arange(train_data.shape[0]+val_data.shape[0],
train_data.shape[0]+val_data.shape[0]+test_data.shape[0]), df_test['Volume'], label='Test data')
ax2.set_xlabel('Date')
ax2.set_ylabel('Normalized Volume Changes')
ax2.set_title("Volume", fontsize=18)
ax2.legend(loc="best", fontsize=12)
```
## Create chunks of training, validation and test data
```
# Training data
X_train, y_train = [], []
for i in range(seq_len, len(train_data)):
X_train.append(train_data[i-seq_len:i]) # Chunks of training data with a length of 128 df-rows
y_train.append(train_data[:, 3][i]) #Value of 4th column (Close Price) of df-row 128+1
X_train, y_train = np.array(X_train), np.array(y_train)
###############################################################################
# Validation data
X_val, y_val = [], []
for i in range(seq_len, len(val_data)):
X_val.append(val_data[i-seq_len:i])
y_val.append(val_data[:, 3][i])
X_val, y_val = np.array(X_val), np.array(y_val)
###############################################################################
# Test data
X_test, y_test = [], []
for i in range(seq_len, len(test_data)):
X_test.append(test_data[i-seq_len:i])
y_test.append(test_data[:, 3][i])
X_test, y_test = np.array(X_test), np.array(y_test)
print('Training set shape', X_train.shape, y_train.shape)
print('Validation set shape', X_val.shape, y_val.shape)
print('Testing set shape' ,X_test.shape, y_test.shape)
```
## TimeVector
```
class Time2Vector(Layer):
    """Learned time embedding (Time2Vec): maps each timestep to one linear and
    one periodic (sine) feature derived from the mean of the first four input
    channels (the Open/High/Low/Close columns).

    Output shape: (batch, seq_len, 2).
    """

    def __init__(self, seq_len, **kwargs):
        super(Time2Vector, self).__init__()
        self.seq_len = seq_len

    def build(self, input_shape):
        """Create the four trainable vectors, each of shape (seq_len,)."""
        def new_vector(name):
            # All four parameters share the same shape/initializer settings.
            return self.add_weight(name=name,
                                   shape=(int(self.seq_len),),
                                   initializer='uniform',
                                   trainable=True)

        self.weights_linear = new_vector('weight_linear')
        self.bias_linear = new_vector('bias_linear')
        self.weights_periodic = new_vector('weight_periodic')
        self.bias_periodic = new_vector('bias_periodic')

    def call(self, x):
        """Compute the linear and periodic time features and stack them."""
        avg_price = tf.math.reduce_mean(x[:, :, :4], axis=-1)  # (batch, seq_len)
        linear = self.weights_linear * avg_price + self.bias_linear
        periodic = tf.math.sin(tf.multiply(avg_price, self.weights_periodic) + self.bias_periodic)
        # Give each feature a trailing channel axis, then concatenate them.
        return tf.concat([tf.expand_dims(linear, axis=-1),
                          tf.expand_dims(periodic, axis=-1)], axis=-1)

    def get_config(self):
        """Serialize constructor args so a saved model can be reloaded."""
        config = super().get_config().copy()
        config.update({'seq_len': self.seq_len})
        return config
```
## Transformer
```
class SingleAttention(Layer):
    """One scaled dot-product attention head.

    Called with a 3-tuple (query_seq, key_seq, value_seq); returns the
    attended values with feature dimension d_v.
    """

    def __init__(self, d_k, d_v):
        super(SingleAttention, self).__init__()
        self.d_k = d_k  # projection width for queries/keys
        self.d_v = d_v  # projection width for values

    def build(self, input_shape):
        # Query/key/value projections, created lazily once shapes are known.
        self.query = Dense(self.d_k,
                           input_shape=input_shape,
                           kernel_initializer='glorot_uniform',
                           bias_initializer='glorot_uniform')
        self.key = Dense(self.d_k,
                         input_shape=input_shape,
                         kernel_initializer='glorot_uniform',
                         bias_initializer='glorot_uniform')
        self.value = Dense(self.d_v,
                           input_shape=input_shape,
                           kernel_initializer='glorot_uniform',
                           bias_initializer='glorot_uniform')

    def call(self, inputs):  # inputs = (in_seq, in_seq, in_seq)
        q = self.query(inputs[0])
        k = self.key(inputs[1])
        attn_weights = tf.matmul(q, k, transpose_b=True)
        # Scale by sqrt(d_k) directly instead of the original tf.map_fn over
        # the batch dimension — identical result, without per-element mapping
        # overhead (broadcasting divides every element by the same scalar).
        attn_weights = attn_weights / np.sqrt(self.d_k)
        attn_weights = tf.nn.softmax(attn_weights, axis=-1)
        v = self.value(inputs[2])
        attn_out = tf.matmul(attn_weights, v)
        return attn_out
#############################################################################
class MultiAttention(Layer):
# Multi-head attention: n_heads independent SingleAttention heads whose
# outputs are concatenated and linearly projected back to the input width.
def __init__(self, d_k, d_v, n_heads):
super(MultiAttention, self).__init__()
self.d_k = d_k
self.d_v = d_v
self.n_heads = n_heads
self.attn_heads = list()
def build(self, input_shape):
# NOTE(review): heads are appended to a list created in __init__; if build()
# ever ran twice the list would keep growing — confirm build is called once.
for n in range(self.n_heads):
self.attn_heads.append(SingleAttention(self.d_k, self.d_v))
# input_shape[0]=(batch, seq_len, 7), input_shape[0][-1]=7
self.linear = Dense(input_shape[0][-1],
input_shape=input_shape,
kernel_initializer='glorot_uniform',
bias_initializer='glorot_uniform')
def call(self, inputs):
# Run every head on the same (q, k, v) tuple, then fuse the results with a
# final linear projection back to the input feature width.
attn = [self.attn_heads[i](inputs) for i in range(self.n_heads)]
concat_attn = tf.concat(attn, axis=-1)
multi_linear = self.linear(concat_attn)
return multi_linear
#############################################################################
class TransformerEncoder(Layer):
    """Single Transformer encoder block: multi-head self-attention followed by
    a position-wise feed-forward network (two Conv1D layers), each sub-block
    with dropout, a residual connection and layer normalization.
    """

    def __init__(self, d_k, d_v, n_heads, ff_dim, dropout=0.1, **kwargs):
        super(TransformerEncoder, self).__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.n_heads = n_heads
        self.ff_dim = ff_dim
        self.attn_heads = list()
        self.dropout_rate = dropout

    def build(self, input_shape):
        self.attn_multi = MultiAttention(self.d_k, self.d_v, self.n_heads)
        self.attn_dropout = Dropout(self.dropout_rate)
        self.attn_normalize = LayerNormalization(input_shape=input_shape, epsilon=1e-6)
        self.ff_conv1D_1 = Conv1D(filters=self.ff_dim, kernel_size=1, activation='relu')
        # input_shape[0]=(batch, seq_len, 7): project back to the input width
        self.ff_conv1D_2 = Conv1D(filters=input_shape[0][-1], kernel_size=1)
        self.ff_dropout = Dropout(self.dropout_rate)
        self.ff_normalize = LayerNormalization(input_shape=input_shape, epsilon=1e-6)

    def call(self, inputs):  # inputs = (in_seq, in_seq, in_seq)
        attn_layer = self.attn_multi(inputs)
        attn_layer = self.attn_dropout(attn_layer)
        attn_layer = self.attn_normalize(inputs[0] + attn_layer)
        ff_layer = self.ff_conv1D_1(attn_layer)
        ff_layer = self.ff_conv1D_2(ff_layer)
        ff_layer = self.ff_dropout(ff_layer)
        # NOTE(review): the second residual uses inputs[0] (the block input),
        # not attn_layer — kept as-is; confirm this matches the intended
        # architecture before changing it.
        ff_layer = self.ff_normalize(inputs[0] + ff_layer)
        return ff_layer

    def get_config(self):
        # Needed for saving and loading the model with this custom layer.
        # Fixes vs original: the dropout rate is stored under the __init__
        # argument name ('dropout') so it survives a reload — the old
        # 'dropout_rate' key was silently discarded into **kwargs, resetting
        # the rate to 0.1. Also dropped 'attn_heads': a list of Layer objects
        # is not JSON-serializable and broke model saving.
        config = super().get_config().copy()
        config.update({'d_k': self.d_k,
                       'd_v': self.d_v,
                       'n_heads': self.n_heads,
                       'ff_dim': self.ff_dim,
                       'dropout': self.dropout_rate})
        return config
```
## Model
```
def create_model():
# Build and compile the regression model: Time2Vector embedding + 3 stacked
# Transformer encoders + pooled dense head predicting the next normalized
# close return (single linear output, MSE loss).
'''Initialize time and transformer layers'''
time_embedding = Time2Vector(seq_len)
attn_layer1 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
attn_layer2 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
attn_layer3 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
'''Construct model'''
# 5 input features per timestep: Open, High, Low, Close, Volume
in_seq = Input(shape=(seq_len, 5))
x = time_embedding(in_seq)
# append the 2 learned time features -> 7 channels total
x = Concatenate(axis=-1)([in_seq, x])
x = attn_layer1((x, x, x))
x = attn_layer2((x, x, x))
x = attn_layer3((x, x, x))
# NOTE(review): data_format='channels_first' makes the pooling average over the
# last axis (the 7 features) rather than over time — confirm this is intended.
x = GlobalAveragePooling1D(data_format='channels_first')(x)
x = Dropout(0.1)(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.1)(x)
out = Dense(1, activation='linear')(x)
model = Model(inputs=in_seq, outputs=out)
model.compile(loss='mse', optimizer='adam', metrics=['mae', 'mape'])
return model
model = create_model()
model.summary()
callback = tf.keras.callbacks.ModelCheckpoint('Transformer+TimeEmbedding.hdf5',
monitor='val_loss',
save_best_only=True, verbose=1)
history = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=35,
callbacks=[callback],
validation_data=(X_val, y_val))
model = tf.keras.models.load_model('/content/Transformer+TimeEmbedding.hdf5',
custom_objects={'Time2Vector': Time2Vector,
'SingleAttention': SingleAttention,
'MultiAttention': MultiAttention,
'TransformerEncoder': TransformerEncoder})
###############################################################################
'''Calculate predictions and metrics'''
#Calculate predictions for training, validation and test data
train_pred = model.predict(X_train)
val_pred = model.predict(X_val)
test_pred = model.predict(X_test)
#Print evaluation metrics for all datasets
train_eval = model.evaluate(X_train, y_train, verbose=0)
val_eval = model.evaluate(X_val, y_val, verbose=0)
test_eval = model.evaluate(X_test, y_test, verbose=0)
print(' ')
print('Evaluation metrics')
print('Training Data - Loss: {:.4f}, MAE: {:.4f}, MAPE: {:.4f}'.format(train_eval[0], train_eval[1], train_eval[2]))
print('Validation Data - Loss: {:.4f}, MAE: {:.4f}, MAPE: {:.4f}'.format(val_eval[0], val_eval[1], val_eval[2]))
print('Test Data - Loss: {:.4f}, MAE: {:.4f}, MAPE: {:.4f}'.format(test_eval[0], test_eval[1], test_eval[2]))
###############################################################################
'''Display results'''
fig = plt.figure(figsize=(15,20))
st = fig.suptitle("Transformer + TimeEmbedding Model", fontsize=22)
st.set_y(0.92)
#Plot training data results
ax11 = fig.add_subplot(311)
ax11.plot(train_data[:, 3], label='IBM Closing Returns')
ax11.plot(np.arange(seq_len, train_pred.shape[0]+seq_len), train_pred, linewidth=3, label='Predicted IBM Closing Returns')
ax11.set_title("Training Data", fontsize=18)
ax11.set_xlabel('Date')
ax11.set_ylabel('IBM Closing Returns')
ax11.legend(loc="best", fontsize=12)
#Plot validation data results
ax21 = fig.add_subplot(312)
ax21.plot(val_data[:, 3], label='IBM Closing Returns')
ax21.plot(np.arange(seq_len, val_pred.shape[0]+seq_len), val_pred, linewidth=3, label='Predicted IBM Closing Returns')
ax21.set_title("Validation Data", fontsize=18)
ax21.set_xlabel('Date')
ax21.set_ylabel('IBM Closing Returns')
ax21.legend(loc="best", fontsize=12)
#Plot test data results
ax31 = fig.add_subplot(313)
ax31.plot(test_data[:, 3], label='IBM Closing Returns')
ax31.plot(np.arange(seq_len, test_pred.shape[0]+seq_len), test_pred, linewidth=3, label='Predicted IBM Closing Returns')
ax31.set_title("Test Data", fontsize=18)
ax31.set_xlabel('Date')
ax31.set_ylabel('IBM Closing Returns')
ax31.legend(loc="best", fontsize=12)
```
## Model metrics
```
'''Display model metrics'''
fig = plt.figure(figsize=(15,20))
st = fig.suptitle("Transformer + TimeEmbedding Model Metrics", fontsize=22)
st.set_y(0.92)
#Plot model loss
ax1 = fig.add_subplot(311)
ax1.plot(history.history['loss'], label='Training loss (MSE)')
ax1.plot(history.history['val_loss'], label='Validation loss (MSE)')
ax1.set_title("Model loss", fontsize=18)
ax1.set_xlabel('Epoch')
ax1.set_ylabel('Loss (MSE)')
ax1.legend(loc="best", fontsize=12)
#Plot MAE
ax2 = fig.add_subplot(312)
ax2.plot(history.history['mae'], label='Training MAE')
ax2.plot(history.history['val_mae'], label='Validation MAE')
ax2.set_title("Model metric - Mean average error (MAE)", fontsize=18)
ax2.set_xlabel('Epoch')
ax2.set_ylabel('Mean average error (MAE)')
ax2.legend(loc="best", fontsize=12)
#Plot MAPE
ax3 = fig.add_subplot(313)
ax3.plot(history.history['mape'], label='Training MAPE')
ax3.plot(history.history['val_mape'], label='Validation MAPE')
ax3.set_title("Model metric - Mean average percentage error (MAPE)", fontsize=18)
ax3.set_xlabel('Epoch')
ax3.set_ylabel('Mean average percentage error (MAPE)')
ax3.legend(loc="best", fontsize=12)
```
## Model architecture overview
```
tf.keras.utils.plot_model(
model,
to_file="IBM_Transformer+TimeEmbedding.png",
show_shapes=True,
show_layer_names=True,
expand_nested=True,
dpi=96,)
```
# Moving Average
## Moving Average - Load IBM data again, to apply rolling window
```
IBM_path = 'DeepLearningDemos/handwriting_generator/IBM.csv'
df = pd.read_csv(IBM_path, delimiter=',', usecols=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
# Replace 0 to avoid dividing by 0 later on
df['Volume'].replace(to_replace=0, method='ffill', inplace=True)
df.sort_values('Date', inplace=True)
# Apply moving average with a window of 10 days to all columns
df[['Open', 'High', 'Low', 'Close', 'Volume']] = df[['Open', 'High', 'Low', 'Close', 'Volume']].rolling(10).mean()
# Drop all rows with NaN values
df.dropna(how='any', axis=0, inplace=True)
df.head()
```
## Moving Average - Plot daily IBM closing prices and volume
```
fig = plt.figure(figsize=(15,10))
st = fig.suptitle("IBM Close Price and Volume", fontsize=20)
st.set_y(0.92)
ax1 = fig.add_subplot(211)
ax1.plot(df['Close'], label='IBM Close Price')
ax1.set_xticks(range(0, df.shape[0], 1464))
ax1.set_xticklabels(df['Date'].loc[::1464])
ax1.set_ylabel('Close Price', fontsize=18)
ax1.legend(loc="upper left", fontsize=12)
ax2 = fig.add_subplot(212)
ax2.plot(df['Volume'], label='IBM Volume')
ax2.set_xticks(range(0, df.shape[0], 1464))
ax2.set_xticklabels(df['Date'].loc[::1464])
ax2.set_ylabel('Volume', fontsize=18)
ax2.legend(loc="upper left", fontsize=12)
```
## Moving Average - Calculate normalized percentage change for all columns
```
'''Calculate percentage change'''
df['Open'] = df['Open'].pct_change() # Create arithmetic returns column
df['High'] = df['High'].pct_change() # Create arithmetic returns column
df['Low'] = df['Low'].pct_change() # Create arithmetic returns column
df['Close'] = df['Close'].pct_change() # Create arithmetic returns column
df['Volume'] = df['Volume'].pct_change()
df.dropna(how='any', axis=0, inplace=True) # Drop all rows with NaN values
###############################################################################
'''Normalize price columns'''
min_return = min(df[['Open', 'High', 'Low', 'Close']].min(axis=0))
max_return = max(df[['Open', 'High', 'Low', 'Close']].max(axis=0))
# Min-max normalize price columns (0-1 range)
df['Open'] = (df['Open'] - min_return) / (max_return - min_return)
df['High'] = (df['High'] - min_return) / (max_return - min_return)
df['Low'] = (df['Low'] - min_return) / (max_return - min_return)
df['Close'] = (df['Close'] - min_return) / (max_return - min_return)
###############################################################################
'''Normalize volume column'''
min_volume = df['Volume'].min(axis=0)
max_volume = df['Volume'].max(axis=0)
# Min-max normalize volume columns (0-1 range)
df['Volume'] = (df['Volume'] - min_volume) / (max_volume - min_volume)
###############################################################################
'''Create training, validation and test split'''
times = sorted(df.index.values)
last_10pct = sorted(df.index.values)[-int(0.1*len(times))] # Last 10% of series
last_20pct = sorted(df.index.values)[-int(0.2*len(times))] # Last 20% of series
df_train = df[(df.index < last_20pct)] # Training data are 80% of total data
df_val = df[(df.index >= last_20pct) & (df.index < last_10pct)]
df_test = df[(df.index >= last_10pct)]
# Remove date column
df_train.drop(columns=['Date'], inplace=True)
df_val.drop(columns=['Date'], inplace=True)
df_test.drop(columns=['Date'], inplace=True)
# Convert pandas columns into arrays
train_data = df_train.values
val_data = df_val.values
test_data = df_test.values
print('Training data shape: {}'.format(train_data.shape))
print('Validation data shape: {}'.format(val_data.shape))
print('Test data shape: {}'.format(test_data.shape))
df_train.head()
```
## Moving Average - Plot daily changes of close price and volume
```
fig = plt.figure(figsize=(15,12))
st = fig.suptitle("Data Separation", fontsize=20)
st.set_y(0.95)
###############################################################################
ax1 = fig.add_subplot(211)
ax1.plot(np.arange(train_data.shape[0]), df_train['Close'], label='Training data')
ax1.plot(np.arange(train_data.shape[0],
train_data.shape[0]+val_data.shape[0]), df_val['Close'], label='Validation data')
ax1.plot(np.arange(train_data.shape[0]+val_data.shape[0],
train_data.shape[0]+val_data.shape[0]+test_data.shape[0]), df_test['Close'], label='Test data')
ax1.set_xlabel('Date')
ax1.set_ylabel('Normalized Closing Returns')
ax1.set_title("Close Price", fontsize=18)
ax1.legend(loc="best", fontsize=12)
###############################################################################
ax2 = fig.add_subplot(212)
ax2.plot(np.arange(train_data.shape[0]), df_train['Volume'], label='Training data')
ax2.plot(np.arange(train_data.shape[0],
train_data.shape[0]+val_data.shape[0]), df_val['Volume'], label='Validation data')
ax2.plot(np.arange(train_data.shape[0]+val_data.shape[0],
train_data.shape[0]+val_data.shape[0]+test_data.shape[0]), df_test['Volume'], label='Test data')
ax2.set_xlabel('Date')
ax2.set_ylabel('Normalized Volume Changes')
ax2.set_title("Volume", fontsize=18)
ax2.legend(loc="best", fontsize=12)
```
## Moving Average - Create chunks of training, validation, and test data
```
# Training data
X_train, y_train = [], []
for i in range(seq_len, len(train_data)):
X_train.append(train_data[i-seq_len:i]) # Chunks of training data with a length of 128 df-rows
y_train.append(train_data[:, 3][i]) #Value of 4th column (Close Price) of df-row 128+1
X_train, y_train = np.array(X_train), np.array(y_train)
###############################################################################
# Validation data
X_val, y_val = [], []
for i in range(seq_len, len(val_data)):
X_val.append(val_data[i-seq_len:i])
y_val.append(val_data[:, 3][i])
X_val, y_val = np.array(X_val), np.array(y_val)
###############################################################################
# Test data
X_test, y_test = [], []
for i in range(seq_len, len(test_data)):
X_test.append(test_data[i-seq_len:i])
y_test.append(test_data[:, 3][i])
X_test, y_test = np.array(X_test), np.array(y_test)
print(X_train.shape, y_train.shape)
print(X_val.shape, y_val.shape)
```
## Moving Average - Model
```
def create_model():
# Identical architecture to the non-smoothed run: Time2Vector embedding,
# three Transformer encoders, pooled dense head with one linear output (MSE).
# Here it is retrained on the 10-day moving-average version of the data.
'''Initialize time and transformer layers'''
time_embedding = Time2Vector(seq_len)
attn_layer1 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
attn_layer2 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
attn_layer3 = TransformerEncoder(d_k, d_v, n_heads, ff_dim)
'''Construct model'''
# 5 input features per timestep: Open, High, Low, Close, Volume
in_seq = Input(shape=(seq_len, 5))
x = time_embedding(in_seq)
# concatenating the 2 time features gives 7 channels in total
x = Concatenate(axis=-1)([in_seq, x])
x = attn_layer1((x, x, x))
x = attn_layer2((x, x, x))
x = attn_layer3((x, x, x))
x = GlobalAveragePooling1D(data_format='channels_first')(x)
x = Dropout(0.1)(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.1)(x)
out = Dense(1, activation='linear')(x)
model = Model(inputs=in_seq, outputs=out)
model.compile(loss='mse', optimizer='adam', metrics=['mae', 'mape'])
return model
model = create_model()
model.summary()
callback = tf.keras.callbacks.ModelCheckpoint('Transformer+TimeEmbedding.hdf5',
monitor='val_loss',
save_best_only=True,
verbose=1)
history = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=35,
callbacks=[callback],
validation_data=(X_val, y_val))
model = tf.keras.models.load_model('/content/Transformer+TimeEmbedding.hdf5',
custom_objects={'Time2Vector': Time2Vector,
'SingleAttention': SingleAttention,
'MultiAttention': MultiAttention,
'TransformerEncoder': TransformerEncoder})
###############################################################################
'''Calculate predictions and metrics'''
#Calculate predictions for training, validation and test data
train_pred = model.predict(X_train)
val_pred = model.predict(X_val)
test_pred = model.predict(X_test)
#Print evaluation metrics for all datasets
train_eval = model.evaluate(X_train, y_train, verbose=0)
val_eval = model.evaluate(X_val, y_val, verbose=0)
test_eval = model.evaluate(X_test, y_test, verbose=0)
print(' ')
print('Evaluation metrics')
print('Training Data - Loss: {:.4f}, MAE: {:.4f}, MAPE: {:.4f}'.format(train_eval[0], train_eval[1], train_eval[2]))
print('Validation Data - Loss: {:.4f}, MAE: {:.4f}, MAPE: {:.4f}'.format(val_eval[0], val_eval[1], val_eval[2]))
print('Test Data - Loss: {:.4f}, MAE: {:.4f}, MAPE: {:.4f}'.format(test_eval[0], test_eval[1], test_eval[2]))
###############################################################################
'''Display results'''
fig = plt.figure(figsize=(15,20))
st = fig.suptitle("Moving Average - Transformer + TimeEmbedding Model", fontsize=22)
st.set_y(0.92)
#Plot training data results
ax11 = fig.add_subplot(311)
ax11.plot(train_data[:, 3], label='IBM Closing Returns')
ax11.plot(np.arange(seq_len, train_pred.shape[0]+seq_len), train_pred, linewidth=3, label='Predicted IBM Closing Returns')
ax11.set_title("Training Data", fontsize=18)
ax11.set_xlabel('Date')
ax11.set_ylabel('IBM Closing Returns')
ax11.legend(loc="best", fontsize=12)
#Plot validation data results
ax21 = fig.add_subplot(312)
ax21.plot(val_data[:, 3], label='IBM Closing Returns')
ax21.plot(np.arange(seq_len, val_pred.shape[0]+seq_len), val_pred, linewidth=3, label='Predicted IBM Closing Returns')
ax21.set_title("Validation Data", fontsize=18)
ax21.set_xlabel('Date')
ax21.set_ylabel('IBM Closing Returns')
ax21.legend(loc="best", fontsize=12)
#Plot test data results
ax31 = fig.add_subplot(313)
ax31.plot(test_data[:, 3], label='IBM Closing Returns')
ax31.plot(np.arange(seq_len, test_pred.shape[0]+seq_len), test_pred, linewidth=3, label='Predicted IBM Closing Returns')
ax31.set_title("Test Data", fontsize=18)
ax31.set_xlabel('Date')
ax31.set_ylabel('IBM Closing Returns')
ax31.legend(loc="best", fontsize=12)
```
## Moving Average - Model metrics
```
'''Display model metrics'''
fig = plt.figure(figsize=(15,20))
st = fig.suptitle("Moving Average - Transformer + TimeEmbedding Model Metrics", fontsize=22)
st.set_y(0.92)
#Plot model loss
ax1 = fig.add_subplot(311)
ax1.plot(history.history['loss'], label='Training loss (MSE)')
ax1.plot(history.history['val_loss'], label='Validation loss (MSE)')
ax1.set_title("Model loss", fontsize=18)
ax1.set_xlabel('Epoch')
ax1.set_ylabel('Loss (MSE)')
ax1.legend(loc="best", fontsize=12)
#Plot MAE
ax2 = fig.add_subplot(312)
ax2.plot(history.history['mae'], label='Training MAE')
ax2.plot(history.history['val_mae'], label='Validation MAE')
ax2.set_title("Model metric - Mean average error (MAE)", fontsize=18)
ax2.set_xlabel('Epoch')
ax2.set_ylabel('Mean average error (MAE)')
ax2.legend(loc="best", fontsize=12)
#Plot MAPE
ax3 = fig.add_subplot(313)
ax3.plot(history.history['mape'], label='Training MAPE')
ax3.plot(history.history['val_mape'], label='Validation MAPE')
ax3.set_title("Model metric - Mean average percentage error (MAPE)", fontsize=18)
ax3.set_xlabel('Epoch')
ax3.set_ylabel('Mean average percentage error (MAPE)')
ax3.legend(loc="best", fontsize=12)
```
| github_jupyter |
<span style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">An Exception was encountered at '<a href="#papermill-error-cell">In [2]</a>'.</span>
```
# Parameters
msgs = "Ran from Airflow at 2022-03-20T18:04:11.892055+00:00!"
```
<span id="papermill-error-cell" style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">Execution using papermill encountered an exception here and stopped:</span>
```
import os
import scipy
import numpy as np
import pandas as pd
from pathlib import Path
# pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
from sklearn import set_config
set_config(display = 'diagram')
from pandas.api.types import infer_dtype
# Scikit Learn import
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn.decomposition import PCA
from sklearn.svm import OneClassSVM
from sklearn.pipeline import FeatureUnion
# from imblearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split, TimeSeriesSplit
from sklearn.preprocessing import RobustScaler, LabelEncoder
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import FeatureUnion
from sklearn import cluster
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import OrdinalEncoder, MinMaxScaler, QuantileTransformer
from sklearn.impute import SimpleImputer
from sklearn.multioutput import MultiOutputRegressor
# Category Encoder
import category_encoders as ce
# pyod
from pyod.models.abod import ABOD
from pyod.models.cblof import CBLOF
from pyod.models.cof import COF
from pyod.models.iforest import IForest
from pyod.models.hbos import HBOS
from pyod.models.knn import KNN
from pyod.models.lof import LOF
from pyod.models.ocsvm import OCSVM
from pyod.models.pca import PCA as PCA_pyod
from pyod.models.mcd import MCD
from pyod.models.sod import SOD
from pyod.models.sos import SOS
# model
from lightgbm import LGBMClassifier, LGBMRegressor
from xgboost import XGBClassifier, XGBRegressor
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier, CatBoostRegressor
from imblearn.over_sampling import SMOTE
import re
import vnquant.DataLoader as dl
from datetime import datetime, timedelta
import pytz
from pathlib import Path
init_date = '2020-01-01'
start_date = '2021-01-01'
end_date = '2022-03-20'
symbols = ["HSG", "HPG", "NKG"]
def get_data(symbol, start_date = start_date, end_date = end_date):
"""Download OHLCV data for one ticker via vnquant and append 1-5 day-ahead
close-price targets (the final 5 rows get NaN targets, no future close).
Note: the defaults capture the module-level start_date/end_date at def time.
"""
loader = dl.DataLoader(symbols=[symbol], start=start_date, end=end_date, minimal=True)
data = loader.download()
# flatten columns to their first level — presumably a (field, symbol)
# MultiIndex from vnquant; verify against the loader's output format
data.columns = [col[0] for col in data.columns]
data = data.reset_index()
data['symbol'] = symbol
# future closes as regression targets: shift(-k) puts day t+k's close on row t
data['target_day_1'] = data.close.shift(-1)
data['target_day_2'] = data.close.shift(-2)
data['target_day_3'] = data.close.shift(-3)
data['target_day_4'] = data.close.shift(-4)
data['target_day_5'] = data.close.shift(-5)
return data
df = pd.concat([get_data(symbol) for symbol in symbols], axis = 0)
df = df.sort_values(by=['date'])
df.tail(10)
numeric_columns = ["high", "low", "open", "close", "volume"]
category_columns = ['symbol']
time_columns = ['date']
feature_columns = numeric_columns+category_columns+time_columns
target_columns = ['target_day_1', 'target_day_2', 'target_day_3', 'target_day_4', 'target_day_5']
# X = df.loc[df[target_columns].isnull().any(axis = 1)].drop(columns=target_columns)
# y = df.loc[df[target_columns].notnull().any(axis = 1)][target_columns]
# test = df.loc[df[target_columns].isnull().any(axis = 1)]
class TimePreprocess(BaseEstimator, TransformerMixin):
    """Sklearn-compatible transformer that expands datetime-like columns into
    numeric calendar features.

    Parameters
    ----------
    list_of_features : list of str
        Features to derive; any subset of {"day", "dayofweek", "month"}.
        (Kept as a mutable default to preserve get_params/clone behavior.)
    """

    def __init__(self, list_of_features=["day", "dayofweek", "month"],):
        super().__init__()
        self.time_features = []
        self.list_of_features = list_of_features

    def fit(self, X, y = None):
        """Record which columns of X can be parsed as datetimes."""
        # Reset so repeated fits don't accumulate stale column names.
        self.time_features = []
        for col in X.columns:
            try:
                # Probe only — unlike the original, fit no longer mutates the
                # caller's X; transform re-parses the column anyway.
                pd.to_datetime(X[col])
                self.time_features.append(col)
            except (ValueError, TypeError):
                # Fix vs original: `logger` was never defined, so reaching this
                # branch raised NameError instead of logging the problem.
                import logging
                logging.getLogger(__name__).error(
                    "Column %r cannot be converted to datetime", col)
        return self

    def transform(self, X):
        """Return a DataFrame containing only the derived calendar columns."""
        columns = []
        for col in self.time_features:
            X[col] = pd.to_datetime(X[col])
            if 'day' in self.list_of_features:
                columns.append(col + '_day')
                X[columns[-1]] = X[col].dt.day
            if 'month' in self.list_of_features:
                columns.append(col + '_month')
                X[columns[-1]] = X[col].dt.month
            if 'dayofweek' in self.list_of_features:
                columns.append(col + '_dayofweek')
                X[columns[-1]] = X[col].dt.dayofweek
        return X[columns]

    def fit_transform(self, X, y = None):
        self.fit(X, y)
        return self.transform(X)
# Numeric branch: constant-fill missing values, then rank-based scaling.
numeric_preprocess = Pipeline(steps = [
    ('imputer', SimpleImputer(strategy='constant', fill_value=0)),
    ('scaler_quantile', QuantileTransformer()),
    # ('scaler', MinMaxScaler())
])
# Categorical branch: mode-fill then one-hot encode.
category_preprocess = Pipeline(steps = [
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('encoder', ce.OneHotEncoder(use_cat_names = True)),
])
# Route each column group to its dedicated branch.
preprocessing = ColumnTransformer(transformers=[
    ('numeric_preprocess', numeric_preprocess, numeric_columns),
    ('category_columns', category_preprocess, category_columns),
    ('time_preprocess', TimePreprocess(), time_columns),
    # ('hold_feature', HoldFeature(), hold_columns),
    ],
    n_jobs = -1
)
# Full model: preprocessing followed by a gradient-boosted regressor.
pipe = Pipeline(steps = [
    ('preprocessing', preprocessing),
    ('estimator', CatBoostRegressor())
])
# pipe.fit(X, y)
# Grid currently searches over a single estimator; alternatives are stubbed out.
params = {
    'estimator': [CatBoostRegressor(verbose = 0),
                  # LGBMRegressor(verbose = 0),
                  # XGBRegressor()
                  ]
}
# Time-ordered CV to avoid look-ahead leakage in the price series.
grid = GridSearchCV(estimator = pipe,
                    param_grid = params,
                    scoring = 'neg_mean_squared_error',
                    cv = TimeSeriesSplit(n_splits=5),
                    n_jobs = -1
                    )
# Fit one model per horizon; rows with a known target train the model,
# rows with a missing target (the last days) receive predictions in place.
for target_col in target_columns:
    X = df.loc[df[target_col].notnull(), feature_columns]
    y = df.loc[df[target_col].notnull(), target_col]
    grid.fit(X, y)
    print(grid.cv_results_['mean_test_score'])
    X_test = df.loc[df[target_col].isnull(), feature_columns]
    # NOTE(review): writes predictions back into df, so later horizons see
    # the same rows -- confirm this in-place fill is intended.
    df.loc[df[target_col].isnull(), target_col] = grid.best_estimator_.predict(X_test)
df.tail()
grid.cv_results_
# X_test still refers to the last horizon's prediction rows.
X_test['target'] = grid.best_estimator_.predict(X_test)
X_test
```
| github_jupyter |
## Thermally driven Convection -pt 2
Analysis of the convection run, and more advanced behaviour
**New concepts:** Advection-diffusion solver template, thermal boundary conditions, Rayleigh number, analysis functions, interpolation
**NOTE:** I saved all the python setup of the previous notebook in a file so we don't need to type it again. We just do this:
```
%run "090-RayleighBenardConvection-setup.py"
# Ready ?
solver.solve()
# plot figure
figtemp = glucifer.Figure( figsize=(800,400) )
figtemp.append( glucifer.objects.Surface(mesh, temperatureField, colours="blue white red") )
figtemp.append( glucifer.objects.VectorArrows(mesh, velocityField/100.0, arrowHead=0.2, scaling=0.1) )
figtemp.show()
print Ra
# (YES !)
```
## Functions
Underworld defines a special class of parallel-safe, _lazy_ functions. These are used by the finite element routines to build viscosity, buoyancy etc at all the sample points in the domain. They are only evaluated when needed (hence *lazy*) and it is the **definition** which we store, not the values. This is important ...
The buoyancy function is a uw vector function, so is the temperature (why ?):
``` python
# Rayleigh number.
Ra = 1.0e4
# Construct a density function.
densityFn = Ra * temperatureField
# Define our vertical (upward) unit vector using a python tuple (this will be automatically converted to a function).
z_hat = ( 0.0, 1.0 )
# Now create a buoyancy force vector using the density and the vertical unit vector.
buoyancyFn = densityFn * z_hat
```
Watch this:
```
## Buoyancy Function and lazy evaluation
location = (1.0,0.0)
print Ra
print buoyancyFn.evaluate(location)
print temperatureField.evaluate(location)
Ra = 1.0e8
print Ra
print buoyancyFn.evaluate(location)
print temperatureField.evaluate(location)
```
An alternative way to do this is to use the `constant` function which is more like a parameter-setting function.
```
# Rayleigh number.
Ra = fn.misc.constant(1.0e4)
# Construct a density function.
densityFn = Ra * temperatureField
# Define our vertical (upward) unit vector using a python tuple (this will be automatically converted to a function).
z_hat = ( 0.0, 1.0 )
# Now create a buoyancy force vector using the density and the vertical unit vector.
buoyancyFn = densityFn * z_hat
print buoyancyFn.evaluate(location)
print temperatureField.evaluate(location)
Ra.value = 1.0e8
print buoyancyFn.evaluate(location)
print temperatureField.evaluate(location)
```
This saves us from having to rebuild the buoyancy force term and re-load it into the template every time we need to change the Rayleigh number.
**NOTE:** if we want to use this now, we have to reload it into the template !!
```
stokes.fn_bodyforce = buoyancyFn
```
We can see if that worked ...
```
time = 0.
step = 0
steps_end = 200
while step < steps_end:
# Solve for the velocity field given the current temperature field.
solver.solve()
# Retrieve the maximum possible timestep for the AD system.
dt = advDiff.get_max_dt()
if step == 0:
dt = 0.
# Advect using this timestep size
advDiff.integrate(dt)
step += 1
```
**Plot final temperature and velocity field**
```
# plot figure
figtemp = glucifer.Figure( figsize=(800,400) )
figtemp.append( glucifer.objects.Surface(mesh, temperatureField, colours="blue white red") )
figtemp.append( glucifer.objects.VectorArrows(mesh, velocityField/100.0, arrowHead=0.2, scaling=0.001) )
figtemp.show()
# Global max min
print fn.view.min_max(temperatureField)
# This is a function that will return the global max / min of the function's last evaluation
# THIS HAS to be done in two steps: evaluate, then ask for max / min
T_maxmin_fn = fn.view.min_max(temperatureField)
T_maxmin_fn.evaluate(mesh)
print T_maxmin_fn.min_global(), T_maxmin_fn.max_global()
```
**Nusselt number**
The Nusselt number is the ratio between convective and conductive heat transfer. In the R-B convection example, that means
\\[
Nu = -h \frac{ \int_0^l \partial_z T (x, z=h) dx}{ \int_0^l T (x, z=0) dx}
\\]
```
nuNumerator = uw.utils.Integral( fn=temperatureField.fn_gradient[1],
mesh=mesh, integrationType='Surface',
surfaceIndexSet=mesh.specialSets["MaxJ_VertexSet"])
nuDenominator = uw.utils.Integral( fn=temperatureField,
mesh=mesh, integrationType='Surface',
surfaceIndexSet=mesh.specialSets["MinJ_VertexSet"])
Nusselt = -1.0 * nuNumerator.evaluate()[0] / nuDenominator.evaluate()[0]
print Nusselt
```
The root mean squared velocity is defined by integrating over the entire simulation domain via
\\[
\begin{aligned}
v_{rms} = \sqrt{ \frac{ \int_V (\mathbf{v}.\mathbf{v}) dV } {\int_V dV} }
\end{aligned}
\\]
where $dV$ denotes an elemental volume (area) of the box.
```
int_vdotv_dV = uw.utils.Integral( fn.math.dot( velocityField, velocityField ), mesh )
int_1_dV = uw.utils.Integral( 1.0 , mesh )
Vrms = int_vdotv_dV.evaluate()[0] / int_1_dV.evaluate()[0]
print "Volume (Area) of the mesh = ", int_1_dV.evaluate()[0]
print "Vrms = ", Vrms
def Vrms_fn():
    """Return the domain-averaged v.v from the module-level integrals.

    NOTE(review): the v_rms formula in the text above includes a square
    root; this returns the mean square instead -- confirm which is intended.
    """
    Vrms = int_vdotv_dV.evaluate()[0] / int_1_dV.evaluate()[0]
    return Vrms
def Nu_fn():
    """Return the Nusselt number: -(top-surface dT/dz integral) / (bottom-surface T integral)."""
    Nusselt = -1.0 * nuNumerator.evaluate()[0] / nuDenominator.evaluate()[0]
    return Nusselt
```
## Exercise - role of aspect ratio on convection
Compute the Nusselt / Vrms values for one Ra but different aspect ratio.
At an Ra of $10^4$ a perturbation to the initial geometry can create a
stable set of 1, 2 or 3 cells.
How does this influence the Vrms / Nu ?
```
Ra.value = 1.0e4
cells = 1.0
pertStrength = 0.2
deltaTemp = tempMax - tempMin
for index, coord in enumerate(Tmesh.data):
pertCoeff = math.cos( cells / 2.0 * math.pi * coord[0] ) * math.sin( math.pi * coord[1] )
temperatureField.data[index] = tempMin + deltaTemp*(boxHeight - coord[1]) + pertStrength * pertCoeff
temperatureField.data[index] = max(tempMin, min(tempMax, temperatureField.data[index]))
for index in Tmesh.specialSets["MinJ_VertexSet"]:
temperatureField.data[index] = tempMax
for index in Tmesh.specialSets["MaxJ_VertexSet"]:
temperatureField.data[index] = tempMin
glucifer_filename = "Ra1e4-1cell"
store = glucifer.Store(filename=glucifer_filename)
frames = glucifer.Figure(name="1", store=store, figsize=(800,400))
frames.append( glucifer.objects.Surface(Tmesh, temperatureField, colours="blue white red") )
frames.append( glucifer.objects.VectorArrows(mesh, velocityField, arrowHead=0.2, scaling=1.0/Ra.value) )
time = 0.
step = 0
steps_end = 500
Nusselt_t = np.empty(steps_end)
Vrms_t = np.empty(steps_end)
times_t = np.empty(steps_end)
while step < steps_end:
# Solve for the velocity field given the current temperature field.
solver.solve()
# Retrieve the maximum possible timestep for the AD system.
dt = advDiff.get_max_dt()
if step == 0:
dt = 0.
# Advect using this timestep size
advDiff.integrate(dt) ## <- The explicit timestep computations
time += dt
Nusselt_t[step] = Nu_fn()
Vrms_t[step] = Vrms_fn()
times_t[step] = time
if step%5 == 0:
store.step=step
frames.save()
print("Step = {} | time = {} | Nusselt = {} | Vrms = {}".format(step, times_t[step],
Nusselt_t[step], Vrms_t[step] ))
step += 1
view = glucifer.lavavu.Viewer(database="{}.gldb".format(glucifer_filename))
# print(view.steps)
view.control.Panel()
view.control.Checkbox(property='axis')
view.control.ObjectList()
view.control.TimeStepper()
view.control.show()
## Make a plot of Nu / Vrms against time
%matplotlib inline
from matplotlib import pyplot as plt
graph = plt.Figure(figsize=(10,5))
ax = plt.subplot(111)
ax.plot(times_t, Nusselt_t)
ax.plot(times_t, Vrms_t*0.001)
print("Nusselt_no at final state - {}".format(Nusselt_t[-1]))
print("Vrms at final state - {}".format(Vrms_t[-1]))
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the mammographic-mass dataset; '?' marks missing values in the raw file.
file = pd.read_csv("mamographic.csv",sep=',', na_values=["?"])
print(file.tail())
# Quick missing-data audit: any NaNs at all, and in which columns.
file.isnull().values.any()
file.columns[file.isnull().any()]
file.describe()
# Impute missing entries with the column mean.
# NOTE(review): BI-RADS, Shape, Margin and Density look like ordinal codes;
# mean-imputation produces non-integer codes -- confirm this is acceptable.
file['BI-RADS'].fillna(file['BI-RADS'].mean(),inplace=True)
file['Age'].fillna(file['Age'].mean(),inplace=True)
file['Shape'].fillna(file['Shape'].mean(),inplace=True)
file['Margin'].fillna(file['Margin'].mean(),inplace=True)
file['Density'].fillna(file['Density'].mean(),inplace=True)
# Re-check: should now report False (no NaNs left).
file.isnull().values.any()
file.head()
plt.scatter(file['BI-RADS'],file['Age'])
plt.xlabel('BI-RADS')
plt.ylabel('Age')
plt.show()
plt.scatter(file['BI-RADS'],file['Shape'])
plt.xlabel('BI-RADS')
plt.ylabel('Shape')
plt.show()
plt.scatter(file['BI-RADS'],file['Margin'])
plt.xlabel('BI-RADS')
plt.ylabel('Margin')
plt.show()
plt.scatter(file['BI-RADS'],file['Density'])
plt.xlabel('BI-RADS')
plt.ylabel('Density')
plt.show()
from sklearn.cross_validation import train_test_split
train, test = train_test_split(file,test_size=0.2)
features_train=train[['BI-RADS','Age','Shape','Margin','Density']]
features_test = test[['BI-RADS','Age','Shape','Margin','Density']]
labels_train = train.Severity
labels_test = test.Severity
print(train.shape)
print(test.shape)
print(labels_test.head())
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
names = ['GaussianNB','SVM Linear','SVM Rbf','SVM Poly','SVM Linear C','SVM Rbf C','SVM Poly C','Decision tree','Decision tree with minsample','k neighbors','k neighbors with n']
classifiers = [GaussianNB(),
SVC(kernel="linear"),
SVC(kernel="rbf"),
SVC(kernel="poly"),
SVC(kernel="linear", C=1000),
SVC(kernel="rbf", C=1000),
SVC(kernel="poly", C=1000),
DecisionTreeClassifier(),
DecisionTreeClassifier(min_samples_split=5),
KNeighborsClassifier(),
KNeighborsClassifier(n_neighbors=2)]
for name,clf in zip(names,classifiers):
clf.fit(features_train,labels_train)
print("Accuracy:",clf.score(features_test,labels_test))
from sklearn.preprocessing import Imputer
values = file.values
imputer = Imputer()
transformed_values = imputer.fit_transform(values)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
names = ['GaussianNB','SVM Linear','SVM Rbf','SVM Poly','SVM Linear C','SVM Rbf C','SVM Poly C','Decision tree','Decision tree with minsample','k neighbors','k neighbors with n']
classifiers = [GaussianNB(),
SVC(kernel="linear"),
SVC(kernel="rbf"),
SVC(kernel="poly"),
SVC(kernel="linear", C=1000),
SVC(kernel="rbf", C=1000),
SVC(kernel="poly", C=1000),
DecisionTreeClassifier(),
DecisionTreeClassifier(min_samples_split=5),
KNeighborsClassifier(),
KNeighborsClassifier(n_neighbors=2)]
for name,clf in zip(names,classifiers):
clf.fit(features_train,labels_train)
print("Accuracy:",clf.score(features_test,labels_test))
```
| github_jupyter |
# UBI-FIT (flat income tax)
For each level of a UBI, calculate the required flat income tax rate, and associated measures like poverty rate (depth) and inequality.
* Disruption: average decrease to after-tax income (also per person)
* Gini per person
*Data: CPS | Tax year: 2018 | Type: Static | Author: Max Ghenis*
## Setup
### Imports
```
import taxcalc as tc
import taxcalc_helpers as tch
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
tc.__version__
```
### Settings
```
sns.set_style('white')
DPI = 200
mpl.rc('savefig', dpi=DPI)
mpl.rcParams['figure.dpi'] = DPI
mpl.rcParams['figure.figsize'] = 6.4, 4.8 # Default.
mpl.rcParams['font.sans-serif'] = 'Roboto'
mpl.rcParams['font.family'] = 'sans-serif'
# Set title text color to dark gray (https://material.io/color) not black.
TITLE_COLOR = '#212121'
mpl.rcParams['text.color'] = TITLE_COLOR
# Axis titles and tick marks are medium gray.
AXIS_COLOR = '#757575'
mpl.rcParams['axes.labelcolor'] = AXIS_COLOR
mpl.rcParams['xtick.color'] = AXIS_COLOR
mpl.rcParams['ytick.color'] = AXIS_COLOR
# Use Seaborn's default color palette.
# https://stackoverflow.com/q/48958426/1840471 for reproducibility.
sns.set_palette(sns.color_palette())
# Show one decimal in tables.
pd.set_option('precision', 2)
```
### Function to set all tax rates to the same value
```
def rt(rate, year=2019):
    """Wrap a single tax rate in the {year: rate} form used by reform dicts."""
    return {year: rate}


def fit(rate):
    """Build a flat-income-tax reform dict setting every bracket to `rate`.

    All eight ordinary-income (II) and pass-through (PT) bracket rates are
    set to `rate`; capital gains lose their preferential treatment; the AMT
    first rate tracks the flat rate and its excess rate is zeroed.
    """
    reform = {f'II_rt{bracket}': rt(rate) for bracket in range(1, 9)}
    reform.update({f'PT_rt{bracket}': rt(rate) for bracket in range(1, 9)})
    reform['CG_nodiff'] = {2019: True}  # Avoid specifying each CG rate.
    reform['AMT_rt1'] = rt(rate)
    reform['AMT_rt2'] = rt(0)  # This is the excess rate. Ideally remove AMT.
    return reform
```
## Data
```
recs = tc.Records.cps_constructor()
base = tch.calc_df(records=recs, year=2019, group_vars=['XTOT'],
group_n65=True,
metric_vars=['aftertax_income', 'XTOT'])
def fit_df(rate):
    """Simulate the flat-tax reform at `rate` and return per-record results.

    Uses the module-level `fit` reform builder and taxcalc_helpers'
    `calc_df`; the returned frame carries a `rate` column so frames for
    several rates can be concatenated and grouped later.
    """
    # Only get after-tax income.
    # Since only rates change, benefits will stay the same.
    reform_df = tch.calc_df(year=2019, group_vars=['XTOT'], group_n65=True,
                            metric_vars=['aftertax_income', 'XTOT'],
                            reform=fit(rate))
    reform_df['rate'] = rate  # tag rows with the simulated flat rate
    return reform_df
l = []
for i in np.arange(0, 1.01, 0.05).tolist():
print(i)
l.append(fit_df(i))
fdf = pd.concat(l)
```
## Analysis
### Calculate UBI for each FIT level
```
cur_afti = tch.weighted_sum(base, 'aftertax_income')
cur_pop = tch.weighted_sum(base, 'XTOT')
fdf_agg = pd.DataFrame(fdf.groupby(['rate']).apply(
lambda x: tch.weighted_sum(x, 'aftertax_income')))
fdf_agg.columns = ['afti']
fdf_agg['rev'] = cur_afti - fdf_agg.afti
fdf_agg['ubi'] = fdf_agg.rev / cur_pop
def nearest(s, val):
    """Return the value in `s` closest to `val`.

    Parameters
    ----------
    s : array-like of numbers (e.g. pandas.Series or numpy array)
        Values to search.
    val : number
        Target value.

    Returns
    -------
    The element of `s` with the smallest absolute difference from `val`
    (first occurrence wins on ties).
    """
    # The original left this as a TODO: diff was computed but never used.
    arr = np.asarray(s)
    diff = np.abs(arr - val)
    return arr[int(np.argmin(diff))]
fdf_agg
ax = fdf_agg.ubi.plot()
sns.despine(left=True, bottom=True)
# plt.axhline(y=cur_gini, c='gray', linewidth=0.5, zorder=-1)
# Revenue-neutral flat tax rate: 16.5%.
# plt.axvline(x=0.165, c='gray', linewidth=0.5, zorder=-1)
# Gini-neutral FIT: 20%.
# plt.axvline(x=0.2, c='gray', linewidth=0.5, zorder=-1)
plt.title('UBI by flat tax rate', loc='left')
ax.grid(color='#eeeeee')
plt.axhline(y=0, c='green', linewidth=0.5, zorder=1)
ax.set(xlabel='Flat income tax rate', ylabel='UBI funded by FIT')
ax.xaxis.set_major_formatter(mpl.ticker.FuncFormatter(
lambda y, _: '{:.0%}'.format(y)))
ax.yaxis.set_major_formatter(
mpl.ticker.StrMethodFormatter('${x:,.0f}'))
fdf = pd.merge(fdf, pd.DataFrame(fdf_agg.ubi), left_on='rate',
right_index=True)
fdf['afti'] = fdf.aftertax_income + fdf.ubi
ginis = fdf.groupby('rate').apply(lambda x: tch.gini(x.afti, x.s006))
cur_gini = tch.gini(base.aftertax_income, base.s006)
print(ginis)
cur_gini
ginis - cur_gini # `which` statement?
ax = ginis.plot()
sns.despine(left=True, bottom=True)
ax.grid(color='#eeeeee')
plt.axhline(y=cur_gini, c='green', linewidth=0.5, zorder=1)
# Revenue-neutral flat tax rate: 16.5%.
plt.axvline(x=0.165, c='green', linewidth=0.5, zorder=1)
# Gini-neutral FIT: 20%.
plt.axvline(x=0.21, c='green', linewidth=0.5, zorder=1)
ax.set(xlabel='Flat income tax rate', ylabel='Gini index')
plt.title('Gini coefficient by flat tax rate', loc='left')
ax.xaxis.set_major_formatter(mpl.ticker.FuncFormatter(
lambda y, _: '{:.0%}'.format(y)))
```
| github_jupyter |
## datasets
This module has the necessary functions to be able to download several useful datasets that we might be interested in using in our models.
```
from fastai.gen_doc.nbdoc import *
from fastai.datasets import *
from fastai.datasets import Config
from pathlib import Path
show_doc(URLs)
```
This contains all the datasets' and models' URLs, and some classmethods to help use them - you don't create objects of this class. The supported datasets are (with their calling name): `S3_NLP`, `S3_COCO`, `MNIST_SAMPLE`, `MNIST_TINY`, `IMDB_SAMPLE`, `ADULT_SAMPLE`, `ML_SAMPLE`, `PLANET_SAMPLE`, `CIFAR`, `PETS`, `MNIST`. To get details on the datasets you can see the [fast.ai datasets webpage](http://course.fast.ai/datasets). Datasets with SAMPLE in their name are subsets of the original datasets. In the case of MNIST, we also have a TINY dataset which is even smaller than MNIST_SAMPLE.
Models is now limited to `WT103` but you can expect more in the future!
```
URLs.MNIST_SAMPLE
```
## Downloading Data
For the rest of the datasets you will need to download them with [`untar_data`](/datasets.html#untar_data) or [`download_data`](/datasets.html#download_data). [`untar_data`](/datasets.html#untar_data) will decompress the data file and download it while [`download_data`](/datasets.html#download_data) will just download and save the compressed file in `.tgz` format.
By default, data will be downloaded to `~/.fastai/data` folder.
Configure the default `data_path` by editing `~/.fastai/config.yml`.
```
show_doc(untar_data)
untar_data(URLs.PLANET_SAMPLE)
show_doc(download_data)
```
Note: If the data file already exists in a <code>data</code> directory inside the notebook, that data file will be used instead of <code>~/.fastai/data</code>. Paths are resolved by calling the function [`datapath4file`](/datasets.html#datapath4file) - which checks if data exists locally (`data/`) first, before downloading to `~/.fastai/data` home directory.
Example:
```
download_data(URLs.PLANET_SAMPLE)
show_doc(datapath4file)
```
All the downloading functions use this to decide where to put the tgz and expanded folder. If `filename` already exists in a <code>data</code> directory in the same place as the calling notebook/script, that is used as the parent directly, otherwise `~/.fastai/config.yml` is read to see what path to use, which defaults to <code>~/.fastai/data</code> is used. To override this default, simply modify the value in your `~/.fastai/config.yml`:
data_path: ~/.fastai/data
```
show_doc(url2path)
show_doc(Config)
```
You probably won't need to use this yourself - it's used by `URLs.datapath4file`.
```
show_doc(Config.get_path)
```
Get the key corresponding to `path` in the [`Config`](/datasets.html#Config).
```
show_doc(Config.data_path)
```
Get the `Path` where the data is stored.
```
show_doc(Config.model_path)
```
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(Config.create)
show_doc(url2name)
show_doc(Config.get_key)
show_doc(Config.get)
```
## New Methods - Please document or move to the undocumented section
| github_jupyter |
[Deep Learning Summer School 2019](http://2019.dl-lab.eu) in Gdansk, Poland
Ordinal Regression Tutorial by [Sebastian Raschka](https://sebastianraschka.com)
GitHub Repository: https://github.com/rasbt/DL-Gdasnk2019-tutorial
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
```
# Modifying the UTKFace DataLoader for Extended Binary Classification
## Imports
```
import time
import os
import pandas as pd
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
from torch.utils.data import Dataset
import torch.nn.functional as F
import torch
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
```
## The Previous Custom Dataset Class for Cross-Entropy Classification
```python
class UTKDatasetAge(Dataset):
"""Custom Dataset for loading UTKFace images"""
def __init__(self, csv_path, img_dir, transform=None):
df = pd.read_csv(csv_path)
self.img_dir = img_dir
self.csv_path = csv_path
self.df = df
self.y = df['age'].values
self.transform = transform
def __getitem__(self, index):
img = Image.open(os.path.join(self.img_dir,
self.df.iloc[index]['filename']))
if self.transform is not None:
img = self.transform(img)
label = self.y[index]
return img, label
def __len__(self):
return self.y.shape[0]
```
## The Modfied DatasetLoader for Extended Binary Classification for Ordinal Regression
```
class UTKDatasetAgeBinary(Dataset):
    """Custom Dataset for loading UTKFace images with ordinal-regression targets.

    Besides the raw integer age label, ``__getitem__`` returns the
    "extended binary" encoding used for ordinal regression: a vector of
    ``num_classes - 1`` binary tasks where task k answers "is age > k?".
    """

    def __init__(self, csv_path, img_dir, num_classes, transform=None):
        df = pd.read_csv(csv_path)
        self.img_dir = img_dir      # directory holding the image files
        self.csv_path = csv_path
        self.df = df
        self.y = df['age'].values   # integer age labels
        self.transform = transform
        # Number of distinct age classes; fixes the length of the
        # extended binary target vector at num_classes - 1.
        self.num_classes = num_classes

    def __getitem__(self, index):
        img = Image.open(os.path.join(self.img_dir,
                                      self.df.iloc[index]['filename']))
        if self.transform is not None:
            img = self.transform(img)
        label = self.y[index]
        # Extended binary encoding: the first `label` thresholds are
        # exceeded (1), the remaining num_classes - 1 - label are not (0).
        # (This completes the exercise placeholder left in the original.)
        levels = [1]*label + [0]*(self.num_classes - 1 - label)
        levels = torch.tensor(levels, dtype=torch.float32)
        return img, label, levels

    def __len__(self):
        return self.y.shape[0]
```
## Setting Up DataLoaders
```
TRAIN_CSV_PATH = 'training_set.csv'
TEST_CSV_PATH = 'test_set.csv'
IMAGE_PATH = 'UTKFace'
BATCH_SIZE = 256
df_train = pd.read_csv(TRAIN_CSV_PATH)
NUM_CLASSES = np.unique(df_train['age'].values).shape[0]
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
train_indices = torch.arange(0, df_train.shape[0]-1000).numpy()
valid_indices = torch.arange(df_train.shape[0]-1000, df_train.shape[0]).numpy()
del df_train
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
train_transform = transforms.Compose([transforms.Resize((128, 128)),
transforms.RandomCrop((120, 120)),
transforms.ToTensor()])
test_transform = transforms.Compose([transforms.Resize((128, 128)),
transforms.CenterCrop((120, 120)),
transforms.ToTensor()])
train_dataset = UTKDatasetAgeBinary(csv_path=TRAIN_CSV_PATH,
img_dir=IMAGE_PATH,
num_classes=NUM_CLASSES,
transform=train_transform)
valid_dataset = UTKDatasetAgeBinary(csv_path=TRAIN_CSV_PATH,
img_dir=IMAGE_PATH,
num_classes=NUM_CLASSES,
transform=test_transform)
test_dataset = UTKDatasetAgeBinary(csv_path=TEST_CSV_PATH,
img_dir=IMAGE_PATH,
num_classes=NUM_CLASSES,
transform=test_transform)
train_loader = DataLoader(train_dataset,
batch_size=BATCH_SIZE,
num_workers=8,
sampler=train_sampler)
valid_loader = DataLoader(valid_dataset,
batch_size=BATCH_SIZE,
num_workers=8,
sampler=valid_sampler)
test_loader = DataLoader(dataset=test_dataset,
batch_size=BATCH_SIZE,
num_workers=8,
shuffle=False)
# Checking the dataset
for images, labels, levels in test_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
print('Ext. binary dimensions:', levels.shape)
break
for images, labels, levels in valid_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
print('Ext. binary dimensions:', levels.shape)
break
for images, labels,levels in train_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
print('Ext. binary dimensions:', levels.shape)
break
```
## Iterating through the Custom Dataset
```
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.manual_seed(0)
num_epochs = 2
for epoch in range(num_epochs):
for batch_idx, (features, labels, levels) in enumerate(train_loader):
print('Epoch:', epoch+1, end='')
print(' | Batch index:', batch_idx, end='')
print(' | Batch size:', labels.size()[0])
features = features.to(DEVICE)
labels = labels.to(DEVICE)
levels = levels.to(DEVICE)
break
print(f'Labels:\n {labels}\n\n')
print(f'Levels:\n {levels}\n\n')
```
## Expected Loss for Random Prediction
```
-np.log(0.5) * 40
```
| github_jupyter |
```
import numpy as np
from astropy.table import Table
from scipy.sparse import lil_matrix
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
import time
def jaccard(a,b):
    """
    Calculate the Jaccard distance between two neighbor-index arrays.

    Entries equal to -1 are treated as padding ("no neighbor") and ignored.

    :param a: array
        array of neighbor indices
    :param b: array
        array of neighbor indices
    :return: Jaccard distance in [0, 1]; 0.0 when both sets are empty.
    """
    a = np.array(a, dtype='int')
    b = np.array(b, dtype='int')
    # Drop the -1 padding before the set arithmetic.
    a = a[a > -1]
    b = b[b > -1]
    union = np.union1d(a, b)
    if len(union) == 0:
        # Both neighbor sets empty: define the distance as 0 rather than
        # raising ZeroDivisionError (the original divided unconditionally).
        return 0.0
    intersection = np.intersect1d(a, b)
    return 1.0 - len(intersection)*1.0 / len(union)


def iterator_dist(indices, min_k):
    """
    Generator of sparse distance-matrix entries (row, col, distance).

    Visits each pair (n, m) with m a neighbor of n and m > n (upper
    triangle only) and yields the Jaccard distance when the two neighbor
    lists share more than `min_k` common entries.

    :param indices: 2d array
        indices[i] holds the neighbor indices of point i
    :param min_k: int
        minimum number of shared neighbors for a pair to be connected
    """
    for n in range(len(indices)):
        # Only neighbors with a larger index: the caller symmetrizes later.
        for m in indices[n][indices[n] > n]:
            if len(np.intersect1d(indices[m], indices[n])) > min_k:
                dist = jaccard(indices[n], indices[m])
                yield (n, m, dist)
# load data from APOGEE
directory = '/home/boquan/Data/'
filename = 'allStar-l31c.2.fits' # download data from https://www.sdss.org/dr14/irspec/spectro_data/
table = Table.read(directory+filename)
table = table[table['GLAT'] > 15] # a subsample of stars with galactic latitude > 15 degrees
elements = ['C_FE', 'CI_FE', 'N_FE', 'O_FE', 'NA_FE', 'MG_FE', 'AL_FE', 'SI_FE', 'P_FE', 'S_FE', 'K_FE', 'CA_FE',
'TI_FE', 'TIII_FE', 'V_FE', 'CR_FE', 'MN_FE', 'FE_H', 'CO_FE', 'NI_FE']
df = table[elements+['VHELIO_AVG', 'APOGEE_ID', 'SNR', 'STARFLAG']].to_pandas()
df = df[df['STARFLAG'] == 0]
df['APOGEE_ID'] = df['APOGEE_ID'].str.decode('utf-8') # remove duplicates by keeping the highest SNR
df = df.sort_values('SNR', ascending=False)
df = df.drop_duplicates(subset='APOGEE_ID', keep='first')
df = df.mask(df == -9999)
df = df.dropna(subset=elements+['VHELIO_AVG'])
print(f'{len(df)} stars left after dropping NaNs and applying the cut in b')
# parameters
num_nbhrs = 50 # number of nearest neighbors to retrieve for each star in the initial data space
min_k = 10 # minimum number of shared neighbors between two stars to be connected
eps = 0.35 # search radius parameter in DBSCAN
min_samples = 8 # minimum number of data points within the search radius to be considered a core point
# data matrix: one row per star, one column per abundance ratio
data = df[elements].values
# get nearest neighbors (L1 metric in abundance space)
start = time.time()
nbrs = NearestNeighbors(n_neighbors=num_nbhrs, metric='manhattan').fit(data)
distances, indices = nbrs.kneighbors(data)
print('Nearest neighbors found. ')
print(f'Took {time.time() - start:.2f} seconds')
# generate sparse Jaccard distance matrix (upper triangle, then symmetrized)
start = time.time()
S = lil_matrix((data.shape[0], data.shape[0]))
for (n, m, dist) in iterator_dist(indices, min_k):
    S[n, m] = dist
# iterator_dist only emits m > n, so add the transpose to fill the lower triangle
S += S.transpose()
print('Distance matrix created. ')
print(f'Took {time.time() - start:.2f} seconds')
# DBSCAN clustering with precomputed distance matrix
start = time.time()
db = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', n_jobs=-1).fit(S)
labels = db.labels_
# DBSCAN labels noise points as -1, so cluster ids run 0..max(labels)
n_clumps = max(labels)+1
print(f'{n_clumps} clusters found ({np.sum(labels > -1)} stars left)')
print(f'Took {time.time() - start:.2f} seconds')
```
| github_jupyter |
```
%load_ext watermark
%watermark -d -u -a 'Andreas Mueller, Kyle Kastner, Sebastian Raschka' -v -p numpy,scipy,matplotlib,scikit-learn
```
# SciPy 2016 Scikit-learn Tutorial
# Model Evaluation, Scoring Metrics, and Dealing with Class Imbalances
In the previous notebook, we already went into some detail on how to evaluate a model and how to pick the best model. So far, we assumed that we were given a performance measure, a measure of the quality of the model. What measure one should use is not always obvious, though.
The default scores in scikit-learn are ``accuracy`` for classification, which is the fraction of correctly classified samples, and ``r2`` for regression, which is the coefficient of determination.
These are reasonable default choices in many scenarios; however, depending on our task, these are not always the definitive or recommended choices.
Let's take look at classification in more detail, going back to the application of classifying handwritten digits.
So, how about training a classifier and walking through the different ways we can evaluate it? Scikit-learn has many helpful methods in the ``sklearn.metrics`` module that can help us with this task:
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(precision=2)
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.svm import LinearSVC
digits = load_digits()
X, y = digits.data, digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42,
stratify=y,
test_size=0.25)
classifier = LinearSVC().fit(X_train, y_train)
y_test_pred = classifier.predict(X_test)
print("Accuracy: %f" % classifier.score(X_test, y_test))
```
Here, we predicted 93.8% of samples correctly. For multi-class problems, it is often interesting to know which of the classes are hard to predict, and which are easy, or which classes get confused. One way to get more information about misclassifications is ``the confusion_matrix``, which shows for each true class, how frequent a given predicted outcome is.
```
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_test_pred)
```
A plot is sometimes more readable:
```
plt.matshow(confusion_matrix(y_test, y_test_pred))
plt.colorbar()
plt.xlabel("Predicted label")
plt.ylabel("True label")
```
We can see that most entries are on the diagonal, which means that we predicted nearly all samples correctly. The off-diagonal entries show us that many eights were classified as ones, and that nines are likely to be confused with many other classes.
Another useful function is the ``classification_report`` which provides precision, recall, fscore and support for all classes.
Precision is how many of the predictions for a class are actually that class. With TP, FP, TN, FN standing for "true positive", "false positive", "true negative" and "false negative" repectively:
Precision = TP / (TP + FP)
Recall is how many of the true positives were recovered:
Recall = TP / (TP + FN)
f1-score is the harmonic mean of precision and recall:
F1 = 2 x (precision x recall) / (precision + recall)
The values of all these values above are in the closed interval [0, 1], where 1 means a perfect score.
```
from sklearn.metrics import classification_report
print(classification_report(y_test, y_test_pred))
```
These metrics are helpful in two particular cases that come up often in practice:
1. Imbalanced classes, that is one class might be much more frequent than the other.
2. Asymmetric costs, that is one kind of error is much more "costly" than the other.
Let's have a look at 1. first. Say we have a class imbalance of 1:9, which is rather mild (think about ad-click-prediction where maybe 0.001% of ads might be clicked):
```
np.bincount(y) / y.shape[0]
```
As a toy example, let's say we want to classify the digits three against all other digits:
```
X, y = digits.data, digits.target == 3
```
Now we run cross-validation on a classifier to see how well it does:
```
from sklearn.cross_validation import cross_val_score
from sklearn.svm import SVC
cross_val_score(SVC(), X, y)
```
Our classifier is 90% accurate. Is that good? Or bad? Keep in mind that 90% of the data is "not three". So let's see how well a dummy classifier does, that always predicts the most frequent class:
```
from sklearn.dummy import DummyClassifier
cross_val_score(DummyClassifier("most_frequent"), X, y)
```
Also 90% (as expected)! So one might think that means our classifier is not very good, as it doesn't do better than a simple strategy that doesn't even look at the data.
That would be judging too quickly, though. Accuracy is simply not a good way to evaluate classifiers for imbalanced datasets!
```
np.bincount(y) / y.shape[0]
```
ROC Curves
=======
A much better measure is using the so-called ROC (Receiver operating characteristics) curve. A roc-curve works with uncertainty outputs of a classifier, say the "decision_function" of the ``SVC`` we trained above. Instead of making a cut-off at zero and looking at classification outcomes, it looks at every possible cut-off and records how many true positive predictions there are, and how many false positive predictions there are.
The following plot compares the roc curve of three parameter settings of our classifier on the "three vs rest" task.
```
from sklearn.metrics import roc_curve, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
for gamma in [.01, .05, 1]:
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate (recall)")
svm = SVC(gamma=gamma).fit(X_train, y_train)
decision_function = svm.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, decision_function)
acc = svm.score(X_test, y_test)
auc = roc_auc_score(y_test, svm.decision_function(X_test))
plt.plot(fpr, tpr, label="acc:%.2f auc:%.2f" % (acc, auc), linewidth=3)
plt.legend(loc="best");
```
With a very high decision threshold, there will be few false positives, but also few true positives, while with a very low threshold, both true positive rate and false positive rate will be high. So in general, the curve will run from the lower left to the upper right. A diagonal line reflects chance performance, while the goal is to be as much in the top left corner as possible. This means giving a higher decision_function value to all positive samples than to any negative sample.
In this sense, this curve only considers the ranking of the positive and negative samples, not the actual value.
As you can see from the curves and the accuracy values in the legend, even though all classifiers have the same accuracy, 89%, which is even lower than the dummy classifier, one of them has a perfect roc curve, while one of them performs on chance level.
For doing grid-search and cross-validation, we usually want to condense our model evaluation into a single number. A good way to do this with the roc curve is to use the area under the curve (AUC).
We can simply use this in ``cross_val_score`` by specifying ``scoring="roc_auc"``:
```
from sklearn.cross_validation import cross_val_score
cross_val_score(SVC(), X, y, scoring="roc_auc")
```
Built-In and custom scoring functions
=======================================
There are many more scoring methods available, which are useful for different kinds of tasks. You can find them in the "SCORERS" dictionary. The online documentation explains all of them.
```
from sklearn.metrics.scorer import SCORERS
print(SCORERS.keys())
```
It is also possible to define your own scoring metric. Instead of a string, you can provide a callable to as ``scoring`` parameter, that is an object with a ``__call__`` method or a function.
It needs to take a model, a test-set features ``X_test`` and test-set labels ``y_test``, and return a float. Higher floats are taken to mean better models.
Let's reimplement the standard accuracy score:
```
def my_accuracy_scoring(est, X, y):
return np.mean(est.predict(X) == y)
cross_val_score(SVC(), X, y, scoring=my_accuracy_scoring)
```
The interesting thing about this interface is that we can access any attributes of the estimator we trained. Let's say we have trained a linear model, and we want to penalize having non-zero coefficients in our model selection:
```
def my_super_scoring(est, X, y):
    """Accuracy of a fitted linear model, penalised by its coefficient density.

    Subtracts the fraction of non-zero entries in ``est.coef_`` from the
    mean accuracy on ``(X, y)``, so sparser models score higher at equal
    accuracy.
    """
    accuracy = np.mean(est.predict(X) == y)
    density = np.mean(est.coef_ != 0)
    return accuracy - density
```
We can evaluate if this worked as expected, by grid-searching over l1 and l2 penalties in a linear SVM. An l1 penalty is expected to produce exactly zero coefficients:
```
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
y = digits.target
grid = GridSearchCV(LinearSVC(C=.01, dual=False),
param_grid={'penalty' : ['l1', 'l2']},
scoring=my_super_scoring)
grid.fit(X, y)
print(grid.best_params_)
```
# Exercise
In previous sections, we typically used the accuracy measure to evaluate the performance of our classifiers. A related measure that we haven't talked about, yet, is the average-per-class accuracy (APCA). As we remember, the accuracy is defined as
$$ACC = \frac{TP+TN}{n},$$
where *n* is the total number of samples. This can be generalized to
$$ACC = \frac{T}{n},$$
where *T* is the number of all correct predictions in multi-class settings.

Given the following arrays of "true" class labels and predicted class labels, can you implement a function that uses the accuracy measure to compute the average-per-class accuracy as shown below?
```
y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
confusion_matrix(y_true, y_pred)
# %load solutions/16A_avg_per_class_acc.py
```
| github_jupyter |
# Creating a Bathymetric Surface from ICESAT-2 data
The spaceborne ICESAT-2 LiDAR instrument is a photon-counting LiDAR which has a wavelength of 532 nm. At this wavelength the signal penetrates into waterbodies and therefore point samples of water depths can be retrieved (e.g., Thomas et al., 2021) down to 40 m in depth. However, given that ICESAT-2 just provides point samples it would be useful to be able to use these samples to create a bathymetric surface. The visible bands of optical data (e.g., Landsat and Sentinel-2) also provide some information from below the water surface (e.g., Thomas et al., 2021) and these can be assumed to have some relationship to the depth of the water.
In these notebooks you will explore this relationship and look at two methods of using multiple satellite images, as with cloud cover it is very often the case that multiple images will be required. Therefore, these notebooks will show you how to generate image composites using RSGISLib and to summarise results from multiple images.
See ICESAT-2 technical specification: https://icesat-2.gsfc.nasa.gov/science/specs
Thomas, N. et al. Space‐Borne Cloud‐Native Satellite‐Derived Bathymetry (SDB) Models Using ICESat‐2 And Sentinel‐2. Geophys Res Lett 48, (2021).
# Visualise ICESAT-2
This first notebook will look at visualising some of the ICESAT-2.
# 1. Imports
```
import os
import geopandas
%matplotlib inline
import matplotlib.pyplot as plt
import numpy
from shapely.geometry import Polygon
```
# 2. Specify the input layers
```
vec_file = "../data/is2_bathy/is2_bermuda_depth.gpkg"
vec_lyr_path1 = "ic2_depth_bermuda_path1"
vec_lyr_path2 = "ic2_depth_bermuda_path2"
vec_lyr_path3 = "ic2_depth_bermuda_path3"
vec_lyr_path4 = "ic2_depth_bermuda_path4"
vec_lyr_path5 = "ic2_depth_bermuda_path5"
vec_lyr_path6 = "ic2_depth_bermuda_path6"
vec_lyr_path7 = "ic2_depth_bermuda_path7"
vec_lyr_path8 = "ic2_depth_bermuda_path8"
```
# 3. Read Layer to Data Frame
```
path1_gpd_df = geopandas.read_file(vec_file, layer=vec_lyr_path1)
path1_gpd_df
```
# 4. Visualise Path 1
```
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
path1_gpd_df.plot(ax=ax, marker=".", markersize=0.5)
```
# 5. Subset the Path for Plotting
## 5.1 Define the BBOX
```
min_x = 328622
max_x = 330146
min_y = 3586577
max_y = 3592615
# Create a custom polygon
sub_poly = Polygon(
[(min_x, max_y), (max_x, max_y), (max_x, min_y), (min_x, min_y), (min_x, max_y)]
)
sub_poly_gdf = geopandas.GeoDataFrame([1], geometry=[sub_poly], crs=path1_gpd_df.crs)
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
path1_gpd_df.plot(ax=ax, marker=".", markersize=0.5)
sub_poly_gdf.plot(ax=ax, color="red", alpha=0.5)
```
## 5.2 Subset the GeoDataFrame
Use the clip function.
```
path1_gpd_df_sub = path1_gpd_df.clip(sub_poly)
path1_gpd_df_sub
```
## 5.3 Plot the subset
```
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
path1_gpd_df_sub.plot(ax=ax)
```
# 6. Plot Depth values
## 6.1 Get Depth and Latitude Values
```
# Get the latidude and depth values to create scatter plot.
depth_vals = path1_gpd_df_sub["Depth"].values
lat_vals = path1_gpd_df_sub["Latitude"].values
```
## 6.2 Plot the Depths
The blue line is the water surface while the black points are the depths below the surface. Note, the scales on the x and y axes of the plot are different, so bear that in mind when interpreting the plot.
```
# Plot the data
fig, ax = plt.subplots(1, 1, figsize=(25, 5))
ax.scatter(lat_vals, depth_vals, s=10, marker=".", color="black", alpha=0.5)
ax.axhline(y=0.0, color="blue", linestyle="-")
fig.tight_layout()
```
| github_jupyter |
```
# MATH FUNCTIONS IN PYTHON
# SOURCE - https://docs.python.org/3/library/math.html
#--------------------------------------------------------------------------------------------------
# first we need to import the math module
# This module provides access to the mathematical functions defined by the C standard.
#
# These functions cannot be used with complex numbers; use the functions of the same name from the cmath module if you require support for complex numbers. The distinction between functions which support complex numbers and those which don’t is made since most users do not want to learn quite as much mathematics as required to understand complex numbers. Receiving an exception instead of a complex result allows earlier detection of the unexpected complex number used as a parameter, so that the programmer can determine how and why it was generated in the first place.
#
# The following functions are provided by this module. Except when explicitly noted otherwise, all return values are floats.
import math
# --------------Number-theoretic and representation functions--------------------------------------
long_string = '''
math.ceil(x)
Return the ceiling of x, the smallest integer greater than or equal to x.
If x is not a float, delegates to x.__ceil__(), which should return an Integral value.
'''
print(long_string)
print("\n--------------------math.ceil(x)-------------------------------")
print(f"math.ceil(x) --- for number = 404 --- {math.ceil(404)}")
print(f"math.ceil(x) --- for number = 404.01 --- {math.ceil(404.01)}")
print(f"math.ceil(x) --- for number = 404.36 --- {math.ceil(404.36)}")
print(f"math.ceil(x) --- for number = 404.50 --- {math.ceil(404.50)}")
print(f"math.ceil(x) --- for number = 404.86 --- {math.ceil(404.86)}")
print("---------------------------------------------------------------\n")
long_string = '''
math.comb(n, k)
Return the number of ways to choose k items from n items without repetition and without order.
Evaluates to n! / (k! * (n - k)!) when k <= n and evaluates to zero when k > n.
Also called the binomial coefficient because it is equivalent to the coefficient of k-th term in polynomial expansion of the expression (1 + x) ** n.
Raises TypeError if either of the arguments are not integers. Raises ValueError if either of the arguments are negative.
'''
print(long_string)
print("\n-------------------math.comb(n, k)-----------------------------")
print(f"math.comb(n, k) --- for number = 404, 10 --- {math.comb(404, 10)}")
print(f"math.comb(n, k) --- for number = 10, 2 --- {math.comb(10, 2)}")
print(f"math.comb(n, k) --- for number = 10, 1 --- {math.comb(10, 1)}")
print(f"math.comb(n, k) --- for number = 10, 10 --- {math.comb(10, 10)}")
print(f"math.comb(n, k) --- for number = 10, 11 --- {math.comb(10, 11)}")
# print(f"math.comb(n, k) --- for number = 404.01 --- {math.comb(404.01, 10)}") #TypeError: 'float' object cannot be interpreted as an integer
#print(f"math.comb(n, k) --- for number = -404 --- {math.comb(-404, 10)}") #ValueError: n must be a non-negative integer
print("---------------------------------------------------------------\n")
#-------------------------------------------------------------------------------------------------
#like these, there are many such examples. You can try them one-by-one
#Also please add them here. Your contribution shall benefit many others!!
# SOURCE - https://www.programiz.com/python-programming/modules/math
# ceil(x) Returns the smallest integer greater than or equal to x.
# copysign(x, y) Returns x with the sign of y
# fabs(x) Returns the absolute value of x
# factorial(x) Returns the factorial of x
# floor(x) Returns the largest integer less than or equal to x
# fmod(x, y) Returns the remainder when x is divided by y
# frexp(x) Returns the mantissa and exponent of x as the pair (m, e)
# fsum(iterable) Returns an accurate floating point sum of values in the iterable
# isfinite(x) Returns True if x is neither an infinity nor a NaN (Not a Number)
# isinf(x) Returns True if x is a positive or negative infinity
# isnan(x) Returns True if x is a NaN
# ldexp(x, i) Returns x * (2**i)
# modf(x) Returns the fractional and integer parts of x
# trunc(x) Returns the truncated integer value of x
# exp(x) Returns e**x
# expm1(x) Returns e**x - 1
# log(x[, base]) Returns the logarithm of x to the base (defaults to e)
# log1p(x) Returns the natural logarithm of 1+x
# log2(x) Returns the base-2 logarithm of x
# log10(x) Returns the base-10 logarithm of x
# pow(x, y) Returns x raised to the power y
# sqrt(x) Returns the square root of x
# acos(x) Returns the arc cosine of x
# asin(x) Returns the arc sine of x
# atan(x) Returns the arc tangent of x
# atan2(y, x) Returns atan(y / x)
# cos(x) Returns the cosine of x
# hypot(x, y) Returns the Euclidean norm, sqrt(x*x + y*y)
# sin(x) Returns the sine of x
# tan(x) Returns the tangent of x
# degrees(x) Converts angle x from radians to degrees
# radians(x) Converts angle x from degrees to radians
# acosh(x) Returns the inverse hyperbolic cosine of x
# asinh(x) Returns the inverse hyperbolic sine of x
# atanh(x) Returns the inverse hyperbolic tangent of x
# cosh(x) Returns the hyperbolic cosine of x
# sinh(x) Returns the hyperbolic cosine of x
# tanh(x) Returns the hyperbolic tangent of x
# erf(x) Returns the error function at x
# erfc(x) Returns the complementary error function at x
# gamma(x) Returns the Gamma function at x
# lgamma(x) Returns the natural logarithm of the absolute value of the Gamma function at x
# pi Mathematical constant, the ratio of circumference of a circle to it's diameter (3.14159...)
# e mathematical constant e (2.71828...)
# math.acos(x) Returns the arc cosine value of x
# math.acosh(x) Returns the hyperbolic arc cosine of x
# math.asin(x) Returns the arc sine of x
# math.asinh(x) Returns the hyperbolic arc sine of x
# math.atan(x) Returns the arc tangent value of x
# math.atan2(y, x) Returns the arc tangent of y/x in radians
# math.atanh(x) Returns the hyperbolic arctangent value of x
# math.ceil(x) Rounds a number upwards to the nearest integer, and returns the result
# math.comb(n, k) Returns the number of ways to choose k items from n items without repetition and order
# math.copysign(x, y) Returns a float consisting of the value of the first parameter and the sign of the second parameter
# math.cos(x) Returns the cosine of x
# math.cosh(x) Returns the hyperbolic cosine of x
# math.degrees(x) Converts an angle from radians to degrees
# math.dist(p, q) Calculates the euclidean distance between two specified points (p and q), where p and q are the coordinates of that point
# math.erf(x) Returns the error function of x
# math.erfc(x) Returns the complementary error function of x
# math.exp(x) Returns the value of Ex, where E is Euler's number (approximately 2.718281...), and x is the number passed to it
# math.expm1(x) Returns the value of Ex - 1, where E is Euler's number (approximately 2.718281...), and x is the number passed to it
# math.fabs(x) Returns the absolute value of a number
# math.factorial() Returns the factorial of a number
# math.floor(x) Rounds a number downwards to the nearest integer, and returns the result
# math.fmod(x, y) Returns the remainder of specified numbers when a number is divided by another number
# math.frexp() Returns the mantissa and the exponent, of a specified value
# math.fsum(iterable) Returns the sum of all items in an iterable (tuples, arrays, lists, etc.)
# math.gamma(x) Returns the gamma value of x
# math.gcd() Returns the highest value that can divide two integers
# math.hypot() Find the Euclidean distance from the origin for n inputs
# math.isclose() Checks whether two values are close, or not
# math.isfinite(x) Checks whether x is a finite number
# math.isinf(x) Check whether x is a positive or negative infinty
# math.isnan(x) Checks whether x is NaN (not a number)
# math.isqrt(n) Returns the nearest integer square root of n
# math.ldexp(x, i) Returns the expression x * 2i where x is mantissa and i is an exponent
# math.lgamma(x) Returns the log gamma value of x
# math.log(x, base) Returns the natural logarithm of a number, or the logarithm of number to base
# math.log10(x) Returns the base-10 logarithm of x
# math.log1p(x) Returns the natural logarithm of 1+x
# math.log2(x) Returns the base-2 logarithm of x
# math.perm(n, k) Returns the number of ways to choose k items from n items with order and without repetition
# math.pow(x, y) Returns the value of x to the power of y
# math.prod(iterable, *, start=1) Returns the product of an iterable (lists, array, tuples, etc.)
# math.radians(x) Converts a degree value (x) to radians
# math.remainder(x, y) Returns the closest value that can make numerator completely divisible by the denominator
# math.sin(x) Returns the sine of x
# math.sinh(x) Returns the hyperbolic sine of x
# math.sqrt(x) Returns the square root of x
# math.tan(x) Returns the tangent of x
# math.tanh(x) Returns the hyperbolic tangent of x
# math.trunc(x) Returns the truncated integer parts of x
# Math Constants
# Constant Description
# math.e Returns Euler's number (2.7182...)
# math.inf Returns a floating-point positive infinity
# math.nan Returns a floating-point NaN (Not a Number) value
# math.pi Returns PI (3.1415...)
# math.tau Returns tau (6.2831...)
```
| github_jupyter |
```
from sklearn.preprocessing import LabelBinarizer
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, model_from_json
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras.constraints import maxnorm
from keras import regularizers
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, SeparableConv2D
from keras.layers import concatenate, add, Lambda
from keras.layers import LSTM
from keras.models import Model
from keras.regularizers import l2
from keras.applications import imagenet_utils, Xception
from keras.preprocessing.image import img_to_array
from keras.preprocessing.sequence import pad_sequences
import keras.backend as K
import numpy as np
import json
import os
import cv2
import h5py
import random
import matplotlib.pyplot as plt
%matplotlib inline
from helpers import TrainingMonitor
from helpers import Utils
output_path = "../output/"
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
import pickle
data = pickle.load(open('../input/datasets/UCF-101-hdf5/full_sequence_rgbmean.pkl', 'rb'))
train_split, val_split = 0.7, 0.15
random.shuffle(data)
train_len = int(train_split * len(data))
train_val_len = int((train_split + val_split) * len(data))
train = data[:train_len]
val = data[train_len:train_val_len]
test = data[train_val_len:]
x_train, y_train = np.array([t["imgs"] for t in train]), [t["label"] for t in train]
x_val, y_val = np.array([t["imgs"] for t in val]), [t["label"] for t in val]
x_test, y_test = np.array([t["imgs"] for t in test]), [t["label"] for t in test]
x_train = pad_sequences(x_train, maxlen=1, truncating="post")
x_val = pad_sequences(x_val, maxlen=1, truncating="post")
x_test = pad_sequences(x_test, maxlen=1, truncating="post")
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 60*80*3))
x_val = x_val.reshape((x_val.shape[0], x_val.shape[1], 60*80*3))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 60*80*3))
lb = LabelBinarizer()
y_train = lb.fit_transform(y_train)
y_val = lb.fit_transform(y_val)
y_test = lb.fit_transform(y_test)
x_val.shape
num_classes = y_train.shape[1]
num_classes
model = Sequential()
model.add(LSTM(64, return_sequences=True, input_shape=(x_train.shape[1:])))
model.add(LSTM(128, return_sequences=True))
model.add(LSTM(256))
model.add(Dense(num_classes))
model.add(Activation("sigmoid"))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=64,
epochs=5,
validation_data=(x_val, y_val),
shuffle="batch")
scores = model.evaluate(x_test, y_test, verbose=0)
print("Train: %.2f%%; Val: %.2f%%; Test: %.2f%%" %
(np.max(history.history['acc'])*100, np.max(history.history['val_acc'])*100, scores[1]*100)
)
model = Sequential()
model.add(LSTM(256, stateful=True, batch_input_shape=(5, 1, 14400)))
model.add(Dense(num_classes))
model.add(Activation("sigmoid"))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
for i in range(5):
model.fit(x_train, y_train, epochs=1, batch_size=5)
model.reset_states()
```
| github_jupyter |
# Cats and Dogs Problem Solution
The inspiration for this script comes from a beautiful [keras blog](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html).
```
#Imports
import os
from random import shuffle
#Keras imports
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
#First divide train data into 3 parts - train, test, validation.
#We are doing this for using ImageDataGenarator Class
all_image_names = os.listdir('data/orig/train')
shuffle(all_image_names)
#Send shuffled data into different folders.
l = len(all_image_names)
train = all_image_names[:int(0.80*l)]
validation = all_image_names[int(0.80*l):int(0.90*l)]
test = all_image_names[int(0.90*l):]
os.mkdir('data/new')
os.mkdir('data/new/train')
os.mkdir('data/new/train/cats/')
os.mkdir('data/new/train/dogs')
os.mkdir('data/new/validation')
os.mkdir('data/new/validation/cats')
os.mkdir('data/new/validation/dogs')
os.mkdir('data/new/test')
os.mkdir('data/new/test/cats')
os.mkdir('data/new/test/dogs')
#We must also dividing image into proper subfolders.
#Another prerequisite for ImageDataGenerator Class
for x in train:
if x.split('.')[0] == 'cat':
os.rename('data/orig/train/'+x , 'data/new/train/cats/'+x)
else:
os.rename('data/orig/train/'+x , 'data/new/train/dogs/'+x)
for x in validation:
if x.split('.')[0] == 'cat':
os.rename('data/orig/train/'+x , 'data/new/validation/cats/'+x)
else:
os.rename('data/orig/train/'+x , 'data/new/validation/dogs/'+x)
for x in test:
if x.split('.')[0] == 'cat':
os.rename('data/orig/train/'+x , 'data/new/test/cats/'+x)
else:
os.rename('data/orig/train/'+x , 'data/new/test/dogs/'+x)
#Let us prepare our data
train_conf = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
validation_conf = ImageDataGenerator(rescale=1./255) #Only scaling in test data
train_generator = train_conf.flow_from_directory(
'data/new/train/', #Target directory
target_size=(100, 100), #All images will be resized to 150x150
batch_size=40,
class_mode='binary') #We will later use binary_crossentropy loss, hence we need binary labels
#Generator for validation data
validation_generator = validation_conf.flow_from_directory(
'data/new/validation/',
target_size=(100, 100),
batch_size=40,
class_mode='binary')
#Create model
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(100, 100, 3))) #Convo Layer
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, 3, 3)) #Convo Layer
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 3, 3)) #Convo Layer
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
#The model so far outputs 3D feature maps (height, width, features), we shall flatten those features
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64)) #Fully Connected Layer
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1)) #Fully Connected Output Layer
model.add(Activation('sigmoid'))
#Compile our model
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
#Let us fit data into the model
model.fit_generator(
train_generator,
samples_per_epoch=20000,
nb_epoch=10,
validation_data=validation_generator,
nb_val_samples=800)
model.save_weights('try_1.h5') # saving weights after training
```
| github_jupyter |
# PageRank
In this notebook, we will use both NetworkX and cuGraph to compute the PageRank of each vertex in our test dataset. The NetworkX and cuGraph processes will be interleaved so that each step can be compared.
Notebook Credits
* Original Authors: Bradley Rees and James Wyles
* Created: 08/13/2019
* Updated: 10/28/2019
RAPIDS Versions: 0.10.0
Test Hardware
* GV100 32G, CUDA 10.0
## Introduction
PageRank is a measure of the relative importance, also called centrality, of a vertex based on the relative importance of its neighbors. PageRank was developed by Google and is (was) used to rank its search results. PageRank uses the connectivity information of a graph to rank the importance of each vertex.
See [Wikipedia](https://en.wikipedia.org/wiki/PageRank) for more details on the algorithm.
To compute the Pagerank scores for a graph in cuGraph we use:<br>
**cugraph.pagerank(G,alpha=0.85, max_iter=100, tol=1.0e-5)**
* __G__: cugraph.Graph object
* __alpha__: float, The damping factor represents the probability to follow an outgoing edge. default is 0.85
* __max_iter__: int, The maximum number of iterations before an answer is returned. This can be used to limit the execution time and do an early exit before the solver reaches the convergence tolerance. If this value is lower or equal to 0 cuGraph will use the default value, which is 100
* __tol__: float, Set the tolerance the approximation, this parameter should be a small magnitude value. The lower the tolerance the better the approximation. If this value is 0.0f, cuGraph will use the default value which is 0.00001. Setting too small a tolerance can lead to non-convergence due to numerical roundoff. Usually values between 0.01 and 0.00001 are acceptable.
Returns:
* __df__: a cudf.DataFrame object with two columns:
* df['vertex']: The vertex identifier for the vertex
* df['pagerank']: The pagerank score for the vertex
## cuGraph Notice
The current version of cuGraph has some limitations:
* Vertex IDs need to be 32-bit integers.
* Vertex IDs are expected to be contiguous integers starting from 0.
cuGraph provides the renumber function to mitigate this problem. Input vertex IDs for the renumber function can be either 32-bit or 64-bit integers, can be non-contiguous, and can start from an arbitrary number. The renumber function maps the provided input vertex IDs to 32-bit contiguous integers starting from 0. cuGraph still requires the renumbered vertex IDs to be representable in 32-bit integers. These limitations are being addressed and will be fixed soon.
### Test Data
We will be using the Zachary Karate club dataset
*W. W. Zachary, An information flow model for conflict and fission in small groups, Journal of
Anthropological Research 33, 452-473 (1977).*

### Prep
```
# The notebook compares cuGraph to NetworkX,
# therefore there some additional non-RAPIDS python libraries need to be installed.
# Please run this cell if you need the additional libraries
!pip install networkx
!pip install scipy
# Import needed libraries
import cugraph
import cudf
from collections import OrderedDict
# NetworkX libraries
import networkx as nx
from scipy.io import mmread
```
### Some Prep
```
# define the parameters
max_iter = 100 # The maximum number of iterations
tol = 0.00001 # tolerance
alpha = 0.85 # alpha
# Define the path to the test data
datafile='../data/karate-data.csv'
```
---
# NetworkX
```
# Read the data, this also created a NetworkX Graph
file = open(datafile, 'rb')
Gnx = nx.read_edgelist(file)
pr_nx = nx.pagerank(Gnx, alpha=alpha, max_iter=max_iter, tol=tol)
pr_nx
```
Running NetworkX is that easy.
Let's see how that compares to cuGraph
----
# cuGraph
### Read in the data - GPU
cuGraph depends on cuDF for data loading and the initial Dataframe creation
The data file contains an edge list, which represents the connection of a vertex to another. The `source` to `destination` pairs is in what is known as Coordinate Format (COO). In this test case, the data is just two columns. However a third, `weight`, column is also possible
```
# Read the data
gdf = cudf.read_csv(datafile, names=["src", "dst"], delimiter='\t', dtype=["int32", "int32"] )
```
### Create a Graph
```
# create a Graph using the source (src) and destination (dst) vertex pairs from the Dataframe
G = cugraph.Graph()
G.from_cudf_edgelist(gdf, source='src', destination='dst')
```
### Call the PageRank algorithm
```
# Call cugraph.pagerank to get the pagerank scores
gdf_page = cugraph.pagerank(G)
```
_It was that easy!_
Compared to NetworkX, the cuGraph data loading might have been more steps, but using cuDF allows for a wider range of data to be loaded.
----
Let's now look at the results
```
# Find the most important vertex using the scores
# This methods should only be used for small graph
bestScore = gdf_page['pagerank'][0]
bestVert = gdf_page['vertex'][0]
for i in range(len(gdf_page)):
if gdf_page['pagerank'][i] > bestScore:
bestScore = gdf_page['pagerank'][i]
bestVert = gdf_page['vertex'][i]
print("Best vertex is " + str(bestVert) + " with score of " + str(bestScore))
```
The top PageRank vertex and score match what was found by NetworkX
```
# A better way to do that would be to find the max and then use that values in a query
pr_max = gdf_page['pagerank'].max()
def print_pagerank_threshold(_df, t=0) :
    """Print every vertex whose PageRank score is at least ``t``.

    Parameters
    ----------
    _df : DataFrame (cudf in this notebook) with 'vertex' and 'pagerank' columns
    t : numeric threshold; rows with pagerank >= t are printed (default 0)
    """
    # '@t' interpolates the local variable ``t`` into the query expression.
    filtered = _df.query('pagerank >= @t')
    for i in range(len(filtered)):
        # NOTE(review): ``filtered[col][i]`` assumes positional access with
        # i = 0..len-1; in pandas this lookup is label-based and the filtered
        # frame keeps the original index labels — confirm cudf semantics here.
        print("Best vertex is " + str(filtered['vertex'][i]) +
              " with score of " + str(filtered['pagerank'][i]))
print_pagerank_threshold(gdf_page, pr_max)
```
----
a PageRank score of _0.10047_ is very low, which can be an indication that there is no more central vertex than any other. Rather than just looking at the top score, let's look at the top three vertices and see if there are any insights that can be inferred.
Since this is a very small graph, let's just sort and get the first three records
```
sort_pr = gdf_page.sort_values('pagerank', ascending=False)
sort_pr.head(3).to_pandas()
```
Going back and looking at the graph with the top three vertices highlighted (illustration below) it is easy to see that the top scoring vertices also appear to be the vertices with the most connections.
Let's look at sorted list of degrees (since the graph is undirected and symmetrized, the out degree is the same as the in degree)
```
d = G.degrees()
d.sort_values('out_degree', ascending=False).head(4)
```
<img src="../img/zachary_graph_pagerank.png" width="600">
___
Copyright (c) 2019, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
___
| github_jupyter |
# Итерационные методы для собственных значений
## PINVIT
- Идея - минимизировать отношение Релея
- Используем градиентный спуск предобусловленный матрицей $(A - \sigma I)$
```
import numpy as np
import scipy.sparse.linalg as spsplin
import scipy.sparse as spsp
def pinvit(A, x0, sigma, tau, num_iter, tol, inexact=True):
    """Preconditioned inverse iteration (PINVIT) for an extreme eigenpair.

    Performs gradient descent on the Rayleigh quotient, preconditioned by a
    (possibly incomplete) LU factorization of the shifted matrix A - sigma*I.

    Parameters
    ----------
    A : scipy sparse matrix
        Symmetric matrix whose eigenpair is sought.
    x0 : ndarray
        Starting vector.
    sigma : float
        Shift used only to build the preconditioner.
    tau : float
        Step size of the descent.
    num_iter : int
        Maximum number of iterations.
    tol : float
        Stop once the residual norm ||A x - lambda x|| drops below this.
    inexact : bool
        If True, use an incomplete LU (spilu); otherwise a full LU (splu).

    Returns
    -------
    tuple
        (final iterate, its Rayleigh quotient, list of all iterates).
    """
    shifted = A - sigma * spsp.eye(A.shape[0])
    factor = spsplin.spilu(shifted) if inexact else spsplin.splu(shifted)
    current = x0.copy()
    history = [current]
    for _ in range(num_iter):
        rayleigh = (current @ A @ current) / (current @ current)
        step = factor.solve(A @ current - rayleigh * current)
        current = current - tau * step
        history.append(current)
        # Convergence test reuses the Rayleigh quotient of the previous iterate.
        if np.linalg.norm(A @ current - rayleigh * current) < tol:
            break
    return current, (current @ A @ current) / (current @ current), history
import matplotlib.pyplot as plt
%matplotlib inline
# Build the 2-D discrete Laplacian on an n x n grid as a Kronecker sum of 1-D Laplacians.
n = 100
ex = np.ones(n)
lp1 = spsp.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr')
e = spsp.eye(n)
A = spsp.kron(lp1, e) + spsp.kron(e, lp1)
plt.spy(A, markersize=1)
# Reference answer: smallest-magnitude eigenvalue computed by eigsh.
true_eigval, true_eigvec = spsplin.eigsh(A, which="SM", k=1)
print(true_eigval)
x0 = np.random.randn(A.shape[0])
# PINVIT with an incomplete-LU preconditioner (inexact=True), up to 300 iterations...
pinvit_eigvec, pinvit_eigval, conv_pinvit_inexact = pinvit(A, x0, 0, 1, 300, 1e-6, True)
print(pinvit_eigval)
# ...and with an exact LU factorization (inexact=False), up to 100 iterations.
pinvit_eigvec, pinvit_eigval, conv_pinvit_exact = pinvit(A, x0, 0, 1, 100, 1e-6, False)
print(pinvit_eigval)
# Residual norm ||A x - (Rayleigh quotient) x|| per iterate for both runs.
plt.semilogy([np.linalg.norm(A @ x - (x @ A @ x) * x / (x @ x)) for x in conv_pinvit_exact], label="Exact")
plt.semilogy([np.linalg.norm(A @ x - (x @ A @ x) * x / (x @ x)) for x in conv_pinvit_inexact], label="Inexact")
plt.legend()
def shift_inverse_iteration_fast(A, x0, num_iter, shift, tol):
    """Shift-and-invert iteration with a single up-front LU factorization.

    Repeatedly applies (A - shift*I)^{-1} and renormalizes; the iterates
    converge toward the eigenvector whose eigenvalue is closest to ``shift``.

    Returns
    -------
    tuple
        (final unit-norm iterate, list of all iterates including x0).
    """
    # Factor the shifted matrix once; every iteration reuses the factorization.
    factor = spsplin.splu(A - shift * spsp.eye(A.shape[0]))
    vec = x0.copy()
    iterates = [vec]
    for _ in range(num_iter):
        vec = factor.solve(vec)
        vec = vec / np.linalg.norm(vec)
        iterates.append(vec)
        rayleigh = vec @ A @ vec
        if np.linalg.norm(A @ vec - rayleigh * vec) < tol:
            break
    return vec, iterates
shift = 0.
num_iter = 100
tol=1e-6
# Shift-invert iteration converges to the eigenvalue nearest the (zero) shift.
middle_eigv, conv_m = shift_inverse_iteration_fast(A, x0, num_iter, shift, tol)
# Rayleigh quotient of the final (unit-norm) iterate.
print(middle_eigv @ A @ middle_eigv)
# Iterates are unit-norm, so the residual needs no division by (x @ x).
plt.semilogy([np.linalg.norm(A @ x - (x @ A @ x) * x) for x in conv_m])
plt.semilogy([np.linalg.norm(A @ x - (x @ A @ x) * x) for x in conv_m], label="Shift-Invert")
# plt.semilogy([np.linalg.norm(A @ x - (x @ A @ x) * x / (x @ x)) for x in conv_pinvit_exact], label="Exact")
plt.semilogy([np.linalg.norm(A @ x - (x @ A @ x) * x / (x @ x)) for x in conv_pinvit_inexact], label="Inexact")
plt.legend()
```
| github_jupyter |
# Accessing and Processing the Optical Absorption and Attenuation (OPTAA) Data from OOI
OOI uses the [Sea-Bird Electronics, AC-S In-Situ Spectrophotometer](https://www.seabird.com/ac-s-spectral-absorption-and-attenuation-sensor/product?id=60762467715) to measure the in situ absorption and beam attenuation coefficients from approximately 400 to 750 nm at a 4 nm resolution. Each sensor is equipped with a unique filter set that determines the exact wavelengths measured and the total number of wavelengths measured. This difference in the number of measured wavelengths and, to a certain extent, their specific values can cause complications when trying to compare measurements across deployments. One solution, presented here, is to reset the number of wavelengths for each unique instrument/deployment instance using fill values to pad out the number of wavelengths and the absorption and beam attenuation measurements to a fixed number of wavelengths. This creates a common dimension for the number of wavelengths that can be used when subsequently concatenating data across deployments.
An additional issue is the volume of data produced by the sensor. A full deployment's worth of data can get rather unwieldy. Per a recommendation from the vendor, the first 60 seconds worth of data should be discarded after an instrument starts up. Dropping those records will help to reduce the volume of the data. We can also use burst averaging to further reduce the volume. Burst averaging has an added benefit in helping to reduce some of the point-to-point noisiness that is common in bio-optical data (biology is messy!).
```
%matplotlib inline
# load the different libraries and modules needed for the analysis
import csv
import io
import numpy as np
import os
import pandas as pd
import xarray as xr
from ooi_data_explorations.common import load_gc_thredds
from pyseas.data.opt_functions import opt_internal_temp, opt_external_temp
import warnings
warnings.filterwarnings('ignore')
from matplotlib import pyplot as plt
# NOTE(review): the 'seaborn-white' style name was removed in matplotlib >= 3.6
# (replaced by 'seaborn-v0_8-white') -- confirm the pinned matplotlib version.
plt.style.use('seaborn-white')
```
For this example, we'll look at the data from Deployment 3 of the Oregon Shelf Surface Mooring. The instrument failed after approximately 8 weeks into the deployment, so this is not a full deployment's worth of data. This particular instrument is run once an hour for 2-4 minutes at ~4 Hz. Each measurement cycle represents a burst measurement.
Rather than requesting the data from the M2M interface, we'll request the data from the [OOI Gold Copy THREDDS server](https://thredds.dataexplorer.oceanobservatories.org/thredds/catalog/ooigoldcopy/public/catalog.html). This data is pulled from the M2M system by automated processes on a regular schedule, and is used as the backend data source for [Data Explorer](https://dataexplorer.oceanobservatories.org/). Pulling this pre-computed and organized data, compared to issuing an M2M data request, waiting while the data is collected and processed, and then downloading the results, can represent a significant time-savings for the researcher (on the order of hours for some of the larger, more complex data sets). The functions used to access the data from the Gold Copy can be found in the python section of the [OOI Data Explorations](https://github.com/oceanobservatories/ooi-data-explorations/tree/master/python) GitHub repository.
```
# Setup needed parameters for the request: Oregon Shelf Surface Mooring,
# near-surface instrument frame, AC-S, recovered-host data from Deployment 3.
site = 'CE02SHSM' # OOI Net site designator
node = 'RID27' # OOI Net node designator
sensor = '01-OPTAAD000' # OOI Net sensor designator
stream = 'optaa_dj_dcl_instrument_recovered' # OOI Net stream name
method = 'recovered_host' # OOI Net data delivery method
tag = '.*deployment0003.*OPTAA.*\\.nc$' # limit request to OPTAA NetCDF files from Deployment 3
# download the data from the OOI Gold Copy THREDDS server
data = load_gc_thredds(site, node, sensor, method, stream, tag)
```
Per the vendor recommendation we'll drop the first 60 seconds worth of data and then we'll average each burst. Note, more recently, the vendor has recommended dropping the first 2 minutes worth of data, but the instrument was operated for several deployment cycles using a 2-minute burst duration schedule. If we delete the first two minutes worth of data, we would delete the entire burst. For now, we'll just use the initial recommendation.
```
# Delete the first 60 seconds of the data record per recommendation from the vendor
# (elapsed_run_time is reported in milliseconds; keep only samples past 60 s).
data.elapsed_run_time.values = data.elapsed_run_time.where(data.elapsed_run_time / 1000 > 60)
data = data.dropna(dim='time', subset=['elapsed_run_time'])
data = data.sortby('time')
# median average each burst, centering the window on the time range of interest (default is left shifted)
# NOTE(review): resample's 'base' and 'loffset' keywords were deprecated in pandas 1.1
# and removed in 2.0 (replaced by 'origin'/'offset') -- confirm the pinned versions.
burst = data.resample(time='900s', base=3150, loffset='450s', skipna=True).median(keep_attrs=True)
# drop the all-NaN windows produced for gaps between bursts
burst = burst.where(~np.isnan(burst.deployment), drop=True)
burst
```
The data needs some slight re-organization so it can be used across deployments and sites. Specifically, each OPTAA will have a different number of wavelengths, based on the specific filter set used in the instrument. In order to merge the data across deployments and sites, the number of wavelengths needs to be padded to a common number (we'll use 100 since the instruments all measure between 80 and 90 wavelengths).
```
# create an xarray dataset of the 2D variables, padding the number of wavelengths to a consistent
# length of 100 using fill values.
wavelength_number = np.arange(100).astype(np.int32) # used as a dimensional variable
# number of wavelengths this particular instrument measures (varies with its filter set)
num_wavelengths = burst.num_wavelengths.values[0].astype(np.int32)
pad = 100 - num_wavelengths
# padding arrays: NaN for float data, -9999999 sentinel for integer count data
fill_nan = np.tile(np.ones(pad) * np.nan, (len(burst.time), 1))
fill_int = np.tile(np.ones(pad) * -9999999, (len(burst.time), 1))
wavelength_a = np.concatenate([burst.wavelength_a.values, fill_nan], axis=1)
wavelength_c = np.concatenate([burst.wavelength_c.values, fill_nan], axis=1)
# rebuild every 2D (time x wavelength) variable against the common 100-element dimension
ac = xr.Dataset({
    'wavelength_a': (['time', 'wavelength'], wavelength_a),
    'a_signal_counts': (['time', 'wavelength'], np.concatenate([burst.a_signal_counts.astype(np.int32),
                                                                fill_int], axis=1)),
    'a_reference_counts': (['time', 'wavelength'], np.concatenate([burst.a_reference_counts.astype(np.int32),
                                                                   fill_int], axis=1)),
    'optical_absorption': (['time', 'wavelength'], np.concatenate([burst.optical_absorption, fill_nan],
                                                                  axis=1)),
    'wavelength_c': (['time', 'wavelength'], wavelength_c),
    'c_signal_counts': (['time', 'wavelength'], np.concatenate([burst.c_signal_counts.astype(np.int32),
                                                                fill_int], axis=1)),
    'c_reference_counts': (['time', 'wavelength'], np.concatenate([burst.c_reference_counts.astype(np.int32),
                                                                   fill_int], axis=1)),
    'beam_attenuation': (['time', 'wavelength'], np.concatenate([burst.beam_attenuation, fill_nan],
                                                                axis=1))
}, coords={'time': (['time'], burst.time.values), 'wavelength': wavelength_number})
# drop the original 2D variables from the burst data set
drop = burst.drop(['wavelength_a', 'a_signal_counts', 'a_reference_counts', 'optical_absorption',
                   'wavelength_c', 'c_signal_counts', 'c_reference_counts', 'beam_attenuation'])
# if we used burst averaging, reset the data type for the 'a' and 'c' signal and reference
# dark values, and the other raw parameters
int_arrays = ['a_signal_dark_counts', 'a_reference_dark_counts', 'c_signal_dark_counts',
              'c_reference_dark_counts', 'internal_temp_raw', 'external_temp_raw',
              'pressure_counts', 'num_wavelengths', 'deployment']
for k in drop.variables:
    if k in int_arrays:
        drop[k] = drop[k].astype(np.int32)
# recombine the two datasets
optaa = xr.merge([drop, ac])
# reset the attributes, which the merging drops
optaa.attrs = data.attrs
for v in optaa.variables:
    optaa[v].attrs = data[v].attrs
```
If the data from a co-located CTD is missing (instrument or telemetry failed), then the parameters `temp` and `practical_salinity` will be missing as well as the derived `optical_absorption` and `beam_attenuation` parameters which require temperature and salinity as inputs to the functions. The last step needed before cross-comparisons (between deployments or sites) is to check for the presence of these values and, if they are missing, create them using NaNs as a fill value.
```
# check for data from a co-located CTD, if not present add with the appropriate attributes;
# the NaN-filled placeholders are only created when the CTD record is truly missing,
# so real data is never overwritten.
if 'temp' not in optaa.variables:
    optaa['temp'] = ('time', optaa['deployment'] * np.nan)
    optaa['temp'].attrs = {
        'long_name': 'Seawater Temperature',
        'standard_name': 'sea_water_temperature',
        'units': 'degree_Celsius',
        'comment': ('Normally this would be seawater temperature data from a co-located CTD. However, data from ' +
                    'that sensor is unavailable. This value has been filled with NaNs to preserve the structure ' +
                    'of the data set.'),
        'data_product_identifier': 'TEMPWAT_L1',
    }
    optaa['practical_salinity'] = ('time', optaa['deployment'] * np.nan)
    optaa['practical_salinity'].attrs = {
        'long_name': 'Practical Salinity',
        'standard_name': 'sea_water_practical_salinity',
        'units': '1',
        'comment': ('Normally this would be seawater salinity data from a co-located CTD. However, data from ' +
                    'that sensor is unavailable. This value has been filled with NaNs to preserve the structure ' +
                    'of the data set.'),
        'data_product_identifier': 'PRACSAL_L2'
    }
    # The derived products require CTD temperature and salinity as inputs, so they
    # are also missing; pad them with NaNs shaped like the signal-count arrays.
    # BUG FIX: the dimensions were a single string 'time, wavelength' instead of two
    # names, and the template arrays were referenced as undefined bare names
    # (a_signal_counts / c_signal_counts); both raised at run time.
    optaa['optical_absorption'] = (['time', 'wavelength'], optaa['a_signal_counts'] * np.nan)
    optaa['beam_attenuation'] = (['time', 'wavelength'], optaa['c_signal_counts'] * np.nan)
```
## Save the data for future work
At this point the optaa record is at a point where it can be saved locally or used in further work looking across deployments or between sites.
Some of the work one might want to do is a review of the data by comparing the instrument temperature records versus the co-located CTD data, by computing different parameters (chlorophyll concentration, POC, and absorption ratios) from the absorption and attenuation records and by reviewing the shape and magnitude of the aborption and attenuation spectra.
```
# convert internal and external temperature sensors from raw counts to degrees Celsius
optaa['internal_temp'] = opt_internal_temp(optaa['internal_temp_raw'])
optaa['external_temp'] = opt_external_temp(optaa['external_temp_raw'])
%matplotlib inline
# Plot the instrument and co-located CTD temperature records; all three curves
# should track closely, so divergence flags a suspect sensor.
plt.rcParams.update({'font.size': 14})
fig = plt.figure(figsize=(16,9))
plt.plot(optaa['time'], optaa['temp'], '-k', label='CTD')
plt.plot(optaa['time'], optaa['internal_temp'], label='AC-S Internal')
plt.plot(optaa['time'], optaa['external_temp'], label='AC-S External')
plt.ylabel('Seawater Temperature (degC)')
plt.legend()
```
The CTD seawater temperature and the instrument internal temperature are inputs to the functions used to calculate the optical absorption and beam attenuation. The data from the external temperature sensor on the AC-S is not used, but it should be producing values very close to the CTD (the AC-S temperaure sensor is not as accurate as the CTD). All three measurements should be very similar. If they are not, that would be a good indication that there is a problem with one of the sensors.
```
def estimate_chl_poc(optaa):
    """
    Derive estimates of Chlorophyll-a and particulate organic carbon (POC)
    concentrations from the temperature, salinity and scatter corrected
    absorption and beam attenuation data.
    :param optaa: xarray dataset with the scatter corrected absorbance data.
    :return optaa: xarray dataset with the estimates for chlorophyll and POC
        concentrations added.
    """
    def nearest(channel, target):
        # index of the measured wavelength closest to the requested wavelength
        return np.nanargmin(np.abs(optaa[channel].values[0, :] - target))

    # standard chlorophyll line-height estimate: absorption at the 676 nm peak
    # above a 650/715 nm baseline, divided by an extinction coefficient of 0.020
    apg = optaa['optical_absorption']
    line_height = (apg[:, nearest('wavelength_a', 676.0)]
                   - 39 / 65 * apg[:, nearest('wavelength_a', 650.0)]
                   - 26 / 65 * apg[:, nearest('wavelength_a', 715.0)])
    optaa['estimated_chlorophyll'] = line_height / 0.020

    # POC estimated from the beam attenuation at 660 nm
    cpg = optaa['beam_attenuation']
    optaa['estimated_poc'] = cpg[:, nearest('wavelength_c', 660.0)] * 380
    return optaa
def calculate_ratios(optaa):
    """
    Calculate pigment ratios to use in analyzing community composition and/or
    bloom health. As these ratios are subject to the effects of biofouling it
    is expected that these values will start to become chaotic with noise
    dominating the signal. Thus these ratios can also serve as biofouling
    indicators.
    :param optaa: xarray dataset with the scatter corrected absorbance data.
    :return optaa: xarray dataset with the pigment ratios added.
    """
    def nearest(target):
        # index of the measured 'a' wavelength closest to the requested wavelength
        return np.nanargmin(np.abs(optaa['wavelength_a'].values[0, :] - target))

    apg = optaa['optical_absorption']
    # every ratio is taken relative to the chlorophyll absorption peak at 440 nm
    chl_peak = apg[:, nearest(440.0)]
    optaa['ratio_cdom'] = apg[:, nearest(412.0)] / chl_peak
    optaa['ratio_carotenoids'] = apg[:, nearest(490.0)] / chl_peak
    optaa['ratio_phycobilins'] = apg[:, nearest(530.0)] / chl_peak
    optaa['ratio_qband'] = apg[:, nearest(676.0)] / chl_peak
    return optaa
# Calculate the chlorophyll and particulate organic carbon (POC) concentrations and key pigment ratios
# (both helpers add new variables to the dataset and return it).
optaa = estimate_chl_poc(optaa)
optaa = calculate_ratios(optaa)
```
The AC-S data can be used to create estimates of the chlorophyll and particulate organic carbon (POC) concentration. These can be compared to data from a co-located fluorometer to help validate the performance of the sensors. Additionally, pigment ratios can be calculated to assess the impacts of biofouling, potential changes in community composition, light history, or bloom health and age.
* CDOM Ratio -- ratio of CDOM absorption in the violet portion of the spectrum at 412 nm relative to chlorophyll absorption at 440 nm. Ratios greater than 1 indicate a preponderance of CDOM absorption relative to chlorophyll.
* Carotenoid Ratio -- ratio of carotenoid absorption in the blue-green portion of the spectrum at 490 nm relative to chlorophyll absorption at 440 nm. A changing carotenoid to chlorophyll ratio may indicate a shift in phytoplankton community composition in addition to changes in light history or bloom health and age.
* Phycobilin Ratio -- ratio of phycobilin absorption in the green portion of the spectrum at 530 nm relative to chlorophyll absorption at 440 nm. Different phytoplankton, notably cyanobacteria, utilize phycobilins as accessory light harvesting pigments. An increasing phycobilin to chlorophyll ratio may indicate a shift in phytoplankton community composition.
* Q Band Ratio -- the Soret and the Q bands represent the two main absorption bands of chlorophyll. The former covers absorption in the blue region of the spectrum, while the latter covers absorption in the red region. A decrease in the ratio of the intensity of the Soret band at 440 nm to that of the Q band at 676 nm may indicate a change in phytoplankton community structure. All phytoplankton contain chlorophyll a as the primary light harvesting pigment, but green algae and dinoflagellates contain chlorophyll b and c, respectively, which are spectrally redshifted compared to chlorophyll a.
```
# plot the estimated chlorophyll and POC concentrations in two stacked panels
# sharing the time axis.
plt.rcParams.update({'font.size': 14})
fig = plt.figure(figsize=(16,9))
ax1 = plt.subplot(211)
plt.plot(optaa['time'], optaa['estimated_chlorophyll'])
plt.ylabel('Chlorophyll ($\mu$g/L)')
plt.setp(ax1.get_xticklabels(), visible=False) # turn off x-axis ticklabels
plt.ylim([0, 25])
ax2 = plt.subplot(212, sharex=ax1)
plt.plot(optaa['time'], optaa['estimated_poc'])
plt.ylabel('POC (mg C/m$^3$)')
plt.ylim([0, 1500])
plt.show()
# plot the pigment ratios, one panel per ratio, all sharing the time axis.
plt.rcParams.update({'font.size': 14})
fig = plt.figure(figsize=(16,11))
ax1 = plt.subplot(411)
plt.plot(optaa['time'], optaa['ratio_cdom'])
plt.ylabel('CDOM Ratio')
plt.setp(ax1.get_xticklabels(), visible=False) # turn off x-axis ticklabels
ax2 = plt.subplot(412, sharex=ax1)
plt.plot(optaa['time'], optaa['ratio_carotenoids'])
plt.ylabel('Carotenoid Ratio')
plt.setp(ax2.get_xticklabels(), visible=False) # turn off x-axis ticklabels
plt.ylim([0, 1])
ax3 = plt.subplot(413, sharex=ax1)
plt.plot(optaa['time'], optaa['ratio_phycobilins'])
plt.ylabel('Phycobilin Ratio')
plt.setp(ax3.get_xticklabels(), visible=False) # turn off x-axis ticklabels
plt.ylim([0, 1])
ax4 = plt.subplot(414, sharex=ax1)
plt.plot(optaa['time'], optaa['ratio_qband'])
plt.ylabel('Q Band Ratio')
plt.ylim([0, 1])
plt.show()
```
Finally, examining the shape and magnitudes of the absorption and attenuation spectra can be helpful in assessing the instrument performance in addition to revealing interesting information about the in situ absorption and attenuation properties of the seawater.
```
# create groups of data organized by the week
# NOTE(review): grouping on 'time.week' relies on a datetime accessor deprecated in
# newer pandas/xarray (use .dt.isocalendar().week) -- confirm the pinned versions.
weeks = optaa.groupby('time.week').groups
# plot weekly summaries of the absorption spectra (the mean and the min and max range);
# one panel per week (21-28) in a 2x4 grid -- the loop replaces eight copy-pasted
# subplot stanzas with identical output.
plt.rcParams.update({'font.size': 14})
fig, axes = plt.subplots(2, 4, figsize=(16, 9), sharex=True, sharey=True)
for ax, wk in zip(axes.flat, range(21, 29)):
    week = optaa.isel(time=weeks[wk])
    ax.plot(week['wavelength_a'][0, :], week['optical_absorption'].mean(axis=0), '-k',
            label='Week %d' % wk)
    ax.fill_between(week['wavelength_a'][0, :], week['optical_absorption'].min(axis=0),
                    week['optical_absorption'].max(axis=0))
    ax.legend()
# shared axes: set the limits once, then label only the outer edges of the grid
axes[0, 0].set_xlim([400, 715])
axes[0, 0].set_ylim([0, 1.2])
for ax in axes[:, 0]:
    ax.set_ylabel('Absorption (m$^{-1}$)')
for ax in axes[1, :]:
    ax.set_xlabel('Wavelength (nm)')
plt.show()
# plot weekly summaries of the attenuation spectra (the mean and the min and max range);
# one panel per week (21-28) in a 2x4 grid -- the loop replaces eight copy-pasted
# subplot stanzas with identical output.
plt.rcParams.update({'font.size': 14})
fig, axes = plt.subplots(2, 4, figsize=(16, 9), sharex=True, sharey=True)
for ax, wk in zip(axes.flat, range(21, 29)):
    week = optaa.isel(time=weeks[wk])
    ax.plot(week['wavelength_c'][0, :], week['beam_attenuation'].mean(axis=0), '-k',
            label='Week %d' % wk)
    ax.fill_between(week['wavelength_c'][0, :], week['beam_attenuation'].min(axis=0),
                    week['beam_attenuation'].max(axis=0))
    ax.legend()
# shared axes: set the limits once, then label only the outer edges of the grid
axes[0, 0].set_xlim([400, 715])
axes[0, 0].set_ylim([0, 15])
for ax in axes[:, 0]:
    ax.set_ylabel('Attenuation (m$^{-1}$)')
for ax in axes[1, :]:
    ax.set_xlabel('Wavelength (nm)')
plt.show()
```
| github_jupyter |
# Pricing assets with the risk-free metric
## Vanilla assets
1. Based on mainly observations select a microscopic process that generates the price path of the asset or its underlier.<br/>
For example, in the simplest case this microscopic process is a normalized random walk with a constant drift.
2. Generate paths with this microscopic process `from the current time to the maturity of the priced asset`.
2. For each of the $\,N\,$ paths, calculate the value of the investigated asset at the path's end point.
3. Take the average of all end point values. In other words, take each end point value with a weight that is the inverse of the number of paths.
4. Discount this average to $\,t=0\,$ to obtain the PV of the asset.
## Exotic assets
5. For exotic assets (many of them are derivatives), the price at a given time can trigger decisions in the contract,<br/>
and thereby the microscopic or macroscopic rules generating the price may be changed.
6. After 2008 many analytical and numerical calculations include a nonzero probability of default,
<br/>
and the consequences of a default event usually depend on the asset's price. So in this sense,
<br/>
a large portion of current assets are priced as exotic assets.
# Self-financing portfolio
## Plain English explanations
The following are several partial explanations of the term "self-financing".
1. The term `"self-financing" describes` the `time evolution` of a portfolio, `not its current state`.
2. This portfolio is similar to a locked box. You can move capital inside this box, but not into or out of this box.
3. In other words,
<br/>
(3a) if any of the assets within this portfolio has gains, then that gain is invested in the assets of the portfolio,
<br/>
and (3b) if any of the assets has losses, then that loss can be compensated for only from one of the portfolio's other assets.
4. "A portfolio is self-financing" means that the portfolio has no incoming or outgoing capital.
5. In other words, if your portfolio is made up of assets A and B, and you have a loss on A,
<br/>
then you can revert A to its initial value $-$ that is, buy more A $-$ only by selling B.
## Examples
1. You start a portfolio made up of 100 USD in a cash account and 1 share of Apple stock.
2. If the stock's value goes up, you can sell stock and put the cash on your cash account.
3. If the stock's value goes down, you can compensate for the loss of value by buying more stock with the cash on your cash account.
4. Example no.3. shows that your cash account `can be used for hedging`,
<br/>
which means compensating for the loss of the value of your stocks.
## Formal definition
1. Notations<br/>
With $\,i=1,\,2,\,\dots\,n\,$ consider the assets $\,A_{\,i}\left(\,t\,\right)\,$, their weights $\,w_{\,i}\left(\,t\,\right)\,$, and the portfolio $\,\Pi=\sum_{\,i=1}^{\,n}\,w_i\left(\,t\,\right)\,A_i\left(\,t\,\right)\,$.
<br/>
During the infinitesimally short $\,dt\,$ time between $\,t\,$ and $\,t+dt\,$ the $\,i$th asset value changes by $\,dA_i\,$.
2. Definition with the above notations<br/>
From time $\,t\,$ to time $\,t+dt\,$ the portfolio $\,\Pi\,$ `is a self-financing portfolio if`
<br/>
between $\,t\,$ and $\,t+dt\,$ the value of $\,\Pi\,$ changes by
$\,d\,\Pi =\sum_{\,i=1}^{\,n}\,w_i\left(\,t\,\right)\,dA_i\left(\,t\,\right)\,$.
# Black-Scholes pricing
## Notations. Definitions. Assumptions.
### Notations
1. At time $\,t\,$ the `price of the stock` is $\,S\left(\,t\,\right)\,$.
2. If the price of the stock is $\,S\,$, then the `price of a derivative` on this underlying stock is $\,f\left(\,S\,\right)\,$.
3. During the infinitesimally short $\,dt\,$ time the infinitesimally small `change of the Wiener process` is $\,dz\,$.
### Definition of the risk-free asset
The risk-free asset's `value follows a known (announced) deterministic function` over time.
### Assumptions
Assumptions used during the derivation of the Black-Scholes-Merton equation:
1. The `stock price` follows the generalized Wiener process $\,dS=\mu\,S\,dt + \sigma\,S\,dz\,$ with constant $\,\mu\,$ drift and constant $\,\sigma\,$ volatility.
2. The $\,r\,$ risk-free short rate is (a) constant over time and (b) identical for all maturities.
3. There is a `bond` that is `risk-free`, and its price is $\,B\left(\,t\,\right)=\mathrm{e}^{\,r\,t}$. Therefore, the differential form is $\,dB=r\ B\ dt\,$.
4. Any amount of money can be lent or borrowed at any time at the risk-free rate.
5. Stock trading can happen at any time point. In other words, trading is continuous in time.
6. Any partial amount of stock can be traded.
7. The stock pays no dividends.
8. There are no transaction fees, taxes, or any other trading-related costs.
9. There are no arbitrage opportunities at any time.
## Calculating the Black-Scholes-Merton equation with a Replicating portfolio
We do not know the price $\,f\left(\,S\left(t\right),\,t\,\right)\,$ of the derivative, however,
we do know the prices of two other assets:<br/>
the price $\,B\left(\,t\,\right)\,$ of the bond and the price $\,S\left(\,t\,\right)\,$ of the derivative's underlier.
`Replicate the derivative` price $\,f\,$ with a portfolio $\,\Pi\,$ containing the bond and the underlying with the<br/>weight functions $\,w_B\left(\,t\,\right)\,$ and $\,w_S\left(\,t\,\right)\,$.
For simplicity, let's write $\,w_B\,$ and $\,w_S\,$ without the time dependence:
$(1) \ \ \ \ f\left(\,t\,\right) \,=\, w_B\,B\left(\,t\,\right) \,+\, w_S\,S\left(\,t\,\right)\,.$
From $\,(1)\,$ we can express the total value that we have in the bond:
$(2) \ \ \ \ w_B\,B\left(\,t\,\right) \,=\, f\left(\,t\,\right) \,-\, w_S\,S\left(\,t\,\right)\,.$
We assumed that there is no arbitrage, dividends, fees, taxes, or other costs, therefore, the replicating portfolio
<br/>is a `self-financing portfolio` between time $\,t\,$ and time $\,t + dt\,$ when the derivative's price changes by
$(3) \ \ \ \ df \,=\, w_B\ dB \,+\, w_S\ dS \,.$
Insert $\,dB\,$ and $\,dS\,$ into $\,(3)\,$ to obtain
$(4)\ \ \ \ df \,=\, w_B\ r\ B\ dt \,+\, \color{red}{w_S}\,\left(\,\color{red}{\mu\,S\,dt} + \sigma\,S\,dz\,\right) \,.$
For reference, this is the differential form of Ito's lemma that we obtained previously:
$(5) \ \ \ \ \displaystyle{df \,=\, \left(\,\frac{\partial\,f}{\partial\,t} \,+\, \frac{1}{2}\,\frac{\partial^{\,2}\,f}{\partial\,S^{\,2}}\,\sigma^{\,2}\,S^{\,2} \,+\, \color{red}{\frac{\partial\,f}{\partial\,S}\,\mu\,S}\,\right)\,\color{red}{dt} \,+\, \frac{\partial\,f}{\partial\,S}\ \sigma\ S\ dz}\,.$
In both $\,(4)\,$ and $\,(5)\,$ the left hand side is $\,df\,$. Now look at the $\,w_S\,\mu\,S\,dt\,$ term on the right hand side of $\,(4)\,$.
Observe that on the right hand side of $\,(5)\,$ only the $\,\displaystyle{\frac{\partial\,f}{\partial\,S}\,\mu\,S\,dt}\,$ term
contains $\,\mu\,S\,dt\,$. Therefore,
$(5)\ \ \ \ \displaystyle{\frac{\partial\,f}{\partial\,S}\,\mu\,S\,dt = w_S\,\mu\,S\,dt}$
and
$(6)\ \ \ \ w_S=\displaystyle{\frac{\partial\,f}{\partial\,S}}\,.$
From the r.h.s. of $\,(2)\,$ insert the value of $\,w_B\,B\,$ into $\,(4)\,$ to get
$(7)\ \ \ \ df \,=\, \left(\,f - w_S\,S\,\right)\, r\ dt \,+\, w_S\,\left(\,\mu\,S\,dt + \sigma\,S\,dz\,\right) \, .$
The left hand side of $\,(5)\,$ and the l.h.s. of $\,(7)\,$ are equal,
therefore, their right hand sides are also equal:
$(8)\ \ \ \ \displaystyle{
\left(\,f - w_S\,S\,\right)\, r\ dt \,+\, \color{blue}{w_S}\,\left(\,\color{blue}{\mu\,S\,dt} + \sigma\,S\,dz\,\right)
\,=\,
\left(\,\frac{\partial\,f}{\partial\,t} \,+\, \frac{1}{2}\,\frac{\partial^{\,2}\,f}{\partial\,S^{\,2}}\,\sigma^{\,2}\,S^{\,2} \,+\, \color{blue}{\frac{\partial\,f}{\partial\,S}\,\mu\,S}\,\right)\,\color{blue}{dt} \,+\, \frac{\partial\,f}{\partial\,S}\ \sigma\ S\ dz}\,.$
Based on $\,(5)\,$, remove the two terms colored blue on the two sides of $\,(8)\,$ to obtain
$(9)\ \ \ \ \displaystyle{
\left(\,f - w_S\,S\,\right)\, r\ dt \,+\, \color{green}{w_S\ \sigma\ S\ dz}
\,=\,
\left(\,\frac{\partial\,f}{\partial\,t} \,+\, \frac{1}{2}\,\frac{\partial^{\,2}\,f}{\partial\,S^{\,2}}\,\sigma^{\,2}\,S^{\,2} \right)\,dt \,+\, \color{green}{\frac{\partial\,f}{\partial\,S}\ \sigma\ S\ dz}}\,.$
Based on $\,(6)\,$, remove the two terms colored green on the two sides of $\,(9)\,$ to obtain
$(10)\ \ \ \ \displaystyle{
\left(\,f - w_S\,S\,\right)\, r\ dt
\,=\,
\left(\,\frac{\partial\,f}{\partial\,t} \,+\, \frac{1}{2}\,\frac{\partial^{\,2}\,f}{\partial\,S^{\,2}}\,\sigma^{\,2}\,S^{\,2} \right) dt}\,.$
1. On both sides of $\,(10)\,$, omit the $\,dt\,$ multiplier.
2. On the left hand side of $\,(10)\,$ substitute into $\,w_S\,$ the partial derivative from the r.h.s. of $\,(6)\,$.
3. Arrange all remaining terms on one side.
After these steps we obtain the `Black-Scholes-Merton differential equation`
<br/>
for the price of a derivative on a non-dividend paying underlying stock:
$(11)\ \ \ \ \displaystyle{0 = -\ r\ f + \frac{\partial\ f}{\partial\ t} + r\ S\ \frac{\partial\ f}{\partial\ S} + \frac{1}{2}\ \sigma^{\ 2}\ S^{\ 2}\ \frac{\partial^{\ 2}\ f}{\partial\ S^{\ 2}}}\,.$
##### Checking the Black-Scholes-Merton equation on a Forward
With the above notations and $\,S_0 := S\left(\,t=0\,\right)\,$ the price of a forward on the selected underlying asset is
$\,f\left(\,S\left(\,t\,\right),\,t\,\right)=S_0\ \mathrm{e}^{\,r\,t}\,$.
For this function the right hand side of $\,(11)\,$ is
$\ - r\ S_0\ \mathrm{e}^{\,r\,t} + r\ S_0\ \mathrm{e}^{\,r\,t} + 0 + 0 = 0\,$.
## Array broadcasting and Vectorized calculations in `numpy`
##### Array Broadcasting
Please see a detailed documentation at https://numpy.org/doc/stable/user/theory.broadcasting.html
```
import numpy as np
def numpy_broadcasting_examples():
    """Print a small table showing how numpy broadcasts scalar/list/array operand pairs."""
    operand_pairs = [
        (2, 5),
        (2, np.array([5, 6])),
        (np.array([2, 3]), np.array([5, 6])),
        ([2, 3], np.array([5, 6])),
    ]
    print("\nExamples for array broadcasting in Python")
    print("\na\tb\ta * b")
    print("")
    for left, right in operand_pairs:
        print(f'{left}\t{right}\t{left * right}')
numpy_broadcasting_examples()
```
##### Vectorized calculation
Performing the same mathematical operation many times is very time-consuming.
<br/>
In the `ndarray` data structure the parallel calculation of identical operations for groups is included by default.
In the example below the dot product is calculated with a for loop and with numpy's built-in vectorization.
<br/>
Note that the two differences: (1) vector * vector element-wise multiplication (2) summing.
```
import numpy as np
import datetime
import time
RANDOM_SEED = 23498234
def vectorization_speed_test_of_dot_product(n_10_exp=None, n_repeat=None):
    """Speed test of vectorization for multiplication n_repeat times.

    Parameters
    ----------
    n_10_exp : int
        the dot product is computed on a vector of 10**n_10_exp random numbers
    n_repeat : int
        number of times the timing experiment is repeated
    """
    np.random.seed(RANDOM_SEED)
    # the initial vector
    rnd_numbers = np.random.random_sample(np.power(10, n_10_exp))
    print(f'Time of n = 10^{n_10_exp} multiplications without vectorization '
          'and with vectorization in milliseconds')
    print(f'Repeated {n_repeat} times')
    print('\nID of Run\n\tCalculation time\n\tWith for loop [ms]\n\t\tVectorized [ms]\n\t\t\tSpeedup Ratio\n')
    for i_repeat in range(n_repeat):
        # for loop, no vectorization
        t_start = datetime.datetime.now()
        dot_product_with_for_loop = 0.0
        for i in range(len(rnd_numbers)):
            dot_product_with_for_loop += rnd_numbers[i] * rnd_numbers[i]
        t_end = datetime.datetime.now()
        t_total_no_vect = t_end - t_start
        # same calculation with vectorization
        t_start = datetime.datetime.now()
        dot_product_with_for_vectorization = np.sum(rnd_numbers * rnd_numbers)
        t_end = datetime.datetime.now()
        t_total_with_vect = t_end - t_start
        # BUG FIX: timedelta.microseconds is only the fractional-second component
        # (0..999999), so runs of one second or longer were under-reported.
        # total_seconds() gives the full duration.
        ms_no_vect = 1000.0 * t_total_no_vect.total_seconds()
        ms_with_vect = 1000.0 * t_total_with_vect.total_seconds()
        # guard: a vectorized run below the clock resolution would divide by zero
        speedup = ms_no_vect / ms_with_vect if ms_with_vect > 0.0 else float('inf')
        print('{}\t{}\t{}\t{}'
              .format(i_repeat + 1, np.round(ms_no_vect, 2),
                      np.round(ms_with_vect, 2),
                      np.round(speedup, 2)))
# Benchmark driver: 10**7-element vectors, timed 10 times (slow on purpose).
vectorization_speed_test_of_dot_product(n_10_exp=7, n_repeat=10)
```
## Black-Scholes price of a European call option on a non-dividend-paying stock
##### Definitions (only the ones not yet mentioned)
* $\,T\,$ is the `time to maturity` of the option
* $\,K\,$ is the `strike price` of the option
* $\,S_{\ 0}\,$ is the `spot price` of the underlying stock
* The CDF of the std. normal distribution is $\ N\left(x\right)={\displaystyle\frac{1}{\sqrt{2\pi\,}} \int_{\normalsize{-\infty}}^{\normalsize{\ x}} dy\ \exp\left(\frac{-y^{\ 2}}{2}\right)=\frac{1}{2}\ \left[\ 1 + \mathrm{erf}\left( \frac{x}{\sqrt{2\,}} \right)\ \right] }\ .$
* The natural logarithm (in other words, the base $\,\mathrm{e}$ logarithm) $\ $ of $\ x\ $ is $\,\log\left(\ x\ \right)\,$.
* Definitions for the pricing: $\ d_{\ 1}={\displaystyle{\frac{ \log \left(\ S_{\ 0}\ \mathit{/}\ K\ \right) + \left(\ r + \ \sigma^{\ 2}\ \mathit{/}\ \,2\ \right)\ T }{\sigma\ \sqrt{T\,}} }}\ $ and $\ d_{\ 2}=d_{\ 1}-\sigma\ \sqrt{T\,}\ .$
##### Result from the Black-Scholes-Merton differential equation
Based on the Black-Scholes-Merton PDE in $\,(11)\,$,
<br/>
the price of a European call option on a non-dividend-paying underlying stock is
$(12) \ \ \ \ c=S_{\ 0}\ N \left(\ d_{\ 1}\ \right) - K\ e^{\ -\ r\ T}\ N \left(\ d_{\ 2}\ \right)\ $.
A derivation of this formula from the BSM PDE is [available here](https://en.wikipedia.org/wiki/Black%E2%80%93Scholes_equation#Solving_the_Black%E2%80%93Scholes_PDE).
```
import numpy as np
from scipy.special import erf
from matplotlib import pyplot as plt
def std_norm_cdf(x):
    """CDF of the standard normal distribution: N(x) = (1 + erf(x / sqrt(2))) / 2."""
    scaled = x / np.sqrt(2)
    return (1 + erf(scaled)) * 0.5
def eur_call_black_scholes_price(T, K, S0, sigma, r):
    """
    Black-Scholes price of a European call option on a non-dividend-paying stock.

    T: time to maturity given in year fractions (positive)
    K: strike price of the option (positive)
    S0: spot price of the underlying stock (positive)
    sigma: volatility of the underlying stock (positive)
    r: constant risk-free short rate

    At most one of the inputs may be an np.array of floats (the others floats);
    the output then is an np.array of the corresponding prices.
    """
    sqrt_T = T ** 0.5
    d1 = (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * sqrt_T)
    d2 = d1 - sigma * sqrt_T
    discounted_strike = K * np.exp((-1.0) * r * T)
    return S0 * std_norm_cdf(d1) - discounted_strike * std_norm_cdf(d2)
# test
assert round(eur_call_black_scholes_price(1, 1, 1, 0.1, 0.03), 6) == 0.055819, 'Failed test for eur_call_bs_price '
```
##### Plotting the Black-Scholes price
```
import numpy as np
import matplotlib.pyplot as plt
# Market parameters for the example plot
R = 0.03      # constant risk-free short rate
S0 = 1.0      # spot price of the underlying stock
SIGMA = 0.1   # volatility of the underlying stock
# Strike-price axis: +/- 3 sigma around the spot price
K_MIN = S0 * (1.0 - 3.0 * SIGMA)
K_MAX = S0 * (1.0 + 3.0 * SIGMA)
K_STEP_NUMBER = 200  # number of strike values sampled on the x axis
K_LIST = np.linspace(K_MIN, K_MAX, K_STEP_NUMBER)
T_LIST = 1.0, 2.0, 5.0           # maturities in years, one curve per value
LINE_STYLE_LIST = '-', '--', ':'  # one line style per maturity
# Plot styling constants
LINE_WIDTH = 4
FONT_SIZE = 16
FIGURE_SIZE = (13, 8)
LABEL_PAD = 20
HANDLE_LENGTH = 2.5
def plot_black_scholes_price_eur_call_option():
    """
    Plot the Black-Scholes price of a European call option
    - for several maturities (one curve per T in T_LIST)
    - as a function of the strike price (K_LIST on the x axis)

    Reads the module-level constants S0, SIGMA, R and delegates the pricing
    to eur_call_black_scholes_price(), passing K_LIST as a vector of strikes.
    """
    plt.figure(figsize=FIGURE_SIZE)
    # one curve per maturity; K_LIST is broadcast through the pricing formula
    for line_style, T in zip(LINE_STYLE_LIST, T_LIST):
        f_of_K = eur_call_black_scholes_price(T, K_LIST, S0, SIGMA, R)
        plt.plot(K_LIST, f_of_K, label='T={:g}'.format(T), color='blue', linestyle=line_style, lw=LINE_WIDTH)
    plt.legend(bbox_to_anchor=(1.05, .95), loc=2, borderaxespad=0., fontsize=FONT_SIZE, handlelength=HANDLE_LENGTH)
    plt.title('\nBlack - Scholes price of a European call option on a non-dividend-paying stock\n'
              r'$S_{\,0}$ ' + '= {:g}, '.format(S0) +
              r'$\sigma$ ' + '= {:.1g}, '.format(SIGMA) +
              r'$r$ ' + '= {:g}, '.format(R) +
              '$T$: Time to maturity in years\n', fontsize=FONT_SIZE)
    plt.xlabel('$K\ \ $ [strike price]', fontsize=FONT_SIZE, labelpad=LABEL_PAD)
    plt.ylabel('$f\ \ $ [option price]', fontsize=FONT_SIZE, labelpad=LABEL_PAD)
    plt.tick_params(labelsize=FONT_SIZE)
    # reference lines: zero price and the at-the-money strike K = S0
    plt.axhline(0, c='gray', ls='dotted')
    plt.axvline(S0, c='gray', ls='dotted')
    # label the x axis at S0 and at the +/- 3 sigma endpoints of the strike range
    ax = plt.gca()
    ax.set_xticks([K_MIN, S0, K_MAX])
    ax.set_xticklabels([r'$S_{\,0}\,\left(\,1-3\,\sigma\,\right)$', '$S_{\,0}$',
                        r'$S_{\,0}\,\left(\,1+3\,\sigma\,\right)$'], fontsize=FONT_SIZE)
    plt.show()
plot_black_scholes_price_eur_call_option()
```
## Comparison to the binomial tree model
In the $\,n\to\infty\,$ limit the price calculated with the (correctly normalized) $\,n\,$- step binomial tree converges to the Black-Scholes price.
# Greeks
The Black-Scholes price of a financial derivative product depends on many variables.
<br/>For example, the price of the call option is $\ \ f \,=\, f\,\big(\,S\!\left(t\right),\,T,\,K,\,\sigma,\,Y\!\left(t\right),\, \dots\,\big)$.
In finance, `the Greeks are partial derivatives` of the derivative product's price with respect to one of its variables,
or one of the components of a variable. They are denoted by (mainly) Greek letters:
$\,\Delta\,$, $\,\Gamma\,$, $\,\Theta\,$, $\,\nu\,$, $\,\rho\,$, $\dots$
## Delta
Delta: $\Delta$
If the price of the underlying changes by an infinitesimally small $\,dS\,$ amount, and nothing else changes,
<br/>
then the infinitesimally small price change of the derivative product is $\,\Delta\,$ times the change of the underlying.
In mathematical form the definition is
$$ \Delta : = \frac{\partial\ f}{\partial\ S}\ .$$
### Task: Hedging
1. Donald Duck's portfolio contained 1 unit of a derivative product and some amount of the derivative's underlying.
2. Within a very short time, the price of the underlying changed by a small amount, but the total value of the portfolio was left unchanged.
`Question:` How much of the underlying was Donald Duck holding ?
Help: Use a Greek to explain your answer.
##### Note
Often it is an organization's goal to keep the total value of a selected portfolio constant.
## Theta
Theta: $\Theta$
The price of the call option changes over time.
For example, if you start with an $\,S\left(\,t=0\,\right)\,$ that is a little below $\,K\,$,
<br/>
then it is still possible that at maturity $\,S\left(\,t=T\,\right) > K\,$, and so the call option will have positive payoff.
If time changes by a small amount and nothing else changes, then the price of the derivative product<br/>
changes by $\ -\Theta\ $ times this small amount of time.
In mathematical form the definition is
$$ \Theta : = -\ \frac{\partial\ f}{\partial\ t}\ .$$
## Further Greeks
* $\Gamma\,$, called `gamma`, is the second partial derivative with respect to the underlying:
$$ \Gamma : = \frac{\partial^{\,2}f}{\partial\,S^{\,2}}\ .$$
* $\nu\,$
- in the Greek alphabet this is the letter "nu" and it is pronounced /ˈnjuː/
- in finance, this is called `vega`, and it is the partial derivative with respect to the volatility of the underlying:
$$ \nu : = \frac{\partial\,f}{\partial\,\sigma}\ .$$
* $\rho\,$, called `rho`, is the partial derivative with respect to the risk-free rate:
$$ \rho : = \frac{\partial\,f}{\partial\,r}\ .$$
##### A similar measure: PV01
_Additional definitions_
* One `base point (bp)` is 0.01 percent. In other words: one bp is $\,10^{\,-\,4}\,$ part of a unit.
* A `parallel shift of the yield curve` means changing the yield curve by the same amount at all maturities.
* The $\,\mathrm{PV01}\,$ is the change of the `derivative product`'s value upon a 1bp parallel shift of the yield curve.
* Not to be confused with the $\,\mathrm{DV01}\,$, which is the dollar change of a `bond`'s value when the yield is changed by 1bp.
# Non-Gaussian PDFs of short-time increments
For many real processes the observed PDF of finite short time increments differs from the normal distribution.
<br/>
Thus, the Wiener process can be only an approximate continuous-time process to generate these real finite short time increments.
Compare with the normal distribution:
1. Fit the normal distribution's $\,E\,$ and $\,\sigma\,$ to the observed distribution of short-time increments.
2. Compare higher moments, usually the 3rd (skew) and the 4th (kurtosis) moments.
## Kurtosis
1. In finance extreme events can incur large losses. Thus, the "weights" of the tails of distributions are frequently investigated.
2. The `kurtosis` is the 4th standardized moment of a distribution, it is the expectation value of $\,\left[\,\left(\,x-\mu\,\right)\,\mathit{/}\,\sigma\,\right]^{\,4}\,$.
3. The kurtosis of the standard normal distribution is 3.
4. The `excess kurtosis` is the kurtosis minus 3. Thus, the excess kurtosis of the standard normal distribution is 0.
### Excess kurtosis of distributions
1. Always positive (leptokurtic): Exponential, Double exponential (Laplace), Poisson
2. Always zero (mesokurtic): Normal
3. Always negative (platykurtic): Uniform
4. Depends on the distribution's parameter: Bernoulli, Binomial
Short time returns of stocks often have a leptokurtic distribution.
<br/>
In other words, their pdfs often have `heavy tails`, and usually also heavy centers.
### Example: compare distributions with negative, zero and positive excess kurtosis
###### Tasks
1. In the code below, change the mean of the normal (Gaussian) distribution.
2. Change the width of the double exponential (Laplace) distribution.
3. Change the width of the uniform distribution.
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# Plot styling constants
LINE_WIDTH = 4
FIGURE_SIZE = (10, 6.18)
LAPLACE_SCALE = 1.0   # scale (diversity) parameter of the Laplace distribution
LABEL_PAD = 15
UNIFORM_WID = 3.0     # total width of the uniform distribution's support
HANDLE_LENGTH = 2.5
def plot_compare_normal_laplace_uniform():
    """
    Compare three distributions with different excess kurtosis K:
    Gaussian (normal, K = 0), Laplace (double exponential, K > 0), uniform (K < 0)
    """
    # --- settings ---
    plt.figure(figsize=FIGURE_SIZE)
    range_min, range_max, n_step = -3.0, 3.0, 1000
    x_axis = np.linspace(range_min, range_max, n_step)
    pdf_normal = stats.norm.pdf(x_axis, 0, 1)
    pdf_laplace = stats.laplace.pdf(x_axis, 0, LAPLACE_SCALE)
    # uniform pdf built by hand: height 1/width on [-width/2, width/2], else 0
    pdf_uniform = [1.0 / UNIFORM_WID if -0.5 * UNIFORM_WID <= x <= 0.5 * UNIFORM_WID else 0.0 for x in x_axis]
    my_fontsize = 14
    # --- plotting ---
    # K in the legend labels denotes the excess kurtosis of each distribution
    plt.plot(x_axis, pdf_uniform, color='#11dd11', label='K < 0 Uniform', lw=LINE_WIDTH, linestyle='dotted')
    plt.plot(x_axis, pdf_normal, color='blue', label='K = 0 Std. Normal', lw=LINE_WIDTH, linestyle='solid')
    plt.plot(x_axis, pdf_laplace, color='magenta', label='K > 0 Double Exp.', lw=LINE_WIDTH, linestyle='dashed')
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=my_fontsize, handlelength=HANDLE_LENGTH)
    plt.xlabel('Stochastic variable', fontsize=my_fontsize, labelpad=LABEL_PAD)
    plt.ylabel('Probability Density Function\n(also called: distribution)', fontsize=my_fontsize, labelpad=LABEL_PAD)
    plt.title('Distributions with different excess kurtosis', fontsize=my_fontsize)
    plt.tick_params(labelsize=my_fontsize)
    # axis reference lines through the origin
    plt.axhline(0, c='gray', ls='dotted')
    plt.axvline(0, c='gray', ls='dotted')
    plt.show()
plot_compare_normal_laplace_uniform()
```
# Implied volatility: Turning the Black-Scholes formula inside out
##### Summary
If you insert the implied volatility in the Black-Scholes formula, then you will obtain the option values observed on the market.
## How to apply theory to practice ?
* As mentioned before, the `Black-Scholes theoretical price` of a financial derivative product depends on many variables:
<br/>
$f_{B-S}\,\big(\,S\!\left(t\right),\,T,\,K,\,\sigma,\,Y\!\left(t\right),\,\cdots\,\big)$.
* The pdf of the short time increments of stocks is `systematically different from the normal distribution`, most importantly, it is skewed and `leptokurtic` (heavy-tailed).
* In other words, fitting a normal distribution on the center of a non-normal measured distribution<br/>
or on the sides of the same measured non-normal distribution will give different fitted $\,\mu\,$ and $\,\sigma\,$ values.
* Among the inputs of the Black-Scholes formula the `future volatility`, $\,\sigma\,$, can be hard to estimate.
* Also, over the lifetime of a trade the volatility is often far from constant, it may change rapidly.
## A practical solution
* The prices of many standardized option contracts are available on the market.
* Instead of adjusting the Black-Scholes theoretical formula to account for the systematic differences from the normal distribution, we use the formula in the opposite direction and `calculate the volatility from the price`. This result is called the implied volatility.
## Comments
* With the implied volatility we are inverting the Black-Scholes formula by ignoring the pdf's differences from the normal distribution.
# Recommended reading
* Press release of the [`1997 Nobel Prize in Economic Sciences`](https://www.nobelprize.org/prizes/economic-sciences/1997/press-release/)
* Article on Investopedia: [`Long-Term Capital Management (LTCM)`](https://www.investopedia.com/terms/l/longtermcapital.asp)
| github_jupyter |
```
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import deepthought, mne, os
from deepthought.util.logging_util import configure_custom
configure_custom(debug=False)
mne.set_log_level('INFO')
### TODO: change this for each subject
subject = 'P01'  # OpenMIIR subject identifier
from deepthought.datasets.openmiir.metadata import get_stimuli_version
# stimulus-set version used for this subject's recording session
STIMULI_VERSION = get_stimuli_version(subject)
## NOTE: ERPs seem to be slightly nicer when we do NOT resample but work on 512 Hz data
# NOTE(review): despite the note above, sfreq is set to 64 Hz here -- confirm
# this is the intended trade-off before re-running the analysis.
sfreq = 64
from deepthought.experiments.bcmi2015.preprocessing import load_and_preprocess_raw
# load the subject's continuous EEG plus trial and beat event markers
raw, trial_events, beat_events = load_and_preprocess_raw(subject, sfreq=sfreq, verbose=False)
from deepthought.datasets.openmiir.preprocessing.events import filter_beat_events, remove_overlapping_events
from deepthought.analysis.tempo.autocorrelation import *
from deepthought.datasets.openmiir.metadata import load_stimuli_metadata
# per-stimulus metadata; keys used below: 'bpm', 'beats_per_bar', 'label'
meta = load_stimuli_metadata(version=STIMULI_VERSION)
import matplotlib.gridspec as gridspec
def plot_bar_evoked_paper(raw, beat_events, # data to plot and beat markers
                          stim_id, # selected stimulus id
                          tmin=-0.1, tmax=2.4, # time window in s
                          allow_overlap=False, # allow overlapping epochs?
                          override_step_bpm=None # for manipulating the assumed BPM (i.e. simulating wrong assumptions)
                          ):
    """Build the paper figure: bar-aligned ERPs and their tempo autocorrelation.

    Produces a 2x3 figure for one stimulus. Top row: the downbeat-aligned
    evoked response (mean +/- std over EEG channels) for conditions 1-3
    (perception, cued imagination, uncued imagination). Bottom row: the
    autocorrelation-based tempo curve of each ERP.

    Relies on the module-level `meta` stimulus metadata and on the
    deepthought/mne helpers imported above. Returns the matplotlib figure.
    """
    tick_fontsize = 13
    sfreq = raw.info['sfreq']
    # ground-truth tempos from the stimulus metadata: beat tempo (BPM) and
    # bar tempo (bars per minute = beat tempo / beats per bar)
    gt_beat_tempo = float(meta[stim_id]['bpm'])
    gt_bar_tempo = float(gt_beat_tempo) / meta[stim_id]['beats_per_bar']
    # print stim_id, meta[stim_id]['label'], meta[stim_id]['approx_bar_length']
    fig = plt.figure(figsize=(15,5))
    subplot_grid = gridspec.GridSpec(2, 3, height_ratios=[1.4,1])
    # NOTE(review): gt_bar_tempo / 60.0 has units bars-per-second; if a
    # seconds-per-bar length is intended this would be 60.0 / gt_bar_tempo.
    # Confirm against meta[stim_id]['approx_bar_length'] (see comment).
    bar_length = gt_bar_tempo / 60.0 #meta[stim_id]['approx_bar_length']
    # downbeat marker positions (in ms) that fall inside the epoch window
    bar_markers = []
    for b in xrange(16):
        if b*bar_length < tmax:
            bar_markers.append(b*bar_length * 1000)
    def get_erp(cond):
        # epoch the raw EEG at downbeat events (beat_count == 1) of this
        # stimulus/condition and average them into an evoked response
        bar_events = filter_beat_events(beat_events, stimulus_ids=[stim_id], beat_counts=[1], conditions=[cond])
        if override_step_bpm is not None:
            # simulate wrong bar length corresponding to override_step_bpm:
            # keep the first onset, re-space all later events at the wrong step
            sample_len = bpm_to_autocorr_index(override_step_bpm, sfreq=sfreq)
            first_onset = bar_events[0,0]
            for i, event in enumerate(bar_events):
                event[0] = first_onset + i * sample_len
        if not allow_overlap:
            bar_events = remove_overlapping_events(bar_events, tmin, tmax, sfreq)
        # print 'selected events: ', len(bar_events)
        eeg_picks = mne.pick_types(raw.info, meg=False, eeg=True, eog=False, stim=False)
        bar_epochs = mne.Epochs(raw, bar_events, None,
                                tmin, tmax, preload=True,
                                proj=False, picks=eeg_picks, verbose=False)
        evoked = bar_epochs.average()
        # print evoked.data.shape
        evoked.resample(sfreq)
        # print evoked.data.shape
        return evoked
    # one ERP per experimental condition (1=perception, 2=cued, 3=uncued)
    erps = dict()
    for cond in [1,2,3]:
        erps[cond] = get_erp(cond)
    def plot_erp(axes, evoked):
        # draw the channel-mean ERP with a +/- 1 std band over channels,
        # plus dashed vertical lines at the expected downbeat times
        # evoked.plot(axes=axes, show=False, ylim=ylim, hline=[0], titles=perc_titles)
        # [l.set_color('gray') for l in axes.get_lines()]
        times = 1e3 * evoked.times # time in milliseconds
        uV_scaling = 1e6
        erp_mean = evoked.data.mean(axis=0)
        erp_std = evoked.data.std(axis=0)
        hyp_limits = (erp_mean - erp_std, erp_mean + erp_std) # std
        # hyp_limits = (np.min(evoked.data, axis=0), np.max(evoked.data, axis=0)) # range
        axes.plot(times, uV_scaling * erp_mean, color='black', linewidth=1)
        axes.set_xlim((times[0], times[-1]))
        axes.set_ylim((-5,5))
        axes.grid()
        plt.xlabel('time (ms) relative to downbeat')
        for m in bar_markers:
            axes.axvline(m, color='gray', linestyle='--', linewidth=2)
        hyp_limits = uV_scaling * np.asarray(hyp_limits)
        axes.fill_between(times, hyp_limits[0], y2=hyp_limits[1], color='black', alpha=0.25)
    def plot_tempo_curve(data, stim_id, gt_beat_tempo, gt_bar_tempo, axes=None):
        # autocorrelation of the channel-mean ERP, rendered as a tempo curve
        # with dashed markers at the ground-truth bar tempo and its octaves
        ac = compute_autocorrelation(data)
        # limit tempo range
        bpm_min = int(np.ceil(autocorr_index_to_bpm(len(ac), sfreq=sfreq)))
        bpm_max = int(np.floor(autocorr_index_to_bpm(1, sfreq=sfreq)))
        # print bpm_min, bpm_max
        # bpm_max = min(bpm_max, 220)
        x = np.arange(bpm_min, 121, 1)
        tempo_curve = compute_tempo_histogram_from_ac(ac, sfreq=sfreq)
        y = tempo_curve(x)
        if axes is None:
            plt.figure(figsize=(10,3))
            axes = plt.gca()
        axes.set_xlim((0, x[-1]))
        axes.set_xlabel('tempo (BPM)')
        axes.set_ylabel('autocorrelation')
        axes.plot(x,y, color='black')
        axes.axvline(0.5 * gt_bar_tempo, color='grey', linestyle='--', linewidth=2) # half
        axes.axvline(gt_bar_tempo, color='grey', linestyle='--', linewidth=2)
        axes.axvline(2 * gt_bar_tempo, color='grey', linestyle='--', linewidth=2) # double
        axes.grid()
    title_fontsize = 16
    # --- top row: ERPs, one panel per condition ---
    axes = plt.subplot(subplot_grid[0, 0])
    plot_erp(axes, erps[1])
    axes.set_ylabel('data (uV)')
    plt.title('perception', fontsize=title_fontsize)
    axes = plt.subplot(subplot_grid[0, 1])
    plot_erp(axes, erps[2])
    plt.setp(axes.get_yticklabels(), visible=False)
    plt.title('cued imagination', fontsize=title_fontsize)
    axes = plt.subplot(subplot_grid[0, 2])
    plot_erp(axes, erps[3])
    plt.setp(axes.get_yticklabels(), visible=False)
    plt.title('uncued imagination', fontsize=title_fontsize)
    # --- bottom row: tempo autocorrelation curves, same condition order ---
    axes = plt.subplot(subplot_grid[1, 0])
    plot_tempo_curve(erps[1].data.mean(axis=0), stim_id, gt_beat_tempo, gt_bar_tempo, axes=axes)
    axes.set_ylim((-0.5,0.5))
    axes = plt.subplot(subplot_grid[1, 1])
    plot_tempo_curve(erps[2].data.mean(axis=0), stim_id, gt_beat_tempo, gt_bar_tempo, axes=axes)
    axes.set_ylim((-0.5,0.5))
    axes.set_ylabel('')
    plt.setp(axes.get_yticklabels(), visible=False)
    axes = plt.subplot(subplot_grid[1, 2])
    plot_tempo_curve(erps[3].data.mean(axis=0), stim_id, gt_beat_tempo, gt_bar_tempo, axes=axes)
    axes.set_ylim((-0.5,0.5))
    axes.set_ylabel('')
    plt.setp(axes.get_yticklabels(), visible=False)
    plt.subplots_adjust(wspace=0.08, hspace=0.3)
    # plt.subplots_adjust(top=0.92)
    # fig.suptitle('#{} {}'.format(stim_id, meta[stim_id]['label']), fontsize=20)
    plt.show()
    print 'Top: Mean and standard deviation over all 64 EEG channels of the bar-aligned ERPs'
    if allow_overlap:
        print '(with epoch overlap)'
    else:
        print '(without epoch overlap)'
    print 'for', meta[stim_id]['label'], 'in conditions 1--3.'
    print 'Bottom: Corresponding autocorrelation scores in the relevant tempo range.'
    print 'Dashed lines indicate downbeats (top) and the approximate bar tempo of the stimulus plus its lower tempo octave (bottom).'
    print 'NOTE: downbeat times are based on audio beat detection in stimulus!'
    return fig
# Figures 1-2: epochs aligned at the true downbeats (from audio beat
# detection), first without and then with overlapping epochs.
print '============= correct bar alignment =============='
fig = plot_bar_evoked_paper(raw, beat_events, 1, tmin=-0.1, tmax=2.4, allow_overlap=False, override_step_bpm=None)
# fig.savefig('plots/non-overlap_bar-aligned-ERPs.pdf', bbox_inches='tight')
fig = plot_bar_evoked_paper(raw, beat_events, 1, tmin=-0.1, tmax=2.4, allow_overlap=True, override_step_bpm=None)
# Figures 3-4: control condition -- re-space the downbeat events at a
# deliberately wrong tempo to show the effect of misalignment.
wrong_bpm = 50
print '============= incorrect bar alignment ({} BPM) =============='.format(wrong_bpm)
fig = plot_bar_evoked_paper(raw, beat_events, 1, tmin=-0.1, tmax=2.4, allow_overlap=False, override_step_bpm=wrong_bpm)
fig = plot_bar_evoked_paper(raw, beat_events, 1, tmin=-0.1, tmax=2.4, allow_overlap=True, override_step_bpm=wrong_bpm)
# Batch variant over all stimuli (disabled):
# from deepthought.datasets.openmiir.constants import STIMULUS_IDS
# for stim_id in STIMULUS_IDS:
#     fig = plot_bar_evoked_paper(raw, beat_events, stim_id, tmin=-0.1, tmax=2.4, allow_overlap=False)
| github_jupyter |
```
import csv
import pandas as pd
from collections import Counter
from collections import defaultdict
from matplotlib import pyplot as plt
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn import metrics
import pickle
import math
import re
import enchant
import os
import glob
import numpy as np
np.random.seed(512)
from nltk.corpus import words
from dateutil.parser import parse
from datetime import datetime
import itertools
def is_url(row, reg, pat):
    """Return True if any of the sample values (positions 12-16) is entirely a URL.

    A value qualifies when it starts with a match of `reg` and removing every
    occurrence of `pat` leaves nothing behind (i.e. the whole value is URLs).
    """
    for sample in row[12:17]:
        text = str(sample).strip()
        if reg.match(text) and not re.sub(pat, '', text):
            return True
    return False
def is_datetime(row):
    """Return True if any of the sample values (positions 12-16) parses as a timestamp.

    Each sample is first tried directly with pd.Timestamp; if that fails, it is
    cast to int (e.g. epoch values stored as strings) and tried again.
    """
    for sample in row[12:17]:
        # FIX: the original caught only ValueError; pd.Timestamp / int() can
        # also raise TypeError (unsupported object) or OverflowError (huge
        # ints), which crashed the prediction loop on malformed samples.
        try:
            _ = pd.Timestamp(sample)
            return True
        except (ValueError, TypeError, OverflowError):
            try:
                _ = pd.Timestamp(int(sample))
                return True
            except (ValueError, TypeError, OverflowError):
                continue
    return False
def is_email(row, reg):
    """Return True if any of the sample values (positions 12-16) starts with an e-mail address."""
    return any(reg.match(str(sample)) for sample in row[12:17])
def is_num(row, reg):
    """Return True if any sample value (positions 12-16) looks numeric.

    A value counts as numeric when it casts to float, or when its lower-cased
    form matches the supplied number regex (e.g. '$1,200'-style values).
    """
    for sample in row[12:17]:
        text = str(sample)
        try:
            float(text)
        except ValueError:
            if reg.match(text.lower()):
                return True
        else:
            return True
    return False
# Pre-compiled regexes used by the rule-based type predictions below.
# URL: scheme://host(.domain)+ optionally followed by a path/query
url_pat = r"(http|ftp|https):\/\/([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?"
url_reg = re.compile(url_pat)
# delimiter-separated sequence of tokens (comma, semicolon, pipe, ...) repeated 2+ times
list_pat = r"((\d|\w|\')+(,|>|;|:|\-|`\.|\||\*){1}\s?(\d|\w|\')+){2,}"
list_reg = re.compile(list_pat)
# numbers, optionally with a currency symbol / separator (e.g. "$1,200")
num_pat = r"([\$|\w]?(\d|\d\.\d|\d\,\d)+\s?[\$|\w]?)"
num_reg = re.compile(num_pat)
# simple e-mail address pattern
email_pat = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,6}\b"
email_reg = re.compile(email_pat)
# Benchmark test split plus per-column metadata, joined on the column id.
testdf = pd.read_csv('../../Benchmark-Labeled-Data/data_test.csv')
test_metadata = pd.read_csv('../../RawCSV/Metadata/meta_data.csv')
test_merged = pd.merge(testdf,test_metadata,on='Record_id')
# gold labels for the whole test set, in merged-row order
y_true = test_merged.y_act.values.tolist()
def get_col_dtype(col):
    """Classify a pandas Series as 'Numeric', 'Object', or return its raw dtype.

    Object-dtype columns are probed with pd.to_numeric on their distinct
    non-null values, so numbers stored as strings still count as 'Numeric'.
    Bool columns are treated as 'Object'; float64/int64 as 'Numeric'; any
    other dtype (e.g. datetime64) is returned unchanged.
    """
    if col.dtype == "object":
        try:
            pd.to_numeric(col.dropna().unique())
            return 'Numeric'
        except (ValueError, TypeError):
            # FIX: the original bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; pd.to_numeric signals unparseable data via
            # ValueError/TypeError only.
            return "Object"
    if col.dtype == "bool":
        return "Object"
    if col.dtype == 'float64' or col.dtype == 'int64':
        return 'Numeric'
    return col.dtype
# Rule-based baseline: predict a feature-type class code (0-8) for every
# column in the merged test set, then score against the y_act gold labels.
# The numeric codes are the benchmark's class ids; from the branches below,
# 2 appears to be datetime, 4 URL, 5 number, 6 delimited list -- TODO confirm
# against the benchmark's label definitions.
predictions = []
# NOTE(review): y_truth_iter is appended inconsistently (twice in one branch,
# not at all in others) and is never used for scoring -- y_true is used below.
y_truth_iter = []
data = test_merged
prv_csv_name = ''
for index, data in test_merged.iterrows():
    # progress indicator
    if index%100==0: print(index)
    s1 = data['sample_1']
    avg_tokens = len(str(s1).split(' '))  # NOTE(review): computed but unused
    # pre-computed per-column statistics from the metadata table
    nans = data['%_nans']
    dist_val = data['%_dist_val']
    num_dist_val = data['num_of_dist_val']
    mean_wd_cnt = data['mean_word_count']
    std_wd_cnt = data['std_dev_word_count']
    has_delims = data['has_delimiters']
    col = data['Attribute_name']
    csv_name = '../../RawCSV/RawCSVFiles/' + str(data['name'])
    # NOTE(review): prv_csv_name is never updated to csv_name, so this cache
    # check is always True and the CSV is re-read for every row.
    if prv_csv_name != csv_name: df = pd.read_csv(csv_name,encoding='latin1')
    # column missing from the raw file: fall back to class 1
    # NOTE(review): bare `except:` -- intended to catch KeyError only
    try: df_col = df[col]
    except:
        predictions.append(1)
        continue
    curtype = get_col_dtype(df_col)
    if str(curtype) == 'Numeric':
        y_truth_iter.append(data['y_act'])
        # all-NaN numeric column -> class 7
        if nans > 99.99:
            predictions.append(7)
            continue
        # few distinct values -> class 7, otherwise plain numeric (class 0)
        if num_dist_val < 10 :
            y_truth_iter.append(data['y_act'])
            predictions.append(7)
        else:
            predictions.append(0)
        continue
    else:
        # all-NaN or all-distinct object column -> class 7
        if nans > 99.99:
            y_truth_iter.append(data['y_act'])
            predictions.append(7)
            continue
        if dist_val > 99.99:
            y_truth_iter.append(data['y_act'])
            predictions.append(7)
            continue
        # low-cardinality object column -> class 1
        if num_dist_val < 100 :
            y_truth_iter.append(data['y_act'])
            predictions.append(1)
        else:
            y_truth_iter.append(data['y_act'])
            if float(nans) >= 0.90:
                predictions.append(8)
            elif float(mean_wd_cnt) <= 2.0:
                # short values: probe the sample columns with the type regexes
                if is_datetime(data):
                    predictions.append(2)
                elif is_email(data, email_reg):
                    predictions.append(8)
                elif is_url(data, url_reg, url_pat):
                    predictions.append(4)
                elif is_num(data, num_reg):
                    predictions.append(5)
                else:
                    predictions.append(8)
            elif float(std_wd_cnt) < 10.0:
                # medium-length values: delimited list (6) vs other (8)
                if has_delims == True:
                    predictions.append(6)
                else:
                    predictions.append(8)
            else:
                # long free text -> class 3
                predictions.append(3)
        continue
# overall accuracy and per-class confusion matrix on the gold labels
print(accuracy_score(y_true, predictions))
print(confusion_matrix(y_true, predictions))
```
| github_jupyter |
This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# Challenge Notebook
## Problem: Add two numbers whose digits are stored in a linked list in reverse order.
* [Constraints](#Constraints)
* [Test Cases](#Test-Cases)
* [Algorithm](#Algorithm)
* [Code](#Code)
* [Unit Test](#Unit-Test)
* [Solution Notebook](#Solution-Notebook)
## Constraints
* Can we assume this is a non-circular, singly linked list?
* Yes
* Do we expect the return to be in reverse order too?
* Yes
* What if one of the inputs is None?
* Return None for an invalid operation
* How large are these numbers--can they fit in memory?
* Yes
* Can we assume we already have a linked list class that can be used for this problem?
* Yes
* Can we assume this fits in memory?
* Yes
## Test Cases
* Empty list(s) -> None
* Add values of different lengths
* Input 1: 6->5->None
* Input 2: 9->8->7
* Result: 5->4->8
* Add values of same lengths
* Exercised from values of different lengths
* Done here for completeness
## Algorithm
Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/add_reverse/add_reverse_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
## Code
```
%run ../linked_list/linked_list.py
%load ../linked_list/linked_list.py
class MyLinkedList(LinkedList):
    """Linked list extended with digit-wise addition (challenge stub)."""

    def add_reverse(self, first_list, second_list):
        """Add two numbers whose digits are stored in linked lists, least
        significant digit first.

        Expected contract (see the test cases below): return a new linked
        list holding the sum's digits in the same reversed order, or None
        if either input is None.
        """
        # TODO: Implement me
        pass
```
## Unit Test
**The following unit test is expected to fail until you solve the challenge.**
```
# %load test_add_reverse.py
from nose.tools import assert_equal
class TestAddReverse(object):
    """Unit tests for MyLinkedList.add_reverse."""

    def test_add_reverse(self):
        """Exercise add_reverse on empty, unequal-length, and equal-length inputs."""
        print('Test: Empty list(s)')
        # Any None operand is an invalid operation and must yield None.
        for lhs, rhs in ((None, None), (Node(5), None), (None, Node(10))):
            assert_equal(MyLinkedList().add_reverse(lhs, rhs), None)

        print('Test: Add values of different lengths')
        # Input 1: 6->5->None
        # Input 2: 9->8->7
        # Result: 5->4->8
        shorter = MyLinkedList(Node(6))
        shorter.append(5)
        longer = MyLinkedList(Node(9))
        for digit in (8, 7):
            longer.append(digit)
        total = MyLinkedList().add_reverse(shorter, longer)
        assert_equal(total.get_all_data(), [5, 4, 8])

        print('Test: Add values of same lengths')
        # Input 1: 6->5->4
        # Input 2: 9->8->7
        # Result: 5->4->2->1
        first = MyLinkedList(Node(6))
        for digit in (5, 4):
            first.append(digit)
        second = MyLinkedList(Node(9))
        for digit in (8, 7):
            second.append(digit)
        total = MyLinkedList().add_reverse(first, second)
        assert_equal(total.get_all_data(), [5, 4, 2, 1])

        print('Success: test_add_reverse')
def main():
    """Entry point: run the add_reverse test suite."""
    TestAddReverse().test_add_reverse()


if __name__ == '__main__':
    main()
```
## Solution Notebook
Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/linked_lists/add_reverse/add_reverse_solution.ipynb) for a discussion on algorithms and code solutions.
| github_jupyter |
# Keyboard BCI
The name "brain-computer interface" suggests that you're using your brain to control a computer. In this notebook, we build a BCI whose `action` is to send keystrokes to the computer.
You can probably think of a number of different applications for something like this. One example would be to use your BCI as a game controller where a certain kind of brain signal is a trigger for a keystroke corresponding to a certain in-game action.
```
# importing the `generic_BCI` class
from neurol.BCI import generic_BCI
```
For simplicity, let's have our trigger be a blink for this BCI. `neurol` has a blink classifier that we can use.
```
from neurol.models import model_tools
blink_model = model_tools.get_model('blink_balanced')
```
This particular model is trained on epochs of 125 samples of two channels (AF7, AF8). So it expects epochs of shape `[125, 2]`, and the predictor classifies on inputs of shape `[n_epochs, 125, 2]`.
Let's use the model on some dummy input to see what its output looks like.
```
import numpy as np
input_ = np.random.random([10, 125, 2]) # 10 random epochs of two channels
pred = blink_model.predict(input_)
print(pred)
```
The output is a softmax prediction (first entry for the non-blink classification and second for blink)
Let's use it to build our BCI's classifier
```
def blink_clf(buffer, clb_info):
    """Classify whether the most recent EEG window contains a blink.

    Args:
        buffer: 2D array of streamed samples; columns 1 and 2 are AF7/AF8.
        clb_info: calibration info (unused by this classifier).

    Returns:
        bool: True if the model predicts a blink in the latest 125 samples.
    """
    # Latest 125 samples from the two channels the model was trained on.
    window = buffer[-125:, 1:3]
    # Model expects a batch dimension: shape [1, 125, 2].
    window = np.expand_dims(window, axis=0)
    # Softmax output: index 0 = non-blink, index 1 = blink.
    probabilities = blink_model.predict(window)
    # Determine whether the higher-probability class is the blink class.
    return bool(np.argmax(probabilities))
```
Now we can define the action we want our BCI to perform based on the results of the classifier.
Let's say we want our BCI to send a spacebar keystroke every time it detects a blink.
To do this, we'll use the `pynput` package.
```
from pynput.keyboard import Key, Controller
keyboard = Controller()
def key_action(is_blink):
    """Send a spacebar keystroke whenever the classifier reports a blink.

    Args:
        is_blink: bool, output of the blink classifier.
    """
    if not is_blink:
        return
    # Press-and-release so the key does not remain held down.
    keyboard.press(Key.space)
    keyboard.release(Key.space)
```
That's all we need. We're going to keep it simple and not bother with a transformer or calibrator.
We can now define the BCI!
```
keyboard_bci = generic_BCI(blink_clf, transformer=None, action=key_action, calibrator=None)
```
Finally, we'll set up our stream.
```
from neurol.connect_device import get_lsl_EEG_inlets
from neurol import streams
inlet = get_lsl_EEG_inlets()[0] # gets first inlet, assuming only one EEG streaming device is connected
# we ask the stream object to manage a buffer of 1024 samples from the inlet
stream = streams.lsl_stream(inlet, buffer_length=1024)
```
Okay! We can test it out now.
```
# Run the BCI loop until the user interrupts with Ctrl-C, then close the
# LSL stream so the inlet is released cleanly.
try:
    keyboard_bci.run(stream)
except KeyboardInterrupt:
    stream.close()
    print('\n')
    print('QUIT BCI')
| github_jupyter |
```
from google.colab import drive
drive.mount('/content/drive', force_remount = True)
%tensorflow_version 2.x
!pip uninstall keras -y
!pip uninstall keras-nightly -y
!pip uninstall keras-Preprocessing -y
!pip uninstall keras-vis -y
!pip uninstall tensorflow -y
!pip install napari[all]
!pip install tensorflow==2.2.0
!pip install keras==2.3.0
!pip install vollseg
!pip install napari[all]
import os
import glob
import sys
import numpy as np
from tqdm import tqdm
from tifffile import imread, imwrite
from csbdeep.models import Config, CARE
from vollseg import StarDist3D, UNET, VollSeg, MASKUNET
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
from pathlib import Path
# Input images, trained models, and where the segmentation results are written.
image_dir = 'data/tiffiles/'
model_dir = 'models/'
save_dir = image_dir + 'Results/'

# Model names under `model_dir`; set any of these to None to skip that model.
noise_model_name = 'noise_model'
unet_model_name = 'unet_model'
star_model_name = 'star_model'
roi_model_name = 'roi_model'

# Load only the models whose name is configured; the others stay None and
# are simply not used by VollSeg.
if unet_model_name is not None:
    unet_model = UNET(config = None, name = unet_model_name, basedir = model_dir)
else:
    unet_model = None

if star_model_name is not None:
    star_model = StarDist3D(config = None, name = star_model_name, basedir = model_dir)
else:
    star_model = None

if noise_model_name is not None:
    noise_model = CARE(config=None, name= noise_model_name, basedir = model_dir)
else:
    noise_model = None

if roi_model_name is not None:
    roi_model = MASKUNET(config = None, name = roi_model_name, basedir = model_dir)
else:
    roi_model = None
# Collect the input TIFF files in deterministic (sorted) order.
Raw_path = os.path.join(image_dir, '*.tif')
filesRaw = glob.glob(Raw_path)
# BUG FIX: list.sort must be *called* — the original bare attribute access
# `filesRaw.sort` did nothing, leaving the files in arbitrary glob order.
filesRaw.sort()
# Minimum size in pixels for the cells to be segmented.
min_size = 1
# Minimum size in pixels for the mask region; regions below this threshold are removed.
min_size_mask = 10
# Maximum size of a region; set this to veto regions above a certain size.
max_size = 1000000
# Adjust the number of tiles depending on your GPU; tiling ensures that the
# image tiles fit into runtime memory.
n_tiles = (2,2,2)
# If your U-Net model is weak, the denoising model is used to obtain the
# semantic segmentation map (False); set to True to use the U-Net for it.
dounet = False
# Pool seeds from both U-Net and StarDist (True) or use StarDist seeds only.
seedpool = True
# Whether the U-Net labels in 3D or slice by slice; if True, neighbouring
# slices are merged.
slice_merge = False
# Use the StarDist probability map for watershedding (True) or the distance map.
UseProbability = True
# Percentile-normalize the input intensities before segmentation.
donormalize = True
lower_perc = 1
upper_perc = 99.8
# For 2D images, segment RGB -> greyscale when True.
RGB = False
# Axes of the input data: TZYX for a time lapse of XYZ stacks, TYX for a
# time lapse of 2D images, ZYX for a directory of XYZ stacks, YX for 2D images.
axes = 'ZYX'
# Segment every input image, writing results under `save_dir`.
for fname in filesRaw:
    image = imread(fname)
    # Output files are named after the input file (without its extension).
    Name = os.path.basename(os.path.splitext(fname)[0])
    # BUG FIX: the original call was missing the comma after
    # `upper_perc=upper_perc`, which made this cell a SyntaxError.
    VollSeg(image,
            unet_model = unet_model,
            star_model = star_model,
            roi_model = roi_model,
            noise_model = noise_model,
            seedpool = seedpool,
            axes = axes,
            min_size = min_size,
            min_size_mask = min_size_mask,
            max_size = max_size,
            donormalize = donormalize,
            lower_perc = lower_perc,
            upper_perc = upper_perc,
            n_tiles = n_tiles,
            slice_merge = slice_merge,
            UseProbability = UseProbability,
            save_dir = save_dir,
            Name = Name,
            dounet = dounet,
            RGB = RGB)
```
| github_jupyter |
<h1>gcForest Algorithm</h1>
<p>The gcForest algorithm was suggested in Zhou and Feng 2017 ( https://arxiv.org/abs/1702.08835 , refer for this paper for technical details) and I provide here a python3 implementation of this algorithm.<br>
I chose to adopt the scikit-learn syntax for ease of use and hereafter I present how it can be used.</p>
```
from GCForest import gcForest
from sklearn.datasets import load_iris, load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
```
<h2>Before starting, a word about sizes.</h2>
<p>*Note* : I recommend the reader to look at this section with the original paper next to the computer to see what I am talking about.</p>
<p>The main technical problem in the present gcForest implementation so far is the memory usage when slicing the input data.
A naive calculation can actually give you an idea of the number and sizes of objects the algorithm will be dealing with.</p>
<p>Starting with a dataset of $N$ samples of size $[l,L]$ and with $C$ classes, the initial "size" is:<br><br>
$S_{D} = N.l.L$</p>
<p>**Slicing Step**<br>
If my window is of size $[w_l,w_L]$ and the chosen stride are $[s_l,s_L]$ then the number of slices per sample is :<br>
<br>
$n_{slices} = \left(\frac{l-w_l}{s_l}+1\right)\left(\frac{L-w_L}{s_L}+1\right)$<br><br>
Obviously the size of slice is $w_l.w_L$ hence the total size of the sliced data set is :<br><br>
$S_{sliced} = N.w_l.w_L.\left(\frac{l-w_l}{s_l}+1\right)\left(\frac{L-w_L}{s_L}+1\right)$<br>
This is when the memory consumption is its peak maximum.</p>
<p>**Class Vector after Multi-Grain Scanning**<br>
Now all slices are fed to the random forest to generate *class vectors*.
The number of class vector per random forest per window per sample is simply equal to the number of slices given to the random forest $n_{cv}(w) = n_{slices}(w)$.
Hence, if we have $N_{RF}$ random forest per window the size of a class vector is (recall we have $N$ samples and $C$ classes):<br><br>
$S_{cv}(w) = N.n_{cv}(w).N_{RF}.C$<br><br>
And finally the total size of the Multi-Grain Scanning output will be:<br><br>
$S_{mgs} = N.\sum_{w} N_{RF}.C.n_{cv}(w)$
</p>
<p>This short calculation is just meant to give you an idea of the data processing during the Multi-Grain Scanning phase. The actual memory consumption depends on the format given (aka float, int, double, etc.) and it might be worth looking at it carefully when dealing with large datasets.</p>
<h2>Iris example</h2>
<p>The iris data set is actually not a very good example as the gcForest algorithm is better suited for time series and images where information can be found at different scales in one sample.<br>
Nonetheless it is still an easy way to test the method.</p>
```
# loading the data
iris = load_iris()
X = iris.data
y = iris.target
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.33)
```
<p>First calling and training the algorithm.
A specificity here is the presence of the 'shape_1X' keyword to specify the shape of a single sample.
I have added it as pictures fed to the machinery might not be square.<br>
Obviously it is not very relevant for the iris data set but still, it has to be defined.</p>
<p>**New in version 0.1.3** : possibility to directly use an int as shape_1X for sequence data.</p>
```
gcf = gcForest(shape_1X=4, window=2, tolerance=0.0)
gcf.fit(X_tr, y_tr)
```
<p>Now checking the prediction for the test set:<p>
```
pred_X = gcf.predict(X_te)
print(pred_X)
# evaluating accuracy
accuracy = accuracy_score(y_true=y_te, y_pred=pred_X)
print('gcForest accuracy : {}'.format(accuracy))
```
<h2>Digits Example</h2>
<p>A much better example is the digits data set containing images of hand written digits.
The scikit data set can be viewed as a mini-MNIST for training purpose.</p>
```
# loading the data
digits = load_digits()
X = digits.data
y = digits.target
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.4)
```
<p> ... training gcForest ... (can take some time...) </p>
```
gcf = gcForest(shape_1X=[8,8], window=[4,6], tolerance=0.0, min_samples_mgs=10, min_samples_cascade=7)
gcf.fit(X_tr, y_tr)
```
<p> ... and predicting classes ... </p>
```
pred_X = gcf.predict(X_te)
print(pred_X)
# evaluating accuracy
accuracy = accuracy_score(y_true=y_te, y_pred=pred_X)
print('gcForest accuracy : {}'.format(accuracy))
```
<h2>Using mg-scanning and cascade_forest Separately</h2>
<p>As the Multi-Grain scanning and the cascade forest modules are quite independent it is possible to use them separately.<br>
If a target `y` is given the code automatically uses it for training; otherwise it recalls the last trained Random Forests to slice the data.</p>
```
gcf = gcForest(shape_1X=[8,8], window=5, min_samples_mgs=10, min_samples_cascade=7)
X_tr_mgs = gcf.mg_scanning(X_tr, y_tr)
X_te_mgs = gcf.mg_scanning(X_te)
```
<p>It is now possible to use the mg_scanning output as input for cascade forests using different parameters. Note that the cascade forest module does not directly return predictions but probability predictions from each Random Forest in the last layer of the cascade. Hence the need to first take the mean of the output and then find the max.</p>
```
gcf = gcForest(tolerance=0.0, min_samples_mgs=10, min_samples_cascade=7)
_ = gcf.cascade_forest(X_tr_mgs, y_tr)
pred_proba = gcf.cascade_forest(X_te_mgs)
tmp = np.mean(pred_proba, axis=0)
preds = np.argmax(tmp, axis=1)
accuracy_score(y_true=y_te, y_pred=preds)
gcf = gcForest(tolerance=0.0, min_samples_mgs=20, min_samples_cascade=10)
_ = gcf.cascade_forest(X_tr_mgs, y_tr)
pred_proba = gcf.cascade_forest(X_te_mgs)
tmp = np.mean(pred_proba, axis=0)
preds = np.argmax(tmp, axis=1)
accuracy_score(y_true=y_te, y_pred=preds)
```
<h3>Skipping mg_scanning</h3>
<p>It is also possible to directly use the cascade forest and skip the multi grain scanning step.</p>
```
gcf = gcForest(tolerance=0.0, min_samples_cascade=20)
_ = gcf.cascade_forest(X_tr, y_tr)
pred_proba = gcf.cascade_forest(X_te)
tmp = np.mean(pred_proba, axis=0)
preds = np.argmax(tmp, axis=1)
accuracy_score(y_true=y_te, y_pred=preds)
```
| github_jupyter |
# 회귀분석
## 검증하고자 하는 것 : 맛집 프로그램별 SNS채널(네이버 블로그)에 미치는 영향력
### 분석계획
### 1. 독립변수에 방송 프로그램 외 변수들을 추가하면서 R^2가 높아지는지 확인 & R^2가 가장 높은 회귀식 도출
### 2. 방송 프로그램별 회귀식을 만들어 포스팅 증가에 가장 영향을 미치는 요인 찾아보기
### 3. 2017년 데이터(train set)로 회귀식을 만든 후, 2018년 데이터(test set)로 예측해보고 정확도 확인
```
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
from scipy import stats
from statsmodels.compat import lzip
import statsmodels
import matplotlib.pyplot as plt
df = pd.read_csv('raw_data/reg.csv')
df.head()
```
## X : 방송(생생정보, 맛있는녀석들, 수요미식회), 방영전 포스팅 수, 서울(서울:1, 서울 외 지역:0), 계절(봄, 여름, 가을, 겨울)
## Y : 방영 후 포스팅 증감 수
## 1. 데이터 전처리 : 더미변수 만들기
```
# Season buckets by broadcast month: Spring (Mar-May), Summer (Jun-Aug),
# Fall (Sep-Nov), Winter (Dec-Feb).
df.loc[(df['방영월']>2)&(df['방영월']<6), '계절'] = '봄'
df.loc[(df['방영월']>5)&(df['방영월']<9), '계절'] = '여름'
df.loc[(df['방영월']>8)&(df['방영월']<12), '계절'] = '가을'
df.loc[(df['방영월']<3)|(df['방영월']==12), '계절'] = '겨울'
df.head()
# Treat the Seoul indicator as categorical (1 = Seoul, 0 = elsewhere).
df['서울'] = df['서울'].astype('category')
df.info()
def dummy_data(data, columns):
    """One-hot encode the given columns and drop the originals.

    Args:
        data: pandas DataFrame to encode.
        columns: iterable of column names to replace with dummy columns
            (each dummy is prefixed with the source column name).

    Returns:
        A new DataFrame with the listed columns one-hot encoded.
    """
    for col in columns:
        dummies = pd.get_dummies(data[col], prefix=col)
        data = pd.concat([data, dummies], axis=1).drop(col, axis=1)
    return data
# One-hot encode the categorical predictors.
dummy_columns = ['방송', '서울', '계절']
reg = dummy_data(df, dummy_columns)
reg.head()
reg.describe()
reg.columns
reg.to_csv('raw_data/reg_tidy.csv', index=False)
# Drop one dummy per categorical variable to avoid the dummy-variable trap
# (perfect multicollinearity): 방송_맛있는녀석들 and 서울_0 become the baselines.
reg.drop(['방송_맛있는녀석들' , '서울_0'], axis=1 ,inplace= True)
reg.head()
# These columns are not used as predictors in the regression models below.
reg.drop(['방영후' , '방영월'], axis=1 ,inplace= True)
reg.head()
```
# 분석1 : 독립변수에 방송 프로그램 외 변수들을 추가하면서 R^2가 높아지는지 확인 & R^2가 가장 높은 회귀식 도출
## 기본모형 : 증가(y) ~ 방송_생생정보(x1) + 방송_수요미식회(x2) + 방영전(x3)
```
model1 = smf.ols("증가 ~ 방송_생생정보 + 방송_수요미식회 + 방영전", data=reg).fit(cov_type='HC0')
```
## 1. 가정
### 1) 정규성
```
df['증가'].plot(kind='hist')
```
왼쪽으로 치우쳐 있어 변환이 필요해 보이나, 음수 값이 있어서 log변환이나 box-cox변환을 할 수 없음
Q-Q plot을 보자
```
stats.probplot(model1.resid, dist="norm", plot= plt)
plt.title("Model1 Residuals Q-Q Plot")
```
엄밀히 정규성을 따른다고 말하기 어렵지만, 데이터 갯수(317)가 크니 중심극한정리(CLT)에 의해 정규분포를 따른다고 가정하겠음
### 2) 등분산
```
resid = model1.resid
plt.scatter(model1.predict(), resid)
name = ['Lagrange multiplier statistic', 'p-value',
'f-value', 'f p-value']
test = sms.het_breuschpagan(model1.resid, model1.model.exog)
lzip(name, test)
```
p-value < 0.05 이므로 등분산 가정도 만족을 안 한다..
### 3) 오차의 독립성
```
statsmodels.stats.stattools.durbin_watson(model1.resid)
```
2에 가까울수록 독립성 가정이 만족하는데, 2에 가까우니 독립성 가정도 만족
### 4) 다중공선성
```
reg.corr()
```
변수들 사이에 강한 상관관계가 보이지 않으므로 다중공선성 문제는 없다고 봐도 무방함
## 2. 회귀분석 결과
```
#model1 = smf.ols("증가 ~ 방송_생생정보 + 방송_수요미식회 + 방영전", data=reg).fit(cov_type='HC0')
model1.summary()
```
F-statstic의 p-value < 0.05 이므로 회귀모형이 유의함
**방영 후 포스팅 증감 = 16.6001 + 3.6074(생생정보통) + 53.6919(수요미식회) + 0.1595(방영전)**
**해석**
1. 생생정보통이 맛있는 녀석들보다 방영후 포스팅 증가분이 평균 3.6074개 더 많다.
**2. 수요미식회가 맛있는 녀석들보다 방영후 포스팅 증가분이 평균 53.6919개 더 많다.**
3. 방영전 포스팅이 1개 증가하면 방영후 포스팅이 0.1595개 더 증가한다.
**4. R^2 = 0.46 : 방송 프로그램, 방영전 포스팅 갯수 변수들의 분산이 방영후 포스팅 증가분의 분산을 46% 정도 설명한다. 즉, 모형의 설명력이 46% 된다고 해석할 수 있다.**
**독립변수의 p-value를 봤을 때, 통계적으로 유의한 변수는 수요미식회뿐...**
## 서울 변수 추가 : 증가(y) ~ 방송_생생정보(x1) + 방송_수요미식회(x2) + 방영전(x3) + 서울_1(x4)
```
model2 = smf.ols("증가 ~ 방송_생생정보 + 방송_수요미식회 + 방영전 + 서울_1", data=reg).fit(cov_type='HC0')
```
## 1. 가정
### 1) 정규성
```
stats.probplot(model2.resid, dist="norm", plot= plt)
plt.title("Model2 Residuals Q-Q Plot")
```
엄밀히 정규성을 따른다고 말하기 어렵지만, 데이터 갯수(317)가 크니 중심극한정리(CLT)에 의해 정규분포를 따른다고 가정하겠음
### 2) 등분산
```
resid = model2.resid
plt.scatter(model2.predict(), resid)
name = ['Lagrange multiplier statistic', 'p-value',
'f-value', 'f p-value']
test = sms.het_breuschpagan(model2.resid, model2.model.exog)
lzip(name, test)
```
p-value < 0.05 이므로 등분산 가정도 만족을 안 한다..
### 3) 오차의 독립성
```
statsmodels.stats.stattools.durbin_watson(model2.resid)
```
2에 가까울수록 독립성 가정이 만족하는데, 2에 가까우니 독립성 가정도 만족
## 2. 회귀분석 결과
```
#model2 = smf.ols("증가 ~ 방송_생생정보 + 방송_수요미식회 + 방영전 + 서울_1", data=reg).fit(cov_type='HC0')
model2.summary()
```
F-statstic의 p-value < 0.05 이므로 회귀모형이 유의함
**방영 후 포스팅 증감 = 14.3407 + 4.5905(생생정보통) + 53.1939(수요미식회) + 0.16(방영전) + 3.4289(서울_1)**
**해석**
1. 생생정보통이 맛있는 녀석들보다 방영후 포스팅 증가분이 평균 4.5905개 더 많다.
**2. 수요미식회가 맛있는 녀석들보다 방영후 포스팅 증가분이 평균 53.1939개 더 많다.**
3. 방영전 포스팅이 1개 증가하면 방영후 포스팅이 0.16개 더 증가한다.
4. 서울에 위치한 식당이 그렇지 않은 식당보다 방영 후 포스팅 증가분이 3.4289이 더 많다.
**5. R^2 = 0.462 : 방송 프로그램, 방영전 포스팅 갯수 변수들의 분산이 방영후 포스팅 증가분의 분산을 46.2% 정도 설명한다. 즉, 모형의 설명력이 46.2% 된다고 해석할 수 있다. 서울 변수를 추가했더니 0.2% 설명력이 늘었음**
**독립변수의 p-value를 봤을 때, 통계적으로 유의한 변수는 수요미식회뿐... 서울 여부 변수를 추가했는데 유의한 변수가 아니었음**
## 계절 변수 추가 : 증가(y) ~ 방송_생생정보(x1) + 방송_수요미식회(x2) + 방영전(x3) + 계절_여름(x4) + 계절_가을(x5) + 계절_겨울(x6)
```
model3 = smf.ols("증가 ~ 방송_생생정보 + 방송_수요미식회 + 방영전 + 계절_여름 + 계절_가을 + 계절_겨울", data=reg).fit(cov_type='HC0')
model3.summary()
```
### 1. 가정
#### 1) 정규성
- Prob(JB) < 0.05 이므로 정규성 만족 x
#### 2) 등분산
- Prob(Omnibus) < 0.05이므로 등분산도 만족 x..
#### 3) 독립성
- Durbin-Watson = 2.082 : 2에 가까우므로 독립은 만족
### 2. 회귀분석 결과
F-statstic의 p-value < 0.05 이므로 회귀모형이 유의함
**방영 후 포스팅 증감 = 20.1117 + 3.7480(생생정보통) + 53.7501(수요미식회) + 0.1635(방영전) - 6.8170(계절_여름) -7.5058(계절_가을)
-3.2874(계절_겨울)**
**해석**
1. 생생정보통이 맛있는 녀석들보다 방영후 포스팅 증가분이 평균 3.7480개 더 많다.
**2. 수요미식회가 맛있는 녀석들보다 방영후 포스팅 증가분이 평균 53.7501개 더 많다.**
**3. 방영전 포스팅이 1개 증가하면 방영후 포스팅이 0.1635개 더 증가한다.**
4. 봄에 방영된 식당이 다른 계절에 방영된 식당보다 방영 후 포스팅 증가분이 더 많다.
**5. R^2 = 0.466 : 방송 프로그램, 방영전 포스팅 갯수 변수들의 분산이 방영후 포스팅 증가분의 분산을 46.6% 정도 설명한다. 즉, 모형의 설명력이 46.6% 된다고 해석할 수 있다. 계절 변수를 추가했더니 0.6% 설명력이 늘었음**
**독립변수의 p-value를 봤을 때, 통계적으로 유의한 변수는 '수요미식회'와 '방영전 포스팅 갯수'뿐... 계절 변수를 추가했는데 유의한 변수가 아니었음(대신 방영전 변수가 통계적으로 유의하게 됨)**
## 서울, 계절 변수 추가 : 증가(y) ~ 방송_생생정보(x1) + 방송_수요미식회(x2) + 방영전(x3) + 계절_여름(x4) + 계절_가을(x5) + 계절_겨울(x6) + 서울_1(x7)
```
model4 = smf.ols("증가 ~ 방송_생생정보 + 방송_수요미식회 + 방영전 + 계절_여름 + 계절_가을 + 계절_겨울 + 서울_1", data=reg).fit(cov_type='HC0')
model4.summary()
```
### 1. 가정
#### 1) 정규성
- Prob(JB) < 0.05 이므로 정규성 만족 x
#### 2) 등분산
- Prob(Omnibus) < 0.05이므로 등분산도 만족 x..
#### 3) 독립성
- Durbin-Watson = 2.076 : 2에 가까우므로 독립은 만족
### 2. 회귀분석 결과
F-statstic의 p-value < 0.05 이므로 회귀모형이 유의함
**방영 후 포스팅 증감 = 17.9499 + 4.7403(생생정보통) + 53.2231(수요미식회) + 0.1636(방영전) - 7.0785(계절_여름) - 7.3920(계절_가을)
- 3.6171(계절_겨울) + 3.4830(서울_1)**
**해석**
1. 생생정보통이 맛있는 녀석들보다 방영후 포스팅 증가분이 평균 4.7403개 더 많다.
**2. 수요미식회가 맛있는 녀석들보다 방영후 포스팅 증가분이 평균 53.2231개 더 많다.**
**3. 방영전 포스팅이 1개 증가하면 방영후 포스팅이 0.1636개 더 증가한다.**
4. 봄에 방영된 식당이 다른 계절에 방영된 식당보다 방영 후 포스팅 증가분이 더 많다.
5. 서울에 위치한 식당이 그렇지 않은 식당보다 방영 후 포스팅 증가분이 3.4830이 더 많다.
**6. R^2 = 0.468 : 방송 프로그램, 방영전 포스팅 갯수 변수들의 분산이 방영후 포스팅 증가분의 분산을 46.8% 정도 설명한다. 즉, 모형의 설명력이 46.8% 된다고 해석할 수 있다. 계절 변수를 추가했더니 0.8% 설명력이 늘었음**
**독립변수의 p-value를 봤을 때, 통계적으로 유의한 변수는 '수요미식회'와 '방영전 포스팅 갯수'뿐... 계절과 서울 변수를 추가했는데 유의한 변수가 아니었음(대신 방영전 변수가 통계적으로 유의하게 됨)**
| github_jupyter |
# Swedes without any close friends
This notebook explores and visualizes the proportion of Swedes stating they have no close friends.
- Date: 2019-04-04
- Source: [SCB: Undersökningarna av levnadsförhållanden](http://www.statistikdatabasen.scb.se/pxweb/sv/ssd/START__LE__LE0101__LE0101R/LE0101R07/?rxid=710c09ba-1e21-4bb5-9619-441e5ba97d8b)
### Setup
```
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from src.plot import ds_plot
%config InlineBackend.figure_format = 'retina'
plt.style.use('../assets/datastory.mpltstyle')
df = pd.read_csv('../data/raw/friends.csv', encoding='latin1', skiprows=1)
```
### Transform
```
df = df.drop('indikator', axis=1)
df.columns = ['age', 'sex', 'percent_2008', 'percent_2016',
'error_2008', 'error_2016']
men = df[df['sex'] == 'män']
women = df[df['sex'] == 'kvinnor']
```
### Plot
```
# Men. Grouped bar chart with error bars
fig, ax = ds_plot()
ax.bar(men.age.index - 0.3, men.percent_2016, 0.4, color='#2bbaff', yerr=men.error_2016)
ax.bar(men.age.index + 0.3, men.percent_2008, 0.4, color='#57d298', yerr=men.error_2008)
ax.set_xticks(men.age.index)
ax.set_xticklabels(men.age.values)
ax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
ax.set_xlabel('')
plt.gcf().set_facecolor('#ffffff');
# Men. Line chart with confidence intervals
fig, ax = ds_plot()
men.plot(x='age', y='percent_2016', ylim=(0, 50), ax=ax, label='Män 2017', color='#2bbaff') #blue
ax.plot(men.age, men.percent_2016, 'o', color='#2bbaff')
men.plot(x='age', y='percent_2008', ax=ax, label='Män 2009', color='#57d298') # green
ax.plot(men.age, men.percent_2008, 'o', color='#57d298')
ax.fill_between(men.age,
men.percent_2016 + men.error_2016,
men.percent_2016 - men.error_2016,
color='#d5f1ff', alpha=0.5)
ax.fill_between(men.age,
men.percent_2008 + men.error_2008,
men.percent_2008 - men.error_2008,
color='#ddf6ea', alpha=0.5)
ax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
ax.set_xlabel('')
ax.legend().remove()
fig.set_facecolor('#ffffff')
fig.savefig('../charts/2019-04-04-no-friends.png')
# Women. Line chart with confidence intervals
fig, ax = ds_plot()
women.plot(x='age', y='percent_2016', ylim=(0, 50), ax=ax, label='Kvinnor 2017', color='#2bbaff')
ax.plot(women.age, women.percent_2016, 'o', color='#2bbaff')
women.plot(x='age', y='percent_2008', ax=ax, label='Kvinnor 2009', color='#57d298')
ax.plot(women.age, women.percent_2008, 'o', color='#57d298')
ax.fill_between(women.age,
women.percent_2016 + women.error_2016,
women.percent_2016 - women.error_2016,
color='#d5f1ff', alpha=0.5)
ax.fill_between(women.age,
women.percent_2008 + women.error_2008,
women.percent_2008 - women.error_2008,
color='#ddf6ea', alpha=0.5)
ax.yaxis.set_major_formatter(ticker.PercentFormatter(decimals=0))
ax.set_xlabel('')
plt.gcf().set_facecolor('#ffffff');
```
| github_jupyter |
#### Copyright 2017 Google LLC.
```
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Regresión logística
**Objetivos de aprendizaje:**
* replantear el predictor del valor mediano de las casas (de los ejercicios anteriores) como un modelo de clasificación binaria
* comparar la eficacia de la regresión logística frente a la regresión lineal para un problema de clasificación binaria
Al igual que en el ejercicio anterior, trabajamos con el conjunto de datos de viviendas en California, pero esta vez lo convertiremos en un problema de clasificación binaria al predecir si una manzana es de costo elevado. Por el momento, también volveremos a los atributos predeterminados.
## Planteamiento del problema como clasificación binaria
El objetivo de nuestro conjunto de datos es `median_house_value`, que es un atributo numérico (de valor continuo). Podemos crear una etiqueta booleana al aplicar un umbral a este valor continuo.
Dados los atributos que describen una manzana, queremos predecir si se trata de una manzana de costo elevado. Para preparar los objetivos para entrenar y evaluar los datos, definimos un umbral de clasificación del percentil 75° para el valor mediano de las casas (un valor de aproximadamente 265,000). Todos los valores de las casas por encima del umbral se etiquetan como `1` y los demás, como `0`.
## Preparación
Ejecuta las celdas a continuación para cargar los datos y preparar los atributos de entrada y objetivos.
```
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
# Silence TensorFlow's verbose INFO/WARNING logging.
tf.logging.set_verbosity(tf.logging.ERROR)
# Keep pandas output compact in the notebook.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format

california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",")
# Shuffle the rows so that the later head/tail splits are effectively random.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
```
Observa que el código es levemente distinto del de los ejercicios anteriores. En lugar de usar `median_house_value` como objetivo, creamos un nuevo objetivo binario, `median_house_value_is_high`.
```
def preprocess_features(california_housing_dataframe):
    """Prepares input features from California housing data set.

    Args:
      california_housing_dataframe: A Pandas DataFrame expected to contain data
        from the California housing data set.
    Returns:
      A DataFrame that contains the features to be used for the model, including
      synthetic features.
    """
    feature_names = [
        "latitude",
        "longitude",
        "housing_median_age",
        "total_rooms",
        "total_bedrooms",
        "population",
        "households",
        "median_income",
    ]
    processed_features = california_housing_dataframe[feature_names].copy()
    # Create a synthetic feature: average rooms per person in the block.
    processed_features["rooms_per_person"] = (
        california_housing_dataframe["total_rooms"]
        / california_housing_dataframe["population"])
    return processed_features
def preprocess_targets(california_housing_dataframe):
    """Prepares target features (i.e., labels) from California housing data set.

    Args:
      california_housing_dataframe: A Pandas DataFrame expected to contain data
        from the California housing data set.
    Returns:
      A DataFrame that contains the target feature.
    """
    output_targets = pd.DataFrame()
    # Boolean categorical label: 1.0 when the median house value exceeds the
    # threshold (roughly the 75th percentile), else 0.0.
    is_high = california_housing_dataframe["median_house_value"] > 265000
    output_targets["median_house_value_is_high"] = is_high.astype(float)
    return output_targets
# Choose the first 12000 (out of 17000) examples for training.
# This head/tail split is only valid because the dataframe was randomly
# reindexed (shuffled) right after loading.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))

# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))

# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())

print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
```
## ¿Cómo se desempeñaría la regresión lineal?
Para ver por qué es eficaz la regresión logística, primero entrenemos un modelo simple que use regresión lineal. Este modelo usará etiquetas con valores del conjunto `{0, 1}` e intentará predecir un valor continuo que esté lo más cerca posible de `0` o `1`. Además, queremos interpretar el resultado como una probabilidad, de manera que sería ideal si el resultado estuviera dentro del rango `(0, 1)`. De este modo, aplicaríamos un umbral de `0.5` para determinar la etiqueta.
Ejecuta las celdas a continuación para entrenar el modelo de regresión lineal con [LinearRegressor](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearRegressor).
```
def construct_feature_columns(input_features):
    """Construct the TensorFlow Feature Columns.

    Args:
      input_features: The names of the numerical input features to use.
    Returns:
      A set of feature columns
    """
    # A set comprehension is the idiomatic (and slightly cheaper) form of the
    # original set([...]) over a list comprehension.
    return {tf.feature_column.numeric_column(my_feature)
            for my_feature in input_features}
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Builds an input pipeline feeding (features, labels) batches to an Estimator.

    Args:
      features: pandas DataFrame of features
      targets: pandas DataFrame of targets
      batch_size: Size of batches to be passed to the model
      shuffle: True or False. Whether to shuffle the data.
      num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
    Returns:
      Tuple of (features, labels) for next data batch
    """

    # Convert pandas data into a dict of np arrays (one entry per feature column).
    features = {key:np.array(value) for key,value in dict(features).items()}

    # Construct a dataset, and configure batching/repeating.
    ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
    ds = ds.batch(batch_size).repeat(num_epochs)

    # Shuffle the data, if specified.
    # NOTE(review): shuffle is applied *after* batching here, so whole batches
    # (not individual examples) are shuffled — confirm this is intended.
    if shuffle:
        ds = ds.shuffle(10000)

    # Return the next batch of data (one-shot iterator: TF 1.x API).
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels
def train_linear_regressor_model(
    learning_rate,
    steps,
    batch_size,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a linear regression model.

  In addition to training, this function also prints training progress information,
  as well as a plot of the training and validation loss over time.

  Args:
    learning_rate: A `float`, the learning rate.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.

  Returns:
    A `LinearRegressor` object trained on the training data.
  """
  # Train in `periods` chunks so loss can be reported along the way.
  periods = 10
  steps_per_period = steps / periods

  # Create a linear regressor object; gradient clipping keeps SGD stable.
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_regressor = tf.estimator.LinearRegressor(
      feature_columns=construct_feature_columns(training_examples),
      optimizer=my_optimizer
  )

  # Create input functions. The predict_* variants make a single unshuffled
  # pass so predictions stay aligned with the targets.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value_is_high"],
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value_is_high"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value_is_high"],
                                                    num_epochs=1,
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("RMSE (on training data):")
  training_rmse = []
  validation_rmse = []
  for period in range (0, periods):
    # Train the model, starting from the prior state.
    linear_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
    training_predictions = np.array([item['predictions'][0] for item in training_predictions])
    validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
    validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
    # Compute training and validation loss. The targets here are binary
    # {0, 1} labels, so RMSE is only a rough proxy for classification quality.
    training_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(training_predictions, training_targets))
    validation_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(validation_predictions, validation_targets))
    # Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    training_rmse.append(training_root_mean_squared_error)
    validation_rmse.append(validation_root_mean_squared_error)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("RMSE")
  plt.xlabel("Periods")
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(training_rmse, label="training")
  plt.plot(validation_rmse, label="validation")
  plt.legend()
  return linear_regressor
linear_regressor = train_linear_regressor_model(
learning_rate=0.000001,
steps=200,
batch_size=20,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
## Tarea 1: ¿Podemos calcular la pérdida logística para estas predicciones?
**Examina las predicciones y decide si podemos usarlas o no para calcular la pérdida logística.**
`LinearRegressor` usa la pérdida L2, que no se desempeña muy bien al penalizar las clasificaciones incorrectas cuando el resultado se interpreta como una probabilidad. Por ejemplo, debería haber una diferencia enorme si un ejemplo negativo se clasificara como positivo con una probabilidad de 0.9 frente a 0.9999, pero la pérdida L2 no diferencia estos casos de forma taxativa.
Por el contrario, `LogLoss` penaliza mucho más estos "errores de certeza". Recuerda que `LogLoss` se define de la siguiente manera:
$$Pérdida logística = \sum_{(x,y)\in D} -y \cdot log(y_{pred}) - (1 - y) \cdot log(1 - y_{pred})$$
Pero, primero, tendremos que obtener los valores de predicción. Podríamos usar `LinearRegressor.predict` para obtenerlos.
Dadas las predicciones y los objetivos, ¿podemos calcular `LogLoss`?
### Solución
Haz clic más abajo para que se muestre la solución.
```
predict_validation_input_fn = lambda: my_input_fn(validation_examples,
validation_targets["median_house_value_is_high"],
num_epochs=1,
shuffle=False)
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
_ = plt.hist(validation_predictions)
```
## Tarea 2: Entrenar un modelo de regresión logística y calcular la pérdida logística con el conjunto de validación
Para usar regresión logística, simplemente usa [LinearClassifier](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier) en lugar de `LinearRegressor`. Completa el código a continuación.
**NOTA**: Al ejecutar `train()` y `predict()` en un modelo de `LinearClassifier`, puedes acceder a las probabilidades predichas con valores reales a través de la clave `"probabilities"` en el diccionario devuelto, p. ej., `predictions["probabilities"]`. La función [log_loss](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html) de Sklearn es práctica para calcular la pérdida logística con estas probabilidades.
```
def train_linear_classifier_model(
    learning_rate,
    steps,
    batch_size,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a linear classification model.

  In addition to training, this function also prints training progress information,
  as well as a plot of the training and validation loss over time.

  Args:
    learning_rate: A `float`, the learning rate.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.

  Returns:
    A `LinearClassifier` object trained on the training data.
  """
  periods = 10
  steps_per_period = steps / periods

  # Create a linear classifier object.
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  # Exercise placeholder: build a tf.estimator.LinearClassifier here, analogous
  # to the LinearRegressor earlier in this notebook. This line is intentionally
  # incomplete and will not run until filled in.
  linear_classifier = # YOUR CODE HERE: Construct the linear classifier.

  # Create input functions. The predict_* variants make a single unshuffled
  # pass so predictions stay aligned with the targets.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value_is_high"],
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value_is_high"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value_is_high"],
                                                    num_epochs=1,
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("LogLoss (on training data):")
  training_log_losses = []
  validation_log_losses = []
  for period in range (0, periods):
    # Train the model, starting from the prior state.
    linear_classifier.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_probabilities = linear_classifier.predict(input_fn=predict_training_input_fn)
    training_probabilities = np.array([item['probabilities'] for item in training_probabilities])
    validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn)
    validation_probabilities = np.array([item['probabilities'] for item in validation_probabilities])
    # sklearn's log_loss accepts the (n_samples, 2) class-probability array.
    training_log_loss = metrics.log_loss(training_targets, training_probabilities)
    validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities)
    # Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, training_log_loss))
    # Add the loss metrics from this period to our list.
    training_log_losses.append(training_log_loss)
    validation_log_losses.append(validation_log_loss)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("LogLoss")
  plt.xlabel("Periods")
  plt.title("LogLoss vs. Periods")
  plt.tight_layout()
  plt.plot(training_log_losses, label="training")
  plt.plot(validation_log_losses, label="validation")
  plt.legend()
  return linear_classifier
linear_classifier = train_linear_classifier_model(
learning_rate=0.000005,
steps=500,
batch_size=20,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
### Solución
Haz clic más abajo para ver la solución.
```
def train_linear_classifier_model(
    learning_rate,
    steps,
    batch_size,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a linear classification model.

  In addition to training, this function also prints training progress information,
  as well as a plot of the training and validation loss over time.

  Args:
    learning_rate: A `float`, the learning rate.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.

  Returns:
    A `LinearClassifier` object trained on the training data.
  """
  # Train in `periods` chunks so loss can be reported along the way.
  periods = 10
  steps_per_period = steps / periods

  # Create a linear classifier object; gradient clipping keeps SGD stable.
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_classifier = tf.estimator.LinearClassifier(
      feature_columns=construct_feature_columns(training_examples),
      optimizer=my_optimizer
  )

  # Create input functions. The predict_* variants make a single unshuffled
  # pass so predictions stay aligned with the targets.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value_is_high"],
                                          batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value_is_high"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value_is_high"],
                                                    num_epochs=1,
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("LogLoss (on training data):")
  training_log_losses = []
  validation_log_losses = []
  for period in range (0, periods):
    # Train the model, starting from the prior state.
    linear_classifier.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_probabilities = linear_classifier.predict(input_fn=predict_training_input_fn)
    training_probabilities = np.array([item['probabilities'] for item in training_probabilities])
    validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn)
    validation_probabilities = np.array([item['probabilities'] for item in validation_probabilities])
    # sklearn's log_loss accepts the (n_samples, 2) class-probability array.
    training_log_loss = metrics.log_loss(training_targets, training_probabilities)
    validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities)
    # Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, training_log_loss))
    # Add the loss metrics from this period to our list.
    training_log_losses.append(training_log_loss)
    validation_log_losses.append(validation_log_loss)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("LogLoss")
  plt.xlabel("Periods")
  plt.title("LogLoss vs. Periods")
  plt.tight_layout()
  plt.plot(training_log_losses, label="training")
  plt.plot(validation_log_losses, label="validation")
  plt.legend()
  return linear_classifier
linear_classifier = train_linear_classifier_model(
learning_rate=0.000005,
steps=500,
batch_size=20,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
```
## Tarea 3: Calcular la exactitud y representar una curva ROC para el conjunto de validación
Algunas de las métricas que resultan útiles para la clasificación son la [exactitud](https://es.wikipedia.org/wiki/Precisi%C3%B3n_y_exactitud) del modelo, la [curva ROC](https://es.wikipedia.org/wiki/Curva_ROC) y el área bajo la curva ROC (AUC). Examinaremos estas métricas.
`LinearClassifier.evaluate` calcula las métricas útiles, como la exactitud y el AUC.
```
evaluation_metrics = linear_classifier.evaluate(input_fn=predict_validation_input_fn)
print("AUC on the validation set: %0.2f" % evaluation_metrics['auc'])
print("Accuracy on the validation set: %0.2f" % evaluation_metrics['accuracy'])
```
Para obtener las tasas de verdaderos positivos y falsos positivos que se necesitan para representar una curva ROC, puedes usar probabilidades de clase, como aquellas que se calculan con `LinearClassifier.predict`, y la función [roc_curve](http://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics) de Sklearn.
```
validation_probabilities = linear_classifier.predict(input_fn=predict_validation_input_fn)
# Get just the probabilities for the positive class (column 1 of the pair).
validation_probabilities = np.array([item['probabilities'][1] for item in validation_probabilities])

# False/true positive rates at every score threshold, for the ROC curve.
false_positive_rate, true_positive_rate, thresholds = metrics.roc_curve(
    validation_targets, validation_probabilities)
plt.plot(false_positive_rate, true_positive_rate, label="our model")
# The diagonal is the performance of a random classifier (AUC = 0.5).
plt.plot([0, 1], [0, 1], label="random classifier")
_ = plt.legend(loc=2)
```
**Comprueba si puedes ajustar la configuración de aprendizaje del modelo entrenado en la Tarea 2 para mejorar el AUC.**
Con frecuencia, determinadas métricas mejoran en detrimento de otras, y debes encontrar la configuración que logre un buen equilibrio.
**Verifica si todas las métricas mejoran al mismo tiempo.**
```
# TUNE THE SETTINGS BELOW TO IMPROVE AUC
linear_classifier = train_linear_classifier_model(
learning_rate=0.000005,
steps=500,
batch_size=20,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
evaluation_metrics = linear_classifier.evaluate(input_fn=predict_validation_input_fn)
print("AUC on the validation set: %0.2f" % evaluation_metrics['auc'])
print("Accuracy on the validation set: %0.2f" % evaluation_metrics['accuracy'])
```
### Solución
Haz clic más abajo para conocer una solución posible.
Una solución posible que funciona es simplemente entrenar por más tiempo, siempre que no realicemos un sobreajuste.
Esto se puede lograr al incrementar el número de pasos, el tamaño del lote o ambos.
Todas las métricas mejoran a la vez, de manera que nuestra métrica de pérdida es un buen representante tanto del AUC como de la exactitud.
Observa cómo hace muchas más iteraciones simplemente para restringir algunas unidades más del AUC. Esto ocurre comúnmente, pero, con frecuencia, incluso esta pequeña ganancia vale la pena.
```
linear_classifier = train_linear_classifier_model(
learning_rate=0.000003,
steps=20000,
batch_size=500,
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
evaluation_metrics = linear_classifier.evaluate(input_fn=predict_validation_input_fn)
print("AUC on the validation set: %0.2f" % evaluation_metrics['auc'])
print("Accuracy on the validation set: %0.2f" % evaluation_metrics['accuracy'])
```
| github_jupyter |
# Sampler statistics
When checking for convergence or when debugging a badly behaving
sampler, it is often helpful to take a closer look at what the
sampler is doing. For this purpose some samplers export
statistics for each generated sample.
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import pymc3 as pm
%matplotlib inline
```
As a minimal example we sample from a standard normal distribution:
```
# Minimal example model: ten independent standard-normal variables.
model = pm.Model()
with model:
    mu1 = pm.Normal("mu1", mu=0, sd=1, shape=10)

# Sample two chains with NUTS; the first 1000 draws per chain are tuning.
with model:
    step = pm.NUTS()
    trace = pm.sample(2000, tune=1000, init=None, step=step, njobs=2)
```
NUTS provides the following statistics:
```
trace.stat_names
```
- `mean_tree_accept`: The mean acceptance probability for the tree that generated this sample. The mean of these values across all samples but the burn-in should be approximately `target_accept` (the default for this is 0.8).
- `diverging`: Whether the trajectory for this sample diverged. If there are many diverging samples, this usually indicates that a region of the posterior has high curvature. Reparametrization can often help, but you can also try to increase `target_accept` to something like 0.9 or 0.95.
- `energy`: The energy at the point in phase-space where the sample was accepted. This can be used to identify posteriors with problematically long tails. See below for an example.
- `energy_error`: The difference in energy between the start and the end of the trajectory. For a perfect integrator this would always be zero.
- `max_energy_error`: The maximum difference in energy along the whole trajectory.
- `depth`: The depth of the tree that was used to generate this sample
- `tree_size`: The number of leafs of the sampling tree, when the sample was accepted. This is usually a bit less than $2 ^ \text{depth}$. If the tree size is large, the sampler is using a lot of leapfrog steps to find the next sample. This can for example happen if there are strong correlations in the posterior, if the posterior has long tails, if there are regions of high curvature ("funnels"), or if the variance estimates in the mass matrix are inaccurate. Reparametrisation of the model or estimating the posterior variances from past samples might help.
- `tune`: This is `True`, if step size adaptation was turned on when this sample was generated.
- `step_size`: The step size used for this sample.
- `step_size_bar`: The current best known step-size. After the tuning samples, the step size is set to this value. This should converge during tuning.
If the name of the statistic does not clash with the name of one of the variables, we can use indexing to get the values. The values for the chains will be concatenated.
We can see that the step sizes converged after the 1000 tuning samples for both chains to about the same value. The first 2000 values are from chain 1, the second 2000 from chain 2.
```
plt.plot(trace['step_size_bar'])
```
The `get_sampler_stats` method provides more control over which values should be returned, and it also works if the name of the statistic is the same as the name of one of the variables. We can use the `chains` option, to control values from which chain should be returned, or we can set `combine=False` to get the values for the individual chains:
```
sizes1, sizes2 = trace.get_sampler_stats('depth', combine=False)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True)
ax1.plot(sizes1)
ax2.plot(sizes2)
accept = trace.get_sampler_stats('mean_tree_accept', burn=1000)
sb.distplot(accept, kde=False)
accept.mean()
```
Find the index of all diverging transitions:
```
trace['diverging'].nonzero()
```
It is often useful to compare the overall distribution of the
energy levels with the change of energy between successive samples.
Ideally, they should be very similar:
```
# Compare the (centered) marginal energy distribution with the distribution
# of energy differences between successive samples; similar shapes indicate
# the sampler explores the posterior efficiently.
energy = trace['energy']
energy_diff = np.diff(energy)
sb.distplot(energy - energy.mean(), label='energy')
sb.distplot(energy_diff, label='energy diff')
plt.legend()
```
If the overall distribution of energy levels has longer tails, the efficiency of the sampler will deteriorate quickly.
# Multiple samplers
If multiple samplers are used for the same model (e.g. for continuous and discrete variables), the exported values are merged or stacked along a new axis.
```
# Mixed discrete/continuous model: a separate sampler handles each kind
# of variable, and their exported statistics get merged in the trace.
model = pm.Model()
with model:
    mu1 = pm.Bernoulli("mu1", p=0.8)
    mu2 = pm.Normal("mu2", mu=0, sd=1, shape=10)

with model:
    step1 = pm.BinaryMetropolis([mu1])
    step2 = pm.Metropolis([mu2])
    trace = pm.sample(10000, init=None, step=[step1, step2], njobs=2, tune=1000)

trace.stat_names
```
Both samplers export `accept`, so we get one acceptance probability for each sampler:
```
trace.get_sampler_stats('accept')
```
| github_jupyter |
```
# Load Packages
import pandas as pd
import numpy as np
import random
import sklearn
from sklearn.model_selection import LeaveOneOut
from sklearn import preprocessing
from matplotlib import pyplot as plt
%matplotlib inline
# load window methylation data
A = pd.read_csv("Window_Meth.csv")
# load window methylation data for chromosome 1
# A = pd.read_csv("Window_chr_Meth.csv")
# load folate data
B = pd.read_csv("Folate_placenta.csv")
# remove row without enough coverage
A = A[~(A == 0).any(axis=1)]
# Matrix and modify data structure
n_tr = 70
n_te = 17
m = len(A)
X_tr = np.zeros((n_tr,m))
y_tr = np.zeros(n_tr)
A1 = A.values
A2 = A1[:,1:]
A3 = A2.astype(np.float)
A4 = A3.transpose()
B1 = B.values
B2 = B1[:,1]
B3 = B2.astype(np.float)
B1 = B.values
B2 = B1[:,1]
B3 = B2.astype(np.float)
# Random shuffle samples to seperate training and testing set
ind = list(range(87))
random.seed(20)
random.shuffle(ind)
ind_tr = ind[:70]
ind_te = ind[70:]
X_tr = A4[ind_tr,:]
X_te = A4[ind_te,:]
y_tr = B3[ind_tr]
y_te = B3[ind_te]
# scale data
from sklearn.preprocessing import scale
X_tr = scale(X_tr)
X_te = scale(X_te)
y_tr = scale(y_tr)
y_te = scale(y_te)
```
## Linear Regression
```
from sklearn.linear_model import LinearRegression
reg = LinearRegression().fit(X_tr, y_tr)
Regre_pred = reg.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, Regre_pred, multioutput='variance_weighted')
plt.plot(range(17),Regre_pred)
plt.plot(range(17),y_te)
plt.show()
```
## Nearest neighbors
```
from sklearn.neighbors import KNeighborsRegressor
neigh = KNeighborsRegressor(n_neighbors=2, p = 7, weights = 'distance')
neigh.fit(X_tr, y_tr)
neigh_pred = neigh.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, neigh_pred, multioutput='variance_weighted')
plt.plot(range(17),neigh_pred)
plt.plot(range(17),y_te)
plt.show()
```
## SVM
```
from sklearn import svm
from sklearn.svm import SVR
svc = svm.SVR(kernel = 'linear', degree = 2)
svc.fit(X_tr, y_tr)
svm_pred = svc.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, svm_pred, multioutput='variance_weighted')
plt.plot(range(17),svm_pred)
plt.plot(range(17),y_te)
plt.show()
```
## Random Forrest
```
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(max_depth=2, random_state=1)
rf.fit(X_tr, y_tr)
rf_pred = rf.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, rf_pred, multioutput='variance_weighted')
plt.plot(range(17),rf_pred)
plt.plot(range(17),y_te)
plt.show()
```
## Adaboost
```
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor

# Fit an AdaBoost regressor on the training data.
adb = AdaBoostRegressor(learning_rate=2.0, loss='linear')
adb.fit(X_tr, y_tr)
adb_pred = adb.predict(X_te)

# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, adb_pred, multioutput='variance_weighted')

# FIX: the original plotted rf_pred (the random-forest predictions from the
# previous section) here; plot the AdaBoost predictions instead.
plt.plot(range(17), adb_pred)
plt.plot(range(17), y_te)
plt.show()
```
## Gaussian Process
```
from sklearn.datasets import make_friedman2
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
# Gaussian Process
kernel = DotProduct() + WhiteKernel()
gpr = GaussianProcessRegressor(kernel=kernel,random_state=10).fit(X_tr, y_tr)
gpr_pred = gpr.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, gpr_pred, multioutput='variance_weighted')
plt.plot(range(17),gpr_pred)
plt.plot(range(17),y_te)
plt.show()
```
## Decision Tree
```
from sklearn import tree
clf = tree.DecisionTreeRegressor(max_leaf_nodes=15)
clf = clf.fit(X_tr, y_tr)
clf_pred = clf.predict(X_te)
# R2 score
from sklearn.metrics import r2_score
r2_score(y_te, clf_pred, multioutput='variance_weighted')
plt.plot(range(17),clf_pred)
plt.plot(range(17),y_te)
plt.show()
```
## Feature Selection and Feature Importance
```
# Feature Extraction with PCA
import numpy
from pandas import read_csv
from sklearn.decomposition import PCA

# feature extraction for the 0.95 variance
# NOTE(review): this fit is immediately overwritten by the 30-component fit
# below, so it has no effect on the final features — presumably kept for
# exploratory comparison.
pca = PCA(0.95)
fit = pca.fit(X_tr)

# feature extraction for the top 30 principal components
pca = PCA(n_components=30)
fit = pca.fit(X_tr)

# Project train/test sets onto the 30 components (PCA fit on training only).
train_img = pca.transform(X_tr)
test_img = pca.transform(X_te)
X_tr = train_img
X_te = test_img
X_tr.shape
```
## Neural Network
### Adapted from Tensorflow neural network tutorial
```
import tensorflow as tf
import math
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
# Set mini batches
def random_mini_batches(X, Y, mini_batch_size = 20, seed = 1):
    """Split (X, Y) into a list of randomly shuffled mini-batches.

    Args:
        X: input data of shape (n_features, m) -- one column per example.
        Y: labels of shape (n_labels, m), aligned column-wise with X.
        mini_batch_size: number of examples per mini-batch.
        seed: random seed, so the shuffling is reproducible per call.

    Returns:
        List of (mini_batch_X, mini_batch_Y) tuples. All batches contain
        `mini_batch_size` examples except possibly a smaller final one.
    """
    # FIX: the `seed` argument was previously ignored, so batch order was
    # not reproducible; seed NumPy's RNG before taking the permutation.
    np.random.seed(seed)

    m = X.shape[1]
    mini_batches = []

    # Shuffle examples (columns), keeping X and Y aligned.
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((Y.shape[0], m))

    # Full-size batches.
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size : k * mini_batch_size + mini_batch_size]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    # Trailing partial batch when m is not a multiple of mini_batch_size.
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    return mini_batches
# create placeholders
def create_placeholders(n_x, n_y):
    """Return float32 placeholders: X of shape (n_x, ?), Y of shape (n_y, ?)."""
    inputs = tf.placeholder(tf.float32, [n_x, None], name="X")
    labels = tf.placeholder(tf.float32, [n_y, None], name="Y")
    return inputs, labels
X, Y = create_placeholders(30, 1)
print("X = " + str(X))
print("Y = " + str(Y))
# one layer
def initialize_parameters():
    """Create the single linear layer: Xavier-initialized W1 (1x30), zero b1."""
    weights = tf.get_variable("W1", [1, 30], initializer=tf.contrib.layers.xavier_initializer())
    bias = tf.get_variable("b1", [1, 1], initializer=tf.zeros_initializer())
    return {"W1": weights, "b1": bias}
tf.reset_default_graph()
with tf.Session() as sess:
parameters = initialize_parameters()
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
# forward propagation
def forward_propagation(X, parameters):
    """Compute the linear output Z1 = W1 @ X + b1."""
    return tf.add(tf.matmul(parameters['W1'], X), parameters['b1'])
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(30, 1)
parameters = initialize_parameters()
Z1 = forward_propagation(X, parameters)
print("Z1 = " + str(Z1))
# compute cost
def compute_cost(Z1, Y):
    """Mean L2 loss between predictions Z1 and targets Y.

    Note: tf.nn.l2_loss returns the scalar 0.5 * sum((pred - target)^2),
    so the reduce_mean is taken over that single value.
    """
    predictions = tf.transpose(Z1)
    targets = tf.transpose(Y)
    return tf.reduce_mean(tf.nn.l2_loss(predictions - targets))
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(30, 1)
parameters = initialize_parameters()
Z1 = forward_propagation(X, parameters)
cost = compute_cost(Z1, Y)
print("cost = " + str(cost))
# R2 score calculator
def R_squared(y, y_pred):
    """Coefficient of determination R^2 = 1 - SS_res / SS_tot, as a tensor."""
    ss_res = tf.reduce_sum(tf.square(tf.subtract(y, y_pred)))
    ss_tot = tf.reduce_sum(tf.square(tf.subtract(y, tf.reduce_mean(y))))
    return tf.subtract(1.0, tf.div(ss_res, ss_tot))
np.std(y_tr)*3
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.0001,
          num_epochs = 2000, minibatch_size = 10, print_cost = True, std = 3):
    """Train the one-layer linear network with Adam on mini-batches.

    Args:
        X_train: training inputs, shape (n_x, m) -- one column per example.
        Y_train: training targets, shape (n_y, m).
        X_test: test inputs, shape (n_x, m_test).
        Y_test: test targets, shape (n_y, m_test).
        learning_rate: Adam learning rate.
        num_epochs: number of passes over the training data.
        minibatch_size: examples per mini-batch.
        print_cost: if True, print cost every 100 epochs, record every 5.
        std: absolute-error threshold for the "accuracy" metric -- a
            prediction counts as correct when |Y - Z1| < std.

    Returns:
        The trained parameters {"W1": ..., "b1": ...} as numpy arrays.
    """
    ops.reset_default_graph()  # start from a fresh graph on every call
    tf.set_random_seed(1)      # make TF-side initialization reproducible
    seed = 3                   # per-epoch seed for mini-batch shuffling
    (n_x, m) = X_train.shape
    n_y = Y_train.shape[0]
    costs = []
    # Build the graph: placeholders -> linear layer -> L2 cost -> Adam step.
    X, Y = create_placeholders(n_x, n_y)
    parameters = initialize_parameters()
    Z1 = forward_propagation(X, parameters)
    cost = compute_cost(Z1, Y)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            epoch_cost = 0.
            num_minibatches = int(m / minibatch_size)
            seed = seed + 1  # reshuffle differently each epoch
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch
                _ , minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                epoch_cost += minibatch_cost / num_minibatches
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)
        # Learning curve (one recorded point per 5 epochs).
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
        parameters = sess.run(parameters)  # fetch trained weights as numpy arrays
        print("Parameters have been trained!")
        # R^2 on train and test sets.
        r2 = R_squared(Y, Z1)
        r2avg = tf.reduce_mean(tf.cast(r2, "float"))
        print("Train R2:", r2avg.eval({X: X_train, Y: Y_train}))
        print("Test R2:", r2avg.eval({X: X_test, Y: Y_test}))
        # "Accuracy": fraction of predictions within `std` of the target.
        correct_prediction = tf.math.less(tf.abs(Y - Z1), std)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Train accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
        # NOTE(review): (Z1 + Z1)/2 is mathematically just Z1 -- the average
        # looks like a leftover; range(17) is hard-coded for this notebook's
        # 17 test samples.
        b = tf.add(Z1, Z1)/2
        plt.plot(range(17), b.eval({X: X_test, Y: Y_test}).T)
        plt.plot(range(17), Y_test.T)
        return parameters
# Reorganize the data into the column-major layout the network expects:
# X is (n_features, m) with one column per example, Y is (1, m).
X_tr = X_tr.T
X_te = X_te.T
y_tr = np.reshape(y_tr,[1,70])
y_te = np.reshape(y_te,[1,17])
parameters = model(X_tr, y_tr, X_te, y_te)
```
## Reference
scikit-learn: https://github.com/scikit-learn/scikit-learn
pandas: https://github.com/pandas-dev/pandas
deep learning example: https://github.com/enggen/Deep-Learning-Coursera
| github_jupyter |
# t-test
___
There may be situations where the standard deviation of the population is unknown, and the sample size is small. In all such cases, we use the T-distribution. This distribution is also called *Student’s T distribution*.
The following are the chief characteristics of the T-distribution:
+ The T-distribution is similar in shape to a normal distribution, except that it is slightly flatter.
+ The sample size is small, generally less than 30.
+ The T-distribution uses the concept of degrees of freedom. The degrees of freedom are the number of observations in a statistical test that can be estimated independently.
Example: Suppose we have three numbers x, y and z, and we know their mean is 5. Then the sum of the numbers must be 5 × 3 = 15. We are free to choose any values for x and y, but not for z: z must be chosen so that the numbers add up to 15 and the mean comes out to 5. So even though we have 3 numbers, we are free to choose only 2 of them; i.e., we have 2 degrees of freedom.
+ As the sample size decreases, the degrees of freedom reduce, or in other words, the certainty with which the population parameter can be predicted from the sample parameter reduces.The degrees of freedom (df) in the T-distribution is the number of samples (n) -1, or in other words, df = n - 1

The formula for the critical test statistic in a one-sample t-test is given by the following
equation:
$$t = \frac{\overline x - \mu}{\frac{s}{\sqrt n}}$$
where $\overline x$ is the sample mean, $\mu$ is the population mean, $s$ is the sample standard deviation and $n$ is the sample size.
## One-sample t-test
A one-sample t-test is similar to a one-sample z-test, with the following differences:
1. The size of the sample is small (<30).
2. The population standard deviation is not known; we use the sample standard deviation(s) to calculate the standard error.
3. The critical statistic here is the t-statistic, given by the following formula:
$$t = \frac{\overline x - \mu}{\frac{s}{\sqrt n}}$$
A coaching institute, preparing students for an exam, has 200 students, and the average score of the students in the practice tests is 80. It takes a sample of nine students and records their scores; it seems that the average score has now increased. These are the scores of these nine students: 80, 87, 80, 75, 79, 78, 89, 84, 88. Conduct a hypothesis test at a 5% significance level to verify if there is a significant increase in the average score.
$H_0:\mu = 80$
$H_1:\mu > 80$
```
import numpy as np
import scipy.stats as stats

# Scores of the nine sampled students.
sample = np.asarray([80, 87, 80, 75, 79, 78, 89, 84, 88])
# One-sample t-test against the hypothesised population mean of 80.
# NOTE(review): scipy's default is a two-sided test while H1 here is
# one-sided (mu > 80); with scipy >= 1.6 one could pass alternative='greater'.
stats.ttest_1samp(sample, popmean=80)
```
Since the p-value is greater than 0.05, we fail to reject the null hypothesis. Hence, we cannot conclude that the average score of the students has increased.
## Two-sample t-test
A two-sample t-test is used when we take samples from two populations, where both the sample sizes are less than 30, and both the population standard deviations are unknown. Formula:
$$t = \frac{\overline x_1 - \overline x_2}{\sqrt{S_p^2(\frac{1}{n_1}+\frac{1}{n_2})}}$$
Where $x_1$ and $x_2$ are the sample means
The degrees of freedom: $df=n_1 + n_2 − 2$
The pooled variance $S_p^2 = \frac{(n_1 -1)S_1^2 + (n_2-1)S_2^2}{n_1+n_2-2}$
A coaching institute has centers in two different cities. It takes a sample of ten students from each center and records their
scores, which are as follows:
|Center A:| 80, 87, 80, 75, 79, 78, 89, 84, 88|
|---------|-----------------------------------|
|Center B:| 81, 74, 70, 73, 76, 73, 81, 82, 84|
Conduct a hypothesis test at a 5% significance level, and verify if there a significant difference in the average scores of the
students in these two centers.
$H_0:\mu_1 = \mu_2$
$H_1:\mu_1 \neq \mu_2$
```
# Scores from the two centers (independent samples of nine students each).
a = np.array([80, 87, 80, 75, 79, 78, 89, 84, 88])  # Center A
b = np.array([81, 74, 70, 73, 76, 73, 81, 82, 84])  # Center B
# Independent two-sample t-test (equal variances assumed by default).
stats.ttest_ind(a, b)
```
We can conclude that there is a significant difference in the average scores of students in the two centers of the coaching
institute since the p-value is less than 0.05
## Two-sample t-test for paired samples
This test is used to compare population means from samples that are dependent on each other, that is, sample values are measured twice using the same test group.
+ A measurement taken at two different times (e.g., pre-test and post-test score with an intervention administered between the two time points)
+ A measurement taken under two different conditions (e.g., completing a test under a "control" condition and an "experimental" condition)
This equation gives the critical value of the test statistic for a paired two-sample t-test:
$$t = \frac{\overline d}{s/\sqrt{n}}$$
Where $\overline d$ is the average of the difference between the elements of the two samples. Both
the samples have the same size, $n$.
s = standard deviation of the differences between the elements of the two samples =
$$\sqrt{\frac{\sum d^2 -((\sum d)^2/ n)}{n -1}}$$
The coaching institute is conducting a special program to improve the performance of the students. The scores of the same set of students are compared before and after the special program. Conduct a hypothesis test at a 5% significance level to verify if the scores have improved because of this program.
```
# Scores of the same nine students before (a) and after (b) the program.
a = np.array([80, 87, 80, 75, 79, 78, 89, 84, 88])
b = np.array([81, 89, 83, 81, 79, 82, 90, 82, 90])
# Paired (dependent-samples) t-test on the per-student score differences.
stats.ttest_rel(a, b)
```
We can conclude, at a 5% significance level, that the average score has improved after the
special program was conducted since the p-value is less than 0.05
| github_jupyter |
```
import sys
# Add the *directory* containing secrets.py to sys.path — sys.path entries
# must be directories, not file paths (appending './secrets.py' itself has
# no effect). Use the local dir, or a mounted S3 bucket, e.g.
# /dbfs/mnt/<path_to_bucket>
sys.path.append('.')
import logging
import math
import os
from influxdb import DataFrameClient
import numpy as np
import matplotlib.mlab as mlab
import pandas as pd
import matplotlib.pyplot as plt
from tabulate import tabulate
from tqdm import tqdm
%matplotlib inline
# Root logger at INFO so progress messages from this notebook are visible.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
# Need to ssh tunnel for this to work
# ssh -L 8086:localhost:8086 aq.byu.edu -N
# NOTE(review): HOST/PORT/USERNAME/PASSWORD/DATABASE are never imported in
# this cell — presumably they come from the local secrets module; confirm a
# `from secrets import *` (or similar) runs before this cell.
influx = DataFrameClient(
    host=HOST,
    port=PORT,
    username=USERNAME,
    password=PASSWORD,
    database=DATABASE,
)
def large_query(influx, measurement, query, total=None, limit=100_000):
    """Stream a large InfluxDB query in pages of ``limit`` rows.

    Parameters
    ----------
    influx : client whose ``.query(str)`` returns a mapping of
        measurement name -> result frame
    measurement : str
        Key to extract from each paged query result.
    query : str
        Base query; ``LIMIT``/``OFFSET`` clauses are appended per page.
    total : int, optional
        Expected total row count — used only to size the progress bar.
    limit : int
        Page size (rows per request).

    Yields
    ------
    One result frame per page.
    """
    if total is not None:
        # Progress bar ticks once per page, not once per row.
        total = math.ceil(total / limit)
    with tqdm(total=total) as pbar:
        offset = 0
        while True:
            new_query = query + " LIMIT {} OFFSET {}".format(limit, offset)
            data = influx.query(new_query)
            # Bug fix: when the row count is an exact multiple of `limit`,
            # the final page is empty and the measurement key is absent,
            # which previously raised KeyError. Stop cleanly instead.
            if measurement not in data:
                break
            data = data[measurement]
            received = len(data)
            pbar.update(1)
            yield data
            offset += limit
            if received != limit:
                # Short page means we have drained the server.
                break
def load_data(filename):
    """Return the full air-quality dataset, downloading it on a cache miss."""
    # Fast path: a previous run already stashed the data on disk.
    if os.path.exists(filename):
        LOGGER.info("Loading cached data...")
        return pd.read_hdf(filename)

    LOGGER.info("Downloading data...")
    # Fetch the row count first so the paged download can show real progress.
    result = influx.query(
        "SELECT COUNT(sequence) FROM air_quality_sensor WHERE time > '2019-10-01' AND time <= now()"
    )
    count = result["air_quality_sensor"].values[0][0]
    pages = large_query(
        influx,
        "air_quality_sensor",
        "SELECT * FROM air_quality_sensor WHERE time > '2019-10-01' AND time <= now()",
        count,
    )
    frames = [page for page in pages]
    all_data = pd.concat(frames, sort=False)
    # Cache the combined frame for the next run.
    all_data.to_hdf(filename, "data")
    return all_data
# Load once and alias: the original called load_data twice on the same
# file, re-reading the HDF cache only to discard the first result when a
# later cell rebinds `data = gold_data`. `gold_data` is the pristine
# reference that each analysis cell resets `data` from.
data = load_data("aq_data.h5")
gold_data = data
LOGGER.info("Done loading data...")
# Day - This is the working boxplot for hybrid only Mongolia deployed sensors
# https://stackoverflow.com/questions/22800079/converting-time-zone-pandas-dataframe
# https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.between_time.html
from IPython.core.debugger import set_trace
# https://matplotlib.org/3.1.3/gallery/statistics/boxplot_color.html
data = gold_data
print(data.index[1])
# Convert the index to local Ulaanbaatar time so the daytime window below
# lines up with local hours.
data.index = data.index.tz_convert('Asia/Ulaanbaatar')
print(data.index[1])
labels = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
plt.xlabel('Dates')
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for sensors')
plt.grid(True)
days = ['05', '06', '07', '08', '09', '10', '11']
# Keep physically-possible readings from the Mongolian deployment only.
data = data[data.pm2_5 >= 0]
data = data[data.location_name == 'Mongolia']
unmodified_gers = ['NJ', 'NS', 'NU', 'OK', 'OR', 'PA', 'RI', 'SD', 'UT', 'VA', 'WI']
# isin() replaces the original 11-way chained `|` comparison.
data = data[data.name.isin(unmodified_gers)]
all_sensors_names = [name for name, _ in data.groupby("name")]
print("All Sensors names:", all_sensors_names)
day_names = data.index.day_name()
print(type(day_names))
print(day_names[:10])
# Daytime window per weekday; 15:00 -> 01:00 deliberately crosses midnight.
# The loop replaces seven copy-pasted monday..sunday assignments.
weekday_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
all_days = [
    data[data.index.day_name() == day].between_time('15:00:00', '01:00:00')['pm2_5']
    for day in weekday_order
]
results = plt.boxplot(all_days, showfliers=False, labels=labels, showmeans=True, meanline=True)
plt.savefig("./weely_hybrid_day_boxplot_pm_2_5/weely_hybrid_day_boxplot_pm_2_5.png", format='png')
print('whiskers: ', [item.get_ydata()[1] for item in results['whiskers']])
print('caps: ', [item.get_ydata()[1] for item in results['caps']])
print('boxes: ', [item.get_ydata()[1] for item in results['boxes']])
print('medians: ', [item.get_ydata()[1] for item in results['medians']])
print('means: ', [item.get_ydata()[1] for item in results['means']])
print('fliers: ', [item.get_ydata()[1] for item in results['fliers']])
data = gold_data
# Day each sensors - This is the working boxplot for hybrid only Mongolia deployed sensors
# https://stackoverflow.com/questions/22800079/converting-time-zone-pandas-dataframe
# https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.between_time.html
import numpy as np
from IPython.core.debugger import set_trace
# https://matplotlib.org/3.1.3/gallery/statistics/boxplot_color.html
data = gold_data
print(data.index[1])
# Convert the index to local Ulaanbaatar time.
data.index = data.index.tz_convert('Asia/Ulaanbaatar')
print(data.index[1])
labels = ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DE', 'FL', 'IA', 'KS', 'KY', 'LA', 'MD', 'ME', 'MI', 'MN', 'MS', 'MT', 'NC', 'NH']
plt.xlabel('Sensors')
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for sensors')
plt.grid(True)
days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
# Keep physically-possible readings from the Mongolian deployment only.
data = data[data.pm2_5 >= 0]
data = data[data.location_name == 'Mongolia']
unmodified_gers = ['NJ', 'NS', 'NU', 'OK', 'OR', 'PA', 'RI', 'SD', 'UT', 'VA', 'WI']
# isin() replaces the original 11-way chained `|` comparison.
data = data[data.name.isin(unmodified_gers)]
all_sensors_names = [name for name, _ in data.groupby("name")]
print("All Sensors names:", all_sensors_names)
day_names = data.index.day_name()
print(type(day_names))
print(day_names[:10])
# One figure per weekday: a box per sensor over the daytime window
# (15:00 -> 01:00 deliberately crosses midnight).
for day in days_of_week:
    plt.xlabel('Sensors for {0}'.format(day))
    plt.ylabel('PM 2.5 Value')
    plt.title('Week PM 2.5 for sensors for {0}'.format(day))
    plt.grid(True)
    sensors_data = list()
    sensors_name = list()
    temp = data[data.index.day_name() == day]
    for name, sensor_data in temp.groupby("name"):
        sensors_name.append(name)
        # Use the group already in hand instead of re-running
        # groupby().get_group(name) for every sensor as before.
        sensors_data.append(sensor_data["pm2_5"].between_time('15:00:00', '01:00:00').to_numpy().tolist())
    results = plt.boxplot(sensors_data, showfliers=False, labels=sensors_name, showmeans=True, meanline=True)
    print('whiskers: ', [item.get_ydata()[1] for item in results['whiskers']])
    print('caps: ', [item.get_ydata()[1] for item in results['caps']])
    print('boxes: ', [item.get_ydata()[1] for item in results['boxes']])
    print('medians: ', [item.get_ydata()[1] for item in results['medians']])
    print('means: ', [item.get_ydata()[1] for item in results['means']])
    print('fliers: ', [item.get_ydata()[1] for item in results['fliers']])
    plt.show()
data = gold_data
# Night - This is the working boxplot for hybrid only Mongolia deployed sensors
# https://stackoverflow.com/questions/22800079/converting-time-zone-pandas-dataframe
# https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.between_time.html
from IPython.core.debugger import set_trace
# https://matplotlib.org/3.1.3/gallery/statistics/boxplot_color.html
data = gold_data
print(data.index[1])
# Convert the index to local Ulaanbaatar time so the night window below
# lines up with local hours.
data.index = data.index.tz_convert('Asia/Ulaanbaatar')
print(data.index[1])
labels = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
plt.xlabel('Dates')
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for sensors')
plt.grid(True)
days = ['05', '06', '07', '08', '09', '10', '11']
# Keep physically-possible readings from the Mongolian deployment only.
data = data[data.pm2_5 >= 0]
data = data[data.location_name == 'Mongolia']
unmodified_gers = ['NJ', 'NS', 'NU', 'OK', 'OR', 'PA', 'RI', 'SD', 'UT', 'VA', 'WI']
# isin() replaces the original 11-way chained `|` comparison.
data = data[data.name.isin(unmodified_gers)]
all_sensors_names = [name for name, _ in data.groupby("name")]
print("All Sensors names:", all_sensors_names)
day_names = data.index.day_name()
print(type(day_names))
print(day_names[:10])
# Night-time window (01:01 -> 07:59 local) per weekday; the loop replaces
# seven copy-pasted monday..sunday assignments.
weekday_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
all_days = [
    data[data.index.day_name() == day].between_time('01:01', '07:59')['pm2_5']
    for day in weekday_order
]
results = plt.boxplot(all_days, showfliers=False, labels=labels, showmeans=True, meanline=True)
plt.savefig("./weely_hybrid_night_boxplot_pm_2_5/weely_hybrid_night_boxplot_pm_2_5.png", format='png')
print('whiskers: ', [item.get_ydata()[1] for item in results['whiskers']])
print('caps: ', [item.get_ydata()[1] for item in results['caps']])
print('boxes: ', [item.get_ydata()[1] for item in results['boxes']])
print('medians: ', [item.get_ydata()[1] for item in results['medians']])
print('means: ', [item.get_ydata()[1] for item in results['means']])
print('fliers: ', [item.get_ydata()[1] for item in results['fliers']])
data = gold_data
# Night each sensors - This is the working boxplot for hybrid only Mongolia deployed sensors
# https://stackoverflow.com/questions/22800079/converting-time-zone-pandas-dataframe
# https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.between_time.html
from IPython.core.debugger import set_trace
# https://matplotlib.org/3.1.3/gallery/statistics/boxplot_color.html
data = gold_data
print(data.index[1])
# Convert the index to local Ulaanbaatar time.
data.index = data.index.tz_convert('Asia/Ulaanbaatar')
print(data.index[1])
labels = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
plt.xlabel('Dates')
plt.ylabel('PM 2.5 Value')
plt.title('Week PM 2.5 for sensors')
plt.grid(True)
days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
# Keep physically-possible readings from the Mongolian deployment only.
data = data[data.pm2_5 >= 0]
data = data[data.location_name == 'Mongolia']
unmodified_gers = ['NJ', 'NS', 'NU', 'OK', 'OR', 'PA', 'RI', 'SD', 'UT', 'VA', 'WI']
# isin() replaces the original 11-way chained `|` comparison.
data = data[data.name.isin(unmodified_gers)]
all_sensors_names = [name for name, _ in data.groupby("name")]
print("All Sensors names:", all_sensors_names)
day_names = data.index.day_name()
print(type(day_names))
print(day_names[:10])
# One figure per weekday: a box per sensor over the night window
# (01:01 -> 07:59 local).
for day in days_of_week:
    plt.xlabel('Sensors for {0}'.format(day))
    plt.ylabel('PM 2.5 Value')
    plt.title('Week PM 2.5 for sensors for {0}'.format(day))
    plt.grid(True)
    sensors_data = list()
    sensors_name = list()
    temp = data[data.index.day_name() == day]
    for name, sensor_data in temp.groupby("name"):
        sensors_name.append(name)
        # Use the group already in hand instead of re-running
        # groupby().get_group(name) for every sensor as before.
        sensors_data.append(sensor_data["pm2_5"].between_time('01:01', '07:59').to_numpy().tolist())
    results = plt.boxplot(sensors_data, showfliers=False, labels=sensors_name, showmeans=True, meanline=True)
    print('whiskers: ', [item.get_ydata()[1] for item in results['whiskers']])
    print('caps: ', [item.get_ydata()[1] for item in results['caps']])
    print('boxes: ', [item.get_ydata()[1] for item in results['boxes']])
    print('medians: ', [item.get_ydata()[1] for item in results['medians']])
    print('means: ', [item.get_ydata()[1] for item in results['means']])
    print('fliers: ', [item.get_ydata()[1] for item in results['fliers']])
    plt.show()
data = gold_data
```
| github_jupyter |
# Math - Algebra
[](https://colab.research.google.com/github/rhennig/EMA6938/blob/main/Notebooks/4.Math_Algebra.ipynb)
(Based on https://online.stat.psu.edu/stat462/node/132/ and https://www.geeksforgeeks.org/ml-normal-equation-in-linear-regression)
Linear algebra is the branch of mathematics concerning linear equations,
$$
a_{1}x_{1}+\cdots +a_{n}x_{n}=b,
$$
linear maps ,
$$
(x_{1},\ldots ,x_{n})\mapsto a_{1}x_{1}+\cdots +a_{n}x_{n},
$$
and their representations in vector spaces and through matrices. Linear algebra is a key foundation to the field of machine learning, from the notations used to describe the equations and operation of algorithms to the efficient implementation of algorithms in code.
## 1. Motivational Example of Linear Regression
We first derive the linear regression model in matrix form. In linear regression, we fit a linear function to a dataset of $N$ data points $(x_i, y_i)$. The linear model is given by
$$
y(x) = \beta_0 + \beta_1 x.
$$
Linear regression desscribes the data by minimizing the least squares deviation between the data and the linear model:
$$
y_i = \beta_0 + \beta_1 x_i + \epsilon _i, \, \text{for }i = 1, \dots , n.
$$
Here the $\epsilon_i$ describes the deviation between the model and data and are assumed to be Gaussian distributed.
Writing out the set of equations for $i = 1, \dots, n$, we obtain $n$ equations:
$$
y_1 = \beta_0 + \beta_1 x_1 + \epsilon _1 \\
y_2 = \beta_0 + \beta_1 x_2 + \epsilon _2 \\
\vdots \\
y_n = \beta_0 + \beta_1 x_n + \epsilon _n \\
$$
We can formulate the above simple linear regression function in matrix notation:
$$
\begin{bmatrix} y_1 \\ y_2 \\ \vdots \\ y_n \end{bmatrix} =
\begin{bmatrix}
1 & x_1 \\
1 & x_2 \\
\vdots \\
1 & x_n
\end{bmatrix}
\begin{bmatrix}
\beta_0 \\
\beta_1
\end{bmatrix} +
\begin{bmatrix}
\epsilon_1 \\
\epsilon_2 \\
\vdots \\
\epsilon_n
\end{bmatrix}.
$$
We can write this matrix equation in a more compact form
$$
{\bf Y} = {\bf X} {\bf \beta} + {\bf \epsilon},
$$
where
- **X** is an n × 2 matrix.
- **Y** is an n × 1 column vector
- **β** is a 2 × 1 column vector
- **ε** is an n × 1 column vector.
The matrix **X** and vector **β** are multiplied together using the techniques of matrix multiplication.
And, the vector **Xβ** is added to the vector **ε** using the techniques of matrix addition.
Let's quickly review matrix algebra, the subject of mathematics that deals with operations of matrices, vectors, and tensors.
## 2. Definition of a matrix
An r × c matrix is a rectangular array of symbols or numbers arranged in r rows and c columns. A matrix is frequently denoted by a capital letter in boldface type.
Here are three examples of simple matrices. The matrix **A** is a 2 × 2 square matrix containing numbers:
$$
{\bf A} = \begin{bmatrix} 23 & 9 \\ 20 & 7 \end{bmatrix}.
$$
The matrix **B** is a 5 × 3 matrix containing numbers:
$$
{\bf B} = \begin{bmatrix}
1 & 40 & 1.9 \\
1 & 65 & 2.5 \\
1 & 71 & 2.8 \\
1 & 80 & 3.4 \\
1 & 92 & 3.1
\end{bmatrix}.
$$
And, the matrix **X** is a 6 × 3 matrix containing a column of 1's and two columns of various x variables:
$$
{\bf X} = \begin{bmatrix}
1 & x_{11} & x_{12} \\
1 & x_{21} & x_{22} \\
1 & x_{31} & x_{32} \\
1 & x_{41} & x_{42} \\
1 & x_{51} & x_{52}
\end{bmatrix}.
$$
## 3. Definition of a Vector and a Scalar
A column vector is an r × 1 matrix, that is, a matrix with only one column. A vector is almost often denoted by a single lowercase letter in boldface type. The following vector **s** is a 3 × 1 column vector containing numbers:
$$
{\bf s} = \begin{bmatrix} 30 \\ 4 \\ 2013 \end{bmatrix}.
$$
A row vector is an 1 × c matrix, that is, a matrix with only one row. The vector **m** is a 1 × 4 row vector containing numbers:
$$
{\bf m} = \begin{bmatrix} 23 & 9 & 20 & 7 \end{bmatrix}.
$$
A 1 × 1 "matrix" is called a scalar, but it's just an ordinary number, such as 24 or $\pi$.
## 4. Matrix Multiplication
Recall the term **Xβ**, which appears in the regression function:
$$
{\bf Y} = {\bf X} {\bf \beta} + {\bf \epsilon}.
$$
This is an example of a matrix multiplication. Since matrices have different numbers of rows and columns, there are some constraints when multiplying matrices together. Two matrices can be multiplied together only if the **number of columns of the first matrix equals the number of rows of the second matrix**.
When you multiply the two matrices:
- the number of rows of the resulting matrix equals the number of rows of the first matrix, and
- the number of columns of the resulting matrix equals the number of columns of the second matrix.
For example, if **A** is a 2 × 3 matrix and **B** is a 3 × 5 matrix, then the matrix multiplication **AB** is possible. The resulting matrix **C** = **AB** has 2 rows and 5 columns. That is, **C** is a 2 × 5 matrix. Note that the matrix multiplication **BA** is not possible.
For another example, if **X** is an n × (k+1) matrix and **β** is a (k+1) × 1 column vector, then the matrix multiplication **Xβ** is possible. The resulting matrix **Xβ** has n rows and 1 column. That is, **Xβ** is an n × 1 column vector.
Now that we know when we can multiply two matrices together, here is the basic rule for multiplying **A** by **B** to get **C** = **AB**:
The entry in the i$^\mathrm{th}$ row and j$^\mathrm{th}$ column of **C** is the inner product — that is, element-by-element products added together — of the i$^\mathrm{th}$ row of **A** with the j$^\mathrm{th}$ column of **B**.
For example:
$$
A = \begin{bmatrix} 1 & 9 & 7 \\ 8 & 1 & 2 \end{bmatrix}
$$
$$
B = \begin{bmatrix} 3 & 2 & 1 & 5 \\ 5 & 4 & 7 & 3 \\ 6 & 9 & 6 & 8 \end{bmatrix}
$$
$$
C = A B =
\begin{bmatrix} 1 & 9 & 7 \\ 8 & 1 & 2 \end{bmatrix}
\begin{bmatrix} 3 & 2 & 1 & 5 \\ 5 & 4 & 7 & 3 \\ 6 & 9 & 6 & 8 \end{bmatrix}
= \begin{bmatrix} 90 & 101 & 106 & 88 \\ 41 & 38 & 27 & 59 \end{bmatrix}
$$
```
# Check the matrix multiplication result in Python using numpy.
import numpy as np

A = np.array([[1, 9, 7],
              [8, 1, 2]])
B = np.array([[3, 2, 1, 5],
              [5, 4, 7, 3],
              [6, 9, 6, 8]])
print("A = \n", A)
print("B = \n", B)
# The `@` operator performs matrix multiplication (same as np.matmul).
# Remember that the operation "*" multiplies two matrices element by element,
# which is not a matrix multiplication.
C = A @ B
print("AB = \n", C)
```
That is, the entry in the first row and first column of **C**, denoted $c_{11}$, is obtained by:
$$
c_{11} = 1(3) + 9(5) +7(6) = 90
$$
And, the entry in the first row and second column of **C**, denoted $c_{12}$, is obtained by:
$$
c_{12} = 1(2) + 9(4) + 7(9) = 101
$$
And, the entry in the second row and third column of C, denoted c23, is obtained by:
$$
c_{23} = 8(1) + 1(7) + 2(6) = 27
$$
## 5. Matrix Addition
Remember the expression **Xβ** + **ε** that appears in the regression function:
$$
{\bf Y} = \bf{X \beta} + {\bf \epsilon}
$$
is an example of matrix addition. Again, there are some restrictions — you cannot just add any two matrices together. Two matrices can be added together only if they have the same number of rows and columns. Then, to add two matrices, simply add the corresponding elements of the two matrices.
For example:
$$
{\bf C} = {\bf A} + {\bf B} =
\begin{bmatrix} 2 & 1 & 3 \\ 4 & 8 & 5 \\ -1 & 7 & 6 \end{bmatrix}
+
\begin{bmatrix} 7 & 9 & 2 \\ 5 & -3 & 1 \\ 2 & 1 & 8 \end{bmatrix}
=
\begin{bmatrix} 9 & 10 & 5 \\ 9 & 5 & 6 \\ 1 & 8 & 14\end{bmatrix}
$$
```
# Check the matrix addition result in Python using the numpy addition operation
import numpy as np
A = np.array([[2, 1, 3], [4, 8, 5], [-1, 7, 6]])
B = np.array([[7, 9, 2], [5, -3, 1], [2, 1, 8]])
print("A = \n", A)
print("B = \n", B)
# Element-wise sum of the two matrices.
C = A + B
# Bug fix: the label previously read "AB", which denotes a product,
# not the sum being printed here.
print("A + B = \n", C)
```
## 6. Least Squares Estimates of Linear Regression Coefficients
As we will discuss later, minimizing the mean squared error of model prediction and data leads to the following equation for the coefficient vector ${\bf \beta}$:
$$
{\bf \beta} = \begin{bmatrix} \beta_0 \\ \vdots \\ \beta_k \end{bmatrix}
= ({\bf X}' {\bf X})^{-1} {\bf X}' {\bf Y},
$$
where
- $({\bf X}' {\bf X})^{-1}$ is the inverse of the ${\bf X}' {\bf X}$ matrix, and
- ${\bf X}'$ is the transpose of the ${\bf X}$ matrix.
Let's remind ourselves of the transpose and inverse of a matrix.
## 7. Transpose of a Matrix
The transpose of a matrix **A** is a matrix, denoted as $\bf A'$ or ${\bf A}^T$, whose rows are the columns of ${\bf A}$ and whose columns are the rows of ${\bf A}$ — all in the same order.
For example, the transpose of the 3 × 2 matrix A:
$$
{\bf A} = \begin{bmatrix} 1 & 4 \\ 7 & 5 \\ 8 & 9 \end{bmatrix}
$$
is the 2 × 3 matrix $\bf A'$:
$$
{\bf A}' = {\bf A}^T = \begin{bmatrix} 1 & 7 & 8 \\ 4 & 5 & 9 \end{bmatrix}
$$
The ${\bf X}$ matrix in the simple linear regression setting is:
$$
{\bf X} = \begin{bmatrix}
1 & x_1 \\
1 & x_2 \\
\vdots \\
1 & x_n
\end{bmatrix}.
$$
Hence, the ${\bf X}'{\bf X}$ matrix in the linear regression is:
$$
{\bf X}'{\bf X} = \begin{bmatrix}
1 & 1 & \dots & 1\\
x_1 & x_2 & & x_n
\end{bmatrix}
\begin{bmatrix}
1 & x_1 \\
1 & x_2 \\
\vdots \\
1 & x_n
\end{bmatrix}
= \begin{bmatrix}
n & \sum_{i=1}^n x_i \\ \sum_{i=1}^n x_i & \sum_{i=1}^n x_i^2
\end{bmatrix}.
$$
## 8. The Inverse of a Matrix
The inverse ${\bf A}^{-1}$ of a **square matrix A** is the unique matrix such that:
$$
{\bf A}^{-1} {\bf A} = {\bf I} = {\bf A} {\bf A}^{-1}.
$$
That is, the inverse of ${\bf A}$ is the matrix ${\bf A}^{-1}$ that you multiply ${\bf A}$ by to obtain the identity matrix ${\bf I}$. Note that the inverse only exists for square matrices.
Now, finding inverses, particularly for large matrices, is a complicated task. We will use numpy to calculate the inverses.
## 9. Solution for Linear Regresssion
We will use a data set from the Python library sklearn for linear regression.
```
# import required modules
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
# Create data set: 100 noisy samples of a single informative feature;
# random_state fixes the draw so the notebook is reproducible.
x,y = make_regression(n_samples=100,n_features=1,n_informative=1,noise = 10,random_state=10)
# Plot the generated data set
plt.figure(figsize=(8, 6))
plt.rcParams['font.size'] = '16'
plt.scatter(x, y, s = 30, marker = 'o')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Scatter Data', fontsize=20)
plt.show()
# Convert the vector of y variables into a column vector
y=y.reshape(100,1)
# Create matrix X by adding x0=1 to each instance of x and taking the transpose
# (first column of 1's multiplies the intercept beta_0).
X = np.array([np.ones(len(x)), x.flatten()]).T
print("Matrix X =\n", X[1:5, :], "\n ...\n")
# Determining the coefficients of linear regression
# by calculating the inverse of (X'X) and multiplying it by X'Y.
# This is the normal equation: beta = (X'X)^{-1} X'Y.
XTX = np.matmul(X.T, X)
print("Matrix X'X =\n", XTX, "\n")
XTXinv = np.linalg.inv(XTX)
print("Inverse of (X'X) =\n", XTXinv, "\n")
beta = np.matmul(XTXinv, np.matmul(X.T, y))
# Display best values obtained.
print("Regression coefficients\n β0 = ", beta[0,0], "\n β1 = ", beta[1,0])
# Predict the values for given data instance.
# Two x-values spanning the data range; the fitted line is drawn through them.
x_sample=np.array([[-2.5],[3]])
x_sample_new=np.array([np.ones(len(x_sample)),x_sample.flatten()]).T
y_predicted = np.matmul(x_sample_new, beta)
# Plot the generated data set
plt.figure(figsize=(8, 6))
plt.rcParams['font.size'] = '16'
plt.scatter(x, y, s = 30, marker = 'o')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Scatter Data', fontsize=20)
plt.plot(x_sample, y_predicted, color='orange')
plt.show()
# Verification using scikit learn function for linear regression
# (should reproduce beta_0 and beta_1 from the normal equation above).
from sklearn.linear_model import LinearRegression
lr = LinearRegression() # Object
lr.fit(x, y) # Fit method.
# Print obtained theta values.
print("β0 = ", lr.intercept_[0], "\nβ1 = ", lr.coef_[0,0])
```
## 10. Practice
The hat matrix converts values from the observed variable $y_i$ into the estimated values $\hat y$ obtained with the least squares method. The hat matrix, $\bf H$, is given by
$$
{\bf H} = {\bf X} ({\bf X}' {\bf X})^{-1} {\bf X}'
$$
Calculate the hat matrix, $\bf H$, and show that you obtain the predicted $y$-values by creating a plot.
```
# Calculate the hat matrix
# Apply the hat matrix to the y-values to generate the y predictions
# Plot the predicted and original y values vs. the x values
```
Knowing the hat matrix, $\bf H$, we can also express the $R^2$ value for the linear regression using a matrix equation:
$$
R^2 = 1 - \frac{{\bf y}'({\bf 1} - {\bf H}){\bf y}}{{\bf y}'({\bf 1} - {\bf M}){\bf y}}
$$
where $\bf 1$ is the identity matrix,
$$
{\bf M} = {\bf l}({\bf l}'{\bf l})^{-1}{\bf l}',
$$
and ${\bf l}$ is a column vector of 1's.
Calculate the $R^2$ value using the above matrix form of the equations.
```
# Create a column vector of 1's with length 100
# Calculate the matrix M
# Calculate R2
```
| github_jupyter |
# An intro to Python & Jupyter notebooks
This is a jupyter notebook! It is actually running in your browser and translating it into Python! Super neat. It allows us to write text AND code in the same place. For example, this is a markdown cell where I can write myself notes.
First we'll take a tour of jupyter notebooks:
* run a cell:
```
print('Hi there')
```
A few ways to run a cell:
* with the ">" button
* with Cell -> Run Cells
* my fav: SHIFT+RET
Note: colors automatically change in jupyter notebook -- jupyter notebook is trying to help you with helpful coloring!
A tour of markdown:
* Headers with "#"
* Lists
* *italics* and **bold**
Go into Markdown with Cell -> Cell Type -> Markdown or my fav: ESC+m
## Comments
Sometimes we want to leave ourselves (or others!) some notes in our code, we can do this with the `#` symbols:
```
# this bit of code doesn't get run
```
## Data types and expressions
```
print(1) # integers -- look another comment!
print(1.5) # float
print('Hello there!') # a string
```
We can figure out what "type" of object we are dealing with by using the `type` function (more on functions in the next few days):
```
type(1)
type(1.5)
type('Hello there!')
```
The last one "str" means string.
We can use Python like a big fancy calculator too:
```
5+6
5.5+6
32/8
```
Note: in the above one even though we used two integers we got out a floating number -- this is because Python automatically converts integers to floats on division so that it won't give you the wrong answer on something like:
```
32/5
```
## Variables and assignments
```
# single assignment
thing = 1
# so thing is my variable name, and 1 is the value.
# thing is now associated with the value of 1
# when I print out 1 and thing, they would appear the same
# the object type is also going to be the same,
# because the variable inherits the type of its contents
print(1)
print(thing)
print( type(1) )
print( type(thing) )
# multiple assignment
# You won't use this too often, but you'll see it when we start
# working with more complex data structures
# copy this code into PyCharm and play around with it a bit to explore
x = 'fizzy'
y = 'pop'
a, b = x, y
print(x, y)
print(a, b)
```
## More on strings
String variables come with some interesting things we can do with them:
```
myText = 'I am some text, pretty neat!'
print(myText)
```
We can grab individual letters by their *index*:
```
print(myText[1]) # here I'm grabbing the letter at index 1 by using "[1]"
print(myText[0])
```
Note I got a space when I grabbed the index 1, but actually got the first element for an index of 0.
This is because Python "indexes starting at zero". Other codes start at 1, but Python starts at 0.
We can grab parts of string by "slicing":
```
print(myText[0:5])
```
How long is our string?
```
print(len(myText))
```
Using the "len" function tells us that our bit of text is 28 characters long.
Depending on the type of variable we are using there are sometimes functions that are "associated" with each variable that can be accessed with a "."
For example, if we have a string variable, we can make it all upper case with:
```
print(myText.upper())
```
We won't be using too many of these, but if you've ever heard of "object-oriented programming" this is what is being referred to -- this function `upper()` is *associated* with the *object* `myText`.
Some other Python-specific things, both ' and " are fine:
```
print('Hi!')
print("Hi!")
```
You can also combine them in weird ways:
```
print(" ' ")
print('"')
```
Also, we can do big bunches of text with three ":
```
largeText = """
So much text!
I have so many things to say.
I can't think of them right now, but I'm sure they are there in my brain.
"""
print(largeText)
```
## Lists
Another useful type of variable is the list type.
```
myList = [1, 2, 3, 4, 5]
print(myList)
```
We can index lists a lot like strings:
```
print(myList[0])
```
and slice as well:
```
print(myList[0:3])
```
What do you think the following does?
```python
print(myList[2:])
```
Or what about:
```python
print(myList[:-2])
```
Lists are a little weird because you can "mix" different types of things:
```
mySecondList = [1, 3, 5.5, 'hi there']
print(mySecondList)
```
Lists also have some "special" functions, like let's say we want to add another element to our list -- we do this with the `append` function that *belongs* to whatever list we are dealing with:
```
mySecondList.append('another element!')
print(mySecondList)
```
## Numpy arrays
One thing that we might want to do is do math to an element of a list:
```
myList = [1, 2, 3]
myList[0] = myList[0] + 5
print(myList)
```
Naturally, an extension of this would be to add 5 to *all* elements of the list, let's see what happens when we try to do this:
```
myList = myList + 5
```
Whoa -- what happened?? This has to do with the type of object that the list is in Python. (If you go into CS you'll learn more about this). For our purposes we can get around this by using a set of functions from the `numpy` library.
To use these functions we have to load in this library since it doesn't come "pre-packaged" with Python:
```
import numpy
```
Did you get an error? Try:
```
#!conda install -c anaconda numpy ---yes
```
This *installs* the package with Anaconda. You can also do this with the GUI!
```
print(numpy)
```
Ok, so that might be a little funny looking, but this is basically telling us that all the code for the `numpy` library is stored in site-packages.
Let's make an array with numpy:
```
myArray = numpy.array([4.1, 4.4, 3.5])
print(myArray)
```
We can then do stuff we want to!
```
myArray = myArray + 5
print(myArray)
```
You can also import packages with a "shorthand". The one you see most with numpy is np:
```
import numpy as np
```
... then we can do:
```
myArray = np.array([5, 6, 7.7])
print(myArray)
```
| github_jupyter |
# N-grams
## Overview
An *n-gram* -- in the context of parsing natural languages such as English -- is a sequence of *n* consecutive *tokens* (which we might define as characters separated by whitespace) from some passage of text. Based on the following passage:
> I really really like cake.
We have the following 2-grams:
[('I', 'really'), ('really', 'really'), ('really', 'like'), ('like', 'cake.')]
And the following 3-grams:
[('I', 'really', 'really'),
('really', 'really', 'like'),
('really', 'like', 'cake.')]
(I omit a 1-gram listing because it would merely be a list of all tokens in the original text.)
Among other things, n-grams are useful for describing the vocabulary of and statistical correlation between tokens in a sample body of text (e.g., as taken from a book). We can use an n-gram model to determine the likelihood of finding
a particular sequence of words after another. This information, in turn, can be used to generate passages of text that statistically mimic the sample.
We can convert the above 3-gram list into the following lookup structure (i.e., a dictionary mapping strings to lists of 2-tuples), where the first token of each n-gram maps to all sequences that follow it in the text:
{'I': [('really', 'really')],
'really': [('really', 'like'), ('like', 'cake.')]}
We can now generate passages of text using the following method:
1. Select a random key and use it as the start token of the passage. It will also serve as the current token for the next step.
2. Select a random tuple from the list associated with the current token and append the sequence to the passage. The last token of the selected sequence will be the new current token.
3. If the current token is a key in the dictionary then simply repeat step 2, otherwise select another random key from the dictionary as the current token and append it to the passage before repeating step 2.
E.g., we might start by selecting `'I'` in step (1), which gives us `('really', 'really')` as our only choice in (2). The second `'really'` in that tuple is the new current token (which is a valid key), which takes us back to (2) and gives us a choice between two tuples. If we choose `('like', 'cake.')`, then we have `'cake.'` as our new current token --- it is not a key in the map, however, so we'd have to choose a new random key if we wanted to generate a longer passage. Either way, the passage we've generated thus far is `'I really really like cake.'` (which also happens to be the original passage).
Here's a lengthier passage that could be generated from the 3-gram dictionary above -- note that for clarity I've added `*`'s every time a new random key is selected (i.e., when the previous token isn't a key in the dictionary):
> \* really like cake. \* I really really really like \* really like cake. \* I really really really like \* really
This gets more interesting when we build n-gram dictionaries from lengthier bodies of text. For instance, the following text was generated (with a little programmed embellishment for prettier capitalization and punctuation) from a 3-gram dictionary extracted from Romeo's famous balcony monologue:
> Lamp her eyes were there they in their spheres till they in her eyes in all the fairest stars in all the heaven having some business do wear it is my love! O it is envious her cheek would through the heaven having some business do entreat her eyes were there they in their spheres till they in her eyes to.
For reference, here is the dictionary entry for the token `'her'` used to generate the above:
'her': [('maid', 'art'),
('maid', 'since'),
('vestal', 'livery'),
('eyes', 'to'),
('eyes', 'were'),
('head?', 'The'),
('cheek', 'would'),
('eyes', 'in'),
('cheek', 'upon'),
('hand!', 'O')],
If you haven't already guessed it, your assignment is to implement a function that constructs an n-gram dictionary from a list of strings (tokens), and another that returns a passage of text generated from a given n-gram dictionary.
## Implementation Details
Before you start working on the aforementioned functions, it's important to consider how we'll be parsing passages for tokens.
Here's the body of Romeo's balcony soliloquy:
```
ROMEO_SOLILOQUY = """
But, soft! what light through yonder window breaks?
It is the east, and Juliet is the sun.
Arise, fair sun, and kill the envious moon,
who is already sick and pale with grief,
That thou her maid art far more fair than she:
be not her maid, since she is envious;
her vestal livery is but sick and green
and none but fools do wear it; cast it off.
It is my lady, O, it is my love!
O, that she knew she were!
She speaks yet she says nothing: what of that?
Her eye discourses; I will answer it.
I am too bold, 'tis not to me she speaks:
two of the fairest stars in all the heaven,
having some business, do entreat her eyes
to twinkle in their spheres till they return.
What if her eyes were there, they in her head?
The brightness of her cheek would shame those stars,
as daylight doth a lamp; her eyes in heaven
would through the airy region stream so bright
that birds would sing and think it were not night.
See, how she leans her cheek upon her hand!
O, that I were a glove upon that hand,
that I might touch that cheek!"""
```
Using the string's built-in `split` method --- previously mentioned in class --- along with `lower`, we can derive from the passage a list of tokens.
```
toks = [t.lower() for t in ROMEO_SOLILOQUY.split()]
toks[:8]
```
We could do more interesting things (such as separating out punctuation), but we'll keep our parser simple. For the sake of consistency, we'll rely on this fairly straightforward approach to parsing. Onwards!
### `compute_ngrams`
Your first task is to write `compute_ngrams`, which will take a list of tokens, a value `n` indicating the n-gram length (e.g., 3 for 3-grams), and return an n-gram dictionary. The keys in the returned dictionary should all be strings, whose values will be lists of one or more tuples. Note that even in the case of `n`=2 (which would be the minimum value) the dictionary should map strings to lists of 1-tuples (i.e., instead of to lists of individual tokens).
```
def compute_ngrams(toks, n=2):
    """Returns an n-gram dictionary based on the provided list of tokens.

    Each key is a token from `toks`; its value is the list of
    (n-1)-tuples of the tokens that immediately follow it, in order
    of appearance in the original token list.
    """
    lookup = {}
    # every n-gram starts at an index that leaves n-1 tokens after it
    last_start = len(toks) - n + 1
    for start in range(last_start):
        head = toks[start]
        tail = tuple(toks[start + 1:start + n])
        if head not in lookup:
            lookup[head] = []
        lookup[head].append(tail)
    return lookup
# NOTE(review): leftover scratch lines — they only exercise dict.get and are
# unrelated to the n-gram assignment; safe to delete from the final notebook
n = {'fd':1,'g':3}
n['fd'] = n.get(n['fd'], 1) + 1
n['fd']
simple_toks = [t.lower() for t in 'I really really like cake.'.split()]
compute_ngrams(simple_toks, n=2)
```
And now for some simple tests:
```
# (5 points)
from unittest import TestCase
tc = TestCase()
simple_toks = [t.lower() for t in 'I really really like cake.'.split()]
compute_ngrams(simple_toks)
tc.assertEqual(compute_ngrams(simple_toks),
{'i': [('really',)], 'like': [('cake.',)], 'really': [('really',), ('like',)]})
tc.assertEqual(compute_ngrams(simple_toks, n=3),
{'i': [('really', 'really')],
'really': [('really', 'like'), ('like', 'cake.')]})
romeo_toks = [t.lower() for t in ROMEO_SOLILOQUY.split()]
dct = compute_ngrams(romeo_toks, n=4)
tc.assertEqual(dct['but'], [('sick', 'and', 'green'), ('fools', 'do', 'wear')])
tc.assertEqual(dct['it'],
[('is', 'the', 'east,'),
('off.', 'it', 'is'),
('is', 'my', 'lady,'),
('is', 'my', 'love!'),
('were', 'not', 'night.')])
```
I've also placed the entire text of Peter Pan (courtesy of [Project Gutenberg][]) on the server, to be used to stress test your function just a bit. Evaluate the following cell to read the text of the book into `peter_pan_text`.
If you're not on the course server, you can uncomment the line to read the text directly from the Project Gutenberg website and comment out the lines which access the file for testing.
[Project Gutenberg]: http://gutenberg.org
```
import urllib.request
PETER_PAN_FILENAME = '/srv/cs331/peterpan.txt'
PETER_PAN_URL = 'https://www.gutenberg.org/files/16/16-0.txt'
# if you're not on the course server, uncomment the line below to read the text over the web
peter_pan_text = urllib.request.urlopen(PETER_PAN_URL).read().decode()
# if you uncommented the line above, comment out the two lines below
# with open(PETER_PAN_FILENAME) as infile:
# peter_pan_text = infile.read()
chapt1_start = peter_pan_text.index('All children')
print(peter_pan_text[chapt1_start:chapt1_start+1000])
```
Time for some larger test cases!
```
# (5 points)
from unittest import TestCase
tc = TestCase()
pp_toks = [t.lower() for t in peter_pan_text.split()]
dct = compute_ngrams(pp_toks, n=3)
tc.assertEqual(dct['crocodile'],
[('passes,', 'but'),
('that', 'happened'),
('would', 'have'),
('was', 'in'),
('passed', 'him,'),
('is', 'about'),
('climbing', 'it.'),
('that', 'was'),
('pass', 'by'),
('and', 'let'),
('was', 'among'),
('was', 'waiting')])
tc.assertEqual(len(dct['wendy']), 202)
tc.assertEqual(len(dct['peter']), 243)
```
### Random selection
One more thing before you start work on generating passages from an n-gram dictionary: we need a way to choose a random item from a sequence.
The [`random.choice` function](https://docs.python.org/3/library/random.html#random.choice) provides just this functionality. Consider (and feel free to play with) the following examples --- you should, at the very least, evaluate the cell a few separate times to see the results:
```
import random
print(random.choice(['lions', 'tigers', 'bears']))
print(random.choice(range(100)))
print(random.choice([('really', 'like'), ('like', 'cake')]))
```
Note that a separate tutorial on random number generators (and other [`random` module](https://docs.python.org/3/library/random.html) APIs) will be posted separately, but for now just understanding how to use `random.choice` should be sufficient for this assignment.
### `gen_passage`
Finally, you're ready to implement `gen_passage`, which will take an n-gram dictionary and a length for the passage to generate (as a token count).
As described earlier, it will work as follows:
1. Select a random key from the dictionary and use it as the start token of the passage. It will also serve as the current token for the next step.
2. Select a random tuple from the list associated with the current token and append the sequence to the passage. The last token of the selected sequence will be the new current token.
3. If the current token is a key in the dictionary then simply repeat step 2, otherwise select another random key from the map as the current token and append it to the passage before repeating step 2.
You will use `random.choice` whenever a random selection needs to be made. In order for your results to be reproduceable, be sure to sort the dictionary's keys (which, recall, are in no discernible order) before selecting a random one, like this (assuming `ngram_dict` is the dictionary):
random.choice(sorted(ngram_dict.keys()))
```
# ( 5 points)
import random
def gen_passage(ngram_dict, length=100):
    """Generate a passage of `length` tokens from an n-gram dictionary.

    Procedure (as specified in the assignment):
      1. start from a random key,
      2. append a random successor tuple of the current token,
      3. if the last appended token is not a key, pick a fresh random key
         and append it, then continue from step 2.

    Keys are sorted before each random draw so seeded runs are reproducible.

    Parameters
    ----------
    ngram_dict : dict[str, list[tuple]]
        maps a token to the list of successor tuples (from compute_ngrams)
    length : int
        number of tokens in the returned passage

    Returns
    -------
    str
        the generated tokens joined by single spaces
    """
    # first word
    token = random.choice(sorted(ngram_dict.keys()))
    passage = [token]
    # '<' rather than '!=': a multi-token tuple can overshoot `length`,
    # and the original '!=' test would then loop forever
    while len(passage) < length:
        # append the random words that follow the current token
        passage.extend(random.choice(ngram_dict[token]))
        if passage[-1] in ngram_dict:
            token = passage[-1]
        else:
            # dead end: pick a fresh random key and append it as ONE token
            # (the original `passage += token` extended the list with the
            # key's individual characters, corrupting the passage)
            token = random.choice(sorted(ngram_dict.keys()))
            passage.append(token)
    # trim any overshoot so exactly `length` tokens are returned
    return ' '.join(passage[:length])
```
For the following test cases to work, it is *critical* that you do not invoke `random.choice` more than is absolutely necessary, and only as prescribed in the steps described above!
Note that in addition to the automated test cases, we'll also be manually grading your code above.
```
# (5 points)
tc = TestCase()
random.seed(1234)
simple_toks = [t.lower() for t in 'I really really like cake.'.split()]
tc.assertEqual(gen_passage(compute_ngrams(simple_toks), 10),
'like cake. i really really really really like cake. i')
random.seed(1234)
romeo_toks = [t.lower() for t in ROMEO_SOLILOQUY.split()]
tc.assertEqual(gen_passage(compute_ngrams(romeo_toks), 10),
'too bold, \'tis not night. see, how she leans her')
```
| github_jupyter |
# Wavelets and sweeps
This notebook looks at the convolutional model of a seismic trace — first with an impulse-type wavelet, such as a Ricker — then with a simulated Vibroseis sweep.
First, the usual preliminaries.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## Load geophysical data
```
import lasio
l30 = lasio.read('../data/L-30.las')
```
Compute acoustic impedance.
We'll take every 6th point as a (very) approximate basis change, from 0.1524 m to about 1 ms.
```
dt = l30.curves['DT'].data
rhob = l30.curves['RHOB'].data
ai = rhob * 1000000/dt
ai = ai[::6]
plt.figure(figsize=(16, 2))
plt.plot(ai)
plt.show()
```
Compute the reflection coefficients.
```
# Reflection coefficients from adjacent acoustic-impedance samples:
# rc[i] = (ai[i+1] - ai[i]) / (ai[i+1] + ai[i])
rc = (ai[1:] - ai[:-1]) / (ai[1:] + ai[:-1])
# Missing log samples yield NaNs; zero them so the later convolutions stay finite
rc[np.isnan(rc)] = 0
```
## Impulsive wavelet
Convolve with a wavelet.
```
def ricker(f, length=0.128, dt=0.001):
    """Return (t, y): a Ricker (Mexican-hat) wavelet of peak frequency f Hz.

    The time axis runs from -length/2 up to (length - dt)/2 in steps of
    dt, so the wavelet's peak amplitude of 1.0 sits at t = 0.
    """
    t = np.arange(-length/2, (length-dt)/2, dt)
    # hoist the common (pi * f * t)^2 term used in both factors
    arg = (np.pi**2)*(f**2)*(t**2)
    y = (1.0 - 2.0 * arg) * np.exp(-arg)
    return t, y
f = 30
t, w = ricker(f)
plt.plot(t, w)
plt.show()
syn = np.convolve(rc, w, mode='same')
plt.figure(figsize=(16,2))
plt.plot(syn)
plt.show()
```
If we are recording with dynamite or even an airgun, this might be an acceptable model of the seismic. But if we're using Vibroseis, things get more complicated.
## The Vibroseis sweep
[Read more about vibroseis 'wavelets' and deconvolution](http://csegrecorder.com/articles/view/vibroseis-deconvolution-an-example-from-pikes-peak-saskatchewan).
```
from scipy.signal import chirp
duration = 3
dt = 0.001
t = np.arange(0, duration , dt)
sweep = chirp(t, 4, duration, 60)
plt.figure(figsize=(16,2))
plt.plot(sweep, 'g')
plt.show()
```
The sudden displacement is unphysical. Let's apply a [Tukey window](https://en.wikipedia.org/wiki/Window_function#Tukey_window) so the sweep can ramp up and down in amplitude.
```
from scipy.signal import tukey
window = tukey(sweep.size, alpha=0.2) # alpha=0.1: taper 10% of the signal.
sweep *= window
plt.figure(figsize=(16,2))
plt.plot(sweep, 'g')
plt.show()
```
## Model a field recording
To see what we get from the earth, we can convolve this with the reflectivity:
```
field = np.convolve(rc, sweep)
plt.figure(figsize=(16,2))
plt.plot(field, 'g')
plt.show()
```
Nasty.
## Correlation
It turns out that we can correlate this recorded trace with the sweep to get the 'impulsive' equivalent. For this reason, the recorded sweep is usually stored in the SEGY file, perhaps as an extra trace.
Note that this operation is the same as convolving with the time-reversed sweep:
correlated = np.convolve(field, sweep[::-1], mode='valid')
But we'll use correlation:
```
correlated = np.correlate(field, sweep, mode='valid')
plt.figure(figsize=(16,2))
plt.plot(correlated, 'g')
plt.show()
```
## The Klauder wavelet
When we want to forward model a Vibroseis trace, we can produce an impulsive wavelet that is equivalent to the sweep by correlating it with itself. (Autocorrelation is the same as convolution with the sweep's own time-reverse.)
```
wavelet = np.correlate(sweep, sweep, mode='same')
```
As before, this is the same as convolution with its time-reversed self:
```
np.allclose(wavelet, np.convolve(sweep, sweep[::-1], mode='same'))
plt.plot(wavelet, 'g')
plt.show()
```
This wavelet is way too long. Let's crop it and window:
```
wavelet = wavelet[1200:1800]
wavelet *= np.hamming(600)
plt.plot(wavelet, 'g')
plt.show()
wavelet = wavelet / np.amax(wavelet)
syn_vibe = np.convolve(rc, wavelet, mode='same')
```
We can compare this trace to the one we 'recorded' with a Ricker wavelet.
```
plt.figure(figsize=(16,2))
plt.plot(syn, alpha=0.5)
plt.plot(syn_vibe)
plt.show()
```
Let's zoom in a bit so we can see it:
```
plt.figure(figsize=(16,2))
plt.plot(syn[2000:2500], alpha=0.5)
plt.plot(syn_vibe[2000:2500])
plt.show()
```
<hr />
<div>
<img src="https://avatars1.githubusercontent.com/u/1692321?s=50"><p style="text-align:center">© Agile Geoscience 2016</p>
</div>
| github_jupyter |
```
import syft as sy
```
# Part 1: Launch a Duet Server
```
duet = sy.launch_duet(loopback=True)
```
# Part 2: Upload data to Duet Server
```
import torch as th
# Data owner has age data of 6 people
age_data = th.tensor([25, 32, 49, 65, 88, 22])
# Data owner names the data with tag "ages"
age_data = age_data.tag("ages")
# Data owner adds a description to the tensor where age data is located
age_data = age_data.describe("This is a list of ages of 6 people.")
# Finally the data owner UPLOADS THE DATA to the Duet server and makes it pointable
# by data scientists. NOTE: The data is still on the Data Owners machine and cannot be
# viewed or retrieved by any Data Scientists without permission.
age_data_pointer = age_data.send(duet, pointable=True)
# Once uploaded, the data owner can see the object stored in the tensor
duet.store
# To see it in a human-readable format, data owner can also pretty-print the tensor information
duet.store.pandas
```
### <img src="https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png" alt="he-black-box" width="100"/> Checkpoint 1 : Now STOP and run the Data Scientist notebook until the same checkpoint.
# Part 3: Response to requests coming from Data Scientist
```
# To check if there is a request from the Data Scientist, run this command occasionally
# or when there is a notification of new request in the DUET LIVE STATUS
duet.requests.pandas
# There's a new request! Let's check what it says.
duet.requests[0].request_description
# Doesn't look like a convincing request :( Have to deny it sorry!
duet.requests[0].deny()
# No more request
duet.requests.pandas
```
### <img src="https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png" alt="he-black-box" width="100"/> Checkpoint 2 : Now STOP and run the Data Scientist notebook until the same checkpoint.
```
# Oh there's a new request!
duet.requests.pandas
# Let's check what it says.
assert (
duet.requests[0].request_description
== "I am data scientist and I need to know the average age for my analysis."
)
duet.requests[0].request_description
# The request looks reasonable. Should be accepted :)
duet.requests[0].accept()
```
### Add request handlers
```
# You can automatically accept or deny requests, which is great for testing.
# We have more advanced handlers coming soon.
duet.requests.add_handler(action="accept")
```
### <img src="https://github.com/OpenMined/design-assets/raw/master/logos/OM/mark-primary-light.png" alt="he-black-box" width="100"/> Checkpoint 3 : Well done!
| github_jupyter |
```
from abc import ABCMeta, abstractmethod, abstractproperty
import enum
import numpy as np
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
import pandas
from matplotlib import pyplot as plt
%matplotlib inline
```
## Bernoulli Bandit
We are going to implement several exploration strategies for simplest problem - bernoulli bandit.
The bandit has $K$ actions. Action produce 1.0 reward $r$ with probability $0 \le \theta_k \le 1$ which is unknown to agent, but fixed over time. Agent's objective is to minimize regret over fixed number $T$ of action selections:
$$\rho = T\theta^* - \sum_{t=1}^T r_t$$
Where $\theta^* = \max_k\{\theta_k\}$
**Real-world analogy:**
Clinical trials - we have $K$ pills and $T$ ill patient. After taking pill, patient is cured with probability $\theta_k$. Task is to find most efficient pill.
A research on clinical trials - https://arxiv.org/pdf/1507.08025.pdf
```
class BernoulliBandit:
    """K-armed Bernoulli bandit: arm k pays 1.0 with fixed probability _probs[k]."""

    def __init__(self, n_actions=5):
        # hidden per-arm success probabilities, drawn uniformly at random
        self._probs = np.random.random(n_actions)

    @property
    def action_count(self):
        """Number of arms available to the agent."""
        return len(self._probs)

    def pull(self, action):
        """Play arm `action`: 1.0 with probability _probs[action], else 0.0."""
        success = np.random.random() <= self._probs[action]
        return 1.0 if success else 0.0

    def optimal_reward(self):
        """ Used for regret calculation
        """
        return np.max(self._probs)

    def step(self):
        """ Used in nonstationary version
        """
        pass

    def reset(self):
        """ Used in nonstationary version
        """
class AbstractAgent(metaclass=ABCMeta):
    """Base class for bandit agents: tracks per-arm success/failure counts."""

    def init_actions(self, n_actions):
        """Reset the agent's statistics for a bandit with `n_actions` arms."""
        self._successes = np.zeros(n_actions)
        self._failures = np.zeros(n_actions)
        self._total_pulls = 0

    @abstractmethod
    def get_action(self):
        """
        Get current best action
        :rtype: int
        """
        pass

    def update(self, action, reward):
        """
        Observe reward from action and update agent's internal parameters
        :type action: int
        :type reward: int
        """
        self._total_pulls += 1
        # a reward of 1 counts as a success for the pulled arm, anything
        # else as a failure
        counts = self._successes if reward == 1 else self._failures
        counts[action] += 1

    @property
    def name(self):
        """Human-readable agent label, used as a key in the regret tables."""
        return self.__class__.__name__
class RandomAgent(AbstractAgent):
    """Baseline agent: ignores all feedback and pulls a uniformly random arm."""

    def get_action(self):
        n_arms = len(self._successes)
        return np.random.randint(0, n_arms)
```
### Epsilon-greedy agent
> **for** $t = 1,2,...$ **do**
>> **for** $k = 1,...,K$ **do**
>>> $\hat\theta_k \leftarrow \alpha_k / (\alpha_k + \beta_k)$
>> **end for**
>> $x_t \leftarrow argmax_{k}\hat\theta$ with probability $1 - \epsilon$ or random action with probability $\epsilon$
>> Apply $x_t$ and observe $r_t$
>> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
> **end for**
Implement the algorithm above in the cell below:
```
class EpsilonGreedyAgent(AbstractAgent):
    """Epsilon-greedy: explore a random arm with prob. epsilon, else exploit.

    NOTE(review): as in the original, an arm that was never pulled gives
    0/0 -> nan in the empirical mean; np.argmax then returns the first
    nan arm. Behaviour preserved verbatim.
    """

    def __init__(self, epsilon=0.01):
        self._epsilon = epsilon

    def get_action(self):
        n_arms = len(self._successes)
        if np.random.random() < self._epsilon:
            # explore: uniformly random arm
            return np.random.randint(n_arms)
        # exploit: arm with the highest empirical success rate
        trials = self._successes + self._failures
        return np.argmax(self._successes / trials)

    @property
    def name(self):
        return self.__class__.__name__ + "(epsilon={})".format(self._epsilon)
```
### UCB Agent
The epsilon-greedy strategy has no preference among actions. It would be better to select among actions that are uncertain or have the potential to be optimal. One can come up with the idea of an index for each action that represents optimality and uncertainty at the same time. One efficient way to do it is to use the UCB1 algorithm:
> **for** $t = 1,2,...$ **do**
>> **for** $k = 1,...,K$ **do**
>>> $w_k \leftarrow \alpha_k / (\alpha_k + \beta_k) + \sqrt{2log\ t \ / \ (\alpha_k + \beta_k)}$
>> **end for**
>> $x_t \leftarrow argmax_{k}w$
>> Apply $x_t$ and observe $r_t$
>> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
> **end for**
__Note:__ in practice, one can multiply $\sqrt{2log\ t \ / \ (\alpha_k + \beta_k)}$ by some tunable parameter to regulate the agent's optimism and willingness to abandon non-promising actions.
More versions and optimality analysis - https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf
```
class UCBAgent(AbstractAgent):
    """UCB1: pick the arm maximising empirical mean + exploration bonus.

    NOTE(review): as in the original, unpulled arms yield 0/0 -> nan and
    the very first call sees log(0) -> -inf; behaviour preserved verbatim.
    """

    def get_action(self):
        trials = self._successes + self._failures
        mean = self._successes / trials
        # UCB1 exploration bonus: sqrt(2 ln t / n_k)
        bonus = np.sqrt(2 * np.log(self._total_pulls) / trials)
        return np.argmax(mean + bonus)

    @property
    def name(self):
        return self.__class__.__name__
```
### Thompson sampling
UCB1 algorithm does not take into account actual distribution of rewards. If we know the distribution - we can do much better by using Thompson sampling:
> **for** $t = 1,2,...$ **do**
>> **for** $k = 1,...,K$ **do**
>>> Sample $\hat\theta_k \sim beta(\alpha_k, \beta_k)$
>> **end for**
>> $x_t \leftarrow argmax_{k}\hat\theta$
>> Apply $x_t$ and observe $r_t$
>> $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
> **end for**
More on Thompson Sampling:
https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf
```
class ThompsonSamplingAgent(AbstractAgent):
    """Thompson sampling: draw theta_k ~ Beta(s_k + 1, f_k + 1), play the argmax."""

    def get_action(self):
        # Beta(1, 1) uniform prior plus the observed success/failure counts
        samples = np.random.beta(self._successes + 1, self._failures + 1)
        return np.argmax(samples)

    @property
    def name(self):
        return self.__class__.__name__
from collections import OrderedDict
def get_regret(env, agents, n_steps=5000, n_trials=50):
    """Run every agent against `env`; return average cumulative regret.

    Returns an OrderedDict mapping agent.name to an array of length
    n_steps holding cumulative regret averaged over n_trials runs.
    Within a trial all agents share the same environment and act in
    list order at every step.
    """
    regret_sums = OrderedDict(
        (agent.name, [0.0] * n_steps) for agent in agents
    )
    for _ in range(n_trials):
        env.reset()
        for agent in agents:
            agent.init_actions(env.action_count)
        for step in range(n_steps):
            best = env.optimal_reward()
            for agent in agents:
                chosen = agent.get_action()
                payoff = env.pull(chosen)
                agent.update(chosen, payoff)
                regret_sums[agent.name][step] += best - payoff
            env.step()  # change bandit's state if it is unstationary
    for agent in agents:
        regret_sums[agent.name] = np.cumsum(regret_sums[agent.name]) / n_trials
    return regret_sums
def plot_regret(scores):
    """Plot one cumulative-regret curve per agent in `scores`.

    Parameters
    ----------
    scores : Mapping[str, array-like]
        agent name -> cumulative regret per step, as returned by get_regret

    Fix: the original iterated over the *global* `agents` list, so the
    function only worked when that global happened to exist and matched
    `scores`. Iterating `scores` itself makes the function self-contained.
    """
    for agent_name, regret in scores.items():
        plt.plot(regret, label=agent_name)
    plt.legend()
    plt.ylabel("regret")
    plt.xlabel("steps")
    plt.show()
# Uncomment agents
agents = [
EpsilonGreedyAgent(),
UCBAgent(),
ThompsonSamplingAgent()
]
regret = get_regret(BernoulliBandit(), agents, n_steps=10000, n_trials=10)
plot_regret(regret)
```
### Submit to coursera
```
from submit import submit_bandits
submit_bandits(regret, agents, '', '')
```
| github_jupyter |
```
from pathlib import Path
import numpy as np
import pandas as pd
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
import logging
import warnings
from random import shuffle
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
import umap
warnings.filterwarnings('ignore')
# Show wide DataFrames on one line; fix the NumPy RNG seed for reproducibility
pd.set_option('display.expand_frame_repr', False)
np.random.seed(42)
# Send gensim's verbose DEBUG output to a log file instead of the notebook
logging.basicConfig(
    filename='doc2vec.log',
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%H:%M:%S')
```
## Load Data
```
df = pd.read_csv('yelp_sample.csv')
# Keep only clearly negative (1), neutral (3) and clearly positive (5) reviews
df = df[df.stars.isin([1, 3, 5])]
df.info()
# seaborn is used below but was never imported in this notebook's setup cell
import seaborn as sns
# Distribution of review lengths (in whitespace-separated tokens)
sns.distplot(df.text.str.split().str.len());
```
## Doc2Vec
```
# Wrap each review in a TaggedDocument, tagged by its DataFrame index.
# NOTE(review): the (stars, text) unpacking assumes the DataFrame has exactly
# these two columns in this order — confirm against the CSV schema.
sentences = []
for i, (stars, text) in df.iterrows():
    sentences.append(TaggedDocument(words=text.split(), tags=[i]))

# Doc2Vec hyperparameters
size = 100       # embedding dimensionality
window = 5       # context window size
min_count = 100  # ignore tokens rarer than this
epochs = 5       # BUG FIX: a trailing comma previously made this the tuple (5,)
negative = 5     # number of negative-sampling noise words
dm_concat = 0    # average (rather than concatenate) context vectors
dbow_words = 0   # do not additionally train word vectors in DBOW fashion

model = Doc2Vec(documents=sentences,
                dm=1,
                size=size,
                window=window,
                min_count=min_count,
                workers=8,
                epochs=epochs,
                negative=negative,
                dm_concat=dm_concat,
                dbow_words=dbow_words)
# model.build_vocab(sentences)
# NOTE(review): passing `documents=` above already builds the vocab and trains
# once; this call trains a second time over the same corpus — confirm intended.
model.train(sentences, total_examples=model.corpus_count, epochs=model.epochs)
pd.DataFrame(model.most_similar('good'), columns=['token', 'similarity'])
```
## Persist Model
```
# Round-trip the model through disk to verify persistence works
model.save('test.model')
model = Doc2Vec.load('test.model')
```
## Evaluate
```
# Encode the star ratings as integer class labels
y = pd.factorize(df.stars)[0]
# Feature matrix: one learned document vector per review
X = np.zeros(shape=(len(df), size))
for i in range(len(df)):
    X[i] = model.docvecs[i]
# 2-D UMAP projection of the document vectors for visual inspection
embedding = pd.DataFrame(umap.UMAP().fit_transform(X),
                         columns=['x', 'y'])
embedding.to_csv('umap.csv', index=False)
embedding = pd.read_csv('umap.csv')
embedding['label'] = y
# Scatter a 10% sample of the projection, colored by class label
e = embedding.sample(frac=.1)
e.plot.scatter(x='x', y='y', c=e.label, figsize=(14, 8), cmap='viridis');
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
# Majority-class baseline accuracy for comparison with the classifiers below
mode = pd.Series(y_train).mode().iloc[0]
baseline = accuracy_score(y_true=y_test, y_pred=np.full_like(y_test, fill_value=mode))
print(f'Baseline Score: {baseline:.2%}')
pd.Series(y_test).value_counts(normalize=True)
from sklearn.utils import class_weight
# Inverse-frequency class weights (computed here for reference)
class_weights = class_weight.compute_class_weight('balanced',
                                                  np.unique(y_train),
                                                  y_train)
class_weights
```
## LightGBM
## Random Forest
```
# Random forest on the Doc2Vec features, rebalancing classes per bootstrap sample
rf = RandomForestClassifier(n_jobs=-1,
                            n_estimators=100,
                            class_weight='balanced_subsample')
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)
print(f'Accuracy: {accuracy_score(y_true=y_test, y_pred=y_pred):.2%}')
confusion_matrix(y_true=y_test, y_pred=y_pred)
```
## Logistic Regression
```
# Multinomial logistic regression baseline with balanced class weights
lr = LogisticRegression(multi_class='multinomial', solver='lbfgs', class_weight='balanced')
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
print(f'Accuracy: {accuracy_score(y_true=y_test, y_pred=y_pred):.2%}')
confusion_matrix(y_true=y_test, y_pred=y_pred)
```
| github_jupyter |
## 02. Multiple Parameters
In this tutorial, you will learn how to:
* Optimize the Objective Function with Multiple HyperParameters
* Define different types of Search Space
在本教程中,您将学习如何:
* 优化多超参数的目标函数
* 定义不同类型的搜索空间
### Optimizing Multi Parameters Objective function
```
# import fmin interface from UltraOpt
from ultraopt import fmin
# hdl2cs can convert HDL(Hyperparams Describe Language) to CS(Config Space)
from ultraopt.hdl import hdl2cs
import numpy as np
import pandas as pd
import seaborn as sns
from collections import Counter
%matplotlib inline
```
Declares an objective function to optimize. Unlike last time, we will optimize a function of two hyperparameters, $x$ and $y$.
声明要优化的目标函数。与上次不同,我们将使用两个超参数$x$和$y$来优化函数。
$$ z = sin\sqrt{x^2 + y^2} $$
```
def evaluate(config: dict):
    """Objective z = sin(sqrt(x**2 + y**2)) over the two hyperparameters x and y."""
    radius = np.sqrt(config['x'] ** 2 + config['y'] ** 2)
    return np.sin(radius)
```
Just like last time, let's try visualizing it. But unlike last time, there are two Hyperparameters, so we need to visualize them in 3D space.
就像上次一样,我们尝试对其进行可视化。但与上次不同的是,这次有两个超参数,所以我们需要在三维空间中可视化它们。
```
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
# Evaluate the objective on a 30x30 grid over [-6, 6]^2
x = np.linspace(-6, 6, 30)
y = np.linspace(-6, 6, 30)
x, y = np.meshgrid(x, y)
z = evaluate({'x': x, 'y': y})
# 3-D surface plot of z = sin(sqrt(x^2 + y^2))
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot_surface(x, y, z, cmap=cm.coolwarm)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
```
Likewise, let's define the search space. However, this time you need to define two hyperparameters ($x, y$), so each of them goes into the `dict`-typed **config** parameter. The smaller the return value **loss**, the better the **config**.
同样,让我们定义搜索空间。但这次,您需要定义两个超参数$(x,y)$,因此将它们分别放在`dict` type 的 **config** 参数中。返回值 **损失(loss)** 越小, **配置(config)** 越好。
重复 `BasicTutorial` 的步骤,让我们定义HDL $\rightarrow$ 转为CS $\rightarrow$ 采样config $\rightarrow$ 评价config
```
# HDL: both hyperparameters are continuous uniform on [-6, 6]
HDL = {
    "x": {"_type": "uniform", "_value": [-6, 6]},
    "y": {"_type": "uniform", "_value": [-6, 6]},
}
# Convert the HDL description into a ConfigSpace object
CS = hdl2cs(HDL)
CS
# Draw 5 random configurations, evaluate each, and report the best one
configs = [config.get_dictionary() for config in CS.sample_configuration(5)]
configs
losses = [evaluate(config) for config in configs]
best_ix = np.argmin(losses)
print(f"optimal config: {configs[best_ix]}, \noptimal loss: {losses[best_ix]}")
```
很好,让我们用UltraOpt来对评价函数进行优化吧!
```
# Minimize the objective with UltraOpt's ETPE optimizer
result = fmin(
    eval_func=evaluate, # objective (evaluation) function
    config_space=HDL, # configuration space
    optimizer="ETPE", # optimizer
    n_iterations=100 # number of iterations
)
result
```
查看优化过程的拟合曲线:
```
# Convergence curve of the optimization run
plt.rcParams['figure.figsize'] = (8, 6)
result.plot_convergence();
```
### Define different types of Search Space
UltraOpt一共有8种类型的超参:
|超参变量类型(`_type`)|参数列表(`_value`)|举例|描述|
|----|----|----|----|
| "choice" | options |`["苹果","梨子","葡萄"]`|选项(options)之间没有可比较关系|
| "ordinal" | sequence |`["小学","中学","大学"]`|序列(sequence)之间存在可比较关系|
| "uniform" | [low, high] |`[0, 100]`|均匀分布|
| "quniform" | [low, high, q] |`[0, 100, 20]`|间隔为 `q` 的离散均匀分布|
| "loguniform" | [low, high] | `[0.1, 100]`| `log` 缩放的均匀分布|
| "qloguniform" | [low, high, q] | `[1, 100, 1]`| `log` 缩放的离散均匀分布|
| "int_uniform" | [low, high] | `[0, 10]`| 间隔为 `1` 的离散均匀分布, `int`类型|
| "int_quniform" | [low, high, q] |`[0, 10, 2]`| 间隔为 `q` 的离散均匀分布, `int`类型|
作为教学目的,我们定义一个空间,囊括所有类型的超参:
```
# One hyperparameter of each of the eight supported HDL types (for demonstration)
HDL = {
    "hp_choice": {"_type": "choice", "_value": ["apple", "pear", "grape"]},
    "hp_ordinal": {"_type": "ordinal", "_value": ["Primary school", "Middle school", "University"] },
    "hp_uniform": {"_type": "uniform", "_value": [0, 100]},
    "hp_quniform": {"_type": "quniform", "_value": [0, 100, 20]},
    "hp_loguniform": {"_type": "loguniform", "_value": [0.1, 100]},
    "hp_qloguniform": {"_type": "qloguniform", "_value": [10, 100, 10]},
    "hp_int_uniform": {"_type": "int_uniform", "_value": [0, 10]},
    "hp_int_quniform": {"_type": "int_quniform", "_value": [0, 10, 2]},
}
```
如果您定义的超参描述语言`HDL`可以正常被`ultraopt.hdl.hdl2cs`函数转换,说明您定义的`HDL`正确无误:
```
CS = hdl2cs(HDL)
CS
```
我们从配置空间`CS`中随机采样`1000`个:
```
# Draw 1000 random configurations and collect each hyperparameter's samples
configs = [config.get_dictionary() for config in CS.sample_configuration(1000)]
variables = {key:[config[key] for config in configs] for key in HDL}
# Frequency counts for the two discrete hyperparameter types
hp_choice_cnt = Counter(variables["hp_choice"])
hp_ordinal_cnt = Counter(variables["hp_ordinal"])
```
然后可视化随机采样下各变量的分布:
```
# 2x4 grid of panels: the empirical distribution of each hyperparameter type
# under uniform random sampling (same calls as before, one statement per line).
plt.rcParams['figure.figsize'] = (20, 10)

plt.subplot(2, 4, 1)
plt.title("choice")
plt.pie(list(hp_choice_cnt.values()), labels=list(hp_choice_cnt.keys()), autopct='%1.0f%%')

plt.subplot(2, 4, 2)
plt.title("ordinal")
plt.pie(list(hp_ordinal_cnt.values()), labels=list(hp_ordinal_cnt.keys()), autopct='%1.0f%%')

plt.subplot(2, 4, 3)
plt.title("uniform")
sns.distplot(variables["hp_uniform"])

plt.subplot(2, 4, 4)
plt.title("quniform")
plt.hist(variables["hp_quniform"], bins=20)

plt.subplot(2, 4, 5)
plt.title("loguniform")
sns.distplot(variables["hp_loguniform"])

plt.subplot(2, 4, 6)
plt.title("qloguniform")
plt.hist(variables["hp_qloguniform"], bins=25)
plt.xticks(range(10, 110, 10))

plt.subplot(2, 4, 7)
plt.title("int_uniform")
plt.xticks(range(11))
plt.hist(variables["hp_int_uniform"], bins=30)

plt.subplot(2, 4, 8)
plt.title("int_quniform")
plt.hist(variables["hp_int_quniform"], bins=20)
```
可以看到:
- 图1 图2 分别为 `choice` 选择类型超参 和 `ordinal` 有序类型超参。
+ 且他们都是离散变量。
+ 随机情况每个选项(option)被选中的几率都大约为 $\frac{1}{3}$。
- 图3 图5 分别是 `uniform` 和 `loguniform`。
+ 他们都是连续变量,所以用核密度估计图进行可视化。
+ `uniform` 类型的超参服从均匀分布。
+ `loguniform` 类型的超参在对其取对数后也服从均匀分布。
- 图4 的 `quniform` 间隔 `q` 为 `10` 。
- 图6 的 `qloguniform` 间隔 `q` 为 `10` 。
---
作为教学目的,我们定义一个依赖这8个变量的目标函数:
```
def evaluate(config: dict):
    """Toy objective that couples all eight hyperparameter types of the HDL above."""
    # Numeric encodings of the categorical and ordinal options
    choice_num = {"apple": 3, "pear": 2, "grape": 4}[config["hp_choice"]]
    ordinal_num = {"Primary school": 1, "Middle school": 2, "University": 3}[config["hp_ordinal"]]
    # Pairwise interaction terms between the integer and continuous hyperparameters
    interact1 = np.sin(config["hp_int_uniform"] - choice_num) * np.sin(config["hp_int_quniform"] - ordinal_num)
    interact2 = np.sin(config["hp_uniform"] - choice_num * 10) * np.sin(config["hp_quniform"] - ordinal_num * 10)
    # Combine with the log-scaled hyperparameters
    return ((interact1 - np.log(config["hp_loguniform"])) - 2) ** 2 - ((interact2 - np.log(config["hp_qloguniform"])) - 1) ** 2
```
使用`ultraopt.fmin`进行优化:
```
# Optimize the 8-hyperparameter objective with ETPE
result = fmin(
    eval_func=evaluate, # objective (evaluation) function
    config_space=HDL, # configuration space
    optimizer="ETPE", # optimizer
    n_iterations=200 # number of iterations
)
```
打印优化结果汇总表:
```
result
```
查看优化过程的拟合曲线:
```
# Convergence curve of the optimization run
plt.rcParams['figure.figsize'] = (8, 6)
result.plot_convergence();
```
| github_jupyter |
# Comparative analysis
## Imports & Parameters
```
import os, sys
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import rasterio
from tqdm import tqdm_notebook as tqdm
from sklearn.model_selection import train_test_split
from itertools import product
from functools import partial
from scipy.ndimage import uniform_filter
# Add local module to the path
src = os.path.abspath('../src')
if src not in sys.path:
sys.path.append(src)
from metadata import City, CITIES, DATA_DIR
import classification as cls
import raster as rst
from landsat import Scene
%matplotlib inline
%load_ext autoreload
%autoreload 2
RANDOM_SEED = 2018
def pretty(city_id):
    """Format a city identifier as a display label: 'addis_ababa' -> 'Addis-Ababa'."""
    return city_id.title().replace('_', '-')
def write_raster(array, rasterio_profile, filename):
    """Write a single-band raster to disk and return the output filename.

    Parameters
    ----------
    array : numpy.ndarray
        2-D band values to write.
    rasterio_profile : dict-like
        Template rasterio profile; a copy is updated to match the array dtype.
    filename : str
        Output path.
    """
    profile = rasterio_profile.copy()
    # Align the profile with the array's dtype; drop nodata/transform metadata
    profile.update(dtype=array.dtype.name, nodata=None, transform=None)
    with rasterio.open(filename, 'w', **profile) as dst:
        dst.write(array, 1)
    return filename
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy arrays as plain Python lists."""

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Defer to the base class, which raises TypeError for unknown types
        return super().default(obj)
# Plotting style
# Serif (Computer Modern) fonts, uniform 12pt sizes, LaTeX text rendering
plt.style.use(['seaborn-paper', 'seaborn-whitegrid'])
plt.rc('font', family='serif', serif='cmr10', size=12)
plt.rc('axes', titlesize=12)
plt.rc('axes', labelsize=12)
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=12)
plt.rc('legend', fontsize=12)
plt.rc('figure', titlesize=12)
plt.rc('text', usetex=True)
```
## Reference classification
The reference classification makes use of the reference polygons for training. Because reference polygons are used for both training and validation, polygons are randomly split between both datasets according to the `TEST_SIZE` parameter. In other words, half the polygons are used for training, and the others for validation. The procedure is performed `N` times and the scores are then averaged. Fixed random seeds are used to allow the reproducibility of the results (through the `RANDOM_SEED` and the `random_seeds` variables).
```
# Reference classification: train/validate on random splits of the reference
# polygons, N times per city, then average the class probabilities.
TEST_SIZE = 0.5
N = 20
# Set of fixed seeds for reproducibility of train_test_split function
random_seeds = np.arange(1, N+1, 1)
results = {}
progress = tqdm(total=len(CITIES) * N)
for city_name in CITIES:
    city = City(city_name)
    output_dir = os.path.join(city.output_dir, 'ref')
    os.makedirs(output_dir, exist_ok=True)
    scene = Scene(city.landsat_dir)
    shape = scene.red.shape
    mean_probabilities = np.zeros(shape=shape, dtype=np.float64)
    results[city_name] = []
    for n, seed in zip(range(N), random_seeds):
        # Split each land-cover class' polygons into train/test halves
        bu_train, bu_test = train_test_split(city.reference_builtup, test_size=TEST_SIZE, random_state=seed)
        so_train, so_test = train_test_split(city.reference_baresoil, test_size=TEST_SIZE, random_state=seed)
        lv_train, lv_test = train_test_split(city.reference_lowveg, test_size=TEST_SIZE, random_state=seed)
        hv_train, hv_test = train_test_split(city.reference_highveg, test_size=TEST_SIZE, random_state=seed)
        # Training dataset (class codes: 1=built-up, 2=bare soil, 3=low veg, 4=high veg)
        training_dataset = np.zeros(shape=shape, dtype=np.uint8)
        for polygons, value in zip([bu_train, so_train, lv_train, hv_train], [1, 2, 3, 4]):
            # np.bool was a removed deprecated alias for the builtin bool
            y = rst.rasterize(polygons, city.profile).astype(bool)
            training_dataset[y] = value
        # Testing dataset
        testing_dataset = np.zeros(shape=shape, dtype=np.uint8)
        for polygons, value in zip([bu_test, so_test, lv_test, hv_test], [1, 2, 3, 4]):
            y = rst.rasterize(polygons, city.profile).astype(bool)
            testing_dataset[y] = value
        # Classification
        probabilities = cls.classify(
            scene, training_dataset, oversampling=True, water=city.water, n_jobs=6, random_state=RANDOM_SEED, n_estimators=100)
        mean_probabilities = mean_probabilities + probabilities
        # Assessment
        results[city_name].append(cls.assess(probabilities, testing_dataset))
        progress.update(1)
    mean_probabilities = mean_probabilities / N
    write_raster(mean_probabilities, city.profile, os.path.join(output_dir, 'probabilities.tif'))
progress.close()
METRICS = ['f1_score', 'precision', 'recall', 'builtup_accuracy', 'baresoil_accuracy', 'lowveg_accuracy', 'highveg_accuracy']
# One cell per (city, metric) holding the array of N scores
scores_ref = pd.DataFrame(index=CITIES, columns=METRICS)
for city, metric in product(CITIES, METRICS):
    array = np.array([result[metric] for result in results[city]])
    scores_ref.at[(city, metric)] = array
```
Averaged and rounded scores for each case study:
```
# Per-city mean and standard deviation over the N splits, rounded to 2 d.p.
rounder = partial(round, ndigits=2)
scores_ref_mean = scores_ref.applymap(np.mean).applymap(rounder)
scores_ref_std = scores_ref.applymap(np.std).applymap(rounder)
scores_ref_mean.to_csv(os.path.join(DATA_DIR, 'output', 'scores_ref_mean.csv'))
scores_ref_std.to_csv(os.path.join(DATA_DIR, 'output', 'scores_ref_std.csv'))
display(scores_ref_mean)
display(scores_ref_std)
```
## Using OSM buildings & non-built objects
```
# Training scheme A: positives from OSM building footprints, negatives from
# OSM non-built objects; validated against the full reference dataset.
BUILDINGS_MIN_COVERAGE = 0.2
METRICS = ['f1_score', 'precision', 'recall', 'builtup_accuracy', 'baresoil_accuracy', 'lowveg_accuracy', 'highveg_accuracy']
scores_osm_a = pd.DataFrame(index=CITIES, columns=METRICS)
progress = tqdm(total=len(CITIES))
for city_name in CITIES:
    city = City(city_name)
    scene = Scene(city.landsat_dir)
    training_dataset = np.zeros(shape=scene.red.shape, dtype=np.uint8)
    # Use the named constant instead of the previously hard-coded 0.2
    training_dataset[city.buildings_cover >= BUILDINGS_MIN_COVERAGE] = 1
    training_dataset[city.nonbuilt_raster > 0] = 2
    testing_dataset = city.reference
    probabilities = cls.classify(
        scene, training_dataset, oversampling=True, water=city.water, n_jobs=8, random_state=RANDOM_SEED, n_estimators=100)
    assessment = cls.assess(probabilities, testing_dataset)
    for metric in METRICS:
        scores_osm_a.at[(city_name, metric)] = round(assessment[metric], 2)
    # Save results to disk
    output_dir = os.path.join(city.output_dir, 'osm_a')
    os.makedirs(output_dir, exist_ok=True)
    write_raster(probabilities, city.profile, os.path.join(output_dir, 'probabilities.tif'))
    write_raster(training_dataset, city.profile, os.path.join(output_dir, 'training_dataset.tif'))
    with open(os.path.join(output_dir, 'assessment.json'), 'w') as f:
        f.write(json.dumps(assessment, indent=True, cls=NumpyEncoder))
    progress.update(1)
progress.close()
scores_osm_a.to_csv(os.path.join(DATA_DIR, 'output', 'scores_osm_a.csv'))
scores_osm_a
```
## Using urban blocks & urban distance
```
# Training scheme B: positives from OSM buildings OR small urban blocks,
# negatives from OSM non-built objects OR pixels far from the urban area.
BLOCKS_MAX_SIZE = 3.0 # in hectares
BUILDINGS_MIN_COVER = 0.2 # 20%
URBAN_DIST_THRESHOLD = 250 # in meters
METRICS = ['f1_score', 'precision', 'recall', 'builtup_accuracy', 'baresoil_accuracy', 'lowveg_accuracy', 'highveg_accuracy']
scores_osm_b = pd.DataFrame(index=CITIES, columns=METRICS)
progress = tqdm(total=len(CITIES))
for city_name in CITIES:
    city = City(city_name)
    scene = Scene(city.landsat_dir)
    # Exclude scene mask and water from every training layer
    mask = np.logical_or(scene.mask, city.water)
    training_dataset = np.zeros(shape=scene.red.shape, dtype=np.uint8)
    buildings = (city.buildings_cover >= BUILDINGS_MIN_COVER) & ~mask
    # NOTE(review): the lower bound 0.1 here differs from the > 0 used in the
    # block-size sensitivity analysis further below — confirm intended.
    blocks = (city.blocks_raster <= BLOCKS_MAX_SIZE) & (city.blocks_raster > 0.1) & ~mask
    nonbuilt = (city.nonbuilt_raster > 0) & ~mask
    urban_distance = (city.urban_distance >= URBAN_DIST_THRESHOLD) & ~mask
    positive = np.logical_or(buildings, blocks)
    negative = np.logical_or(nonbuilt, urban_distance)
    # Drop pixels claimed by both the positive and negative layers
    confused = np.logical_and(positive, negative)
    positive[confused] = 0
    negative[confused] = 0
    n_positive = np.count_nonzero(positive)
    n_negative = np.count_nonzero(negative)
    # Cap the negative class at twice the positive class size
    if n_negative > 2 * n_positive:
        negative = rst.random_choice(negative, size=n_positive*2)
        n_negative = np.count_nonzero(negative)
    training_dataset[positive] = 1
    training_dataset[negative] = 2
    testing_dataset = city.reference
    probabilities = cls.classify(
        scene, training_dataset, oversampling=True, water=city.water, n_jobs=8, random_state=RANDOM_SEED, n_estimators=100)
    # Hard classification at a 0.75 probability threshold
    classes = np.zeros(shape=probabilities.shape, dtype=np.uint8)
    classes[probabilities >= 0.75] = 1
    classes[probabilities < 0.75] = 2
    classes[city.water] = 2
    classes[scene.mask] = 0
    assessment = cls.assess(probabilities, testing_dataset, threshold=0.75)
    for metric in METRICS:
        scores_osm_b.at[(city_name, metric)] = round(assessment[metric], 2)
    # Write results to disk
    output_dir = os.path.join(city.output_dir, 'osm_b')
    os.makedirs(output_dir, exist_ok=True)
    write_raster(probabilities, city.profile, os.path.join(output_dir, 'probabilities.tif'))
    write_raster(training_dataset, city.profile, os.path.join(output_dir, 'training_dataset.tif'))
    with open(os.path.join(output_dir, 'assessment.json'), 'w') as f:
        f.write(json.dumps(assessment, indent=True, cls=NumpyEncoder))
    progress.update(1)
progress.close()
scores_osm_b.to_csv(os.path.join(DATA_DIR, 'output', 'scores_osm_b.csv'))
scores_osm_b
```
## Influence of the training sample size
```
# Count training pixels per class for scheme B and relate sample size to F1.
from scipy.stats import linregress  # BUG FIX: used below but never imported
count = pd.DataFrame(index=CITIES, columns=['positive', 'negative'])
for city_name in CITIES:
    city = City(city_name)
    with rasterio.open(os.path.join(city.output_dir, 'osm_b', 'training_dataset.tif')) as src:
        train = src.read(1)
    count.at[(city_name, 'positive')] = np.count_nonzero(train == 1)
    count.at[(city_name, 'negative')] = np.count_nonzero(train == 2)
count.to_csv(os.path.join(DATA_DIR, 'output', 'n_training_samples.csv'))
count.sort_values(by='positive')
count.join(scores_osm_b)[['positive', 'f1_score']].sort_values(by='f1_score', ascending=False)
# Johannesburg is dropped from the regression (outlying sample count)
count_ = count.drop('johannesburg')
scores_osm_b_ = scores_osm_b.drop('johannesburg')
f, ax = plt.subplots(figsize=(7, 4))
x = count_.positive.astype(np.float64)
y = scores_osm_b_.f1_score
ax.scatter(x, y, label=None)
ax.set_ylim(0.88, 1.00)
ax.set_xlim(0, 110000)
slope, intercept, r_value, p_value, std_err = linregress(x, y)
# Plot line of fit
x_values = np.array(ax.get_xlim())
y_values = slope * x_values + intercept
ax.plot(x_values, y_values, color='C3', linewidth=2, linestyle=':', label='Least square fit')
ax.annotate(
    f'$R^2$ = {round(r_value ** 2, 3)}\n$P$ = {round(p_value, 4)}',
    (100000, 0.89),
    bbox=dict(facecolor='white', edgecolor='black', boxstyle='round,pad=0.5'),
    ha='center'
)
ax.legend(frameon=True)
ax.set_xlabel('Number of training samples (pixels)')
ax.set_ylabel('F1-Score')
f.tight_layout()
f.savefig('../paper/figures/n_samples_fit.png')
f.savefig('../paper/figures/n_samples_fit.pdf');
# Sweep the number of training samples per class (7 cities, 10 sizes)
cities = ('windhoek', 'antananarivo', 'katsina', 'nairobi', 'kampala', 'dakar', 'johannesburg')
n_samples = np.linspace(1000, 20000, 10).astype(int)
per_n_samples = pd.DataFrame(index=cities, columns=n_samples)
progress = tqdm(total=len(cities) * len(n_samples))
for city_name in cities:
    city = City(city_name)
    scene = Scene(city.landsat_dir)
    mask = np.logical_or(scene.mask, city.water)
    # Same positive/negative layer construction as training scheme B
    buildings = (city.buildings_cover >= BUILDINGS_MIN_COVER) & ~mask
    blocks = (city.blocks_raster <= BLOCKS_MAX_SIZE) & (city.blocks_raster > 0.1) & ~mask
    nonbuilt = (city.nonbuilt_raster > 0) & ~mask
    urban_distance = (city.urban_distance >= URBAN_DIST_THRESHOLD) & ~mask
    positive = np.logical_or(buildings, blocks)
    negative = np.logical_or(nonbuilt, urban_distance)
    confused = np.logical_and(positive, negative)
    positive[confused] = 0
    negative[confused] = 0
    for n in n_samples:
        training_dataset = np.zeros(shape=scene.red.shape, dtype=np.uint8)
        # Never request more samples than the positive layer contains
        n_ = min(np.count_nonzero(positive), n)
        positive_ = rst.random_choice(positive, n_, random_seed=RANDOM_SEED)
        negative_ = rst.random_choice(negative, n_, random_seed=RANDOM_SEED)
        training_dataset[positive_] = 1
        training_dataset[negative_] = 2
        probabilities = cls.classify(
            scene, training_dataset, oversampling=True, water=city.water, n_jobs=8, random_state=RANDOM_SEED, n_estimators=10)
        assessment = cls.assess(probabilities, city.reference, threshold=0.75)
        per_n_samples.at[(city.name, n)] = round(assessment['f1_score'], 3)
        progress.update(1)
progress.close()
per_n_samples.to_csv(os.path.join(DATA_DIR, 'output', 'per_n_samples.csv'))
per_n_samples
# Mean F1 with min/max envelope across cities as a function of sample size
f, ax = plt.subplots(figsize=(7, 4))
x = n_samples
y_mean = per_n_samples.mean(axis=0)
y_min = per_n_samples.min(axis=0)
y_max = per_n_samples.max(axis=0)
ax.plot(y_mean, label='Mean')
ax.plot(y_min, color='C0', linewidth=1, alpha=0.5)
ax.plot(y_max, color='C0', linewidth=1, alpha=0.5)
ax.fill_between(x, y_min, y_max, facecolor='C0', alpha=0.2, label='Range')
ax.set_xlabel('Number of training samples (pixels)')
ax.set_ylabel('F1-Score')
ax.set_xlim(1000, 20000)
ax.set_ylim(0.86, 1.00)
ax.legend(frameon=True)
f.tight_layout()
f.savefig('../paper/figures/per_n_samples.png')
f.savefig('../paper/figures/per_n_samples.pdf');
### In Nairobi & Kampala
```
# Finer sample-size sweep (up to 50k) for Kampala and Nairobi only
n_samples = np.linspace(1000, 50000, 20).astype(int)
n_samples_scores = pd.DataFrame(index=['kampala', 'nairobi'], columns=n_samples)
progress = tqdm(total=len(n_samples) * 2)
for city_name in ('kampala', 'nairobi'):
    city = City(city_name)
    scene = Scene(city.landsat_dir)
    mask = np.logical_or(scene.mask, city.water)
    # Same positive/negative layer construction as training scheme B
    buildings = (city.buildings_cover >= BUILDINGS_MIN_COVER) & ~mask
    blocks = (city.blocks_raster <= BLOCKS_MAX_SIZE) & (city.blocks_raster > 0.1) & ~mask
    nonbuilt = (city.nonbuilt_raster > 0) & ~mask
    urban_distance = (city.urban_distance >= URBAN_DIST_THRESHOLD) & ~mask
    positive = np.logical_or(buildings, blocks)
    negative = np.logical_or(nonbuilt, urban_distance)
    confused = np.logical_and(positive, negative)
    positive[confused] = 0
    negative[confused] = 0
    for n in n_samples:
        training_dataset = np.zeros(shape=scene.red.shape, dtype=np.uint8)
        n_ = min(np.count_nonzero(positive), n)
        positive_ = rst.random_choice(positive, n_, random_seed=RANDOM_SEED)
        negative_ = rst.random_choice(negative, n_, random_seed=RANDOM_SEED)
        training_dataset[positive_] = 1
        training_dataset[negative_] = 2
        probabilities = cls.classify(
            scene, training_dataset, oversampling=True, water=city.water, n_jobs=8, random_state=RANDOM_SEED, n_estimators=10)
        assessment = cls.assess(probabilities, city.reference, threshold=0.75)
        n_samples_scores.at[(city_name, n)] = round(assessment['f1_score'], 3)
        progress.update(1)
progress.close()
f, ax = plt.subplots(figsize=(7, 4))
ax.plot(n_samples_scores.loc['kampala'], label='Kampala')
ax.plot(n_samples_scores.loc['nairobi'], label='Nairobi')
ax.set_ylim(0.92, 0.98)
ax.set_xlim(1000, 50000)
ax.set_ylabel('F1-Score')
ax.set_xlabel('Number of training samples per class (pixels)')
ax.legend(frameon=True)
f.tight_layout()
f.savefig('../paper/figures/n_samples_nairobi_kampala.png')
f.savefig('../paper/figures/n_samples_nairobi_kampala.pdf');
```
## Influence of max. block size
```
# Sensitivity of the F1-score to the max. block surface used for positives
BLOCKS_MAX_SIZES = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
per_block_size = pd.DataFrame(index=CITIES, columns=BLOCKS_MAX_SIZES)
progress = tqdm(total=len(CITIES) * len(BLOCKS_MAX_SIZES))
for city_name in CITIES:
    city = City(city_name)
    scene = Scene(city.landsat_dir)
    mask = np.logical_or(scene.mask, city.water)
    buildings = (city.buildings_cover >= BUILDINGS_MIN_COVER) & ~mask
    nonbuilt = (city.nonbuilt_raster > 0) & ~mask
    urban_distance = (city.urban_distance >= URBAN_DIST_THRESHOLD) & ~mask
    for max_size in BLOCKS_MAX_SIZES:
        training_dataset = np.zeros(shape=scene.red.shape, dtype=np.uint8)
        # NOTE(review): lower bound is > 0 here, whereas the main scheme B
        # cell used > 0.1 — confirm which is intended.
        blocks = (city.blocks_raster <= max_size) & (city.blocks_raster > 0) & ~mask
        positive = np.logical_or(buildings, blocks)
        negative = np.logical_or(nonbuilt, urban_distance)
        confused = np.logical_and(positive, negative)
        positive[confused] = 0
        negative[confused] = 0
        n_positive = np.count_nonzero(positive)
        n_negative = np.count_nonzero(negative)
        # Cap the negative class at twice the positive class size
        if n_negative > 2 * n_positive:
            negative = rst.random_choice(negative, size=n_positive*2)
            n_negative = np.count_nonzero(negative)
        training_dataset[positive] = 1
        training_dataset[negative] = 2
        probabilities = cls.classify(
            scene, training_dataset, oversampling=True, water=city.water, n_jobs=8, random_state=RANDOM_SEED, n_estimators=10)
        assessment = cls.assess(probabilities, city.reference, threshold=0.75)
        per_block_size.at[(city.name, max_size)] = round(assessment['f1_score'], 3)
        progress.update(1)
progress.close()
per_block_size.to_csv(os.path.join(DATA_DIR, 'output', 'per_block_size.csv'))
per_block_size
# Mean F1 with min/max envelope across cities vs. max. block surface
f, ax = plt.subplots(figsize=(7, 4))
x = np.linspace(1, 5, 9)
y_mean = per_block_size.mean(axis=0)
y_min = per_block_size.min(axis=0)
y_max = per_block_size.max(axis=0)
ax.plot(y_mean, label='Mean')
ax.plot(y_min, color='C0', linewidth=1, alpha=0.5)
ax.plot(y_max, color='C0', linewidth=1, alpha=0.5)
ax.fill_between(x, y_min, y_max, facecolor='C0', alpha=0.2, label='Range')
ax.set_xlim(1, 5)
ax.set_ylim(0.75, 0.98)
ax.set_xlabel('Max. block surface (ha)')
ax.set_ylabel('F1-Score')
ax.legend(frameon=True)
f.tight_layout()
f.savefig('../paper/figures/per_block_size.png')
f.savefig('../paper/figures/per_block_size.pdf');
```
## Influence of the probability threshold
```
# Sensitivity of the F1-score to the probability threshold (0.0 .. 1.0)
thresholds = np.linspace(0, 1, 21)
per_threshold = pd.DataFrame(index=CITIES, columns=thresholds)
progress = tqdm(total=len(CITIES) * len(thresholds))
for city_name in CITIES:
    city = City(city_name)
    testing_dataset = city.reference
    # Reuse the scheme B probabilities written to disk earlier
    with rasterio.open(os.path.join(city.output_dir, 'osm_b', 'probabilities.tif')) as src:
        probabilities = src.read(1)
    for threshold in thresholds:
        assessment = cls.assess(probabilities, testing_dataset, threshold=threshold)
        per_threshold.at[(city.name, threshold)] = round(assessment['f1_score'], 2)
        progress.update(1)
progress.close()
per_threshold
# Mean F1 with min/max envelope across cities vs. threshold
f, ax = plt.subplots(figsize=(7, 4))
x = thresholds
y_mean = per_threshold.mean(axis=0)
y_min = per_threshold.min(axis=0)
y_max = per_threshold.max(axis=0)
ax.plot(y_mean, label='Mean')
ax.plot(y_min, color='C0', linewidth=1, alpha=0.5)
ax.plot(y_max, color='C0', linewidth=1, alpha=0.5)
ax.fill_between(x, y_min, y_max, facecolor='C0', alpha=0.2, label='Range')
ax.set_xlim(0, 1)
ax.set_ylim(0.50, 1.00)
ax.set_xlabel('Probability threshold')
ax.set_ylabel('F1-Score')
ax.axvline(x=0.6, color='C3', linestyle=':', label='Optimal\nthreshold')
ax.legend(frameon=True)
f.tight_layout()
f.savefig('../paper/figures/per_prob_threshold.png')
f.savefig('../paper/figures/per_prob_threshold.pdf');
```
## Post-processing
```
# Effect of mean-filter smoothing of the probability raster on the F1-score
kernel_sizes = [1, 3, 5, 7, 9]
postprocessing_scores = pd.DataFrame(index=CITIES, columns=kernel_sizes)
progress = tqdm(total=len(CITIES) * len(kernel_sizes))
for city_name in CITIES:
    city = City(city_name)
    with rasterio.open(os.path.join(city.output_dir, 'osm_b', 'probabilities.tif')) as src:
        probabilities = src.read(1)
    for size in kernel_sizes:
        probabilities_ = uniform_filter(probabilities, size=size)
        assessment = cls.assess(probabilities_, city.reference, threshold=0.6)
        postprocessing_scores.at[(city_name, size)] = round(assessment['f1_score'], 2)
        progress.update(1)
progress.close()
postprocessing_scores
# Mean F1 with min/max envelope across cities vs. kernel size
f, ax = plt.subplots(figsize=(7, 4))
x = kernel_sizes
y_mean = postprocessing_scores.mean(axis=0)
y_min = postprocessing_scores.min(axis=0)
y_max = postprocessing_scores.max(axis=0)
ax.plot(y_mean, label='Mean')
ax.plot(y_min, color='C0', linewidth=1, alpha=0.5)
ax.plot(y_max, color='C0', linewidth=1, alpha=0.5)
ax.fill_between(x, y_min, y_max, facecolor='C0', alpha=0.2, label='Range')
ax.set_xlim(1, 9)
ax.set_xlabel('Kernel size (pixels)')
ax.set_ylabel('F1-Score')
ax.set_xticks(kernel_sizes)
ax.axvline(x=3, color='C3', linestyle=':', label='Optimal size')
ax.legend(frameon=True, loc='lower right')
f.tight_layout()
f.savefig('../paper/figures/postprocessing_scores.png')
f.savefig('../paper/figures/postprocessing_scores.pdf');
# Write post-processed map to disk
for city_name in CITIES:
    city = City(city_name)
    for scheme in ('osm_a', 'osm_b', 'ref'):
        with rasterio.open(os.path.join(city.output_dir, scheme, 'probabilities.tif')) as src:
            probabilities = src.read(1)
        probabilities = uniform_filter(probabilities, size=3)
        # NOTE(review): maps are binarized at 0.75, although 0.6 was marked
        # as the optimal threshold above — confirm intended.
        bu_map = probabilities >= 0.75
        bu_map = bu_map.astype(np.uint8)
        output_f = os.path.join(city.output_dir, scheme, 'map.tif')
        write_raster(bu_map, city.profile, output_f)
```
## Summary
```
from itertools import product
# Combine the three classification schemes into one summary table
scores_ref = pd.read_csv(os.path.join(DATA_DIR, 'output', 'scores_ref_mean.csv'), index_col=0)
scores_osm_a = pd.read_csv(os.path.join(DATA_DIR, 'output', 'scores_osm_a.csv'), index_col=0)
scores_osm_b = pd.read_csv(os.path.join(DATA_DIR, 'output', 'scores_osm_b.csv'), index_col=0)
cls_labels = ['osm_a', 'osm_b', 'ref']
metrics = ['f1_score', 'precision', 'recall']
# NOTE: `cls` as comprehension variable shadows the `classification as cls`
# module alias inside this comprehension only
columns = [cls + '_' + metric for cls, metric in product(cls_labels, metrics)]
scores = pd.DataFrame(index=CITIES, columns=columns)
for cls_scores, cls_label in zip([scores_osm_a, scores_osm_b, scores_ref], cls_labels):
    for city, metric in product(CITIES, metrics):
        col = cls_label + '_' + metric
        scores.at[(city, col)] = cls_scores.at[(city, metric)]
# Append cross-city mean and standard deviation rows
scores.loc['mean'] = scores.mean().round(2)
scores.loc['std'] = scores.std().round(2)
scores
```
| github_jupyter |
```
#Importing Environment and ImpStates
from env_2_stochastic_high import Environment2,StartandGoal,ImportDynamics
from SophAgent import SophAgentActions
from QlearningAgent import QAgent
# Fixed start and goal states of the grid-world environment
[startstate,goalstate]=StartandGoal()
# Btrue (the true transition dynamics) is only used for model-accuracy plots
Btrue=ImportDynamics()
import numpy as np
import math
import random
# start state
start=startstate
# goal state
goal=goalstate
# Parameters: 70 states, 8 "king's move" actions, horizons from T_min to T_max-1
numS=70
numA=8
T_min=2
T_max=30
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    exps = np.exp(x - np.max(x))
    return exps / exps.sum()
# Success rate of the Q-learning agent for horizons T_min..T_max-1,
# trained with 10,000 loops (the old header comment said 5000 — stale).
SuccessRateQ10K=[]
training_loops=10000
test_trials=1000
for ii in range(T_min,T_max):
    T=ii
    # Greedy Q-table learned by the Q-learning agent for this horizon
    QLearned=QAgent(T,training_loops)
    score=0
    for j in range(test_trials):
        # Episode: act greedily w.r.t. QLearned for at most T-1 steps
        # (the unused per-step `kingsmoves` list was removed)
        state=startstate
        for i in range(0,T-1):
            action=np.argmax(QLearned[state,:])
            rew,new_state=Environment2(state,action)
            state=new_state
            if(new_state==goalstate):
                score+=1
                break
    SuccessRateQ10K.append(score/test_trials)
# Same experiment, but with 20,000 Q-learning training loops
# (the old header comment said 5000 — stale).
SuccessRateQ20K=[]
training_loops=20000
test_trials=1000
for ii in range(T_min,T_max):
    T=ii
    # Greedy Q-table learned by the Q-learning agent for this horizon
    QLearned=QAgent(T,training_loops)
    score=0
    for j in range(test_trials):
        # Episode: act greedily w.r.t. QLearned for at most T-1 steps
        # (the unused per-step `kingsmoves` list was removed)
        state=startstate
        for i in range(0,T-1):
            action=np.argmax(QLearned[state,:])
            rew,new_state=Environment2(state,action)
            state=new_state
            if(new_state==goalstate):
                score+=1
                break
    SuccessRateQ20K.append(score/test_trials)
# Learning: estimate the transition model B from 5000 random-policy rollouts
# per horizon, then evaluate the sophisticated (model-based) agent on it.
trials=5000
b=np.zeros((numA,numS,numS))
Blearned=np.zeros((numS,numA,numS))
Time_horizon=[]
modeldeviation5K=[]
successrate5K=[]
for jj in range(T_min,T_max):
    T=jj
    Time_horizon.append(jj)
    for ii in range(trials):
        # One rollout: record (previous state, action, next state) per step
        act=np.zeros((T-1))
        stmo=np.zeros((T-1))
        st=np.zeros((T-1))
        state=startstate
        for i in range(T-1):
            action=random.randint(0,7)
            act[i]=action
            stmo[i]=state
            rew,next_state=Environment2(state,action)
            st[i]=next_state
            state=next_state
        # Accumulate transition counts as outer products.
        # NOTE(review): the inner k-loop repeats the same += numA (=8) times
        # at weight 0.125, so each observed transition effectively adds ~1;
        # a single += of the full outer product would be equivalent up to
        # floating-point summation order — confirm intended.
        for i in range(numA):
            for j in range(T-1):
                for k in range(numA):
                    if(i==act[j]):
                        stvec=np.zeros((numS,1))
                        stvec[int(st[j])]=1
                        stmovec=np.zeros((1,numS))
                        stmovec[0,int(stmo[j])]=1
                        b[i,:,:]+=0.125*np.kron(stvec,stmovec)
        # Normalize counts into per-(state, action) transition distributions
        for i in range(numA):
            for j in range(numS):
                Blearned[j,i,:]=softmax(b[i,:,j])
    # Frobenius distance between the learned and true dynamics
    modeldeviation5K.append(np.linalg.norm(Blearned-Btrue))
    Qpi=SophAgentActions(T,Blearned)
    test_trials=1000
    score=0
    for ii in range(test_trials):
        # Evaluate the stochastic policy Qpi derived from the learned model
        state=startstate
        for i in range(T-1):
            action=np.random.choice([0,1,2,3,4,5,6,7],p=Qpi[i,:,state])
            rew,next_state=Environment2(state,action)
            #print(state,action)
            state=next_state
            if(state==goalstate):
                score+=1
                break
    successrate5K.append(score/test_trials)
#Learning
# Same procedure as above with 10000 rollouts per horizon and 10000 test trials.
trials=10000
b=np.zeros((numA,numS,numS))
Blearned=np.zeros((numS,numA,numS))
modeldeviation10K=[]
successrate10K=[]
for jj in range(T_min,T_max):
    T=jj
    for ii in range(trials):
        # One random-policy rollout of length T-1, recording
        # (action, previous state, next state) at every step.
        act=np.zeros((T-1))
        stmo=np.zeros((T-1))
        st=np.zeros((T-1))
        state=startstate
        for i in range(T-1):
            action=random.randint(0,7)
            act[i]=action
            stmo[i]=state
            rew,next_state=Environment2(state,action)
            st[i]=next_state
            state=next_state
        # Accumulate transition counts for each observed (action, s, s') triple.
        # NOTE(review): the k loop repeats each 0.125 increment numA(=8) times,
        # i.e. net weight 1.0 per observed transition — confirm this is intended.
        for i in range(numA):
            for j in range(T-1):
                for k in range(numA):
                    if(i==act[j]):
                        stvec=np.zeros((numS,1))
                        stvec[int(st[j])]=1
                        stmovec=np.zeros((1,numS))
                        stmovec[0,int(stmo[j])]=1
                        b[i,:,:]+=0.125*np.kron(stvec,stmovec)
    # Normalize the accumulated counts into a stochastic model via softmax.
    # NOTE(review): b and Blearned are not reset between horizons, so counts
    # accumulate across the jj loop — confirm this is intended.
    for i in range(numA):
        for j in range(numS):
            Blearned[j,i,:]=softmax(b[i,:,j])
    modeldeviation10K.append(np.linalg.norm(Blearned-Btrue))
    # Policy of the sophisticated agent under the learned model
    Qpi=SophAgentActions(T,Blearned)
    test_trials=10000
    score=0
    for ii in range(test_trials):
        state=startstate
        for i in range(T-1):
            # Sample an action from the step-i policy distribution
            action=np.random.choice([0,1,2,3,4,5,6,7],p=Qpi[i,:,state])
            rew,next_state=Environment2(state,action)
            #print(state,action)
            state=next_state
            if(state==goalstate):
                score+=1
                break
    successrate10K.append(score/test_trials)
# Figure 1: success rate of all four agents vs. time horizon.
import matplotlib.pyplot as plt
plt.plot(Time_horizon,successrate5K,linestyle='--',color='red')
plt.plot(Time_horizon,successrate10K,linestyle='-',color='red')
plt.plot(Time_horizon,SuccessRateQ10K,color='black')
plt.plot(Time_horizon,SuccessRateQ20K,color='blue')
plt.legend(["SophAgent (5K B-updates)","SophAgent (10K B-updates)","QLearning10K","QLearning20K"])
plt.title("High stochastic case (Level 4)")
plt.xlabel("Time-horizon")
plt.ylabel("Success rate over $10^4$ trials")
plt.savefig('ResultsLevel-4CA.eps',format='eps', dpi=500, bbox_inches='tight')
# Fix: start a fresh figure so the model-deviation chart does not inherit the
# four success-rate curves (previously both charts shared one set of axes,
# leaving the second saved file with six lines but a two-entry legend).
plt.figure()
# Figure 2: deviation of the learned transition model vs. time horizon.
plt.plot(Time_horizon,modeldeviation5K,linestyle='--',color='red')
plt.plot(Time_horizon,modeldeviation10K,linestyle='-',color='red')
plt.legend(["Model deviation 5K B-updates", "Model deviation 10K B-updates"])
plt.title("Model deviation (High Stochastic case)")
plt.xlabel("Time-horizon")
plt.ylabel("Model deviation $(||B_{Learned} - B_{true}||)$")
plt.savefig('ResultsLevel-4CB.eps',format='eps', dpi=500, bbox_inches='tight')
```
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Image segmentation
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/ImageSegmentation_ModelSubclassing.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/tree/master/community/en/ImageSegmentation_ModelSubclassing.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
</table>
This tutorial focuses on the task of image segmentation, using an encoder-decoder architecture, implemented with model subclassing API.
## What is image segmentation?
So far you have seen image classification, where the task of the network is to assign a label or class to an input image. However, suppose you want to know where an object is located in the image, the shape of that object, which pixel belongs to which object, etc. In this case you will want to segment the image, i.e., each pixel of the image is given a label. Thus, the task of image segmentation is to train a neural network to output a pixel-wise mask of the image. This helps in understanding the image at a much finer granularity, i.e., the pixel level. Image segmentation has many applications in medical imaging, self-driving cars and satellite imaging, to name a few.
The dataset that will be used for this tutorial is the [Oxford-IIIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/), created by Parkhi *et al*. The dataset consists of images, their corresponding labels, and pixel-wise masks. The masks are basically labels for each pixel. Each pixel is given one of three categories :
* Class 1 : Pixel belonging to the pet.
* Class 2 : Pixel bordering the pet.
* Class 3 : None of the above/ Surrounding pixel.
```
!pip install git+https://github.com/tensorflow/examples.git
import tensorflow as tf
assert tf.__version__.startswith('2')
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
tf.executing_eagerly()
from IPython.display import clear_output
import matplotlib.pyplot as plt
```
## Download the Oxford-IIIT Pets dataset
The dataset is already included in TensorFlow datasets, all that is needed to do is download it. The segmentation masks are included in version 3.0.0, which is why this particular version is used.
```
dataset, info = tfds.load('oxford_iiit_pet:3.0.0', with_info=True)
```
The following code performs a simple augmentation of flipping an image. In addition, the image is normalized to [-1, 1]. Finally, as mentioned above, the pixels in the segmentation mask are labeled either {1, 2, 3}. For the sake of convenience, let's subtract 1 from the segmentation mask, resulting in labels that are {0, 1, 2}.
```
def normalize(input_image, input_mask):
    """Scale the image into roughly [-1, 1] and shift mask labels {1,2,3} to {0,1,2}."""
    scaled_image = tf.cast(input_image, tf.float32) / 128.0 - 1
    shifted_mask = input_mask - 1
    return scaled_image, shifted_mask
@tf.function
def load_image_train(datapoint):
    """Resize a training example to 128x128, randomly flip it, and normalize."""
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    # Apply the same horizontal flip to image and mask half of the time.
    if tf.random.uniform(()) > 0.5:
        input_image = tf.image.flip_left_right(input_image)
        input_mask = tf.image.flip_left_right(input_mask)
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask
def load_image_test(datapoint):
    """Resize a test example to 128x128 and normalize (no augmentation)."""
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask
```
The dataset already contains the required splits of test and train and so let's continue to use the same split.
```
# Input pipeline: parallel map, then cache/shuffle/batch/repeat for training.
TRAIN_LENGTH = info.splits['train'].num_examples
BATCH_SIZE = 64
BUFFER_SIZE = 1000
STEPS_PER_EPOCH = TRAIN_LENGTH // BATCH_SIZE
train = dataset['train'].map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
test = dataset['test'].map(load_image_test)
train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
test_dataset = test.batch(BATCH_SIZE)
```
Let's take a look at an image example and it's corresponding mask from the dataset.
```
def display(display_list):
    """Show up to three images side by side: input, true mask, predicted mask."""
    plt.figure(figsize=(15, 15))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i+1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
        plt.axis('off')
    plt.show()
# Pull one example pair, cache it for later callbacks, and show it.
for image, mask in train.take(1):
    sample_image, sample_mask = image, mask
display([sample_image, sample_mask])
print(sample_image.shape)
```
## Define the model
The model used here consists of an encoder (downsampler) and decoder (upsampler).
Here we define encoder and decoder using model subclassing API. Each encoder block is basically a sequence of Conv layers and MaxPooling layers (UpSampling layers for decoder block), plus residual connection. The output activation of each block is used as input for the next block, where the activation is not only further processed in the sequence of Conv/MaxPooling layers as in a Sequential model, but also processed in residual layers and added to its output.
The reason to output three channels is because there are three possible labels for each pixel. Think of this as multi-classification where each pixel is classified into three classes.
```
OUTPUT_CHANNELS = 3
from tensorflow.keras import layers
class EncoderBlock(tf.keras.Model):
    """Downsampling block: two ReLU->SeparableConv->BN stages followed by a
    strided max-pool, with a strided 1x1-conv residual projection of the
    input added to the pooled output."""
    def __init__(self, filter_size):
        # initialize instance variables
        super(EncoderBlock, self).__init__()
        self.filter_size = filter_size
        # main path layers
        self.layer_1 = layers.Activation('relu')
        self.layer_2 = layers.SeparableConv2D(self.filter_size, 3, padding='same')
        self.layer_3 = layers.BatchNormalization()
        self.layer_4 = layers.Activation('relu')
        self.layer_5 = layers.SeparableConv2D(self.filter_size, 3, padding='same')
        self.layer_6 = layers.BatchNormalization()
        self.layer_7 = layers.MaxPooling2D(3, strides=2, padding='same')
        # residual projection to the pooled spatial size / filter count
        self.residual_layer = layers.Conv2D(self.filter_size, 1, strides=2, padding='same')
    def call(self, inputs):
        # main path
        x = self.layer_1(inputs)
        x = self.layer_2(x)
        x = self.layer_3(x)
        x = self.layer_4(x)
        x = self.layer_5(x)
        x = self.layer_6(x)
        x = self.layer_7(x)
        # residual path, then merge
        residual = self.residual_layer(inputs)
        x = layers.add([x, residual])
        return x
class DecoderBlock(tf.keras.Model):
    """Upsampling block: two ReLU->Conv2DTranspose->BN stages followed by 2x
    upsampling, with a residual path (2x upsample + 1x1 conv) of the input
    added to the output."""
    def __init__(self, filter_size):
        # initialize instance variables
        super(DecoderBlock, self).__init__()
        self.filter_size = filter_size
        # main path layers
        self.layer_1 = layers.Activation('relu')
        self.layer_2 = layers.Conv2DTranspose(self.filter_size, 3, padding='same')
        self.layer_3 = layers.BatchNormalization()
        self.layer_4 = layers.Activation('relu')
        self.layer_5 = layers.Conv2DTranspose(self.filter_size, 3, padding='same')
        self.layer_6 = layers.BatchNormalization()
        self.layer_7 = layers.UpSampling2D(2)
        # residual projection to the upsampled spatial size / filter count
        self.residual_layer_1 = layers.UpSampling2D(2)
        self.residual_layer_2 = layers.Conv2D(filter_size, 1, padding='same')
    def call(self, inputs):
        # main path
        x = self.layer_1(inputs)
        x = self.layer_2(x)
        x = self.layer_3(x)
        x = self.layer_4(x)
        x = self.layer_5(x)
        x = self.layer_6(x)
        x = self.layer_7(x)
        # residual path, then merge
        residual = self.residual_layer_1(inputs)
        residual = self.residual_layer_2(residual)
        x = layers.add([x, residual])
        return x
class ImageSegmentationModel(tf.keras.Model):
    """Encoder-decoder network producing per-pixel class maps.

    An entry conv stem downsamples once, three EncoderBlocks downsample
    further, four DecoderBlocks upsample back, and a final 3x3 conv emits
    `output_channels` maps (one per segmentation class).

    NOTE(review): the head uses a 'sigmoid' activation while training uses
    sparse_categorical_crossentropy over classes — 'softmax' looks intended;
    confirm before interpreting outputs as class probabilities.
    NOTE(review): the `dynamic` parameter is accepted but never forwarded to
    super().__init__ — confirm whether eager execution was intended.
    """
    def __init__(self, output_channels, dynamic=True):
        # initialize instance variables
        super(ImageSegmentationModel, self).__init__()
        self.output_channels = output_channels
        # entry stem: strided conv + BN + ReLU (first downsampling)
        self.entry_block_1 = layers.Conv2D(32, 3, strides=2, padding='same')
        self.entry_block_2 = layers.BatchNormalization()
        self.entry_block_3 = layers.Activation('relu')
        # encoder (downsampling path)
        self.encoder_block_1 = EncoderBlock(64)
        self.encoder_block_2 = EncoderBlock(128)
        self.encoder_block_3 = EncoderBlock(256)
        # decoder (upsampling path)
        self.decoder_block_1 = DecoderBlock(256)
        self.decoder_block_2 = DecoderBlock(128)
        self.decoder_block_3 = DecoderBlock(64)
        self.decoder_block_4 = DecoderBlock(32)
        # per-pixel classification head
        self.output_layer = layers.Conv2D(
            output_channels, 3, activation='sigmoid', padding='same')
    def call(self, inputs):
        x = self.entry_block_1(inputs)
        x = self.entry_block_2(x)
        x = self.entry_block_3(x)
        x = self.encoder_block_1(x)
        x = self.encoder_block_2(x)
        x = self.encoder_block_3(x)
        x = self.decoder_block_1(x)
        x = self.decoder_block_2(x)
        x = self.decoder_block_3(x)
        x = self.decoder_block_4(x)
        x = self.output_layer(x)
        return x
```
## Train the model
Now, all that is left to do is to compile and train the model. The loss used here is losses.sparse_categorical_crossentropy. The reason to use this loss function is that the network is trying to assign each pixel a label, just like multi-class prediction. In the true segmentation mask, each pixel has either a {0,1,2}. The network here is outputting three channels. Essentially, each channel is trying to learn to predict a class, and losses.sparse_categorical_crossentropy is the recommended loss for such a scenario. Using the output of the network, the label assigned to the pixel is the channel with the highest value. This is what the create_mask function is doing.
```
# Build and compile; sparse CE matches the integer {0,1,2} mask labels.
model = ImageSegmentationModel(OUTPUT_CHANNELS)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```
Let's try out the model to see what it predicts before training.
```
def create_mask(pred_mask):
    """Collapse per-pixel class scores to a single-channel label image for
    the first element of the batch."""
    labels = tf.argmax(pred_mask, axis=-1)
    return labels[0][..., tf.newaxis]
def show_predictions(dataset=None, num=1):
    """Display model predictions for *num* batches of *dataset*, or for the
    cached (sample_image, sample_mask) pair when no dataset is given."""
    if dataset:
        for image, mask in dataset.take(num):
            pred_mask = model.predict(image)
            display([image[0], mask[0], create_mask(pred_mask)])
    else:
        display([sample_image, sample_mask,
                 create_mask(model.predict(sample_image[tf.newaxis, ...]))])
# Sanity check: show the (untrained) model's prediction.
show_predictions()
```
Let's observe how the model improves while it is training. To accomplish this task, a callback function is defined below. Since this model does not use a pretrained encoder, it has to learn everything from scratch. As you can see, in the first few epochs the model cannot really predict the mask — a blank mask is predicted. Only after about 10 epochs does the model's prediction start to show something that makes sense.
```
class DisplayCallback(tf.keras.callbacks.Callback):
    """Show an updated sample prediction at the end of every epoch."""
    def on_epoch_end(self, epoch, logs=None):
        clear_output(wait=True)
        show_predictions()
        print ('\nSample Prediction after epoch {}\n'.format(epoch+1))
EPOCHS = 32
VAL_SUBSPLITS = 5
# Validation steps per epoch: a fifth of the test set's batches.
VALIDATION_STEPS = info.splits['test'].num_examples//BATCH_SIZE//VAL_SUBSPLITS
model_history = model.fit(train_dataset, epochs=EPOCHS,
                          steps_per_epoch=STEPS_PER_EPOCH,
                          validation_steps=VALIDATION_STEPS,
                          validation_data=test_dataset,
                          callbacks=[DisplayCallback()])
# Plot training vs. validation loss over the epochs.
loss = model_history.history['loss']
val_loss = model_history.history['val_loss']
epochs = range(EPOCHS)
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'bo', label='Validation loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss Value')
plt.ylim([0, 2])
plt.legend()
plt.show()
```
## Make predictions
Let's make some predictions. In the interest of saving time, the number of epochs was kept small, but you may set this higher to achieve more accurate results.
```
show_predictions(test_dataset, 3)
```
## Next steps
Now that you have an understanding of what image segmentation is and how it works, you can try this tutorial out with different intermediate layer outputs, or even different pretrained model. You may also challenge yourself by trying out the [Carvana](https://www.kaggle.com/c/carvana-image-masking-challenge/overview) image masking challenge hosted on Kaggle.
You may also want to explore the [UNet model](https://arxiv.org/pdf/1505.04597.pdf), which has cross connections between encoder and decoder block. These cross connections allow better high resolution localization since the image doesn't have to go through all the down-sampling steps to get to the output.
| github_jupyter |
# Desenhos de Curvas a partir da Curvatura
**Autor:** Leonardo Dantas
Este trabalho explora o uso da computação simbólica e numérica no estudo da curvatura. Como decorrência do Teorema Fundamental da Teoria Local das Curvas Planas, curvas planas podem ser desenhadas puramente a partir de suas funções de curvatura, desconsiderando movimentos rígidos. Implementações em código permitem visualizar esse fascinante fenômeno. Toda a implementação deste trabalho foi feita em [SageMath](https://www.sagemath.org/) usando um caderno interativo Jupyter.
O trabalho é constituído de quatro partes:
* Introdução ao Ambiente SageMath
* Conceitos Teóricos Gerais
* Construção de Curvas a partir de Curvatura
* Exemplos de Curvas Construídas
A formatação desse caderno é retirada do curso [Practical Numerical Methods with Python](https://github.com/numerical-mooc/numerical-mooc), com código licenciado sob a MIT license (c) 2014 L.A. Barba, C. Cooper, G.F. Forsyth, A. Krishnan. Para carregá-la, basta executar a célula seguinte.
```
# Carregar estilo do caderno
from IPython.core.display import HTML
from os import getcwd
css_file = f'{getcwd()}/styles/numericalmoocstyle.css'
HTML(open(css_file, 'r').read())
```
## 1. Introdução ao Ambiente SageMath
Como o ambiente SageMath forma a base desse trabalho, consideraremos alguns conceitos básicos da plataforma. Como ponto de partida, iniciamos o ambiente de visualização em LaTeX e importamos funções customizadas úteis.
```
# Habilitar exibição de LaTeX
%display latex
# Funções úteis para o SageMath
from utils import vector_simplify, pretty_results, get_vector_arguments
```
Dessas funções, `vector_simplify` permite simplificar curvas paramétricas algebricamente, `pretty_results` exibe resultados formatados em LaTeX e `get_vector_arguments` encontra o parâmetro de uma curva paramétrica.
### Plotagem
Através do SageMath, é possível definir funções simbólicas, substituir valores nelas e plotar curvas paramétricas. Alguns exemplos são a lemniscata e a hélice.
#### Lemniscata
```
a = var('a')
lemniscata_generalizada(t) = ( (a*cos(t)) / (1 + sin(t)^2), (a*sin(t)*cos(t)) / (1 + sin(t)^2) )
lemniscata_padrao = lemniscata_generalizada(t).subs(a==1)
parametric_plot(lemniscata_padrao, (t,0, 2*pi))
```
#### Hélice
```
a,b = var('a b')
helice_generalizada(t) = (a*cos(t),a*sin(t),b*t)
helice = helice_generalizada.subs([a==1,b==3])
parametric_plot3d(helice,(t,-2*pi, 2*pi), thickness=5)
```
## 2. Conceitos Teóricos Gerais
Ferramentas como o SageMath permitem encontrar características importantes de curvas regulares como curvatura e torção. Nessa seção veremos como isso pode ser feito.
### Curvatura em $\mathbb{R}^2$
Em $\mathbb{R}^2$, o primeiro passo para encontrar a curvatura de uma curva é colocá-la em sua parametrização por comprimento de arco. Toda curva regular admite uma tal parametrização e para isso temos as seguintes funções.
```
def obter_comp_arco(curva):
    """Return the symbolic arc length of *curva* from 0 to a new variable t."""
    # Identify the curve's free parameter
    x = get_vector_arguments(curva).pop()
    curva_x = curva.derivative(x)
    # Integrate the speed ||curva'(x)|| from 0 to t (assume t > 0)
    t = var("t")
    assume(t>0)
    s = integrate(norm(curva_x), (x,0,t))
    return s
def param_comp_arco(curva):
    """Reparametrize *curva* by arc length s (unit-speed parametrization)."""
    # Arc length from 0 to t
    comp_arco = obter_comp_arco(curva)
    t = comp_arco.arguments()[0]
    # Solve s = comp_arco(t) for t in terms of s (first solution branch only)
    s = var("s")
    param_comp_arco = solve(s == comp_arco,t)[0]
    # Substitute t(s) back into the original curve and simplify
    curva_subs = curva(t).subs(param_comp_arco)
    curva_subs = vector_simplify(curva_subs)
    # Rebuild as a symbolic function of s (Sage preparser syntax)
    curva_param(s) = tuple(coord for coord in curva_subs)
    return curva_param
```
Com uma curva parametrizada por comprimento de arco, é possível então obter a curvatura da seguinte forma.
```
def obter_curvatura_R2(curva):
    """Curvature k(s) = ||curva''(s)|| of a unit-speed plane curve.
    Assumes *curva* is already parametrized by arc length."""
    # Identify the arc-length parameter
    s = get_vector_arguments(curva).pop()
    curva_ss = curva.derivative(s, 2)
    curvatura = norm(curva_ss)(s)
    curvatura = curvatura.simplify_full()
    return curvatura
```
#### Teste de Implementação
Verificamos que a implementação está correta através de um teste simples com um círculo em $\mathbb{R}^2$.
```
a,b = var('a b')
elipse_R2(t) = (a*cos(t), b*sin(t))
circ = elipse_R2.subs([a==5,b==5])
pretty_results((r"C(t)", circ), use_colon=True)
comp_circ = obter_comp_arco(circ)
pretty_results((r"\int_0^t || C'(x) || dx", comp_circ))
circ_comp_arco = param_comp_arco(circ)
pretty_results((r"C(s)", circ_comp_arco), use_colon=True)
circ_curv = obter_curvatura_R2(circ_comp_arco)
pretty_results((r"k_C(s)", circ_curv))
```
### Curvatura em $\mathbb{R}^3$
Em $\mathbb{R}^3$, a curvatura pode ser obtida através da fórmula geral independente de parametrização.
$$
k(t) = \frac{|| \alpha'(t) \times \alpha''(t) ||}{|| \alpha'(t) ||^3}
$$
O cálculo desta fórmula está implementando na seguinte função.
```
def obter_curvatura_R3(curva):
    """Parametrization-independent curvature in R^3:
    k(t) = ||a'(t) x a''(t)|| / ||a'(t)||^3.
    NOTE(review): relies on a global symbolic variable t being defined."""
    # Velocity and acceleration
    curva_t = curva.derivative(t)
    curva_tt = curva_t.derivative(t)
    # General curvature formula
    curvatura = norm( curva_t.cross_product(curva_tt) ) / norm(curva_t)^3
    curvatura = curvatura.simplify_full()
    return curvatura
```
### Torção em $\mathbb{R}^3$
Como a curvatura, a torção pode ser obtida através de uma fórmula geral independente de parametrização.
$$
\tau(t) = \frac{ \langle (\alpha'(t) \times \alpha''(t)) , \alpha'''(t) \rangle }{ ||\alpha'(t) \times \alpha''(t)||^2 }
$$
```
def obter_torcao_R3(curva):
    """Parametrization-independent torsion in R^3:
    tau(t) = <a'(t) x a''(t), a'''(t)> / ||a'(t) x a''(t)||^2.
    NOTE(review): relies on a global symbolic variable t being defined."""
    # Derivatives up to third order
    curva_t = curva.derivative(t)
    curva_tt = curva_t.derivative(t)
    curva_ttt = curva_tt.derivative(t)
    # Cross product of velocity and acceleration
    prod_veloc_acel = curva_t.cross_product(curva_tt)
    # General torsion formula (`*` here is the vector dot product)
    torcao = (prod_veloc_acel * curva_ttt) / norm(prod_veloc_acel)^2
    torcao = torcao.simplify_full()
    return torcao
```
#### Testes de Implementação
Verificamos a corretude da implementação com um exemplo simples: o círculo unitário em $\mathbb{R}^3$. Reconhecidamente, essa curva possui curvatura identicamente igual a $1$ e torção identicamente igual a $0$. Caso os resultados sejam diferentes, temos um erro de implementação.
```
# Construção do Círculo Unitário
elipse(t) = (a*cos(t),b*sin(t),1)
circ_unit = elipse.subs([a==1, b==1])
# Calculo de Curvatura e Torção em R3
curvatura_circ_unit = obter_curvatura_R3(circ_unit)
torcao_circ_unit = obter_torcao_R3(circ_unit)
# Teste de Sanidade com Círculo Unitário
assert curvatura_circ_unit == 1
assert torcao_circ_unit == 0
# Exibição de Resultados
pretty_results((r"k_{\text{circ}}",curvatura_circ_unit),
(r"\tau_{\text{circ}}",torcao_circ_unit))
```
## 3. Construção de Curvas a partir de Curvatura
O Teorema Fundamental da Teoria Local de Curvas Planas nos diz que uma curva é unicamente determinada por sua função curvatura, desconsiderando movimentos rígidos. Há duas formas de reconstruir uma curva $\beta$ a partir de sua função curvatura $k_\beta(s)$.
### Construção Simbólica
Usando computação simbólica, o procedimento é o seguinte.
1. Encontrar a função ângulo $\theta(s)$ da curva através da fórmula
$$
\theta(s) = \int k(s) ds + \theta_0
$$
onde $\theta_0$ é constante de integração.
2. Reconstruir $\beta(s)$ a partir da função ângulo com
$$
\beta(s) = \left( \int \cos\left( \theta(s) \right) ds + c, \int \sin \left( \theta(s) \right) ds + d \right)
$$
onde $c$ e $d$ são constantes de integração
No SageMath, implementamos da seguinte forma.
```
def construir_curva_simb(curvatura, teta_0=0,c=0,d=0):
# Identificar Parâmetro da Curva
s = curvatura.arguments()[0]
# Construir Função Ângulo
teta(s) = integrate(curvatura, s) + teta_0
# Construir Curva a Partir de Função Ângulo
curva_ang(s) = ( integrate( cos(teta(s)), s ), integrate( sin(teta(s)), s ) )
curva_ang = vector_simplify(curva_ang)
# Exportar função com n-tupla
curva_construida = tuple(coord for coord in curva_ang)
return curva_construida
```
#### Testes de Implementação
Um teste de implementação razoável é tentar reconstruir o círculo unitário. Reconhecidamente, esse curva possui curvatura identicamente igual a $1$. Portanto, se obtivermos algo diferente, há um erro na implementação.
```
# Teste de Sanidade com Círculo Unitário
curvatura_circ_unit(s) = 1
circ_unit_simb(s) = construir_curva_simb(curvatura_circ_unit)
pretty_results((r"C(t)",circ_unit_simb), use_colon=True)
parametric_plot(circ_unit_simb, (s,-2*pi, 2*pi))
```
### Construção Numérica
Uma limitação da construção simbólica é a integrabilidade das funções envolvidas. Conforme veremos, em muitos casos não é possível obter uma solução exata para as integrais do primeiro método. Nesses casos, partimos para uma construção numérica. É possível obter a curva $\beta(s) = (x(s), y(s))$ resolvendo numericamente o seguinte sistema de equações diferenciais.
$$
\begin{align*}
\theta'(s) &= k(s) \\
x'(s) &= \cos(\theta(s)) \\
y'(s) &= \sin(\theta(s))
\end{align*}
$$
A solução numérica nos fornece um conjunto de pontos que então podem ser plotados. Em SageMath, temos a seguinte implementação.
```
def construir_curva_num(curvatura, r_inicial=-50, r_final=50,
                        vars_iniciais=[0,0,0], tam_passo=0.01):
    """Numerically reconstruct and plot a plane curve from its curvature.

    Solves theta' = k(r), x' = cos(theta), y' = sin(theta) with RK4 and
    plots the resulting (x, y) points.

    Parameters: curvatura -- symbolic curvature k(s);
    r_inicial, r_final -- integration interval; vars_iniciais -- initial
    values [theta, x, y]; tam_passo -- RK4 step size.
    NOTE(review): relies on a global symbolic variable s being defined.
    """
    # Rewrite the curvature in terms of the independent variable r
    r = var('r')
    kappa(r) = curvatura(s).subs(s==r)
    # Right-hand sides of the ODE system for (teta, x, y)
    teta, x,y = var('teta x y')
    sis_trig = [kappa(r),cos(teta), sin(teta)]
    condIniciais =[r_inicial, \
                   vars_iniciais[0], \
                   vars_iniciais[1],\
                   vars_iniciais[2]]
    # Solve via fourth-order Runge-Kutta
    solucoes_sis = desolve_system_rk4(sis_trig, [teta, x, y], \
                                      ics=condIniciais, ivar=r, \
                                      step=tam_passo, end_points=r_final
                                      )
    # Keep only the (x, y) points; solutions come as (r, teta, x, y)
    pontos = [[k,m] for i,j,k,m in solucoes_sis]
    # Plot the reconstructed curve
    plt = list_plot(pontos, plotjoined=true, aspect_ratio=1)
    plt.show()
```
#### Teste de Implementação
A abordagem numérica não nos fornece uma forma fechada para a curva. No entanto, podemos visualizá-la através dos pontos produzidos. Nesse sentido, um teste razoável de implementação é o mesmo círculo testado na abordagem simbólica.
```
curvatura_circ_unit(s) = 1
construir_curva_num(curvatura_circ_unit)
```
Observe que embora o círculo esteja deslocado e não esteja perfeitamente redondo, é aproximadamente o mesmo círculo obtido com a abordagem simbólica. Usaremos essa estratégia quando a integração simbólica não for possível.
## 4. Exemplos de Curvas Geradas
Nessa seção, veremos exemplos de diversas curvas interessantes desenhadas a partir de suas curvaturas.
#### Catenária
As catenárias constituem uma classe de curvas conhecida. A curvatura delas é dada por
$$
k_{\text{catenária}}(s) = \frac{a}{a^2 + s^2}
$$
Instanciaremos a fórmula geral da curvatura da catenária no SageMath. Em seguida, montaremos uma catenária padrão $\beta(s)$ representando o caso particular quando $a=1$.
```
a = var('a')
curv_catenaria_generalizada(s) = a / (a^2 + s^2)
curv_catenaria = curv_catenaria_generalizada.subs(a==1)
```
Com isso, é possível reconstruir a curva catenária padrão.
```
catenaria(s) = construir_curva_simb(curv_catenaria)
pretty_results((r"\beta(s)", catenaria), use_colon=True)
parametric_plot(catenaria,(s,-5, 5), thickness=1)
```
#### Espiral Logarítmica
Já a curvatura da espiral logarítmica é dada por
$$
k_{\text{espiral}}(s) = \frac{1}{bs}
$$
Como anteriormente, instanciamos a versão generalizada da curvatura e depois implementamos o caso específico $\gamma(s)$ quando $b=\frac{1}{3}$.
```
b = var('b')
curv_espiral_log_gen(s) = 1/(b*s)
curv_espiral_log = curv_espiral_log_gen.subs([b==1/3])
espiral_log(s) = construir_curva_simb(curv_espiral_log)
pretty_results((r"\gamma(s)", espiral_log), use_colon=True)
```
Note como a expressão é um tanto mais complexa do que a da catenária.
```
parametric_plot(espiral_log,(s,-10, 10), thickness=1)
```
Apesar do sucesso na plotagem da curva, a abordagem simbólica começa a demonstrar problemas.
#### Espiral de Euler
A espiral de Euler representa um curva particularmente interessante. Observe que é possível computá-la simbólicamente.
```
a, n = var('a n')
curv_euler_generalizada(s) = - s^n / a^(n+1)
curv_euler = curv_euler_generalizada.subs([a==1,n==1])
espiral_euler = construir_curva_simb(curv_euler)
pretty_results((r"\delta(s)", espiral_euler), use_colon=True)
```
No entanto, não é possível desenhá-la a partir da expressão obtida. A célula de código a seguir produz um erro.
```
parametric_plot(espiral_euler, (s, 1, 2), thickness=2)
```
Portanto, partimos para a construção numérica.
```
construir_curva_num(curv_euler)
```
Como é possível observar, a abordagem numérica nos permite visualizar curvas com construção simbólica complicada. Deste ponto em diante, todas as curvas serão construídas numericamente.
#### Curvaturas com Funções Trigonométricas
Quando a curvatura inclui funções trigonométricas, as curvas assumem formatos curiosos.
##### Curva $\epsilon$
```
curv_epsilon(s) = cos(s)
pretty_results((r"\epsilon(s)", curv_epsilon), use_colon=True)
construir_curva_num(curv_epsilon,vars_iniciais=[pi/2,0,0],
r_inicial=-10, r_final=10)
```
##### Curva $\zeta$
```
curv_zeta(s) = s*sin(s)
pretty_results((r"\zeta(s)", curv_zeta), use_colon=True)
construir_curva_num(curv_zeta, vars_iniciais=[23*pi/16,0,0])
```
##### Curva $\eta$
```
curv_eta(s) = 2.4*sin(s)
pretty_results((r"\eta(s)", curv_eta), use_colon=True)
construir_curva_num(curv_eta,vars_iniciais=[pi/4,0,0])
```
##### Curva $\iota$
```
curv_iota(s) = sin(s)*s^2
pretty_results((r"\iota(s)", curv_iota), use_colon=True)
construir_curva_num(curv_iota,vars_iniciais=[pi/4,0,0],
r_inicial=-10, r_final=10)
```
##### Curva $\lambda$
```
curv_lambda(s) = -(2/3)*abs(csc(2*s))
pretty_results((r"\lambda(s)", curv_lambda), use_colon=True)
construir_curva_num(curv_lambda,vars_iniciais=[0,0,0], tam_passo=0.01,
r_inicial=0.1, r_final=19)
```
##### Curva $\nu$
```
curv_nu(t) = t*tan(t)
pretty_results((r"\nu(t)", curv_nu), use_colon=True)
construir_curva_num(curv_nu,vars_iniciais=[0,0,0], tam_passo=0.01,
r_inicial=0.1, r_final=19)
```
##### Curva $\xi$
```
curv_xi(t) = arcsin(t)
pretty_results((r"\xi(t)", curv_xi), use_colon=True)
construir_curva_num(curv_xi,vars_iniciais=[0,0,0], tam_passo=0.01,
r_inicial=0.1, r_final=7)
```
#### Curvaturas com Outras Funções
Além de funções trigonométricas, até mesmo funções especiais como a função gama podem ser usadas para a curvatura. A curva a seguir exemplifica essa utilização.
```
assume(2*s+1>0)
curv_mu(s) = integrate(t^(s-1/2) * e^(-t), t,0, infinity)
pretty_results((r"\lambda(s)", curv_mu), use_colon=True)
construir_curva_num(curv_mu,r_inicial=1, r_final=5,
vars_iniciais=[pi/2,0,0])
```
## Conclusão
Ao fim desse trabalho, constatamos como a computação simbólica e numérica é uma poderosa ferramenta para visualizações em geometria diferencial. Apesar de eventuais desafios, como a integrabilidade de funções, pacotes como o SageMath fornecem recursos que facilitam o estudo de curvas e suas características. Tais capacidades fortalecem a intuição por trás desses conceitos e permitem uma melhor compreensão desses objetos matemáticos.
## Referências
1. OPREA, John. **Differential Geometry and Its Applications**. 2. ed. Washington, Dc: Mathematical Association Of America, 2007. 469 p.
2. GRAY, Alfred; ABBENA, Elsa; SALAMON, Simon. **Modern Differential Geometry of Curves and Surfaces with Mathematica**. 3. ed. Boca Raton: Chapman And Hall/Crc, 2006. 974 p.
| github_jupyter |
```
# Seq2Seq — the sequence learning/generation model behind chatbots,
# translation, image captioning, etc. Here: translate English words to Korean.
import tensorflow as tf
import numpy as np
# S: symbol marking the start of the decoder input
# E: symbol marking the end of the decoder output
# P: padding symbol for sequences shorter than the batch's max time steps,
#    e.g. with max length 4:
#    word -> ['w', 'o', 'r', 'd']
#    to   -> ['t', 'o', 'P', 'P']
char_arr = [c for c in 'SEPabcdefghijklmnopqrstuvwxyz단어나무놀이소녀키스사랑']
num_dic = {n: i for i, n in enumerate(char_arr)}
dic_len = len(num_dic)
# Training data: [English word, Korean word] pairs.
seq_data = [['word', '단어'], ['wood', '나무'],
            ['game', '놀이'], ['girl', '소녀'],
            ['kiss', '키스'], ['love', '사랑']]
def make_batch(seq_data):
    """Build encoder-input, decoder-input and target batches from word pairs.

    Relies on the module-level num_dic / dic_len character vocabulary.
    Returns (one-hot encoder inputs, one-hot decoder inputs, index targets).
    """
    input_batch = []
    output_batch = []
    target_batch = []
    for seq in seq_data:
        # Encoder input: vocabulary index of each source-word character.
        # (NOTE: `input` shadows the builtin of the same name.)
        input = [num_dic[n] for n in seq[0]]
        # Decoder input: target word prefixed with the start symbol 'S'.
        output = [num_dic[n] for n in ('S' + seq[1])]
        # Training target: target word suffixed with the end symbol 'E'.
        target = [num_dic[n] for n in (seq[1] + 'E')]
        input_batch.append(np.eye(dic_len)[input])
        output_batch.append(np.eye(dic_len)[output])
        # Targets stay as indices (sparse_softmax_cross_entropy_with_logits).
        target_batch.append(target)
    return input_batch, output_batch, target_batch
#########
# Hyperparameters
######
learning_rate = 0.01
n_hidden = 128
total_epoch = 100
# Input and output are both one-hot encoded, so they share the same size.
n_class = n_input = dic_len
#########
# Model definition (TF1 graph mode)
######
# In Seq2Seq the encoder input and decoder input have the same format:
# [batch size, time steps, input size]
enc_input = tf.placeholder(tf.float32, [None, None, n_input])
dec_input = tf.placeholder(tf.float32, [None, None, n_input])
# [batch size, time steps]
targets = tf.placeholder(tf.int64, [None, None])
# Encoder cell.
with tf.variable_scope('encode'):
    enc_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
    enc_cell = tf.nn.rnn_cell.DropoutWrapper(enc_cell, output_keep_prob=0.5)
    outputs, enc_states = tf.nn.dynamic_rnn(enc_cell, enc_input,
                                            dtype=tf.float32)
# Decoder cell.
with tf.variable_scope('decode'):
    dec_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
    dec_cell = tf.nn.rnn_cell.DropoutWrapper(dec_cell, output_keep_prob=0.5)
    # The core Seq2Seq idea: feed the encoder's final state in as the
    # decoder's initial state.
    outputs, dec_states = tf.nn.dynamic_rnn(dec_cell, dec_input,
                                            initial_state=enc_states,
                                            dtype=tf.float32)
# Per-time-step logits over the character vocabulary.
model = tf.layers.dense(outputs, n_class, activation=None)
cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=model, labels=targets))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
#########
# Training
######
sess = tf.Session()
sess.run(tf.global_variables_initializer())
input_batch, output_batch, target_batch = make_batch(seq_data)
for epoch in range(total_epoch):
    _, loss = sess.run([optimizer, cost],
                       feed_dict={enc_input: input_batch,
                                  dec_input: output_batch,
                                  targets: target_batch})
    print('Epoch:', '%04d' % (epoch + 1),
          'cost =', '{:.6f}'.format(loss))
print('최적화 완료!')
#########
# Translation test
######
# Takes an input word, predicts the translated word, and decodes it.
def translate(word):
    """Translate *word* using the trained Seq2Seq model.

    At prediction time the true target word is unknown, so the decoder
    input is filled with meaningless 'P' padding of the same length.
    """
    # e.g. for input 'word' the paired dummy target is 'PPPP'
    seq_data = [word, 'P' * len(word)]
    input_batch, output_batch, target_batch = make_batch([seq_data])
    # The model output has shape [batch size, time step, input];
    # argmax over axis 2 picks the most probable character at each step.
    # NOTE(review): this adds a new argmax op to the graph on every call;
    # hoisting it outside the function would avoid graph growth.
    prediction = tf.argmax(model, 2)
    result = sess.run(prediction,
                      feed_dict={enc_input: input_batch,
                                 dec_input: output_batch,
                                 targets: target_batch})
    # Map predicted indices back to characters.
    decoded = [char_arr[i] for i in result[0]]
    # Drop everything from the end-of-sequence marker 'E' onwards.
    # NOTE(review): raises ValueError if 'E' is never predicted — confirm intended.
    end = decoded.index('E')
    translated = ''.join(decoded[:end])
    return translated
# Translation smoke tests (including deliberately scrambled inputs).
print('\n=== 번역 테스트 ===')
print('word ->', translate('word'))
print('wodr ->', translate('wodr'))
print('love ->', translate('love'))
print('loev ->', translate('loev'))
print('abcd ->', translate('abcd'))
```
| github_jupyter |
# Building Fast Queries on a CSV
Skills: Object Oriented Programming, Time and Space Complexity Analysis
We will imagine that we own an online laptop store and want to build a way to answer a few different business questions about our inventory.
```
# Open and explore the dataset
import csv
with open('laptops.csv') as file:
read_file = csv.reader(file)
laptop_prices = list(read_file)
header = laptop_prices[0]
rows = laptop_prices[1:]
print(header)
print(rows)
# Create a class with the csv filename as input to read the file
class Inventory():
    """In-memory view of the laptop inventory CSV.

    Attributes:
        header: list of column names (the first CSV row).
        rows:   remaining CSV rows, with the price (last column) cast to int.
    """

    def __init__(self, csv_filename):
        with open(csv_filename) as file:
            records = list(csv.reader(file))
        self.header, self.rows = records[0], records[1:]
        for record in self.rows:
            record[-1] = int(record[-1])
# Sanity check: load the inventory and inspect the header and row count.
new_class = Inventory('laptops.csv')
print(new_class.header)
print(len(new_class.rows))
# Improve the class to get laptop given laptop id as input
class Inventory():
    """Laptop inventory with O(n) lookup of a row by laptop id."""

    def __init__(self, csv_filename):
        with open(csv_filename) as file:
            records = list(csv.reader(file))
        self.header, self.rows = records[0], records[1:]
        for record in self.rows:
            record[-1] = int(record[-1])

    def get_laptop_from_id(self, laptop_id):
        """Return the first row whose id column equals *laptop_id*, or None."""
        matches = (row for row in self.rows if row[0] == laptop_id)
        return next(matches, None)
new_class = Inventory('laptops.csv')
# One id that exists in the dataset and one that does not.
print(new_class.get_laptop_from_id('3362737'))
print(new_class.get_laptop_from_id('3362736'))
# To reduce the time complexity of this lookup, we preprocess the data into a dict
# where the keys are the IDs and the values are the rows.
class Inventory():
    """Laptop inventory with both O(n) and O(1) lookup of a row by id.

    Preprocessing in __init__ builds ``id_to_row`` (laptop id -> row) so
    that repeated lookups become dictionary accesses instead of scans.
    """

    def __init__(self, csv_filename):
        with open(csv_filename) as file:
            list_file = list(csv.reader(file))
        self.header = list_file[0]
        self.rows = list_file[1:]
        for row in self.rows:
            row[-1] = int(row[-1])  # price column as int
        # Preprocess: map laptop id (first column) -> full row.
        self.id_to_row = {}
        for row in self.rows:
            self.id_to_row[row[0]] = row

    def get_laptop_from_id(self, laptop_id):
        """O(n) linear scan: return the row with this id, or None."""
        for row in self.rows:
            if row[0] == laptop_id:
                return row
        return None

    def get_laptop_from_id_fast(self, laptop_id):
        """O(1) dict lookup: return the row with this id, or None.

        Bug fix: the original looped over ``self.rows`` and returned the
        *first* row whenever the id existed anywhere in ``id_to_row``, so
        it answered with the wrong laptop. Look the row up directly.
        """
        return self.id_to_row.get(laptop_id)
# The fast lookup should return the same rows as the linear scan.
new_class = Inventory('laptops.csv')
print(new_class.get_laptop_from_id_fast('3362737'))
print(new_class.get_laptop_from_id_fast('3362736'))
# Let's compare the performance of those two methods
class Inventory():
    """Laptop inventory with both O(n) and O(1) lookup of a row by id.

    Identical to the previous version; redefined here so the timing cell
    below is self-contained.
    """

    def __init__(self, csv_filename):
        with open(csv_filename) as file:
            list_file = list(csv.reader(file))
        self.header = list_file[0]
        self.rows = list_file[1:]
        for row in self.rows:
            row[-1] = int(row[-1])  # price column as int
        # Preprocess: map laptop id (first column) -> full row.
        self.id_to_row = {}
        for row in self.rows:
            self.id_to_row[row[0]] = row

    def get_laptop_from_id(self, laptop_id):
        """O(n) linear scan: return the row with this id, or None."""
        for row in self.rows:
            if row[0] == laptop_id:
                return row
        return None

    def get_laptop_from_id_fast(self, laptop_id):
        """O(1) dict lookup: return the row with this id, or None.

        Bug fix: the original looped over ``self.rows`` and returned the
        *first* row whenever the id existed anywhere in ``id_to_row``, so
        it answered with the wrong laptop. Look the row up directly.
        """
        return self.id_to_row.get(laptop_id)
import time
import random
# 10,000 random ids; most will not exist in the inventory (worst case for the scan).
ids = [str(random.randint(1000000,9999999)) for _ in range(10000)]
new_class = Inventory('laptops.csv')
# Total wall-clock time of the O(n) linear-scan lookups.
total_time_no_dict = 0
for each in ids:
    start = time.time()
    new_class.get_laptop_from_id(each)
    end = time.time()
    total_time_no_dict += (end-start)
# Total wall-clock time of the O(1) dict lookups.
total_time_dict = 0
for each in ids:
    start = time.time()
    new_class.get_laptop_from_id_fast(each)
    end = time.time()
    total_time_dict += (end-start)
print(total_time_no_dict, total_time_dict)
# Let's preprocess the data so that check_promotion_dollars() can be made to run faster.
class Inventory():
    """Laptop inventory with O(1) id lookup and promotion-price checks.

    Preprocessing in __init__ builds:
      * ``id_to_row`` — maps laptop id (first column) to its full row
      * ``prices``    — set of all laptop prices (last column)
    """

    def __init__(self, csv_filename):
        with open(csv_filename) as file:
            list_file = list(csv.reader(file))
        self.header = list_file[0]
        self.rows = list_file[1:]
        for row in self.rows:
            row[-1] = int(row[-1])  # price column as int
        self.id_to_row = {}
        self.prices = set()
        for row in self.rows:
            self.id_to_row[row[0]] = row
            self.prices.add(row[-1])

    def get_laptop_from_id(self, laptop_id):
        """O(n) linear scan: return the row with this id, or None."""
        for row in self.rows:
            if row[0] == laptop_id:
                return row
        return None

    def get_laptop_from_id_fast(self, laptop_id):
        """O(1) dict lookup: return the row with this id, or None.

        Bug fix: the original looped over ``self.rows`` and returned the
        *first* row whenever the id existed anywhere in ``id_to_row``.
        """
        return self.id_to_row.get(laptop_id)

    def check_promotion_dollars(self, dollars):
        """True if one laptop — or the sum of two (possibly the same row
        twice) — costs exactly *dollars*. O(n^2) over the rows."""
        for row in self.rows:
            if row[-1] == dollars:
                return True
        for i in self.rows:
            for j in self.rows:
                if i[-1] + j[-1] == dollars:
                    return True
        return False

    def check_promotion_dollars_fast(self, dollars):
        """Same answer as ``check_promotion_dollars`` using the price set.

        Bug fix: the original double-looped over ``self.prices`` (O(n^2)),
        defeating the point of the preprocessing. Testing membership of
        ``dollars - price`` makes the pair check O(n).
        """
        if dollars in self.prices:
            return True
        for price in self.prices:
            if dollars - price in self.prices:
                return True
        return False
# One promotion-feasible amount and one that should be infeasible.
new_class = Inventory('laptops.csv')
print(new_class.check_promotion_dollars_fast(1000))
print(new_class.check_promotion_dollars_fast(442))
# Let's compare the performance of the last two functions that we wrote
class Inventory():
    """Laptop inventory with O(1) id lookup and promotion-price checks.

    Identical to the previous version; redefined here so the timing cell
    below is self-contained.
    """

    def __init__(self, csv_filename):
        with open(csv_filename) as file:
            list_file = list(csv.reader(file))
        self.header = list_file[0]
        self.rows = list_file[1:]
        for row in self.rows:
            row[-1] = int(row[-1])  # price column as int
        self.id_to_row = {}
        self.prices = set()
        for row in self.rows:
            self.id_to_row[row[0]] = row
            self.prices.add(row[-1])

    def get_laptop_from_id(self, laptop_id):
        """O(n) linear scan: return the row with this id, or None."""
        for row in self.rows:
            if row[0] == laptop_id:
                return row
        return None

    def get_laptop_from_id_fast(self, laptop_id):
        """O(1) dict lookup: return the row with this id, or None.

        Bug fix: the original looped over ``self.rows`` and returned the
        *first* row whenever the id existed anywhere in ``id_to_row``.
        """
        return self.id_to_row.get(laptop_id)

    def check_promotion_dollars(self, dollars):
        """True if one laptop — or the sum of two (possibly the same row
        twice) — costs exactly *dollars*. O(n^2) over the rows."""
        for row in self.rows:
            if row[-1] == dollars:
                return True
        for i in self.rows:
            for j in self.rows:
                if i[-1] + j[-1] == dollars:
                    return True
        return False

    def check_promotion_dollars_fast(self, dollars):
        """Same answer as ``check_promotion_dollars`` using the price set.

        Bug fix: the original double-looped over ``self.prices`` (O(n^2));
        membership of ``dollars - price`` makes the pair check O(n).
        """
        if dollars in self.prices:
            return True
        for price in self.prices:
            if dollars - price in self.prices:
                return True
        return False
import random
import time
# 100 random promotion amounts to benchmark both implementations.
prices = [random.randint(100,5000) for _ in range(100)]
new_class = Inventory('laptops.csv')
# Total wall-clock time of the row-based O(n^2) check.
total_time_no_set = 0
for price in prices:
    start = time.time()
    new_class.check_promotion_dollars(price)
    end = time.time()
    total_time_no_set += (end-start)
# Total wall-clock time of the set-based check.
total_time_set = 0
for price in prices:
    start = time.time()
    new_class.check_promotion_dollars_fast(price)
    end = time.time()
    total_time_set += (end-start)
print(total_time_no_set, total_time_set)
# We want to write a method that efficiently answers the query: given a budget of D dollars,
# find all laptops whose price is at most D.
class Inventory():
    """Laptop inventory supporting id lookup, promotion checks, and budget
    queries (first laptop strictly more expensive than a given price).
    """

    def __init__(self, csv_filename):
        with open(csv_filename) as file:
            list_file = list(csv.reader(file))
        self.header = list_file[0]
        self.rows = list_file[1:]
        for row in self.rows:
            row[-1] = int(row[-1])  # price column as int
        self.id_to_row = {}
        self.prices = set()
        for row in self.rows:
            self.id_to_row[row[0]] = row
            self.prices.add(row[-1])

        def row_price(row):
            # Sort key: the integer price in the last column.
            return row[-1]
        self.rows_by_price = sorted(self.rows, key=row_price)

    def get_laptop_from_id(self, laptop_id):
        """O(n) linear scan: return the row with this id, or None."""
        for row in self.rows:
            if row[0] == laptop_id:
                return row
        return None

    def get_laptop_from_id_fast(self, laptop_id):
        """O(1) dict lookup: return the row with this id, or None.

        Bug fix: the original looped over ``self.rows`` and returned the
        *first* row whenever the id existed anywhere in ``id_to_row``.
        """
        return self.id_to_row.get(laptop_id)

    def check_promotion_dollars(self, dollars):
        """True if one laptop — or the sum of two (possibly the same row
        twice) — costs exactly *dollars*. O(n^2) over the rows."""
        for row in self.rows:
            if row[-1] == dollars:
                return True
        for i in self.rows:
            for j in self.rows:
                if i[-1] + j[-1] == dollars:
                    return True
        return False

    def check_promotion_dollars_fast(self, dollars):
        """Same answer as ``check_promotion_dollars`` using the price set.

        Bug fix: the original double-looped over ``self.prices`` (O(n^2));
        membership of ``dollars - price`` makes the pair check O(n).
        """
        if dollars in self.prices:
            return True
        for price in self.prices:
            if dollars - price in self.prices:
                return True
        return False

    def find_first_laptop_more_expensive(self, price):
        """Binary search over ``rows_by_price``: return the index of the
        first (cheapest) laptop strictly more expensive than *price*,
        or -1 if every laptop costs at most *price*. O(log n).

        Bug fix: the original could discard the answer by setting
        ``range_end = range_middle - 1``, and its early return on an exact
        price match was wrong when several laptops share that price.
        """
        range_start = 0
        range_end = len(self.rows_by_price) - 1
        while range_start < range_end:
            range_middle = (range_end + range_start) // 2
            if self.rows_by_price[range_middle][-1] > price:
                range_end = range_middle
            else:
                range_start = range_middle + 1
        if self.rows_by_price[range_start][-1] <= price:
            return -1
        return range_start
# Budget queries against the price-sorted rows.
new_class = Inventory('laptops.csv')
print(new_class.find_first_laptop_more_expensive(1000))
print(new_class.find_first_laptop_more_expensive(10000))
```
| github_jupyter |
```
from IPython.core.display import display, HTML, Markdown, clear_output, Javascript
from string import Template
import pandas as pd
import json, random
import yaml
import copy
import networkx as nx
import math
import xml.etree.ElementTree as ET
import ipywidgets as widgets
import os
import time
import os.path
from os import path as p
# Hide the notebook input prompts so the exercise UI looks cleaner.
display(HTML('<style>.prompt{width: 0px; visibility: hidden}</style>'))
# load HTML script
# Load sigma.js and its plugins (drag nodes, curved/labelled edges, SVG export).
HTML('''
<script src="../lib/sigmajs/sigma.min.js"></script>
<script src="../lib/sigmajs/sigma.plugins.dragNodes.js"></script>
<script src="../lib/sigmajs/sigma.canvas.edges.labels.curvedArrow.js"></script>
<script src="../lib/sigmajs/sigma.canvas.edges.curvedArrow.js"></script>
<script src="../lib/sigmajs/sigma.canvas.edges.curve.js"></script>
<script src="../lib/sigmajs/sigma.canvas.edges.labels.curve.js"></script>
<script src="../lib/sigmajs/sigma.canvas.edges.labels.def.js"></script>
<script src="../lib/sigmajs/sigma.exporters.svg.js"></script>
<script src="../lib/sigmajs/settings.js"></script>
''')
# Load the configuration for exercise 4 and render its intro markdown text.
exercise = '4'
%run -i "exercise_graph/script/load_config.py" $exercise
txt = open(yaml_text[0], 'r',encoding='utf-8')
content = txt.read()
Markdown(content)
exercise = '0'
submit_cell = '0'
# Parse the graph (defines graph_data etc.) and set up the intro widgets.
%run -i "../lib/pygraph/parserx" $yaml_graph_path
%run -i "../lib/pygraph/tools_choice.py" intro
# draw graph
# Read the JS template that renders the graph with sigma.js.
js_text_template = Template(open(yaml_js_path,'r').read())
# Substitute the graph structure we built into the JS template.
js_text = js_text_template.substitute({'graph_data': json.dumps(graph_data),
                                      'exercise' : exercise,
                                      'container':'sigma-container'+exercise,
                                      'data':not_editable,
                                      's':'s'+exercise,
                                      'n':'n'+exercise,
                                      'ef':'`'+str(friends_e)+'`',
                                      'nf':'`'+str(friends_n)+'`',
                                      's_cell': 'cell'+exercise,
                                      'submit_cell':submit_cell})
html_template = Template('''
<link rel="stylesheet" href="../css/style.css">
<div style="width:100%;margin-top: 30px; display: flex;flex-wrap: wrap">
<div class="row" style="width:100%;border: solid; border-color: #1e6add; border-width: 2px;">
<div id="sigma-container'''+exercise+'''" style="height:500px"></div>
</div>
<div class="btn-group_1" role="group">
''' + html_text + '''
</div>
</div>
<script> $js_text </script>
''')
# Render the HTML with variable substitution.
HTML(html_template.substitute({'js_text': js_text}))
txt = open(yaml_text[1], 'r',encoding='utf-8')
content = txt.read()
Markdown(content)
exercise = '4'
#requisiti esercizio (bottoni ecc)
%run -i "../lib/pygraph/tools_choice.py" $exercise
#parsing del grafo xml o graphml, viene creato graph_data, original_nodes e original_edges
%run -i "../lib/pygraph/parserx" $yaml_graph_path
%run -i "../lib/pygraph/json_to_nx"
#definizione verificatore
%run -i "exercise_graph/verifier/p_3
init_graph = copy.deepcopy(graph_data)
if p.exists('exercise_graph/data/tmp/graph_data'+exercise+'.json'):
with open('exercise_graph/data/tmp/graph_data'+exercise+'.json') as json_file:
graph_data = json.load(json_file)
display(Javascript('''
var out_a = this;
var cell_e = out_a.element.parents('.cell');
var cell_i = Jupyter.notebook.get_cell_elements().index(cell_e)+1;
var cm = 'submit_cell ='+cell_i;
IPython.notebook.kernel.execute(cm)
'''))
# draw graph
# Read the JS template (same rendering pipeline as the intro cell).
js_text_template = Template(open(yaml_js_path,'r').read())
# Substitute the (possibly restored) graph structure into the JS template.
js_text = js_text_template.substitute({'graph_data': json.dumps(graph_data),
                                      'exercise' : exercise,
                                      'container':'sigma-container'+exercise,
                                      'data':not_editable,
                                      's':'s'+exercise,
                                      'n':'n'+exercise,
                                      'ef':'`'+str(friends_e)+'`',
                                      'nf':'`'+str(friends_n)+'`',
                                      's_cell': 'cell'+exercise,
                                      'submit_cell':submit_cell})
html_template = Template('''
<link rel="stylesheet" href="../css/style.css">
<div style="width:100%;margin-top: 30px; display: flex;flex-wrap: wrap">
<div class="row" style="width:100%;border: solid; border-color: #1e6add; border-width: 2px;">
<div id="sigma-container'''+exercise+'''" style="height:500px"></div>
</div>
<div class="btn-group_1" role="group">
''' + html_text + '''
</div>
</div>
<script> $js_text </script>
''')
# Render the HTML with variable substitution.
HTML(html_template.substitute({'js_text': js_text}))
# "Reset" button: clears saved exercise state and re-runs the related cells.
button = widgets.Button(description="Reset")
output = widgets.Output()
display(button, output)
def on_reset_clicked(b):
    """Delete this exercise's temp files and re-execute the surrounding cells."""
    result = {}
    original_graph={}
    removed_nodes= 0
    added_nodes = 0
    added_edges = 0
    removed_edges = 0
    if os.path.exists('exercise_graph/data/tmp/svg'+exercise+'.svg'):
        os.remove('exercise_graph/data/tmp/svg'+exercise+'.svg')
    if os.path.exists('exercise_graph/data/tmp/graph_data'+exercise+'.json'):
        os.remove('exercise_graph/data/tmp/graph_data'+exercise+'.json')
    with output:
        # Re-execute the four cells above and the one below this button cell.
        display(Javascript('''
var output_area = this;
var cell_element = output_area.element.parents('.cell');
var cell_idx = Jupyter.notebook.get_cell_elements().index(cell_element);
IPython.notebook.execute_cells([cell_idx-4])
IPython.notebook.execute_cells([cell_idx-3])
IPython.notebook.execute_cells([cell_idx-2])
IPython.notebook.execute_cells([cell_idx-1])
IPython.notebook.execute_cells([cell_idx+1])
'''))
button.on_click(on_reset_clicked)
result = {}
original_graph={}
removed_nodes= 0
added_nodes = 0
added_edges = 0
removed_edges = 0
submitted = False
# If a saved solution exists, load it and diff it against the initial graph.
if p.exists('exercise_graph/data/tmp/graph_data'+exercise+'.json'):
    with open('exercise_graph/data/tmp/graph_data'+exercise+'.json') as json_file:
        solution = json.load(json_file)
    original_graph=json_to_nx_struct(init_graph)
    result=json_to_nx_struct(solution)
    submitted = True
try:
    # Count nodes/edges added or removed relative to the initial graph.
    if (len(original_graph['nodes'])-len(result['nodes']))<0:
        added_nodes = abs(len(original_graph['nodes'])-len(result['nodes']))
    else:
        removed_nodes = len(original_graph['nodes'])-len(result['nodes'])
    if (len(original_graph['edges'])-len(result['edges']))<0:
        added_edges = abs(len(original_graph['edges'])-len(result['edges']))
    else:
        removed_edges = len(original_graph['edges'])-len(result['edges'])
    result.update({'added_edges':added_edges,'removed_edges':removed_edges,'added_nodes':added_nodes,'removed_nodes':removed_nodes})
except:
    # NOTE(review): the bare except silently skips the diff when no solution
    # was loaded (the dicts lack 'nodes'/'edges') — best-effort by design.
    pass
if submitted:
    # Call the verifier, passing the result structure.
    file = open('exercise_graph/data/tmp/exercise_'+exercise+'.json', 'w')
    file.write(str(result))
    file.close()
    p_3(result)
    print("Ok, in attesa del verificatore")
else:
    print("Esercizio non sottomesso")
result
```
| github_jupyter |
# Sample Hangul RNN
```
# -*- coding: utf-8 -*-
# Import Packages
# TextLoader/Hangulpy are project-local helpers for the Hangul dataset.
import numpy as np
import tensorflow as tf
import collections
import string
import argparse
import time
import os
from six.moves import cPickle
from TextLoader import *
from Hangulpy import *
print ("Packages Imported")
```
# Load dataset using TextLoader
```
data_dir = "data/nine_dreams"
batch_size = 50
seq_length = 50
data_loader = TextLoader(data_dir, batch_size, seq_length)
# This makes "vocab.pkl" and "data.npy" in "data/nine_dreams"
# from "data/nine_dreams/input.txt"
vocab_size = data_loader.vocab_size
vocab = data_loader.vocab
chars = data_loader.chars
# Inspect the vocabulary mapping and the character list.
print ( "type of 'data_loader' is %s, length is %d"
    % (type(data_loader.vocab), len(data_loader.vocab)) )
print ( "\n" )
print ("data_loader.vocab looks like \n%s " %
    (data_loader.vocab))
print ( "\n" )
print ( "type of 'data_loader.chars' is %s, length is %d"
    % (type(data_loader.chars), len(data_loader.chars)) )
print ( "\n" )
print ("data_loader.chars looks like \n%s " % (data_loader.chars,))
```
# Define Network
```
rnn_size = 512
num_layers = 3
grad_clip = 5.
# Batch/sequence length of 1: this graph is built for character-by-character sampling.
_batch_size = 1
_seq_length = 1
vocab_size = data_loader.vocab_size
with tf.device("/cpu:0"):
    # Select RNN Cell
    unitcell = tf.nn.rnn_cell.BasicLSTMCell(rnn_size)
    # NOTE(review): [unitcell] * num_layers repeats the same cell object for
    # every layer — confirm this is intended for the TF version in use.
    cell = tf.nn.rnn_cell.MultiRNNCell([unitcell] * num_layers)
    # Set paths to the graph
    input_data = tf.placeholder(tf.int32, [_batch_size, _seq_length])
    targets = tf.placeholder(tf.int32, [_batch_size, _seq_length])
    initial_state = cell.zero_state(_batch_size, tf.float32)
    # Set Network
    with tf.variable_scope('rnnlm'):
        softmax_w = tf.get_variable("softmax_w", [rnn_size, vocab_size])
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [vocab_size, rnn_size])
            inputs = tf.split(1, _seq_length, tf.nn.embedding_lookup(embedding, input_data))
            inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
    # Loop function for seq2seq (feeds back the argmax prediction; unused below)
    def loop(prev, _):
        prev = tf.nn.xw_plus_b(prev, softmax_w, softmax_b)
        prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
        return tf.nn.embedding_lookup(embedding, prev_symbol)
    # Output of RNN
    outputs, last_state = tf.nn.seq2seq.rnn_decoder(inputs, initial_state
        , cell, loop_function=None, scope='rnnlm')
    output = tf.reshape(tf.concat(1, outputs), [-1, rnn_size])
    logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
    # Next word probability
    probs = tf.nn.softmax(logits)
    # Define LOSS
    loss = tf.nn.seq2seq.sequence_loss_by_example([logits], # Input
        [tf.reshape(targets, [-1])], # Target
        [tf.ones([_batch_size * _seq_length])], # Weight
        vocab_size)
    # Define Optimizer: clipped gradients applied via Adam.
    cost = tf.reduce_sum(loss) / _batch_size / _seq_length
    final_state = last_state
    lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
    _optm = tf.train.AdamOptimizer(lr)
    optm = _optm.apply_gradients(zip(grads, tvars))
print ("Network Ready")
# Sample !
def sample( sess, chars, vocab, __probs, num=200, prime=u'ㅇㅗᴥㄴㅡㄹᴥ '):
    """Generate *num* characters from the trained RNN, seeded with *prime*."""
    state = sess.run(cell.zero_state(1, tf.float32))
    _probs = __probs
    prime = list(prime)
    # Warm up the RNN state on every prime character except the last.
    for char in prime[:-1]:
        x = np.zeros((1, 1))
        x[0, 0] = vocab[char]
        feed = {input_data: x, initial_state:state}
        [state] = sess.run([final_state], feed)
    def weighted_pick(weights):
        # Sample an index proportionally to its weight (alternative sampler, unused).
        weights = weights / np.sum(weights)
        t = np.cumsum(weights)
        s = np.sum(weights)
        return(int(np.searchsorted(t, np.random.rand(1)*s)))
    ret = prime
    char = prime[-1]
    for n in range(num):
        x = np.zeros((1, 1))
        x[0, 0] = vocab[char]
        feed = {input_data: x, initial_state:state}
        [_probsval, state] = sess.run([_probs, final_state], feed)
        p = _probsval[0]
        # Draw the next character index from the predicted distribution.
        sample = int(np.random.choice(len(p), p=p))
        # sample = weighted_pick(p)
        # sample = np.argmax(p)
        pred = chars[sample]
        # NOTE(review): ret is a list, so += appends pred's characters to it.
        ret += pred
        char = pred
    return ret
print ("sampling function done.")
```
# Sample
```
save_dir = 'data/nine_dreams'
# Decompose the Hangul prime text into jamo units for the model.
prime = decompose_text(u"누구 ")
print ("Prime Text : %s => %s" % (automata(prime), "".join(prime)))
n = 2000
sess = tf.Session()
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(save_dir)
# load_name = u'data/nine_dreams/model.ckpt-0'
load_name = u'data/nine_dreams/model.ckpt-99000'
print (load_name)
if ckpt and ckpt.model_checkpoint_path:
    # Restore the trained weights, sample n characters, then recompose jamo.
    saver.restore(sess, load_name)
    sampled_text = sample(sess, chars, vocab, probs, n, prime)
    #print ("")
    print (u"SAMPLED TEXT = %s" % sampled_text)
    print ("")
    print ("-- RESULT --")
    print (automata("".join(sampled_text)))
```
| github_jupyter |
# Tutorial 2 for Python
## Make a scenario of Dantzig's Transport Problem using the *ix modeling platform* (ixmp)
<img style="float: right; height: 80px;" src="_static/python.png">
### Aim and scope of the tutorial
This tutorial uses the transport problem scenario developed in the first tutorial and illustrates how the ixmp framework can be applied for scenario analysis in the sense often used in economic or environmental modeling: develop a baseline, create a clone from the baseline with altered parameters or assumptions, and solve the new model. Then, compare the results from the original and altered scenario versions.
In particular, this tutorial will take you through the following steps:
0. Launch an ixmp.Platform instance and retrieve the ixmp.Scenario instance of Dantzig's transport problem
0. Retrieve some data from the scenario for illustration of filters
0. Make a clone of the baseline scenario, then check out the clone and make changes:
in this case, add a new demand location and transport costs to that city
0. Solve the new scenario
0. Display the solution of both the baseline and the new scenario
### Launching the platform and loading a scenario from the ixmp database instance
We launch a platform instance and display all models/scenarios currently stored in the connected database instance.
```
# load required packages
import pandas as pd
import ixmp
# launch the ix modeling platform using the default local database
mp = ixmp.Platform()
# Model and scenario name for Dantzig's transport problem
model = 'canning problem'
scenario = 'standard'
# List all Scenarios in the database (shown as the cell output)
mp.scenario_list()
```
If you have just run the first, ``py_transport`` tutorial, the existing scenario should appear, and we can load it.
Uncomment and run the following line.
```
# # Load the default version of the scenario created in the first tutorial
# scen = ixmp.Scenario(mp, model, scenario)
```
If not (e.g. starting with this tutorial), we can use a function that creates the scenario from scratch in one step:
```
# Create the Dantzig scenario from scratch and solve it in one step.
from ixmp.testing import make_dantzig
scen = make_dantzig(mp, solve='.')
```
### Retrieve some data from the scenario for illustration of filters
Before cloning a scenario and editing data, this section illustrates two-and-a-half methods to retrieve data for a parameter from a scenario.
```
# load the distance parameter
d = scen.par("d")
d
# show only the distances for connections from Seattle by filtering the pandas.DataFrame returned above
d[d['i'] == "seattle"]
# for faster access or more complex filtering,
# it may be easier to only load specific parameter elements using a dictionary
# mapping dimension names to the desired elements
ele_filter = {}
ele_filter['i'] = ['seattle']
ele_filter['j'] = ['chicago', 'topeka']
d_filtered = scen.par("d", ele_filter)
d_filtered
```
### Make a clone of the baseline scenario, then check out the clone and edit the scenario
For illustration of a scenario analysis workflow, we add a new demand location ``detroit`` and add a demand level and transport costs to that city.
Because the production capacity does not allow much slack for increased production, we also reduce the demand level in ``chicago``.
```
# create a new scenario by cloning the scenario (without keeping the solution)
scen_detroit = scen.clone(model=model, scenario='detroit', annotation='extend the Transport problem by a new city', keep_solution=False)
# check out the scenario to make changes
scen_detroit.check_out()
# reduce demand in chicago
scen_detroit.add_par('b', 'chicago', 200, 'cases')
# add a new city with demand and distances
scen_detroit.add_set('j', 'detroit')
scen_detroit.add_par('b', 'detroit', 150, 'cases')
scen_detroit.add_par('d', ['seattle', 'detroit'], 1.7, 'cases')
scen_detroit.add_par('d', ['san-diego', 'detroit'], 1.9, 'cases')
# commit changes to the ixmp database instance
comment = "add new city 'detroit' with demand, reduce demand in 'chicago'"
scen_detroit.commit(comment)
# make this version the default for the model/scenario name
scen_detroit.set_as_default()
```
### Solve the new scenario
```
scen_detroit.solve(model='dantzig')
```
### Display and analyze the results
For comparison between the baseline scenario, i.e., the original transport problem, and the "detroit" scenario, we show the solution for both cases.
```
# display the objective value of the solution in the baseline scenario
scen.var('z')
# display the objective value of the solution in the "detroit" scenario
scen_detroit.var('z')
# display the quantities transported from canning plants to demand locations in the baseline scenario
scen.var('x')
# display the quantities transported from canning plants to demand locations in the "detroit" scenario
scen_detroit.var('x')
# display the quantities and marginals (=shadow prices) of the demand balance constraints in the baseline scenario
scen.equ("demand")
# display the quantities and marginals (=shadow prices) of the demand balance constraints in the "detroit" scenario
scen_detroit.equ("demand")
```
### Close the database connection of the ix modeling platform
```
# close the connection of the platform instance to the local ixmp database files
# (required before another process can open the same database)
mp.close_db()
```
| github_jupyter |
```
%%writefile morse.py
# A lookup dictionary which, given a letter will return the morse code equivalent
# Forward lookup table: each supported character to its Morse sequence.
_letter_to_morse = {'a':'.-', 'b':'-...', 'c':'-.-.', 'd':'-..', 'e':'.', 'f':'..-.',
                    'g':'--.', 'h':'....', 'i':'..', 'j':'.---', 'k':'-.-', 'l':'.-..', 'm':'--',
                    'n':'-.', 'o':'---', 'p':'.--.', 'q':'--.-', 'r':'.-.', 's':'...', 't':'-',
                    'u':'..-', 'v':'...-', 'w':'.--', 'x':'-..-', 'y':'-.--', 'z':'--..',
                    '0':'-----', '1':'.----', '2':'..---', '3':'...--', '4':'....-',
                    '5':'.....', '6':'-....', '7':'--...', '8':'---..', '9':'----.',
                    ' ':'/'}

# Inverse table, built once at import time: Morse sequence back to its character.
_morse_to_letter = {code: char for char, code in _letter_to_morse.items()}

def encode(message):
    """
    Encode a message from English to Morse Code

    Args:
        message (str): the English message to encode

    Returns:
        str: The encoded message

    Raises:
        ValueError: if a character has no Morse equivalent

    Examples:
        >>> encode("Help us")
        '.... . .-.. .--. / ..- ...'
    """
    pieces = []
    for letter in message:
        # Morse has no case, so everything is encoded in lower case.
        letter = letter.lower()
        if letter not in _letter_to_morse:
            raise ValueError(f"Cannot encode \"{message}\". Character \"{letter}\" not in Morse dictionary")
        pieces.append(_letter_to_morse[letter])
    # Morse letters are separated from each other by a single space.
    return " ".join(pieces)

def decode(message):
    """
    Decode a message from Morse Code to English

    Args:
        message (str): the Morse Code message to decode

    Returns:
        str: The decoded English message

    Examples:
        >>> decode(".... . .-.. .--. / ..- ...")
        'help us'
    """
    # Morse letters arrive space-separated; decoded letters need no separator.
    return "".join(_morse_to_letter[code] for code in message.split(" "))
%%writefile rot13.py
import string

# ROT13 cipher alphabets: the ASCII alphabets rotated by 13 positions.
_lower_cipher = string.ascii_lowercase[13:] + string.ascii_lowercase[:13]
_upper_cipher = string.ascii_uppercase[13:] + string.ascii_uppercase[:13]

def encode(message):
    """
    Encode a message from English to ROT13

    Args:
        message (str): the English message to encode

    Returns:
        str: The encoded message

    Raises:
        ValueError: if a character is not an ASCII letter

    Examples:
        >>> encode("Secretmessage")
        'Frpergzrffntr'
    """
    encoded = []
    for letter in message:
        if letter in string.ascii_lowercase:
            encoded.append(_lower_cipher[string.ascii_lowercase.find(letter)])
        elif letter in string.ascii_uppercase:
            encoded.append(_upper_cipher[string.ascii_uppercase.find(letter)])
        else:
            raise ValueError(f"Cannot encode \"{message}\". Character \"{letter}\" not valid")
    return "".join(encoded)

def decode(message):
    """
    Encode a message from ROT13 to English

    Args:
        message (str): the ROT13 message to encode

    Returns:
        str: The decoded message

    Raises:
        ValueError: if a character is not an ASCII letter

    Examples:
        >>> encode("Frpergzrffntr")
        'Secretmessage'
    """
    decoded = []
    for letter in message:
        if letter in _lower_cipher:
            decoded.append(string.ascii_lowercase[_lower_cipher.find(letter)])
        elif letter in _upper_cipher:
            decoded.append(string.ascii_uppercase[_upper_cipher.find(letter)])
        else:
            raise ValueError(f"Cannot decode \"{message}\". Character \"{letter}\" not valid")
    return "".join(decoded)
%%writefile test_morse.py
import pytest
from morse import encode, decode
# Round-trip property: decoding an encoded message should return the original.
# NOTE: the uppercase case fails here because Morse encode() lowercases each
# character — this is deliberate and addressed in the next section.
@pytest.mark.parametrize("message", [
    "sos",
    "help us please",
    "An Uppercase String",
])
def test_roundtrip(message):
    assert decode(encode(message)) == message
%%writefile test_rot13.py
import pytest
from rot13 import encode, decode
# ROT13 preserves case, so the round-trip is exact.
@pytest.mark.parametrize("message", [
    "sos",
    "helpusplease",
    "AnUppercaseString",
])
def test_roundtrip(message):
    assert decode(encode(message)) == message
# Run both test files with pytest.
!COLUMNS=60 pytest -v morse.py rot13.py test_morse.py test_rot13.py
```
The Morse Code converter does not maintain the case of the string. All messages passed into it will be converted to lower case. This means that a message like "SOS" will, after round-tripping be "sos". This means that in this case, the invariant of the round-trip is not that the messages are identical, but rather that they are "identical if you ignore case".
To make our test do this, we can compare the round-tripped message against the lower case message with `message.lower()`:
```
%%writefile test_morse.py
import pytest
from morse import encode, decode
# The Morse round-trip invariant is "identical ignoring case",
# because encode() lowercases the message.
@pytest.mark.parametrize("message", [
    "sos",
    "help us please",
    "An Uppercase String",
])
def test_roundtrip(message):
    assert decode(encode(message)) == message.lower()  # This line has changed
!COLUMNS=60 pytest -v morse.py rot13.py test_morse.py test_rot13.py
```
| github_jupyter |
# Data Augmentation
We'll show you examples of data augmentation with various techniques such as [MixUp](https://openreview.net/pdf?id=r1Ddp1-Rb), [CutMix](http://openaccess.thecvf.com/content_ICCV_2019/papers/Yun_CutMix_Regularization_Strategy_to_Train_Strong_Classifiers_With_Localizable_Features_ICCV_2019_paper.pdf), and [VH-MixUp](https://arxiv.org/pdf/1805.11272.pdf)!
| Image 1 | Image 2 | Mixup | CutMix | VH-Mixup |
| --- | --- | --- | --- | --- |
| <img src="https://blog.nnabla.org/wp-content/uploads/sites/2/2020/04/07130642/image1.png" alt="" width="128" height="128" class="size-full wp-image-1074" /> | <img src="https://blog.nnabla.org/wp-content/uploads/sites/2/2020/04/07130708/image2.png" alt="" width="128" height="128" class="size-full wp-image-1075" /> | <img src="https://blog.nnabla.org/wp-content/uploads/sites/2/2020/04/07131002/mixuped_img.png" alt="" width="128" height="128" class="size-full wp-image-1076" /> | <img src="https://blog.nnabla.org/wp-content/uploads/sites/2/2020/04/07131130/cutmixed_img.png" alt="" width="128" height="128" class="size-full wp-image-1077" /> | <img src="https://blog.nnabla.org/wp-content/uploads/sites/2/2020/04/07131216/VHmixuped.png" alt="" width="128" height="128" class="size-full wp-image-1078" /> |
# Preparation
Let's start by installing nnabla and accessing [nnabla-examples repository](https://github.com/sony/nnabla-examples). If you're running on Colab, make sure that your Runtime setting is set as GPU, which can be set up from the top menu (Runtime → change runtime type), and make sure to click **Connect** on the top right-hand side of the screen before you start.
```
# Install nnabla with CUDA 10.0 support and fetch the examples repository.
!pip install nnabla-ext-cuda100
!git clone https://github.com/sony/nnabla-examples.git
%run nnabla-examples/interactive-demos/colab_utils.py
%cd nnabla-examples/data_augmentation
```
Next, let's import required modules first.
```
import os
import numpy as np
import matplotlib.pyplot as plt
import nnabla as nn
from nnabla.ext_utils import get_extension_context
from nnabla.utils.image_utils import imread, imresize
# NOTE(review): duplicate import — get_extension_context is already imported above.
from nnabla.ext_utils import get_extension_context
from MixedDataLearning import *
from google.colab import files
from IPython.display import Image,display
# Run on GPU via the cuDNN extension context.
ctx = get_extension_context("cudnn")
nn.set_default_context(ctx)
```
# Upload first image
Now, upload an image you'd like to use for data augmentation.
```
# Upload the first image via the Colab file picker.
img1 = files.upload()
#Renaming the file for convenience. You can ignore the lines below.
ext = os.path.splitext(list(img1.keys())[-1])[-1]
os.rename(list(img1.keys())[-1], "input_image1{}".format(ext))
input_img1 = "input_image1" + ext
display(Image(input_img1))
```
# Upload second image
Next, upload the second image you'd like to mix with the first one.
```
# Upload the second image via the Colab file picker.
img2 = files.upload()
#Renaming the file for convenience. You can ignore the lines below.
ext = os.path.splitext(list(img2.keys())[-1])[-1]
os.rename(list(img2.keys())[-1], "input_image2{}".format(ext))
input_img2 = "input_image2" + ext
display(Image(input_img2))
#@title Here we resize the uploaded images. To see the details, double-click this cell.
image1 = imread(input_img1, channel_first=True)[:3]
image2 = imread(input_img2, channel_first=True)[:3]
scale = float(image1.shape[1]) / image2.shape[1]
image2 = imresize(image2, size=(int(image2.shape[2]*scale), int(image2.shape[1]*scale)), channel_first=True)
larger_shape = [max(image1.shape[i], image2.shape[i]) for i in range(3)]
pad_length_1 = [larger_shape[i] - image1.shape[i] for i in range(3)]
pad_length_2 = [larger_shape[i] - image2.shape[i] for i in range(3)]
image1 = np.pad(image1, (
(0, 0),
(pad_length_1[1] // 2, pad_length_1[1] // 2 + pad_length_1[1] % 2),
(pad_length_1[2] // 2, pad_length_1[2] // 2 + pad_length_1[2] % 2)),
mode="reflect")
image2 = np.pad(image2, (
(0, 0),
(pad_length_2[1] // 2, pad_length_2[1] // 2 + pad_length_2[1] % 2),
(pad_length_2[2] // 2, pad_length_2[2] // 2 + pad_length_2[2] % 2)),
mode="reflect")
#@title Choose data augmentation config.
#@markdown Choose which data augmentation is used.
mixtype = "vhmmixup" #@param ['mixup', 'cutmix', 'vhmmixup']
#@markdown choose alpha value. (default: 0.5)
alpha = 1.04 #@param {type: "slider", min: 0.0, max: 2.0, step: 0.01}
```
Now setup the mix augmentation and so on.
```
# Bind F explicitly: F.one_hot below otherwise only works if the wildcard
# import from MixedDataLearning happens to re-export nnabla.functions as F.
import nnabla.functions as F

# Batch of two images: the uploaded pair is mixed together.
inshape = (2,) + image1.shape
if mixtype == "mixup":
    mdl = MixupLearning(2, alpha=alpha)
elif mixtype == "cutmix":
    mdl = CutmixLearning(inshape, alpha=alpha, cutmix_prob=1.0)
else:
    # Fall-through default: "vhmmixup" selects VHMixupLearning.
    mdl = VHMixupLearning(inshape, alpha=alpha)
image_train = nn.Variable(inshape)
label_train = nn.Variable((2, 1))
mix_image, mix_label = mdl.mix_data(image_train, F.one_hot(label_train, (2, )))
# Normalize pixel values to [0, 1] before mixing.
image_train.d[0] = image1 / 255.
image_train.d[1] = image2 / 255.
```
# Apply Mix Augmentation
Running the following cell executes the augmentation and displays the augmented images. Note that every time you run the cell, the output will be different due to the randomness. Simple as it is, these augmentation techniques are very useful and actually improve the network performance.
```
mdl.set_mix_ratio()
mix_image.forward()
plt.imshow(mix_image.d[1].transpose(1,2,0))
```
| github_jupyter |
```
import numpy as np
def CSR_to_DNS(data, col, rowptr, shape):
    """Expand a CSR triplet (data, col, rowptr) into a dense numpy array.

    rowptr[r] .. rowptr[r + 1] delimit the slice of data/col belonging to
    row r, so the current row advances whenever the running position
    reaches the next row boundary.
    """
    dense = np.zeros(shape)
    r = 0
    for k, value in enumerate(data):
        # Move to the row whose slice of `data` contains position k.
        while k >= rowptr[r + 1]:
            r += 1
        dense[r][col[k]] = value
    return dense
def DNS_to_CSR(A):
    """Compress dense matrix A into CSR form: (data, col, rowptr, shape).

    rowptr records the running non-zero count after each row, so
    rowptr[r + 1] - rowptr[r] is the number of non-zeros in row r.
    """
    values = []
    columns = []
    rowptr = [0]
    n_rows, n_cols = A.shape
    for r in range(n_rows):
        for c in range(n_cols):
            entry = A[r][c]
            if entry != 0:
                values.append(entry)
                columns.append(c)
        rowptr.append(len(values))
    return values, columns, rowptr, A.shape
def DNS_to_COO(A):
    """Convert dense matrix A to coordinate (COO) form.

    Returns (data, col, row, shape) -- note the column list comes before
    the row list, matching the COO_to_DNS parameter order.
    """
    values, rows, cols = [], [], []
    n_rows, n_cols = A.shape
    for r in range(n_rows):
        for c in range(n_cols):
            entry = A[r][c]
            if entry != 0:
                values.append(entry)
                rows.append(r)
                cols.append(c)
    return values, cols, rows, A.shape
def COO_to_DNS(data, col, row, shape):
    """Rebuild a dense numpy array from a COO (data, col, row) triplet."""
    dense = np.zeros(shape)
    for value, r, c in zip(data, row, col):
        dense[r][c] = value
    return dense
# Test case: Square CSR to DNS
data = [1.0, 2.0]
col = [3, 2]
rowptr = [0, 0, 0, 1, 2]
shape = (4, 4)
expected = np.zeros(shape)
expected[2][3] = 1.0
expected[3][2] = 2.0
actual = CSR_to_DNS(data, col, rowptr, shape)
assert (expected == actual).all()
# Test case: Square DNS to CSR
A = np.zeros((4, 4))
A[2][3] = 1.0
A[3][2] = 2.0
expected_data = [1.0, 2.0]
expected_col = [3, 2]
expected_rowptr = [0, 0, 0, 1, 2]
expected_shape = (4, 4)
actual_data, actual_col, actual_rowptr, actual_shape = DNS_to_CSR(A)
assert expected_data == actual_data
assert expected_col == actual_col
assert expected_rowptr == actual_rowptr
assert expected_shape == actual_shape
# Test case: Square COO to DNS
data = [1.0, 2.0]
col = [3, 2]
row = [2, 3]
shape = (4, 4)
expected = np.zeros((4, 4))
expected[2][3] = 1.0
expected[3][2] = 2.0
actual = COO_to_DNS(data, col, row, shape)
assert np.array_equal(expected, actual)
# Test case: Square DNS to COO
A = np.zeros((4, 4))
A[2][3] = 1.0
A[3][2] = 2.0
expected_data = [1.0, 2.0]
expected_col = [3, 2]
expected_row = [2, 3]
expected_shape = (4, 4)
actual_data, actual_col, actual_row, actual_shape = DNS_to_COO(A)
assert expected_data == actual_data
assert expected_col == actual_col
# Bug fix: this previously asserted expected_rowptr == actual_rowptr, i.e. the
# leftover variables from the CSR test above, so the COO row indices were
# never actually checked.
assert expected_row == actual_row
assert expected_shape == actual_shape
# Workspace
data = [0.80, 0.10, 0.40, 0.20, 0.30, 0.40, 0.40]
row = [0, 0, 0, 1, 2, 1, 2]
col = [2, 3, 1, 3, 3, 2, 1]
COO_to_DNS(data, col, row, (4, 4))
#Workspace 2
data = [0.80, 0.30, 0.70, 0.70, 0.10, 0.20, 0.10, 0.70]
rowptr = [0, 2, 4, 6, 8]
col = [0, 3, 1, 2, 0, 2, 1, 3]
CSR_to_DNS(data, col, rowptr, (4, 4))
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
```
# Gibbs sampling for a one sample t-test
Chapter 3.2.1: Gibbs sampling
Assume $Y_i \mid \mu,\sigma^2\sim\mbox{Normal}(\mu,\sigma^2)$ for $i=1,\dots,n$ and let the prior distributions be $\mu\sim\mbox{Normal}\left(0,\frac{\sigma^2}{m}\right)$ and $\sigma^2\sim\mbox{InvGamma}(a,b)$. It can be shown (Chapter 2) that the full conditional distributions are:
$$\mu \mid \sigma^2,Y_1, \dots,Y_n\sim\mbox{Normal}\left(\frac{\sum_{i=1}^nY_i}{n+m},\frac{\sigma^2}{n+m}\right)$$
and
$$\sigma^2 \mid \mu,Y_1, \dots,Y_n\sim\mbox{InvGamma}\left(a+ \frac{n}{2},b+\frac{\sum_{i=1}^n(Y_i-\mu)^2}{2}\right)$$
Gibbs sampling iterates between drawing from these two (univariate) full conditional distributions to produce samples from the joint (bivariate) posterior distribution.
## Load the galaxy data
```
Y = np.loadtxt("data/galaxies.csv")
n = len(Y)
plt.hist(Y, bins="auto")
(stats.invgamma(2, scale=1 / 5).rvs(100000)).mean()
```
## Fix the priors
```
m = 0.01
a = 0.01
b = 0.01
```
## Gibbs sampling
```
n_iters = 3000
keep_mu = np.zeros(n_iters)
keep_s2 = np.zeros(n_iters)
# Initial values
mu = np.mean(Y)
s2 = np.var(Y)
keep_mu[0] = mu
keep_s2[0] = s2
# Gibbs sampler: alternate draws from the two full conditionals derived above,
# storing each iteration's (mu, s2) pair for the trace/posterior plots.
for i in range(1, n_iters):
    # sample mu|s2,Y ~ Normal(sum(Y)/(n+m), s2/(n+m)); stats.norm takes the
    # standard deviation, hence the ** 0.5
    MN = np.sum(Y) / (n + m)
    VR = s2 / (n + m)
    mu = stats.norm(MN, VR ** 0.5).rvs(1)
    # sample s2|mu,Y ~ InvGamma(a + n/2, b + sum((Y - mu)^2)/2)
    A = a + n / 2
    B = b + np.sum((Y - mu) ** 2) / 2
    s2 = stats.invgamma(A, scale=B).rvs(1)
    # keep track of the results
    keep_mu[i] = mu
    keep_s2[i] = s2
_, ax = plt.subplots(1, 2, constrained_layout=True)
ax[0].plot(keep_mu)
ax[0].set_ylabel("mu")
ax[1].plot(keep_s2)
ax[1].set_ylabel("s2")
```
## Plot the samples from the joint posterior of
```
plt.plot(keep_s2, keep_mu, ".")
plt.xlabel("Sigma^2")
plt.ylabel("mu")
plt.title("Joint posterior")
plt.axhline(np.mean(Y), color="k")
plt.axvline(np.var(Y), color="k")
```
## Plot the samples from the marginal (over $\sigma^2$) posterior of $\mu, p(\mu \mid Y_1,…,Y_n)$
```
plt.hist(keep_mu)
plt.xlabel("mu")
plt.title("Marginal posterior");
```
## Plot the samples from the marginal (over $\mu$) posterior of $\sigma, p(\sigma \mid Y_1,…,Y_n)$
```
keep_s = keep_s2 ** 0.5
# Bug fix: plot the sigma samples (keep_s), not the sigma^2 samples (keep_s2),
# to match the section heading and the x-axis label below.
plt.hist(keep_s)
plt.xlabel("sigma")
plt.title("Marginal posterior");
```
## Compute the approximate marginal means and 95\% credible sets
```
keep_mu.mean() # mu
np.quantile(keep_mu, [0.025, 0.975])
keep_s2.mean() # sigma^2
np.quantile(keep_s2, [0.025, 0.975])
keep_s.mean()
```
## Plot the data versus the fitted model
```
mu_hat = keep_mu.mean()
sig_hat = keep_s.mean()
count, *_ = plt.hist(Y, bins="auto")
y = np.linspace(4000, 40000, 100)
d = stats.norm(mu_hat, sig_hat).pdf(y)
d = max(count) * d / max(d)
plt.plot(y, d, lw=2, c="C1")
```
| github_jupyter |
# ***Video da apresentação:***
---
# https://youtu.be/-5xjHpiqnL0 **bold text**
```
from google.colab import drive
drive.mount('/gdrive')
%cd /gdrive
!pip install icc_rt
#!pip uninstall icc_rt
import pandas as pd
import numpy as np
import gensim
import multiprocessing
import sklearn.preprocessing as pp
import warnings
import logging # Setting up the loggings to monitor gensim
import numba
from IPython.display import display
from IPython.core.display import HTML
from gensim.models import Word2Vec,KeyedVectors
from time import time
from gensim.models.phrases import Phrases, Phraser
from sklearn.feature_extraction.text import TfidfVectorizer
warnings.filterwarnings('ignore')
#logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
t = time()
estaticos_market = pd.read_csv('/gdrive/My Drive/estaticos_market.csv')
#estaticos_market = pd.read_csv('/gdrive/My Drive/estaticos_portfolio1.csv')
print(estaticos_market.shape)
print('Time to Read Csv: {} mins'.format(round((time() - t) / 60, 2)))
percent_missing = estaticos_market.isnull().sum() * 100 / len(estaticos_market)
missing_value_df = pd.DataFrame({'percent_missing': percent_missing,'dtypes': estaticos_market.dtypes,})
missing_value_df.sort_values('percent_missing', inplace=True)
missing_value_df.head(181)
# NOTE(review): @numba.jit on pandas-heavy code cannot compile to nopython
# mode -- numba presumably falls back to object mode here with no speedup;
# confirm and consider dropping the decorator.
@numba.jit()
def fast_clean(df):
    """Turn the raw market dataframe into word2vec training sentences.

    Drops excluded columns, stringifies every value, prefixes each value
    with its (underscore-stripped) column name, and joins each row into a
    comma-separated "sentence".

    Returns
    -------
    tuple
        (sent, indice, Maker_Model, df_clean):
        sent        -- list of token lists, one per row (word2vec input)
        indice      -- Series mapping id -> row index (duplicates dropped)
        Maker_Model -- list of unique ids
        df_clean    -- single-column DataFrame of the joined row strings
    """
    t = time()
    # Columns excluded from the similarity features.
    col_exclude = ["fl_email", "fl_telefone", "qt_filiais", "tx_rotatividade", "tx_crescimento_24meses", "tx_crescimento_12meses", "qt_funcionarios_12meses",
    "qt_funcionarios_24meses", "min_meses_servicos",'Unnamed: 0','fl_matriz','qt_alteracao_socio_total','qt_alteracao_socio_365d','qt_alteracao_socio_90d','grau_instrucao_macro_desconhecido']
    df = df.drop(col_exclude, axis=1)
    # Map booleans to short string tokens before the global astype(str).
    booleandf = df.select_dtypes(include=[bool]).columns
    booleanDictionary = {True: 'T', False: 'F'}
    for column in booleandf:
        df[column] = df[column].map(booleanDictionary)
    df= df.astype(str)
    objectdf = df.select_dtypes(include=[object]).columns
    for column in objectdf:
        # Strip spaces/commas so each value survives the comma-join below,
        # replace 'nan' placeholders, and prefix with the column name so
        # identical values from different columns become distinct tokens.
        df[column] = df[column].str.replace(' ', '')
        df[column] = np.where(df[column] == 'nan', str(df.index), df[column])
        df[column] = column.replace('_', '')+"_"+df[column]
        df[column] = df[column].str.replace(',', '')
    Maker_Model = list(df.id.unique())
    indice = pd.Series(df.index, index=df['id']).drop_duplicates()
    # Move the id column to the end so it is the last token of each sentence.
    df_id = pd.DataFrame({'id': df['id']})
    df = df.drop(['id'],axis=1)
    df['id'] = df_id['id']
    df2 = df_join(df)
    df_clean = pd.DataFrame({'clean': df2})
    # One token list per row, split on the commas inserted by df_join.
    sent = [row.split(',') for row in df_clean['clean']]
    print('Time to clean: {} mins'.format(round((time() - t) / 60, 2)))
    return(sent,indice,Maker_Model,df_clean)
def df_join(df):
    """Collapse each dataframe row into one comma-separated string.

    Returns a Series with one joined string per row.
    """
    joined = df.astype(str).apply(lambda row: ','.join(row), axis=1)
    return joined
sent,indice, Maker_Model,df_clean = fast_clean(estaticos_market)
""" - UM DOS TESTE REALIZADOS - NÃO TEVE UM BOM DESEMPENHO
t = time()
tfidf = TfidfVectorizer(sublinear_tf=False,stop_words=None)
tfidf_matrix = tfidf.fit_transform(df_clean['clean'])
print('Time to tfidf: {} mins'.format(round((time() - t) / 60, 2)))
x = (tfidf_matrix.getrow(554).toarray().flatten())
y = (tfidf_matrix.getrow(9).toarray().flatten())
#x = np.squeeze(np.asarray(x))
#y = np.squeeze(np.asarray(y))
result = fast_cosine(x, y)
print(result)
tfidf_matrix = pp.normalize(tfidf_matrix.tocsc(), axis=0)
tfidf_matrix.shape
from scipy import sparse
b = (tfidf_matrix[9].transpose().todense())
b = np.squeeze(np.array(b), axis=1)
len(b)
"""
t = time()
cores=5
model = Word2Vec(sent,min_count=1,size=300,workers=cores, window=3, sg = 0)
print('Time to build model: {} mins'.format(round((time() - t) / 60, 2)))
t = time()
model.wv.save_word2vec_format('/gdrive/My Drive/model.bin', binary=True)
print('Time to save: {} mins'.format(round((time() - t) / 60, 2)))
t = time()
model2 = KeyedVectors.load_word2vec_format('/gdrive/My Drive/model.bin', binary=True)
#print('Time to load: {} mins'.format(round((time() - t) / 60, 2)))
def cosine_distance(model, word, target_list, num):
    """Return the `num` rows of estaticos_market whose ids are most similar
    to `word` under cosine similarity of the word2vec vectors.

    Parameters: the gensim model, the query token (e.g. 'id_<hash>'), the
    list of candidate tokens, and the number of neighbours wanted. If the
    query token is not in the model vocabulary, an empty DataFrame is
    returned.
    """
    cosine_dict = {}
    # Bug fix: previously this was only assigned inside the try block, so an
    # unknown `word` raised NameError at the return statement; start empty.
    final_data_day = pd.DataFrame()
    try:
        a = model[word]
        for item in target_list:
            b = model[item]
            cosine_dict[item] = fast_cosine(a, b)
        # Sort candidates by similarity, descending.
        dist_sort = sorted(cosine_dict.items(), key=lambda dist: dist[1], reverse=True)
        data_day_list = []
        count = 1
        for item in dist_sort:
            # Tokens carry an 'id_' prefix; strip it to match the id column.
            data_day = (estaticos_market[estaticos_market.id == item[0][3:]])
            data_day_list.append(data_day)
            if count == num:
                break
            count = count + 1
        final_data_day = pd.concat(data_day_list)
    except KeyError:
        print("word not found in the model vocabulary")
    return final_data_day
@numba.jit()
def cosine_distance2(tfidf_matrix, word, indice, target_list, num):
    """TF-IDF variant of cosine_distance: rank `target_list` ids by cosine
    similarity of their tf-idf rows and return the top `num` matching rows
    of estaticos_market.
    """
    cosine_dict = {}
    # Bug fix: the query vector previously used hard-coded row 554 instead of
    # the row of `word`; look it up through `indice` like the candidates.
    idx = indice[word]
    a = tfidf_matrix.getrow(idx).toarray().flatten()
    for item in target_list:
        if item != word:
            idx = indice[item]
            b = tfidf_matrix.getrow(idx).toarray().flatten()
            b = np.squeeze(np.array(b))
            cosine_dict[item] = fast_cosine(a, b)
    # Bug fix: the sorted result was previously discarded (dict_sorted's return
    # value was never assigned), so the loop below crashed with a NameError.
    dist_sort = dict_sorted(cosine_dict)
    data_day_list = []
    count = 1
    for item in dist_sort:
        print(item)
        # Tokens carry an 'id_' prefix; strip it to match the id column.
        data_day = (estaticos_market[estaticos_market.id == item[0][3:]])
        data_day_list.append(data_day)
        if count == num:
            break
        count = count + 1
    final_data_day = pd.concat(data_day_list)
    return final_data_day
def dict_sorted(cosine_dict):
    """Return the dict's (key, similarity) pairs sorted by similarity, descending."""
    return sorted(cosine_dict.items(), key=lambda pair: pair[1], reverse=True)
# Cosine similarity of two 1-D numeric vectors: dot(a, b) / (|a| * |b|).
# NOTE(review): the signature string 'f8,f8' describes two float64 scalars,
# but every caller passes numpy arrays -- numba presumably rejects this or
# falls back to object mode; confirm and use e.g. 'f8(f8[:], f8[:])' if so.
@numba.jit(('f8,f8'),parallel=True,fastmath=True)
def fast_cosine(a, b):
    # Division by zero if either vector has zero norm.
    result = np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b))
    return(result)
@numba.jit(parallel=True,fastmath=False)
def requerimento(element, quant):
    """Look up the `quant` market entries most similar to id `element`,
    delegating to cosine_distance over the word2vec model, and print the
    elapsed time.
    """
    t = time()
    # Removed a bare `element` expression statement and an unused
    # `result_list` -- both had no effect.
    result = cosine_distance(model, element, Maker_Model, quant)
    print('Tempo de cálculo {} mins'.format(round((time() - t) / 60, 2)))
    return result
'''BLOCO DE TESTE
#id='id_dabe79bec87c88ae04e869bf6bd321ee5e1893cecf66255be9ea861a9e5841a9'
id='id_7d58990ba786a3a26617a8afbf814a9919ab34aa09013a559b2c9d45eb439720'
#id='id_fc6969bffd9f104e3a148ad7df64b338ca885dd6a5aa5153b4754bd55746d638'
#a = model[id]
#model2.most_similar([id])
#cosine_distance (model,id,Maker_Model,10)
cosine_distance2 (tfidf_matrix,id,indice,Maker_Model,10)
#result = model2.similar_by_vector('id_'+x[0], topn= 50)
#data_day_list=[]
'''
tfidf = TfidfVectorizer(sublinear_tf=False,stop_words=None)
tfidf_matrix = tfidf.fit_transform(df_clean['clean'])
cosine_distance2 (tfidf_matrix,id,indice,Maker_Model,10)
estaticos_portfolio1 = pd.read_csv('/gdrive/My Drive/estaticos_portfolio1.csv')
estaticos_portfolio2 = pd.read_csv('/gdrive/My Drive/estaticos_portfolio2.csv')
estaticos_portfolio3 = pd.read_csv('/gdrive/My Drive/estaticos_portfolio3.csv')
print(estaticos_portfolio1.shape)
print(estaticos_portfolio2.shape)
print(estaticos_portfolio3.shape)
requis=(['7d58990ba786a3a26617a8afbf814a9919ab34aa09013a559b2c9d45eb439720','70485e6b8abe52d8fb4e0bf060fb6f0f4f8576cb7583d885a8dcac38f506389','6fecd2c150c5fe474b31b2b4b68e5d7dc9d22f51dd5c781d801b58102a71a570'])
#requis=estaticos_portfolio3['id']
result_list=[]
result_list2=[]
for element in requis:
try:
id = 'id_'+element
print(id)
model[id]
result=(requerimento(id,50))
result_list.append(result)
except Exception as e:
print('chave não encontrada')
result_final = pd.concat(result_list)
result_final.drop_duplicates(keep = 'first', inplace = True)
result_list2 = []
for element in requis:
result = (result_final[result_final.id==element])
result_list2.append(result)
result_final2 = pd.concat(result_list2)
i1 = len(requis)
f1,f2 = result_final2.shape
print("Percentual de item solicitado no resultado: "+str(round(f1/i1*100,2))+'%')
pd.options.display.max_columns = None
display(result_final)
```
| github_jupyter |
# Classifying Bangla Fake News with HuggingFace Transformers and Fastai
- toc: true
- branch: master
- badges: true
- comments: true
- categories: [fastpages, jupyter]
- image: images/some_folder/your_image.png
- hide: false
- search_exclude: true
- metadata_key1: metadata_value1
- metadata_key2: metadata_value2

In this post we cover fine tuning a [multilingual BERT](https://huggingface.co/bert-base-multilingual-cased) model from Huggingface Transformers library on [**BanFakeNews**](https://aclanthology.org/2020.lrec-1.349/) dataset released in LREC 2020. While English Fake News Classification and fact checking tasks have many resources and competitions available such as [fake news challenge](http://www.fakenewschallenge.org/) and [hateful meme detection](https://ai.facebook.com/blog/hateful-memes-challenge-and-data-set/), similar efforts in Bangla has been almost non existent. BanFakeNews dataset contains 50k annotated articles from different news sources in Bangladesh, out of them around 1200 articles have been annotated as fake. As transformer architectures uses self attention to learn contextual embeddings they have been very popular in NLP research community for a while and many tools have been built around them.
This post is reusing materials taught in Weights and Bias's study group of Fast ai with Huggingface([link](https://wandb.ai/wandb_fc/events/reports/W-B-Study-Group-Lectures-fast-ai-w-Hugging-Face--Vmlldzo4NDUzNDU?galleryTag=events)) where several recent(2021) libraries([blurr](https://github.com/ohmeow/blurr),[Fasthugs](https://github.com/morganmcg1/fasthugs) & [Adaptnlp](https://novetta.github.io/adaptnlp/)) that integrates components from popular deep learning frameworks Huggingface transformers and Fastai v2 are shown.
My experience with using Transformers is fairly low, recently I participated in Huggingface's Flax/Jax week and there our team pretrained [Bengali GPT2](https://huggingface.co/flax-community/gpt2-bengali) and [T5](https://huggingface.co/flax-community/Bengali-t5/tree/main), so I was looking into Huggingface course and documentation to learn more about how to finetune transformers. Previously my thesis journal paper for undergraduate ["Deep Learning based Question Answering System in Bengali"](https://www.tandfonline.com/doi/full/10.1080/24751839.2020.1833136) worked on Bangla Question Answering with transformers, but I had stuck more to given scripts back then and focused more on synthetic dataset construction via translation and handling data corruption issues. So this post will focus more on the high level API of ```Blurr``` and the components of huggingface and fastai that are relevant for getting started quickly.
```
!pip install -Uqq transformers datasets tqdm
!pip install -Uqq ohmeow-blurr
!pip install -Uqq wandb
from google.colab import drive
drive.mount('/content/drive')
```
# Imports
```
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import wandb
from transformers import *
from fastai.text.all import *
from fastai.callback.wandb import *
from blurr.data.all import *
from blurr.modeling.all import *
```
We will use weights and biases for tracking experiments and runs. Project page : https://wandb.ai/tasmiah-tahsin/fake-news-blurr
```
wandb.login()
```
# Load dataset
This dataset has been downloaded from [Kaggle](https://www.kaggle.com/cryptexcode/banfakenews). Note that there are four files in the given dataset, but we use the two files (Authentic-48k and fake-1k) here because the rest of the files contain labelled fake and authentic news. Labelling in the sense of what type of fake news it is, clickbait, satire, misleading or false context, for the current priliminary phase we stick to binary classification of knowing if a news is fake or authentic. We also concatanate headline and content of the news article during preprocessing and combine the fake and authentic news dataset before sending them to Blurr dataloaders. This dataset is heavily imbalanced, so I'll take approximately half of the authentic news set, since taking full set takes each epoch with a batch size of four around an hour.

```
fake = pd.read_csv("/content/drive/MyDrive/fake news/Fake-1K.csv")
authentic = pd.read_csv("/content/drive/MyDrive/fake news/Authentic-48K.csv",engine='python',error_bad_lines=False,warn_bad_lines=True,nrows=15000)
df = pd.concat([authentic[['headline','content','label']],fake[['headline','content','label']]])
df.reset_index(drop=True,inplace=True)
print(authentic.shape, fake.shape)
df['text'] = df['headline'] + df['content']
df = df.drop(['headline','content'],axis=1)
df.head(1)
df.label=df.label.map({1:"Authentic",0:"Fake"})
from sklearn.model_selection import train_test_split
train, valid = train_test_split(df, test_size=0.2)
```
We will use the ```Blurr``` high-level API for sequence classification with the pandas dataframe: the ```BlearnerForSequenceClassification.from_dataframe()``` method takes in a dataframe, splits it on the default ```is_valid``` column using Fastai's [```ColSplitter```](https://docs.fast.ai/data.transforms.html#ColSplitter) into train and test splits, constructs the datablock and dataloaders, and uses them for training. So we add an ```'is_valid'``` column to the dataframe. There are other ways of splitting the data available in Fastai, like ```RandomSubsetSplitter```, which randomizes the data inside a dataframe. Since we used scikit-learn's train–test split to shuffle the dataframe, we can go with column-based splitting for now.
```
train['is_valid'] = False
valid['is_valid'] = True
final_df = pd.concat([train,valid],axis=0)
final_df.shape
final_df.head()
final_df.label.value_counts()
```
# Model Training
## Multilingual BERT
Since the original paper also used [multilingual cased bert](https://github.com/google-research/bert/blob/master/multilingual.md) released by Google this post can be considered as an attempt to reproduce the work of BanFakeNews. They trained mbert for 50 epochs with a learning rate of 0.00002 and optimizer Adam. The batch size was 32. The overall F1 score after training for 50 epochs on this dataset was .99 and f1 for fake class was 0.68. Multilingual bert has been pretrained on 104 languages including bengali with wordpiece tokenization. As bengali is already included it makes it a valid choice for current bangla text classification task. Information for this model are : 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters. As the size of the language corpora varied greatly for low resource languages exponential weighted smoothing was performed for weighting data during the pretraining stage, which results in undersampling high resource languages like english and oversampling low resource languages like Bengali. Mbert does not use any marker for input language to enable zero shot training.
```
wandb_init_kwargs = {
'reinit': True,
'project': "fake-news-blurr",
'entity': "tasmiah-tahsin",
'notes': 'Finetuning banfakenews with multilingual bert via Blurr',
'tags': ['mbert', 'fake-news-classification', 'blurr']
}
wandb.init(**wandb_init_kwargs)
```
Since I'm fairly new in ```blurr``` I'm using the high level API, but the key ideas are following. ```blurr``` is integrating two frameworks. Here ```Fastai``` is providing the ```datablock```, ```learner```, ```learning rate finder``` functionalities with Leslie Smith's 1cycle policy components, while huggingface transformers is providing the ready to use transformer model configuration and architectures made publicly available from huggingface hub(in general terms, but huggingface also has its own datasets library which integrates well with blurr, and fastai also provides pretrained language models based on LSTM like [ULMFiT](https://docs.fast.ai/tutorial.text.html) and [MultiFiT](https://nlp.fast.ai/classification/2019/09/10/multifit.html).
Fastai's datablock API works like a specification for quickly loading a data into a model. The blocks are specific steps which can be mixed/matched for training with its various transforms and splitting functions along with visualization capacities. Datablocks, Callbacks, and other fastai concepts are explained in the paper ["fastai: A Layered API for Deep Learning"](https://arxiv.org/abs/2002.04688). Under the hood ```blurr``` is providing wrappers for the huggingface transformers and for finetuning the parameters of the model with enabling discriminative learning rate like used in[ULMFiT](https://paperswithcode.com/method/ulmfit). Discriminative learning rate refers to using variable learning rates for different layers of a network while performing transfer learning.
The low level API for ```blurr``` works with datablocks and dataloaders creating the mini batches which are combined with the huggingface model architecture, optimizer and loss function inside a ```learner```. Mid level API contains ```BLearner``` and the highest level API contains task specific learners like the current one I'm using which is ```BlearnerForSequenceClassification```.
```
from blurr.modeling.core import BlearnerForSequenceClassification
pretrained_model_name = "bert-base-multilingual-cased"
learn = BlearnerForSequenceClassification.from_dataframe(final_df, pretrained_model_name, dl_kwargs={ 'bs': 4})
learn.lr_find(suggest_funcs=[minimum, steep, valley, slide])
```
I initially unfroze all the layers and set the max learning rate to 1e-2, but the results were pretty bad. So I reduced the learning rate, decided not to do full unfreezing, and retrained the model. The original model was trained for 50 epochs; here we are experimenting with only 5 epochs. The Fastai documentation recommends setting the learning rate one order of magnitude lower than the minimum, so I went with 1e-3.
```
#learn.unfreeze()
learn.fit_one_cycle(5, lr_max=1e-3,cbs=[WandbCallback(log_preds=False, log_model=False)])
```
We can see some of the results by the model here. Our model trains on half of the dataset and achieves around 0.80 in overall f1. Its likely that the model is trained longer it will achieve better performance. I might retrain it later on full data.
```
learn.show_results(learner=learn, max_n=4,trunc_at=200)
wandb.finish()
```

# Model Sharing
To upload our model to Huggingface hub we can use push_to_hub method available to the models. The details can be found [here](https://huggingface.co/transformers/model_sharing.html). We install git-lfs since the tokenizer and the model files are fairly large. After uploading the model to the huggingface hub we will also use ```pipeline``` functionality by transformers and combine with transformers interpret library to see how the model weights each of the input tokens when making predictions in the section below.
```
!sudo apt-get install git-lfs
!transformers-cli login
!git config --global user.email "tasmiah.tahsin@northsouth.edu"
!git config --global user.name "Tahsin-Mayeesha"
blurr_tfm = get_blurr_tfm(learn.dls.before_batch)
blurr_tfm.hf_model.push_to_hub("bangla-fake-news-mbert",use_temp_dir=True)
blurr_tfm.hf_tokenizer.push_to_hub("bangla-fake-news-mbert",use_temp_dir=True)
```
# Explaining predictions
Transformers interpret library tries to show weights for tokens after making predictions and make some visualizations. The tokens are split into subwords as per the tokenizer.
```
!pip install transformers-interpret
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model_name = "Tahsin-Mayeesha/bangla-fake-news-mbert"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
text = "অভিনেতা আফজাল শরীফকে ২০ লাখ টাকার অনুদান অসুস্থ অভিনেতা আফজাল শরীফকে চিকিৎসার জন্য ২০ লাখ টাকা অনুদান দিয়েছেন প্রধানমন্ত্রী শেখ হাসিনা।"
# With both the model and tokenizer initialized we are now able to get explanations on an example text.
from transformers_interpret import SequenceClassificationExplainer
cls_explainer = SequenceClassificationExplainer(
model,
tokenizer)
word_attributions = cls_explainer(text)
word_attributions[0:10]
cls_explainer.visualize()
```

# Resources
* fastai paper : https://arxiv.org/pdf/2002.04688.pdf
* https://github.com/ohmeow/blurr
* [fine-tuning mrpc with blurr colab](https://colab.research.google.com/gist/ohmeow/ee69e3c519bc68fbea5dd671afbdde39/fine-tuning-mrpc-with-blurr.ipynb#scrollTo=3Bih_uwhgMNS)
* weights and biases study group of fastai x huggingface [playlist](https://www.youtube.com/playlist?list=PLD80i8An1OEF8UOb9N9uSoidOGIMKW96t)
* [multilingual bert](https://huggingface.co/bert-base-multilingual-cased)
* https://github.com/cdpierse/transformers-interpret
* https://blog.dataiku.com/the-learning-rate-finder-technique-how-reliable-is-it
| github_jupyter |
# 线性回归的简洁实现
随着深度学习框架的发展,开发深度学习应用变得越来越便利。实践中,我们通常可以用比上一节更简洁的代码来实现同样的模型。在本节中,我们将介绍如何使用tensorflow2.0推荐的keras接口更方便地实现线性回归的训练。
## 生成数据集
我们生成与上一节中相同的数据集。其中`features`是训练数据特征,`labels`是标签。
```
import tensorflow as tf
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = tf.random.normal(shape=(num_examples, num_inputs), stddev=1)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += tf.random.normal(labels.shape, stddev=0.01)
```
虽然tensorflow2.0对于线性回归可以直接拟合,不用再划分数据集,但我们仍学习一下读取数据的方法
```
from tensorflow import data as tfdata
batch_size = 10
# 将训练数据的特征和标签组合
dataset = tfdata.Dataset.from_tensor_slices((features, labels))
# 随机读取小批量
dataset = dataset.shuffle(buffer_size=num_examples)
dataset = dataset.batch(batch_size)
data_iter = iter(dataset)
for X, y in data_iter:
print(X, y)
break
```
定义模型,tensorflow 2.0推荐使用keras定义网络,故使用keras定义网络
我们先定义一个模型变量`model`,它是一个`Sequential`实例。
在keras中,`Sequential`实例可以看作是一个串联各个层的容器。
在构造模型时,我们在该容器中依次添加层。
当给定输入数据时,容器中的每一层将依次计算并将输出作为下一层的输入。
重要的一点是,在keras中我们无须指定每一层输入的形状。
因为是线性回归,输入层与输出层全连接,故定义一层
```
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow import initializers as init
model = keras.Sequential()
model.add(layers.Dense(1, kernel_initializer=init.RandomNormal(stddev=0.01)))
```
定义损失函数和优化器:损失函数为mse,优化器选择sgd随机梯度下降
在keras中,定义完模型后,调用`compile()`方法可以配置模型的损失函数和优化方法。定义损失函数只需传入`loss`的参数,keras定义了各种损失函数,并直接使用它提供的平方损失`mse`作为模型的损失函数。同样,我们也无须实现小批量随机梯度下降,只需传入`optimizer`的参数,keras定义了各种优化算法,我们这里直接指定学习率为0.01的小批量随机梯度下降`tf.keras.optimizers.SGD(0.03)`为优化算法
```
from tensorflow import losses
loss = losses.MeanSquaredError()
from tensorflow.keras import optimizers
trainer = optimizers.SGD(learning_rate=0.01)
loss_history = []
```
在使用keras训练模型时,我们通过调用`model`实例的`fit`函数来迭代模型。`fit`函数只需传入你的输入x和输出y,还有epoch遍历数据的次数,每次更新梯度的大小batch_size, 这里定义epoch=3,batch_size=10。
使用keras甚至完全不需要去划分数据集
```
num_epochs = 3
for epoch in range(1, num_epochs + 1):
for (batch, (X, y)) in enumerate(dataset):
with tf.GradientTape() as tape:
l = loss(model(X, training=True), y)
loss_history.append(l.numpy().mean())
grads = tape.gradient(l, model.trainable_variables)
trainer.apply_gradients(zip(grads, model.trainable_variables))
l = loss(model(features), labels)
print('epoch %d, loss: %f' % (epoch, l.numpy().mean()))
```
下面我们分别比较学到的模型参数和真实的模型参数。我们可以通过model的`get_weights()`来获得其权重(`weight`)和偏差(`bias`)。学到的参数和真实的参数很接近。
```
true_w, model.get_weights()[0]
true_b, model.get_weights()[1]
loss_history
```
| github_jupyter |
# Getting Started With VerifyML
A quickstart guide to documenting your model findings in a VerifyML Model Card.
## Installation
```
!pip install verifyml
!pip install seaborn
```
## Imports
```
import pandas as pd
import verifyml.model_card_toolkit as mctlib
import verifyml.model_tests.utils as utils
import seaborn as sns
import uuid
from datetime import date
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import plot_roc_curve
from sklearn.model_selection import train_test_split
```
# Load Sample Data
In this example, we'll use the [Breast Cancer Wisconsin dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_breast_cancer.html) that comes bundled with `sklearn`.
```
cancer = load_breast_cancer()
X = pd.DataFrame(cancer.data, columns=cancer.feature_names)
y = pd.Series(cancer.target)
X_train, X_test, y_train, y_test = train_test_split(X, y)
```
# Data Exploration
## Plot the mean radius feature for both the train and test sets
```
# utils.plot_to_str() converts the plot to a base-64 encoded string that the model card will accept
sns.displot(x=X_train['mean radius'], hue=y_train)
mean_radius_train = utils.plot_to_str()
sns.displot(x=X_test['mean radius'], hue=y_test)
mean_radius_test = utils.plot_to_str()
```
# Create a Classifier
```
clf = GradientBoostingClassifier().fit(X_train, y_train)
# Plot an ROC curve
plot_roc_curve(clf, X_test, y_test)
roc_curve = utils.plot_to_str()
```
# Init Model Card
```
mct = mctlib.ModelCardToolkit(
output_dir='model_card_output', # directory that the model card output will be saved to
file_name='first_model_card' # file name of the model card that will be created
)
# init model card assets using the toolkit
model_card = mct.scaffold_assets()
```
After running the cell above, you should find a new directory with the name you specified in `output_dir` (`model_card_output` in this case). It should contain 2 subdirectories for now:
1. `data/`: contains a protobuf file with the format `<file_name specified above>.proto`. In this case, it should be `first_model_card.proto`.
2. `template/`: contains the Jinja templates that will be used later when rendering the Model Card
# Fill Out Model Card
## Model Details
```
# name, overview, owners, references, version
model_card.model_details.name = 'Breast Cancer Wisconsin (Diagnostic) Dataset'
model_card.model_details.overview = 'This model predicts whether breast cancer is benign or malignant based on image measurements.'
model_card.model_details.owners = [
mctlib.Owner(
name='Model Cards Team',
contact='model-cards-team@email.com',
role='data scientist'
)
]
model_card.model_details.references = [
mctlib.Reference(reference='https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)'),
mctlib.Reference(reference='https://minds.wisconsin.edu/bitstream/handle/1793/59692/TR1131.pdf')
]
model_card.model_details.version.name = str(uuid.uuid4())
model_card.model_details.version.date = str(date.today())
```
## Considerations
```
# ethical considerations, limitations, use cases, users
model_card.considerations.ethical_considerations = [
mctlib.Risk(
name='Manual selection of image sections to digitize could create selection bias',
mitigation_strategy='Automate the selection process'
)
]
model_card.considerations.limitations = [mctlib.Limitation(description='Breast cancer diagnosis')]
model_card.considerations.use_cases = [mctlib.UseCase(description='Breast cancer diagnosis')]
model_card.considerations.users = [
mctlib.User(description='Medical professionals'),
mctlib.User(description='ML researchers')
]
```
## Model Parameters
```
# add details for train and test datasets
mc_dataset_train = mctlib.Dataset()
mc_dataset_test = mctlib.Dataset()
mc_dataset_train.graphics.description = f'{len(X_train)} rows with {len(X_train.columns)} features'
mc_dataset_test.graphics.description = f'{len(X_test)} rows with {len(X_test.columns)} features'
mc_dataset_train.graphics.collection = [mctlib.Graphic(image=mean_radius_train)]
mc_dataset_test.graphics.collection = [mctlib.Graphic(image=mean_radius_test)]
# add details to model card
model_card.model_parameters.data = [mc_dataset_train, mc_dataset_test]
```
## Performance Metrics
```
# adding model details in a nested format (arguably less readable)
model_card.quantitative_analysis.performance_metrics = [
mctlib.PerformanceMetric(
type='accuracy',
slice='training',
value=str((49 + 89) / (49 + 89 + 2 + 3)),
graphics=mctlib.GraphicsCollection(
description='ROC curve and confusion matrix',
collection=[mctlib.Graphic(image=roc_curve)]
)
)
]
```
# Export Model Card
```
# needed to display HTML and markdown in notebooks
from IPython.display import Markdown, HTML
# update the model card with all changes made so far
mct.update_model_card(model_card)
```
## Export as Markdown
```
md = mct.export_format(
output_file='my_first_model_card.md',
template_path=f'{mct.output_dir}/template/md/default_template.md.jinja'
)
```
After running the cell above, you'll find a new subdirectory called `model_cards` inside the `output_dir` you defined above. Within it, there will be a Markdown file named whatever was specified in `output_file` above. In this case, there should be a `my_first_model_card.md` file in the `model_card_output/model_cards` subdirectory.
Note how `template_path` points to the template subdirectory that was previously created when `scaffold_assets()` was called (`model_card_output/template` in this case). If you'd prefer to use your own templates, feel free to use another `template_path`.
```
# display the markdown
Markdown(md)
```
## Export as HTML
```
# if unspecified, template_path defaults to f'{mct.output_dir}/template/html/default_template.html.jinja'
html = mct.export_format(output_file='my_first_model_card.html')
```
Just like with the Markdown export, the line above will create `my_first_model_card.html` in `model_card_output/model_cards`.
```
# display the HTML
HTML(html)
```
| github_jupyter |
# ARAS Datasets
H. Alemdar, H. Ertan, O.D. Incel, C. Ersoy, ARAS Human Activity Datasets in Multiple Homes with Multiple Residents, Pervasive Health, Venice, May 2013.
```
import sys
sys.path.append("../..")
import pandas as pd
import matplotlib.pyplot as plt
import pyadlml
import requests
import plotly
plotly.offline.init_notebook_mode(connected=True)
from pyadlml.dataset import set_data_home, fetch_aras
set_data_home('/home/chris/code/adlml/data_home')
data = fetch_aras(cache=True, keep_original=True)
data.df_activities.head()
data.df_activities_res2.head()
data.df_devices.head()
```
# Activities: Subject 1
```
print('There are', len(data.df_activities['activity'].unique()), 'activities recorded')
```
```
from pyadlml.dataset.plot.activities import hist_counts
idle = False
hist_counts(data.df_activities, idle=idle);
from pyadlml.dataset.plot.activities import boxplot_duration
idle = False
boxplot_duration(data.df_activities, idle=idle);
from pyadlml.dataset.plot.activities import hist_cum_duration
idle = True
y_scale = 'log'
hist_cum_duration(data.df_activities, y_scale=y_scale, idle=idle);
```
#### Transition Heatmap
The entry $x_{7,2}$ (row 7, column 2) should be read as: the resident first used the toilet and immediately afterwards went to bed, for a total of 46 times.
```
from pyadlml.dataset.plot.activities import heatmap_transitions
idle = False
z_scale = None #'log'
grid=True
numbers=False
heatmap_transitions(data.df_activities, z_scale=z_scale, idle=idle,
grid=grid, numbers=numbers);
```
## Activities: Subject 2
```
print('There are', len(data.df_activities_res2['activity'].unique()), 'activities recorded.')
from pyadlml.dataset.plot.activities import hist_counts
idle = False
hist_counts(data.df_activities_res2, idle=idle);
from pyadlml.dataset.plot.activities import boxplot_duration
idle = False
boxplot_duration(data.df_activities_res2, idle=idle);
from pyadlml.dataset.plot.activities import hist_cum_duration
idle = True
y_scale = 'log'
hist_cum_duration(data.df_activities_res2, y_scale=y_scale, idle=idle);
```
#### Transition Heatmap
The entry $x_{7,2}$ (row 7, column 2) should be read as: the resident first used the toilet and immediately afterwards went to bed, for a total of 46 times.
```
from pyadlml.dataset.plot.activities import heatmap_transitions
idle = False
z_scale = None #'log'
heatmap_transitions(data.df_activities_res2, z_scale=z_scale, idle=idle);
```
## Devices
```
print('There are', len(data.df_devices['device'].unique()), 'devices recorded.')
from pyadlml.dataset.devices import device_rep1_2_rep2
rep2_nodrop, syn_acts = device_rep1_2_rep2(data.df_devices, drop=False)
rep2_drop = device_rep1_2_rep2(data.df_devices, drop=True)
from pyadlml.dataset.plot.devices import hist_on_off
hist_on_off(rep2_nodrop)
from pyadlml.dataset.plot.devices import boxsplot_on_duration
boxsplot_on_duration(rep2_nodrop);
from pyadlml.dataset.plot.devices import heatmap_trigger_one_day
heatmap_trigger_one_day(data.df_devices, t_res='10m');
```
```
from pyadlml.dataset.stats.devices import duration_correlation_parallel
ct = duration_correlation_parallel(data.df_devices)
heatmap_cross_correlation(dur_corr=ct, figsize=(11,11))
from pyadlml.dataset.plot.devices import heatmap_cross_correlation
heatmap_cross_correlation(data.df_devices)
from pyadlml.dataset.plot.devices import hist_trigger_time_diff
hist_trigger_time_diff(data.df_devices);
from pyadlml.dataset.plot.devices import hist_counts
hist_counts(data.df_devices, y_scale='log', figsize=(10,9));
```
The distribution of triggers is also exponentially decaying for some devices e.g the Washingmaschine is used a lot less than the Hall-Bathroom door.
# Activites Person 1 ~ Devices
### Triggers
Lets look at the connection between the activities and the device activations.
```
import sys
sys.path.append("../..")
import pandas as pd
import matplotlib.pyplot as plt
import pyadlml
import requests
import plotly
plotly.offline.init_notebook_mode(connected=True)
from pyadlml.dataset import set_data_home, fetch_aras
set_data_home('/home/chris/code/adlml/data_home')
data = fetch_aras(cache=True, keep_original=True)
from pyadlml.dataset.stats import contingency_table_triggers, contingency_intervals, contingency_table_triggers_01
con_trigg = contingency_table_triggers(data.df_devices, data.df_activities)
con_trigg_01 = contingency_table_triggers_01(data.df_devices, data.df_activities)
con_int = contingency_intervals(data.df_devices, data.df_activities)
from pyadlml.dataset.plot import heatmap_contingency_triggers
idle = True
z_scale = 'log'
heatmap_contingency_triggers(con_tab=con_trigg, idle=idle, z_scale=z_scale)#, numbers=False)
```
```
from pyadlml.dataset.plot import heatmap_contingency_triggers_01
idle = True
z_scale = 'log'
numbers=True
heatmap_contingency_triggers_01(con_tab_01=con_trigg_01, z_scale=z_scale, idle=idle, numbers=numbers, figsize=(14,13));
```
Sadly the different states of a device don't convey much information. It would have been nice if an activity is only involved with a single device transition.
## Duration
the overlap of the intervals is also interesting.
```
from pyadlml.dataset.plot import heatmap_contingency_overlaps
z_scale='log'
idle=True
heatmap_contingency_overlaps(con_tab=con_int, z_scale=z_scale, idle=idle, figsize=(17,13), numbers=False)
```
# Representations
There are different ways to represent our data. One way is to discretize the data into intervals.
- RAW representation _____|^^^^^^|______
is 1 whenever the sensor is 'on' and 0 otherwise
- Changepoint representation ______|________|______
sensor gives a 1 to timeslices where the sensor reading changes
- LastFired representation ______|^^^^^^^^^
last sensor that changed state gives cont. 1
- Lagged representation
  the feature vector at time $t$ is stacked together with a window of the preceding time slices (see the lagged encoders below)
## RAW - representation
```
import sys
sys.path.append("../..")
import pandas as pd
import matplotlib.pyplot as plt
import pyadlml
import requests
from pyadlml.datasets import set_data_home, fetch_aras
set_data_home('/home/chris/code/adlml/data_home')
data = fetch_aras(cache=True, keep_original=True)
from pyadlml.preprocessing import DiscreteEncoder
enc_raw = DiscreteEncoder(rep="raw")
raw = enc_raw.fit_transform(data.df_devices)
from pyadlml.preprocessing import RawEncoder
enc_raw = RawEncoder()
raw = enc_raw.fit_transform(data.df_devices)
from pyadlml.preprocessing import LabelEncoder
enc_lbl = LabelEncoder(raw)
from pyadlml.datasets import set_parallel
set_parallel(True)
%time lbls = enc_lbl.fit_transform(data.df_activities)
set_parallel(False)
%time lbls = enc_lbl.fit_transform(data.df_activities)
```
- with dask
17.7 s ± 253 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
- without dask
Wall time: 1min 10s
```
import numpy as np
def check_raw_undiscretized(raw):
    """Sanity-check an undiscretized RAW device representation.

    Parameters
    ----------
    raw : pandas.DataFrame
        One column per device, one row per recorded state vector,
        ordered in time.

    Raises
    ------
    AssertionError
        If the frame contains NaN values, or if two consecutive rows
        differ in anything other than exactly one entry.
    """
    # should not contain any nan values
    assert not raw.isnull().values.any()
    # consecutive rows may differ in exactly one entry. Sum the absolute
    # per-entry differences: the previous abs-of-sum check wrongly accepted
    # rows with several changes whose net effect happened to be +/-1
    # (e.g. two devices switching on while one switches off).
    for i in range(1, len(raw)):
        diff = raw.iloc[i - 1].values - raw.iloc[i].values
        assert 1 == np.sum(np.abs(diff))
check_raw_undiscretized(raw)
```
```
def check_raw(raw):
    """Sanity-check the discretized RAW representation: NaNs are not allowed."""
    contains_nan = raw.isnull().values.any()
    assert not contains_nan
check_raw(raw)
```
```
from pyadlml.dataset.plot.raw import hist_activities
hist_activities(enc_lbl.inverse_transform(lbls),
scale='log')#, figsize=(10,10))
from pyadlml.dataset.plot.raw import heatmap_contingency_01
z_scale = 'log'
rep = 'raw'
heatmap_contingency_01(raw, enc_lbl.inverse_transform(lbls), rep=rep, z_scale=z_scale);
from pyadlml.dataset.plot.raw import heatmap_cross_correlation
heatmap_cross_correlation(raw);
# TODO sth. is wrong here
#from pyadlml.dataset.plot.raw import corr_devices_01
#corr_devices_01(raw)
```
## Changepoint - representation
Raw: _____|^^^^^^|______
CP : ______|________|______
sensor gives a 1 to timeslices where the sensor reading changes
```
from pyadlml.preprocessing import ChangepointEncoder
idle = True
t_res = '60s'
cp_enc = ChangepointEncoder(t_res=t_res)
cp = cp_enc.fit_transform(data.df_devices)
from pyadlml.preprocessing import LabelEncoder
enc_lbl = LabelEncoder(cp, idle=idle)
lbls = enc_lbl.fit_transform(data.df_activities)
def check_changepoint(cp):
    """Sanity-check the Changepoint representation: NaNs are not allowed."""
    nan_found = cp.isnull().values.any()
    assert not nan_found
check_changepoint(cp)
from pyadlml.dataset.plot.raw import heatmap_contingency_01
z_scale = 'log'
rep = 'Changepoint'
heatmap_contingency_01(cp, enc_lbl.inverse_transform(lbls), rep=rep, z_scale=z_scale);
from pyadlml.dataset.plot.raw import heatmap_cross_correlation
heatmap_cross_correlation(cp);
```
## LastFired - representation
______|^^^^^^^^^
last sensor that changed state gives cont. 1
```
from pyadlml.preprocessing import LastFiredEncoder
t_res = '60s'
idle=True
lf_enc = LastFiredEncoder(t_res=t_res)
lf = lf_enc.fit_transform(data.df_devices)
from pyadlml.preprocessing import LabelEncoder
enc_lbl = LabelEncoder(lf, idle=idle)
lbls = enc_lbl.fit_transform(data.df_activities)
def check_lastfired(lf):
    """Sanity-check the LastFired representation.

    The frame must be NaN-free and exactly one device may be marked as
    the last-fired one in every time slice.
    """
    # no NaN values anywhere
    assert not lf.isnull().values.any()
    # each time slice marks exactly one device
    for _, slice_values in lf.iterrows():
        assert slice_values.values.sum() == 1
check_lastfired(lf)
from pyadlml.dataset.plot.raw import heatmap_contingency_01
z_scale = 'log'
rep = 'LastFired'
heatmap_contingency_01(lf, enc_lbl.inverse_transform(lbls), rep=rep, z_scale=z_scale);
from pyadlml.dataset.plot.raw import heatmap_cross_correlation
heatmap_cross_correlation(lf);
```
## Image - representation
a lot of timeseries can be represented as images. There are good algorithms for images available. There are Recurrence Plots (RP), Gramian Angular Field (GAF) (bad for binary data), Markov Transition Field (MTF): https://doi.org/10.3390/electronics9010068.
Gramian Angular field:
https://medium.com/analytics-vidhya/encoding-time-series-as-images-b043becbdbf3
https://pyts.readthedocs.io/en/stable/auto_examples/image/plot_mtf.html#sphx-glr-auto-examples-image-plot-mtf-py
### Lagged RAW
The lagged raw representation generates a 3D tensor from the raw input. A state at time $t$ is $1D$ vector. The window size determines how big the $2D$ picture is. Stack the windows in the third dimension.
Also see last part of
https://towardsdatascience.com/ml-approaches-for-time-series-4d44722e48fe
```
from pyadlml.preprocessing import LaggedRawEncoder, LaggedLabelEncoder
window_size = 20
t_res = '2min'
sample_strat = 'int_coverage'
idle = True
enc_lgd_raw = LaggedRawEncoder(window_size,
t_res=t_res,
sample_strat=sample_strat)
images = enc_lgd_raw.fit_transform(data.df_devices)
images.shape
enc_lgd_lbl = LaggedLabelEncoder(data.df_devices,
window_size,
t_res=t_res,
idle=idle)
labels = enc_lgd_lbl.fit_transform(data.df_activities)
labels.shape
from pyadlml.dataset.plot.image import mean_image
mean_image(images, data.devices)
from pyadlml.dataset.plot.image import mean_image_per_activity
X = images
y = enc_lgd_lbl.inverse_transform(labels)
mean_image_per_activity(X, y, data.devices)
```
# Lagged Changepoint representation
```
from pyadlml.preprocessing import LaggedChangepointEncoder
window_size = 20
t_res = '2min'
sample_strat = 'int_coverage'
idle = True
enc_lgd_cp = LaggedChangepointEncoder(window_size, t_res=t_res)
images = enc_lgd_cp.fit_transform(data.df_devices)
images.shape
from pyadlml.dataset.plot.image import mean_image
mean_image(images, data.devices)
enc_lgd_lbl = LaggedLabelEncoder(data.df_devices,
window_size,
t_res=t_res,
idle=idle)
labels = enc_lgd_lbl.fit_transform(data.df_activities)
labels.shape
from pyadlml.dataset.plot.image import mean_image_per_activity
X = images
y = enc_lgd_lbl.inverse_transform(labels)
mean_image_per_activity(X, y, data.devices)
```
## Lagged Last Fired Encoder
```
from pyadlml.preprocessing import LaggedLastFiredEncoder
window_size = 20
t_res = '2min'
sample_strat = 'int_coverage'
idle=True
enc_lgd_lf = LaggedLastFiredEncoder(window_size, t_res=t_res)
images = enc_lgd_lf.fit_transform(data.df_devices)
images.shape
from pyadlml.dataset.plot.image import mean_image
mean_image(images, data.devices)
enc_lgd_lbl = LaggedLabelEncoder(data.df_devices,
window_size,
t_res=t_res,
idle=idle)
labels = enc_lgd_lbl.fit_transform(data.df_activities)
labels.shape
from pyadlml.dataset.plot.image import mean_image_per_activity
X = images
y = enc_lgd_lbl.inverse_transform(labels)
mean_image_per_activity(X, y, data.devices)
```
| github_jupyter |
## Computer Vision Learner
[`vision.learner`](/vision.learner.html#vision.learner) is the module that defines the [`cnn_learner`](/vision.learner.html#cnn_learner) method, to easily get a model suitable for transfer learning.
```
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
```
## Transfer learning
Transfer learning is a technique where you use a model trained on a very large dataset (usually [ImageNet](http://image-net.org/) in computer vision) and then adapt it to your own dataset. The idea is that it has learned to recognize many features on all of this data, and that you will benefit from this knowledge, especially if your dataset is small, compared to starting from a randomly initialized model. It has been proved in [this article](https://arxiv.org/abs/1805.08974) on a wide range of tasks that transfer learning nearly always give better results.
In practice, you need to change the last part of your model to be adapted to your own number of classes. Most convolutional models end with a few linear layers (a part will call head). The last convolutional layer will have analyzed features in the image that went through the model, and the job of the head is to convert those in predictions for each of our classes. In transfer learning we will keep all the convolutional layers (called the body or the backbone of the model) with their weights pretrained on ImageNet but will define a new head initialized randomly.
Then we will train the model we obtain in two phases: first we freeze the body weights and only train the head (to convert those analyzed features into predictions for our own data), then we unfreeze the layers of the backbone (gradually if necessary) and fine-tune the whole model (possibly using differential learning rates).
The [`cnn_learner`](/vision.learner.html#cnn_learner) factory method helps you to automatically get a pretrained model from a given architecture with a custom head that is suitable for your data.
```
show_doc(cnn_learner)
```
This method creates a [`Learner`](/basic_train.html#Learner) object from the [`data`](/vision.data.html#vision.data) object and model inferred from it with the backbone given in `arch`. Specifically, it will cut the model defined by `arch` (randomly initialized if `pretrained` is False) at the last convolutional layer by default (or as defined in `cut`, see below) and add:
- an [`AdaptiveConcatPool2d`](/layers.html#AdaptiveConcatPool2d) layer,
- a [`Flatten`](/layers.html#Flatten) layer,
- blocks of \[[`nn.BatchNorm1d`](https://pytorch.org/docs/stable/nn.html#torch.nn.BatchNorm1d), [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout), [`nn.Linear`](https://pytorch.org/docs/stable/nn.html#torch.nn.Linear), [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU)\] layers.
The blocks are defined by the `lin_ftrs` and `ps` arguments. Specifically, the first block will have a number of inputs inferred from the backbone `arch` and the last one will have a number of outputs equal to `data.c` (which contains the number of classes of the data) and the intermediate blocks have a number of inputs/outputs determined by `lin_frts` (of course a block has a number of inputs equal to the number of outputs of the previous block). The default is to have an intermediate hidden size of 512 (which makes two blocks `model_activation` -> 512 -> `n_classes`). If you pass a float then the final dropout layer will have the value `ps`, and the remaining will be `ps/2`. If you pass a list then the values are used for dropout probabilities directly.
Note that the very last block doesn't have a [`nn.ReLU`](https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU) activation, to allow you to use any final activation you want (generally included in the loss function in pytorch). Also, the backbone will be frozen if you choose `pretrained=True` (so only the head will train if you call [`fit`](/basic_train.html#fit)) so that you can immediately start phase one of training as described above.
Alternatively, you can define your own `custom_head` to put on top of the backbone. If you want to specify where to split `arch` you should so in the argument `cut` which can either be the index of a specific layer (the result will not include that layer) or a function that, when passed the model, will return the backbone you want.
The final model obtained by stacking the backbone and the head (custom or defined as we saw) is then separated in groups for gradual unfreezing or differential learning rates. You can specify how to split the backbone in groups with the optional argument `split_on` (should be a function that returns those groups when given the backbone).
The `kwargs` will be passed on to [`Learner`](/basic_train.html#Learner), so you can put here anything that [`Learner`](/basic_train.html#Learner) will accept ([`metrics`](/metrics.html#metrics), `loss_func`, `opt_func`...)
```
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learner = cnn_learner(data, models.resnet18, metrics=[accuracy])
learner.fit_one_cycle(1,1e-3)
learner.save('one_epoch')
show_doc(unet_learner)
```
This time the model will be a [`DynamicUnet`](/vision.models.unet.html#DynamicUnet) with an encoder based on `arch` (maybe `pretrained`) that is cut depending on `split_on`. `blur_final`, `norm_type`, `blur`, `self_attention`, `y_range`, `last_cross` and `bottle` are passed to unet constructor, the `kwargs` are passed to the initialization of the [`Learner`](/basic_train.html#Learner).
```
jekyll_warn("The models created with this function won't work with pytorch `nn.DataParallel`, you have to use distributed training instead!")
```
### Get predictions
Once you've actually trained your model, you may want to use it on a single image. This is done by using the following method.
```
show_doc(Learner.predict)
img = learner.data.train_ds[0][0]
learner.predict(img)
```
Here the predict class for our image is '3', which corresponds to a label of 0. The probabilities the model found for each class are 99.65% and 0.35% respectively, so its confidence is pretty high.
Note that if you want to load your trained model and use it on inference mode with the previous function, you should export your [`Learner`](/basic_train.html#Learner).
```
learner.export()
```
And then you can load it with an empty data object that has the same internal state like this:
```
learn = load_learner(path)
```
### Customize your model
You can customize [`cnn_learner`](/vision.learner.html#cnn_learner) for your own model's default `cut` and `split_on` functions by adding them to the dictionary `model_meta`. The key should be your model and the value should be a dictionary with the keys `cut` and `split_on` (see the source code for examples). The constructor will call [`create_body`](/vision.learner.html#create_body) and [`create_head`](/vision.learner.html#create_head) for you based on `cut`; you can also call them yourself, which is particularly useful for testing.
```
show_doc(create_body)
show_doc(create_head, doc_string=False)
```
Model head that takes `nf` features, runs through `lin_ftrs`, and ends with `nc` classes. `ps` is the probability of the dropouts, as documented above in [`cnn_learner`](/vision.learner.html#cnn_learner).
```
show_doc(ClassificationInterpretation, title_level=3)
```
This provides a confusion matrix and visualization of the most incorrect images. Pass in your [`data`](/vision.data.html#vision.data), calculated `preds`, actual `y`, and your `losses`, and then use the methods below to view the model interpretation results. For instance:
```
learn = cnn_learner(data, models.resnet18)
learn.fit(1)
preds,y,losses = learn.get_preds(with_loss=True)
interp = ClassificationInterpretation(learn, preds, y, losses)
```
The following factory method gives a more convenient way to create an instance of this class:
```
show_doc(ClassificationInterpretation.from_learner, full_name='from_learner')
```
You can also use a shortcut `learn.interpret()` to do the same.
```
show_doc(Learner.interpret, full_name='interpret')
```
Note that this shortcut is a [`Learner`](/basic_train.html#Learner) object/class method that can be called as: `learn.interpret()`.
```
show_doc(ClassificationInterpretation.plot_top_losses, full_name='plot_top_losses')
```
The `k` items are arranged as a square, so it will look best if `k` is a square number (4, 9, 16, etc). The title of each image shows: prediction, actual, loss, probability of actual class. When `heatmap` is True (by default it's True) , Grad-CAM heatmaps (http://openaccess.thecvf.com/content_ICCV_2017/papers/Selvaraju_Grad-CAM_Visual_Explanations_ICCV_2017_paper.pdf) are overlaid on each image. `plot_top_losses` should be used with single-labeled datasets. See `plot_multi_top_losses` below for a version capable of handling multi-labeled datasets.
```
interp.plot_top_losses(9, figsize=(7,7))
show_doc(ClassificationInterpretation.plot_multi_top_losses, full_name='plot_multi_top_losses')
```
Similar to `plot_top_losses()` but aimed at multi-labeled datasets. It plots misclassified samples sorted by their respective loss.
Since you can have multiple labels for a single sample, they can easily overlap in a grid plot. So it plots just one sample per row.
Note that you can pass `save_misclassified=True` (by default it's `False`). In such case, the method will return a list containing the misclassified images which you can use to debug your model and/or tune its hyperparameters.
## Undocumented Methods - Methods moved below this line will intentionally be hidden
## New Methods - Please document or move to the undocumented section
| github_jupyter |
##### Copyright 2019 Qiyang Hu
```
#@title Licensed under MIT License (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://huqy.github.io/idre_learning_machine_learning/LICENSE.md
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Titanic Kaggle Challenge, a case study
Major ideas of data processing are mainly adopted from [Link](https://ahmedbesbes.com/how-to-score-08134-in-titanic-kaggle-challenge.html)
## Setup the useful libraries
```
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings('ignore', category=DeprecationWarning)
import pandas as pd
pd.options.display.max_columns = 100
from matplotlib import pyplot as plt
import numpy as np
import seaborn as sns
import pylab as plot
params = {
'axes.labelsize': "large",
'xtick.labelsize': 'large',
'legend.fontsize': 12,
'figure.figsize': [10, 3]
}
plot.rcParams.update(params)
def status(feature):
    """Print a short confirmation that *feature* has been processed."""
    print(f'Processing {feature} : ok')
```
## Get the data
```
from google.colab import files
files.upload();
!mkdir -p /root/.kaggle
!cp kaggle.json /root/.kaggle
!chmod 600 /root/.kaggle/kaggle.json
!kaggle config set -n path -v{/content}
!kaggle competitions list -s titanic
!kaggle competitions download -c titanic -p /content
```
## Exploratory Data Analysis (EDA)
### Loading training dataset
```
data = pd.read_csv('train.csv')
print(data.shape)
data.head()
```
### Check if it is an imbalanced classification problem
```
sns.countplot(x='Survived', data=data)
print("Survival rate: ", data.Survived.sum()/data.Survived.count())
```
Good! There is no class-imbalance problem here, so we can follow the standard approach.
### Pre-processing the missing data
We may need to refine it further for better results.
```
data.describe()
data['Age'] = data['Age'].fillna(data['Age'].median())
data.describe()
```
### Visualizing the data
```
sns.pairplot(data, diag_kind="hist", hue="Survived")
def correlation_heatmap(df, method):
    """Draw an annotated heatmap of the pairwise feature correlations.

    Parameters
    ----------
    df : pandas.DataFrame
        Data whose numeric columns are correlated against each other.
    method : str
        Correlation method forwarded to ``DataFrame.corr``
        (e.g. 'pearson', 'spearman', 'kendall').
    """
    fig, ax = plt.subplots(figsize=(14, 12))
    palette = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(
        df.corr(method=method),
        cmap=palette,
        square=True,
        annot=True,
        annot_kws={'fontsize': 9},
    )
    plt.title('Correlation Matrix', y=1.05, size=15)
correlation_heatmap(data, 'spearman')
data['Died'] = 1 - data['Survived']
data.groupby('Sex').agg('sum')[['Survived', 'Died']].plot(kind='bar', stacked=True, color=['g', 'r']);
data.groupby('Sex').agg('mean')[['Survived', 'Died']].plot(kind='bar', stacked=True, color=['g', 'r']);
sns.violinplot(x='Sex', y='Age', hue='Survived', data=data, split=True, palette={0: "r", 1: "g"});
plt.hist([data[data['Survived'] == 1]['Fare'], data[data['Survived'] == 0]['Fare']], stacked=True, color = ['g','r'], bins = 50, label = ['Survived','Dead'])
plt.xlabel('Fare')
plt.ylabel('Number of passengers')
plt.legend();
ax = plt.subplot()
ax.scatter(data[data['Survived'] == 1]['Age'], data[data['Survived'] == 1]['Fare'], c='green', s=data[data['Survived'] == 1]['Fare'])
ax.scatter(data[data['Survived'] == 0]['Age'], data[data['Survived'] == 0]['Fare'], c='red', s=data[data['Survived'] == 0]['Fare'])
plt.xlabel('Age')
plt.ylabel('Fare');
ax = plt.subplot()
ax.set_ylabel('Average fare')
data.groupby('Pclass').mean()['Fare'].plot(kind='bar', ax = ax);
sns.violinplot(x='Embarked', y='Fare', hue='Survived', data=data, split=True, palette={0: "r", 1: "g"});
```
## Feature Engineering
### Combining train and test dataset
```
def get_combined_data():
    """Load train.csv and test.csv and stack them into one DataFrame
    for joint feature engineering.

    The 'Survived' target and the non-informative 'PassengerId' column
    are removed.

    Returns
    -------
    pandas.DataFrame
        Training rows first, then test rows, with a fresh 0-based index.
    """
    # reading train data
    train = pd.read_csv('train.csv')
    # reading test data
    test = pd.read_csv('test.csv')
    # removing the targets from the training data
    # (axis keyword: the positional form was removed in pandas 2.0)
    train.drop(['Survived'], axis=1, inplace=True)
    # merging train data and test data for future feature engineering;
    # DataFrame.append was removed in pandas 2.0, so use pd.concat
    combined = pd.concat([train, test])
    # we'll also remove the PassengerID since this is not an informative feature
    combined.reset_index(inplace=True)
    combined.drop(['index', 'PassengerId'], inplace=True, axis=1)
    return combined
combined = get_combined_data()
print(combined.shape)
combined.head()
combined.isna().sum()
```
### Extracting the passenger titles
```
titles = set()
for name in data['Name']:
titles.add(name.split(',')[1].split('.')[0].strip())
print (titles)
```
We'd like to summarize 5 categories of titles as:
* Officer
* Royalty
* Mr
* Mrs
* Miss
```
Title_Dictionary = {
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir" : "Royalty",
"Dr": "Officer",
"Rev": "Officer",
"the Countess":"Royalty",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr" : "Mr",
"Mrs" : "Mrs",
"Miss" : "Miss",
"Master" : "Master",
"Lady" : "Royalty"
}
def get_titles():
    """Add a 'Title' column to the global `combined` frame.

    First extracts the raw honorific from each name (the token between
    ',' and '.'), then collapses it to its aggregated category via
    Title_Dictionary.
    """
    def extract_title(full_name):
        # "Braund, Mr. Owen Harris" -> "Mr"
        return full_name.split(',')[1].split('.')[0].strip()

    combined['Title'] = combined['Name'].map(extract_title)
    # collapse the raw honorifics into the aggregated categories
    combined['Title'] = combined['Title'].map(Title_Dictionary)
    status('Title')
    return combined
combined = get_titles()
combined.head()
combined[combined['Title'].isnull()]
```
Note: row 1305 has a NaN title because its honorific ("Dona") is not covered by `Title_Dictionary`. Since index 1305 falls in the *test* portion of `combined` (indices 891 and above), this NaN must be handled — e.g. by mapping "Dona" to "Royalty" — before making predictions.
### Further processing the missing-value ages
```
print('Number of missing ages in training set', combined.iloc[:891].Age.isnull().sum())
print('Number of missing ages in test set', combined.iloc[891:].Age.isnull().sum())
grouped_train = combined.iloc[:891].groupby(['Sex','Pclass','Title'])
grouped_median_train = grouped_train.median()
grouped_median_train = grouped_median_train.reset_index()[['Sex', 'Pclass', 'Title', 'Age']]
grouped_median_train.head()
def fill_age(row, medians=None):
    """Return an imputed age for *row* from the (Sex, Title, Pclass) medians.

    Parameters
    ----------
    row : pandas Series with 'Sex', 'Title' and 'Pclass' entries
    medians : DataFrame, optional
        lookup table with 'Sex', 'Title', 'Pclass' and 'Age' columns;
        defaults to the module-level `grouped_median_train`.

    Returns
    -------
    float
        The group median age, or the overall median of the lookup table
        when the row's (Sex, Title, Pclass) combination is absent
        (previously an unseen combination raised IndexError).
    """
    if medians is None:
        medians = grouped_median_train
    condition = (
        (medians['Sex'] == row['Sex']) &
        (medians['Title'] == row['Title']) &
        (medians['Pclass'] == row['Pclass'])
    )
    matched = medians.loc[condition, 'Age']
    if matched.empty:
        # combinations unseen in the train split fall back to the global median
        return medians['Age'].median()
    return matched.values[0]
def process_age():
    """Fill missing Age values using the (Sex, Pclass, Title) group medians."""
    global combined

    def impute(row):
        # keep observed ages; impute only where Age is NaN
        if np.isnan(row['Age']):
            return fill_age(row)
        return row['Age']

    combined['Age'] = combined.apply(impute, axis=1)
    status('age')
    return combined
combined = process_age()
```
### Processing names to make title features binary
```
def process_names():
    """Replace the raw Name/Title columns with one-hot Title indicators."""
    global combined
    # the raw name string is no longer needed once titles are extracted
    combined.drop('Name', axis=1, inplace=True)
    # one-hot encode the aggregated titles and append the indicator columns
    title_indicators = pd.get_dummies(combined['Title'], prefix='Title')
    combined = pd.concat([combined, title_indicators], axis=1)
    # the categorical column is now redundant
    combined.drop('Title', axis=1, inplace=True)
    status('names')
    return combined
combined = process_names()
combined.head()
```
### Processing missing-value fares
```
def process_fares():
    """Impute the single missing Fare with the mean fare of the train split.

    The mean is computed on rows 0..890 (the training portion of `combined`)
    so no test-set statistic leaks into the feature.
    """
    global combined
    # chained-attribute inplace fillna is fragile under pandas copy-on-write;
    # assign the filled column back explicitly instead
    combined['Fare'] = combined['Fare'].fillna(combined.iloc[:891]['Fare'].mean())
    status('fare')
    return combined
combined = process_fares()
```
### Processing Embarked
```
def process_embarked():
    """Fill missing Embarked values and one-hot encode the column.

    The two missing values are filled with 'S', the most frequent port
    in the train set.
    """
    global combined
    # explicit assignment instead of chained-attribute inplace fillna,
    # which is fragile under pandas copy-on-write
    combined['Embarked'] = combined['Embarked'].fillna('S')
    # dummy encoding of the three ports
    embarked_dummies = pd.get_dummies(combined['Embarked'], prefix='Embarked')
    combined = pd.concat([combined, embarked_dummies], axis=1)
    combined.drop('Embarked', axis=1, inplace=True)
    status('embarked')
    return combined
combined = process_embarked()
combined.head()
```
### Processing Cabin
```
def _first_cabin_letter(cabin):
    """Deck letter of a cabin code; 'U' (unknown) for missing cabins."""
    # missing cabins arrive as NaN floats, which is why the old bare
    # `except:` fired — test the type explicitly instead of swallowing
    # every possible exception
    return cabin[0] if isinstance(cabin, str) else 'U'

train_cabin = {_first_cabin_letter(c) for c in combined.iloc[:891]['Cabin']}
test_cabin = {_first_cabin_letter(c) for c in combined.iloc[891:]['Cabin']}
print('Cabin set in training dataset:', sorted(train_cabin))
print('Cabin set in testing dataset: ', sorted(test_cabin))
def process_cabin():
    """Map cabins to their deck letter ('U' when missing) and one-hot encode."""
    global combined
    # missing cabins become the sentinel letter 'U' (unknown)
    combined.Cabin.fillna('U', inplace=True)
    # keep only the leading deck letter of each cabin code
    combined['Cabin'] = combined['Cabin'].map(lambda cabin: cabin[0])
    # one-hot encode the deck letters
    deck_dummies = pd.get_dummies(combined['Cabin'], prefix='Cabin')
    combined = pd.concat([combined, deck_dummies], axis=1)
    combined.drop('Cabin', axis=1, inplace=True)
    status('cabin')
    return combined
combined = process_cabin()
combined.head()
```
### Processing Sex
```
def process_sex():
    """Encode Sex as an integer flag: male -> 1, female -> 0."""
    global combined
    sex_codes = {'male': 1, 'female': 0}
    combined['Sex'] = combined['Sex'].map(sex_codes)
    status('Sex')
    return combined
combined = process_sex()
```
### Processing Pclass
```
def process_pclass():
    """One-hot encode the three passenger classes and drop the raw column."""
    global combined
    # indicator columns Pclass_1 / Pclass_2 / Pclass_3
    class_dummies = pd.get_dummies(combined['Pclass'], prefix="Pclass")
    combined = pd.concat([combined, class_dummies], axis=1)
    # the raw ordinal column is now redundant
    combined.drop('Pclass', axis=1, inplace=True)
    status('Pclass')
    return combined
combined = process_pclass()
combined.head()
```
### Processing Ticket
```
def cleanTicket(ticket):
    """Return the alphabetic prefix of a ticket code, or 'XXX' when the
    ticket is purely numeric (i.e. has no prefix)."""
    # drop punctuation, then keep only the non-numeric tokens
    stripped = ticket.replace('.', '').replace('/', '')
    prefixes = [part.strip() for part in stripped.split() if not part.strip().isdigit()]
    return prefixes[0] if prefixes else 'XXX'
# Count how many distinct ticket prefixes exist across the combined data.
tickets = set()
for t in combined['Ticket']:
    tickets.add(cleanTicket(t))
print (len(tickets))
def process_ticket():
    """Replace Ticket codes with one-hot indicators of their prefixes.

    Tickets without an alphabetic prefix (purely numeric codes) are
    grouped under the sentinel prefix 'XXX'.
    """
    global combined

    # extracts each prefix of the ticket, returns 'XXX' if no prefix
    # (i.e. the ticket is a digit)
    def cleanTicket(ticket):
        ticket = ticket.replace('.', '')
        ticket = ticket.replace('/', '')
        ticket = ticket.split()
        ticket = map(lambda t: t.strip(), ticket)
        ticket = list(filter(lambda t: not t.isdigit(), ticket))
        if len(ticket) > 0:
            return ticket[0]
        else:
            return 'XXX'

    # Extracting dummy variables from tickets:
    combined['Ticket'] = combined['Ticket'].map(cleanTicket)
    tickets_dummies = pd.get_dummies(combined['Ticket'], prefix='Ticket')
    combined = pd.concat([combined, tickets_dummies], axis=1)
    combined.drop('Ticket', inplace=True, axis=1)
    status('Ticket')
    return combined
combined = process_ticket()
combined.head()
```
### Processing Family
We want to introduce 4 new features for family values:
* FamilySize : the total number of relatives including the passenger (him/her)self.
* Singleton : a boolean variable that describes families of size = 1
* SmallFamily : a boolean variable that describes families of 2 <= size <= 4
* LargeFamily : a boolean variable that describes families of size >= 5
```
def process_family():
    """Derive family-size features from Parch and SibSp.

    Adds FamilySize (relatives aboard including the passenger) plus three
    indicator columns: Singleton (size == 1), SmallFamily (2 <= size <= 4)
    and LargeFamily (size >= 5).
    """
    global combined
    # relatives aboard, counting the passenger him/herself
    size = combined['Parch'] + combined['SibSp'] + 1
    combined['FamilySize'] = size
    combined['Singleton'] = size.map(lambda n: 1 if n == 1 else 0)
    combined['SmallFamily'] = size.map(lambda n: 1 if 2 <= n <= 4 else 0)
    combined['LargeFamily'] = size.map(lambda n: 1 if n >= 5 else 0)
    status('family')
    return combined
combined = process_family()
print (combined.shape)
combined.head()
```
## Modeling
### Setup imports and preparations
```
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
# sklearn.ensemble.gradient_boosting was a private module path, deprecated in
# scikit-learn 0.22 and removed in 0.24 — import from the public package
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelBinarizer
from sklearn import ensemble, tree, svm, naive_bayes, neighbors, linear_model, gaussian_process, neural_network
#from sklearn.svm import LinearSVC, SVC
from sklearn.metrics import accuracy_score, f1_score, auc, roc_curve, roc_auc_score, make_scorer
```
To evaluate our model we'll be using a 5-fold cross validation with the accuracy.
```
def compute_score(clf, X, y, scoring='accuracy'):
    """Mean 5-fold cross-validation score of `clf` on (X, y)."""
    fold_scores = cross_val_score(clf, X, y, cv=5, scoring=scoring)
    return np.mean(fold_scores)
def recover_train_test_target():
    """Split `combined` back into train/test frames and reload the labels.

    Returns
    -------
    train : DataFrame
        first 891 rows of `combined` (the training portion)
    test : DataFrame
        remaining rows of `combined` (the test portion)
    targets : ndarray
        'Survived' labels re-read from train.csv
    """
    global combined
    # the labels were dropped from `combined`, so re-read them from disk
    targets = pd.read_csv('train.csv', usecols=['Survived'])['Survived'].values
    train = combined.iloc[:891]
    test = combined.iloc[891:]
    return train, test, targets
train, test, targets = recover_train_test_target()
```
### Scaling the numericals in the training data
```
# Identify the numeric columns that would need scaling.
train_numerical_features = list(train.select_dtypes(include=['int64', 'float64', 'int32']).columns)
#train_numerical_features = ['Age', 'Fare', 'FamilySize']
# NOTE(review): the scaler below is instantiated but never fit/applied, and
# pd.DataFrame(data=train) is a no-op — the data reaches the models unscaled.
# Confirm whether scaling was intended here (tree models do not require it).
ss_scaler = StandardScaler()
train = pd.DataFrame(data = train)
train.head()
```
### Feature selection
Tree-based estimators can be used to compute feature importances.
```
clf = RandomForestClassifier(n_estimators=50, max_features='sqrt')
clf = clf.fit(train, targets)
features = pd.DataFrame()
features['feature'] = train.columns
features['importance'] = clf.feature_importances_
features.sort_values(by=['importance'], ascending=True, inplace=True)
features.set_index('feature', inplace=True)
features.plot(kind='barh', figsize=(10, 20))
model = SelectFromModel(clf, prefit=True)
train_reduced = model.transform(train)
print (train_reduced.shape)
test_reduced = model.transform(test)
print (test_reduced.shape)
targets.shape
```
### Inspection for solo-feature classification problem
```
sns.regplot(x='Age', y='Survived', data=data, logistic=True)
X = data['Age'].values.reshape(-1,1)
Y = targets
clf = LogisticRegression()
clf.fit(X,np.ravel(Y.astype(int)))
clf.coef_[0]
score = compute_score(clf=clf, X=X, y=targets, scoring='accuracy')
print("Accuracy Score:", score)
xlim = (0, 85)
xx = np.linspace(xlim[0], xlim[1], 85).reshape(-1,1)
Z = clf.predict_proba(xx)
Z = Z[:, 1].reshape(xx.shape)
ax = plt.subplot()
ax.scatter(X,Y)
ax.plot(xx,Z, c="orange")
plt.xlabel("Age")
plt.ylabel("Probability of Survival")
```
### Inspection for a dual-feature classification problem
```
# Scatter the raw (Age, Fare) plane, colored by survival
ax = plt.subplot()
ax.scatter(data[data['Survived'] == 1]['Age'], data[data['Survived'] == 1]['Fare'], c='green')
ax.scatter(data[data['Survived'] == 0]['Age'], data[data['Survived'] == 0]['Fare'], c='red')
plt.xlabel('Age')
plt.ylabel('Fare');
# DataFrame.as_matrix() was removed in pandas 1.0 — use to_numpy() instead
X = train[['Age','Fare']].to_numpy()
clf = naive_bayes.GaussianNB()
clf.fit(X, targets)
score = compute_score(clf=clf, X=X, y=targets, scoring='accuracy')
print(score)
# Evaluate the fitted classifier's survival probability on a dense grid
xlim = (0, 85)
ylim = (0, 550)
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 85),
                     np.linspace(ylim[0], ylim[1], 550))
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
# Overlay the p = 0.5 decision boundary on the scatter plot
ax = plt.subplot()
ax.scatter(data[data['Survived'] == 1]['Age'], data[data['Survived'] == 1]['Fare'], c='green')
ax.scatter(data[data['Survived'] == 0]['Age'], data[data['Survived'] == 0]['Fare'], c='red')
ax.contour(xx, yy, Z, [0.5], colors='blue')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel('Age')
ax.set_ylabel('Fare')
plt.show()
```
### Different base models
```
logreg = LogisticRegression()
logreg_cv = LogisticRegressionCV(max_iter=700)
rf = RandomForestClassifier()
gboost = GradientBoostingClassifier()
xgb = XGBClassifier()
svmsvc = svm.SVC(probability=True)
perceptron = linear_model.Perceptron()
gaussiannb = naive_bayes.GaussianNB()
gaussianp = gaussian_process.GaussianProcessClassifier()
adaboost = ensemble.AdaBoostClassifier()
models = [logreg, logreg_cv, rf, gboost, xgb, svmsvc, perceptron, gaussiannb, gaussianp, adaboost]
#Do some preperation for the loop
col = []
algorithms = pd.DataFrame(columns = col)
idx = 0
#Train and score algorithms
for a in models:
a.fit(train_reduced, targets)
pred = a.predict(train_reduced)
acc = accuracy_score(targets, pred) #Other way: a.score(X_test, y_test)
f1 = f1_score(targets, pred)
cv = cross_val_score(a, train_reduced, targets).mean()
Alg = a.__class__.__name__
algorithms.loc[idx, 'Algorithm'] = Alg
algorithms.loc[idx, 'Accuracy'] = round(acc * 100, 2)
algorithms.loc[idx, 'F1 Score'] = round(f1 * 100, 2)
algorithms.loc[idx, 'CV Score'] = round(cv * 100, 2)
idx+=1
#Compare invidual models
algorithms.sort_values(by = ['CV Score'], ascending = False, inplace = True)
algorithms.head()
g = sns.barplot("CV Score", "Algorithm", data = algorithms)
g.set_xlabel("CV score")
g = g.set_title("Algorithm Scores")
for model in models:
print ('Cross-validation of : {0}'.format(model.__class__.__name__))
score = compute_score(clf=model, X=train_reduced, y=targets, scoring='accuracy')
print ('CV score = {0}'.format(score))
print ('****')
```
### Hyperparameters tuning
#### Random Forest model grid search
We will select Random Forest model as an example with using the full train set.
```
# turn run_gs to True if you want to run the gridsearch again.
run_gs = True
if run_gs:
parameter_grid = {
'max_depth' : [4, 6, 8],
'n_estimators': [50, 10],
'max_features': ['sqrt', 'auto', 'log2'],
'min_samples_split': [2, 3, 10],
'min_samples_leaf': [1, 3, 10],
'bootstrap': [True, False],
}
forest = RandomForestClassifier()
cross_validation = StratifiedKFold(n_splits=5)
grid_search = GridSearchCV(forest,
scoring='accuracy',
param_grid=parameter_grid,
cv=cross_validation,
verbose=1,
n_jobs=-1
)
grid_search.fit(train, targets)
model = grid_search
parameters = grid_search.best_params_
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
else:
parameters = {'bootstrap': False, 'min_samples_leaf': 3, 'n_estimators': 50,
'min_samples_split': 10, 'max_features': 'sqrt', 'max_depth': 6}
model = RandomForestClassifier(**parameters)
model.fit(train, targets)
```
#### Gradient boost model grid search
```
# turn run_gs to True if you want to run the gridsearch again.
run_gs = True
if run_gs:
parameter_grid = {
"max_depth":[6, 8, 10],
"n_estimators":[40, 20, 10, 5],
"max_features":["sqrt", "auto", "log2"],
"min_samples_split": [10, 20, 30, 40],
"min_samples_leaf": [3, 10, 20],
}
gboost = GradientBoostingClassifier()
#cross_validation = StratifiedKFold(n_splits=5)
grid_search = GridSearchCV(gboost,
scoring='accuracy',
param_grid=parameter_grid,
cv=5,
verbose=1,
n_jobs=-1
)
grid_search.fit(train, targets)
model = grid_search
parameters = grid_search.best_params_
print('Best score: {}'.format(grid_search.best_score_))
print('Best parameters: {}'.format(grid_search.best_params_))
else:
parameters = {'bootstrap': False, 'min_samples_leaf': 3, 'n_estimators': 50,
'min_samples_split': 10, 'max_features': 'sqrt', 'max_depth': 6}
model = RandomForestClassifier(**parameters)
model.fit(train, targets)
```
#### XGBoost model randomized search
```
gbm_param_grid = {
'n_estimators': range(6, 20),
'max_depth': range(2, 10),
'learning_rate': [.4, .45, .5, .55, .6],
'colsample_bytree': [.6, .7, .8, .9, 1]
}
# Instantiate the regressor: gbm
gbm = XGBClassifier(n_estimators=10)
# Perform random search: grid_mse
xgb_random = RandomizedSearchCV(param_distributions=gbm_param_grid,
estimator = gbm, scoring = "accuracy",
verbose = 1, n_iter = 1000, cv = 5, n_jobs=-1)
# Fit randomized_mse to the data
xgb_random.fit(train, targets)
model = xgb_random
# Print the best parameters and lowest RMSE
print("Best parameters found: ", xgb_random.best_params_)
print("Best accuracy found: ", xgb_random.best_score_)
```
#### XGBoost model grid search
```
# Hyperparameter grid for the XGBoost classifier
param_test1 = {
    'n_estimators': [20, 100,200,500],
    'max_depth': [3,5,7,9],
    'min_child_weight': [1,3,5],
    'gamma':[i/10.0 for i in range(0,5)],
    'subsample':[i/10.0 for i in range(6,10)],
    'colsample_bytree':[i/10.0 for i in range(6,10)],
    'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05, 0.1, 1],
    'learning_rate': [0.01, 0.02, 0.05, 0.1]
}
# Track both AUC and accuracy; refit the best-accuracy model
scoring = {'AUC': 'roc_auc', 'Accuracy': make_scorer(accuracy_score)}
# NOTE: the `iid` parameter was deprecated in scikit-learn 0.22 and removed
# in 0.24, so it is no longer passed here
gsearch1 = GridSearchCV(estimator = XGBClassifier(),
                        param_grid = param_test1,
                        scoring=scoring,
                        cv=5, verbose = 1, n_jobs=-1,
                        refit='Accuracy')
gsearch1.fit(train, targets)
model = gsearch1
# Print the best parameters and best accuracy
print("Best parameters found: ", model.best_params_)
print("Best accuracy found: ", model.best_score_)
```
### Making prediction and submission to check the score
```
output = model.predict(test).astype(int)
df_output = pd.DataFrame()
aux = pd.read_csv('test.csv')
df_output['PassengerId'] = aux['PassengerId']
df_output['Survived'] = output
df_output[['PassengerId','Survived']].to_csv('gridsearch_rf.csv', index=False)
!kaggle competitions submit -c titanic -f gridsearch_rf.csv -m "My another submission using Random Forest with standard scaling"
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib notebook
```
# Conditional Entropy: Can Information Theory Beat the L-S Periodogram?
**Version 0.1**
***
By AA Miller 5 June 2019
In this lecture we will examine alternative methods to search for periodic signals in astronomical time series. Earlier, we spent a great deal of time discussing the Lomb-Scargle periodogram. This is the "standard" in astronomy, in part because it was the first (good) method developed for noisy and sparse data.
We will now explore some alternatives to the L-S periodogram, and this is good, because LS: (i) does not handle outliers well, and (ii) works best on purely sinusoidal signals.
## An Incomplete Whirlwind Tour
In addition to LS, the following techniques are employed to search for periodic signals:
#### String Length
The string length method ([Dworetsky](http://adsabs.harvard.edu/abs/1983MNRAS.203..917D)), phase folds the data at trial periods and then minimizes the distance to connect the phase-ordered observations.
<img style="display: block; margin-left: auto; margin-right: auto" src="./images/StringLength.png" align="middle">
<div align="right"> <font size="-3">(credit: Gaveen Freer - http://slideplayer.com/slide/4212629/#) </font></div>
#### Phase Dispersion Minimization
Phase Dispersion Minimization (PDM; [Jurkevich 1971](http://adsabs.harvard.edu/abs/1971Ap%26SS..13..154J), [Stellingwerth 1978](http://adsabs.harvard.edu/abs/1978ApJ...224..953S)), like LS, folds the data at a large number of trial frequencies $f$.
The phased data are then binned, and the variance is calculated in each bin, combined, and compared to the overall variance of the signal. No functional form of the signal is assumed, and thus, non-sinusoidal signals can be found.
*Challenge*: how to select the number of bins?
<img style="display: block; margin-left: auto; margin-right: auto" src="./images/PDM.jpg" align="middle">
<div align="right"> <font size="-3">(credit: Gaveen Freer - http://slideplayer.com/slide/4212629/#) </font></div>
#### Analysis of Variance
Analysis of Variance (AOV; [Schwarzenberg-Czerny 1989](http://adsabs.harvard.edu/abs/1989MNRAS.241..153S)) is similar to PDM. Optimal periods are defined via hypothesis testing, and these methods are found to perform best for certain types of astronomical signals.
#### Supersmoother
Supersmoother ([Reimann](http://adsabs.harvard.edu/abs/1994PhDT........20R)) is a least-squares approach wherein a flexible, non-parametric model is fit to the folded observations at many trial frequncies. The use of this flexible model reduces aliasing issues relative to models that assume a sinusoidal shape, however, this comes at the cost of requiring considerable computational time.
#### Bayesian Methods
There have been some efforts to frame the period-finding problem in a Bayesian framework. [Bretthorst 1988](https://www.springer.com/us/book/9780387968711) developed Bayesian generalized LS models, while [Gregory & Loredo 1992](http://adsabs.harvard.edu/abs/1992ApJ...398..146G) applied Bayesian techniques to phase-binned models.
More recently, efforts to use Gaussian processes (GPs) to model and extract a period from the light curve have been developed ([Wang et al. 2012](http://adsabs.harvard.edu/abs/2012ApJ...756...67W)). These methods have proved to be especially useful for detecting stellar rotation in Kepler light curves ([Angus et al. 2018](http://adsabs.harvard.edu/abs/2018MNRAS.474.2094A)).
(There will be more on GPs later during this session)
#### Conditional Entropy
Conditional Entropy (CE; [Graham et al. 2013](http://adsabs.harvard.edu/abs/2013MNRAS.434.2629G)), and other entropy based methods, aim to minimize the entropy in binned (normalized magnitude, phase) space. CE, in particular, is good at supressing signal due to the window function.
When tested on real observations, CE outperforms most of the alternatives (e.g., LS, PDM, etc).
<img style="display: block; margin-left: auto; margin-right: auto" src="./images/CE.png" align="middle">
<div align="right"> <font size="-3">(credit: Graham et al. 2013) </font></div>
## Conditional Entropy
The focus of today's exercise is conditional entropy (CE), which uses information theory and thus, in principle, works better in the presence of noise and outliers. Furthermore, CE does not make any assumptions about the underlying shape of the signal, which is useful when looking for non-sinusoidal patterns (such as transiting planets or eclipsing binaries).
For full details on the CE algorithm, see [Graham et al. (2013)](http://adsabs.harvard.edu/abs/2013MNRAS.434.2629G).
Briefly, CE is based on the using the Shannon entropy ([Cincotta et al. 1995](https://ui.adsabs.harvard.edu/abs/1995ApJ...449..231C/abstract)), which is determined as follows:
- Normalize the time series data $m(t_i)$ to occupy a uniform square over phase, $\phi$, and magnitude, $m$, at a given trial period, $p$.
- Calculate the Shannon entropy, $H_0$, over the $k$ partitions in $(\phi, m)$:
$$H_0 = - \sum_{i=1}^{k} \mu_i \ln{(\mu_i)}\;\; \forall \mu_i \ne 0,$$
where $\mu_i$ is the occupation probability for the $i^{th}$ partition, which is just the number of data points in that partition divided by the total number of points in the data set.
- Iterate over multiple periods, and identify the period that minimizes the entropy (recall that entropy measures a lack of information)
As discussed in [Graham et al. (2013)](http://adsabs.harvard.edu/abs/2013MNRAS.434.2629G), minimizing the Shannon entropy can be influenced by the window function, so they introduce the conditional entropy, $H_c(m|\phi)$, to help mitigate these effects. The CE can be calculated as:
$$H_c = \sum_{i,j} p(m_i, \phi_j) \ln \left( \frac{p(\phi_j)}{p(m_i, \phi_j)} \right), $$
where $p(m_i, \phi_j)$ is the occupation probability for the $i^{th}$ partition in normalized magnitude and the $j^{th}$
partition in phase and $p(\phi_j)$ is the occupation probability of the $j^{th}$ phase partition
In this problem we will first calculate the Shannon entropy, then the CE to find the best-fit period of the eclipsing binary from the LS lecture.
## Problem 1) Helper Functions
**Problem 1a**
Create a function, `gen_periodic_data`, that creates simulated data (including noise) over a grid of user supplied positions:
$$ y = A\,cos\left(\frac{2{\pi}x}{P} - \phi\right) + \sigma_y$$
where $A, P, \phi$ are inputs to the function. `gen_periodic_data` should include Gaussian noise, $\sigma_y$, for each output $y_i$.
```
def gen_periodic_data(x, period=1, amplitude=1, phase=0, noise=0):
    '''Generate periodic data given the function inputs

    y = A*cos(2*pi*x/period - phase) + Gaussian noise

    Parameters
    ----------
    x : array-like
        input values to evaluate the array
    period : float (default=1)
        period of the periodic signal
    amplitude : float (default=1)
        amplitude of the periodic signal
    phase : float (default=0)
        phase offset of the periodic signal
    noise : float (default=0)
        variance of the noise term added to the periodic signal

    Returns
    -------
    y : array-like
        Periodic signal evaluated at all points x
    '''
    # the original used np.sin, contradicting both this docstring and the
    # problem statement, which specify a cosine — use np.cos so they agree
    y = amplitude*np.cos(2*np.pi*x/(period) - phase) + np.random.normal(0, np.sqrt(noise), size=len(x))
    return y
```
**Problem 1b**
Create a function, `phase_plot`, that takes x, y, and $P$ as inputs to create a phase-folded light curve (i.e., plot the data at their respective phase values given the period $P$).
Include an optional argument, `y_unc`, to include uncertainties on the `y` values, when available.
```
def phase_plot(x, y, period, y_unc = 0.0):
    '''Create phase-folded plot of input data x, y

    The y values are min-max normalized to [0, 1] and plotted against
    phase = (x / period) mod 1 on a 10 x 10 grid, which mirrors the
    partitioning used for the entropy calculations later on.

    Parameters
    ----------
    x : array-like
        data values along abscissa
    y : array-like
        data values along ordinate
    period : float
        period to fold the data
    y_unc : array-like or float (default=0.0)
        uncertainty of the y measurements; a scalar float means "no
        uncertainties" and is expanded to an array of zeros
    '''
    phases = (x/period) % 1
    # a scalar y_unc is the "no uncertainties" sentinel
    if type(y_unc) == float:
        y_unc = np.zeros_like(x)
    plot_order = np.argsort(phases)
    # min-max normalize the signal (and scale the uncertainties to match)
    norm_y = (y - np.min(y))/(np.max(y) - np.min(y))
    norm_y_unc = (y_unc)/(np.max(y) - np.min(y))
    plt.rc('grid', linestyle=":", color='0.8')
    fig, ax = plt.subplots()
    ax.errorbar(phases[plot_order], norm_y[plot_order], norm_y_unc[plot_order],
                fmt='o', mec="0.2", mew=0.1)
    ax.set_xlabel("phase")
    ax.set_ylabel("signal")
    # 10 x 10 grid to visualize the entropy partitions
    ax.set_yticks(np.linspace(0,1,11))
    ax.set_xticks(np.linspace(0,1,11))
    ax.grid()
    fig.tight_layout()
```
**Problem 1c**
Generate a signal with $A = 2$, $p = \pi$, and Gaussian noise with variance = 0.01 over a regular grid between 0 and 10. Plot the phase-folded results (and make sure the results behave as you would expect).
*Hint* - your simulated signal should have at least 100 data points.
```
x = np.linspace( # complete
y = # complete
# complete plot
```
Note a couple changes from the previous helper function –– we have added a grid to the plot (this will be useful for visualizing the entropy), and we have also normalized the brightness measurements from 0 to 1.
## Problem 2) The Shannon entropy
As noted above, to calculate the Shannon entropy we need to sum the data over partitions in the normalized $(\phi, m)$ plane.
This is straightforward using [`histogram2d`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram2d.html) from numpy.
**Problem 2a**
Write a function `shannon_entropy` to calculate the Shannon entropy, $H_0$, for a timeseries, $m(t_i)$, at a given period, `p`.
*Hint* - use `histogram2d` and a 10 x 10 grid (as plotted above).
```
def shannon_entropy(m, t, p):
'''Calculate the Shannon entropy
Parameters
----------
m : array-like
brightness measurements of the time-series data
t : array-like (default=1)
timestamps corresponding to the brightness measurements
p : float
period of the periodic signal
Returns
-------
H0 : float
Shannon entropy for m(t) at period p
'''
m_norm = # complete
phases = # complete
H, _, _ = np.histogram2d( # complete
occupied = np.where(H > 0)
H0 = # complete
return H0
```
**Problem 2b**
What is the Shannon entropy for the simulated signal at periods = 1, $\pi$-0.05, and $\pi$?
Do these results make sense given your understanding of the Shannon entropy?
```
print('For p = 1, \t\tH_0 = {:.5f}'.format( # complete
print('For p = pi - 0.05, \tH_0 = {:.5f}'.format( # complete
print('For p = pi, \t\tH_0 = {:.5f}'.format( # complete
```
We know the correct period of the simulated data is $\pi$, so it makes sense that this period minimizes the Shannon entropy.
**Problem 2c**
Write a function, `se_periodogram` to calculate the Shannon entropy for observations $m$, $t$ over a frequency grid `f_grid`.
```
def se_periodogram(m, t, f_grid):
'''Calculate the Shannon entropy at every freq in f_grid
Parameters
----------
m : array-like
brightness measurements of the time-series data
t : array-like
timestamps corresponding to the brightness measurements
f_grid : array-like
trial periods for the periodic signal
Returns
-------
se_p : array-like
Shannon entropy for m(t) at every trial freq
'''
# complete
for # complete in # complete
# complete
return se_p
```
**Problem 2d**
Plot the Shannon entropy periodogram, and return the best-fit period from the periodogram.
*Hint* - recall what we learned about frequency grids earlier today.
```
f_grid = # complete
se_p = # complete
fig,ax = plt.subplots()
# complete
# complete
# complete
print("The best fit period is: {:.4f}".format( # complete
```
## Problem 3) The Conditional Entropy
The CE is very similar to the Shannon entropy, though we need to condition the calculation on the occupation probability of the partitions in phase.
**Problem 3a**
Write a function `conditional_entropy` to calculate the CE, $H_c$, for a timeseries, $m(t_i)$, at a given period, `p`.
*Hint* - if you use `histogram2d` be sure to sum along the correct axes
*Hint 2* - recall from session 8 that we want to avoid `for` loops, try to vectorize your calculation.
```
def conditional_entropy(m, t, p):
'''Calculate the conditional entropy
Parameters
----------
m : array-like
brightness measurements of the time-series data
t : array-like
timestamps corresponding to the brightness measurements
p : float
period of the periodic signal
Returns
-------
Hc : float
Conditional entropy for m(t) at period p
'''
m_norm = # complete
phases = # complete
# complete
# complete
# complete
Hc = # complete
return Hc
```
**Problem 3b**
What is the conditional entropy for the simulated signal at periods = 1, $\pi$-0.05, and $\pi$?
Do these results make sense given your understanding of CE?
```
print('For p = 1, \t\tH_c = {:.5f}'.format( # complete
print('For p = pi - 0.05, \tH_c = {:.5f}'.format( # complete
print('For p = pi, \t\tH_c = {:.5f}'.format( # complete
```
**Problem 3c**
Write a function, `ce_periodogram`, to calculate the conditional entropy for observations $m$, $t$ over a frequency grid `f_grid`.
```
def ce_periodogram(m, t, f_grid):
'''Calculate the conditional entropy at every freq in f_grid
Parameters
----------
m : array-like
brightness measurements of the time-series data
t : array-like
timestamps corresponding to the brightness measurements
f_grid : array-like
trial periods for the periodic signal
Returns
-------
ce_p : array-like
conditional entropy for m(t) at every trial freq
'''
# complete
for # complete in # complete
# complete
return ce_p
```
**Problem 3d**
Plot the conditional entropy periodogram, and return the best-fit period from the periodogram.
```
f_grid = # complete
ce_p = # complete
fig,ax = plt.subplots()
# complete
# complete
# complete
print("The best fit period is: {:.4f}".format( # complete
```
The Shannon entropy and CE return nearly identical results for a simulated sinusoidal signal. Now we will examine how each performs with actual astronomical observations.
## Problem 4) SE vs. CE for real observations
**Problem 4a**
Load the data from our favorite eclipsing binary from this morning's LS exercise. Plot the light curve.
*Hint* - if you haven't already, download the [example light curve](https://northwestern.box.com/s/rclcz4lkcdfjn4829p8pa5ddfmcyd0gm).
```
data = pd.read_csv("example_asas_lc.dat")
fig, ax = plt.subplots()
ax.errorbar( # complete
ax.set_xlabel('HJD (d)')
ax.set_ylabel('V (mag)')
ax.set_ylim(ax.get_ylim()[::-1])
fig.tight_layout()
```
**Problem 4b**
Using the Shannon entropy, determine the best period for this light curve.
*Hint* - recall this morning's discussion about the optimal grid for a period search
```
f_min = # complete
f_max = # complete
delta_f = # complete
f_grid = # complete
se_p = # complete
print("The best fit period is: {:.9f}".format( # complete
```
**Problem 4c**
Plot the Shannon entropy periodogram.
```
fig, ax = plt.subplots()
# complete
# complete
# complete
```
**Problem 4d**
Plot the light curve phase-folded on the best-fit period, as measured by the Shannon entropy periodogram.
Does this look reasonable? Why or why not?
*Hint* - it may be helpful to zoom in on the periodogram.
```
phase_plot(# complete
```
**Problem 4e**
Using the conditional entropy, determine the best period for this light curve.
```
ce_p = # complete
print("The best fit period is: {:.9f}".format( # complete
```
**Problem 4f**
Plot the CE periodogram.
```
fig, ax = plt.subplots()
# complete
# complete
# complete
```
**Problem 4g**
Plot the light curve phase-folded on the best-fit period, as measured by the CE periodogram.
Does this look reasonable? If not - can you make it look better?
```
phase_plot( # complete
```
This example demonstrates the primary strength of CE over the Shannon entropy.
If you zoom-in on the CE periodogram, there is no power at $p \approx 1\,\mathrm{d}$, unlike the LS periodogram *or* the Shannon entropy method. This will not be the case for every single light curve, but this is a very nice feature of the CE method. And one reason why it may be preferred to something like LS when analyzing every light curve in LSST.
## Challenge Problem) Overlapping Bins
In the previous example we used a simple uniform grid to identify the best-fit period for the eclipsing binary. However, the "best-fit" resulted in an estimate of the half period. One way to improve upon this estimate is to build a grid that has overlapping phase bins. This requirement results in better continuity in the phase-folded light curves (K.Burdge, private communication).
**Challenge Problem**
Build a function `conditional_entropy_overlap` that utilizes overlapping bins in the CE calculation.
Can you use this function to identify the correct period of the binary?
| github_jupyter |
<a href="https://colab.research.google.com/github/ikonushok/My_projects/blob/main/%D0%A0%D0%B0%D0%B7%D0%B1%D0%BE%D1%80_HW4_UltraPro_%D0%A3%D0%B3%D0%BB%D1%83%D0%B1%D0%BB%D0%B5%D0%BD%D0%B8%D0%B5_%D0%B2_RNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Распарсить базу новостей, решить задачу NER
```
import numpy as np
from tensorflow.keras.layers import Dense, Input, concatenate, LSTM, GRU, Embedding, Bidirectional, Dropout, BatchNormalization, SpatialDropout1D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import AUC
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical, plot_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.model_selection import train_test_split
import io
import re
#from google.colab import drive
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
```
# Подготовка данных
```
import xml.etree.ElementTree as ET

# Word highlight/shading colour -> semantic label it encodes in the annotated .docx
color_meaning = {'00ff00':'сравнение', '00ffff':'дальность', '9900ff':'эмоции', 'ff00ff':'комфорт', 'ffff00':'скорость',
                 'green':'сравнение', 'yellow':'скорость', 'cyan':'дальность', 'magenta':'комфорт'}
# Label name -> class index (position in the 5-element multi-hot target vector)
meaning_token = {'сравнение':0, 'дальность':1, 'эмоции':2, 'комфорт':3, 'скорость':4}
token_meaning = {value:key for key, value in meaning_token.items()}

# Fully-qualified WordprocessingML tag/attribute names we need
r = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}r'
rpr = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}rPr'
t = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}t'
shd = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}shd'
fill = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}fill'
highlight = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}highlight'
val = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}val'


def parse_annotated_docx(path, n_paragraphs):
    """Parse one annotated Word XML file into review texts and multi-hot labels.

    The two Tesla files were previously parsed by two byte-identical inline
    loops; this helper replaces both.

    Parameters
    ----------
    path : str
        Path to the document.xml extracted from the .docx file.
    n_paragraphs : int
        Number of leading <w:body> children that are review paragraphs.

    Returns
    -------
    (list[str], list[list[int]])
        The text of each review and, per review, a 5-element 0/1 vector
        marking which semantic classes were highlighted anywhere in it.
    """
    tree = ET.parse(path)                              # open the xml file
    body = tree.getroot()[0]                           # <w:body> is the first child of the root
    texts, targets = [], []
    for paragraph in list(iter(body))[:n_paragraphs]:  # each paragraph is one review
        text = ''                                      # accumulated review text
        labels = [0, 0, 0, 0, 0]                       # multi-hot annotation vector
        for phrase in paragraph.findall(r):            # every run (phrase) of the review
            text += phrase.find(t).text                # append the phrase text
            style = phrase.find(rpr)                   # the phrase's style element
            if style.find(shd) is not None:            # annotated via cell shading
                color = style.find(shd).attrib[fill]
            elif style.find(highlight) is not None:    # annotated via text highlight
                color = style.find(highlight).attrib[val]
            else:
                color = 'white'                        # no Word markup -> unannotated phrase
            # unknown colours map to '' and are simply ignored below
            meaning = color_meaning.get(color.lower(), '')
            if meaning in meaning_token:               # phrase carries a semantic label
                labels[meaning_token[meaning]] = 1
        texts.append(text)
        targets.append(labels)
    return texts, targets


# Parse both annotated files (same format, different paragraph counts)
x2, y2 = parse_annotated_docx('/content/Tesla_2.xml', 2481)
x1, y1 = parse_annotated_docx('/content/Tesla_1.xml', 2497)
# Spot-check one parsed review and its multi-hot label
n = 102
x2[n], y2[n]
# Merge the parsing results of the two files into one dataset
X = x1+x2
Y = y1+y2
len(X), len(Y)
# (sentence-length analysis was done in the homework-1 walkthrough)
num_words = 10000   # vocabulary size kept by the tokenizer
sent_len = 50       # fixed sequence length after padding/truncation
tokenizer = Tokenizer(num_words, oov_token='<UNK>')
tokenizer.fit_on_texts(X)
tokenizer.index_word
# Convert the texts into sequences of token ids
X = tokenizer.texts_to_sequences(X)
# Pad (or truncate) every sequence to exactly sent_len tokens
X = pad_sequences(X, sent_len, padding='post', truncating='post')
X[:5]
Y = np.array(Y)
Y[:5]
X.shape, Y.shape
```
# Балансировка
```
# How many distinct semantic labels does each sentence carry, on average?
num_labels = Y.sum(axis=1)
plt.hist(num_labels, bins=np.arange(1,5.5,0.5))
# Per-class frequency in the full dataset
for meaning in meaning_token:
    token = meaning_token[meaning]
    print('Доля предложений в общей выборке, содержащих '+meaning+':',(Y[:, token]==1).mean())
# Shuffle the data and split it into train/test sets
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X, Y, test_size=0.2, shuffle=True)
# Per-class frequency in the training set
for meaning in meaning_token:
    token = meaning_token[meaning]
    print('Доля предложений в тренировочной выборке, содержащих '+meaning+':',(Ytrain[:, token]==1).mean())
# Fraction of training sentences carrying exactly one label ("pure" classes)
one_class_only = np.zeros(5)
for meaning in meaning_token:
    token = meaning_token[meaning]
    one_class_only[token] = (Ytrain==to_categorical(token, 5)).all(1).mean()
    print('Доля предложений в тренировочной выборке, содержащих только'+meaning+':', one_class_only[token])
# Fraction of sentences carrying exactly two semantic labels
two_classes = np.zeros((5,5))
for i in range(5):
    for j in range(5):
        label = np.array([0,0,0,0,0])
        label[[i,j]]=1
        two_classes[i,j] = (Ytrain==label).all(1).mean()
sns.heatmap(two_classes, annot=True, vmin=0, vmax=0.3, xticklabels=[token_meaning[i] for i in sorted(token_meaning)], yticklabels=[token_meaning[i] for i in sorted(token_meaning)])
# The diagonal values should match the pure-class fractions printed above.
# Multiplicity table of pure-class sizes: the entry at (class a, class b) says how many
# times class a must be replicated so that it matches the size of class b.
# E.g. a 6.2 at (class 1, class 0) means class 1 must grow 6.2x to match class 0.
multiplicity = one_class_only[None].T/one_class_only
sns.heatmap(multiplicity.T, annot=True, cmap=None)
```
Сама балансировка
```
# Masks selecting rows where the given class is present (first index) while
# classes 2 (эмоции) and 3 (комфорт) are absent
mask0 = (Ytrain[:, [0,2,3]] == [1,0,0]).all(axis=1)
mask1 = (Ytrain[:, [1,2,3]] == [1,0,0]).all(axis=1)
# classes "эмоции" and "комфорт" are not rebalanced
mask4 = (Ytrain[:, [4,2,3]] == [1,0,0]).all(axis=1)
# Oversample by replication; repeat factors follow the multiplicity table computed above
x_0 = np.repeat(Xtrain[mask0], repeats = 2, axis=0)
y_0 = np.repeat(Ytrain[mask0], repeats = 2, axis=0)
x_1 = np.repeat(Xtrain[mask1], repeats = 23, axis=0)
y_1 = np.repeat(Ytrain[mask1], repeats = 23, axis=0)
x_4 = np.repeat(Xtrain[mask4], repeats = 18, axis=0)
y_4 = np.repeat(Ytrain[mask4], repeats = 18, axis=0)
# Append the replicated rows to the original training set
Xtrain_balanced = np.concatenate([Xtrain, x_0, x_1, x_4])
Ytrain_balanced = np.concatenate([Ytrain, y_0, y_1, y_4])
```
Посмотрим на сбалансированные данные
```
# Per-class frequency after oversampling — the classes should now be more even
for meaning in meaning_token:
    token = meaning_token[meaning]
    print('Доля предложений в сбалансированной выборке, содержащих '+meaning+':',(Ytrain_balanced[:, token]==1).mean())
```
# Создание модели
```
emb_size = 100       # embedding dimension
hidden_state_1 = 50  # hidden size of the first recurrent layer
hidden_state_2 = 50  # hidden size of the second recurrent layer

# Variable-length token-id sequences in; 5 independent sigmoid scores out
# (one per semantic class — a multi-label setup).
# Renamed `input` -> `inputs` so the Python builtin `input` is not shadowed.
inputs = Input((None,))
emb = Embedding(num_words, emb_size)(inputs)
x = SpatialDropout1D(0.2)(emb)        # drop whole embedding channels
x = BatchNormalization()(x)
x = Bidirectional(GRU(hidden_state_1, return_sequences=True))(x)
x = GRU(hidden_state_2)(x)            # final GRU collapses the sequence
output = Dense(5, activation='sigmoid')(x)
model = Model(inputs, output)
# binary_crossentropy + per-label sigmoids = multi-label classification; AUC tracks ranking quality
model.compile(loss='binary_crossentropy', metrics=['AUC'], optimizer=Adam(learning_rate=1e-4))
model.fit(Xtrain_balanced, Ytrain_balanced, validation_data=(Xtest, Ytest), batch_size=64, epochs=5)
```
# Проверка качества работы
```
Xtest.shape
# Per-class predicted probabilities for the test set
Y_hat = model.predict(Xtest)
Y_hat.shape, Ytest.shape
plt.figure(figsize=(10,10))
num_classes = len(meaning_token)   # total number of semantic classes
for i in range(num_classes):       # one ROC curve per class
    fpr, tpr, thresholds = roc_curve(Ytest[:, i], Y_hat[:, i])
    # draw the curve and report its AUC in the label
    plt.plot(fpr, tpr, label='{} класс - {}, AUC={}'.format(i, token_meaning[i], AUC()(Ytest[:, i], Y_hat[:, i])))
# micro-averaged (overall) ROC curve over the flattened labels and scores
avg_fpr, avg_tpr, thresholds = roc_curve(Ytest.flatten(), Y_hat.flatten())
plt.plot(avg_fpr, avg_tpr, 'mD',label='micro-average, AUC={}'.format(AUC()(Ytest, Y_hat)))
plt.plot([0,1], [0,1], 'b-.')   # chance-level diagonal
# shade the region below the diagonal (worse-than-chance recognition)
plt.fill_between([0,1], [0,1], label='Зона некачестванного распознавания', color='pink', alpha=0.3)
plt.legend()
# Inspect one test example: decoded text, thresholded prediction, ground truth
n = 99
print(tokenizer.sequences_to_texts(Xtest[n:n+1]))
print((Y_hat[n]>=0.5).astype(int))
print(Ytest[n])
```
| github_jupyter |
# Ensembling Feature Overview
Ensembling is a fancy name for sub-sampling the data and generating $n_\text{models}$ models by regressing onto each of these sub-samples. In practice this helps to robustify the regressions against outliers and other issues. We highly recommend checking out the following paper for understanding the usefulness of these methods against noisy data:
#### Fasel, Urban, et al. "Ensemble-SINDy: Robust sparse model discovery in the low-data, high-noise limit, with active learning and control." arXiv preprint arXiv:2111.10992 (2021). https://arxiv.org/abs/2111.10992
This notebook provides an overview of the basic and advanced functionality of using ensemble methods in PySINDy. Ensembling robustifies the SINDy method. Written by Alan Kaptanoglu and Urban Fasel.
An interactive version of this notebook is available on binder
[](https://mybinder.org/v2/gh/dynamicslab/pysindy/v1.5.1?filepath=examples/13_ensembling.ipynb)
There are many variants of ensembling/subsampling strategies and post-processing methods. We will show the following useful variants below: <br> <br>
Ensembling: <br>
**V1: Sample $n_\text{subset}$ of the data time series with replacement, generate $n_\text{models}$ <br>
V2: Sample $n_\text{subset}$ of the data time series without replacement, generate $n_\text{models}$ <br>
V3: Sample candidate library terms, chopping $n_\text{trunc}$ terms and generating $n_\text{models}$ <br>
V4: Combine V1 and V3 or V2 and V3.** <br>
Post-processing: <br>
**P1: Average the generated models <br>
P2: Take median of the generated models <br>
P3: Take weighted average/median of the generated models. Here we use the mean-squared error (MSE) on the testing dataset to weight the models. <br>
P4: Remove any short-time unstable models and then repeat any of V1-V3 <br>
P5: Calculate library inclusion probabilities, threshold terms with probability below some value, and then take average of median of the remaining coefficient values. Alternatively, regress again on the subset of the library terms.** <br>
We will use the Lorenz equations to illustrate:
$$\dot x = -10x + 10y,$$
$$ \dot y = x(28 - z) - y,$$
$$\dot z = xy - \frac{8}{3} z.$$
```
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from scipy.integrate import solve_ivp
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from pysindy.utils.odes import lorenz
# Ignore integration and solver convergence warnings
import warnings
from scipy.integrate.odepack import ODEintWarning
warnings.simplefilter("ignore", category=UserWarning)
warnings.simplefilter("ignore", category=ODEintWarning)
import pysindy as ps
# Seed the random number generators for reproducibility
np.random.seed(100)
# integration keywords for solve_ivp, typically needed for chaotic systems
integrator_keywords = {}
integrator_keywords['rtol'] = 1e-12
integrator_keywords['method'] = 'LSODA'
integrator_keywords['atol'] = 1e-12
```
### Ensemble methods
One way to improve the robustness of a SINDy method is to use ensemble methods. Basically, we solve the regression problem many times on subsets (of size $n_{subset}$) of the time slices, generating $n_{models}$ models. All the coefficients are returned in model.coef_list and the average (bagging) can be used via np.mean(model.coef_list, axis=0) or the median (bragging) can be used via np.median(model.coef_list, axis=0).
#### The default number of models to generate is 20, and the default sampling strategy is to sample the entire time base with replacement, so that on average 60% or so of the data is seen.
#### Default is bragging - the median coefficients over all the models is printed by model.print(). However, model.simulate and model.predict use the coefficients of the last model that was fitted! To avoid confusion, we recommend using model.coef_list everywhere and doing all the post-processing separately.
```
# Simulate the Lorenz system to build a noisy training trajectory
dt = .002
t_train = np.arange(0, 10, dt)
t_train_span = (t_train[0], t_train[-1])
x0_train = [-8, 8, 27]
x_train = solve_ivp(lorenz, t_train_span, x0_train, t_eval=t_train, **integrator_keywords).y.T
# add 1% noise to add a little complexity (otherwise all the models are basically correct)
# NOTE(review): `squared=False` is deprecated in scikit-learn >= 1.4
# (use root_mean_squared_error) — confirm the pinned sklearn version.
rmse = mean_squared_error(x_train, np.zeros(x_train.shape), squared=False)
x_train = x_train + np.random.normal(0, rmse / 100.0, x_train.shape)
# Evolve the Lorenz equations in time using a different initial condition
t_test = np.arange(0, 15, dt)
t_test_span = (t_test[0], t_test[-1])
x0_test = np.array([8, 7, 15])
x_test = solve_ivp(lorenz, t_test_span, x0_test, t_eval=t_test, **integrator_keywords).y.T
# Instantiate and fit the SINDy model
feature_names = ['x', 'y', 'z']
ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=ensemble_optimizer)
# Ensemble with replacement (V1)
model.fit(x_train, t=dt, ensemble=True, quiet=True)
model.print()
ensemble_coefs = model.coef_list
# repeat without replacement (V2)
ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=ensemble_optimizer)
model.fit(x_train, t=dt, ensemble=True, replace=False, quiet=True)
model.print()
# NOTE(review): this overwrites the V1 coefficients — later cells only see the V2 results.
ensemble_coefs = model.coef_list
```
### Library ensemble methods (V3)
Another way to do ensembling is to solve the regression problem on subsets of the candidate library, generating $p$ (the number of candidate terms) models. The syntax and code is a bit different, but the user-facing part is the same. Once again, the coefficients are returned in model.coef_list and the average can be used via np.mean(model.coef_list, axis=0). The default is still 20 generated models and the default sub-sampling is to chop one candidate term for each model.
```
# Instantiate and fit the SINDy model
# Library ensembling (V3): refit while dropping candidate library terms
library_ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=library_ensemble_optimizer)
model.fit(x_train, t=dt, library_ensemble=True, quiet=True)
model.print()
library_ensemble_coefs = model.coef_list
```
### Now do both types of ensembling with multiple candidate drops (V4)!
```
# Combined data + library ensembling (V4), dropping 2 candidate terms per model
double_ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=double_ensemble_optimizer)
model.fit(x_train, t=dt, library_ensemble=True, ensemble=True, n_candidates_to_drop=2, quiet=True)
model.print()
double_ensemble_coefs = model.coef_list
```
### Alternatives to averaging (bagging, P1) or taking the median (bragging, P2) of the models:
1. Weighted average of the coefficients, with weights corresponding to the mean-squared error with the testing (or training, if you like) $\dot{\mathbf{X}}$ (P3).
2. Define a function that does small integration forward of all the potential models for a bunch of a random initial conditions and chop any unstable models. Then repeat bagging or bragging. (P4)
3. Calculate inclusion probabilities for each of the candidate library terms (illustrated later in this example), (P5).
4. Whatever custom post-processing you like!
```
# Weighted average of the ensemble coefficients, weighted by model quality on
# the test data.  BUGFIX: the original used the raw MSE as the weight, which
# gives the *worst*-fitting models the largest influence — the opposite of the
# intended robustification.  We now weight by 1/MSE so better models dominate.
def weighted_coefficient_average(coef_list, x_test, dt, optimizer):
    """Return the (weighted mean, weighted std) of a list of SINDy coefficient matrices.

    Parameters
    ----------
    coef_list : sequence of (n_targets, n_features) arrays
        Coefficient matrices of each ensemble model.
    x_test : np.ndarray
        Test trajectory used to score each model.
    dt : float
        Sampling period of x_test.
    optimizer : pysindy optimizer
        Must be the optimizer held by the global `model`; its `coef_` is
        overwritten so that `model.score` evaluates each candidate in turn.
    """
    coefs = np.asarray(coef_list)
    n_models = coefs.shape[0]
    mse = np.zeros(n_models)
    for i in range(n_models):
        optimizer.coef_ = coefs[i]               # swap the i-th model into `model`
        mse[i] = model.score(x_test, t=dt, metric=mean_squared_error)
    weights = 1.0 / np.maximum(mse, 1e-12)       # inverse-MSE weights; guard a perfect fit
    avg = np.average(coefs, axis=0, weights=weights)
    # weighted standard deviation about the weighted mean
    std = np.sqrt(np.average((coefs - avg) ** 2, axis=0, weights=weights))
    return avg, std

e_wavg, e_wstd = weighted_coefficient_average(ensemble_coefs, x_test, dt, ensemble_optimizer)
l_wavg, l_wstd = weighted_coefficient_average(library_ensemble_coefs, x_test, dt, library_ensemble_optimizer)
# Zero out any model that blows up during a short forward integration.
def integration_metric(coef_list, optimizer):
    """Simulate every ensemble model from 5 random initial conditions and
    zero the coefficients of any model whose trajectory diverges.

    Parameters
    ----------
    coef_list : (n_models, n_targets, n_features) array
        Modified in place; unstable models are set to all zeros.
    optimizer : pysindy optimizer
        Must be the optimizer held by the global `model`; its `coef_` is
        overwritten so `model.simulate` integrates each candidate model.
    """
    # BUGFIX: the original also called solve_ivp(lorenz, t_test_span, ...) here,
    # but never used the result — dead work that silently depended on the
    # global t_test_span.  Removed; output is unchanged.
    t_short = np.arange(0, 2, 0.001)   # 2 time-unit stability probe
    for _ in range(5):                 # try 5 random test initial conditions
        x0 = (np.random.rand(3) - 0.5) * 10
        for i in range(np.shape(coef_list)[0]):
            optimizer.coef_ = coef_list[i, :, :]
            sim = model.simulate(x0, t_short, integrator="odeint")
            if np.any(np.abs(sim) > 5000):   # diverged -> discard this model
                coef_list[i, :, :] = 0.0
    return coef_list

stable_ensemble_coefs = integration_metric(np.asarray(ensemble_coefs), ensemble_optimizer)
stable_library_ensemble_coefs = integration_metric(np.asarray(library_ensemble_coefs), library_ensemble_optimizer)
# Plot bagging, bragging, and other post-processing results with error bars.
# Rows: bagging / bragging / weighted bagging / stability-weighted bagging;
# columns: data ensembling (left) vs library ensembling (right).
coef_types = [np.mean(ensemble_coefs, axis=0),
              np.mean(library_ensemble_coefs, axis=0),
              e_wavg,
              l_wavg,
              np.median(ensemble_coefs, axis=0),
              np.median(library_ensemble_coefs, axis=0),
              np.mean(stable_ensemble_coefs, axis=0),
              np.mean(stable_library_ensemble_coefs, axis=0),
              ]
# Matching spread estimates for the error bars of each panel
coef_stds = [np.std(ensemble_coefs, axis=0),
             np.std(library_ensemble_coefs, axis=0),
             e_wstd,
             l_wstd,
             np.std(ensemble_coefs, axis=0),
             np.std(library_ensemble_coefs, axis=0),
             np.std(stable_ensemble_coefs, axis=0),
             np.std(stable_library_ensemble_coefs, axis=0),
             ]
# define some plotting needs
colors = ['b', 'r', 'k']
props = dict(boxstyle='round', facecolor='wheat', alpha=1.0)
textstrs = ['bagging', 'bragging', 'weighted\nbagging', 'stability\nweighted\nbagging']
plt.figure(figsize=(16, 9))
# Loop over the different post-processed coefficients
for k, coefs in enumerate(coef_types):
    coef_std = coef_stds[k]
    plt.subplot(4, 2, k + 1)
    for i in range(coefs.shape[0]):   # one errorbar series per state equation
        plt.errorbar(range(coefs.shape[1]), coefs[i, :], yerr=coef_std[i, :],
                     fmt='o', color=colors[i], label='$\dot ' + feature_names[i] + '_{}$' + ' coefficients')
    ax = plt.gca()
    ax.set_xticks(range(10))
    if k >= 6:   # label the candidate-library terms only on the bottom row
        xticknames = model.get_feature_names()
        for i in range(10):
            xticknames[i] = '$' + xticknames[i] + '$'
        ax.set_xticklabels(xticknames, verticalalignment='top', fontsize=28)
    else:
        ax.set_xticklabels([])
    plt.ylim(-20, 35)
    if k == 0:
        plt.legend(fontsize=18, framealpha=1.0, loc='upper right')
        ax.text(0.38, 1.15, 'ensemble', transform=ax.transAxes, fontsize=20, bbox=props)
    if k == 1:
        ax.text(0.35, 1.15, 'library ensemble', transform=ax.transAxes, fontsize=20, bbox=props)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.grid(True)
    ax.set_yticks([-20, -10, 0, 10, 20, 30])
    if k % 2 == 0:   # row label on the left column only
        textstr = textstrs[k // 2]
        ax.text(-0.35, 0.4, textstr, transform=ax.transAxes, fontsize=20, bbox=props)
    else:
        ax.set_yticklabels([])
# plt.savefig('ensemble_summary.pdf', bbox_inches='tight')
```
### Repeat ensembling but now make ~1000 models and plot the distributions of coefficients
```
# Re-run data ensembling with 1000 models to get coefficient distributions
ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=ensemble_optimizer)
model.fit(x_train, t=dt, ensemble=True, n_models=1000, quiet=True)
model.print()
ensemble_coefs = np.asarray(model.coef_list)
# Same for library ensembling
library_ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=library_ensemble_optimizer)
model.fit(x_train, t=dt, library_ensemble=True, n_models=1000, quiet=True)
model.print()
library_ensemble_coefs = np.asarray(model.coef_list)
# Make plots of each coefficient distribution!
plt.figure(figsize=(20, 20))
min_overall = -15   # histogram x-axis limits
max_overall = 30
# Ground-truth Lorenz coefficients in the polynomial-library ordering
true_coefs = np.asarray([[0, -10, 10, 0, 0, 0, 0, 0, 0, 0],
                         [0, 28.0, -1, 0, 0, 0, -1, 0, 0, 0],
                         [0, 0, 0, -8.0 / 3.0, 0, 1, 0, 0, 0, 0]])
# Loop through each element in the coefficients (10 library terms x 3 equations)
for j in range(10):
    for i in range(3):
        plt.subplot(10, 3, i + 1 + j * 3)
        if j == 0:
            plt.title(feature_names[i], fontsize=30)
        bins = np.linspace(min_overall, max_overall, max_overall - min_overall + 1)
        plt.hist(ensemble_coefs[:, i, j], color='b', bins=bins, label='ensemble', align='left')
        plt.hist(library_ensemble_coefs[:, i, j], bins=bins, color='r', alpha=0.5, label='library ensemble', align='left')
        # mark the true coefficient value on top of the histograms
        plt.scatter(true_coefs[i, j], 110.0, color='k', label='true value', s=100, marker='o')
        plt.grid(True)
        ax = plt.gca()
        if i == 0:
            # NOTE(review): xticknames is defined in the previous plotting cell — verify run order
            plt.ylabel(xticknames[j], fontsize=30)
        else:
            ax.set_yticklabels([])
        if j != 9:
            ax.set_xticklabels([])
        else:
            plt.xlabel('coefficient value', fontsize=30)
        ax.set_xticks([min_overall, 0, max_overall // 2, max_overall])
        plt.ylim(0, 1000)
        plt.xlim(min_overall, max_overall)
        plt.xticks(fontsize=22)
        plt.yticks(fontsize=18)
        if i == 2 and j == 9:   # one legend, bottom-right panel
            plt.legend(fontsize=16)
# plt.savefig('lorenz_distributions.pdf')
```
### Plot 95% percentile bounds with mean and median test trajectories
```
# Rerun the ensembling
ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=ensemble_optimizer)
model.fit(x_train, t=dt, ensemble=True, n_models=100, quiet=True)
model.print()
ensemble_coefs = np.asarray(model.coef_list)
mean_ensemble_coefs = np.mean(ensemble_coefs, axis=0)       # bagging
median_ensemble_coefs = np.median(ensemble_coefs, axis=0)   # bragging
# Predict the testing trajectory with the mean and median models
t_test = np.linspace(0, 10, 500)
t_test_span = (t_test[0], t_test[-1])
x_test_sim = solve_ivp(lorenz, t_test_span, x0_test, t_eval=t_test, **integrator_keywords).y.T
# model.simulate uses whatever coefficients are currently in the optimizer
ensemble_optimizer.coef_ = mean_ensemble_coefs
x_test_sim_mean = model.simulate(x0_test, t_test)
ensemble_optimizer.coef_ = median_ensemble_coefs
x_test_sim_median = model.simulate(x0_test, t_test)
# Predict the testing trajectory with all the models
x_test_sims = []
for i in range(ensemble_coefs.shape[0]):
    ensemble_optimizer.coef_ = ensemble_coefs[i, :, :]
    x_test_sims.append(model.simulate(x0_test, t_test))
# Compute the 2.5 and 97.5 percentile trajectories (95% band)
bottom_line = np.percentile(x_test_sims, 2.5, axis=0)
top_line = np.percentile(x_test_sims, 97.5, axis=0)
# Plot trajectory results, one subplot per state variable
plt.figure(figsize=(14, 5))
for i in range(3):
    plt.subplot(3, 1, i + 1)
    plt.plot(t_test, x_test_sim[:, i], 'k', label='True')
    plt.plot(t_test, x_test_sim_mean[:, i], 'b', label='Mean')
    plt.plot(t_test, x_test_sim_median[:, i], 'r', label='Median')
    plt.plot(t_test, bottom_line[:, i], 'g', alpha=0.6, label='95th percentile')
    plt.plot(t_test, top_line[:, i], 'g', alpha=0.6)
    ax = plt.gca()
    ax.fill_between(t_test, bottom_line[:, i], top_line[:, i], color='g', alpha=0.3)
    plt.grid(True)
    if i != 2:
        ax.set_xticklabels([])
    else:
        plt.xlabel('t', fontsize=20)
    plt.ylabel(feature_names[i], fontsize=20)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    ax.yaxis.set_label_coords(-0.05, 0.75 - 0.1 * (i + 1))
    if i == 0:
        ax.legend(bbox_to_anchor=(1.01, 1.05), fontsize=18)
# plt.savefig('lorenz_percentile_plot.pdf', bbox_inches='tight')
# Plot 3D trajectory results
fig = plt.figure(figsize=(14, 5))
ax = fig.add_subplot(111, projection='3d')
plt.plot(x_test_sim[:, 0], x_test_sim[:, 1], x_test_sim[:, 2], 'k', label='True')
plt.plot(x_test_sim_mean[:, 0], x_test_sim_mean[:, 1], x_test_sim_mean[:, 2], 'b', label='Mean')
plt.plot(x_test_sim_median[:, 0], x_test_sim_median[:, 1], x_test_sim_median[:, 2], 'r', label='Median')
plt.grid(True)
# NOTE(review): `i` here is the leftover value (2) from the loop above, so
# this labels the 3D plot's y-axis 'z' — likely unintended; confirm.
plt.ylabel(feature_names[i], fontsize=20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.legend()
# plt.savefig('lorenz_3D_percentile_plot.pdf', bbox_inches='tight')
```
### Variant of library ensembling (P5):
1. Library ensemble and calculate inclusion probabilities
2. Threshold off library terms below some threshold probability
3. Run normal ensemble method with reduced library
This works with similar success with pure ensembling, but this can be very helpful for more complicated, higher-dimensional systems with many library terms.
```
# 1. Library ensemble and calculate inclusion probabilities
n_candidates_to_drop = 1
n_models = 10
library_ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=library_ensemble_optimizer)
model.fit(x_train, t=dt, library_ensemble=True, n_models=n_models,
          n_candidates_to_drop=n_candidates_to_drop, quiet=True)
model.print()
library_ensemble_coefs = np.asarray(model.coef_list)
n_targets = len(feature_names)
n_features = len(model.get_feature_names())
# Calculate inclusion probabilities (note you may want to add another factor here because
# each term is only present for a portion of the fits)
inclusion_probabilities = np.count_nonzero(model.coef_list, axis=0) / n_models
# 2. Chop inclusion probabilities <= 30% (the code threshold is 0.3, not the 50%
# the original comment claimed — rather drastic, for illustration)
inclusion_probabilities[inclusion_probabilities <= 0.3] = 0.0
# Find indices that are chopped for all three equations
# since we pass the same library for all.
chopped_inds = np.any(inclusion_probabilities != 0.0, axis=0)
chopped_inds = np.ravel(np.where(~chopped_inds))
# 3. Pass truncated library and then do normal ensembling
library = ps.PolynomialLibrary(degree=2, library_ensemble=True,
                               ensemble_indices=chopped_inds)
ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=ensemble_optimizer, feature_library=library)
model.fit(x_train, t=dt, ensemble=True, n_models=n_models, quiet=True)
two_step_ensemble_coefs = np.asarray(model.coef_list)
two_step_mean = np.mean(two_step_ensemble_coefs, axis=0)
two_step_std = np.std(two_step_ensemble_coefs, axis=0)
two_step_median = np.median(two_step_ensemble_coefs, axis=0)
# Add zeros to get coefficient matrices to original full size
for i in range(len(chopped_inds)):
    two_step_mean = np.insert(two_step_mean, chopped_inds[i], 0.0, axis=-1)
    two_step_std = np.insert(two_step_std, chopped_inds[i], 0.0, axis=-1)
    two_step_median = np.insert(two_step_median, chopped_inds[i], 0.0, axis=-1)
# Compare with pure ensembling (single-step, full library)
ensemble_optimizer = ps.STLSQ()
model = ps.SINDy(feature_names=feature_names, optimizer=ensemble_optimizer)
model.fit(x_train, t=dt, ensemble=True, n_models=n_models, quiet=True)
ensemble_coefs = np.asarray(model.coef_list)
one_step_mean = np.mean(ensemble_coefs, axis=0)
one_step_std = np.std(ensemble_coefs, axis=0)
one_step_median = np.median(ensemble_coefs, axis=0)
# Plot results: two-step (left) vs one-step (right) ensembling, one errorbar
# series per state equation.
colors = ['b', 'r', 'k']
xticknames = model.get_feature_names()
for i in range(10):
    xticknames[i] = '$' + xticknames[i] + '$'
plt.figure(figsize=(10, 4))
for i in range(n_targets):
    plt.subplot(1, 2, 1)
    plt.errorbar(range(n_features), two_step_mean[i, :], yerr=two_step_std[i, :],
                 fmt='o', color=colors[i])
    plt.grid(True)
    plt.ylim(-10, 30)
    # BUGFIX: the original used a stale `ax` from a previous figure here, so the
    # tick labels of the left panel landed on the wrong axes.  Grab the current
    # axes explicitly, as the right panel already did.
    ax = plt.gca()
    ax.set_xticks(range(10))
    ax.set_xticklabels(xticknames, verticalalignment='top', fontsize=12)
    plt.subplot(1, 2, 2)
    plt.errorbar(range(n_features), one_step_mean[i, :], yerr=one_step_std[i, :],
                 fmt='o', color=colors[i])
    plt.grid(True)
    plt.ylim(-10, 30)
    ax = plt.gca()
    ax.set_xticks(range(10))
    ax.set_xticklabels(xticknames, verticalalignment='top', fontsize=12)
```
### Generate achievability plots from Maddu et al. 2019
Maddu, S., Cheeseman, B. L., Sbalzarini, I. F., & Müller, C. L. (2019). Stability selection enables robust learning of partial differential equations from limited noisy data. arXiv preprint arXiv:1907.07810.
For these plots, we want to do the following:
1. Ensemble and calculate inclusion probabilities
2. Threshold off library terms below some threshold probability
3. If there exists some $\lambda$ threshold such that the resulting reduced library is the correct support for the Lorenz equation, this is counted as success, otherwise failure.
```
# Support (sparsity pattern) of the true Lorenz coefficients in the
# degree-2 polynomial library ordering [1, x, y, z, x^2, xy, xz, y^2, yz, z^2]
support_lorenz = np.zeros((3, 10))
support_lorenz[0, 1] = 1.0
support_lorenz[0, 2] = 1.0
support_lorenz[1, 1] = 1.0
support_lorenz[1, 2] = 1.0
support_lorenz[1, 6] = 1.0
support_lorenz[2, 3] = 1.0
support_lorenz[2, 5] = 1.0
# Scan over threshold, noise level, number of time samples, and number of initial conditions
lambda_scan = np.linspace(0.01, 0.1, 10)
noise_scan = np.linspace(0, rmse / 10.0, 10)
sampling_scan = range(40, 400, 40)
ic_scan = (np.random.rand(10, 3) - 0.5) * 20
# Fix number of models to generate
n_models = 10
# success_rates[ic, sample, noise] = 1 when some lambda recovers the true support
success_rates = np.zeros((10, len(sampling_scan), 10))
dt = .005
t_train = np.arange(0, 10, dt)
t_train_span = (t_train[0], t_train[-1])
for kk, ic in enumerate(ic_scan):
    x_train = solve_ivp(lorenz, t_train_span, ic, t_eval=t_train, **integrator_keywords).y.T
    rmse = mean_squared_error(x_train, np.zeros(x_train.shape), squared=False)
    print(kk)
    for j, noise in enumerate(noise_scan):
        # BUGFIX: the original added np.random.normal(0, noise) — a single scalar
        # draw, i.e. one constant offset for the whole trajectory instead of
        # independent per-sample noise.  Draw a full noise array instead.
        x_noisy = x_train + np.random.normal(0, noise, x_train.shape)
        for i, sample in enumerate(sampling_scan):
            for k, lam in enumerate(lambda_scan):
                ensemble_optimizer = ps.STLSQ(threshold=lam)
                model = ps.SINDy(feature_names=feature_names, optimizer=ensemble_optimizer)
                model.fit(x_noisy, t=dt, ensemble=True, n_models=n_models, n_subset=sample, quiet=True)
                inclusion_probabilities = np.count_nonzero(model.coef_list, axis=0) / n_models
                # Keep only terms included in > 80% of the ensemble models
                inclusion_probabilities[inclusion_probabilities <= 0.8] = 0.0
                # Success when the surviving support matches the true Lorenz support.
                # BUGFIX: compare sparsity patterns — the original np.allclose against
                # the 0/1 support only succeeded when every true term had inclusion
                # probability exactly 1.0.
                if np.array_equal(support_lorenz != 0.0, inclusion_probabilities != 0.0):
                    success_rates[kk, i, j] = 1.0
                    break  # one successful lambda suffices for this (noise, sample) cell
# Average success over the random initial conditions and plot the phase diagram
successes = np.mean(success_rates, axis=0)
plt.figure(figsize=(18, 5))
plt.pcolormesh(noise_scan / rmse * 100, sampling_scan, successes)
plt.colorbar()
plt.xlabel('% noise', fontsize=20)
plt.ylabel('Number of time samples per model', fontsize=22)
```
### Test ensemble functionality with weak-PDE functionality, using the Kuramoto-Sivashinsky equation. For understanding weak form use, please see the Example 14 Jupyter Notebook.
The Kuramoto-Sivashinsky equation is
$u_t = -uu_x - u_{xx} - u_{xxxx}$.
```
from scipy.io import loadmat
# Load the precomputed Kuramoto-Sivashinsky solution u(x, t)
data = loadmat('data/kuramoto_sivishinky.mat')
time = np.ravel(data['tt'])
x = np.ravel(data['x'])
u = data['uu']
# add 10% (rmse / 10) noise
# NOTE(review): `squared=False` is deprecated in scikit-learn >= 1.4 — confirm version.
rmse = mean_squared_error(u, np.zeros(u.shape), squared=False)
u = u + np.random.normal(0, rmse / 10.0, u.shape)
u_flattened = np.reshape(u, (len(x) * len(time), 1))
dt = time[1] - time[0]   # temporal grid spacing
dx = x[1] - x[0]         # spatial grid spacing
# Plot u and u_dot
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.pcolormesh(time, x, u)
plt.xlabel('t', fontsize=16)
plt.ylabel('x', fontsize=16)
plt.title(r'$u(x, t)$', fontsize=16)
# Finite-difference time derivative, computed one spatial point at a time
u_shaped = np.reshape(u, (len(x), len(time), 1))
u_dot = np.zeros((len(x), len(time), 1))
for i in range(len(x)):
    u_dot[i, :, :] = ps.FiniteDifference()._differentiate(u_shaped[i, :, :], t=dt)
plt.subplot(1, 2, 2)
plt.pcolormesh(time, x, u_dot[:, :, 0])
plt.xlabel('t', fontsize=16)
plt.ylabel('x', fontsize=16)
ax = plt.gca()
ax.set_yticklabels([])
plt.title(r'$\dot{u}(x, t)$', fontsize=16)
plt.show()
from scipy.interpolate import RectBivariateSpline
from scipy.integrate import trapezoid
from pysindy.differentiation import FiniteDifference
# Split into training and testing sets (first half of the time base trains)
train = range(0, len(time) // 2, 1)
test = [i for i in np.arange(len(time)) if i not in train]
u_train = u[:, train]
u_test = u[:, test]
t_train = time[train]
t_test = time[test]
# Flatten into 2D shape for SINDy model optimization
u_train_flattened = np.reshape(u_train, (len(x) * len(t_train), 1))
u_test_flattened = np.reshape(u_test, (len(x) * len(t_test), 1))
# Note for ensembling, need to set is_uniform=False, since once we sub-sample
# in time, the temporal grid is no longer uniform
library_functions = [lambda x: x, lambda x: x * x]        # candidate terms u, u^2
library_function_names = [lambda x: x, lambda x: x + x]   # printable names
pde_lib = ps.PDELibrary(library_functions=library_functions,
                        function_names=library_function_names,
                        derivative_order=4, spatial_grid=x,
                        temporal_grid=time, include_bias=True, K=100,
                        is_uniform=False, weak_form=True, num_pts_per_domain=50)
# Convert u_dot into a weak form using the pre-defined pde_lib
def convert_u_dot_1D(u, t, pde_lib):  # For weak form 1D PDES
    """Integrate u_t against the library's smooth test functions.

    For each of the K subdomains defined by ``pde_lib``, interpolate u onto the
    subdomain's grid and compute -∫∫ u * w_t dx dt. The minus sign and the final
    argument ``1`` passed to ``_smooth_ppoly`` reflect integration by parts: the
    time derivative is moved off u and onto the test function w.

    Parameters
    ----------
    u : ndarray, shape (n_x, n_t) or (n_x, n_t, n_features)
        Solution field; a 2D input is reshaped to a single feature.
    t : ndarray
        Time points matching u's second axis.
    pde_lib : pysindy.PDELibrary
        Must have weak_form=True. NOTE(review): relies on the library's
        private attributes X, t, xgrid_k, tgrid_k and _smooth_ppoly — tied to
        the installed pysindy version.

    Returns
    -------
    ndarray, shape (K, n_features)
        Weak-form time-derivative target, one row per subdomain.
    """
    K = pde_lib.K
    num_pts_per_domain = pde_lib.num_pts_per_domain
    x = pde_lib.spatial_grid
    if len(u.shape) == 2:
        u = np.reshape(u, (len(x), len(t), 1))
    u_dot_integral = np.zeros((K, u.shape[-1]))
    # Define interpolation object to interpolate u(x, time) onto the grid
    # spanned by X, t
    for j in range(u.shape[-1]):
        u_interp = RectBivariateSpline(x, t, u[:, :, j])
        for k in range(K):
            X = np.ravel(pde_lib.X[k, :, :])
            tgrid = np.ravel(pde_lib.t[k, :, :])
            # Interpolate u onto the kth subdomain
            u_new = u_interp.ev(X, tgrid)
            u_new = np.reshape(u_new, (num_pts_per_domain,
                                       num_pts_per_domain,
                                       1))
            # Get the smooth weights on the kth subdomain
            # (trailing 1 = first time-derivative of the test function)
            w_diff = pde_lib._smooth_ppoly(
                np.reshape(pde_lib.xgrid_k[k, :], (num_pts_per_domain, 1)),
                pde_lib.tgrid_k[k, :], k, 0, 0, 0, 1
            )
            # Integrate over x, t points in the kth subdomain
            # (inner trapezoid over x / axis 0, outer over t)
            u_dot_integral[k, j] = (-1) * (
                trapezoid(
                    trapezoid(
                        u_new * w_diff,
                        x=pde_lib.xgrid_k[k, :],
                        axis=0
                    ),
                    x=pde_lib.tgrid_k[k, :],
                    axis=0
                )
            )
    return u_dot_integral
# Weak-form targets for train/test splits and the full data set
u_dot_train_integral = convert_u_dot_1D(u_train, t_train, pde_lib)
u_dot_test_integral = convert_u_dot_1D(u_test, t_test, pde_lib)
u_dot_integral = convert_u_dot_1D(u, time, pde_lib)
# Run SR3 with L0 norm
n_models = 10
optimizer = ps.SR3(threshold=2, max_iter=100, tol=1e-3,
                   thresholder='l0', normalize_columns=True)
model = ps.SINDy(feature_library=pde_lib, optimizer=optimizer)
# Ensemble fit: n_models models, each on a random half of the time samples
model.fit(u_flattened, x_dot=u_dot_integral, ensemble=True,
          n_models=n_models, n_subset=len(time) // 2, quiet=True)
model.print(precision=4)
# Aggregate, average, and take median of the coefficients
weak_KS_models = np.asarray(model.coef_list)
mean_weak_KS_models = np.mean(weak_KS_models, axis=0)
median_weak_KS_models = np.median(weak_KS_models, axis=0)
print('Mean of KS models: ', mean_weak_KS_models)
print('Median of KS models: ', median_weak_KS_models)
# Threshold below which a coefficient is treated as absent (used by success_rate)
negligible_val = 1e-2
# Simple definition of a model error, ||True - Pred|| / ||True||
def model_error(KS_pred_coef, KS_true_coef):
    """Relative coefficient error ||true - pred|| / ||true||."""
    residual = np.linalg.norm(KS_true_coef - KS_pred_coef)
    scale = np.linalg.norm(KS_true_coef)
    return residual / scale
# Returns True if the identified model has nonzero coefficient entries in
# exactly the right locations (the support of the true model); otherwise False.
def success_rate(KS_pred_coef, KS_true_coef, negligible_val=1e-2):
    """Return True if the predicted nonzero support matches the true support.

    A coefficient counts as "present" when its magnitude exceeds
    ``negligible_val``. Previously this threshold was read from a module-level
    global of the same name; it is now an explicit parameter (defaulting to the
    same 1e-2) so the function no longer depends on hidden state.

    Parameters
    ----------
    KS_pred_coef, KS_true_coef : array_like
        Predicted and ground-truth coefficient arrays of the same shape.
    negligible_val : float, optional
        Magnitude threshold separating present from absent terms.

    Returns
    -------
    numpy.bool_
        True iff both arrays have above-threshold entries in the same positions.
    """
    pred_support = np.abs(KS_pred_coef) > negligible_val
    true_support = np.abs(KS_true_coef) > negligible_val
    return np.all(true_support == pred_support)
print(model.get_feature_names())
# Ground-truth KS coefficients: u_t = -u u_x - u_xx - u_xxxx.
# NOTE(review): indices 4, 6, 7 are assumed to line up with the corresponding
# terms in model.get_feature_names() printed above — verify against that list.
KS_true = np.zeros((1, 15))
KS_true[0, 4] = -1.0
KS_true[0, 6] = -1.0
KS_true[0, 7] = -1.0
# Error/success of the ensemble-aggregated (mean and median) models
print('mean: ', model_error(mean_weak_KS_models, KS_true), success_rate(mean_weak_KS_models, KS_true))
print('median: ', model_error(median_weak_KS_models, KS_true), success_rate(median_weak_KS_models, KS_true))
# Per-model error and success, then ensemble averages (booleans average to a rate)
total_model_error = []
total_success_rate = []
for i in range(n_models):
    coef = weak_KS_models[i, :, :]
    total_model_error.append(model_error(coef, KS_true))
    total_success_rate.append(success_rate(coef, KS_true))
    print(i, model_error(coef, KS_true), success_rate(coef, KS_true))
print('Average model error: ', np.mean(total_model_error, axis=0) * 100, '%')
print('Average success rate: ', np.mean(total_success_rate, axis=0) * 100, '%')
```
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
from awave.experimental.filters import gabor_filter, edge_filter, curve_filter
from awave.experimental.filters_agg import *
import awave.experimental.viz as viz
from tqdm import tqdm
```
# look at base filters
```
filter_size = 13
angle_step = 20
ims = []
for angle in tqdm(range(0, 360, angle_step)):
ims.append(np.expand_dims(gabor_filter(filter_size, angle), 0))
for angle in tqdm(range(0, 360, angle_step)):
ims.append(np.expand_dims(edge_filter(filter_size, angle)[0], 0))
for angle in tqdm(range(0, 360, angle_step)):
ims.append(np.expand_dims(edge_filter(filter_size, angle, thickness_cutoff=2)[0], 0))
for angle in tqdm(range(0, 360, angle_step)):
ims.append(np.expand_dims(curve_filter(filter_size, angle, radius=1)[0], 0))
for angle in tqdm(range(0, 360, angle_step)):
ims.append(np.expand_dims(curve_filter(filter_size, angle, radius=3)[0], 0))
for angle in tqdm(range(0, 360, angle_step)):
ims.append(np.expand_dims(curve_filter(filter_size, angle, radius=5)[0], 0))
for angle in tqdm(range(0, 360, angle_step)):
ims.append(np.expand_dims(curve_filter(filter_size, angle, radius=9)[0], 0))
print('Note the curve filter has a radius parameter which can be easily optimized...')
viz.plot_grid(ims, ylabs=['Gabor', 'Edge (1)', 'Edge (2)',
'Curve (1)', 'Curve (3)', 'Curve (5)', 'Curve (9)'], fontsize_ylab=40)
```
# stitch together base filters
```
# Per-layer channel specs: one tuple per channel describing its filter type
input_spec = [("color", i) for i in range(3)]
conv2d0_spec = [("gabor", orientation, offset)
                for orientation in range(0, 180, 10)
                for offset in [0, 7./8., 7./4, 7.*3/8.]]
conv2d1_spec = [("edge", orientation) for orientation in range(0, 180, 10) ]
conv2d2_spec = [("curve", orientation) for orientation in range(0, 360, 15) ]
conv2d2_spec += [("edge", orientation) for orientation in range(0, 180, 10) ]
mixed3a_spec = [("curve", orientation) for orientation in range(0, 360, 15) ]
mixed3a_spec += [("edge", orientation) for orientation in range(0, 180, 10) ]
mixed3b_spec = [("curve", orientation) for orientation in range(0, 360, 15) ]
mixed3b_spec += [("edge", orientation) for orientation in range(0, 180, 10) ]
# these are the weights that connect one layer to the next
# (first argument is the kernel size; the meaning of r is not visible here —
#  presumably a range of connection radii — TODO confirm against make_weights)
W_conv2d0 = make_weights(7, input_spec, conv2d0_spec)
W_conv2d1 = make_weights(1, conv2d0_spec, conv2d1_spec)
W_conv2d2 = make_weights(3, conv2d1_spec, conv2d2_spec, r=list(range(5,10)))
W_mixed3a = make_weights(3, conv2d2_spec, mixed3a_spec, r=list(range(4,10)))
W_mixed3b = make_weights(5, mixed3a_spec, mixed3b_spec, r=list(range(4,10)))
print(W_conv2d0.shape, W_conv2d1.shape, W_conv2d2.shape, W_mixed3a.shape, W_mixed3b.shape)
# Visualize the stitched weights: grid of conv2d2 kernels, then the 1x1
# conv2d1 mixing matrix as an image
viz.plot_grid(W_conv2d2.transpose())
plt.imshow(W_conv2d1.squeeze())
plt.ylabel('Input channels')
plt.xlabel('Output channels')
plt.show()
```
# Extract features and fit linear model
```
import cifar10
from torch import nn
import torch
import torch.optim as optim
import util
W_conv2d0 = make_weights(7,
[("color", i) for i in range(3)],
[("gabor", orientation, offset)
for orientation in range(0, 180, 10)
for offset in [0, 7./8., 7./4, 7.*3/8.]]
)
conv2d0 = nn.Conv2d(in_channels=3, out_channels=W_conv2d0.shape[-1], kernel_size=W_conv2d0.shape[0])
conv2d0.weight.value = torch.Tensor(W_conv2d0.transpose())
conv2d0.bias.value = 0
pool2d0 = nn.MaxPool2d(kernel_size=5, stride=4, padding=0)
feat_extractor = nn.Sequential(conv2d0, pool2d0)
conv2d0 = nn.Conv2d(in_channels=3, out_channels=W_conv2d0.shape[-1], kernel_size=W_conv2d0.shape[0])
conv2d0.weight.value = torch.Tensor(W_conv2d0.transpose())
conv2d0.bias.value = 0
pool2d0 = nn.MaxPool2d(kernel_size=4, stride=2, padding=0)
```
**look at one example**
```
# Fetch one CIFAR-10 image and run it through the Gabor conv + pool
x, y = cifar10.get_batch(batch_size=1) # X is 1, 3, 32, 32
# x.shape
# x0 = x[0].detach().numpy().transpose()
# plt.imshow((x0 - x0.min()) / (x0.max() - x0.min()), interpolation='bilinear')
# plt.show()
x_gabor = conv2d0(x) # 72, 26, 26
x_gabor_pooled = pool2d0(x_gabor) # 72, 12, 12
# Show every pooled Gabor response map for the single image
viz.plot_grid(np.expand_dims(x_gabor_pooled.detach().numpy()[0], 1))
```
# (tf) set up architecture based on these weights
```
# TF1-style graph assembling the full handcrafted network (conv -> relu ->
# pool, mirroring InceptionV1's early layers by name).
# NOTE(review): `tf` is not imported anywhere in this chunk, and tf.placeholder
# requires TensorFlow 1.x (or v1 compat mode) — confirm before running.
t_image = tf.placeholder("float32", [None, None, None, 3], name="input")
t_conv2d0 = tf.nn.conv2d(t_image, W_conv2d0, strides=[2,2], padding="SAME")
t_conv2d0 = tf.nn.relu(t_conv2d0, name="conv2d0")
t_conv2d0_pool = tf.nn.max_pool2d(t_conv2d0, [3,3], strides=[2,2], padding="SAME")
t_conv2d1 = tf.nn.conv2d(t_conv2d0_pool, W_conv2d1, strides=[1,1], padding="SAME")
t_conv2d1 = tf.nn.relu(t_conv2d1, name="conv2d1")
t_conv2d2 = tf.nn.conv2d(t_conv2d1, W_conv2d2, strides=[1,1], padding="SAME")
t_conv2d2 = tf.nn.relu(t_conv2d2, name="conv2d2")
t_conv2d2_pool = tf.nn.max_pool2d(t_conv2d2, [3,3], strides=[2,2], padding="SAME")
t_mixed3a = tf.nn.conv2d(t_conv2d2_pool, W_mixed3a, strides=[1,1], padding="SAME")
t_mixed3a = tf.nn.relu(t_mixed3a, name="mixed3a")
t_mixed3b = tf.nn.conv2d(t_mixed3a, W_mixed3b, strides=[1,1], padding="SAME")
t_mixed3b = tf.nn.relu(t_mixed3b, name="mixed3b")
# --- Scratch experimentation (REPL-style exploration, not model code) ---
a = [1,2]
type(a)
str(type(1))
# Nested-list flattening experiments (toward LeetCode 341)
a = [[2,[2,[2]]],3,[4,[5,6]]]
a = list(reversed(a))
a
a.extend(reversed(a.pop()))
a
"list" in str(type(a[-1]))
# NOTE(review): plain ints/lists have no .getList() — this line raises
# AttributeError as written; it expects the NestedInteger interface.
a.pop().getList()
# Scratch sketch of LeetCode 341 (Flatten Nested List Iterator); `hi` merely
# namespaces the iterator class while experimenting.
class hi():
    class NestedIterator():
        # Expects elements implementing the NestedInteger interface
        # (isInteger / getInteger / getList), not plain Python lists.
        def __init__(self, nestedList):
            # Reverse so that .pop() yields elements in original order
            self.stack = list(reversed(nestedList))
        def next(self) -> int:
            self.make_stack_top_an_integer()
            return self.stack.pop().getInteger()
        def hasNext(self) -> bool:
            self.make_stack_top_an_integer()
            return len(self.stack) > 0
        def make_stack_top_an_integer(self):
            # While the stack contains a nested list at the top...
            while self.stack and not self.stack[-1].isInteger():
                # Unpack the list at the top by putting its items onto
                # the stack in reverse order.
                self.stack.extend(reversed(self.stack.pop().getList()))
# NOTE(review): NestedIterator is nested inside `hi`, so this unqualified name
# raises NameError here; plain lists also lack the NestedInteger methods.
b = NestedIterator(a)
b.next()
# NOTE(review): lists have no .stack attribute — raises AttributeError.
a.extend(reversed(a.stack.pop().getList()))
# Scratch work toward LeetCode 179 (Largest Number)
nums = [3,30,34,5,9]
def compare(x, y):
    # True when x should come before y in the concatenation ordering
    return str(x)+str(y) > str(y)+str(x)
# NOTE(review): key= takes a one-argument callable; compare() is called with no
# arguments here and raises TypeError (a two-arg comparator would need
# functools.cmp_to_key).
sorted(nums, key=compare())
list(map(str, nums))
# NOTE(review): odd scratch construct — the sorted() call runs once as the
# class body is evaluated and its result is discarded.
class compare():
    sorted(nums, key=lambda x: str(x)[0])[::-1]
class LargerNumKey(str):
    """Key type ordering digit strings so sorted concatenation is maximal."""
    def __lt__(self, other):
        # `self` should precede `other` when self+other concatenates larger
        return self + other > other + self
class Solution:
    def largestNumber(self, nums):
        """Arrange nums to form the largest possible number, as a string."""
        digits = [str(n) for n in nums]
        digits.sort(key=LargerNumKey)
        joined = ''.join(digits)
        # A leading zero means every entry was zero — collapse to "0"
        return joined if joined[0] != '0' else '0'
nums
```
| github_jupyter |
# Classification on Iris dataset with sklearn and DJL
In this notebook, you will try to use a pre-trained sklearn model to run on DJL for a general classification task. The model was trained with [Iris flower dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set).
## Background
### Iris Dataset
The dataset contains a set of 150 records under five attributes - sepal length, sepal width, petal length, petal width and species.
Iris setosa | Iris versicolor | Iris virginica
:-------------------------:|:-------------------------:|:-------------------------:
 |  | 
The chart above shows three different kinds of the Iris flowers.
We will use sepal length, sepal width, petal length, petal width as the feature and species as the label to train the model.
### Sklearn Model
You can find more information [here](http://onnx.ai/sklearn-onnx/). You can use the sklearn built-in iris dataset to load the data. Then we define a [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) to train the model. After that, we convert the model to ONNX format for DJL to run inference. The following code is a sample classification setup using sklearn:
```python
# Train a model.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = RandomForestClassifier()
clr.fit(X_train, y_train)
```
## Preparation
This tutorial requires the installation of Java Kernel. To install the Java Kernel, see the [README](https://github.com/deepjavalibrary/djl/blob/master/jupyter/README.md).
These are dependencies we will use. To enhance the NDArray operation capability, we are importing ONNX Runtime and PyTorch Engine at the same time. Please find more information [here](https://github.com/deepjavalibrary/djl/blob/master/docs/onnxruntime/hybrid_engine.md#hybrid-engine-for-onnx-runtime).
```
// %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.14.0
%maven ai.djl.onnxruntime:onnxruntime-engine:0.14.0
%maven org.slf4j:slf4j-api:1.7.32
%maven org.slf4j:slf4j-simple:1.7.32
import ai.djl.inference.*;
import ai.djl.modality.*;
import ai.djl.ndarray.*;
import ai.djl.ndarray.types.*;
import ai.djl.repository.zoo.*;
import ai.djl.translate.*;
import java.util.*;
```
## Step 1 create a Translator
Inference in machine learning is the process of predicting the output for a given input based on a pre-defined model.
DJL abstracts away the whole process for ease of use. It can load the model, perform inference on the input, and provide
output. DJL also allows you to provide user-defined inputs. The workflow looks like the following:

The `Translator` interface encompasses the two white blocks: Pre-processing and Post-processing. The pre-processing
component converts the user-defined input objects into an NDList, so that the `Predictor` in DJL can understand the
input and make its prediction. Similarly, the post-processing block receives an NDList as the output from the
`Predictor`. The post-processing block allows you to convert the output from the `Predictor` to the desired output
format.
In our use case, we use a class namely `IrisFlower` as our input class type. We will use [`Classifications`](https://javadoc.io/doc/ai.djl/api/latest/ai/djl/modality/Classifications.html) as our output class type.
```
/**
 * Input holder for one iris sample: the four numeric features
 * (sepal length/width, petal length/width) the model consumes.
 */
public static class IrisFlower {
    public float sepalLength;
    public float sepalWidth;
    public float petalLength;
    public float petalWidth;
    public IrisFlower(float sepalLength, float sepalWidth, float petalLength, float petalWidth) {
        this.sepalLength = sepalLength;
        this.sepalWidth = sepalWidth;
        this.petalLength = petalLength;
        this.petalWidth = petalWidth;
    }
}
```
Let's create a translator
```
/**
 * Pre/post-processing bridge: packs an IrisFlower into the NDList the ONNX
 * model expects, and unpacks the model's NDList output into Classifications
 * over the three species.
 */
public static class MyTranslator implements NoBatchifyTranslator<IrisFlower, Classifications> {
    private final List<String> synset;
    public MyTranslator() {
        // species name (order must match the label order the model was trained with)
        synset = Arrays.asList("setosa", "versicolor", "virginica");
    }
    /** Packs the four features into a 1x4 float NDArray. */
    @Override
    public NDList processInput(TranslatorContext ctx, IrisFlower input) {
        float[] data = {input.sepalLength, input.sepalWidth, input.petalLength, input.petalWidth};
        NDArray array = ctx.getNDManager().create(data, new Shape(1, 4));
        return new NDList(array);
    }
    /**
     * Builds Classifications from the model output.
     * NOTE(review): index 1 is read here — presumably the exported classifier
     * emits two outputs (predicted label, per-class probabilities) and output 1
     * holds the probabilities; confirm against the ONNX model's output spec.
     */
    @Override
    public Classifications processOutput(TranslatorContext ctx, NDList list) {
        float[] data = list.get(1).toFloatArray();
        List<Double> probabilities = new ArrayList<>(data.length);
        for (float f : data) {
            probabilities.add((double) f);
        }
        return new Classifications(synset, probabilities);
    }
}
```
## Step 2 Prepare your model
We will load a pretrained sklearn model into DJL. We defined a [`ModelZoo`](https://javadoc.io/doc/ai.djl/api/latest/ai/djl/repository/zoo/ModelZoo.html) concept to allow users to load models from a variety of locations, such as a remote URL, local files, or the DJL pretrained model zoo. We need to define a `Criteria` class to help the model zoo locate the model and attach the translator. In this example, we download a compressed ONNX model from S3.
```
String modelUrl = "https://mlrepo.djl.ai/model/tabular/softmax_regression/ai/djl/onnxruntime/iris_flowers/0.0.1/iris_flowers.zip";
Criteria<IrisFlower, Classifications> criteria = Criteria.builder()
.setTypes(IrisFlower.class, Classifications.class)
.optModelUrls(modelUrl)
.optTranslator(new MyTranslator())
.optEngine("OnnxRuntime") // use OnnxRuntime engine by default
.build();
ZooModel<IrisFlower, Classifications> model = criteria.loadModel();
```
## Step 3 Run inference
User will just need to create a `Predictor` from model to run the inference.
```
Predictor<IrisFlower, Classifications> predictor = model.newPredictor();
IrisFlower info = new IrisFlower(1.0f, 2.0f, 3.0f, 4.0f);
predictor.predict(info);
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.