text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
from google.colab import drive

drive.mount('/content/drive')

# Load the piano roll predicted by the model (y_test_pred).
arr = np.load('/content/drive/MyDrive/Post-Processing/MEL2.npy')
print(np.shape(arr))

# Flatten the 3D array (2953 chunks) into one 2D piano roll.
# A single np.concatenate over all chunks replaces the original loop that
# re-concatenated after every chunk (O(n^2) copying).
X = np.concatenate(arr[:2953], axis=0)
print(X.shape)

# Convert the boolean matrix to a binary (0/1) integer matrix in one shot;
# astype(int) maps False->0, True->1, replacing the element-wise double loop.
arr2 = X.astype(int)
print(arr2)
!pip install midiutil
from midiutil.MidiFile import MIDIFile

# Build a single-track MIDI file from the predicted binary piano roll.
mf = MIDIFile(1)
track = 0
time = 0
delta = 0.000005
mf.addTrackName(track, time, "Output")
mf.addTempo(track, time, 120)
channel = 0       # single MIDI channel for every note
volume = 100      # fixed velocity for every note
duration = 0.01   # fixed note length (in beats)
for i in range(10000):
    # NOTE(review): this accumulates i*delta on every iteration, so onsets
    # grow quadratically (time = delta * i*(i+1)/2). If evenly spaced frames
    # were intended, `time = i * delta` (or `time += delta`) seems more
    # likely — confirm against the desired frame rate.
    time = time + i*delta
    for j in range(arr2.shape[1]):
        # NOTE(review): tests X (the boolean matrix) rather than arr2 (the
        # 0/1 matrix built above); equivalent since True == 1, but
        # inconsistent with the conversion step.
        if X[i][j] == 1:
            pitch = j   # piano-roll column index used directly as MIDI pitch
            mf.addNote(track, channel, pitch, time, duration, volume)
# Generate the MIDI file for y_test_pred.
with open("output_final.mid", 'wb') as outf:
    mf.writeFile(outf)
!pip install pretty_midi
import pretty_midi
import pandas as pd
path = "output_final.mid"
midi_data = pretty_midi.PrettyMIDI(path)
midi_list = []
pretty_midi.pretty_midi.MAX_TICK = 1e10
midi_data.tick_to_time(14325216)
for instrument in midi_data.instruments:
for note in instrument.notes:
start = note.start
end = note.end
pitch = note.pitch
velocity = note.velocity
midi_list.append([start, end, pitch, velocity, instrument.name])
midi_list = sorted(midi_list, key=lambda x: (x[0], x[2]))
df = pd.DataFrame(midi_list, columns=['Start', 'End', 'Pitch', 'Velocity', 'Instrument'])
print(df)
fig, ax = plt.subplots()
i = 0
while(i<48105) :
start = float(midi_list[i][0])
pitch = float(midi_list[i][2])
duration = float(midi_list[i][1]-midi_list[i][0])
rect = matplotlib.patches.Rectangle((start, pitch),duration, 1, ec='black', linewidth=1)
ax.add_patch(rect)
i+=1
plt.xlim([0, 130])
plt.ylim([0, 88])
plt.grid(color='grey',linewidth=1)
print('From Model')
plt.show()
# Load the ground-truth (validation) piano roll for comparison.
arr3 = np.load('/content/drive/MyDrive/MEL-val/Y_final_MEL_val.npy')
# Drop the singleton axis so the ground-truth array matches the prediction.
arr3 = np.squeeze(arr3, axis=-2)
print(np.shape(arr3))

# Flatten the 3D array (2953 chunks) into one 2D piano roll; a single
# concatenate replaces the original O(n^2) incremental-concatenation loop.
X2 = np.concatenate(arr3[:2953], axis=0)
print(X2.shape)

# Convert the boolean matrix to a binary (0/1) integer matrix
# (astype(int) maps False->0, True->1, replacing the element-wise loop).
arr4 = X2.astype(int)
print(arr4)
from midiutil.MidiFile import MIDIFile

# Build a single-track MIDI file from the ground-truth piano roll.
mf = MIDIFile(1)
track = 0
time = 0
delta = 0.000005
mf.addTrackName(track, time, "Output")
mf.addTempo(track, time, 120)
channel = 0       # single MIDI channel for every note
volume = 100      # fixed velocity for every note
duration = 0.01   # fixed note length (in beats)
for i in range(10000):
    # NOTE(review): accumulating i*delta makes onsets quadratic in i;
    # `time = i * delta` may have been intended — confirm (same issue as the
    # predicted-roll cell).
    time = time + i*delta
    for j in range(arr4.shape[1]):
        # NOTE(review): tests X2 (boolean) rather than arr4 (0/1);
        # equivalent since True == 1, but inconsistent.
        if X2[i][j] == 1:
            pitch = j
            mf.addNote(track, channel, pitch, time, duration, volume)
# Generate the MIDI file for the ground truth.
with open("output_final_actual.mid", 'wb') as outf:
    mf.writeFile(outf)
import pretty_midi
import pandas as pd

# Parse the ground-truth MIDI file back into a note table.
path = "output_final_actual.mid"
midi_data = pretty_midi.PrettyMIDI(path)
midi_list = []
# Raise the tick limit so very long files do not hit the default cap.
pretty_midi.pretty_midi.MAX_TICK = 1e10
midi_data.tick_to_time(14325216)
for instrument in midi_data.instruments:
    for note in instrument.notes:
        midi_list.append([note.start, note.end, note.pitch, note.velocity, instrument.name])
# Sort by onset time, then pitch.
midi_list = sorted(midi_list, key=lambda x: (x[0], x[2]))
df = pd.DataFrame(midi_list, columns=['Start', 'End', 'Pitch', 'Velocity', 'Instrument'])
print(df)

# Piano-roll style plot: one rectangle per note.
fig, ax = plt.subplots()
# Iterating the list replaces the original `while i < 30255` loop, which
# hard-coded the note count and would raise IndexError on a shorter file.
for start, end, pitch, velocity, name in midi_list:
    rect = matplotlib.patches.Rectangle(
        (float(start), float(pitch)), float(end - start), 1,
        ec='black', linewidth=1)
    ax.add_patch(rect)
plt.xlim([0, 130])
plt.ylim([0, 88])
plt.grid(color='grey', linewidth=1)
print('Actual')
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/MathewsJosh/Resmat/blob/main/Resmat_ex03.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Exercicio 03 <br>
Mathews Edwirds
Usando uma peça anelar, a força de 75N pode ser aplicada no plano vertical para vários ângulos Theta. Determine a intensidade do momento produzido em relação ao ponto A. Faça um gráfico do resultado de M (na ordenada) versus Theta (na abscissa) para 0<=Theta<=180º e especifique os ângulos que fornecem os momentos máximo e mínimo.

```
# Importações necessárias
import numpy as np
import matplotlib.pyplot as plt
# Values given in the exercise
comp_tubo1 = 1.5  # tube length along x relative to A (m)
comp_tubo2 = 2.0  # tube length along y relative to A (m)
forca = 75        # applied force (N)

def calcula_magnitude(forca, theta):
    """Return the moment magnitude about point A for a force at angle theta.

    theta is given in degrees. The moment is
    sqrt((dx*F*sin(theta))^2 + (dy*F)^2), where dx, dy are the lever arms
    of the two tubes relative to A.
    """
    # Convert the current theta value to radians.
    theta = np.deg2rad(theta)
    # Lever arms relative to the centre of A (the original wrapped these in
    # np.abs(x - 0), a no-op for positive lengths).
    dist_x = comp_tubo1  # 1.5
    dist_y = comp_tubo2  # 2.0
    # Moment components. (The original also computed an unused dist_z and
    # dxF_k — dead code removed.)
    dxF_i = dist_x * forca
    dxF_j = dist_y * forca
    # Magnitude about A.
    return np.sqrt(np.power(dxF_i * np.sin(theta), 2) + np.power(dxF_j, 2))

# Magnitude for each integer angle theta in [0, 180).
intervalo = range(0, 180, 1)
resultados = [calcula_magnitude(75, theta) for theta in intervalo]
# Função responsável por plotar os valores de t
# Plots the magnitude curve and annotates the extrema.
def plot_results(x, label, labelX, labelY, title):
    """Plot magnitudes `x` against the global `intervalo` of angles.

    x: list of magnitudes; label/labelX/labelY/title: plot texts.
    Since intervalo is 0..179 in 1-degree steps, the list index of an
    extremum equals the angle in degrees.
    """
    #intervalo = np.arange(0.0, 2*3.14, 0.01)
    plt.figure(figsize = (8, 6))
    plt.plot(intervalo, x,'-', label=label)
    plt.xlim(np.amin(intervalo), np.amax(intervalo))
    plt.ylim(np.amin(x) - 5, np.amax(x) + 5)
    #plt.axhline(y=0, color="black", linestyle="-")
    # Mark the maximum and minimum magnitudes.
    # NOTE(review): the text positions (77, ...), (5, ...) and the extra
    # hand-placed 'Min = 180°' annotation at xy=(179, 150) are hard-coded
    # for this particular dataset — confirm if inputs change.
    plt.annotate('Max = ' + str(x.index(np.amax(x))) + '°', fontsize = 12, xy=(x.index(np.amax(x)), np.amax(x)), xytext=(77, np.amax(x)-15), arrowprops=dict(facecolor='black', shrink=0.05))
    plt.annotate('Min = ' + str(x.index(np.amin(x))) + '°', fontsize = 12, xy=(x.index(np.amin(x)), np.amin(x)), xytext=(5, np.amin(x)+10), arrowprops=dict(facecolor='black', shrink=0.05))
    plt.annotate('Min = 180°', fontsize = 12, xy=(179, 150), xytext=(150, 160), arrowprops=dict(facecolor='black', shrink=0.05))
    plt.grid(True)
    # Plot cosmetics (labels, fonts, title and legend).
    plt.title(title, fontsize = 15, pad = 20)
    plt.xlabel(labelX, labelpad = 5, fontsize = 12)
    plt.xticks(fontsize = 12)
    plt.ylabel(labelY, labelpad = 2, fontsize = 12)
    plt.yticks(fontsize = 12)
    plt.legend(loc = "best", fontsize = 12, frameon = True)
    plt.show()

plot_results(resultados, "theta", "Valores de theta em graus", "Magnitude no ponto A", "Magnitude x Variação de θ")
```
| github_jupyter |
# The peaks over threshold method
This notebook continues with the dataset of the notebook about the `Dataset` object.
There are two main approaches in extreme value theory: the peaks over threshold approach and the block maxima approach.
In this notebook, the peaks over threshold approach will be illustrated.
In the notebook about the `Dataset` object, it was determined that the value 15 was a good guess for the threshold for our dataset.
First, generate the same dataset as in the notebook about the `Dataset` object.
```
from evt.dataset import Dataset
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import pareto, norm

N_DATAPOINTS = 100000 # number of datapoints in the example set
NORMAL_STD = 5 # standard deviation of the normal distribution
PARETO_SHAPE = 2.5 # shape parameter of the Pareto distribution
EXAMPLE_NAME = 'Values' # for nicer plots
EXAMPLE_INDEX_NAME = 'Index'

np.random.seed(0) # enforce deterministic behaviour
# Heavy-tailed example data: Gaussian noise plus Pareto-distributed jumps.
series = pd.Series(
    norm.rvs(scale=NORMAL_STD, size=N_DATAPOINTS) + pareto.rvs(PARETO_SHAPE, size=N_DATAPOINTS),
    name=EXAMPLE_NAME
)
series.index.name = EXAMPLE_INDEX_NAME
dataset = Dataset(series)
```
Let's start by determining the peaks over threshold.
```
from evt.methods.peaks_over_threshold import PeaksOverThreshold

# Threshold chosen by inspection in the Dataset notebook.
THRESHOLD = 15
peaks_over_threshold = PeaksOverThreshold(dataset, THRESHOLD)
```
The peaks are stored in the `.series_tail` attribute.
```
# Exceedances over the threshold (the "tail" sample).
peaks_over_threshold.series_tail
```
To graphically show the peaks over threshold, we can plot the peaks.
The original dataset is shown for comparison.
```
# Plot the tail exceedances, with the original dataset for comparison.
fig, ax = plt.subplots()
peaks_over_threshold.plot_tail(ax)
fig.tight_layout()
plt.show()
```
A natural next question is whether the tail is fatter or lighter than an exponential.
The exponential distribution is a benchmark for tail behaviour.
```
# QQ plot against the exponential benchmark: diagnoses heavier- or
# lighter-than-exponential tail behaviour.
fig, ax = plt.subplots()
peaks_over_threshold.plot_qq_exponential(ax)
fig.tight_layout()
plt.show()
```
The quantiles of the empirical survival function are not described well by an exponential.
High quantiles seem to lie under the diagonal.
This is a signal of a sub-exponential distribution.
Next, let's make a Zipf plot: a log-log diagram of the survival function against the values.
```
# Zipf plot (log-log survival function): power laws appear as straight lines.
fig, ax = plt.subplots()
peaks_over_threshold.plot_zipf(ax)
fig.tight_layout()
plt.show()
```
Power laws will show as straight lines in the Zipf plot.
In following notebooks, the tail index in the peaks over threshold method will be estimated.
For example, using the Hill estimator, the moment estimator and maximum likelihood.
| github_jupyter |
# Network Analysis
```
from elasticsearch import Elasticsearch
import pandas as pd
import numpy as np
import eland as ed
import networkx as nx

# Optionally add progress bars to df.apply using .progress_apply
from tqdm import tqdm
tqdm.pandas()

# uncomment this if database is not already open (and give ES a couple minutes to set up)
#!make database

# Pull the tweet fields needed for network construction out of Elasticsearch.
ed_df = ed.read_es('localhost', 'twitter')
df = ed_df[
    ['tweet_id', 'user_id', 'name', 'description', 'full_text_processed', 'full_text',
     'original_tweet_id_str', 'quoted_status_id_str', 'in_reply_to_status_id_str',
     'is_quote_status', 'is_retweet', 'is_reply']
].to_pandas().fillna(np.nan)

# remove the annoying scientific notation from id columns
#pd.set_option('display.float_format', lambda x: '%.0f' % x)
# Normalize ids with a float -> int -> str round-trip.
df['tweet_id'] = df['tweet_id'].astype(float).astype(int).astype(str)
df['user_id'] = df['user_id'].astype(float).astype(int).astype(str)
def get_source_id(row):
    """Return the original Tweet ID referenced by a Quote, Retweet or Reply.

    Returns NaN for original tweets (no source).
    """
    if row['is_quote_status']:
        # Prefer the quoted status id; fall back to the reply id when the
        # quoted id is missing. pd.notna handles both strings and NaN,
        # whereas the original `~np.isnan(...)` raised TypeError whenever
        # the id was a (non-null) string.
        val = (
            row['quoted_status_id_str']
            if pd.notna(row['quoted_status_id_str'])
            else row['in_reply_to_status_id_str']
        )
    elif row['is_retweet']:
        val = row['original_tweet_id_str']
    elif row['is_reply']:
        val = row['in_reply_to_status_id_str']
    else:
        val = np.nan
    return val
# Resolve each tweet's source id; the float -> int -> str round-trip
# normalizes scientific-notation ids, mapping NaN to '0'.
df['source'] = df.apply(get_source_id, axis=1)\
    .astype(float).fillna(0).astype(int).astype(str)
# I'm counting replies as original posts...
df['is_original'] = ~df[['is_quote_status', 'is_retweet']].max(1)
# Drop non-original tweets whose source could not be resolved, and the
# bookkeeping columns that are no longer needed.
to_drop = df[~df.is_original & (df.source == '0')].index
df = df.drop(index=to_drop, columns=[
    'is_quote_status', 'is_retweet', 'is_reply',
    'original_tweet_id_str', 'quoted_status_id_str',
    'in_reply_to_status_id_str'
    ]
).rename(columns={'tweet_id': 'target', 'user_id': 'target_user'})
# get nodes for tweet network: one row per unique tweet id.
df_nodes_tweets = df\
    .drop_duplicates('target')\
    .rename(columns={'target': 'node_id', 'target_user': 'user_id'})\
    .drop(columns=['full_text_processed', 'source', 'is_original'])
df_nodes_tweets['node_id'] = df_nodes_tweets.node_id.astype(str)
# Join the tweet2vec embeddings; ids normalized via float -> int -> str.
t2v = pd.read_csv('../data/results/iwmi_tweet2vec.csv')\
    .rename(columns={'tweet_id': 'node_id'})
t2v['node_id'] = t2v['node_id'].astype(float).astype(int).astype(str)
df_nodes_tweets = df_nodes_tweets\
    .set_index('node_id')\
    .join(
        t2v.drop(columns=['name', 'user_id']).set_index('node_id')
    )\
    .reset_index().drop(columns=['original_tweet_id_str'])
# get nodes for user network: one row per unique user id.
df_nodes_users = df\
    .drop_duplicates('target_user')\
    .rename(columns={'target_user': 'node_id'})\
    .drop(columns=['target', 'full_text_processed', 'is_original', 'source'])
df_nodes_users['node_id'] = df_nodes_users.node_id.astype(str)
# Join the user2vec embeddings; ids normalized via float -> int -> str.
u2v = pd.read_csv('../data/results/iwmi_user2vec.csv')\
    .rename(columns={'user_id': 'node_id'})
u2v['node_id'] = u2v['node_id'].astype(float).astype(int).astype(str)
df_nodes_users = df_nodes_users\
    .set_index('node_id')\
    .join(
        u2v.set_index('node_id')
    ).reset_index()
def get_source_user(val, df_lookup):
    """Look up the user_id that authored tweet `val`; '0' when unknown."""
    if val in df_lookup.index:
        return df_lookup.loc[val, 'user_id']
    return '0'
# Resolve the user who authored each source tweet ('0' when the source
# tweet is not in our node table).
df_lookup = df_nodes_tweets.set_index('node_id')
df['source_user'] = df['source'].apply(
    lambda val: get_source_user(val, df_lookup)
)
# Tweet-level edge list: source tweet -> target tweet; '0' marks an
# unresolved source and is dropped.
df_edges_tweets = df[['source', 'target']]
df_edges_tweets = df_edges_tweets[df_edges_tweets.source != '0']
# User-level edge list, weighted by the number of interactions per pair.
df_edges_users = df[['source_user', 'target_user']]\
    .groupby(['source_user', 'target_user'])\
    .size()\
    .to_frame()\
    .reset_index()\
    .rename(columns={'source_user': 'source', 'target_user': 'target', 0: 'weight'})
df_edges_users = df_edges_users[df_edges_users.source != '0']
# Get euclidean distances among tweets
df_lookup = df_nodes_tweets.set_index('node_id')
vec_cols = [f'vec_{i}' for i in range(10)]  # embedding dimensionality is 10
def get_euclidean_distance(row, df_lookup, vec_cols):
    """Euclidean distance between the source and target embedding vectors.

    Returns NaN when either endpoint is missing from the lookup table.
    """
    try:
        src_vec = df_lookup.loc[row['source'], vec_cols]
        dst_vec = df_lookup.loc[row['target'], vec_cols]
    except KeyError:
        return np.nan
    return np.linalg.norm(src_vec - dst_vec)
# Distance for every tweet edge (tqdm progress bar via progress_apply).
df_edges_tweets['euclidean_dist'] = df_edges_tweets.progress_apply(
    lambda row: get_euclidean_distance(row, df_lookup, vec_cols), axis=1
)
# Get euclidean distances among users (re-points df_lookup at user nodes).
df_lookup = df_nodes_users.set_index('node_id')
df_edges_users['euclidean_dist'] = df_edges_users.progress_apply(
    lambda row: get_euclidean_distance(row, df_lookup, vec_cols), axis=1
)
```
**NOTE:** Some Tweet source ids don't exist in our database. This means that these tweets are either replies or quotes which we were not able to retrieve into the dataset
```
# Impute missing distances with the mean (sources absent from the dataset).
df_edges_tweets['euclidean_dist'] = df_edges_tweets['euclidean_dist'].fillna(df_edges_tweets['euclidean_dist'].mean())
# Higher interaction count and closer embeddings give a larger score.
df_edges_users['weighted_dist'] = df_edges_users['weight'] / df_edges_users['euclidean_dist']
# Persist node/edge tables for downstream network analysis.
df_edges_tweets.to_csv('../data/results/edges_tweets.csv', index=False)
df_edges_users.to_csv('../data/results/edges_users.csv', index=False)
df_nodes_tweets.to_csv('../data/results/nodes_tweets.csv', index=False)
df_nodes_users.to_csv('../data/results/nodes_users.csv', index=False)
```
## Network Analysis (Tweets)
There is possibly little to no information to be taken from this section, but I kept it anyway.
```
# NOTE(review): df_network is not defined until a LATER cell (the
# "Network Analysis (users)" section), so running the notebook top to
# bottom raises NameError here — the cell order looks inverted.
# create a directed graph
G_dir = nx.from_pandas_edgelist(df_network[['source', 'target', 'weight']], create_using=nx.DiGraph())
# create an undirected graph
G = nx.from_pandas_edgelist(df_network[['source', 'target', 'weight']], create_using=nx.Graph())
print(f'Graph basic info: {G_dir.number_of_nodes()} nodes, {G_dir.number_of_edges()} edges.')
# compare densities of the two graphs
print(f'Density measurement: \nUndirected: {nx.density(G)}\nDirected: {nx.density(G_dir)}')

# Per-node degree table.
#out-degree
node_attrs=pd.DataFrame.from_dict(dict(G_dir.out_degree()), 'index').rename(columns={0:'out_degree'})
#in-degree
node_attrs=node_attrs.join(pd.DataFrame.from_dict(dict(G_dir.in_degree()), 'index').rename(columns={0:'in_degree'}))
# weighted out-degree
#node_attrs=node_attrs.join(pd.DataFrame.from_dict(dict(G_dir.out_degree(weight='weight')), 'index').rename(columns={0:'weighted_out_degree'}))
# weighted in-degree
#node_attrs=node_attrs.join(pd.DataFrame.from_dict(dict(G_dir.in_degree(weight='weight')), 'index').rename(columns={0:'weighted_in_degree'}))
# add tweet ids and original info
#node_attrs=node_attrs.join(df.set_index('COMUNE')[["COD_REG","COD_CM", "COD_PRO", "PRO_COM"]])
node_attrs
node_attrs.out_degree.unique()
df_network[['source', 'target', 'weight']]
```
## Network Analysis (users)
```
# Re-load the fields needed for the user network from Elasticsearch.
# NOTE(review): the tweet-network cell above already referenced df_network,
# which is first assigned here — confirm the intended cell order.
df_network = ed_df[
    ['tweet_id', 'name',
     'original_tweet_id_str', 'quoted_status_id_str', 'in_reply_to_status_id_str',
     'is_quote_status', 'is_retweet', 'is_reply', 'is_translator', 'is_original']
].to_pandas().fillna(np.nan)
# Inspect which columns mention 'name'.
cols = ed_df.columns
cols[cols.str.contains('name')]
#ed_df[[]]
```
## User2Vec
Based on: https://ieeexplore.ieee.org/document/8875952/
How it works:
- Run doc2vec
- Average vector representations for each user
```
from src.models import User2Vec, tokenize

# Hyperparameters
vector_size = 10   # embedding dimensionality
min_count = 2      # ignore tokens rarer than this
epochs = 40        # doc2vec training epochs

df_network = df[[
    'tweet_id', 'original_tweet_id_str',
    'user_id', 'name', 'full_text_processed',
]].copy()

# Train on unique texts only; tags are positional indices.
unique_docs = df_network['full_text_processed'].unique()
train_corpus = [tokenize(doc, tag) for doc, tag in zip(unique_docs, range(unique_docs.shape[0]))]

import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

model = User2Vec(vector_size=vector_size, min_count=min_count, epochs=epochs)
model.build_vocab(train_corpus)
model.train(train_corpus, total_examples=model.corpus_count, epochs=model.epochs)

# User vectors: average of the doc vectors of each user's tweets
# (see the referenced paper in the markdown above this cell).
vec_cols = [f'vec_{i}' for i in range(vector_size)]
users_id, user_vectors = model.infer_user_vectors(
    df_network['user_id'],
    df_network['full_text_processed']
)
df_user_vecs = pd.DataFrame(user_vectors, columns=vec_cols)
df_user_vecs['user_id'] = users_id
df_user_vecs
```
**Sanity Checks**: To be removed
```
# Sanity check: how many source ids point at tweets we actually hold?
tweet_ids = df['target'].tolist()
# Membership tests against a set are O(1); the original tested against the
# list, making the apply() O(rows * tweets).
_tweet_id_set = set(tweet_ids)
check_existence = df.apply(lambda x: x['source'] in _tweet_id_set, axis=1)
from collections import Counter
Counter(check_existence)
#pd.set_option('display.float_format', lambda x: '%.3f' % x)
ed_df.shape
```
| github_jupyter |
```
from queue import Queue
import numpy as np
import math
from operator import itemgetter
from cacheout import Cache

# Global counter of edges processed by the whole array (incremented by
# SystolicArrayCell.compute via `global edge_count`).
edge_count = 0
class SystolicArrayCell:
    """One processing element (PE) of the systolic array.

    Each cell pops edges from its row's edge bank and accumulates values
    into its result bank, modelling one register stage per clock cycle
    (read() samples on the clock edge, compute() is combinational logic).
    """

    def __init__(self, row_n, col_n):
        # Grid position; filled in by connect().
        self.pos_x = 0
        self.pos_y = 0
        self.row_n = row_n
        self.col_n = col_n
        # ring register
        self.receive_cell = None  # data-receive register (neighbor cell in the ring)
        self.receive_reg = 1
        self.receive_out = 0
        #self.send =  # data-send register (unused)
        # On chip buffer
        self.result_bank_input = None
        # edge update bookkeeping
        self.process_id = 0
        self.process_id_out = 0
        self.next_src = -1
        self.next_dst = -1
        self.src = -1        # source node of the edge currently held
        self.dst = -1        # destination node of the edge currently held
        self.rb_depth = 0    # row into the result bank for the current dst
        self.rb_value = 0    # value read back from the result bank this cycle
        self.edge_empty = False    # True once this cell's edge queue is exhausted
        self.edge_compute = True   # True when an edge may be consumed/computed
        self.hold = False          # True while waiting for src data in the cache
        self.edge_number = 0
        self.cache_bank = None

    # Connects this cell to its neighbors above and to the left
    def connect(self, pos_x, pos_y, array):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.edge_number = pos_y
        # ring dataflow (currently disabled)
        #if self.pos_y is array.row_n-1:
        #    self.receive_cell = array.cells[0][self.pos_x]  # in cells, the first index is the row (Y), the second the column (X)
        # Otherwise, it's another cell
        #else:
        #    self.receive_cell = array.cells[self.pos_y+1][self.pos_x]
        # each PE on the same row connects to the same cache/result/edge banks
        self.cache_bank = array.cache_bank[self.pos_y]
        self.result_bank_input = array.result_bank[self.pos_y][self.pos_x]
        self.edge_bank = array.edge_bank[self.pos_y]

    def set_process_id(self, idx):
        # Pre-populate the row cache with the node ids this PE may consume.
        for id_ in idx:
            self.cache_bank.set(id_, 'none')

    # We'll model the transfer of signals through registers with a read() and a
    # compute() method.
    # read() represents the registers sampling data at the positive edge of the
    # clock
    def read(self, edge_update):
        # ring dataflow
        #print("Enter | cell({:d},{:d}) next_src {:d}, next_dst {:d}, src {:d}, dst {:d}, process_id {:d}". format(self.pos_x, self.pos_y, self.next_src, self.next_dst, self.src, self.dst, self.process_id))
        if self.edge_bank.empty():
            # No more edges for this row: mark the queue exhausted.
            self.edge_empty = True
        elif self.edge_compute or edge_update:
            # Previous edge finished (or first cycle): pop the next edge and
            # stall until its source data is available in the cache.
            self.src, self.dst = self.edge_bank.get()
            self.hold = True
            self.edge_compute = False
        else:
            self.edge_compute = False
        #if edge_update:
        #    self.process_id = self.process_id
        #else:
        #    self.receive_reg = self.receive_cell.receive_out
        #    self.process_id = self.receive_cell.process_id_out
        #print("Medium | cell({:d},{:d}) next_src {:d}, next_dst {:d}, src {:d}, dst {:d}, process_id {:d}". format(self.pos_x, self.pos_y, self.next_src, self.next_dst, self.src, self.dst, self.process_id))
        # The edge may compute this cycle only if its src is cached.
        if self.cache_bank.has(self.src) and self.hold:
            self.edge_compute = True
            self.hold = False
        else:
            self.edge_compute = False
        # Result-bank row for dst: nodes are striped across rows modulo row_n.
        self.rb_depth = int(self.dst/self.row_n)
        #print("Out | cell({:d},{:d}) next_src {:d}, next_dst {:d}, src {:d}, dst {:d}, process_id {:d}". format(self.pos_x, self.pos_y, self.next_src, self.next_dst, self.src, self.dst, self.process_id))
        self.rb_value = self.result_bank_input[self.rb_depth]

    # compute() represents combinational logic that takes place between
    # positive edges of the clock (multiplication and addition)
    def compute(self):
        # ring dataflow
        if self.edge_compute:
            print("compute cell({:d},{:d}) src {:d}, dst {:d}". format(self.pos_x, self.pos_y, self.src, self.dst))
            # Count every edge processed across the whole array.
            global edge_count
            edge_count = edge_count + 1
            # Accumulate the received value into the destination's result slot.
            self.result_bank_input[self.rb_depth] = self.rb_value + self.receive_reg
        # Forward register contents to the outputs sampled by neighbors.
        self.receive_out = self.receive_reg
        self.process_id_out = self.process_id
        #print("cell({:d},{:d}), hold {:d}, edge_empty {:d}". format(self.pos_x, self.pos_y, self.hold, self.edge_empty))
        #print(self.edge_number)

    def cell_state(self):
        """Debug print of this cell's registers."""
        #print("cell({:d},{:d}),rec_reg={:d}, rec_out={:d}, proc_id={:d}, proc_out={:d}". format(self.pos_x, self.pos_y, self.receive_reg, self.receive_out, self.process_id, self.process_id_out))
        print("cell({:d},{:d}),rec_reg={:d}, proc_id={:d}, rb_value={:d}". format(self.pos_x, self.pos_y, self.receive_reg, self.process_id, self.rb_value))
# This represents our entire array: cells, inputs, and outputs
class SystolicArray:
    """A row_n x col_n grid of SystolicArrayCell PEs.

    Each row owns a feature cache (cache_bank), an edge queue (edge_bank)
    and, per PE, a result bank that accumulates per-destination values.
    """

    # We'll take a parameter for the size of the square arrays to be multiplied
    def __init__(self, row_n, col_n):
        self.row_n = row_n
        self.col_n = col_n
        # "cells" will hold the array of processing elements
        self.cells = []
        # This array is a square with dimensions "array_size"
        for _ in range(self.row_n):
            row = []
            for _ in range(self.col_n):
                cell = SystolicArrayCell(row_n, col_n)
                row.append(cell)
            self.cells.append(row)
        self.cache_bank = [Cache(maxsize=self.row_n) for _ in range(self.row_n)]
        self.edge_bank = [Queue() for _ in range(self.row_n)]
        self.result_bank = [[list() for _ in range(self.col_n)] for _ in range(self.row_n)]
        # When all cells and inputs are created, then they can be connected
        # (again, this would be accomplished with wiring)
        for row_num, row in enumerate(self.cells):
            for col_num, cell in enumerate(row):
                cell.connect(col_num, row_num, self)  # each row maps to one pos_y, each column to one pos_x

    # ring dataflow
    def edge_bucket_empty(self, e_b):
        """Return True while any bucket queue still holds edges.

        Uses `not q.empty()` instead of the original `q.empty() is False`;
        identity comparison against bool literals is fragile/unidiomatic.
        """
        for idx in e_b:
            for id_ in idx:
                if not id_.empty():
                    return True
        return False

    def edge_load_balance(self, row_n, src, dst):
        """Distribute edges across the per-row edge banks in rotating order."""
        Edge_bucket = [[Queue() for _ in range(row_n)] for _ in range(row_n)]
        # NOTE(review): rebinds the `src`/`dst` parameters inside the loop —
        # behavior is preserved here, but it shadows the inputs.
        for idx in sorted(zip(src, dst)):
            src, dst = idx
            Edge_bucket[src%row_n][dst%row_n].put(idx)
        # Drain the buckets round-robin so each row gets a balanced stream.
        while self.edge_bucket_empty(Edge_bucket):
            for i in range(row_n):
                num = [j for j in range(i, row_n)]
                for n in range(i):
                    num.append(n)
                for id_, val in enumerate(num):
                    #print("--({:d}, {:d})". format(val, id_))
                    if not Edge_bucket[val][id_].empty():
                        self.edge_bank[id_].put(Edge_bucket[val][id_].get())

    def edge_preprocess(self, num_node, edge_src, edge_dst):
        """Group edges by destination, ordering each group so that edges whose
        src is already striped onto the destination row come first."""
        src, dst = zip(*(sorted(zip(edge_src, edge_dst), key=itemgetter(1))))
        result = [list() for _ in range(num_node)]
        for idx in range(len(dst)):
            result[dst[idx]].append((src[idx], dst[idx]))
        for idx in range(len(result)):
            #print(idx)
            #print(len(result[idx]))
            # `== 0` replaces the original `is 0` identity comparison, which
            # only works by accident of CPython's small-int caching.
            if len(result[idx]) == 0:
                result[idx] = []
            else:
                src, dst = zip(*result[idx])
                result_A = []  # edges whose src maps at/after the dst row
                result_B = []  # remaining edges, appended afterwards
                for idx_ in range(len(src)):
                    if (src[idx_] >= (dst[idx_] % self.row_n)):
                        result_A.append((src[idx_], dst[idx_]))
                    else:
                        result_B.append((src[idx_], dst[idx_]))
                result_A.extend(result_B)
                result[idx] = result_A
        return result

    def fill_edges(self, num_node, edge_src, edge_dst):
        """Preprocess edges and enqueue each onto its destination's row bank."""
        edge_ = self.edge_preprocess(num_node, edge_src, edge_dst)
        for i, val in enumerate(edge_):
            for e in val:
                self.edge_bank[i % self.row_n].put(e)

    def fill_result_banks(self, num_nodes):
        """Zero-initialize every PE's result bank (ceil(num_nodes/row_n) slots)."""
        for row_num in range(self.row_n):
            for idx_ in range(self.col_n):
                for _ in range(math.ceil(num_nodes/self.row_n)):
                    self.result_bank[row_num][idx_].append(0)

    def fill_idx(self, idx):
        """Pre-load the node ids `idx` into every cell's cache."""
        for row_num in range(self.row_n):
            for col_num in range(self.col_n):
                self.cells[row_num][col_num].set_process_id(idx)

    # For this demo, all cells will read() the values of their neighbors first
    def read(self, edge_update):
        for row in self.cells:
            for cell in row:
                cell.read(edge_update)

    # And then after all cells have read(), they will compute() the next step
    def compute(self):
        for row in self.cells:
            for cell in row:
                cell.compute()

    def terminal_signal(self):
        """True when every cell is idle: nothing held and no edges pending."""
        for row in self.cells:
            for cell in row:
                #print(cell.hold)
                #print(cell.edge_bank.empty())
                if cell.hold or not cell.edge_empty:
                    return False
        return True
        #for id_x in self.edge_bank:
        #    if not id_x.empty():
        #        return False
        #return True

    def show_staus(self):
        """Debug dump of every cell's registers."""
        for row in self.cells:
            for cell in row:
                cell.cell_state()

    # Each cycle involves a read() and a compute()
    def cycle(self, edge_update):
        # read() models register sampling on the positive edge of the clock
        self.read(edge_update)
        # compute() models the combinational logic between clock edges
        self.compute()
        #self.show_staus()

    # run() will execute the array's computation, assuming it's been filled
    def run(self, num_nodes):
        # It takes 3n-2 cycles to compute the full matrix of results
        edge_update = True
        cycle = 0
        while True:
            print("-----Cycle----{:d}----------". format(cycle))
            self.cycle(edge_update)
            edge_update = False
            #self.get_edge_output(num_nodes)
            if (self.terminal_signal()):
                break
            cycle = cycle + 1
        return 1
        #return self.get_outputs()

    # The outputs are also staggered and transposed, so we'll format them
    # before returning the results
    def get_outputs(self):
        ret = []
        return ret

    def get_edge_output(self, num_nodes):
        """Print the accumulated result for each node id."""
        for id_x in range(num_nodes):
            print("id={:d}-|-{:d}". format(id_x, self.result_bank[int(id_x%self.row_n)][0][int(id_x/self.row_n)]))
# Here we'll use a small 3x3 test multiplication to see the systolic array
# in action
row_n = 3
col_n = 1
myArray = SystolicArray(row_n, col_n)
# Alternative edge lists kept from earlier experiments:
#src = [0,1,2,1,2,0,2,0,1]
#dst = [0,1,2,0,1,2,0,1,2]
#src = [0,0,0,0,1,1,2,2,2]
#dst = [0,1,3,5,2,4,0,3,5]
# Active test graph: 9 edges over 6 destination nodes.
src = [0,1,1,2,0,0,0,2,2]
dst = [0,2,4,0,1,3,5,3,5]
#src = [0,1,1,2,0,1,0,2,2]
#dst = [0,2,4,0,1,3,5,4,5]
#src = [0,0,1,2]
#dst = [0,1,2,0]
myArray.fill_edges(6, src, dst)
#myArray.edge_load_balance(row_n, src, dst)
idx = [0,1,2]  # node ids pre-loaded into every PE's cache
myArray.fill_idx(idx)
myArray.fill_result_banks(6)
res = myArray.run(6)
#assert (res == np.matmul(activations, weights)).all()
#print('Systolic array matches numpy matmul')
import argparse, time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl import DGLGraph
from dgl import graph_index
from dgl.graph_index import disjoint_partition
from dgl.data import register_data_args, load_data
import math
# GCN hyperparameters; parsed with args=[] so defaults apply in a notebook.
parser = argparse.ArgumentParser(description='GCN')
# NOTE(review): the help text for --dataset says "dropout probability" —
# a copy-paste slip in the help string (behavior unaffected).
parser.add_argument("--dataset", type=str, default="cora",
                    help="dropout probability")
parser.add_argument("--dropout", type=float, default=0.5,
                    help="dropout probability")
parser.add_argument("--gpu", type=int, default=-1,
                    help="gpu")
parser.add_argument("--lr", type=float, default=1e-2,
                    help="learning rate")
parser.add_argument("--n-epochs", type=int, default=200,
                    help="number of training epochs")
parser.add_argument("--n-hidden", type=int, default=16,
                    help="number of hidden gcn units")
parser.add_argument("--n-layers", type=int, default=1,
                    help="number of hidden gcn layers")
parser.add_argument("--weight-decay", type=float, default=5e-4,
                    help="Weight for L2 loss")
parser.add_argument("--self-loop", action='store_true',
                    help="graph self-loop (default=False)")
parser.set_defaults(self_loop=False)
args = parser.parse_args(args=[])

# Load the citation dataset (cora by default) and unpack the tensors.
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
g = DGLGraph(data.graph)

# Split the node ids into contiguous partitions of 32 and collect each
# partition's outgoing edges.
partition_size = 32
Node_index = []    # list of node-id lists, one per partition
Edge = []          # list of (src, dst) edge lists, one per partition
Edge_number = []   # out-edge count per partition
partition_number = math.ceil(g.number_of_nodes() / partition_size)
print("the graph split to {:d} part". format(partition_number))
for node_id in range(partition_number):
    #print(node_id)
    if node_id == partition_number-1:
        # The last partition may be smaller than partition_size.
        index = list(range(partition_size*node_id, g.number_of_nodes()))
    else:
        index = list(range(partition_size*node_id, partition_size*(node_id+1)))
    Node_index.append(index)
    src, dst = g.out_edges(index)
    Edge.append(list(zip(src.tolist(), dst.tolist())))
    Edge_number.append(src.shape[0])
# NOTE(review): module-level duplicate of SystolicArray.fill_edges — it takes
# `self` but is defined outside the class and appears to be unused dead code.
def fill_edges(self, num_node, edge_src, edge_dst):
    edge_ = self.edge_preprocess(num_node, edge_src, edge_dst)
    for i, val in enumerate(edge_):
        for e in val:
            self.edge_bank[i % self.row_n].put(e)
# Run the first 32-node partition of cora through a 32x1 systolic array.
src, dst = zip(*Edge[0])
idx = Node_index[0]
row_n = 32
col_n = 1
myArray = SystolicArray(row_n, col_n)
# Earlier hand-written test graphs kept for reference:
#src = [0,1,2,1,2,0,2,0,1]
#dst = [0,1,2,0,1,2,0,1,2]
#src = [0,0,0,0,1,1,2,2,2]
#dst = [0,1,3,5,2,4,0,3,5]
#src = [0,1,1,2,0,0,0,2,2]
#dst = [0,2,4,0,1,3,5,3,5]
#src = [0,1,1,2,0,1,0,2,2]
#dst = [0,2,4,0,1,3,5,4,5]
#src = [0,0,1,2]
#dst = [0,1,2,0]
myArray.fill_edges(2708, src, dst)  # 2708 = number of nodes in cora
#myArray.edge_load_balance(row_n, src, dst)
#idx = [0,1,2]
myArray.fill_idx(idx)
myArray.fill_result_banks(2708)
edge_count = 0  # reset the global edge counter before the run
res = myArray.run(2708)
#assert (res == np.matmul(activations, weights)).all()
#print('Systolic array matches numpy matmul')
edge_count  # total edges processed by the array
```
| github_jupyter |
#Transformer
```
from google.colab import drive
drive.mount('/content/drive')

# Uses a different CSV layout (Version2) than informer, ARIMA, Prophet and
# LSTMa (translated from the original Korean comment).
!pip install pandas
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

# Samsung Electronics 6-month hourly stock series; cp949 encoding handles
# the Korean file name/headers.
df = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/Data/삼성전자_6M_ST_Version2.csv', encoding='cp949')
df.head()
df.info()

# Every column after the first is a timestamp: series run across columns.
data_start_date = df.columns[1]
data_end_date = df.columns[-1]
print('Data ranges from %s to %s' % (data_start_date, data_end_date))
```
### Train and Validation Series Partitioning
```
######################## CHECK #########################
# The base time step is one hour, so a 7-day horizon would be 7*24 steps
# (translated from the original Korean comment).
from datetime import timedelta
pred_steps = 24*2+23  # prediction horizon: 71 hourly steps
pred_length = timedelta(hours = pred_steps)

first_day = pd.to_datetime(data_start_date)
last_day = pd.to_datetime(data_end_date)

# NOTE(review): timedelta(1) is ONE DAY while pred_length is in hours —
# mixing day and hour offsets here looks suspicious; confirm the intended
# window boundaries.
val_pred_start = last_day - pred_length + timedelta(1)
val_pred_end = last_day
print(val_pred_start, val_pred_end)

train_pred_start = val_pred_start - pred_length
train_pred_end = val_pred_start - timedelta(days=1)
print(train_pred_start, train_pred_end)

# Encoder window length: everything before the train prediction window.
enc_length = train_pred_start - first_day
print(enc_length)

train_enc_start = first_day
train_enc_end = train_enc_start + enc_length - timedelta(1)
val_enc_start = train_enc_start + pred_length
val_enc_end = val_enc_start + enc_length - timedelta(1)
print(train_enc_start, train_enc_end)
print(val_enc_start, val_enc_end)

# Ultimately the model predicts the Val prediction window
# (translated from the original Korean comment).
print('Train encoding:', train_enc_start, '-', train_enc_end)
print('Train prediction:', train_pred_start, '-', train_pred_end, '\n')
print('Val encoding:', val_enc_start, '-', val_enc_end)
print('Val prediction:', val_pred_start, '-', val_pred_end)
# NOTE(review): .days truncates sub-day remainders, so these prints
# understate intervals that are expressed in hours.
print('\nEncoding interval:', enc_length.days)
print('Prediction interval:', pred_length.days)
```
## Data Formatting
```
# NOTE(review): the original (Korean) comment says "apply np.log1p", but no
# log transform is actually applied below — confirm whether it was intended.
# Map each timestamp column to its integer position in series_array.
date_to_index = pd.Series(index=pd.Index([pd.to_datetime(c) for c in df.columns[1:]]),
                          data=[i for i in range(len(df.columns[1:]))])
# All values as float32: rows are series, columns are hourly timestamps.
series_array = df[df.columns[1:]].values.astype(np.float32)
print(series_array)
def get_time_block_series(series_array, date_to_index, start_date, end_date):
    """Return the columns of `series_array` covering [start_date, end_date]."""
    column_positions = date_to_index[start_date:end_date]
    return series_array[:, column_positions]
def transform_series_encode(series_array):
    """Zero-fill NaNs, center each series on its mean, add a feature axis.

    Returns the reshaped array (n_series, n_steps, 1) together with the
    per-series means (n_series, 1) needed to undo the centering later.
    """
    filled = np.nan_to_num(series_array)  # NaN -> 0
    series_mean = filled.mean(axis=1).reshape(-1, 1)
    centered = filled - series_mean
    return centered.reshape(centered.shape[0], centered.shape[1], 1), series_mean
def transform_series_decode(series_array, encode_series_mean):
    """Centre a decode-window array with the encoder means; add a feature axis."""
    clean = np.nan_to_num(series_array)  # filling NaN with 0
    shifted = clean - encode_series_mean
    return shifted.reshape(shifted.shape[0], shifted.shape[1], 1)
# sample of series from train_enc_start to train_enc_end
encoder_input_data = get_time_block_series(series_array, date_to_index,
                                           train_enc_start, train_enc_end)
encoder_input_data, encode_series_mean = transform_series_encode(encoder_input_data)
# sample of series from train_pred_start to train_pred_end
decoder_target_data = get_time_block_series(series_array, date_to_index,
                                            train_pred_start, train_pred_end)
decoder_target_data = transform_series_decode(decoder_target_data, encode_series_mean)
# Validation windows; note encode_series_mean is overwritten with the val means.
encoder_input_val_data = get_time_block_series(series_array, date_to_index, val_enc_start, val_enc_end)
encoder_input_val_data, encode_series_mean = transform_series_encode(encoder_input_val_data)
decoder_target_val_data = get_time_block_series(series_array, date_to_index, val_pred_start, val_pred_end)
decoder_target_val_data = transform_series_decode(decoder_target_val_data, encode_series_mean)
#for d in encoder_input_data:
# print(d.shape)
#train_dataset = tf.data.Dataset.from_tensor_slices((encoder_input_data, decoder_target_data))
#train_dataset = train_dataset.batch(54)
#for d in train_dataset:
# #print(f'features:{features_tensor} target:{target_tensor}')
# print("-----")
# print(d)
```
### Transformer model
```
!pip install tensorflow_datasets
import tensorflow_datasets as tfds
import tensorflow as tf
import time
import numpy as np
import matplotlib.pyplot as plt
# Wrap the (encoder input, decoder target) arrays as tf Datasets; batched later.
train_dataset = tf.data.Dataset.from_tensor_slices((encoder_input_data, decoder_target_data))
val_dataset = tf.data.Dataset.from_tensor_slices((encoder_input_val_data, decoder_target_val_data))
### position
def get_angles(pos, i, d_model):
    """Per-(position, dimension) angles for the sinusoidal positional encoding."""
    exponent = (2 * (i // 2)) / np.float32(d_model)
    angle_rates = 1 / np.power(10000, exponent)
    return pos * angle_rates
def positional_encoding(position, d_model):
    """Build a (1, position, d_model) sinusoidal positional-encoding tensor.

    NOTE(review): the sin and cos halves are concatenated rather than
    interleaved as in the original paper; used consistently throughout the
    model this is harmless — confirm if porting weights elsewhere.
    """
    angle_rads = get_angles(np.arange(position)[:, np.newaxis],
                            np.arange(d_model)[np.newaxis, :],
                            d_model)
    sin_part = np.sin(angle_rads[:, 0::2])   # even dimensions
    cos_part = np.cos(angle_rads[:, 1::2])   # odd dimensions
    table = np.concatenate([sin_part, cos_part], axis=-1)
    table = table[np.newaxis, ...]
    return tf.cast(table, dtype=tf.float32)
# Masking
def create_padding_mask(seq):
    """Return 1.0 where ``seq`` equals 0 (padding), else 0.0.

    Shaped (batch_size, 1, 1, seq_len) so it broadcasts over attention logits.
    """
    is_pad = tf.cast(tf.math.equal(seq, 0), tf.float32)
    # add the two singleton axes needed for broadcasting against
    # (..., seq_len_q, seq_len_k) attention logits
    return is_pad[:, tf.newaxis, tf.newaxis, :]
# Sanity check: the 0 entries become 1.0 in the mask.
x = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
print(create_padding_mask(x))
def create_look_ahead_mask(size):
    """Strictly upper-triangular (size, size) mask; 1.0 marks future positions."""
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return tf.ones((size, size)) - lower_triangle  # (seq_len, seq_len)
# Sanity check: 4x4 mask hiding future positions.
x = tf.random.uniform((1, 4))
temp = create_look_ahead_mask(x.shape[1])
print(temp)
# Scaled dot product attention
def scaled_dot_product_attention(q, k, v, mask):
    """Compute softmax(q @ k^T / sqrt(d_k) + mask) @ v.

    q: (..., seq_len_q, depth); k: (..., seq_len_k, depth);
    v: (..., seq_len_v, depth_v). ``mask`` is a float tensor broadcastable to
    (..., seq_len_q, seq_len_k), or None.
    Returns (output, attention_weights).
    """
    logits = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = logits / tf.math.sqrt(depth)
    if mask is not None:
        # push masked positions toward -inf so softmax zeroes them
        logits += (mask * -1e9)
    # normalised over the last axis (seq_len_k) so rows sum to 1
    attention_weights = tf.nn.softmax(logits, axis=-1)
    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
    return output, attention_weights
# scaled dot product attention test
def print_out(q, k, v):
    """Print attention weights and output for a small hand-checkable case."""
    temp_out, temp_attn = scaled_dot_product_attention(
        q, k, v, None)
    print ('Attention weights are:')
    print (temp_attn)
    print ('Output is:')
    print (temp_out)
np.set_printoptions(suppress=True)
temp_k = tf.constant([[10,0,0],
                      [0,10,0],
                      [0,0,10],
                      [0,0,10]], dtype=tf.float32)  # (4, 3)
temp_v = tf.constant([[ 1,0],
                      [ 10,0],
                      [ 100,5],
                      [1000,6]], dtype=tf.float32)  # (4, 2)
# This `query` aligns with the second `key`,
# so the second `value` is returned.
temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32)  # (1, 3)
print_out(temp_q, temp_k, temp_v)
# Multi Head Attention
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: project q/k/v, attend per head, re-merge heads.

    Attribute names (wq/wk/wv/dense) are kept so checkpoint variable names
    remain stable.
    """
    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model
        assert d_model % self.num_heads == 0
        self.depth = d_model // self.num_heads  # per-head feature width
        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)
        self.dense = tf.keras.layers.Dense(d_model)  # output projection
    def split_heads(self, x, batch_size):
        """Reshape (batch, seq, d_model) -> (batch, num_heads, seq, depth)."""
        per_head = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(per_head, perm=[0, 2, 1, 3])
    def call(self, v, k, q, mask):
        batch_size = tf.shape(q)[0]
        q = self.split_heads(self.wq(q), batch_size)  # (batch, heads, seq_q, depth)
        k = self.split_heads(self.wk(k), batch_size)  # (batch, heads, seq_k, depth)
        v = self.split_heads(self.wv(v), batch_size)  # (batch, heads, seq_v, depth)
        attended, attention_weights = scaled_dot_product_attention(q, k, v, mask)
        # (batch, heads, seq, depth) -> (batch, seq, heads, depth) -> (batch, seq, d_model)
        attended = tf.transpose(attended, perm=[0, 2, 1, 3])
        concat_attention = tf.reshape(attended, (batch_size, -1, self.d_model))
        output = self.dense(concat_attention)
        return output, attention_weights
# multihead attention test
temp_mha = MultiHeadAttention(d_model=512, num_heads=8)
y = tf.random.uniform((1, 60, 512))  # (batch_size, encoder_sequence, d_model)
out, attn = temp_mha(y, k=y, q=y, mask=None)
out.shape, attn.shape
# activation – the activation function of encoder/decoder intermediate layer, relu or gelu (default=relu).
# Point wise feed forward network
def point_wise_feed_forward_network(d_model, dff):
    """Two-layer position-wise MLP: Dense(dff, relu) -> Dense(d_model)."""
    hidden = tf.keras.layers.Dense(dff, activation='relu')  # (batch, seq, dff)
    projection = tf.keras.layers.Dense(d_model)             # (batch, seq, d_model)
    return tf.keras.Sequential([hidden, projection])
# Point wise feed forward network test: output keeps (batch, seq, d_model).
sample_ffn = point_wise_feed_forward_network(512, 2048)
sample_ffn(tf.random.uniform((64, 50, 512))).shape
```
### Encoder and Decoder
```
# Encoder Layer
class EncoderLayer(tf.keras.layers.Layer):
    """Self-attention + position-wise FFN sub-block with residual connections.

    NOTE(review): the attributes are named layernorm* but are actually
    BatchNormalization layers; the standard Transformer uses
    LayerNormalization — confirm whether this was intentional.
    """
    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(EncoderLayer, self).__init__()
        self.mha = MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.BatchNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.BatchNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
    def call(self, x, training, mask):
        # Self-attention with residual connection + normalization.
        attn_output, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        attn_output = self.dropout1(attn_output, training=training)
        out1 = self.layernorm1(x + attn_output)
        # Feed-forward with residual connection + normalization.
        ffn_output = self.ffn(out1)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = self.layernorm2(out1 + ffn_output)  # (batch_size, input_seq_len, d_model)
        return out2
# Encoder Layer Test
sample_encoder_layer = EncoderLayer(512, 8, 2048)
sample_encoder_layer_output = sample_encoder_layer(
    tf.random.uniform((64, 43, 512)), False, None)
sample_encoder_layer_output.shape  # (batch_size, input_seq_len, d_model)
# Decoder Layer
class DecoderLayer(tf.keras.layers.Layer):
    """Masked self-attention, encoder-decoder attention, then FFN.

    NOTE(review): attributes named layernorm* are BatchNormalization layers;
    the standard Transformer uses LayerNormalization — confirm intent.
    """
    def __init__(self, d_model, num_heads, dff, rate=0.1):
        super(DecoderLayer, self).__init__()
        self.mha1 = MultiHeadAttention(d_model, num_heads)  # masked self-attention
        self.mha2 = MultiHeadAttention(d_model, num_heads)  # attends to encoder output
        self.ffn = point_wise_feed_forward_network(d_model, dff)
        self.layernorm1 = tf.keras.layers.BatchNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.BatchNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.BatchNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)
    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        # enc_output.shape == (batch_size, input_seq_len, d_model)
        attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)
        attn1 = self.dropout1(attn1, training=training)
        out1 = self.layernorm1(attn1 + x)
        # Cross-attention: queries from the decoder, keys/values from encoder.
        attn2, attn_weights_block2 = self.mha2(
            enc_output, enc_output, out1, padding_mask)
        attn2 = self.dropout2(attn2, training=training)
        out2 = self.layernorm2(attn2 + out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout3(ffn_output, training=training)
        out3 = self.layernorm3(ffn_output + out2)
        return out3, attn_weights_block1, attn_weights_block2
# Decoder layer test
sample_decoder_layer = DecoderLayer(512, 8, 2048)
sample_decoder_layer_output, _, _ = sample_decoder_layer(
    tf.random.uniform((64, 50, 512)), sample_encoder_layer_output,
    False, None, None)
sample_decoder_layer_output.shape  # (batch_size, target_seq_len, d_model)
# Encoder
class Encoder(tf.keras.layers.Layer):
    """Dense embedding + positional encoding + a stack of EncoderLayers.

    A Dense (linear, bias-free) embedding is used because inputs are
    continuous series values, not token ids.
    """
    def __init__(self, num_layers, d_model, num_heads, dff, max_len=5000,
                 rate=0.1):
        super(Encoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Dense(d_model, use_bias=False)
        self.pos_encoding = positional_encoding(max_len, self.d_model)
        self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)
    def call(self, x, training, mask):
        seq_len = tf.shape(x)[1]
        # adding embedding and position encoding
        x = self.embedding(x)
        # (batch_size, input_seq_len, d_model)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))  # scale as in the paper
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        for i in range(self.num_layers):
            x = self.enc_layers[i](x, training, mask)
        return x
# Encoder smoke test on random continuous inputs.
sample_encoder = Encoder(num_layers=2, d_model=512, num_heads=8,
                         dff=2048)
sample_encoder_output = sample_encoder(tf.random.uniform((64, 62,1)),
                                       training=False, mask=None)
print (sample_encoder_output.shape)  # (batch_size, input_seq_len, d_model)
# Decoder
class Decoder(tf.keras.layers.Layer):
    """Dense embedding + positional encoding + a stack of DecoderLayers.

    Returns (decoded tensor, dict of per-layer attention weights).
    """
    def __init__(self, num_layers, d_model, num_heads, dff, max_len=5000, rate=0.1):
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Dense(d_model, use_bias=False)
        self.pos_encoding = positional_encoding(max_len, self.d_model)
        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)
    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        seq_len = tf.shape(x)[1]
        attention_weights = {}
        x = self.embedding(x)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        for i in range(self.num_layers):
            x, block1, block2 = self.dec_layers[i](x, enc_output, training,
                                                   look_ahead_mask, padding_mask)
            # Keep per-layer attention maps for inspection.
            attention_weights['decoder_layer{}_block1'.format(i+1)] = block1
            attention_weights['decoder_layer{}_block2'.format(i+1)] = block2
        return x, attention_weights
# Decoder smoke test reusing the encoder output above.
sample_decoder = Decoder(num_layers=2, d_model=512, num_heads=8,
                         dff=2048)
output, attn = sample_decoder(tf.random.uniform((64, 26,3)),
                              enc_output=sample_encoder_output,
                              training=False, look_ahead_mask=None,
                              padding_mask=None)
output.shape, attn['decoder_layer2_block2'].shape
```
### Transformer for TS
```
class Transformer(tf.keras.Model):
    """Full encoder-decoder Transformer with a Dense head of width out_dim."""
    def __init__(self, num_layers, d_model, num_heads, dff, out_dim, max_len=5000,
                 rate=0.1):
        super(Transformer, self).__init__()
        self.encoder = Encoder(num_layers, d_model, num_heads, dff,
                               max_len, rate)
        self.decoder = Decoder(num_layers, d_model, num_heads, dff,
                               max_len, rate)
        self.final_layer = tf.keras.layers.Dense(out_dim)
    def call(self, inp, tar, training, enc_padding_mask,
             look_ahead_mask, dec_padding_mask):
        enc_output = self.encoder(inp, training, enc_padding_mask)
        dec_output, attention_weights = self.decoder(
            tar, enc_output, training, look_ahead_mask, dec_padding_mask)
        final_output = self.final_layer(dec_output)  # (batch, tar_seq_len, out_dim)
        return final_output, attention_weights
# Smoke test with random tensors.
sample_transformer = Transformer(
    num_layers=2, d_model=512, num_heads=8, dff=2048,
    out_dim=1)
temp_input = tf.random.uniform((64, 62,1))
temp_target = tf.random.uniform((64, 23,1))
fn_out, _ = sample_transformer(temp_input, temp_target,training=False,
                               enc_padding_mask=None,
                               look_ahead_mask=None,
                               dec_padding_mask=None)
fn_out.shape
# Set hyperparameters
# Maybe re-tune these against the standard Transformer defaults?
# d_model – the number of expected features in the encoder/decoder inputs (default=512).
# nhead – the number of heads in the multiheadattention models (default=8).
# num_encoder_layers – the number of sub-encoder-layers in the encoder & decoder (default=6).
# num_decoder_layers – the number of sub-decoder-layers in the decoder (default=6).
# dff(dim_feedforward) – the dimension of the feedforward network model (default=2048).
# dropout – the dropout value (default=0.1).
num_layers = 1
d_model = 64
dff = 256
num_heads = 4
dropout_rate = 0.1
input_sequence_length = 4320-(24*2+23)  # Length of the sequence used by the encoder
target_sequence_length = 24*2+23  # Length of the sequence predicted by the decoder
batch_size = 2**11
train_dataset = train_dataset.batch(batch_size)
val_dataset = val_dataset.batch(batch_size)
# Optimizer
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Warmup schedule from "Attention Is All You Need":
    lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5).
    """
    def __init__(self, d_model, warmup_steps=4000):
        super(CustomSchedule, self).__init__()
        self.d_model = tf.cast(d_model, tf.float32)
        self.warmup_steps = warmup_steps
    def __call__(self, step):
        decay_branch = tf.math.rsqrt(step)
        warmup_branch = step * (self.warmup_steps ** -1.5)
        return tf.math.rsqrt(self.d_model) * tf.math.minimum(decay_branch, warmup_branch)
learning_rate = CustomSchedule(64)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
                                     epsilon=1e-9)
# Visualise the warmup schedule (for d_model=512).
temp_learning_rate_schedule = CustomSchedule(512)
plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
# Loss and metrics
loss_object = tf.keras.losses.MeanAbsoluteError()
def loss_function(real, pred):
    """MAE intended to be masked where real == 0.

    NOTE(review): with the default reduction, loss_object(real, pred) is a
    scalar, so multiplying by the per-element mask broadcasts instead of
    masking individual timesteps — reduction=None was likely intended; confirm.
    """
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)
train_loss = tf.keras.metrics.Mean(name='train_loss')
#train_accuracy = tf.keras.metrics.mean_absolute_error()
test_loss = tf.keras.metrics.Mean(name='test_loss')
# Training and checkpoint
transformer = Transformer(num_layers, d_model, num_heads, dff,
                          out_dim=1, rate=dropout_rate)
def create_masks(inp, tar):
    """Build encoder padding, combined look-ahead, and decoder padding masks.

    Fix: the original began with ``inp = inp.reshape()`` — ``reshape`` takes
    a shape argument, so that call raises a TypeError; the line is removed.
    NOTE(review): create_padding_mask treats exact 0 as padding, which rarely
    marks real padding in continuous (mean-centred) series — confirm before
    enabling these masks in train_step.
    """
    # Encoder padding mask
    enc_padding_mask = create_padding_mask(inp)
    # Used in the 2nd attention block in the decoder.
    # This padding mask is used to mask the encoder outputs.
    dec_padding_mask = create_padding_mask(inp)
    # Used in the 1st attention block in the decoder.
    # It is used to pad and mask future tokens in the input received by
    # the decoder.
    look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
    dec_target_padding_mask = create_padding_mask(tar)
    combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
    return enc_padding_mask, combined_mask, dec_padding_mask
# check point
checkpoint_path = "./checkpoints/train"
ckpt = tf.train.Checkpoint(transformer=transformer,
                           optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
    ckpt.restore(ckpt_manager.latest_checkpoint)
    print ('Latest checkpoint restored!!')
# EPOCHS
EPOCHS=100
@tf.function
def train_step(inp, tar):
    """One teacher-forced training step on a batch of (inp, tar).

    Decoder input is the first encoder value prepended to tar[:-1].
    NOTE(review): this seeds with inp[:,0,:] (first encoder step) while
    evaluate() seeds with the last encoder step — confirm which is intended.
    """
    last_inp = tf.expand_dims(inp[:,0,:],-1)
    tar_inp = tf.concat([last_inp, tar[:,:-1,:]], axis=1)
    tar_real = tar
    #enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
    #print(enc_padding_mask)
    look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
    with tf.GradientTape() as tape:
        predictions, _ = transformer(inp, tar_inp,
                                     True,
                                     None,
                                     look_ahead_mask,
                                     None)
        loss = loss_function(tar_real, predictions)
    gradients = tape.gradient(loss, transformer.trainable_variables)
    optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
    train_loss(loss)
    #train_accuracy(tar_real, predictions)
@tf.function
def test_step(inp, tar):
    """Evaluate one validation batch and accumulate ``test_loss``.

    Runs a teacher-forced forward pass in inference mode (training=False).
    Fix: the original wrapped this in a GradientTape and called
    optimizer.apply_gradients, i.e. it *trained on the validation data*.
    Evaluation must never update the weights, so the gradient computation
    and optimizer step are removed.
    """
    last_inp = tf.expand_dims(inp[:, 0, :], -1)
    tar_inp = tf.concat([last_inp, tar[:, :-1, :]], axis=1)
    tar_real = tar
    look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
    predictions, _ = transformer(inp, tar_inp,
                                 False,
                                 None,
                                 look_ahead_mask,
                                 None)
    loss = loss_function(tar_real, predictions)
    test_loss(loss)
# Run val_dataset to predict the Val-prediction interval.
# NOTE(review): this loop only runs test_step on val_dataset; train_dataset
# is never iterated here, so no training step executes — confirm intent.
for epoch in range(EPOCHS):
    start = time.time()
    train_loss.reset_states()
    test_loss.reset_states()
    # validation:
    for (batch, (inp, tar)) in enumerate(val_dataset):
        #print(inp, tar)
        test_step(inp, tar)
    # Persist a checkpoint every 5 epochs.
    if (epoch + 1) % 5 == 0:
        ckpt_save_path = ckpt_manager.save()
        print ('Saving checkpoint for epoch {} at {}'.format(epoch+1,
                                                             ckpt_save_path))
    #print ('Epoch {} Train Loss {:.4f}'.format(epoch + 1,
    #train_loss.result()))
    #train_accuracy.result()))
    print ('Epoch {} Test Loss {:.4f}'.format(epoch + 1,
                                              test_loss.result()))
    print ('Time taken for 1 epoch: {} secs\n'.format(time.time() - start))
MAX_LENGTH = target_sequence_length
def evaluate(inp):
    """Autoregressively predict MAX_LENGTH steps from encoder input ``inp``.

    Seeds the decoder with the last encoder value, then feeds each new
    prediction back in. Returns (sequence of length MAX_LENGTH+1 including
    the seed, attention weights). The final squeeze assumes batch size 1.
    """
    encoder_input = inp
    #print(encoder_input)
    output = tf.expand_dims(encoder_input[:,-1,:],-1)  # seed: last encoder step
    #print(output)
    for i in range(MAX_LENGTH):
        look_ahead_mask = create_look_ahead_mask(tf.shape(output)[1])
        predictions, attention_weights = transformer(encoder_input,
                                                     output,
                                                     False,
                                                     None,
                                                     look_ahead_mask,
                                                     None)
        # select the last word from the seq_len dimension
        predictions = predictions[: ,-1:, :]  # (batch_size, 1, out_dim)
        #print("pred:", predictions) #
        output = tf.concat([output, predictions], axis=1)
        #print(output)
    return tf.squeeze(output, axis=0), attention_weights
def mape(y_pred, y_true):
    """Mean absolute percentage error (%). NOTE: argument order is (pred, true)."""
    relative_errors = np.abs((y_true - y_pred) / y_true)
    return np.mean(relative_errors) * 100
def MAE(y_true, y_pred):
    """Mean absolute error."""
    return np.abs(y_true - y_pred).mean()
def MSE(y_true, y_pred):
    """Mean squared error."""
    diff = y_true - y_pred
    return np.mean(diff * diff)
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
# Predict the validation window for the first series (batch of 1).
encode_series = encoder_input_val_data[0:1,:,:]
#print(encode_series)
pred_series, _ = evaluate(encode_series)
pred_series = np.array([pred_series])
encode_series = encode_series.reshape(-1,1)
pred_series = pred_series.reshape(-1,1)[1:,:]  # drop the seed value
target_series = decoder_target_val_data[0,:,:1].reshape(-1,1)
encode_series_tail = np.concatenate([encode_series[-1000:],target_series[:1]])
x_encode = encode_series_tail.shape[0]
# +0.02294 presumably re-adds a mean/price offset — TODO confirm its origin.
print(mape(pred_series[:24*2+23-23]+0.02294, target_series+0.02294))
print(MSE(target_series+0.02294, pred_series[:24*2+23-23]+0.02294))
print(MAE(target_series+0.02294, pred_series[:24*2+23-23]+0.02294))
x_encode
# Compare against the real price gap and calibrate the offset.
plt.figure(figsize=(20,6))
plt.plot(range(1,x_encode+1),encode_series_tail+0.02294)
plt.plot(range(x_encode,x_encode+pred_steps-23),target_series+0.02294,color='orange')
plt.plot(range(x_encode,x_encode+pred_steps-23),pred_series[:24*2+23-23]+0.02294,color='teal',linestyle='--')
plt.title('Encoder Series Tail of Length %d, Target Series, and Predictions' % 1000)
plt.legend(['Encoding Series','Target Series','Predictions'])
```
#Prophet
```
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
import numpy as np
# Load the stock-price CSV (CP949-encoded); first column is dropped.
df = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Data/삼성전자_6M_ST_Version1.csv", encoding='CP949')
df = df.drop(df.columns[0], axis=1)
df.columns = ["ds","y"]  # Prophet's required column names
df["ds"] = pd.to_datetime(df["ds"], dayfirst = True)
df.head()
# Fit on all but the last 48 hours, then forecast those 48 hours.
m = Prophet()
m.fit(df[:-24*2])
future = m.make_future_dataframe(freq='H',periods=24*2)
future.tail()
forecast = m.predict(future)
forecast[['ds', 'yhat']].tail()
# Overlay Prophet and Transformer forecasts on the real tail.
plt.figure(figsize=(20,5))
plt.plot(df["y"][3320:], label="real")
plt.plot(range(4320-24*2,4320),forecast['yhat'][-24*2:], label="Prophet")
plt.plot(range(4320-24*2,4320),pred_series[:24*2+23-23]+0.02294, label="Transformer")
plt.legend()
plt.show()
```
#LSTMa
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import trange
import random
data = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Data/삼성전자_6M_ST_Version1.csv", encoding='CP949')
data.head()
from sklearn.preprocessing import MinMaxScaler
# Scale the close-price column into [0, 1].
min_max_scaler = MinMaxScaler()
data["종가"] = min_max_scaler.fit_transform(data["종가"].to_numpy().reshape(-1,1))
# Hold out the last 48 hours as the test window.
train = data[:-24*2]
train = train["종가"].to_numpy()
test = data[-24*2:]
test = test["종가"].to_numpy()
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda", index=0)
class lstm_encoder(nn.Module):
    """Thin wrapper around a batch_first nn.LSTM used as the seq2seq encoder."""
    def __init__(self, input_size, hidden_size, num_layers = 1):
        super(lstm_encoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size = input_size, hidden_size = hidden_size,
                            num_layers = num_layers, batch_first=True)
    def forward(self, x_input):
        # x_input: (batch, seq, input_size)
        outputs, final_state = self.lstm(x_input)
        self.hidden = final_state  # kept on the instance as in the original
        return outputs, final_state
class lstm_decoder(nn.Module):
    """One-step LSTM decoder: LSTM stack plus a linear read-out to input_size."""
    def __init__(self, input_size, hidden_size, num_layers = 1):
        super(lstm_decoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size = input_size, hidden_size = hidden_size,
                            num_layers = num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, input_size)
    def forward(self, x_input, encoder_hidden_states):
        # x_input: (batch, input_size); unsqueeze makes a length-1 sequence.
        # NOTE(review): this layout is only consistent when input_size == 1 — confirm.
        step_out, self.hidden = self.lstm(x_input.unsqueeze(-1), encoder_hidden_states)
        return self.linear(step_out), self.hidden
class lstm_encoder_decoder(nn.Module):
    """Seq2seq LSTM: encode the input window, then decode target_len steps.

    forward() uses per-step teacher forcing with probability
    ``teacher_forcing_ratio``; predict() is fully autoregressive.
    """
    def __init__(self, input_size, hidden_size):
        super(lstm_encoder_decoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.encoder = lstm_encoder(input_size = input_size, hidden_size = hidden_size)
        self.decoder = lstm_decoder(input_size = input_size, hidden_size = hidden_size)
    def forward(self, inputs, targets, target_len, teacher_forcing_ratio):
        batch_size = inputs.shape[0]
        input_size = inputs.shape[2]
        # NOTE(review): ``outputs`` is allocated on the default (CPU) device
        # while decoder outputs follow the model's device — confirm device handling.
        outputs = torch.zeros(batch_size, target_len, input_size)
        _, hidden = self.encoder(inputs)
        decoder_input = inputs[:,-1, :]  # seed with the last observed step
        for t in range(target_len):
            out, hidden = self.decoder(decoder_input, hidden)
            out = out.squeeze(1)
            # Teacher forcing: sometimes feed the ground truth instead of the prediction.
            if random.random() < teacher_forcing_ratio:
                decoder_input = targets[:, t, :]
            else:
                decoder_input = out
            outputs[:,t,:] = out
        return outputs
    def predict(self, inputs, target_len):
        """Autoregressive prediction for one unbatched (seq, feat) input.

        Returns a 1-D numpy array of length target_len.
        """
        inputs = inputs.unsqueeze(0)
        self.eval()
        batch_size = inputs.shape[0]
        input_size = inputs.shape[2]
        outputs = torch.zeros(batch_size, target_len, input_size)
        _, hidden = self.encoder(inputs)
        decoder_input = inputs[:,-1, :]
        for t in range(target_len):
            out, hidden = self.decoder(decoder_input, hidden)
            out = out.squeeze(1)
            decoder_input = out
            outputs[:,t,:] = out
        return outputs.detach().numpy()[0,:,0]
from torch.utils.data import DataLoader, Dataset
class windowDataset(Dataset):
    """Sliding-window dataset over a 1-D series.

    Sample i pairs an input window of length ``input_window`` with the
    following ``output_window`` points, windows advancing by ``stride``.
    Each sample has shape (window, 1).
    """
    def __init__(self, y, input_window=80, output_window=20, stride=5):
        # total number of data points
        total_len = y.shape[0]
        # number of full (input + output) windows that fit at this stride
        num_samples = (total_len - input_window - output_window) // stride + 1
        # inputs and outputs, built column-per-sample then transposed
        inputs = np.zeros([input_window, num_samples])
        targets = np.zeros([output_window, num_samples])
        for sample_idx in np.arange(num_samples):
            in_start = stride * sample_idx
            in_end = in_start + input_window
            inputs[:, sample_idx] = y[in_start:in_end]
            out_start = in_end
            out_end = out_start + output_window
            targets[:, sample_idx] = y[out_start:out_end]
        # (window, samples) -> (samples, window, 1)
        self.x = inputs.reshape(input_window, num_samples, 1).transpose((1, 0, 2))
        self.y = targets.reshape(output_window, num_samples, 1).transpose((1, 0, 2))
        self.len = len(self.x)
    def __getitem__(self, i):
        return self.x[i], self.y[i]
    def __len__(self):
        return self.len
iw = 24*4  # encoder input window: 4 days of hourly points
ow = 24*2  # forecast window: 2 days
train_dataset = windowDataset(train, input_window=iw, output_window=ow, stride=1)
train_loader = DataLoader(train_dataset, batch_size=64)
# y_train_loader = DataLoader(y_train, batch_size=5)
model = lstm_encoder_decoder(input_size=1, hidden_size=16).to(device)
# model.train_model(X_train.to(device), y_train.to(device), n_epochs=100, target_len=ow, batch_size=5, training_bprediction="mixed_teacher_forcing", teacher_forcing_ratio=0.6, learning_rate=0.01, dynamic_tf=False)
# 5000 epochs took too long and gave larger error, so reduced to 100.
learning_rate=0.01
epoch = 100
optimizer = optim.Adam(model.parameters(), lr = learning_rate)
criterion = nn.MSELoss()
from tqdm import tqdm
model.train()
with tqdm(range(epoch)) as tr:
    for i in tr:
        total_loss = 0.0
        for x,y in train_loader:
            optimizer.zero_grad()
            x = x.to(device).float()
            y = y.to(device).float()
            # 0.6 = teacher forcing ratio
            output = model(x, y, ow, 0.6).to(device)
            loss = criterion(output, y)
            loss.backward()
            optimizer.step()
            total_loss += loss.cpu().item()
        tr.set_postfix(loss="{0:.5f}".format(total_loss/len(train_loader)))
# Sanity-check prediction on the first training sample.
predict = model.predict(torch.tensor(train_dataset[0][0]).to(device).float(), target_len=ow)
real = train_dataset[0][1]
# Forecast the held-out window from the last 4 days of training data.
predict = model.predict(torch.tensor(train[-24*2*2:]).reshape(-1,1).to(device).float(), target_len=ow)
real = data["종가"].to_numpy()
predict = min_max_scaler.inverse_transform(predict.reshape(-1,1))
real = min_max_scaler.inverse_transform(real.reshape(-1,1))
real.shape
plt.figure(figsize=(20,5))
plt.plot(range(3319,4320), real[3320:], label="real")
plt.plot(range(4320-24*2,4320), predict[-24*2:], label="LSTMa")
plt.plot(range(4320-24*2,4320),forecast['yhat'][-24*2:], label="Prophet")
plt.plot(range(4320-24*2,4320),pred_series[:24*2+23-23]+0.02294, label="Transformer")
plt.legend()
plt.show()
```
#Informer
```
!git clone https://github.com/zhouhaoyi/Informer2020.git
from google.colab import drive
drive.mount('/content/drive')
import sys
# Make the cloned Informer repo importable.
if not 'Informer2020' in sys.path:
    sys.path += ['Informer2020']
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from datetime import timedelta
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from models.model import Informer
class StandardScaler():
    """Column-wise standardiser that accepts numpy arrays or torch tensors."""
    def __init__(self):
        self.mean = 0.
        self.std = 1.
    def fit(self, data):
        self.mean = data.mean(0)
        self.std = data.std(0)
    def _match(self, stat, data):
        # Move the fitted numpy statistics onto the tensor's dtype/device
        # when transforming a torch tensor; pass through for numpy input.
        if torch.is_tensor(data):
            return torch.from_numpy(stat).type_as(data).to(data.device)
        return stat
    def transform(self, data):
        return (data - self._match(self.mean, data)) / self._match(self.std, data)
    def inverse_transform(self, data):
        return data * self._match(self.std, data) + self._match(self.mean, data)
def time_features(dates, freq='h'):
    """Extract calendar features for ``dates['date']`` as a 2-D int array.

    ``freq`` selects which features are returned (case-insensitive).
    NOTE: mutates ``dates`` in place by adding the feature columns, matching
    the original behaviour; ``minute`` is bucketed into 15-minute slots.
    """
    stamps = dates.date
    dates['month'] = stamps.apply(lambda ts: ts.month)
    dates['day'] = stamps.apply(lambda ts: ts.day)
    dates['weekday'] = stamps.apply(lambda ts: ts.weekday())
    dates['hour'] = stamps.apply(lambda ts: ts.hour)
    dates['minute'] = stamps.apply(lambda ts: ts.minute)
    dates['minute'] = dates.minute.map(lambda m: m // 15)
    freq_map = {
        'y': [],
        'm': ['month'],
        'w': ['month'],
        'd': ['month', 'day', 'weekday'],
        'b': ['month', 'day', 'weekday'],
        'h': ['month', 'day', 'weekday', 'hour'],
        't': ['month', 'day', 'weekday', 'hour', 'minute'],
    }
    return dates[freq_map[freq.lower()]].values
def _process_one_batch(batch_x, batch_y, batch_x_mark, batch_y_mark):
    """Run one Informer batch; return (model outputs, target horizon slice).

    Relies on module-level globals: device, model, pred_len, label_len.
    The decoder input is label_len known steps followed by pred_len zeros.
    """
    batch_x = batch_x.float().to(device)
    batch_y = batch_y.float()
    batch_x_mark = batch_x_mark.float().to(device)
    batch_y_mark = batch_y_mark.float().to(device)
    # Zero placeholders for the horizon the model must predict.
    dec_inp = torch.zeros([batch_y.shape[0], pred_len, batch_y.shape[-1]]).float()
    dec_inp = torch.cat([batch_y[:,:label_len,:], dec_inp], dim=1).float().to(device)
    outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
    batch_y = batch_y[:,-pred_len:,0:].to(device)
    return outputs, batch_y
class Dataset_Pred(Dataset):
    """Sliding-window dataset feeding Informer (seq_len, label_len, pred_len).

    Builds time-feature stamps extending pred_len steps past the last
    observed date so the decoder has marks for the forecast horizon.
    """
    def __init__(self, dataframe, size=None, scale=True):
        # size = (seq_len, label_len, pred_len); must not be None.
        self.seq_len = size[0]
        self.label_len = size[1]
        self.pred_len = size[2]
        self.dataframe = dataframe
        self.scale = scale
        self.__read_data__()
    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = self.dataframe
        df_raw["date"] = pd.to_datetime(df_raw["date"])
        # Infer the sampling frequency from the first two timestamps.
        delta = df_raw["date"].iloc[1] - df_raw["date"].iloc[0]
        if delta>=timedelta(hours=1):
            self.freq='h'
        else:
            self.freq='t'
        border1 = 0
        border2 = len(df_raw)
        cols_data = df_raw.columns[1:]
        df_data = df_raw[cols_data]
        if self.scale:
            self.scaler.fit(df_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values
        tmp_stamp = df_raw[['date']][border1:border2]
        tmp_stamp['date'] = pd.to_datetime(tmp_stamp.date)
        # Extend the date range pred_len steps past the last observation.
        pred_dates = pd.date_range(tmp_stamp.date.values[-1], periods=self.pred_len+1, freq=self.freq)
        df_stamp = pd.DataFrame(columns = ['date'])
        df_stamp.date = list(tmp_stamp.date.values) + list(pred_dates[1:])
        data_stamp = time_features(df_stamp, freq=self.freq)
        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp
    def __getitem__(self, index):
        # Encoder window [s_begin, s_end); decoder window overlaps its tail.
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len
        seq_x = self.data_x[s_begin:s_end]
        seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]
        return seq_x, seq_y, seq_x_mark, seq_y_mark
    def __len__(self):
        return len(self.data_x) - self.seq_len- self.pred_len + 1
data = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Data/삼성전자_6M_ST_Version1.csv", encoding='CP949')
data.head()
data["date"] = data["날짜"]
data["date"] = pd.to_datetime(data["date"], dayfirst = True)
data["value"] = data["종가"]
min_max_scaler = MinMaxScaler()
data["value"] = min_max_scaler.fit_transform(data["value"].to_numpy().reshape(-1,1)).reshape(-1)
data = data[["date", "value"]]
data_train = data.iloc[:-24*2].copy()
pred_len = 24*2
seq_len = pred_len#인풋 크기
label_len = pred_len#디코더에서 참고할 크기
pred_len = pred_len#예측할 크기
batch_size = 10
shuffle_flag = True
num_workers = 0
drop_last = True
dataset = Dataset_Pred(dataframe=data_train ,scale=True, size = (seq_len, label_len,pred_len))
data_loader = DataLoader(dataset,batch_size=batch_size,shuffle=shuffle_flag,num_workers=num_workers,drop_last=drop_last)
enc_in = 1
dec_in = 1
c_out = 1
device = torch.device("cuda:0")
model = Informer(enc_in, dec_in, c_out, seq_len, label_len, pred_len, device = device).to(device)
learning_rate = 1e-4
criterion = nn.MSELoss()
model_optim = optim.Adam(model.parameters(), lr=learning_rate)
# Informer는 error를 100하는게 시간도 덜 걸리고 에러도 적다.
train_epochs = 100
model.train()
progress = tqdm(range(train_epochs))
for epoch in progress:
train_loss = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(data_loader):
model_optim.zero_grad()
pred, true = _process_one_batch(batch_x, batch_y, batch_x_mark, batch_y_mark)
loss = criterion(pred, true)
train_loss.append(loss.item())
loss.backward()
model_optim.step()
train_loss = np.average(train_loss)
progress.set_description("loss: {:0.6f}".format(train_loss))
# --- Single-shot inference over the last window, timed end to end ---
import time
now = time.time()
scaler = dataset.scaler
df_test = data_train.copy()
# NOTE(review): passing a 1-D Series to MinMaxScaler.transform relies on old
# scikit-learn behaviour; newer versions require a 2-D array — confirm versions.
df_test["value"] = scaler.transform(df_test["value"])
df_test["date"] = pd.to_datetime(df_test["date"].values)
delta = df_test["date"][1] - df_test["date"][0]
# Extend the frame by `pred_len` future timestamps (values NaN, then zero-filled).
# NOTE(review): DataFrame.append was removed in pandas 2.0 (pd.concat replaces it).
for i in range(pred_len):
    df_test = df_test.append({"date":df_test["date"].iloc[-1]+delta}, ignore_index=True)
df_test = df_test.fillna(0)
df_test_x = df_test.iloc[-seq_len-pred_len:-pred_len].copy()
df_test_y = df_test.iloc[-label_len-pred_len:].copy()
df_test_numpy = df_test.to_numpy()[:,1:].astype("float")
test_time_x = time_features(df_test_x, freq=dataset.freq)  # input time stamps
test_data_x = df_test_numpy[-seq_len-pred_len:-pred_len]  # input data
test_time_y = time_features(df_test_y, freq=dataset.freq)  # output time stamps
test_data_y =df_test_numpy[-label_len-pred_len:]
test_data_y[-pred_len:] = np.zeros_like(test_data_y[-pred_len:])  # zero out the part to be predicted
test_time_x = test_time_x
test_time_y = test_time_y
test_data_y = test_data_y.astype(np.float64)
test_data_x = test_data_x.astype(np.float64)
# Wrap the single test window in a one-element DataLoader.
_test = [(test_data_x,test_data_y,test_time_x,test_time_y)]
_test_loader = DataLoader(_test,batch_size=1,shuffle=False)
preds = []
with torch.no_grad():
    for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(_test_loader):
        batch_x = batch_x.float().to(device)
        batch_y = batch_y.float().to(device)
        batch_x_mark = batch_x_mark.float().to(device)
        batch_y_mark = batch_y_mark.float().to(device)
        outputs = model(batch_x, batch_x_mark, batch_y, batch_y_mark)
# Only one batch exists, so `outputs` holds the full forecast after the loop.
preds = outputs.detach().cpu().numpy()
preds = scaler.inverse_transform(preds[0])
# Write the de-normalised forecast back into the frame's future rows.
df_test.iloc[-pred_len:, 1:] = preds
print(time.time() - now)
# --- Compare all model forecasts on the original price scale ---
import matplotlib.pyplot as plt
real = data["value"].to_numpy()
result = df_test["value"].iloc[-24*2:].to_numpy()
# Undo the min-max scaling so the plot is in original price units.
real = min_max_scaler.inverse_transform(real.reshape(-1,1)).reshape(-1)
result = min_max_scaler.inverse_transform(result.reshape(-1,1)).reshape(-1)
plt.figure(figsize=(20,5))
plt.plot(range(3319,4320),real[3320:], label="real")
plt.plot(range(4320-24*2,4320),result, label="Informer")
# NOTE(review): `predict`, `forecast` and `pred_series` come from other notebook
# cells (LSTMa / Prophet / Transformer experiments) not shown in this file chunk.
plt.plot(range(4320-24*2,4320), predict[-24*2:], label="LSTMa")
plt.plot(range(4320-24*2,4320),forecast['yhat'][-24*2:], label="Prophet")
plt.plot(range(4320-24*2,4320),pred_series[:24*2+23-23]+0.02294, label="Transformer")
plt.legend()
plt.show()
```
# ARIMA
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the same Samsung CSV and rename columns Prophet-style: ds (date), y (price).
df = pd.read_csv("/content/drive/MyDrive/Colab Notebooks/Data/삼성전자_6M_ST_Version1.csv", encoding='CP949')
df = df.drop(df.columns[0], axis=1)
df.columns = ["ds","y"]
df.head()
# Train on everything except the final 48-point evaluation window.
df_train = df.iloc[:-24*2]
# ACF / PACF diagnostics over the training series to guide (p, q) selection.
from statsmodels.tsa.seasonal import seasonal_decompose
import statsmodels.api as sm
fig = plt.figure(figsize=(20,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(df_train["y"], lags=20, ax=ax1)
# NOTE(review): this opens a second, separate figure for the PACF; the 211/212
# subplot codes suggest both were meant to share one figure — confirm intent.
fig = plt.figure(figsize=(20,8))
ax1 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(df_train["y"], lags=20, ax=ax1)
# --- SARIMAX hyper-parameter grid search over (p, d, q) x seasonal (P, D, Q, m) ---
from statsmodels.tsa.arima_model import ARIMA  # unused here; kept for notebook compatibility
from statsmodels.tsa.statespace.sarimax import SARIMAX
import itertools
from tqdm import tqdm
p = range(0,3)
d = range(1,2)
q = range(0,6)
m = 24  # seasonal period
pdq = list(itertools.product(p,d,q))
seasonal_pdq = [(x[0],x[1], x[2], m) for x in list(itertools.product(p,d,q))]
aic = []  # AIC of every candidate that converged
params = []  # matching (order, seasonal_order) pairs
with tqdm(total = len(pdq) * len(seasonal_pdq)) as pg:
    for i in pdq:
        for j in seasonal_pdq:
            pg.update(1)
            try:
                # BUG FIX: the keyword was misspelled 'season_order', so the
                # seasonal component was silently ignored for every candidate
                # and the seasonal half of the grid search was wasted.
                model = SARIMAX(df_train["y"], order=i, seasonal_order=j)
                model_fit = model.fit()
                # print("SARIMA:{}{}, AIC:{}".format(i,j, round(model_fit.aic,2)))
                aic.append(round(model_fit.aic,2))
                params.append((i,j))
            except Exception:
                # Some (order, seasonal_order) pairs are invalid or fail to
                # converge; skip them and keep searching.
                continue
# Select the parameter pair with the minimum AIC.
optimal = [(params[i],j) for i,j in enumerate(aic) if j == min(aic)]
model_opt = SARIMAX(df_train["y"], order = optimal[0][0][0], seasonal_order = optimal[0][0][1])
model_opt_fit = model_opt.fit()
model_opt_fit.summary()
# Refit the best model and forecast the 48-point hold-out window.
model = SARIMAX(df_train["y"], order=optimal[0][0][0], seasonal_order=optimal[0][0][1])
model_fit = model.fit(disp=0)
ARIMA_forecast = model_fit.forecast(steps=24*2)
# Plot 1: full history with every model's forecast overlaid.
plt.figure(figsize=(20,5))
plt.plot(range(0,4320), df["y"].iloc[1:], label="Real")
plt.plot(ARIMA_forecast, label="ARIMA")
plt.plot(range(4320-24*2,4320),result, label="Informer")
# NOTE(review): `predict`, `forecast`, `pred_series` come from other notebook cells.
plt.plot(range(4320-24*2,4320), predict[-24*2:], label="LSTMa")
plt.plot(range(4320-24*2,4320),forecast['yhat'][-24*2:], label="Prophet")
plt.plot(range(4320-24*2,4320),pred_series[:24*2+23-23]+0.02294, label="Transformer")
plt.legend()
plt.show()
# Plot 2: zoomed view of the last ~1000 points around the forecast window.
plt.figure(figsize=(20,5))
plt.plot(range(3319,4320), df["y"].iloc[3320:], label="Real")
plt.plot(ARIMA_forecast, label="ARIMA")
plt.plot(range(4320-24*2,4320),result, label="Informer")
plt.plot(range(4320-24*2,4320), predict[-24*2:], label="LSTMa")
plt.plot(range(4320-24*2,4320),forecast['yhat'][-24*2:], label="Prophet")
plt.plot(range(4320-24*2,4320),pred_series[:24*2+23-23]+0.02294, label="Transformer")
plt.legend()
plt.show()
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
def MAPEval(y_pred, y_true):
    """Mean absolute percentage error (in percent) of *y_pred* against *y_true*."""
    pct_errors = np.abs((y_true - y_pred) / y_true)
    return np.mean(pct_errors) * 100
def MSE(y_true, y_pred):
    """Mean squared error between *y_true* and *y_pred*."""
    diff = y_true - y_pred
    return np.mean(diff ** 2)
def MAE(y_true, y_pred):
    """Mean absolute error between *y_true* and *y_pred*."""
    return np.mean(np.absolute(y_true - y_pred))
# Report MAPE / MSE / MAE for each model's forecast over the held-out window.
# BUG FIX: the original called an undefined name `mape`; the metric defined
# above is `MAPEval(y_pred, y_true)`, whose argument order matches these calls.
print('Transformer')
print('-' * 40)
print('MAPE: {} |\nMSE: {} |\nMAE : {}\n'.format(MAPEval(pred_series[:24*2+23-23]+0.02294, target_series+0.02294), mean_squared_error(target_series+0.02294, pred_series[:24*2+23-23]+0.02294), mean_absolute_error(target_series+0.02294, pred_series[:24*2+23-23]+0.02294)))
print('Informer')
print('-' * 40)
print('MAPE: {} |\nMSE: {} |\nMAE : {}\n'.format(MAPEval(result, real[-24*2:]), mean_squared_error(real[-24*2:], result), mean_absolute_error(real[-24*2:], result)))
print('ARIMA')
print('-' * 40)
print('MAPE: {} |\nMSE: {} |\nMAE : {}\n'.format(MAPEval(ARIMA_forecast, df["y"].iloc[-24*2:]), mean_squared_error(df["y"].iloc[-24*2:], ARIMA_forecast), mean_absolute_error(df["y"].iloc[-24*2:], ARIMA_forecast)))
print('Prophet')
print('-' * 40)
print('MAPE: {} |\nMSE: {} |\nMAE : {}\n'.format(MAPEval(forecast['yhat'][4320-24*2:],df["y"][4320-24*2:]), mean_squared_error(df["y"][4320-24*2:], forecast['yhat'][4320-24*2:]), mean_absolute_error(df["y"][4320-24*2:], forecast['yhat'][4320-24*2:])))
print('LSTMa')
print('-' * 40)
print('MAPE: {} |\nMSE: {} |\nMAE : {}\n'.format(MAPEval(predict[-24*2:],real[-24*2:]), mean_squared_error(real[-24*2:], predict[-24*2:]), mean_absolute_error(real[-24*2:], predict[-24*2:])))
```
| github_jupyter |
```
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras import backend as K
import matplotlib.pyplot as plt
import pandas as pd
import datetime
```
# Load classification model
```
# Pre-trained classifier used later to score the VAE's reconstructions.
classification_model = tf.keras.models.load_model('TrainedModel/trainedModel.h5')
```
# Load dataset
### And convert it to numpy array
```
# Load the side-channel traces (X) and labels (Y) from headerless CSVs.
train_X = pd.read_csv('/home/ege/Repo/SideChannel-AdversarialAI/Tensorflow/DataSet/trainX13.csv', header=None)
train_Y = pd.read_csv('/home/ege/Repo/SideChannel-AdversarialAI/Tensorflow/DataSet/trainY13.csv', header=None)
trainY = train_Y.to_numpy()
trainX = train_X.to_numpy()
# Add a trailing channel axis so Conv1D layers accept the data
# (assumes each trace has 6000 samples, matching the encoder input — confirm).
trainX = np.expand_dims(trainX,axis=2)
```
# Normalize dataset
```
# Min-max normalise the raw traces into [0, 1]; keep min/max so the scaling
# can be inverted later when plotting reconstructions.
minimum = trainX.min()
maximum = trainX.max()
value_range = maximum - minimum
trainX_normalized = (trainX - minimum) / value_range
# To fit on a single class only, slice every 14th sample, e.g.:
#   classToCut = 6
#   trainXCUT = trainX[classToCut::14]
#   trainYCUT = trainY[classToCut::14]
```
# Define Sampling layer as a subclass of keras.layers.Layer
## Sampling layer: Layer that samples a random point in latent space from a distribution with a mean and variance
```
class Sampling(layers.Layer):
    """Reparameterisation trick: draw z from N(z_mean, exp(z_log_var))."""

    def call(self, inputs):
        mean, log_var = inputs
        # Sample standard-normal noise with the same (batch, latent_dim) shape.
        noise_shape = (tf.shape(mean)[0], tf.shape(mean)[1])
        epsilon = tf.keras.backend.random_normal(shape=noise_shape)
        # Scale by the standard deviation (exp of half the log-variance) and shift.
        return mean + tf.exp(0.5 * log_var) * epsilon
```
## Define latent space dimension
```
latent_dim = 2  # dimensionality of the VAE latent space
```
# Encoder
```
# Convolutional encoder: a 6000-sample trace -> (z_mean, z_log_var, z).
encoder_inputs = keras.Input(shape=(6000,1))
# Four strided Conv1D stages, each halving the time axis (6000 -> 375).
x = layers.Conv1D(256,16,strides=2,padding='same',activation='relu')(encoder_inputs)#possibly update kernel_initializer
#x = layers.MaxPooling1D(pool_size = 4,strides = 4, padding = 'same')(x)
x = layers.Conv1D(128,16,strides=2,padding='same',activation='relu')(x)#possibly update kernel_initializer
#x = layers.MaxPooling1D(pool_size = 4,strides = 4, padding = 'same')(x)
x = layers.Conv1D(64,8,strides=2,padding='same',activation='relu')(x)#possibly update kernel_initializer
#x = layers.MaxPooling1D(pool_size = 4,strides = 4, padding = 'same')(x)
x = layers.Conv1D(32,8,strides=2,padding='same',activation='relu')(x)#possibly update kernel_initializer
#x = layers.MaxPooling1D(pool_size = 4,strides = 4, padding = 'same')(x)
# Remember the pre-flatten shape so the decoder can mirror it exactly.
shape_before_flattening = K.int_shape(x)
flatten_1 = layers.Flatten()(x)
#x = layers.LSTM(32,activation='tanh',recurrent_activation='hard_sigmoid',use_bias=True,kernel_initializer='VarianceScaling',recurrent_initializer = 'orthogonal',bias_initializer='Zeros', return_sequences = True)(flatten_1) #Variance Scaling
# Dense bottleneck down to the two latent heads.
x = layers.Dense(64 , activation="relu")(flatten_1)
x = layers.Dense(32 , activation="relu")(x)
x = layers.Dense(16 , activation="relu")(x)
# Both heads are zero-initialised, so the latent distribution starts at N(0, 1).
z_mean = layers.Dense(latent_dim, name="z_mean",kernel_initializer='Zeros',bias_initializer = 'Zeros')(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var",kernel_initializer='Zeros',bias_initializer = 'Zeros')(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
```
# Decoder
```
# Decoder: mirror of the encoder, mapping a latent point back to a 6000x1 trace.
latent_inputs = keras.Input(shape=(latent_dim,))
x = layers.Dense(16 , activation="relu")(latent_inputs)
x = layers.Dense(32 , activation="relu")(x)
x = layers.Dense(64 , activation="relu")(x)
#x = layers.LSTM(32,activation='tanh',recurrent_activation='hard_sigmoid',use_bias=True,kernel_initializer='VarianceScaling',recurrent_initializer = 'orthogonal',bias_initializer='Zeros', return_sequences = True)(x) #Variance Scaling
# Expand back to the encoder's pre-flatten shape, then un-flatten.
x = layers.Dense(np.prod(shape_before_flattening[1:]), activation="relu")(x)
x = layers.Reshape(shape_before_flattening[1:])(x)
# Transposed convolutions reverse the encoder's strided downsampling.
x = layers.Conv1DTranspose(32, 8, activation="relu", strides=2,padding='same')(x)
x = layers.Conv1DTranspose(64, 8, activation="relu", strides=2,padding='same')(x)
x = layers.Conv1DTranspose(128, 16, activation="relu", strides=2,padding='same')(x)
x = layers.Conv1DTranspose(256, 16, activation="relu", strides=2,padding='same')(x)
# Sigmoid output matches the [0, 1] range of the normalised input traces.
decoder_outputs = layers.Conv1DTranspose(1, 16, padding="same",activation="sigmoid")(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
decoder.summary()
```
# Defining subclass VAE
## VAE is a subclass of keras.Model class
```
class VAE(keras.Model):
    """Variational autoencoder wrapping the conv encoder/decoder with a custom train step."""
    def __init__(self, encoder, decoder, **kwargs):
        super(VAE, self).__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder
        # Running means that `fit` reports for each loss component.
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
        self.reconstruction_loss_tracker = keras.metrics.Mean(
            name="reconstruction_loss"
        )
        self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss")
    @property
    def metrics(self):
        # Listing the trackers here lets Keras reset them at each epoch start.
        return [
            self.total_loss_tracker,
            self.reconstruction_loss_tracker,
            self.kl_loss_tracker,
        ]
    def train_step(self, data):
        """One gradient step: reconstruction BCE plus a KL regulariser."""
        with tf.GradientTape() as tape:
            z_mean, z_log_var, z = self.encoder(data)
            reconstruction = self.decoder(z)
            # Per-sample BCE summed over the time axis, averaged over the batch.
            reconstruction_loss = tf.reduce_mean(
                tf.reduce_sum(
                    keras.losses.binary_crossentropy(data, reconstruction),axis=(1)
                )
            )
            # NOTE(review): the textbook VAE KL term is -0.5*(1 + log_var - mean^2
            # - exp(log_var)) summed over the latent axis (axis=1); here the factor
            # is -1 and the sum runs over axis=0 (the batch axis) — confirm this
            # weighting is intentional before changing it.
            kl_loss = -1 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
            kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=0))
            total_loss = reconstruction_loss + kl_loss
            #total_loss = reconstruction_loss #ABSOLUTELY CHANGE!
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        self.total_loss_tracker.update_state(total_loss)
        self.reconstruction_loss_tracker.update_state(reconstruction_loss)
        self.kl_loss_tracker.update_state(kl_loss)
        return {
            "loss": self.total_loss_tracker.result(),
            "reconstruction_loss": self.reconstruction_loss_tracker.result(),
            "kl_loss": self.kl_loss_tracker.result(),
        }
```
# Train model
```
# Train the VAE on the normalised traces.
vae = VAE(encoder, decoder)
vae.compile(optimizer=keras.optimizers.Adam())
history = vae.fit(trainX_normalized, epochs=25, batch_size=128)
```
# Test reconstructed dataset with the classification model
### Predict reconstructed dataset
```
# For each of the 14 classes: reconstruct that class's traces through the VAE,
# then measure how often the classifier ranks the true class in its top 3.
sumOfAccuracy = 0
for j in range(14):
    classPrediction = j
    # The dataset interleaves classes, so every 14th row belongs to class j.
    trainXCUT = trainX_normalized[classPrediction::14]
    z_mean, z_log_var, z = vae.encoder.predict(trainXCUT)
    reconstructed_x = vae.decoder.predict(z)
    predictions = classification_model.predict(reconstructed_x)
    #print(predictions)
    correctPredAmt = 0
    for i, y_i in enumerate(predictions):
        # Take the three highest-scoring classes by repeatedly zeroing the argmax
        # (note: this mutates the prediction row in place).
        firstHighest = np.argmax(y_i)
        y_i[firstHighest] = 0
        secondHighest = np.argmax(y_i)
        y_i[secondHighest] = 0
        thirdHighest = np.argmax(y_i)
        if(firstHighest == classPrediction or secondHighest == classPrediction or thirdHighest == classPrediction):
            correctPredAmt = correctPredAmt + 1
        #print(str(firstHighest) +", "+str(secondHighest)+", "+str(thirdHighest))
    accuracy = correctPredAmt/(len(predictions))
    sumOfAccuracy = sumOfAccuracy + accuracy
    print("Class "+str(j)+": "+str(accuracy))
averageAccuracy = sumOfAccuracy/14
print("Average: "+ str(averageAccuracy))
```
### Evaluate reconstructed dataset
```
# Evaluate the classifier on each class's VAE reconstructions.
for i in range(14):
    trainXCUT = trainX_normalized[i::14]
    trainYCUT = trainY[i::14]
    z_mean, z_log_var, z = vae.encoder.predict(trainXCUT)
    reconstructed_x = vae.decoder.predict(z)
    classification_model.evaluate(reconstructed_x,trainYCUT)
# Visual check for one class: de-normalise a reconstruction and overlay it on
# its source trace.
classToCut = 6
trainXCUT = trainX_normalized[classToCut::14]
trainYCUT = trainY[classToCut::14]
z_mean, z_log_var, z = vae.encoder.predict(trainXCUT)
reconstructed_x = vae.decoder.predict(z)*(maximum-minimum)+minimum
fig = plt.figure(figsize=(40,5))
#plt.plot(results)
sampleToPredict = 15
plt.plot(reconstructed_x[sampleToPredict],label='Reconstruction')
plt.plot(trainXCUT[sampleToPredict]*(maximum-minimum)+minimum,label='Sample')
#plt.plot(data3[0],label=3)
#plt.plot(data4[0],label=4)
#plt.plot(averageArray[0])
plt.legend()
plt.yticks(np.arange(0, 9, 1))
plt.xticks(np.arange(0, 6000, 500))
plt.grid()
#plt.axhline(linewidth=1, color='r')
plt.xlabel("5 ms")
plt.ylabel("PnP timing")
#figure(figsize=(8, 6), dpi=80)
fig.savefig('vis_test.png',dpi=200)
```
| github_jupyter |
# Preliminary Analysis of Statement Sentiment by Lexicon
Analyse each statement with the Loughran and McDonald word list to examine the relationship between the economy and net sentiment.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import os
import pickle
import codecs
import re
from tqdm import tqdm_notebook as tqdm
#For tokenizing sentences
import nltk
nltk.download('punkt')  # sentence-tokenizer models used by nltk
# NOTE(review): the 'seaborn-whitegrid' style name was removed in matplotlib >= 3.6
# (renamed 'seaborn-v0_8-whitegrid') — confirm the pinned matplotlib version.
plt.style.use('seaborn-whitegrid')
```
### Reading the data in
```
# Load the pickled FOMC statements DataFrame and flatten each document into a
# single line of text (strip section markers and newlines).
# FIX: use a context manager so the file handle is closed (the original leaked it).
with open("../data/FOMC/statement.pickle", "rb") as file:
    Data = pickle.load(file)
Data['text'] = Data['contents'].apply(lambda x: x.replace('\n\n[SECTION]\n\n', '').replace('\n', ' ').replace('\r', ' ').strip())
Data.drop(columns=['title'], axis=1, inplace=True)
# Peek at the most recent statement.
print('Date: ', Data.iloc[-1]['date'].strftime('%Y-%m-%d'))
print('Speaker: ', Data.iloc[-1]['speaker'])
print('Text: \n\n', Data.iloc[-1]['contents'].replace('\n[SECTION]\n', '')[1192:])
```
### Creating a Financial Dictionary based on Loughran and McDonald
```
# The dictionary tone assessment later matches statements back by position, so
# keep an explicit integer index column before the dates become the index.
Data['Index'] = range(0, len(Data))
# Make the 'date' column the index of Data.
Data.set_index(['date'], inplace=True)
Data.head()
import re
# Loughran and McDonald Sentiment Word Lists (https://sraf.nd.edu/textual-analysis/resources/)
lmdict = {'Negative': ['abandon', 'abandoned', 'abandoning', 'abandonment', 'abandonments', 'abandons', 'abdicated',
'abdicates', 'abdicating', 'abdication', 'abdications', 'aberrant', 'aberration', 'aberrational',
'aberrations', 'abetting', 'abnormal', 'abnormalities', 'abnormality', 'abnormally', 'abolish',
'abolished', 'abolishes', 'abolishing', 'abrogate', 'abrogated', 'abrogates', 'abrogating',
'abrogation', 'abrogations', 'abrupt', 'abruptly', 'abruptness', 'absence', 'absences',
'absenteeism', 'abuse', 'abused', 'abuses', 'abusing', 'abusive', 'abusively', 'abusiveness',
'accident', 'accidental', 'accidentally', 'accidents', 'accusation', 'accusations', 'accuse',
'accused', 'accuses', 'accusing', 'acquiesce', 'acquiesced', 'acquiesces', 'acquiescing',
'acquit', 'acquits', 'acquittal', 'acquittals', 'acquitted', 'acquitting', 'adulterate',
'adulterated', 'adulterating', 'adulteration', 'adulterations', 'adversarial', 'adversaries',
'adversary', 'adverse', 'adversely', 'adversities', 'adversity', 'aftermath', 'aftermaths',
'against', 'aggravate', 'aggravated', 'aggravates', 'aggravating', 'aggravation', 'aggravations',
'alerted', 'alerting', 'alienate', 'alienated', 'alienates', 'alienating', 'alienation',
'alienations', 'allegation', 'allegations', 'allege', 'alleged', 'allegedly', 'alleges',
'alleging', 'annoy', 'annoyance', 'annoyances', 'annoyed', 'annoying', 'annoys', 'annul',
'annulled', 'annulling', 'annulment', 'annulments', 'annuls', 'anomalies', 'anomalous',
'anomalously', 'anomaly', 'anticompetitive', 'antitrust', 'argue', 'argued', 'arguing',
'argument', 'argumentative', 'arguments', 'arrearage', 'arrearages', 'arrears', 'arrest',
'arrested', 'arrests', 'artificially', 'assault', 'assaulted', 'assaulting', 'assaults',
'assertions', 'attrition', 'aversely', 'backdating', 'bad', 'bail', 'bailout', 'balk', 'balked',
'bankrupt', 'bankruptcies', 'bankruptcy', 'bankrupted', 'bankrupting', 'bankrupts', 'bans',
'barred', 'barrier', 'barriers', 'bottleneck', 'bottlenecks', 'boycott', 'boycotted',
'boycotting', 'boycotts', 'breach', 'breached', 'breaches', 'breaching', 'break', 'breakage',
'breakages', 'breakdown', 'breakdowns', 'breaking', 'breaks', 'bribe', 'bribed', 'briberies',
'bribery', 'bribes', 'bribing', 'bridge', 'broken', 'burden', 'burdened', 'burdening', 'burdens',
'burdensome', 'burned', 'calamities', 'calamitous', 'calamity', 'cancel', 'canceled',
'canceling', 'cancellation', 'cancellations', 'cancelled', 'cancelling', 'cancels', 'careless',
'carelessly', 'carelessness', 'catastrophe', 'catastrophes', 'catastrophic', 'catastrophically',
'caution', 'cautionary', 'cautioned', 'cautioning', 'cautions', 'cease', 'ceased', 'ceases',
'ceasing', 'censure', 'censured', 'censures', 'censuring', 'challenge', 'challenged',
'challenges', 'challenging', 'chargeoffs', 'circumvent', 'circumvented', 'circumventing',
'circumvention', 'circumventions', 'circumvents', 'claiming', 'claims', 'clawback', 'closed',
'closeout', 'closeouts', 'closing', 'closings', 'closure', 'closures', 'coerce', 'coerced',
'coerces', 'coercing', 'coercion', 'coercive', 'collapse', 'collapsed', 'collapses',
'collapsing', 'collision', 'collisions', 'collude', 'colluded', 'colludes', 'colluding',
'collusion', 'collusions', 'collusive', 'complain', 'complained', 'complaining', 'complains',
'complaint', 'complaints', 'complicate', 'complicated', 'complicates', 'complicating',
'complication', 'complications', 'compulsion', 'concealed', 'concealing', 'concede', 'conceded',
'concedes', 'conceding', 'concern', 'concerned', 'concerns', 'conciliating', 'conciliation',
'conciliations', 'condemn', 'condemnation', 'condemnations', 'condemned', 'condemning',
'condemns', 'condone', 'condoned', 'confess', 'confessed', 'confesses', 'confessing',
'confession', 'confine', 'confined', 'confinement', 'confinements', 'confines', 'confining',
'confiscate', 'confiscated', 'confiscates', 'confiscating', 'confiscation', 'confiscations',
'conflict', 'conflicted', 'conflicting', 'conflicts', 'confront', 'confrontation',
'confrontational', 'confrontations', 'confronted', 'confronting', 'confronts', 'confuse',
'confused', 'confuses', 'confusing', 'confusingly', 'confusion', 'conspiracies', 'conspiracy',
'conspirator', 'conspiratorial', 'conspirators', 'conspire', 'conspired', 'conspires',
'conspiring', 'contempt', 'contend', 'contended', 'contending', 'contends', 'contention',
'contentions', 'contentious', 'contentiously', 'contested', 'contesting', 'contraction',
'contractions', 'contradict', 'contradicted', 'contradicting', 'contradiction', 'contradictions',
'contradictory', 'contradicts', 'contrary', 'controversial', 'controversies', 'controversy',
'convict', 'convicted', 'convicting', 'conviction', 'convictions', 'corrected', 'correcting',
'correction', 'corrections', 'corrects', 'corrupt', 'corrupted', 'corrupting', 'corruption',
'corruptions', 'corruptly', 'corruptness', 'costly', 'counterclaim', 'counterclaimed',
'counterclaiming', 'counterclaims', 'counterfeit', 'counterfeited', 'counterfeiter',
'counterfeiters', 'counterfeiting', 'counterfeits', 'countermeasure', 'countermeasures', 'crime',
'crimes', 'criminal', 'criminally', 'criminals', 'crises', 'crisis', 'critical', 'critically',
'criticism', 'criticisms', 'criticize', 'criticized', 'criticizes', 'criticizing', 'crucial',
'crucially', 'culpability', 'culpable', 'culpably', 'cumbersome', 'curtail', 'curtailed',
'curtailing', 'curtailment', 'curtailments', 'curtails', 'cut', 'cutback', 'cutbacks',
'cyberattack', 'cyberattacks', 'cyberbullying', 'cybercrime', 'cybercrimes', 'cybercriminal',
'cybercriminals', 'damage', 'damaged', 'damages', 'damaging', 'dampen', 'dampened', 'danger',
'dangerous', 'dangerously', 'dangers', 'deadlock', 'deadlocked', 'deadlocking', 'deadlocks',
'deadweight', 'deadweights', 'debarment', 'debarments', 'debarred', 'deceased', 'deceit',
'deceitful', 'deceitfulness', 'deceive', 'deceived', 'deceives', 'deceiving', 'deception',
'deceptions', 'deceptive', 'deceptively', 'decline', 'declined', 'declines', 'declining',
'deface', 'defaced', 'defacement', 'defamation', 'defamations', 'defamatory', 'defame',
'defamed', 'defames', 'defaming', 'default', 'defaulted', 'defaulting', 'defaults', 'defeat',
'defeated', 'defeating', 'defeats', 'defect', 'defective', 'defects', 'defend', 'defendant',
'defendants', 'defended', 'defending', 'defends', 'defensive', 'defer', 'deficiencies',
'deficiency', 'deficient', 'deficit', 'deficits', 'defraud', 'defrauded', 'defrauding',
'defrauds', 'defunct', 'degradation', 'degradations', 'degrade', 'degraded', 'degrades',
'degrading', 'delay', 'delayed', 'delaying', 'delays', 'deleterious', 'deliberate',
'deliberated', 'deliberately', 'delinquencies', 'delinquency', 'delinquent', 'delinquently',
'delinquents', 'delist', 'delisted', 'delisting', 'delists', 'demise', 'demised', 'demises',
'demising', 'demolish', 'demolished', 'demolishes', 'demolishing', 'demolition', 'demolitions',
'demote', 'demoted', 'demotes', 'demoting', 'demotion', 'demotions', 'denial', 'denials',
'denied', 'denies', 'denigrate', 'denigrated', 'denigrates', 'denigrating', 'denigration',
'deny', 'denying', 'deplete', 'depleted', 'depletes', 'depleting', 'depletion', 'depletions',
'deprecation', 'depress', 'depressed', 'depresses', 'depressing', 'deprivation', 'deprive',
'deprived', 'deprives', 'depriving', 'derelict', 'dereliction', 'derogatory', 'destabilization',
'destabilize', 'destabilized', 'destabilizing', 'destroy', 'destroyed', 'destroying', 'destroys',
'destruction', 'destructive', 'detain', 'detained', 'detention', 'detentions', 'deter',
'deteriorate', 'deteriorated', 'deteriorates', 'deteriorating', 'deterioration',
'deteriorations', 'deterred', 'deterrence', 'deterrences', 'deterrent', 'deterrents',
'deterring', 'deters', 'detract', 'detracted', 'detracting', 'detriment', 'detrimental',
'detrimentally', 'detriments', 'devalue', 'devalued', 'devalues', 'devaluing', 'devastate',
'devastated', 'devastating', 'devastation', 'deviate', 'deviated', 'deviates', 'deviating',
'deviation', 'deviations', 'devolve', 'devolved', 'devolves', 'devolving', 'difficult',
'difficulties', 'difficultly', 'difficulty', 'diminish', 'diminished', 'diminishes',
'diminishing', 'diminution', 'disadvantage', 'disadvantaged', 'disadvantageous', 'disadvantages',
'disaffiliation', 'disagree', 'disagreeable', 'disagreed', 'disagreeing', 'disagreement',
'disagreements', 'disagrees', 'disallow', 'disallowance', 'disallowances', 'disallowed',
'disallowing', 'disallows', 'disappear', 'disappearance', 'disappearances', 'disappeared',
'disappearing', 'disappears', 'disappoint', 'disappointed', 'disappointing', 'disappointingly',
'disappointment', 'disappointments', 'disappoints', 'disapproval', 'disapprovals', 'disapprove',
'disapproved', 'disapproves', 'disapproving', 'disassociates', 'disassociating',
'disassociation', 'disassociations', 'disaster', 'disasters', 'disastrous', 'disastrously',
'disavow', 'disavowal', 'disavowed', 'disavowing', 'disavows', 'disciplinary', 'disclaim',
'disclaimed', 'disclaimer', 'disclaimers', 'disclaiming', 'disclaims', 'disclose', 'disclosed',
'discloses', 'disclosing', 'discontinuance', 'discontinuances', 'discontinuation',
'discontinuations', 'discontinue', 'discontinued', 'discontinues', 'discontinuing', 'discourage',
'discouraged', 'discourages', 'discouraging', 'discredit', 'discredited', 'discrediting',
'discredits', 'discrepancies', 'discrepancy', 'disfavor', 'disfavored', 'disfavoring',
'disfavors', 'disgorge', 'disgorged', 'disgorgement', 'disgorgements', 'disgorges', 'disgorging',
'disgrace', 'disgraceful', 'disgracefully', 'dishonest', 'dishonestly', 'dishonesty', 'dishonor',
'dishonorable', 'dishonorably', 'dishonored', 'dishonoring', 'dishonors', 'disincentives',
'disinterested', 'disinterestedly', 'disinterestedness', 'disloyal', 'disloyally', 'disloyalty',
'dismal', 'dismally', 'dismiss', 'dismissal', 'dismissals', 'dismissed', 'dismisses',
'dismissing', 'disorderly', 'disparage', 'disparaged', 'disparagement', 'disparagements',
'disparages', 'disparaging', 'disparagingly', 'disparities', 'disparity', 'displace',
'displaced', 'displacement', 'displacements', 'displaces', 'displacing', 'dispose', 'dispossess',
'dispossessed', 'dispossesses', 'dispossessing', 'disproportion', 'disproportional',
'disproportionate', 'disproportionately', 'dispute', 'disputed', 'disputes', 'disputing',
'disqualification', 'disqualifications', 'disqualified', 'disqualifies', 'disqualify',
'disqualifying', 'disregard', 'disregarded', 'disregarding', 'disregards', 'disreputable',
'disrepute', 'disrupt', 'disrupted', 'disrupting', 'disruption', 'disruptions', 'disruptive',
'disrupts', 'dissatisfaction', 'dissatisfied', 'dissent', 'dissented', 'dissenter', 'dissenters',
'dissenting', 'dissents', 'dissident', 'dissidents', 'dissolution', 'dissolutions', 'distort',
'distorted', 'distorting', 'distortion', 'distortions', 'distorts', 'distract', 'distracted',
'distracting', 'distraction', 'distractions', 'distracts', 'distress', 'distressed', 'disturb',
'disturbance', 'disturbances', 'disturbed', 'disturbing', 'disturbs', 'diversion', 'divert',
'diverted', 'diverting', 'diverts', 'divest', 'divested', 'divesting', 'divestiture',
'divestitures', 'divestment', 'divestments', 'divests', 'divorce', 'divorced', 'divulge',
'divulged', 'divulges', 'divulging', 'doubt', 'doubted', 'doubtful', 'doubts', 'downgrade',
'downgraded', 'downgrades', 'downgrading', 'downsize', 'downsized', 'downsizes', 'downsizing',
'downsizings', 'downtime', 'downtimes', 'downturn', 'downturns', 'downward', 'downwards', 'drag',
'drastic', 'drastically', 'drawback', 'drawbacks', 'drop', 'dropped', 'drought', 'droughts', 'duress',
'dysfunction', 'dysfunctional', 'dysfunctions', 'easing', 'egregious', 'egregiously', 'embargo',
'embargoed', 'embargoes', 'embargoing', 'embarrass', 'embarrassed', 'embarrasses',
'embarrassing', 'embarrassment', 'embarrassments', 'embezzle', 'embezzled', 'embezzlement',
'embezzlements', 'embezzler', 'embezzles', 'embezzling', 'encroach', 'encroached', 'encroaches',
'encroaching', 'encroachment', 'encroachments', 'encumber', 'encumbered', 'encumbering',
'encumbers', 'encumbrance', 'encumbrances', 'endanger', 'endangered', 'endangering',
'endangerment', 'endangers', 'enjoin', 'enjoined', 'enjoining', 'enjoins', 'erode', 'eroded',
'erodes', 'eroding', 'erosion', 'erratic', 'erratically', 'erred', 'erring', 'erroneous',
'erroneously', 'error', 'errors', 'errs', 'escalate', 'escalated', 'escalates', 'escalating',
'evade', 'evaded', 'evades', 'evading', 'evasion', 'evasions', 'evasive', 'evict', 'evicted',
'evicting', 'eviction', 'evictions', 'evicts', 'exacerbate', 'exacerbated', 'exacerbates',
'exacerbating', 'exacerbation', 'exacerbations', 'exaggerate', 'exaggerated', 'exaggerates',
'exaggerating', 'exaggeration', 'excessive', 'excessively', 'exculpate', 'exculpated',
'exculpates', 'exculpating', 'exculpation', 'exculpations', 'exculpatory', 'exonerate',
'exonerated', 'exonerates', 'exonerating', 'exoneration', 'exonerations', 'exploit',
'exploitation', 'exploitations', 'exploitative', 'exploited', 'exploiting', 'exploits', 'expose',
'exposed', 'exposes', 'exposing', 'expropriate', 'expropriated', 'expropriates', 'expropriating',
'expropriation', 'expropriations', 'expulsion', 'expulsions', 'extenuating', 'fail', 'failed',
'failing', 'failings', 'fails', 'failure', 'failures', 'fallout', 'false', 'falsely',
'falsification', 'falsifications', 'falsified', 'falsifies', 'falsify', 'falsifying', 'falsity',
'fatalities', 'fatality', 'fatally', 'fault', 'faulted', 'faults', 'faulty', 'fear', 'fears',
'felonies', 'felonious', 'felony', 'fictitious', 'fined', 'fines', 'fired', 'firing', 'flaw',
'flawed', 'flaws', 'forbid', 'forbidden', 'forbidding', 'forbids', 'force', 'forced', 'forcing',
'foreclose', 'foreclosed', 'forecloses', 'foreclosing', 'foreclosure', 'foreclosures', 'forego',
'foregoes', 'foregone', 'forestall', 'forestalled', 'forestalling', 'forestalls', 'forfeit',
'forfeited', 'forfeiting', 'forfeits', 'forfeiture', 'forfeitures', 'forgers', 'forgery',
'fraud', 'frauds', 'fraudulence', 'fraudulent', 'fraudulently', 'frivolous', 'frivolously',
'frustrate', 'frustrated', 'frustrates', 'frustrating', 'frustratingly', 'frustration',
'frustrations', 'fugitive', 'fugitives', 'gratuitous', 'gratuitously', 'grievance', 'grievances',
'grossly', 'groundless', 'guilty', 'halt', 'halted', 'hamper', 'hampered', 'hampering',
'hampers', 'harass', 'harassed', 'harassing', 'harassment', 'hardship', 'hardships', 'harm',
'harmed', 'harmful', 'harmfully', 'harming', 'harms', 'harsh', 'harsher', 'harshest', 'harshly',
'harshness', 'hazard', 'hazardous', 'hazards', 'hinder', 'hindered', 'hindering', 'hinders',
'hindrance', 'hindrances', 'hostile', 'hostility', 'hurt', 'hurting', 'idle', 'idled', 'idling',
'ignore', 'ignored', 'ignores', 'ignoring', 'ill', 'illegal', 'illegalities', 'illegality',
'illegally', 'illegible', 'illicit', 'illicitly', 'illiquid', 'illiquidity', 'imbalance',
'imbalances', 'immature', 'immoral', 'impair', 'impaired', 'impairing', 'impairment',
'impairments', 'impairs', 'impasse', 'impasses', 'impede', 'impeded', 'impedes', 'impediment',
'impediments', 'impeding', 'impending', 'imperative', 'imperfection', 'imperfections', 'imperil',
'impermissible', 'implicate', 'implicated', 'implicates', 'implicating', 'impossibility',
'impossible', 'impound', 'impounded', 'impounding', 'impounds', 'impracticable', 'impractical',
'impracticalities', 'impracticality', 'imprisonment', 'improper', 'improperly', 'improprieties',
'impropriety', 'imprudent', 'imprudently', 'inability', 'inaccessible', 'inaccuracies',
'inaccuracy', 'inaccurate', 'inaccurately', 'inaction', 'inactions', 'inactivate', 'inactivated',
'inactivates', 'inactivating', 'inactivation', 'inactivations', 'inactivity', 'inadequacies',
'inadequacy', 'inadequate', 'inadequately', 'inadvertent', 'inadvertently', 'inadvisability',
'inadvisable', 'inappropriate', 'inappropriately', 'inattention', 'incapable', 'incapacitated',
'incapacity', 'incarcerate', 'incarcerated', 'incarcerates', 'incarcerating', 'incarceration',
'incarcerations', 'incidence', 'incidences', 'incident', 'incidents', 'incompatibilities',
'incompatibility', 'incompatible', 'incompetence', 'incompetency', 'incompetent',
'incompetently', 'incompetents', 'incomplete', 'incompletely', 'incompleteness', 'inconclusive',
'inconsistencies', 'inconsistency', 'inconsistent', 'inconsistently', 'inconvenience',
'inconveniences', 'inconvenient', 'incorrect', 'incorrectly', 'incorrectness', 'indecency',
'indecent', 'indefeasible', 'indefeasibly', 'indict', 'indictable', 'indicted', 'indicting',
'indictment', 'indictments', 'ineffective', 'ineffectively', 'ineffectiveness', 'inefficiencies',
'inefficiency', 'inefficient', 'inefficiently', 'ineligibility', 'ineligible', 'inequitable',
'inequitably', 'inequities', 'inequity', 'inevitable', 'inexperience', 'inexperienced',
'inferior', 'inflicted', 'infraction', 'infractions', 'infringe', 'infringed', 'infringement',
'infringements', 'infringes', 'infringing', 'inhibited', 'inimical', 'injunction', 'injunctions',
'injure', 'injured', 'injures', 'injuries', 'injuring', 'injurious', 'injury', 'inordinate',
'inordinately', 'inquiry', 'insecure', 'insensitive', 'insolvencies', 'insolvency', 'insolvent',
'instability', 'insubordination', 'insufficiency', 'insufficient', 'insufficiently',
'insurrection', 'insurrections', 'intentional', 'interfere', 'interfered', 'interference',
'interferences', 'interferes', 'interfering', 'intermittent', 'intermittently', 'interrupt',
'interrupted', 'interrupting', 'interruption', 'interruptions', 'interrupts', 'intimidation',
'intrusion', 'invalid', 'invalidate', 'invalidated', 'invalidates', 'invalidating',
'invalidation', 'invalidity', 'investigate', 'investigated', 'investigates', 'investigating',
'investigation', 'investigations', 'involuntarily', 'involuntary', 'irreconcilable',
'irreconcilably', 'irrecoverable', 'irrecoverably', 'irregular', 'irregularities',
'irregularity', 'irregularly', 'irreparable', 'irreparably', 'irreversible', 'jeopardize',
'jeopardized', 'justifiable', 'kickback', 'kickbacks', 'knowingly', 'lack', 'lacked', 'lacking',
'lackluster', 'lacks', 'lag', 'lagged', 'lagging', 'lags', 'lapse', 'lapsed', 'lapses',
'lapsing', 'late', 'laundering', 'layoff', 'layoffs', 'lie', 'limitation', 'limitations',
'lingering', 'liquidate', 'liquidated', 'liquidates', 'liquidating', 'liquidation',
'liquidations', 'liquidator', 'liquidators', 'litigant', 'litigants', 'litigate', 'litigated',
'litigates', 'litigating', 'litigation', 'litigations', 'lockout', 'lockouts', 'lose', 'loses',
'losing', 'loss', 'losses', 'lost', 'lying', 'malfeasance', 'malfunction', 'malfunctioned',
'malfunctioning', 'malfunctions', 'malice', 'malicious', 'maliciously', 'malpractice',
'manipulate', 'manipulated', 'manipulates', 'manipulating', 'manipulation', 'manipulations',
'manipulative', 'markdown', 'markdowns', 'misapplication', 'misapplications', 'misapplied',
'misapplies', 'misapply', 'misapplying', 'misappropriate', 'misappropriated', 'misappropriates',
'misappropriating', 'misappropriation', 'misappropriations', 'misbranded', 'miscalculate',
'miscalculated', 'miscalculates', 'miscalculating', 'miscalculation', 'miscalculations',
'mischaracterization', 'mischief', 'misclassification', 'misclassifications', 'misclassified',
'misclassify', 'miscommunication', 'misconduct', 'misdated', 'misdemeanor', 'misdemeanors',
'misdirected', 'mishandle', 'mishandled', 'mishandles', 'mishandling', 'misinform',
'misinformation', 'misinformed', 'misinforming', 'misinforms', 'misinterpret',
'misinterpretation', 'misinterpretations', 'misinterpreted', 'misinterpreting', 'misinterprets',
'misjudge', 'misjudged', 'misjudges', 'misjudging', 'misjudgment', 'misjudgments', 'mislabel',
'mislabeled', 'mislabeling', 'mislabelled', 'mislabels', 'mislead', 'misleading', 'misleadingly',
'misleads', 'misled', 'mismanage', 'mismanaged', 'mismanagement', 'mismanages', 'mismanaging',
'mismatch', 'mismatched', 'mismatches', 'mismatching', 'misplaced', 'misprice', 'mispricing',
'mispricings', 'misrepresent', 'misrepresentation', 'misrepresentations', 'misrepresented',
'misrepresenting', 'misrepresents', 'miss', 'missed', 'misses', 'misstate', 'misstated',
'misstatement', 'misstatements', 'misstates', 'misstating', 'misstep', 'missteps', 'mistake',
'mistaken', 'mistakenly', 'mistakes', 'mistaking', 'mistrial', 'mistrials', 'misunderstand',
'misunderstanding', 'misunderstandings', 'misunderstood', 'misuse', 'misused', 'misuses',
'misusing', 'monopolistic', 'monopolists', 'monopolization', 'monopolize', 'monopolized',
'monopolizes', 'monopolizing', 'monopoly', 'moratoria', 'moratorium', 'moratoriums',
'mothballed', 'mothballing', 'negative', 'negatively', 'negatives', 'neglect', 'neglected',
'neglectful', 'neglecting', 'neglects', 'negligence', 'negligences', 'negligent', 'negligently',
'nonattainment', 'noncompetitive', 'noncompliance', 'noncompliances', 'noncompliant',
'noncomplying', 'nonconforming', 'nonconformities', 'nonconformity', 'nondisclosure',
'nonfunctional', 'nonpayment', 'nonpayments', 'nonperformance', 'nonperformances',
'nonperforming', 'nonproducing', 'nonproductive', 'nonrecoverable', 'nonrenewal', 'nuisance',
'nuisances', 'nullification', 'nullifications', 'nullified', 'nullifies', 'nullify',
'nullifying', 'objected', 'objecting', 'objection', 'objectionable', 'objectionably',
'objections', 'obscene', 'obscenity', 'obsolescence', 'obsolete', 'obstacle', 'obstacles',
'obstruct', 'obstructed', 'obstructing', 'obstruction', 'obstructions', 'offence', 'offences',
'offend', 'offended', 'offender', 'offenders', 'offending', 'offends', 'omission', 'omissions',
'omit', 'omits', 'omitted', 'omitting', 'onerous', 'opportunistic', 'opportunistically',
'oppose', 'opposed', 'opposes', 'opposing', 'opposition', 'oppositions', 'outage', 'outages',
'outdated', 'outmoded', 'overage', 'overages', 'overbuild', 'overbuilding', 'overbuilds',
'overbuilt', 'overburden', 'overburdened', 'overburdening', 'overcapacities', 'overcapacity',
'overcharge', 'overcharged', 'overcharges', 'overcharging', 'overcome', 'overcomes',
'overcoming', 'overdue', 'overestimate', 'overestimated', 'overestimates', 'overestimating',
'overestimation', 'overestimations', 'overload', 'overloaded', 'overloading', 'overloads',
'overlook', 'overlooked', 'overlooking', 'overlooks', 'overpaid', 'overpayment', 'overpayments',
'overproduced', 'overproduces', 'overproducing', 'overproduction', 'overrun', 'overrunning',
'overruns', 'overshadow', 'overshadowed', 'overshadowing', 'overshadows', 'overstate',
'overstated', 'overstatement', 'overstatements', 'overstates', 'overstating', 'oversupplied',
'oversupplies', 'oversupply', 'oversupplying', 'overtly', 'overturn', 'overturned',
'overturning', 'overturns', 'overvalue', 'overvalued', 'overvaluing', 'panic', 'panics',
'penalize', 'penalized', 'penalizes', 'penalizing', 'penalties', 'penalty', 'peril', 'perils',
'perjury', 'perpetrate', 'perpetrated', 'perpetrates', 'perpetrating', 'perpetration', 'persist',
'persisted', 'persistence', 'persistent', 'persistently', 'persisting', 'persists', 'pervasive',
'pervasively', 'pervasiveness', 'petty', 'picket', 'picketed', 'picketing', 'plaintiff',
'plaintiffs', 'plea', 'plead', 'pleaded', 'pleading', 'pleadings', 'pleads', 'pleas', 'pled',
'poor', 'poorly', 'poses', 'posing', 'postpone', 'postponed', 'postponement', 'postponements',
'postpones', 'postponing', 'precipitated', 'precipitous', 'precipitously', 'preclude',
'precluded', 'precludes', 'precluding', 'predatory', 'prejudice', 'prejudiced', 'prejudices',
'prejudicial', 'prejudicing', 'premature', 'prematurely', 'pressing', 'pretrial', 'preventing',
'prevention', 'prevents', 'problem', 'problematic', 'problematical', 'problems', 'prolong',
'prolongation', 'prolongations', 'prolonged', 'prolonging', 'prolongs', 'prone', 'prosecute',
'prosecuted', 'prosecutes', 'prosecuting', 'prosecution', 'prosecutions', 'protest', 'protested',
'protester', 'protesters', 'protesting', 'protestor', 'protestors', 'protests', 'protracted',
'protraction', 'provoke', 'provoked', 'provokes', 'provoking', 'punished', 'punishes',
'punishing', 'punishment', 'punishments', 'punitive', 'purport', 'purported', 'purportedly',
'purporting', 'purports', 'question', 'questionable', 'questionably', 'questioned',
'questioning', 'questions', 'quit', 'quitting', 'racketeer', 'racketeering', 'rationalization',
'rationalizations', 'rationalize', 'rationalized', 'rationalizes', 'rationalizing',
'reassessment', 'reassessments', 'reassign', 'reassigned', 'reassigning', 'reassignment',
'reassignments', 'reassigns', 'recall', 'recalled', 'recalling', 'recalls', 'recession',
'recessionary', 'recessions', 'reckless', 'recklessly', 'recklessness', 'redact', 'redacted',
'redacting', 'redaction', 'redactions', 'redefault', 'redefaulted', 'redefaults', 'redress',
'redressed', 'redresses', 'redressing', 'refusal', 'refusals', 'refuse', 'refused', 'refuses',
'refusing', 'reject', 'rejected', 'rejecting', 'rejection', 'rejections', 'rejects',
'relinquish', 'relinquished', 'relinquishes', 'relinquishing', 'relinquishment',
'relinquishments', 'reluctance', 'reluctant', 'renegotiate', 'renegotiated', 'renegotiates',
'renegotiating', 'renegotiation', 'renegotiations', 'renounce', 'renounced', 'renouncement',
'renouncements', 'renounces', 'renouncing', 'reparation', 'reparations', 'repossessed',
'repossesses', 'repossessing', 'repossession', 'repossessions', 'repudiate', 'repudiated',
'repudiates', 'repudiating', 'repudiation', 'repudiations', 'resign', 'resignation',
'resignations', 'resigned', 'resigning', 'resigns', 'restate', 'restated', 'restatement',
'restatements', 'restates', 'restating', 'restructure', 'restructured', 'restructures',
'restructuring', 'restructurings', 'retaliate', 'retaliated', 'retaliates', 'retaliating',
'retaliation', 'retaliations', 'retaliatory', 'retribution', 'retributions', 'revocation',
'revocations', 'revoke', 'revoked', 'revokes', 'revoking', 'ridicule', 'ridiculed', 'ridicules',
'ridiculing', 'riskier', 'riskiest', 'risky', 'sabotage', 'sacrifice', 'sacrificed',
'sacrifices', 'sacrificial', 'sacrificing', 'scandalous', 'scandals', 'scrutinize',
'scrutinized', 'scrutinizes', 'scrutinizing', 'scrutiny', 'secrecy', 'seize', 'seized', 'seizes',
'seizing', 'sentenced', 'sentencing', 'serious', 'seriously', 'seriousness', 'setback',
'setbacks', 'sever', 'severe', 'severed', 'severely', 'severities', 'severity', 'sharply',
'shocked', 'shortage', 'shortages', 'shortfall', 'shortfalls', 'shrinkage', 'shrinkages', 'shut',
'shutdown', 'shutdowns', 'shuts', 'shutting', 'slander', 'slandered', 'slanderous', 'slanders',
'slippage', 'slippages', 'slow', 'slowdown', 'slowdowns', 'slowed', 'slower', 'slowest',
'slowing', 'slowly', 'slowness', 'sluggish', 'sluggishly', 'sluggishness', 'solvencies',
'solvency', 'spam', 'spammers', 'spamming', 'staggering', 'stagnant', 'stagnate', 'stagnated',
'stagnates', 'stagnating', 'stagnation', 'standstill', 'standstills', 'stolen', 'stoppage',
'stoppages', 'stopped', 'stopping', 'stops', 'strain', 'strained', 'straining', 'strains',
'stress', 'stressed', 'stresses', 'stressful', 'stressing', 'stringent', 'strong', 'subjected',
'subjecting', 'subjection', 'subpoena', 'subpoenaed', 'subpoenas', 'substandard', 'sue', 'sued',
'sues', 'suffer', 'suffered', 'suffering', 'suffers', 'suing', 'summoned', 'summoning',
'summons', 'summonses', 'susceptibility', 'susceptible', 'suspect', 'suspected', 'suspects',
'suspend', 'suspended', 'suspending', 'suspends', 'suspension', 'suspensions', 'suspicion',
'suspicions', 'suspicious', 'suspiciously', 'taint', 'tainted', 'tainting', 'taints', 'tampered',
'tense', 'terminate', 'terminated', 'terminates', 'terminating', 'termination', 'terminations',
'testify', 'testifying', 'threat', 'threaten', 'threatened', 'threatening', 'threatens',
'threats', 'tightening', 'tolerate', 'tolerated', 'tolerates', 'tolerating', 'toleration',
'tortuous', 'tortuously', 'tragedies', 'tragedy', 'tragic', 'tragically', 'traumatic', 'trouble',
'troubled', 'troubles', 'turbulence', 'turmoil', 'unable', 'unacceptable', 'unacceptably',
'unaccounted', 'unannounced', 'unanticipated', 'unapproved', 'unattractive', 'unauthorized',
'unavailability', 'unavailable', 'unavoidable', 'unavoidably', 'unaware', 'uncollectable',
'uncollected', 'uncollectibility', 'uncollectible', 'uncollectibles', 'uncompetitive',
'uncompleted', 'unconscionable', 'unconscionably', 'uncontrollable', 'uncontrollably',
'uncontrolled', 'uncorrected', 'uncover', 'uncovered', 'uncovering', 'uncovers', 'undeliverable',
'undelivered', 'undercapitalized', 'undercut', 'undercuts', 'undercutting', 'underestimate',
'underestimated', 'underestimates', 'underestimating', 'underestimation', 'underfunded',
'underinsured', 'undermine', 'undermined', 'undermines', 'undermining', 'underpaid',
'underpayment', 'underpayments', 'underpays', 'underperform', 'underperformance',
'underperformed', 'underperforming', 'underperforms', 'underproduced', 'underproduction',
'underreporting', 'understate', 'understated', 'understatement', 'understatements',
'understates', 'understating', 'underutilization', 'underutilized', 'undesirable', 'undesired',
'undetected', 'undetermined', 'undisclosed', 'undocumented', 'undue', 'unduly', 'uneconomic',
'uneconomical', 'uneconomically', 'unemployed', 'unemployment', 'unethical', 'unethically',
'unexcused', 'unexpected', 'unexpectedly', 'unfair', 'unfairly', 'unfavorability', 'unfavorable',
'unfavorably', 'unfavourable', 'unfeasible', 'unfit', 'unfitness', 'unforeseeable', 'unforeseen',
'unforseen', 'unfortunate', 'unfortunately', 'unfounded', 'unfriendly', 'unfulfilled',
'unfunded', 'uninsured', 'unintended', 'unintentional', 'unintentionally', 'unjust',
'unjustifiable', 'unjustifiably', 'unjustified', 'unjustly', 'unknowing', 'unknowingly',
'unlawful', 'unlawfully', 'unlicensed', 'unliquidated', 'unmarketable', 'unmerchantable',
'unmeritorious', 'unnecessarily', 'unnecessary', 'unneeded', 'unobtainable', 'unoccupied',
'unpaid', 'unperformed', 'unplanned', 'unpopular', 'unpredictability', 'unpredictable',
'unpredictably', 'unpredicted', 'unproductive', 'unprofitability', 'unprofitable', 'unqualified',
'unrealistic', 'unreasonable', 'unreasonableness', 'unreasonably', 'unreceptive',
'unrecoverable', 'unrecovered', 'unreimbursed', 'unreliable', 'unremedied', 'unreported',
'unresolved', 'unrest', 'unsafe', 'unsalable', 'unsaleable', 'unsatisfactory', 'unsatisfied',
'unsavory', 'unscheduled', 'unsellable', 'unsold', 'unsound', 'unstabilized', 'unstable',
'unsubstantiated', 'unsuccessful', 'unsuccessfully', 'unsuitability', 'unsuitable', 'unsuitably',
'unsuited', 'unsure', 'unsuspected', 'unsuspecting', 'unsustainable', 'untenable', 'untimely',
'untrusted', 'untruth', 'untruthful', 'untruthfully', 'untruthfulness', 'untruths', 'unusable',
'unwanted', 'unwarranted', 'unwelcome', 'unwilling', 'unwillingness', 'upset', 'urgency',
'urgent', 'usurious', 'usurp', 'usurped', 'usurping', 'usurps', 'usury', 'vandalism', 'verdict',
'verdicts', 'vetoed', 'victims', 'violate', 'violated', 'violates', 'violating', 'violation',
'violations', 'violative', 'violator', 'violators', 'violence', 'violent', 'violently',
'vitiate', 'vitiated', 'vitiates', 'vitiating', 'vitiation', 'voided', 'voiding', 'volatile',
'volatility', 'vulnerabilities', 'vulnerability', 'vulnerable', 'vulnerably', 'warn', 'warned',
'warning', 'warnings', 'warns', 'wasted', 'wasteful', 'wasting', 'weak', 'weaken', 'weakened',
'weakening', 'weakens', 'weaker', 'weakest', 'weakly', 'weakness', 'weaknesses', 'willfully',
'worries', 'worry', 'worrying', 'worse', 'worsen', 'worsened', 'worsening', 'worsens', 'worst',
'worthless', 'writedown', 'writedowns', 'writeoff', 'writeoffs', 'wrong', 'wrongdoing',
'wrongdoings', 'wrongful', 'wrongfully', 'wrongly',
'negative', 'negatives', 'fail', 'fails', 'failing', 'failure', 'weak', 'weakness', 'weaknesses',
'difficult', 'difficulty', 'hurdle', 'hurdles', 'obstacle', 'obstacles', 'slump', 'slumps',
'slumping', 'slumped', 'uncertain', 'uncertainty', 'unsettled', 'unfavorable', 'downturn',
'depressed', 'disappoint', 'disappoints', 'disappointing', 'disappointed', 'disappointment',
'risk', 'risks', 'risky', 'threat', 'threats', 'penalty', 'penalties', 'down', 'decrease',
'decreases', 'decreasing', 'decreased', 'decline', 'declines', 'declining', 'declined', 'fall',
'falls', 'falling', 'fell', 'fallen', 'drop', 'drops', 'dropping', 'dropped', 'deteriorate',
'deteriorates', 'deteriorating', 'deteriorated', 'worsen', 'worsens', 'worsening', 'weaken',
'weakens', 'weakening', 'weakened', 'worse', 'worst', 'low', 'lower', 'lowest', 'less', 'least',
'smaller', 'smallest', 'shrink', 'shrinks', 'shrinking', 'shrunk', 'below', 'under', 'challenge',
'challenges', 'challenging', 'challenged'
],
'Positive': ['able', 'abundance', 'abundant', 'acclaimed', 'accomplish', 'accomplished', 'accomplishes',
'accomplishing', 'accomplishment', 'accomplishments', 'achieve', 'achieved', 'achievement',
'achievements', 'achieves', 'achieving', 'adequately', 'advancement', 'advancements', 'advances',
'advancing', 'advantage', 'advantaged', 'advantageous', 'advantageously', 'advantages',
'alliance', 'alliances', 'assure', 'assured', 'assures', 'assuring', 'attain', 'attained',
'attaining', 'attainment', 'attainments', 'attains', 'attractive', 'attractiveness', 'beautiful',
'beautifully', 'beneficial', 'beneficially', 'benefit', 'benefited', 'benefiting', 'benefitted',
'benefitting', 'best', 'better', 'bolstered', 'bolstering', 'bolsters', 'boom', 'booming',
'boost', 'boosted', 'breakthrough', 'breakthroughs', 'brilliant', 'charitable', 'collaborate',
'collaborated', 'collaborates', 'collaborating', 'collaboration', 'collaborations',
'collaborative', 'collaborator', 'collaborators', 'compliment', 'complimentary', 'complimented',
'complimenting', 'compliments', 'conclusive', 'conclusively', 'conducive', 'confident',
'constructive', 'constructively', 'courteous', 'creative', 'creatively', 'creativeness',
'creativity', 'delight', 'delighted', 'delightful', 'delightfully', 'delighting', 'delights',
'dependability', 'dependable', 'desirable', 'desired', 'despite', 'destined', 'diligent',
'diligently', 'distinction', 'distinctions', 'distinctive', 'distinctively', 'distinctiveness',
'dream', 'easier', 'easily', 'easy', 'effective', 'efficiencies', 'efficiency', 'efficient',
'efficiently', 'empower', 'empowered', 'empowering', 'empowers', 'enable', 'enabled', 'enables',
'enabling', 'encouraged', 'encouragement', 'encourages', 'encouraging', 'enhance', 'enhanced',
'enhancement', 'enhancements', 'enhances', 'enhancing', 'enjoy', 'enjoyable', 'enjoyably',
'enjoyed', 'enjoying', 'enjoyment', 'enjoys', 'enthusiasm', 'enthusiastic', 'enthusiastically',
'excellence', 'excellent', 'excelling', 'excels', 'exceptional', 'exceptionally', 'excited',
'excitement', 'exciting', 'exclusive', 'exclusively', 'exclusiveness', 'exclusives',
'exclusivity', 'exemplary', 'fantastic', 'favorable', 'favorably', 'favored', 'favoring',
'favorite', 'favorites', 'friendly', 'gain', 'gained', 'gaining', 'gains', 'good', 'great',
'greater', 'greatest', 'greatly', 'greatness', 'happiest', 'happily', 'happiness', 'happy',
'highest', 'honor', 'honorable', 'honored', 'honoring', 'honors', 'ideal', 'impress',
'impressed', 'impresses', 'impressing', 'impressive', 'impressively', 'improve', 'improved',
'improvement', 'improvements', 'improves', 'improving', 'incredible', 'incredibly',
'influential', 'informative', 'ingenuity', 'innovate', 'innovated', 'innovates', 'innovating',
'innovation', 'innovations', 'innovative', 'innovativeness', 'innovator', 'innovators',
'insightful', 'inspiration', 'inspirational', 'integrity', 'invent', 'invented', 'inventing',
'invention', 'inventions', 'inventive', 'inventiveness', 'inventor', 'inventors', 'leadership',
'leading', 'loyal', 'lucrative', 'meritorious', 'opportunities', 'opportunity', 'optimistic',
'outperform', 'outperformed', 'outperforming', 'outperforms', 'perfect', 'perfected',
'perfectly', 'perfects', 'pleasant', 'pleasantly', 'pleased', 'pleasure', 'plentiful', 'popular',
'popularity', 'positive', 'positively', 'preeminence', 'preeminent', 'premier', 'premiere',
'prestige', 'prestigious', 'proactive', 'proactively', 'proficiency', 'proficient',
'proficiently', 'profitability', 'profitable', 'profitably', 'progress', 'progressed',
'progresses', 'progressing', 'prospered', 'prospering', 'prosperity', 'prosperous', 'prospers',
'rebound', 'rebounded', 'rebounding', 'receptive', 'regain', 'regained', 'regaining', 'resolve',
'revolutionize', 'revolutionized', 'revolutionizes', 'revolutionizing', 'reward', 'rewarded',
'rewarding', 'rewards', 'satisfaction', 'satisfactorily', 'satisfactory', 'satisfied',
'satisfies', 'satisfy', 'satisfying', 'smooth', 'smoothing', 'smoothly', 'smooths', 'solves',
'solving', 'spectacular', 'spectacularly', 'stability', 'stabilization', 'stabilizations',
'stabilize', 'stabilized', 'stabilizes', 'stabilizing', 'stable', 'strength', 'strengthen',
'strengthened', 'strengthening', 'strengthens', 'strengths', 'strong', 'stronger', 'strongest',
'succeed', 'succeeded', 'succeeding', 'succeeds', 'success', 'successes', 'successful',
'successfully', 'superior', 'surpass', 'surpassed', 'surpasses', 'surpassing', "sustainable", 'transparency',
'tremendous', 'tremendously', 'unmatched', 'unparalleled', 'unsurpassed', 'upturn', 'upturns',
'valuable', 'versatile', 'versatility', 'vibrancy', 'vibrant', 'win', 'winner', 'winners', 'winning', 'worthy',
'positive', 'positives', 'success', 'successes', 'successful', 'succeed', 'succeeds',
'succeeding', 'succeeded', 'accomplish', 'accomplishes', 'accomplishing', 'accomplished',
'accomplishment', 'accomplishments', 'strong', 'strength', 'strengths', 'certain', 'certainty',
'definite', 'solid', 'excellent', 'good', 'leading', 'achieve', 'achieves', 'achieved',
'achieving', 'achievement', 'achievements', 'progress', 'progressing', 'deliver', 'delivers',
'delivered', 'delivering', 'leader', 'leading', 'pleased', 'reward', 'rewards', 'rewarding',
'rewarded', 'opportunity', 'opportunities', 'enjoy', 'enjoys', 'enjoying', 'enjoyed',
'encouraged', 'encouraging', 'up', 'increase', 'increases', 'increasing', 'increased', 'rise',
'rises', 'rising', 'rose', 'risen', 'improve', 'improves', 'improving', 'improved', 'improvement',
'improvements', 'strengthen', 'strengthens', 'strengthening', 'strengthened', 'stronger',
'strongest', 'better', 'best', 'more', 'most', 'above', 'record', 'high', 'higher', 'highest',
'greater', 'greatest', 'larger', 'largest', 'grow', 'grows', 'growing', 'grew', 'grown', 'growth',
'expand', 'expands', 'expanding', 'expanded', 'expansion', 'exceed', 'exceeds', 'exceeded',
'exceeding', 'beat', 'beats', 'beating']
}
# Negation cue words (contractions with and without apostrophes).
# Stored as a frozenset: `negated()` only tests membership, and set lookup
# is O(1) versus O(n) for the original list.
negate = frozenset(["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt", "ain't", "aren't", "can't",
                    "couldn't", "daren't", "didn't", "doesn't", "dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt",
                    "neither", "don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't", "neednt", "needn't",
                    "never", "none", "nope", "nor", "not", "nothing", "nowhere", "oughtnt", "shant", "shouldnt", "wasnt",
                    "werent", "oughtn't", "shan't", "shouldn't", "wasn't", "weren't", "without", "wont", "wouldnt", "won't",
                    "wouldn't", "rarely", "seldom", "despite", "no", "nobody"])

def negated(word):
    """Return True if *word* (case-insensitive) is a negation word."""
    return word.lower() in negate

def tone_count_with_negation_check(dict, article):
    """
    Count positive and negative words in *article* with a negation check.

    Negation handling is applied to positive words only: a positive word
    preceded by a negation word within the three preceding tokens is counted
    as negative instead.

    Parameters
    ----------
    dict : mapping with 'Positive' and 'Negative' word-list entries.
           (Name shadows the builtin; kept for backward compatibility.)
    article : str, the text to score.

    Returns
    -------
    [word_count, pos_count, neg_count, pos_words, neg_words]
    """
    pos_count = 0
    neg_count = 0
    pos_words = []
    neg_words = []
    # Hoist the word lists into sets once: membership tests run per token.
    negative_words = set(dict['Negative'])
    positive_words = set(dict['Positive'])
    # Tokenize, keeping n't contractions and 's possessives intact.
    input_words = re.findall(r'\b([a-zA-Z]+n\'t|[a-zA-Z]+\'s|[a-zA-Z]+)\b', article.lower())
    word_count = len(input_words)
    for i in range(word_count):
        word = input_words[i]
        if word in negative_words:
            neg_count += 1
            neg_words.append(word)
        if word in positive_words:
            # Look back at up to three preceding tokens for a negation cue
            # (replaces the original duplicated i>=3 / i==2 / i==1 / i==0 ladder;
            # the slice is empty when i == 0, so the word counts as positive).
            if any(negated(prev) for prev in input_words[max(i - 3, 0):i]):
                neg_count += 1
                neg_words.append(word + ' (with negation)')
            else:
                pos_count += 1
                pos_words.append(word)
    return [word_count, pos_count, neg_count, pos_words, neg_words]
# Score every statement with the lexicon-based tone counter and attach the
# resulting columns to the main DataFrame.
print(len(Data))
temp = pd.DataFrame([tone_count_with_negation_check(lmdict, text) for text in Data.text])
Data['wordcount'] = temp[0].values
Data['NPositiveWords'] = temp[1].values
Data['NNegativeWords'] = temp[2].values
# Sentiment score: net positive share of total words, in percent.
Data['sentiment'] = (Data['NPositiveWords'] - Data['NNegativeWords']) / Data['wordcount'] * 100
Data['Poswords'] = temp[3].values
Data['Negwords'] = temp[4].values
temp.head()
Data.head()
```
### Plots of the sentiment analysis
Plot positive and negative word counts
```
# Net sentiment = positive word count minus negative word count per statement.
NetSentiment = Data['NPositiveWords'] - Data['NNegativeWords']
plt.figure(figsize=(15,7))
ax = plt.subplot()
# Positive counts plotted above zero, negative counts mirrored below zero.
plt.plot(Data.index, Data['NPositiveWords'], c='green', linewidth= 1.0)
plt.plot(Data.index, Data['NNegativeWords']*-1, c='red', linewidth=1.0)
plt.plot(Data.index, NetSentiment, c='grey', linewidth=1.0)
plt.title('The number of positive/negative words in statement', fontsize=16)
plt.legend(['Positive Words', 'Negative Words', 'Net Sentiment'], prop={'size': 14}, loc=1)
# Shade under the net-sentiment curve: green where positive, red where not.
ax.fill_between(Data.index, NetSentiment, where=(NetSentiment > 0), color='green', alpha=0.3, interpolate=True)
ax.fill_between(Data.index, NetSentiment, where=(NetSentiment <= 0), color='red', alpha=0.3, interpolate=True)
import matplotlib.dates as mdates
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# Expand x-limits to whole years (assumes Data.index holds datetimes -- TODO confirm)
datemin = np.datetime64(Data.index[0], 'Y')
datemax = np.datetime64(Data.index[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
ax.grid(True)
plt.show()
```
Positive and negative word counts are highly correlated, probably because the total number of words varies between statements. Take positive minus negative counts as the Net Sentiment.
```
# Plot just the net sentiment series on its own axes.
NetSentiment = Data['NPositiveWords'] - Data['NNegativeWords']
fig, ax = plt.subplots(figsize=(15,7))
ax.plot(Data.index, NetSentiment,
        c = 'red',
        linewidth= 1.0)
plt.title('Net sentiment implied by BoW over time',size = 'medium')
# format the ticks
# round to nearest years.
# NOTE: `mdates` comes from the import in an earlier cell.
datemin = np.datetime64(Data.index[0], 'Y')
datemax = np.datetime64(Data.index[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
plt.show()
```
Plot derivative to see the changes in net sentiment
```
# Relative change in net sentiment between consecutive statements.
# NOTE(review): (shift(1) - current) / current is the NEGATIVE of the usual
# percent change ((current - previous) / previous), and it divides by the
# current value, which emits inf/NaN whenever NetSentiment is 0 -- confirm
# this is intended before relying on the sign of this series.
firstderivative = (NetSentiment.shift(1) - NetSentiment) / NetSentiment
fig, ax = plt.subplots(figsize=(15,7))
ax.plot(Data.index, firstderivative,
        c = 'red')
plt.title('Change in sentiment over time (first derivative)')
# format the ticks
# round to nearest years.
datemin = np.datetime64(Data.index[0], 'Y')
datemax = np.datetime64(Data.index[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
plt.show()
# Normalize data
# Scale each count by (mean word count / statement word count) so that
# statements of different lengths become comparable.
NPositiveWordsNorm = Data['NPositiveWords'] / Data['wordcount'] * np.mean(Data['wordcount'])
NNegativeWordsNorm = Data['NNegativeWords'] / Data['wordcount'] * np.mean(Data['wordcount'])
NetSentimentNorm = (NPositiveWordsNorm - NNegativeWordsNorm)
fig, ax = plt.subplots(figsize=(15,7))
# First series via the axes object, second via the pyplot state machine;
# both land on the same (current) axes.
ax.plot(Data.index, NPositiveWordsNorm, c='green', linewidth= 1.0)
plt.plot(Data.index, NNegativeWordsNorm, c='red', linewidth=1.0)
plt.title('Counts normalized by the number of words', fontsize=16)
plt.legend(['Count of Positive Words', 'Count of Negative Words'],
           prop={'size': 14},
           loc = 1
           )
# format the ticks
# round to nearest years.
import matplotlib.dates as mdates
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
datemin = np.datetime64(Data.index[0], 'Y')
datemax = np.datetime64(Data.index[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
plt.show()
# Plot the length-normalized net sentiment series.
fig, ax = plt.subplots(figsize=(15,7))
ax.plot(Data.index, NetSentimentNorm, c='red', linewidth=1.0)
plt.title('Net sentiment implied by BoW over time',size = 'medium')
# format the ticks
# round to nearest years.
datemin = np.datetime64(Data.index[0], 'Y')
datemax = np.datetime64(Data.index[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
plt.show()
```
### Function for extracting the direction of the rate change (hike, keep, lower)
Gets Basis Points Move and End Rate Value for Fed Rate and Discount Rate as well as Preferred Fed Rate Move from Statement Text
```
import nltk.data
# Pre-trained Punkt sentence tokenizer for English; requires the NLTK
# 'punkt' data package to be available locally.
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
def ExtractKeyValues(text):
    '''
    Extract the rate decision and target rate from an FOMC statement text.

    First, the text is tokenized into sentences and the fed funds target
    rate sentences are extracted.
    Second, the decision whether the fed target rate was hiked (1), kept (0)
    or lowered (-1) is retrieved into the RateHike variable.
    Third, the target rate range is extracted and saved in TargetRange
    (the max of the range is extracted as it conveys the most info).

    Returns (RateHike, TargetRange); TargetRange is None when no digit
    appears near the word 'percent' in the decision sentences.
    '''
    Potentials = []  # for storing the potential decision sentences
    # Replace literal "\n" sequences with dots so they act as sentence breaks
    text = text.replace('\\n', '. ')
    # Split the text into a list of sentences
    paragraph_sentence_list = tokenizer.tokenize(text)
    # Find the sentences containing the decision: must mention the federal
    # funds target and contain at least one digit.
    for Sentence in paragraph_sentence_list:
        Sentence = Sentence.lower()
        if 'federal funds ' in Sentence and 'target' in Sentence:
            if bool(re.search(r'\d', Sentence)):  # raw string: "\d" is an invalid escape
                Potentials.append(Sentence)
    Potentials = ''.join(Potentials)
    # Decision words related to raising rates (str.find returns -1 if absent)
    r = Potentials.find('rais')
    i = Potentials.find('increas')
    upscore = max([r, i])
    # Decision words related to keeping rates
    k = Potentials.find('keep')
    m = Potentials.find('maintain')
    r = Potentials.find('remain')
    c = Potentials.find('current')
    keepscore = max([r, k, m, c])
    # Decision words related to decreasing rates
    l = Potentials.find('lower')
    d = Potentials.find('decreas')
    lowerscore = max([l, d])
    Summary = {1: upscore,
               0: keepscore,
               -1: lowerscore
               }
    # NOTE(review): max over find() indices selects the keyword whose first
    # occurrence appears FURTHEST into the text (the original comment said
    # "first"); and when no keyword is found at all, every score is -1 and
    # the tie resolves to 1 (hike) by dict insertion order -- confirm intent.
    RateHike = max(Summary.keys(), key=(lambda x: Summary[x]))

    # For extracting the target range
    def RangeReader(Nums):
        # Parse a rate or rate range such as "1/4", "1 1/2" or "1-1/4 to 1-1/2".
        def FractionReader(Fraction):
            '''
            Convert a (possibly mixed) fraction string to a float.
            '''
            i = 0
            if '/' in Fraction:
                # Mixed numbers may use a space, a hyphen, or a non-breaking
                # hyphen between whole part and fraction: "1 1/2", "1-1/2", "1‑1/2".
                if ' ' in Fraction:
                    i, Fraction = Fraction.split(' ')
                if '-' in Fraction:
                    i, Fraction = Fraction.split('-')
                if '‑' in Fraction:
                    i, Fraction = Fraction.split('‑')
                N, D = Fraction.split('/')
                return float(i) + float(N) / float(D)
            else:
                return float(i) + float(Fraction)
        # Split the range based on ' to ' or '-'
        Splitters = [' to ', '-']
        for Splitter in Splitters:
            if Splitter in Nums:
                TargetRange = Nums.split(Splitter)
                return FractionReader(TargetRange[0]), FractionReader(TargetRange[1])
        # If there was no splitter in the range (single value, not a range)
        return FractionReader(Nums)

    # Take the 16 characters immediately before the word 'percent' as the
    # candidate rate text.
    Rate = Potentials[: Potentials.find('percent') - 1]
    Rate = Rate[-16:]
    if re.search(r"\d", Rate):
        # Trim leading non-digit characters before parsing
        Rate = Rate[re.search(r"\d", Rate).start():]
        TargetRange = np.max(RangeReader(Rate))
    else:
        Rate = None
        TargetRange = None
    return RateHike, TargetRange
```
Appends the decision dummy, the rate moves, and the end-rate values for the fed funds and discount rates; also appends the preferred fed rate move, while measuring the speed of the extraction
```
Data.tail()
#Data.drop(index=[pd.to_datetime("2020-03-03")], inplace=True)
```
Tried to extract the rate and decision from the statement text, but it sometimes fails. Debugging takes time, so we switch to using the FRED rate data instead of parsing the text.
```
# import timeit
# start = timeit.timeit()
# #Append fed funds rates and decisions to dataframe
# Hikes = []
# TargetRate = []
# for i in range(len(Data)):
# if Data.iloc[i,1] == True:
# Hikes.append(np.nan)
# TargetRate.append(np.nan)
# else:
# Hikes.append( ExtractKeyValues( Data.iloc[i,0] )[0] )
# TargetRate.append( ExtractKeyValues( Data.iloc[i,0] )[1] )
# Data['RateDecision'] = Hikes
# Data['FedFundRateEndValue'] = TargetRate
# end = timeit.timeit()
# print (str ( end - start ) + ' seconds elapsed for ' + str(i) + ' statements')
# Data.tail()
# The FRB switched from a single target rate to a target range in 2008,
# so use the upper bound (DFEDTARU) from 2008 onward.
filename_till08 = '../data/MarketData/Quandl/FRED_DFEDTAR.csv'
filename_from08u = '../data/MarketData/Quandl/FRED_DFEDTARU.csv'
filename_from08l = '../data/MarketData/Quandl/FRED_DFEDTARL.csv'
# Each CSV has two columns: the date and the target rate on that day.
fedtar = pd.read_csv(filename_till08, names=['Date', 'Rate'], header=0)
fedtaru = pd.read_csv(filename_from08u, names=['Date', 'Rate'], header=0)
fedtarl = pd.read_csv(filename_from08l, names=['Date', 'Rate'], header=0)  # lower bound, loaded for reference only
# Stack the pre-2008 single target and the post-2008 upper bound into one daily series.
fedrate_df = pd.concat([fedtar, fedtaru], axis=0)
fedrate_df.index = pd.to_datetime(fedrate_df.Date, format="%Y-%m-%d")
fedrate_df.drop(columns=['Date'], inplace=True)
# np.float was deprecated in NumPy 1.20 and removed in 1.24; cast with the
# builtin float via astype instead of mapping np.float over every element.
fedrate_df['Rate'] = fedrate_df['Rate'].astype(float)
fedrate_df
fig, ax = plt.subplots(figsize=(15,7))
ax.plot(fedrate_df.index, fedrate_df['Rate'].values, c = 'green', linewidth= 1.0)
ax.grid(True)
plt.show()
# For each statement date, record the fed funds rate on the day after the
# meeting (j+1) and compare it with the day before (j-1) to label the
# decision: 1 = hike, 0 = unchanged, -1 = cut.
Data['RateDecision'] = None
Data['Rate'] = None
rates = fedrate_df['Rate']
for date in Data.index:
    if date not in fedrate_df.index:
        continue
    # Positional location of the meeting day in the daily rate series.
    # NOTE: assumes fedrate_df has unique dates — TODO confirm.
    j = fedrate_df.index.get_loc(date)
    # The original used chained indexing (Data['Rate'][i] = ...), which raises
    # SettingWithCopyWarning and can silently write to a copy; use .loc.
    # Likewise Series[j+1] on a DatetimeIndex relied on the deprecated
    # positional-fallback indexing (removed in pandas 2.0); .iloc is explicit.
    after = float(rates.iloc[j + 1])
    before = float(rates.iloc[j - 1])
    Data.loc[date, 'Rate'] = after
    if before == after:
        Data.loc[date, 'RateDecision'] = 0
    elif before < after:
        Data.loc[date, 'RateDecision'] = 1
    else:
        Data.loc[date, 'RateDecision'] = -1
Data.head(10)
def pickle_dump(df, filename='no-filename.pickle'):
    """Serialize *df* to *filename* with pickle; do nothing when filename is falsy."""
    if not filename:
        return
    with open(filename, "wb") as fh:
        pickle.dump(df, fh)
```
### Plot the results and compare to the economical uncertainty / systemic risk periods
```
#Dot-com bubble
#https://en.wikipedia.org/wiki/Dot-com_bubble
DotCom = np.logical_and(Data.index > '2000-03', Data.index < '2002-10')
#Financial crisis of 2007–2008
#https://en.wikipedia.org/wiki/Financial_crisis_of_2007%E2%80%932008
FinCrisis = np.logical_and(Data.index > '2007-04', Data.index < '2009-03')
#European debt crisis
#https://en.wikipedia.org/wiki/European_debt_crisis
EuroDebt = np.logical_and(Data.index > '2010-09', Data.index < '2012-09')
#2015–16 Chinese stock market turbulence
#https://en.wikipedia.org/wiki/2015%E2%80%9316_Chinese_stock_market_turbulence
Asian = np.logical_and(Data.index > '2015-01', Data.index < '2016-06')
#2020- Covid-19 Pandemic
#https://en.wikipedia.org/wiki/COVID-19_pandemic
Covid = np.logical_and(Data.index > '2020-02', Data.index < '2021-12')
Recessions = np.logical_or.reduce((DotCom, FinCrisis, EuroDebt, Asian, Covid))
Window = 16
CompToMA = NetSentimentNorm.rolling(Window).mean()
fig, ax = plt.subplots(figsize=(15,7))
ax.plot(Data.index, CompToMA, c = 'r', linewidth= 2)
ax.plot(Data.index, NetSentimentNorm, c = 'green', linewidth= 1, alpha = 0.5)
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# round to nearest years.
datemin = np.datetime64(Data.index[0], 'Y')
datemax = np.datetime64(Data.index[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
plt.title( str('Moving average of last ' + str(Window) + ' statements (~2 Year Window) seems to match with periods of economic uncertainty'))
ax.legend([str(str(Window) + ' statement MA'), 'Net sentiment of individual statements'],
prop={'size': 16},
loc = 2
)
import matplotlib.transforms as mtransforms
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
theta = 0.9
ax.fill_between(Data.index, 0, 10, where = Recessions,
facecolor='darkblue', alpha=0.4, transform=trans)
# Add text
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.21, 0.15, "Dot Com Bubble", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
ax.text(0.46, 0.15, "Financial Crisis", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
ax.text(0.60, 0.15, "EU Debt Crisis", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
ax.text(0.76, 0.15, "China Crisis", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
ax.text(0.94, 0.15, "Covid-19", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
plt.show()
# Speaker window: boolean masks over Data.index for each Fed chair's tenure.
Greenspan = np.logical_and(Data.index > '1987-08-11', Data.index < '2006-01-31')
Bernanke = np.logical_and(Data.index > '2006-02-01', Data.index < '2014-01-31')
Yellen = np.logical_and(Data.index > '2014-02-03', Data.index < '2018-02-03')
Powell = np.logical_and(Data.index > '2018-02-05', Data.index < '2022-02-05')
# Only every other chair (Greenspan, Yellen) is shaded so adjacent tenures
# alternate shaded/unshaded in the fill below — presumably intentional for
# visual separation; confirm before "fixing" by adding Bernanke/Powell.
Speaker = np.logical_or.reduce((Greenspan, Yellen))
# Moving Average of the normalized net sentiment over the last `Window` statements.
Window = 8
CompToMA = NetSentimentNorm.rolling(Window).mean()
# Plotting Data
fig, ax = plt.subplots(figsize=(15,7))
plt.title('Sentiment goes down before monetary easing', fontsize=16)
ax.scatter(Data.index, Data['Rate']*3, c = 'g')
ax.plot(Data.index, CompToMA, c = 'r', linewidth= 2.0)
ax.plot(Data.index, NetSentimentNorm, c = 'green', linewidth= 1, alpha = 0.5)
ax.legend([str(str(Window) + ' statements moving average'),
'Net sentiment of individual statements',
'Fed Funds Rate'], prop={'size': 14}, loc = 1)
# Format X-axis
import matplotlib.dates as mdates
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# Set X-axis and Y-axis range
datemin = np.datetime64(Data.index[18], 'Y')
datemax = np.datetime64(Data.index[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
ax.set_ylim(-10,30)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
ax.tick_params(axis='both', which='major', labelsize=12)
# Fill speaker
import matplotlib.transforms as mtransforms
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
theta = 0.9
ax.fill_between(Data.index, 0, 10, where = Speaker, facecolor='lightblue', alpha=0.5, transform=trans)
# Add text
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.13, 0.75, "Alan Greenspan", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
ax.text(0.46, 0.75, "Ben Bernanke", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
ax.text(0.73, 0.75, "Janet Yellen", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
ax.text(0.88, 0.75, "Jerome Powell", transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props)
# Add annotations
arrow_style = dict(facecolor='black', edgecolor='white', shrink=0.05)
ax.annotate('QE1', xy=('2008-11-25', 0), xytext=('2008-11-25', -4), size=12, ha='right', arrowprops=arrow_style)
ax.annotate('QE1+', xy=('2009-03-18', 0), xytext=('2009-03-18', -6), size=12, ha='center', arrowprops=arrow_style)
ax.annotate('QE2', xy=('2010-11-03', 0), xytext=('2010-11-03', -4), size=12, ha='center', arrowprops=arrow_style)
ax.annotate('QE2+', xy=('2011-09-21', 0), xytext=('2011-09-21', -4.5), size=12, ha='center', arrowprops=arrow_style)
ax.annotate('QE2+', xy=('2012-06-20', 0), xytext=('2012-06-20', -6.5), size=12, ha='right', arrowprops=arrow_style)
ax.annotate('QE3', xy=('2012-09-13', 0), xytext=('2012-09-13', -8), size=12, ha='center', arrowprops=arrow_style)
ax.annotate('Tapering', xy=('2013-12-18', 0), xytext=('2013-12-18', -8), size=12, ha='center', arrowprops=arrow_style)
plt.show()
fig, ax = plt.subplots(figsize=(15,7))
Count = Data['wordcount']
ax.plot(Data.index, Count,
c = 'red',
linewidth= 1.5)
plt.title('Count of words per statement over time')
import matplotlib.dates as mdates
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
# format the ticks
# round to nearest years.
datemin = np.datetime64(Data.index[0], 'Y')
datemax = np.datetime64(Data.index[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
import matplotlib.transforms as mtransforms
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
theta = 0.9
ax.fill_between(Data.index, 0, 10, where = Speaker,
facecolor='lightblue', alpha=0.5, transform=trans)
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.05, 0.95, "Alan Greenspan", transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax.text(0.50, 0.95, "Ben Bernanke", transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax.text(0.77, 0.95, "Janet Yellen", transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
ax.text(0.90, 0.95, "Jerome Powell", transform=ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
plt.show()
```
| github_jupyter |
## http://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html
```
%matplotlib inline
# subset of http://scikit-learn.org/stable/_downloads/plot_lle_digits.py
# see Kyle Kastner at https://youtu.be/r-1XJBHot58?t=1335
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
import matplotlib
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0) # make plot larger in notebook
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from matplotlib import offsetbox
import numpy as np
digits = load_digits(n_class=6)
X = digits.data
y = digits.target
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row , 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix +8 , iy:iy+8] = X[ i * n_img_per_row +j ].reshape((8,8))
plt.imshow(img,cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits datast')
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
"""Scatter-plot a 2-D embedding, drawing each point as its digit label.

Min-max scales X to [0, 1], writes each sample's class label at its
embedded position, and (when matplotlib supports AnnotationBbox) overlays
digit-image thumbnails for points that are not too close to one another.
NOTE(review): relies on the notebook globals `digits` and `y`; assumes
X has shape (n_samples, 2) — confirm at call sites.
"""
# Normalize each embedding dimension to [0, 1] so text positions are comparable.
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
# Draw every sample as its digit label, colored by class.
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
# Seed far outside [0,1]^2 so the first real point is always shown.
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
# Squared distance from this point to every thumbnail already drawn.
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
# Hide axis ticks: the embedded coordinates are unitless.
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
plt.show()
X_tsne = TSNE(n_components=2, init="pca", random_state=1999).fit_transform(X)
plot_embedding(X_tsne, title="TSNE_embedding")
X_pca = PCA(n_components=2).fit_transform(X)
plot_embedding(X_pca, title="PCA_embedding")
```
| github_jupyter |
```
import pandas as pd
import logging
import glob
from sklearn.model_selection import train_test_split
pd.set_option('display.max_colwidth', 500)
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
```
# Download Data
```
# Ensure that the github-issues-data volume is mounted in /mnt
!ls -la /mnt
# Set path for data dir
%env DATA_DIR=/mnt/github-issues-data
# Download the github-issues.zip training data to /mnt/github-issues-data
!wget --directory-prefix=${DATA_DIR} https://storage.googleapis.com/kubeflow-examples/github-issue-summarization-data/github-issues.zip
# Unzip the file into /mnt/github-issues-data directory
!unzip ${DATA_DIR}/github-issues.zip -d ${DATA_DIR}
# Create a symlink from <current_directory>/github-issues-data to /mnt/github-issues-data
!ln -sf ${DATA_DIR} github-issues-data
# Make sure that the github-issues-data symlink is created
!ls -lh github-issues-data/github_issues.csv
```
# Process Data
Split data into train and test set and preview data
```
data_file='github-issues-data/github_issues.csv'
# read in data sample 2000 rows (for speed of tutorial)
# Set this to False to train on the entire dataset
use_sample_data=True
if use_sample_data:
training_data_size=2000
traindf, testdf = train_test_split(pd.read_csv(data_file).sample(n=training_data_size),
test_size=.10)
else:
traindf, testdf = train_test_split(pd.read_csv(data_file),test_size=.10)
#print out stats about shape of data
print(f'Train: {traindf.shape[0]:,} rows {traindf.shape[1]:,} columns')
print(f'Test: {testdf.shape[0]:,} rows {testdf.shape[1]:,} columns')
# preview data
traindf.head(3)
```
**Convert to lists in preparation for modeling**
```
train_body_raw = traindf.body.tolist()
train_title_raw = traindf.issue_title.tolist()
#preview output of first element
train_body_raw[0]
```
# Pre-Process Data For Deep Learning
See [this repo](https://github.com/hamelsmu/ktext) for documentation on the ktext package
```
%reload_ext autoreload
%autoreload 2
from ktext.preprocess import processor
%%time
# Clean, tokenize, and apply padding / truncating such that each document length = 70
# also, retain only the top 8,000 words in the vocabulary and set the remaining words
# to 1 which will become common index for rare words
body_pp = processor(keep_n=8000, padding_maxlen=70)
train_body_vecs = body_pp.fit_transform(train_body_raw)
```
#### Look at one example of processed issue bodies
```
print('\noriginal string:\n', train_body_raw[0], '\n')
print('after pre-processing:\n', train_body_vecs[0], '\n')
# Instantiate a text processor for the titles, with some different parameters
# append_indicators = True appends the tokens '_start_' and '_end_' to each
# document
# padding = 'post' means that zero padding is appended to the end of the
# of the document (as opposed to the default which is 'pre')
title_pp = processor(append_indicators=True, keep_n=4500,
padding_maxlen=12, padding ='post')
# process the title data
train_title_vecs = title_pp.fit_transform(train_title_raw)
```
#### Look at one example of processed issue titles
```
print('\noriginal string:\n', train_title_raw[0])
print('after pre-processing:\n', train_title_vecs[0])
```
Serialize all of this to disk for later use
```
import dill as dpickle
import numpy as np
# Save the preprocessor
with open('body_pp.dpkl', 'wb') as f:
dpickle.dump(body_pp, f)
with open('title_pp.dpkl', 'wb') as f:
dpickle.dump(title_pp, f)
# Save the processed data
np.save('train_title_vecs.npy', train_title_vecs)
np.save('train_body_vecs.npy', train_body_vecs)
```
# Define Model Architecture
### Load the data from disk into variables
```
from seq2seq_utils import load_decoder_inputs, load_encoder_inputs, load_text_processor
encoder_input_data, doc_length = load_encoder_inputs('train_body_vecs.npy')
decoder_input_data, decoder_target_data = load_decoder_inputs('train_title_vecs.npy')
num_encoder_tokens, body_pp = load_text_processor('body_pp.dpkl')
num_decoder_tokens, title_pp = load_text_processor('title_pp.dpkl')
```
### Define Model Architecture
```
%matplotlib inline
from keras.models import Model
from keras.layers import Input, LSTM, GRU, Dense, Embedding, Bidirectional, BatchNormalization
from keras import optimizers
#arbitrarly set latent dimension for embedding and hidden units
latent_dim = 300
##### Define Model Architecture ######
########################
#### Encoder Model ####
encoder_inputs = Input(shape=(doc_length,), name='Encoder-Input')
# Word embeding for encoder (ex: Issue Body)
x = Embedding(num_encoder_tokens, latent_dim, name='Body-Word-Embedding', mask_zero=False)(encoder_inputs)
x = BatchNormalization(name='Encoder-Batchnorm-1')(x)
# Intermediate GRU layer (optional)
#x = GRU(latent_dim, name='Encoder-Intermediate-GRU', return_sequences=True)(x)
#x = BatchNormalization(name='Encoder-Batchnorm-2')(x)
# We do not need the `encoder_output` just the hidden state.
_, state_h = GRU(latent_dim, return_state=True, name='Encoder-Last-GRU')(x)
# Encapsulate the encoder as a separate entity so we can just
# encode without decoding if we want to.
encoder_model = Model(inputs=encoder_inputs, outputs=state_h, name='Encoder-Model')
seq2seq_encoder_out = encoder_model(encoder_inputs)
########################
#### Decoder Model ####
decoder_inputs = Input(shape=(None,), name='Decoder-Input') # for teacher forcing
# Word Embedding For Decoder (ex: Issue Titles)
dec_emb = Embedding(num_decoder_tokens, latent_dim, name='Decoder-Word-Embedding', mask_zero=False)(decoder_inputs)
dec_bn = BatchNormalization(name='Decoder-Batchnorm-1')(dec_emb)
# Set up the decoder, using `decoder_state_input` as initial state.
decoder_gru = GRU(latent_dim, return_state=True, return_sequences=True, name='Decoder-GRU')
decoder_gru_output, _ = decoder_gru(dec_bn, initial_state=seq2seq_encoder_out)
x = BatchNormalization(name='Decoder-Batchnorm-2')(decoder_gru_output)
# Dense layer for prediction
decoder_dense = Dense(num_decoder_tokens, activation='softmax', name='Final-Output-Dense')
decoder_outputs = decoder_dense(x)
########################
#### Seq2Seq Model ####
#seq2seq_decoder_out = decoder_model([decoder_inputs, seq2seq_encoder_out])
seq2seq_Model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
seq2seq_Model.compile(optimizer=optimizers.Nadam(lr=0.001), loss='sparse_categorical_crossentropy')
```
** Examine Model Architecture Summary **
```
from seq2seq_utils import viz_model_architecture
seq2seq_Model.summary()
viz_model_architecture(seq2seq_Model)
```
# Train Model
```
from keras.callbacks import CSVLogger, ModelCheckpoint
script_name_base = 'tutorial_seq2seq'
csv_logger = CSVLogger('{:}.log'.format(script_name_base))
model_checkpoint = ModelCheckpoint('{:}.epoch{{epoch:02d}}-val{{val_loss:.5f}}.hdf5'.format(script_name_base),
save_best_only=True)
batch_size = 1200
epochs = 7
history = seq2seq_Model.fit([encoder_input_data, decoder_input_data], np.expand_dims(decoder_target_data, -1),
batch_size=batch_size,
epochs=epochs,
validation_split=0.12, callbacks=[csv_logger, model_checkpoint])
#save model
seq2seq_Model.save('seq2seq_model_tutorial.h5')
```
# See Example Results On Holdout Set
It is useful to see examples of real predictions on a holdout set to get a sense of the performance of the model. We will also evaluate the model numerically in a following section.
```
from seq2seq_utils import Seq2Seq_Inference
seq2seq_inf = Seq2Seq_Inference(encoder_preprocessor=body_pp,
decoder_preprocessor=title_pp,
seq2seq_model=seq2seq_Model)
# this method displays the predictions on random rows of the holdout set
seq2seq_inf.demo_model_predictions(n=50, issue_df=testdf)
```
# Evaluate Model: BLEU Score
For machine-translation tasks such as this one, it is common to measure the accuracy of results using the [BLEU Score](https://en.wikipedia.org/wiki/BLEU). The convenience function illustrated below uses [NLTK's corpus_bleu](https://www.nltk.org/api/nltk.translate.html#nltk.translate.bleu_score.corpus_bleu). The output of the below convenience function is an average of BLEU-1, BLEU-2, BLEU-3 and BLEU-4.
```
#convenience function that generates predictions on holdout set and calculates BLEU Score
bleu_score = seq2seq_inf.evaluate_model(holdout_bodies=testdf.body.tolist(),
holdout_titles=testdf.issue_title.tolist(),
max_len_title=12)
print(f'BLEU Score (avg of BLUE 1-4) on Holdout Set: {bleu_score * 100}')
```
| github_jupyter |
# MNIST Image Classification with TensorFlow
This notebook demonstrates how to implement a simple linear image models on MNIST using Estimator.
<hr/>
This <a href="mnist_models.ipynb">companion notebook</a> extends the basic harness of this notebook to a variety of models including DNN, CNN, dropout, pooling etc.
```
import numpy as np
import shutil
import os
import tensorflow as tf
print(tf.__version__)
```
## Exploring the data
Let's download MNIST data and examine the shape. We will need these numbers ...
```
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('mnist/data', one_hot=True, reshape=False)
print(mnist.train.images.shape)
print(mnist.train.labels.shape)
HEIGHT=28
WIDTH=28
NCLASSES=10
import matplotlib.pyplot as plt
IMGNO=12
plt.imshow(mnist.test.images[IMGNO].reshape(HEIGHT, WIDTH));
```
## Define the model.
Let's start with a very simple linear classifier. All our models will have this basic interface -- they will take an image and return logits.
```
def linear_model(img):
# Tutorial stub: meant to flatten `img` and apply a single dense layer,
# returning (logits, number_of_classes).
#TODO
# NOTE(review): `ylogits` is never assigned until the TODO is completed,
# so calling this function as-is raises NameError.
return ylogits, NCLASSES
```
## Write Input Functions
As usual, we need to specify input functions for training, evaluation, and prediction.
```
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'image':mnist.train.images},
y=mnist.train.labels,
batch_size=100,
num_epochs=None,
shuffle=True,
queue_capacity=5000
)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
#TODO
)
def serving_input_fn():
# Serving-time input: a batch of HEIGHT x WIDTH float32 images under the
# 'image' key (TF1 placeholder API); features are passed through unchanged.
inputs = {'image': tf.placeholder(tf.float32, [None, HEIGHT, WIDTH])}
features = inputs # as-is
return tf.estimator.export.ServingInputReceiver(features, inputs)
```
## Write Custom Estimator
I could have simply used a canned LinearClassifier, but later on, I will want to use different models, and so let's write a custom estimator
```
def image_classifier(features, labels, mode, params):
"""Custom Estimator model_fn: wraps linear_model and builds the EstimatorSpec
for TRAIN / EVAL / PREDICT modes. `params` must contain 'learning_rate'."""
ylogits, nclasses = linear_model(features['image'])
# Class probabilities and hard predictions derived from the logits.
probabilities = tf.nn.softmax(ylogits)
classes = tf.cast(tf.argmax(probabilities, 1), tf.uint8)
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
# Labels are one-hot (see read_data_sets(one_hot=True)), so use the
# softmax cross-entropy form and argmax them for the accuracy metric.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=ylogits, labels=labels))
evalmetrics = {'accuracy': tf.metrics.accuracy(classes, tf.argmax(labels, 1))}
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = tf.contrib.layers.optimize_loss(loss, tf.train.get_global_step(),
learning_rate=params['learning_rate'], optimizer="Adam")
else:
train_op = None
else:
# PREDICT mode: no labels available, so no loss/metrics/training op.
loss = None
train_op = None
evalmetrics = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities, "classes": classes},
loss=loss,
train_op=train_op,
eval_metric_ops=evalmetrics,
export_outputs={'classes': tf.estimator.export.PredictOutput({"probabilities": probabilities, "classes": classes})}
)
```
tf.estimator.train_and_evaluate does distributed training.
```
def train_and_evaluate(output_dir, hparams):
"""Run distributed train-and-evaluate for the image classifier.

output_dir: directory for checkpoints and exported models.
hparams: dict with at least 'train_steps' and 'learning_rate'.
"""
estimator = tf.estimator.Estimator(model_fn = image_classifier,
params = hparams,
model_dir = output_dir)
train_spec = tf.estimator.TrainSpec(input_fn = train_input_fn,
max_steps = hparams['train_steps'])
# Export the latest checkpoint in SavedModel form for serving.
exporter = tf.estimator.LatestExporter('Servo', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(input_fn = eval_input_fn,
steps = None,
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
```
This is the main() function
```
OUTDIR='mnist/learned'
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
hparams = {'train_steps': 1000, 'learning_rate': 0.01}
train_and_evaluate(OUTDIR, hparams)
```
What accuracy did you achieve?
<pre>
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
</pre>
| github_jupyter |
# Experiment Management for Hyperparameter Tuning Jobs
Demonstrates how to associate trial components created by a hyperparameter tuning job with an experiment management trial.
Prerequisite - hyperparameter tuning job has already been created.
## Steps
1. retrieves the most recently created tuning job
1. creates an experiment or retrieve an existing one
1. creates a trial or retrieve an existing one
1. retrieve all the training jobs created by the tuning job
1. retrieve all the trial components created by those training jobs
1. associate the trial components with the trial
_Testing using SageMaker Studio with the `Python 3(Data Science)` kernel._
```
import sys
!{sys.executable} -m pip install sagemaker-experiments==0.1.25
import time
from datetime import timezone
import boto3
from sagemaker import HyperparameterTuningJobAnalytics, Session
from smexperiments.experiment import Experiment
from smexperiments.search_expression import Filter, Operator, SearchExpression
from smexperiments.trial import Trial
from smexperiments.trial_component import TrialComponent
sess = boto3.Session()
sm = sess.client("sagemaker")
sagemaker_session = Session(sess)
# get the most recently created tuning job
list_tuning_jobs_response = sm.list_hyper_parameter_tuning_jobs(
SortBy="CreationTime", SortOrder="Descending"
)
print(f'Found {len(list_tuning_jobs_response["HyperParameterTuningJobSummaries"])} tuning jobs.')
tuning_jobs = list_tuning_jobs_response["HyperParameterTuningJobSummaries"]
most_recently_created_tuning_job = tuning_jobs[0]
tuning_job_name = most_recently_created_tuning_job["HyperParameterTuningJobName"]
experiment_name = "example-experiment-with-tuning-jobs"
trial_name = tuning_job_name + "-trial"
print(f"Associate all training jobs created by {tuning_job_name} with trial {trial_name}")
# Load the experiment, creating it on first use.  Re-raise anything other
# than ResourceNotFound: the original swallowed such errors silently,
# leaving `experiment` undefined and causing a confusing NameError later.
try:
    experiment = Experiment.load(experiment_name=experiment_name)
except Exception as ex:
    if "ResourceNotFound" in str(ex):
        experiment = Experiment.create(experiment_name=experiment_name)
    else:
        raise
# Load the trial, creating it on first use.  Re-raise anything other than
# ResourceNotFound: the original swallowed such errors silently, leaving
# `trial` undefined and causing a confusing NameError later.
try:
    trial = Trial.load(trial_name=trial_name)
except Exception as ex:
    if "ResourceNotFound" in str(ex):
        trial = Trial.create(experiment_name=experiment_name, trial_name=trial_name)
    else:
        raise
# get the trial components derived from the training jobs
creation_time = most_recently_created_tuning_job["CreationTime"]
creation_time = creation_time.astimezone(timezone.utc)
creation_time = creation_time.strftime("%Y-%m-%dT%H:%M:%SZ")
created_after_filter = Filter(
name="CreationTime",
operator=Operator.GREATER_THAN_OR_EQUAL,
value=str(creation_time),
)
# the training job names contain the tuning job name (and the training job name is in the source arn)
source_arn_filter = Filter(
name="TrialComponentName", operator=Operator.CONTAINS, value=tuning_job_name
)
source_type_filter = Filter(
name="Source.SourceType", operator=Operator.EQUALS, value="SageMakerTrainingJob"
)
search_expression = SearchExpression(
filters=[created_after_filter, source_arn_filter, source_type_filter]
)
# search iterates over every page of results by default
trial_component_search_results = list(
TrialComponent.search(search_expression=search_expression, sagemaker_boto_client=sm)
)
print(f"Found {len(trial_component_search_results)} trial components.")
# associate the trial components with the trial
for tc in trial_component_search_results:
print(f"Associating trial component {tc.trial_component_name} with trial {trial.trial_name}.")
trial.add_trial_component(tc.trial_component_name)
# sleep to avoid throttling
time.sleep(0.5)
```
## Contact
Submit any questions or issues to https://github.com/aws/sagemaker-experiments/issues or mention @aws/sagemakerexperimentsadmin
| github_jupyter |
<img src="../logo_skmob.png" width=250 align="left"/>
# Trajectory models
Generative models of individual trajectories
```
import skmob
%matplotlib inline
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
```
## Exploration and Preferential Return EPR
### Density EPR
```
from skmob.models.epr import DensityEPR
tessellation = gpd.GeoDataFrame.from_file("data/NY_counties_2011.geojson")
start_time = pd.to_datetime('2019/01/01 08:00:00')
end_time = pd.to_datetime('2019/01/14 08:00:00')
depr = DensityEPR()
tdf = depr.generate(start_time, end_time, tessellation,
relevance_column='population',
n_agents=100, od_matrix=None, verbose=True)
print(type(tdf))
tdf.parameters
tdf.head()
from skmob.preprocessing import detection, clustering
dtdf = detection.stops(tdf)
ctdf = clustering.cluster(dtdf)
uid = 2
m = tdf[tdf['uid'] == uid].plot_trajectory(zoom=7)
ctdf[ctdf['uid'] == uid].plot_stops(map_f=m)
ax = ctdf.plot_diary(user=1)
```
Now that we have a TrajDataFrame, we can compute the individual and collective measures
```
from skmob.measures.individual import jump_lengths
from skmob.measures.individual import radius_of_gyration, waiting_times
jls = jump_lengths(tdf, merge=True)
jls[:5]
plt.hist(jls, bins=100)
plt.xlabel('jump length [km]', fontsize=20)
plt.loglog()
plt.show()
wts = waiting_times(tdf, merge=True)
wts[:5]
plt.hist(wts, bins=100)
plt.xlabel('waiting time [s]', fontsize=20)
plt.loglog()
plt.show()
rg_df = radius_of_gyration(tdf)
plt.hist(rg_df.radius_of_gyration.values, bins=10)
plt.xlabel('radius of gyration [km]', fontsize=20)
plt.show()
```
### DITRAS
```
from skmob.models.epr import Ditras
from skmob.models.markov_diary_generator import MarkovDiaryGenerator
from skmob.preprocessing import filtering, compression, detection, clustering
tdf = skmob.TrajDataFrame.from_file('./data/geolife_sample.txt.gz',
latitude='lat', longitude='lon', user_id='user',
datetime='datetime', sep=',')
ctdf = compression.compress(tdf)
stdf = detection.stops(ctdf)
cstdf = clustering.cluster(stdf)
cstdf[:5]
mdg = MarkovDiaryGenerator()
mdg.fit(cstdf, 2, lid='cluster')
start_time = pd.to_datetime('2019/01/01 08:00:00')
end_time = pd.to_datetime('2019/01/14 08:00:00')
ditras = Ditras(mdg)
tdf = ditras.generate(start_time, end_time, tessellation, relevance_column='population',
n_agents=3, od_matrix=None, verbose=True)
tdf.head()
dtdf = detection.stops(tdf)
ctdf = clustering.cluster(dtdf)
ax = ctdf.plot_diary(user=1)
```
| github_jupyter |
# Circuito cuántico para autoencoder
Propuesta para el hackathon es diseñar un circuito cuántico que pueda disminuir el número de variables de imagenes sin perder información y poder trabajar con menos qubits para una clasificación usando el conjunto de datos MNIST.
Se indican a continuación las dependencias necesarias para poder trabajar en este problema.
```
import numpy as np
# Bibliotecas necesarias de Qiskit
from qiskit import QuantumCircuit, transpile, Aer, IBMQ, execute, QuantumRegister, ClassicalRegister
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from qiskit.circuit import Parameter, ParameterVector
#Bliblioteca para la adquisición y preprocesamiento del conjunto MNIST.
import tensorflow as tf
#Bibliotecas para graficar
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
```
## Preprocesamiento
Inicializamos las carácteristicas de nuestro circuito cuántico que es el conjunto de datos MNIST, para esto nos apoyamos de tensorflow que ya tiene el conjunto de datos.
Cada conjunto tiene 10 clases : **[0,1,2,3,4,5,6,7,8,9]**,
y van de 0 a 255, por ello nosotros pasamos un proceso de **normalización que va de 0.0 a 1.0**, donde negro representa el fondo y el blanco y escala de gris a los números.
```
# Load MNIST and rescale pixel intensities from [0, 255] to [0.0, 1.0];
# np.newaxis appends the trailing channel axis expected by tf.image ops.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Rescale the images from [0,255] to the [0.0,1.0] range.
x_train, x_test = x_train[..., np.newaxis]/255.0, x_test[..., np.newaxis]/255.0
print("Imagenes del conjunto de entrenamiento:", len(x_train))
# Bug fix: this line reports the *test* set; the original label said
# "entrenamiento" (copy-paste error).
print("Imagenes del conjunto de prueba:", len(x_test))
def filter_01(x, y):
    """Keep only the samples whose label is 0 or 1.

    Both arrays are masked with the same boolean selector so features and
    labels stay aligned. Returns the filtered (x, y) pair.
    """
    mask = np.logical_or(y == 0, y == 1)
    return x[mask], y[mask]
# Restrict both splits to the binary {0, 1} classification task.
x_train, y_train = filter_01(x_train, y_train)
x_test, y_test = filter_01(x_test, y_test)
print("Imagenes del conjunto de entrenamiento:", len(x_train))
# Bug fix: this line reports the *test* set; the original label said
# "entrenamiento" (copy-paste error).
print("Imagenes del conjunto de prueba:", len(x_test))
```
Se representan las imagenes que tienen un tamaño original de 28x28
```
# Display the first training digit (28x28, single channel) with a colorbar.
plt.imshow(x_train[0, :, :, 0])
plt.colorbar()
```
### Reducción de la imagen
Tensorflow tiene el método *tf.image.resize* que disminuye las imagenes a partir de los siguientes posibles criteros:
<ul>
<li><b>bilinear</b>: Bilinear interpolation. If antialias is true, becomes a hat/tent filter function with radius 1 when downsampling.</li>
<li><b>lanczos3</b>: Lanczos kernel with radius 3. High-quality practical filter but may have some ringing, especially on synthetic images.</li>
<li><b>lanczos5</b>: Lanczos kernel with radius 5. Very-high-quality filter but may have stronger ringing.</li>
<li><b>bicubic</b>: Cubic interpolant of Keys. Equivalent to Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel, particularly when upsampling.</li>
<li><b>gaussian</b>: Gaussian kernel with radius 3, sigma = 1.5 / 3.0.</li>
<li><b>nearest</b>: Nearest neighbor interpolation. antialias has no effect when used with nearest neighbor interpolation.</li>
<li><b>area</b>: Anti-aliased resampling with area interpolation. antialias has no effect when used with area interpolation; it always anti-aliases.</li>
<li><b>mitchellcubic</b>: Mitchell-Netravali Cubic non-interpolating filter. For synthetic images (especially those lacking proper prefiltering), less ringing than Keys cubic kernel but less sharp.</li>
</ul>
Ejemplo de dicho preprocesamiento de reducción de datos se emplea a continuación con el método nearest y con el tamaño de imagen 8x8.
```
# Downsample every image from 28x28 to 8x8 with nearest-neighbour interpolation.
x_train_small = tf.image.resize(x_train, (8,8), method='nearest', preserve_aspect_ratio=True).numpy()
x_test_small = tf.image.resize(x_test, (8,8), method='nearest', preserve_aspect_ratio=True).numpy()
```
En este punto se tienen imagenes de tamaño 8x8 y se tienen que pasar como un estado de amplitud ya que $8x8 = 64$ y esto nos dará un vector de $2^6$ que recordando el valor 6 es el número de qubits a usar
```
# Show the downsampled 8x8 image, then flatten each image to a 64-vector
# (2**6 amplitudes, matching a 6-qubit state).
plt.imshow(x_train_small[0,:,:,0], vmin=0, vmax=1)
plt.colorbar()
x_train = x_train_small.reshape(len(x_train_small), 64)
x_test = x_test_small.reshape(len(x_test_small), 64)
x_train.shape,x_test.shape
x_train_small[0]
```
## De imagen a qubits
Por las capacidades que actualmente se usa CPU para el diseño de circuitos cuánticos en Qiskit, no es posible ocupar todo el conjunto de imagenes, se diseñaron 8 experimentos por lo menos desde 10 iteraciones hasta 50, ocupando las siguientes entradas:
<ul>
<li>Las 5 primeras imagenes del conjunto de entrenamiento.</li>
<li>Las 10 primeras imagenes del conjunto de entrenamiento.</li>
<li>Las 12 primeras imagenes del conjunto de entrenamiento.</li>
<li>Las 50 primeras imagenes del conjunto de entrenamiento.</li>
<li>Las 70 primeras imagenes del conjunto de entrenamiento.</li>
<li>Las 100 primeras imagenes del conjunto de entrenamiento.</li>
<li>Las 200 primeras imagenes del conjunto de entrenamiento.</li>
<li>Las 500 primeras imagenes del conjunto de entrenamiento.</li>
</ul>
```
# No-op reassignments kept from the notebook; shapes are unchanged.
x_train = (x_train)
x_test = (x_test)
x_train.shape,x_test.shape
```
Se pasa de la matriz de tamaño 8x8 a un vector de tamaño 64x1 por cada imagen
```
x_train[0]
```
Se eliminan las posibles imágenes que no tengan información, es decir, puros 0's, para el conjunto de entrenamiento
```
# Drop all-zero (blank) images from the training set. After each deletion
# k is rewound by one so the element shifted into slot k is re-examined.
k = 0
while k < len(x_train):
    a = x_train[k].copy()
    if np.sum(a) == 0.:
        print(k,x_train[k])
        # Delete the image and its label together to keep them aligned.
        x_train = np.delete(x_train, k, axis=0)
        y_train = np.delete(y_train, k, axis=0)
        k -= 1
    k+=1
```
Se eliminan las posibles imágenes que no tengan información, es decir, puros 0's, para el conjunto de prueba (testeo)
```
# Same blank-image removal as above, applied to the test set.
k = 0
while k < len(x_test):
    a = x_test[k].copy()
    if np.sum(a) == 0.:
        print(k,x_test[k])
        x_test = np.delete(x_test, k, axis=0)
        y_test = np.delete(y_test, k, axis=0)
        k -= 1
    k+=1
```
Ahora se aplicará una renormalización para poder interpretar cada imagen como un vector de estado y poder aplicarla en nuestro modelo de circuito cuántico, siguiendo el criterio:
$ \frac{vector-de-entrada}{\sqrt{\sum_{i=0}^{n-1} (vector-de-entrada_i)^2}}$,
donde vector-de-entrada es el vector de 64x1 que representa una imagen ya sea del conjunto de entrenamiento o de prueba con la finalidad de convertirse en un vector de estado $| \psi \rangle$.
```
import cmath
def Normalize(row):
    """L2-normalize *row* so it can serve as a quantum state vector.

    Returns row / ||row||_2. An all-zero input cannot be normalized, so a
    zero vector of the same shape is returned instead. (The original
    returned the scalar 0.0 here, which only worked because callers
    broadcast it back into an array slot; returning an array keeps the
    function's output shape consistent for any caller.)

    :param row: 1-D numpy array of amplitudes.
    :return: array of the same shape with unit Euclidean norm, or zeros.
    """
    norm = np.sqrt(np.sum(row ** 2))
    if norm == 0.:
        # Degenerate input: no direction to normalize; keep the shape stable.
        return np.zeros_like(row)
    return row / norm
# Normalize every image in place so each row is a valid unit state vector.
for i in range(len(x_train)):
    x_train[i] = Normalize(x_train[i])
for i in range(len(x_test)):
    x_test[i] = Normalize(x_test[i])
# Sanity check: squared amplitudes of the first training image sum to 1.
print("la suma de los estados de la imagen del conjunto de entrenamiento 0",np.sum(x_train[0]**2))
```
# Análisis y Diseño del Autoencoder
Para este trabajo se diseñará los circuitos necesarios para indicar el autoencoder en su versión cuántica.
Para este proceso se consideran 6 qubits que recordando equivalen al vector de estado 64x1, donde usaremos el mapeo por amplitud, y se ocupo por cuestiones de tiempo y recursos 1 sola capa por cada 1.
Más adelante se indicaran el circuito que se uso para esta aplicación pero requiere de 10 paramétros.
```
# 6 qubits encode a 2**6 = 64 amplitude state (one flattened 8x8 image);
# each ansatz layer uses 10 trainable Ry angles, initialized at random.
n=6
num_layers = 1
params = np.random.random(10*(num_layers))
```
Se valida que el vector de entrada con indice 0 este normalizado a un vector de estado
```
x_train[0]
```
Al ingresar el vector de estado como entrada a una función que genera un circuito cuántico de 6 qubits que usando el método
`initialize(vectorstate,qubits)` que genera la representación en términos de qubits.
Una observación de este método es que se puede variar el costo computacional por el tipo de método que se genere, y más si predominan estados con estados 0 de amplitud, generando que los equipos de computo de nuestro equipo fue limitado.
```
def input_data(n, inputs):
    """Return an n-qubit circuit (with 1 classical bit) whose quantum state
    is initialized to the normalized amplitude vector *inputs* (length 2**n).
    The trailing barrier visually separates state preparation from whatever
    is composed after it.
    """
    qc = QuantumCircuit(n, 1)
    qc.initialize(inputs, list(range(n)))
    qc.barrier()
    return qc
input_data(n,x_train[0]).draw(output="mpl")
```
En este proceso generamos el circuito variacional cuántico que representa el autoencoder cuántico, consideramos de apoyo el circuito cuántico propuesto en [1], pero algunos problemas daban por el costo computacional por lo que consideramos [2],[3],[4],[5] para generar a partir de varias propuestas de redes tensoriales como peude ser MERA, para nuestro trabajo y con ciertas modificaciones para dejarlo en 6 qubits.
```
def vqc(n, num_layers, params):
    """Variational ansatz: per layer, an Ry rotation on every qubit followed
    by a MERA-like pattern of CNOTs and three further Ry rotations, using
    10 trainable angles per layer.

    :param n: number of qubits (the CNOT wiring below assumes n == 6).
    :param num_layers: number of ansatz layers.
    :param params: flat array of 10*num_layers angle values to bind.
    :return: the parameter-bound QuantumCircuit.
    """
    # ParameterVectors are initialized with a string identifier and a length.
    parameters = ParameterVector('θ', 10 * num_layers)
    circuit = QuantumCircuit(n, 1)
    for layer in range(num_layers):
        # Bug fix: each layer must consume its own block of 10 angles.
        # The original indexed parameters[layer + i] (and 6+layer, ...),
        # which makes the layers' parameters overlap whenever
        # num_layers > 1; identical to the original for num_layers == 1.
        base = 10 * layer
        for i in range(n):
            circuit.ry(parameters[base + i], i)
        circuit.barrier()
        circuit.cx(2, 0)
        circuit.cx(3, 1)
        circuit.cx(5, 4)
        circuit.barrier()
        circuit.ry(parameters[base + 6], 0)
        circuit.ry(parameters[base + 7], 1)
        circuit.ry(parameters[base + 8], 4)
        circuit.barrier()
        circuit.cx(4, 1)
        circuit.barrier()
        circuit.ry(parameters[base + 9], 1)
        circuit.barrier()
    # Bind the numeric values to the symbolic parameters.
    params_dict = dict(zip(parameters, params))
    circuit = circuit.assign_parameters(parameters=params_dict)
    return circuit
```
El circuito de nuestra red tensorial se ve afectada por 10 compuertas $Ry(\theta)$ y 4 $C_{not}$, considerando como costo ligado al número de $C_{not}$ sería de 4.
```
vqc(n,num_layers,params).draw(output="mpl")
```
Considerando [6] se tiene la oportunidad de usar el Swap-test para buscar el valor de y se identifico de [7] la forma de trabajo de la Swap-Test se comparan dos estados $| \psi \rangle$ y $| \phi \rangle$, donde el primero es el vector de referencia $| 0 \rangle$ y el segundo los qubits que se quieren eliminar para disminuir variables, estos son $| \phi_0 \phi_1 \rangle$ donde al medirse el qubit que tiene la Hadamard del Swap-Test y este se acerque más al estado $|0 \rangle$ significa que se disminuyó de manera correcta la informacion en $| \phi_0 \phi_1 \rangle$.
```
def swap_test(n):
    """SWAP test over two n-qubit registers plus one ancilla (qubit 0).

    Measuring the ancilla close to |0> indicates the two registers hold
    (nearly) identical states.
    """
    # 2n register qubits + 1 ancilla.
    qubits_values = 2*n+1
    qc = QuantumCircuit(qubits_values)
    qc.h(0)
    for i in range(n):
        # Controlled-SWAP between qubit i+1 of the first register and the
        # mirrored qubit (2n-i) of the second register.
        qc.cswap(0,i+1,2*n-i)
    qc.h(0)
    qc.barrier()
    return qc
swap_test(2).draw(output="mpl")
```
El siguiente proceso indica el circuito cuántico variacional del autoencoder para generar la disminución de dos qubits.
```
# Full encoder + SWAP-test circuit. The size_reduce "trash" qubits are
# compared against |0...0> reference qubits; qubit 0 is the SWAP-test ancilla,
# so the data register occupies qubits size_reduce+1 .. n+size_reduce.
size_reduce = 2
circuit_init = input_data(n,x_train[0])
circuit_vqc = vqc(n,num_layers,params)
circuit_swap_test = swap_test(size_reduce)
circuit_full = QuantumCircuit(n+size_reduce+1,1)
circuit_full = circuit_full.compose(circuit_init,[i for i in range(size_reduce+1,n+size_reduce+1)])
circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(size_reduce+1,n+size_reduce+1)])
circuit_full = circuit_full.compose(circuit_swap_test,[i for i in range(2*size_reduce+1)])
circuit_full.draw(output="mpl")
```
Qiskit tiene la propiedad de generar el inverso de un circuito cuántico, que es necesario para nosotros al momento de decodificar la disminución de variables al tamaño original del vector de estado.
```
vqc(n,num_layers,params).inverse().draw(output = "mpl")
```
## Comprimir datos
En este trabajo, al no encontrar una manera correcta de usar los optimizadores para un circuito cuántico que utiliza el mapeo por amplitud, se utilizó la librería SciPy por su método de optimización (COBYLA), que es el mismo que se usa en Qiskit. También se usa un shuffle para que en cada iteración se ocupen algunas imágenes nuevas.
```
from random import shuffle
from scipy.optimize import minimize
```
Se uso para identificar el costo de cada entrada por el valor esperado en el eje z, es decir, $\langle z \rangle $, el cuál se define
$\langle Z \rangle = \langle q | Z | q\rangle =\langle q|0\rangle\langle 0|q\rangle - \langle q|1\rangle\langle 1|q\rangle
=|\langle 0 |q\rangle|^2 - |\langle 1 | q\rangle|^2 $
pero al adecuarl oal criterio del Swap test debe quedar como:
$1 -\langle Z \rangle = 1 - \langle q | Z | q\rangle = 1- [\langle q|0\rangle\langle 0|q\rangle - \langle q|1\rangle\langle 1|q\rangle] = 1 - [|\langle 0 |q\rangle|^2 - |\langle 1 | q\rangle|^2] = 1 - |\langle 0 |q\rangle|^2 + |\langle 1 | q\rangle|^2 $
para mayor información se puede observar en https://qiskit.org/textbook/ch-labs/Lab02_QuantumMeasurement.html
```
def objective_function(params):
    """Training cost: mean SWAP-test penalty (1 - P(ancilla=0) + P(ancilla=1))
    over a batch of 5 training images. A cost of 0 means the trash qubits
    were perfectly disentangled (ideal compression)."""
    costo = 0
    # NOTE(review): this shuffles the *global* x_train in place, so after
    # training x_train no longer lines up with y_train — confirm before any
    # later step that pairs these features with labels.
    shuffle(x_train)
    lenght= 5
    for i in range(lenght):
        # Build: state preparation -> ansatz -> SWAP test, then measure
        # only the ancilla (qubit 0).
        circuit_init = input_data(n,x_train[i])
        circuit_vqc = vqc(n,num_layers,params)
        circuit_swap_test = swap_test(size_reduce)
        circuit_full = QuantumCircuit(n+size_reduce+1,1)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(size_reduce+1,n+size_reduce+1)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(size_reduce+1,n+size_reduce+1)])
        circuit_full = circuit_full.compose(circuit_swap_test,[i for i in range(2*size_reduce+1)])
        circuit_full.measure(0, 0)
        #qc.draw()
        shots= 8192
        job = execute( circuit_full, Aer.get_backend('qasm_simulator'),shots=shots )
        counts = job.result().get_counts()
        # Empirical ancilla probabilities; missing outcomes count as 0.
        probs = {}
        for output in ['0','1']:
            if output in counts:
                probs[output] = counts[output]/shots
            else:
                probs[output] = 0
        costo += (1 +probs['1'] - probs['0'])
    return costo/lenght
# Single COBYLA minimization pass; params keeps the optimized angles.
for i in range(1):
    minimum = minimize(objective_function, params, method='COBYLA', tol=1e-6)
    params = minimum.x
    print(" cost: ",objective_function(params))
print(params)
```
Al finalizar las iteraciones necesarias, depende del número del conjunto de instancia seleccionadas, se pasa por vector de estado por el complejo conjugado de nuestro circuito cuántico, dónde [6] y [7] mencionaban que nos debe dar la información original. Esto se realiza a todo el conjunto de entrenamiento como de prueba.
```
def compress_result_test(params):
    """Encode every test image with the trained ansatz, measure the
    n - size_reduce kept (latent) qubits, and return one dict per image
    mapping bitstring -> sqrt(empirical probability)."""
    reduce = []
    for i in range(len(x_test)):
        circuit_init = input_data(n,x_test[i])
        circuit_vqc = vqc(n,num_layers,params)
        circuit_full = QuantumCircuit(n,n-size_reduce)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(n)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(n)])
        len_cf = len(circuit_full)
        # Measure only the kept qubits (the latent register).
        # NOTE(review): this inner loop reuses the name i, shadowing the
        # image index — harmless here because the outer for reassigns it,
        # but fragile.
        for i in range(n-size_reduce):
            circuit_full.measure(size_reduce+i, i)
        job = execute( circuit_full, Aer.get_backend('qasm_simulator'),shots=8192 )
        result = job.result().get_counts()
        # sqrt of the empirical probability approximates each amplitude's magnitude.
        probs = {k: np.sqrt(v / 8192) for k, v in result.items()}
        reduce.append(probs)
    return reduce
reduce_img =compress_result_test(params)
# Expand each counts dict into a dense 16-element vector ordered by the
# zero-padded 4-bit basis-state index; missing outcomes contribute 0.
test_reduce = []
for i in reduce_img:
    index_image = []
    for j in range(16):
        bin_index = bin(j)[2:]
        while len(bin_index) <4:
            bin_index = '0'+bin_index
        try:
            index_image.append(i[bin_index])
        except:
            index_image.append(0)
    test_reduce.append(np.array(index_image))
def compress_result_train(params):
    """Same compression pass as compress_result_test, applied to x_train."""
    reduce = []
    for i in range(len(x_train)):
        circuit_init = input_data(n,x_train[i])
        circuit_vqc = vqc(n,num_layers,params)
        circuit_full = QuantumCircuit(n,n-size_reduce)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(n)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(n)])
        len_cf = len(circuit_full)
        # Measure only the kept (latent) qubits.
        for i in range(n-size_reduce):
            circuit_full.measure(size_reduce+i, i)
        job = execute( circuit_full, Aer.get_backend('qasm_simulator'),shots=8192 )
        result = job.result().get_counts()
        probs = {k: np.sqrt(v / 8192) for k, v in result.items()}
        reduce.append(probs)
    return reduce
reduce_img =compress_result_train(params)
# Expand the counts dicts into dense 16-element latent vectors.
train_reduce = []
for i in reduce_img:
    index_image = []
    for j in range(16):
        bin_index = bin(j)[2:]
        while len(bin_index) <4:
            bin_index = '0'+bin_index
        try:
            index_image.append(i[bin_index])
        except:
            index_image.append(0)
    train_reduce.append(np.array(index_image))
```
En este punto se muestra las primeras 5 imagenes del conjunto de prueba de tamaño 8x8 como se reducen a un tamaño de 4x4 cada una.
```
plt.figure()
#subplot(r,c) provide the no. of rows and columns
f, axarr = plt.subplots(5,1)
# use the created array to output your multiple images. In this case I have stacked 4 images vertically
# Original 8x8 test images (first five), amplitudes rescaled for display.
axarr[0].imshow(x_test[0].reshape(8,8)*255)
axarr[1].imshow(x_test[1].reshape(8,8)*255)
axarr[2].imshow(x_test[2].reshape(8,8)*255)
axarr[3].imshow(x_test[3].reshape(8,8)*255)
axarr[4].imshow(x_test[4].reshape(8,8)*255)
#subplot(r,c) provide the no. of rows and columns
f, axarr = plt.subplots(5,1)
# use the created array to output your multiple images. In this case I have stacked 4 images vertically
# Corresponding 4x4 compressed latent vectors.
axarr[0].imshow(test_reduce[0].reshape(4,4)*255)
axarr[1].imshow(test_reduce[1].reshape(4,4)*255)
axarr[2].imshow(test_reduce[2].reshape(4,4)*255)
axarr[3].imshow(test_reduce[3].reshape(4,4)*255)
axarr[4].imshow(test_reduce[4].reshape(4,4)*255)
```
### Descomprimir datos
Aquí recordando la aplicación del complejo conjugado de nuestra propuesta de red tensorial debemos acercarnos al valor original de entrada $|\phi \rangle$
```
vqc(n,num_layers,params).inverse().draw(output = "mpl")
def decoder_result_test(params):
    """Decode each compressed test vector: embed the 16 latent amplitudes at
    the end of a 64-amplitude state (48 leading zeros), run the inverse
    ansatz, and return the resulting statevectors.

    NOTE(review): the latent vectors come from sqrt(counts/shots) and may
    not be exactly unit-norm, while initialize() expects a normalized
    vector — confirm this holds in practice.
    """
    reduce = []
    for i in range(len(test_reduce)):
        # The qubit count is hard-coded as 6 here; it matches the global n.
        circuit_init = input_data(6,np.concatenate((np.zeros(48), test_reduce[i]), axis=0))
        circuit_vqc = vqc(n,num_layers,params).inverse()
        circuit_full = QuantumCircuit(n,n)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(n)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(n)])
        job = execute( circuit_full, Aer.get_backend('statevector_simulator') )
        result = job.result().get_statevector()
        reduce.append(result)
    return reduce
decoder =decoder_result_test(params)
plt.figure()
#subplot(r,c) provide the no. of rows and columns
f, axarr = plt.subplots(5,1)
# use the created array to output your multiple images. In this case I have stacked 4 images vertically
# First five decoded (reconstructed) test images: real parts, reshaped 8x8.
axarr[0].imshow(decoder[0].real.reshape(8,8)*255)
axarr[1].imshow(decoder[1].real.reshape(8,8)*255)
axarr[2].imshow(decoder[2].real.reshape(8,8)*255)
axarr[3].imshow(decoder[3].real.reshape(8,8)*255)
axarr[4].imshow(decoder[4].real.reshape(8,8)*255)
def decoder_result_train(params):
    """Decode every compressed training vector with the inverse ansatz,
    returning one 64-amplitude statevector per image (same procedure as
    decoder_result_test)."""
    reduce = []
    for i in range(len(train_reduce)):
        circuit_init = input_data(n,np.concatenate((np.zeros(48), train_reduce[i]), axis=0))
        circuit_vqc = vqc(n,num_layers,params).inverse()
        circuit_full = QuantumCircuit(n,n)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(n)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(n)])
        job = execute( circuit_full, Aer.get_backend('statevector_simulator') )
        result = job.result().get_statevector()
        reduce.append(result)
    return reduce
decoder_train =decoder_result_train(params)
```
# métricas para comparar imagenes
De cada una de las imágenes tanto de prueba como entrenamiento se realizará las siguientes métricas para validar la capacidad de nuestro autoencoder entre las imagenes de entrada y descomprimidas.
- Error Cuadrático medio (o por sus siglas en inglés MSE)
$MSE=\frac{1}{m n} \sum_{i=0}^{m-1} \sum_{j=0}^{n-1}[I(i, j)-K(i, j)]^{2},$
donde $m$ es el alto de la imágen $I$, n el ancho de la imagen $K$ e $i$,$j$ las posiciones $x,y$ de los píxeles de las imágenes; entre más cercano a 0 sea su valor es mejor.
- Proporción Máxima de Señal a Ruido (o por sus siglas en inglés PSNR)
$PSNR = 10 \times \log_{10}\left(\frac{(m \times n)^2}{MSE}\right),$
donde $m$ el alto de la imagen $I$, n el ancho de la imagen $K$ y $MSE$ el error cuadrático medio;entre más alto su valor es mejor.
- Semejanza Estructural (o por sus siglas en inglés SSIM)
$ \operatorname{SSIM}(x, y)=\frac{\left(2 \mu_{x} \mu_{y}+c_{1}\right)\left(2 \sigma_{x y}+c_{2}\right)}{\left(\mu_{x}^{2}+\mu_{y}^{2}+c_{1}\right)\left(\sigma_{x}^{2}+\sigma_{y}^{2}+c_{2}\right)},$
donde $\mu$ es el promedio, $\sigma$ es la varianza y $c$ es la covarianza \cite{c1}}; peor caso -1, mejor caso 1.
```
def mse(imageA, imageB):
    """Mean squared error between two equally-shaped 2-D images.

    Lower is better; 0 means the images are identical.
    """
    diff = imageA.astype("float") - imageB.astype("float")
    n_pixels = float(imageA.shape[0] * imageA.shape[1])
    return np.sum(diff ** 2) / n_pixels
from skimage.metrics import structural_similarity as ssim
```
# Comparar el conjunto de entrenamiento
Se realiza los resultados de las tres métricas en el conjunto de entrenamiento
```
import math
# Per-image reconstruction metrics (SSIM, MSE, PSNR) on the training set.
ssim_list = []
mse_list = []
psnr_list = []
for i in range(len(x_train)):
    test_img = x_train[i].reshape(8,8)*255
    decoded_img = decoder_train[i].real.reshape(8,8)*255
    Y = float(mse(decoded_img,test_img))
    # NOTE(review): newer scikit-image versions require data_range= for
    # float images — confirm against the installed version.
    ssim_list.append(ssim(decoded_img.astype("float"),test_img.astype("float")))
    mse_list.append(Y)
    # PSNR with (64**2) as the squared peak value.
    aux = (64**2)/Y
    psnr_list.append(10*math.log10(aux))
from matplotlib import pyplot as plt
plt.plot(mse_list)
plt.show()
from matplotlib import pyplot as plt
plt.plot(psnr_list)
plt.show()
from matplotlib import pyplot as plt
plt.plot(ssim_list)
plt.show()
```
# Comparar el conjunto de prueba (Test)
Se realiza los resultados de las tres métricas en el conjunto de prueba
```
# Same three metrics (SSIM, MSE, PSNR) over the test set and its decodings.
ssim_list = []
mse_list = []
psnr_list = []
for i in range(len(x_test)):
    test_img = x_test[i].reshape(8,8)*255
    decoded_img = decoder[i].real.reshape(8,8)*255
    Y = float(mse(decoded_img,test_img))
    ssim_list.append(ssim(decoded_img.astype("float"),test_img.astype("float")))
    mse_list.append(Y)
    aux = (64**2)/Y
    psnr_list.append(10*math.log10(aux))
from matplotlib import pyplot as plt
plt.plot(mse_list)
plt.show()
from matplotlib import pyplot as plt
plt.plot(psnr_list)
plt.show()
from matplotlib import pyplot as plt
plt.plot(ssim_list)
plt.show()
```
Se repite el mismo proceso pero ya con entradas no aleatorias para guardar la información en archivos csv que se ocuparán para realizar una clasificación con las imágenes reducidas.
```
# Rebuild the train/test sets deterministically (no shuffling, no blank-image
# deletion) so compressed vectors can be written to CSV aligned with labels.
(x_train_c, y_train_c), (x_test_c, y_test_c) = tf.keras.datasets.mnist.load_data()
# Rescale the images from [0,255] to the [0.0,1.0] range.
x_train_c, x_test_c = x_train_c[..., np.newaxis]/255.0, x_test_c[..., np.newaxis]/255.0
x_train_c, y_train_c = filter_01(x_train_c, y_train_c)
x_test_c, y_test_c = filter_01(x_test_c, y_test_c)
x_train_c = tf.image.resize(x_train_c, (8,8), method='nearest', preserve_aspect_ratio=True).numpy()
x_test_c = tf.image.resize(x_test_c, (8,8), method='nearest', preserve_aspect_ratio=True).numpy()
for i in range(len(x_train_c)):
    x_train_c[i] = Normalize(x_train_c[i])
# Bug fix: iterate over x_test_c — the original looped over len(x_test),
# a different (earlier, pruned) array.
for i in range(len(x_test_c)):
    x_test_c[i] = Normalize(x_test_c[i])
# Bug fix: reshape by these arrays' own lengths — the original used
# len(x_train_small)/len(x_test_small), which can differ after the earlier
# in-place deletions on x_train/x_test and would then raise or misalign.
x_train_c = x_train_c.reshape(len(x_train_c), 64)
x_test_c = x_test_c.reshape(len(x_test_c), 64)
x_train_c.shape
def compress_result_train(params):
    """Compress the ordered training set x_train_c: encode each image,
    measure the kept qubits, and return sqrt-probability dicts."""
    reduce = []
    for i in range(len(x_train_c)):
        circuit_init = input_data(n,x_train_c[i])
        circuit_vqc = vqc(n,num_layers,params)
        circuit_full = QuantumCircuit(n,n-size_reduce)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(n)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(n)])
        len_cf = len(circuit_full)
        for i in range(n-size_reduce):
            circuit_full.measure(size_reduce+i, i)
        job = execute( circuit_full, Aer.get_backend('qasm_simulator'),shots=8192 )
        result = job.result().get_counts()
        probs = {k: np.sqrt(v / 8192) for k, v in result.items()}
        reduce.append(probs)
    return reduce
reduce_train_c = compress_result_train(params)
def compress_result_test(params):
    """Compress the ordered test set x_test_c (same procedure as above)."""
    reduce = []
    for i in range(len(x_test_c)):
        circuit_init = input_data(n,x_test_c[i])
        circuit_vqc = vqc(n,num_layers,params)
        circuit_full = QuantumCircuit(n,n-size_reduce)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(n)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(n)])
        len_cf = len(circuit_full)
        for i in range(n-size_reduce):
            circuit_full.measure(size_reduce+i, i)
        job = execute( circuit_full, Aer.get_backend('qasm_simulator'),shots=8192 )
        result = job.result().get_counts()
        probs = {k: np.sqrt(v / 8192) for k, v in result.items()}
        reduce.append(probs)
    return reduce
reduce_test_c = compress_result_test(params)
# Expand the test/train counts dicts into dense 16-element latent vectors,
# indexed by the zero-padded 4-bit basis state; absent outcomes get 0.
test_reduce = []
for i in reduce_test_c:
    index_image = []
    for j in range(16):
        bin_index = bin(j)[2:]
        while len(bin_index) <4:
            bin_index = '0'+bin_index
        try:
            index_image.append(i[bin_index])
        except:
            index_image.append(0)
    test_reduce.append(np.array(index_image))
train_reduce = []
for i in reduce_train_c:
    index_image = []
    for j in range(16):
        bin_index = bin(j)[2:]
        while len(bin_index) <4:
            bin_index = '0'+bin_index
        try:
            index_image.append(i[bin_index])
        except:
            index_image.append(0)
    train_reduce.append(np.array(index_image))
def decoder_result_train_c(params):
    """Decode the ordered training latents back to 64-amplitude statevectors
    with the inverse ansatz (48 leading zero amplitudes + 16 latents)."""
    reduce = []
    for i in range(len(train_reduce)):
        circuit_init = input_data(n,np.concatenate((np.zeros(48), train_reduce[i]), axis=0))
        circuit_vqc = vqc(n,num_layers,params).inverse()
        circuit_full = QuantumCircuit(n,n)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(n)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(n)])
        job = execute( circuit_full, Aer.get_backend('statevector_simulator') )
        result = job.result().get_statevector()
        reduce.append(result)
    return reduce
decoder_train_c =decoder_result_train_c(params)
len(decoder_train_c)
def decoder_result_test_c(params):
    """Decode the ordered test latents back to 64-amplitude statevectors."""
    reduce = []
    for i in range(len(test_reduce)):
        # The qubit count is hard-coded as 6 here; it matches the global n.
        circuit_init = input_data(6,np.concatenate((np.zeros(48), test_reduce[i]), axis=0))
        circuit_vqc = vqc(n,num_layers,params).inverse()
        circuit_full = QuantumCircuit(n,n)
        circuit_full = circuit_full.compose(circuit_init,[i for i in range(n)])
        circuit_full = circuit_full.compose(circuit_vqc,[i for i in range(n)])
        job = execute( circuit_full, Aer.get_backend('statevector_simulator') )
        result = job.result().get_statevector()
        reduce.append(result)
    return reduce
decoder_c =decoder_result_test_c(params)
```
### Guardar los resultados
Se guardaron dos archivos train.csv y test.csv de las imágenes comprimidas obtenidas de nuestro autoencoder la primera para el conjunto de entrenamiento y la segunda para el conjunto de prueba
```
import pandas as pd
# Persist the 16-feature compressed vectors; column 16 holds the label.
df = pd.DataFrame(train_reduce)
# NOTE(review): train_reduce here comes from the ordered x_train_c pass,
# while y_train belongs to x_train, which objective_function shuffled in
# place — confirm these labels actually line up with the rows (y_train_c
# looks like the intended column).
df[16] = y_train
df.to_csv("train_1.csv",index=False)
df = pd.DataFrame(test_reduce)
df[16] = y_test
df.to_csv("test_1.csv",index=False)
```
# Resultados del autoencoder cuántico
Siguiendo los resultados definimos en un histograma por métrica los mejores casos y nos dieron las siguientes gráficas
## MSE
Los resultados más cercanos al 0 son los mejores resultados, viendo de manera visual el mejor caso es con 200 imágenes.
<img src="mse.png">
## PSNR
Los resultados con un valor mayor en el eje de las ordenadas son los mejores resultados, observando de manera visual que el mejor caso es con 200 imágenes.
<img src="psnr.png">
## SSIM
Los resultados más cercanos a 1 son los mejores resultados, viendo de manera visual el mejor caso es con 200 imágenes.
<img src="ssim.png">
Por lo tanto consideraremos los resultados de 200 imagenes para realizar un clasificador binario.
# Parte del clasificador binario
Se importan las bibliotecas necesarias para esto usando qiskit meachine learning
```
# Scikit Imports
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# Qiskit Imports
from qiskit import Aer, execute
from qiskit.circuit import QuantumCircuit, Parameter, ParameterVector
from qiskit.circuit.library import PauliFeatureMap, ZFeatureMap, ZZFeatureMap
from qiskit.circuit.library import TwoLocal, NLocal, RealAmplitudes, EfficientSU2
from qiskit.circuit.library import HGate, RXGate, RYGate, RZGate, CXGate, CRXGate, CRZGate
from qiskit_machine_learning.kernels import QuantumKernel
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import csv
```
Se obtiene los datos de los csv tanto para poder procesar los datos en el clasificador cuántico variacional. pasando un lado el vector de entrada de tamaño 16x1 y el otro la etiqueta
```
# Load the compressed training vectors (16 features + trailing label) from CSV.
sample_train = []
label_train = []
with open('train.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader, None)  # skip the header row
    for row in reader:
        sample_train.append(row[0:-1])
        label_train.append(row[-1])
# Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float is the documented replacement.
sample_train = np.array(sample_train).astype(float)
label_train = np.array(label_train).astype(float)
sample_train.shape, label_train.shape
# Same loading pass for the test split.
sample_test = []
label_test = []
with open('test.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader, None)
    for row in reader:
        sample_test.append(row[0:-1])
        label_test.append(row[-1])
sample_test = np.array(sample_test).astype(float)
label_test = np.array(label_test).astype(float)
sample_test.shape, label_test.shape
```
Se genera el vector de entrada para el conjunto de entrenamiento y prueba de tamaño 16x1
```
# Reshape to explicit (N, 16) feature matrices.
sample_train = sample_train.reshape(len(sample_train), 16)
sample_test = sample_test.reshape(len(sample_test), 16)
```
Vamos a realizar un clasificador usando 4 qubits, por lo cual hay que disminuir el número de muestras para que nuestros dispositivos puedan correr los ejemplos, usando el método Hold-out 70-30, es decir, 70% entrenamiento y 30% de prueba.
```
# Hold-out split: 700 training / 300 test samples (70/30).
train_size = 700
sample_train = sample_train[:train_size]
label_train = label_train[:train_size]
test_size = 300
sample_test = sample_test[:test_size]
label_test = label_test[:test_size]
```
Lo siguiente es mapear el vector clasico a un estado cuántico para ello usaremos la funcion ZZFeatureMap ("Se puede ocupar otro como PauliFeatureMap o ZFeatureMap").
Despues inicializaremos un Kernel cuántico del cual podemos calcular cada elemento de esta mátriz en una computadora cuántica calculando la amplitud de transición. Esto nos proporciona una estimación de la matriz cuántica del kernel, que luego podemos usar en un algoritmo de aprendizaje automático del kernel, es este caso se usara en una maquina de soporte vectorial
```
# ZZ feature map embeds the 16 classical features; the quantum kernel then
# evaluates state overlaps on the statevector simulator.
zz_map = ZZFeatureMap(feature_dimension=16, reps=1, entanglement='linear', insert_barriers=True)
zz_kernel = QuantumKernel(feature_map=zz_map, quantum_instance=Aer.get_backend('statevector_simulator'))
zz_map.draw(output="mpl")
```
Construimos las matrices de entrenamiento y prueba del kernel cuántico.
Para cada par de puntos de datos en el conjunto de datos de entrenamiento
```
# Gram matrices: train x train for fitting, test x train for scoring.
matrix_train = zz_kernel.evaluate(x_vec=sample_train)
matrix_test = zz_kernel.evaluate(x_vec=sample_test, y_vec=sample_train)
```
Utilizamos las matrices de entrenamiento y prueba del kernel cuántico en un algoritmo de clasificación de máquina de vectores de soporte clásico.
```
# Classical SVM on the precomputed quantum kernel.
zzpc_svc = SVC(kernel='precomputed')
zzpc_svc.fit(matrix_train, label_train)
zzpc_score = zzpc_svc.score(matrix_test, label_test)
print(f'Precomputed kernel classification test score: {zzpc_score}')
```
Probamos el algoritmo viendo que tal hace la clasificacion del set de prueba
```
predictions = zzpc_svc.predict(matrix_test)
```
Como se puede observar, de 300 muestras solo 6 no se clasificaron de manera correcta
```
# Print only the misclassified samples (prediction vs. true label).
for prediction,label in zip(predictions,label_test):
    if(prediction != label):
        print(prediction, label)
```
## Validar para el conjunto de 200 imagenes
Se repite el mismo proceso pero considerando que el método de validación hold-out se consideré válido se debe hacer la prueba con diferentes conjuntos de forma aleatoria que se realizará con el conjunto comprimido de 200 imágenes
```
# Load the 200-image compressed datasets (16 features + trailing label).
sample_train = []
label_train = []
with open('train_200.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader, None)  # skip the header row
    for row in reader:
        sample_train.append(row[0:-1])
        label_train.append(row[-1])
# Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float is the documented replacement.
sample_train = np.array(sample_train).astype(float)
label_train = np.array(label_train).astype(float)
sample_test = []
label_test = []
with open('test_200.csv', newline='') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    next(reader, None)
    for row in reader:
        sample_test.append(row[0:-1])
        label_test.append(row[-1])
sample_test = np.array(sample_test).astype(float)
label_test = np.array(label_test).astype(float)
sample_train.shape, label_train.shape, sample_test.shape, label_test.shape
```
Se genera 7 iteraciones con diferentes imagenes de rangos fijos
```
# Seven disjoint hold-out folds of 700 train / 300 test samples each.
score = []
for i in range(7):
    train_size = 700
    sample_train_1 = sample_train[i*train_size:(i+1)*train_size]
    label_train_1 = label_train[i*train_size:(i+1)*train_size]
    test_size = 300
    sample_test_1 = sample_test[i*test_size:(i+1)*test_size]
    label_test_1 = label_test[i*test_size:(i+1)*test_size]
    # Fresh feature map / kernel per fold.
    zz_map = ZZFeatureMap(feature_dimension=16, reps=1, entanglement='linear', insert_barriers=True)
    zz_kernel = QuantumKernel(feature_map=zz_map, quantum_instance=Aer.get_backend('statevector_simulator'))
    matrix_train = zz_kernel.evaluate(x_vec=sample_train_1)
    matrix_test = zz_kernel.evaluate(x_vec=sample_test_1, y_vec=sample_train_1)
    zzpc_svc = SVC(kernel='precomputed')
    zzpc_svc.fit(matrix_train, label_train_1)
    zzpc_score = zzpc_svc.score(matrix_test, label_test_1)
    print(f'Precomputed kernel classification test score: {zzpc_score}')
    score.append(zzpc_score)
    # Free the large Gram matrices before the next fold.
    del matrix_train, matrix_test
```
El valor promedio para el conjunto que se dio del auto encoder usando Hold-out 70-30 se obtuvo un valor de desempeño de
```
sum(score)/len(score)
```
## Autores
- Martínez Vázquez María Fernanda (undergraduate)
- Navarro Ambriz Ronaldo (undergraduate)
- Martinez Hernandez Luis Eduardo (undergraduate)
- Galindo Reyes Agustin (undergraduate)
- Alberto Maldonado Romo (master)
# Referencias
[1] Bravo-Prieto, Carlos. (2020). Quantum autoencoders with enhanced data encoding.
[2] Biamonte, Jacob. (2019). Lectures on Quantum Tensor Networks.
[3] Kardashin, Andrey & Uvarov, Aleksey & Biamonte, Jacob. (2021). Quantum Machine Learning Tensor Network States. Frontiers in Physics. 8. 586374. 10.3389/fphy.2020.586374.
[4] Stoudenmire, E. & Schwab, David. (2016). Supervised Learning with Quantum-Inspired Tensor Networks.
[5] Liu, Ding & Yao, Zekun & Zhang, Quan. (2020). Quantum-Classical Machine learning by Hybrid Tensor Networks
[6] Romero, Jonathan & Olson, Jonathan & Aspuru-Guzik, Alán. (2016). Quantum autoencoders for efficient compression of quantum data. Quantum Science and Technology. 2. 10.1088/2058-9565/aa8072.
[7] Foulds, Steph & Kendon, Viv & Spiller, Tim. (2020). The controlled SWAP test for determining quantum entanglement.
| github_jupyter |
# Comparison of the CNN filter combinations based on 5-fold cross-validation
Investigation into the effect of various filter combinations for the CNN model. To compare the different filter values, five-fold cross-validation was used. For each fold, one subject of the five total subjects (subject C being reserved for final evaluation) was withheld for evaluation whilst the model was trained on the remaining four subjects.
CNN model developed by A. Angelov, applied to the micro-Doppler spectrograms.
User c is completely removed as this is the test set.
The remaining users A, B, D, E and F make up each fold.
For example, fold 1 will train on users B, D, E and F then evaluate on A (and so on for each fold).
## Notebook setup
```
# Plot graphs inline
%matplotlib inline
```
The following cell is needed for compatibility when using both CoLab and Local Jupyter notebook. It sets the appropriate file path for the data.
```
# Resolve the project base directory for the current execution environment
# (Google CoLab, the local Windows dev machine, or the remote compute server)
# and derive the data/results paths from it.
import os

path = os.getcwd()
if path == '/content':
    # Running on Google CoLab: mount Drive and work from the project folder.
    from google.colab import drive
    drive.mount('/content/gdrive')
    BASE_PATH = '/content/gdrive/My Drive/Level-4-Project/'
    os.chdir('gdrive/My Drive/Level-4-Project/')
elif path == 'D:\\Google Drive\\Level-4-Project\\notebooks':
    # Local Jupyter notebook on the development machine.
    BASE_PATH = "D:/Google Drive/Level-4-Project/"
elif path == "/export/home/2192793m":
    # Remote compute server.
    BASE_PATH = "/export/home/2192793m/Level-4-Project/"
else:
    # Previously an unknown working directory fell through silently and a
    # NameError surfaced at the first use of BASE_PATH; fail fast instead.
    raise RuntimeError(
        "Unrecognised working directory %r; cannot locate project base." % path)

DATA_PATH = BASE_PATH + 'data/processed/doppler_spectrograms/3/'
RESULTS_PATH = BASE_PATH + 'results/CNN_model_comparison/'
if not os.path.exists(RESULTS_PATH):
    os.makedirs(RESULTS_PATH)
```
Import remaining packages
```
import numpy as np
from keras.optimizers import SGD
from keras.utils import np_utils
import pickle
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
import matplotlib.pyplot as plt
# Needed as originally code was for theano backend but now using tensor flow:
# keep channel-first ("th") image ordering so the (1, 75, 75) inputs below work.
from keras import backend as K
K.set_image_dim_ordering('th')
```
## Experiment Setup
```
# Flags guarding writes to RESULTS_PATH so saved figures/results are not
# accidentally overwritten on re-runs.
SAVE_GRAPHS_OVERFITTING = False  # false to not override results
SAVE_GRAPHS_AVERAGES = False  # save avg acc and avg loss graphs from k fold
SAVE_GRAPHS_DISTRIBUTIONS = False  # save accuracy distribution across folds graphs
SAVE_RESULTS_OVERFITTING = False  # false to not override results
SAVE_RESULTS_K_FOLD = False
SAVE_BOXPLOTS = False

# The seven activity classes predicted from the micro-Doppler spectrograms.
target_names = ["walking", "pushing", "sitting", "pulling", "circling", "clapping", "bending"]
nb_classes = len(target_names)
batch_size = 64
# input image dimensions
img_rows, img_cols = 75, 75
# user c excluded as this is reserved for final evaluation
users = ["A", "B", "D", "E", "F"]
def load_data(user_letter):
    """
    Load the spectrogram data and labels recorded for one subject.

    :param user_letter: Letter representing subject/user (A-F)
    :type user_letter: str
    :return: data and labels
    :rtype: tuple of the form (data, labels)
    """
    data_path = DATA_PATH + user_letter + "_data.pkl"
    labels_path = DATA_PATH + user_letter + "_labels.pkl"
    with open(data_path, 'rb') as data_file:
        # Reshape to channel-first image tensors: (samples, 1, 75, 75).
        data = pickle.load(data_file).reshape(-1, 1, 75, 75)
    with open(labels_path, 'rb') as labels_file:
        labels = pickle.load(labels_file)
    # Column vector of labels, one row per sample.
    labels = np.reshape(labels, (len(labels), 1))
    return data, labels
# Cache every subject's data and labels in memory, keyed by subject letter.
datasets = {}
for subject in users:
    subject_data, subject_labels = load_data(subject)
    datasets[subject] = {"data": subject_data, "labels": subject_labels}
def split_train_validation(validation_user):
    """
    Splits the data into a train and validation set.

    The validation set is composed of the subject specified, the training set
    of the remaining subjects.

    :param validation_user: Subject to use for validation set (A-F)
    :type validation_user: str
    :return: data and labels for the train and validation set
    :rtype: dictionary with keys "train_data", "train_labels", "validation_data" and "validation_labels"
    """
    train_data_parts = []
    train_label_parts = []
    validation_data = []
    validation_labels = []
    for user in users:
        user_data = datasets[user]["data"]
        user_labels = datasets[user]["labels"]
        if user == validation_user:
            validation_data = user_data
            validation_labels = user_labels
        else:
            train_data_parts.append(user_data)
            train_label_parts.append(user_labels)
    # Concatenate the training subjects in a single pass.
    train_data = np.concatenate(train_data_parts)
    train_labels = np.concatenate(train_label_parts)
    # One-hot encode the class labels.
    train_labels = np_utils.to_categorical(train_labels, nb_classes)
    validation_labels = np_utils.to_categorical(validation_labels, nb_classes)
    # Scale pixel values from [0, 255] into [0, 1].
    train_data = train_data.astype('float32') / 255
    validation_data = validation_data.astype('float32') / 255
    return {
        "train_data": train_data,
        "train_labels": train_labels,
        "validation_data": validation_data,
        "validation_labels": validation_labels
    }
```
## Define Models
```
def make_model(nb_filters, img_rows, img_cols, nb_classes):
    """
    Make and return the CNN model (4 conv layers in two blocks + dense head).

    :param nb_filters: Number of filters to use in layers 1,2 and 3,4 respectively
    :type nb_filters: str containing the number of filters for the first two layers followed by
        the last two layers, for example: "16-32"
    :param img_rows: image height
    :type img_rows: int
    :param img_cols: image width
    :type img_cols: int
    :param nb_classes: Number of classes to be predicted
    :type nb_classes: int
    :return: CNN model
    :rtype: Keras sequential model
    """
    # BUG FIX: Sequential was never imported at the top of this notebook, so
    # calling this function raised a NameError; import it here explicitly.
    from keras.models import Sequential

    model = Sequential(name=nb_filters)
    size_1, size_2 = (int(size) for size in nb_filters.split("-"))
    # Block 1: two conv layers, max-pool, dropout.
    model.add(Convolution2D(size_1, (3, 3), padding='same', input_shape=(1, img_rows, img_cols), activation='relu'))
    model.add(Convolution2D(size_1, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Block 2: two conv layers with the larger filter count.
    model.add(Convolution2D(size_2, (3, 3), padding='same', activation='relu'))
    model.add(Convolution2D(size_2, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # Classification head.
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))
    return model


# The candidate "layer1,2-layer3,4" filter combinations to compare.
model_types = ["2-4", "4-8", "8-16", "16-32", "32-64", "64-128"]
```
## Examining Overfitting
Train each model on 4 of the 5 users then evaluate on the 5th.
Compare at which epoch the model begins to overfit.
```
# Train one instance of every candidate model for 50 epochs, keeping the
# per-epoch training/validation history so overfitting can be inspected.
nb_epoch = 50
overfitting_results = {}
validation_user = "B"  # use user B for validation
for model_type in model_types:
    print("Model:", model_type)
    split = split_train_validation(validation_user)
    model = make_model(model_type, img_rows, img_cols, nb_classes)
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    history = model.fit(
        split["train_data"],
        split["train_labels"],
        batch_size=batch_size,
        epochs=nb_epoch,
        shuffle=True,
        validation_data=(split["validation_data"], split["validation_labels"]),
        verbose=0)
    overfitting_results[model_type] = history.history
```
### Save Results
```
# Persist the overfitting histories (guarded against accidental overwrite).
if SAVE_RESULTS_OVERFITTING:
    output_path = RESULTS_PATH + "overfitting_results.pkl"
    with open(output_path, 'wb') as results_file:
        pickle.dump(overfitting_results, results_file)
```
### Load Results
```
# Reload previously saved overfitting histories from disk.
input_path = RESULTS_PATH + "overfitting_results.pkl"
with open(input_path, 'rb') as results_file:
    overfitting_results = pickle.load(results_file)
```
### Visualize Results
```
# Plot training vs validation accuracy per epoch for every model so the
# epoch at which each one starts to overfit can be compared.
for model_type in model_types:
    history = overfitting_results[model_type]
    training_acc = 100 * np.array(history['acc'])
    validation_acc = 100 * np.array(history['val_acc'])
    # Create count of the number of epochs
    epochs = range(1, len(training_acc) + 1)
    plt.plot(epochs, training_acc, 'b--', label='Training (Subjects A, D, E and F)')
    plt.plot(epochs, validation_acc, 'r-', label='Validation (Subject B)')
    plt.legend(loc='best')
    plt.xlabel('Epoch')
    plt.ylabel('Classification Accuracy (%)')
    plt.title("Model: " + model_type)
    plt.grid()
    if SAVE_GRAPHS_OVERFITTING:
        plt.savefig(RESULTS_PATH + model_type + "_overfitting.pdf", format='pdf')
    plt.show()
```
## 5-Fold Cross-Validation
From the above graphs it would seem that all models have almost converged after 30 epochs. Therefore we will use this value for the k-fold comparison.
```
# All models have essentially converged by epoch 30 (see graphs above), so
# train for 30 epochs in each cross-validation fold.
nb_epoch = 30
```
### Variables to save results
```
# Nested dictionary holding per-model, per-fold evaluation results.
results = {model_type: {user: {} for user in users} for model_type in model_types}
```
### Run K-fold
```
# Five-fold cross-validation: for every filter combination, train on four
# subjects and evaluate on the held-out fifth, rotating the held-out subject.
for model_type in model_types:
    print("Model:", model_type)
    total_loss = 0
    total_accuracy = 0
    for fold_user in users:
        split = split_train_validation(fold_user)
        model = make_model(model_type, img_rows, img_cols, nb_classes)
        # train the model using SGD + momentum.
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=sgd,
                      metrics=['accuracy'])
        model.fit(
            split["train_data"],
            split["train_labels"],
            batch_size=batch_size,
            epochs=nb_epoch,
            shuffle=True,
            verbose=0)
        evaluation = model.evaluate(split["validation_data"], split["validation_labels"],
                                    batch_size=batch_size, verbose=1)
        fold_loss, fold_accuracy = evaluation[0], evaluation[1]
        results[model_type][fold_user]["loss"] = fold_loss
        results[model_type][fold_user]["accuracy"] = fold_accuracy
        total_loss += fold_loss
        total_accuracy += fold_accuracy
    results[model_type]["avg_loss"] = total_loss / len(users)
    results[model_type]["avg_acc"] = total_accuracy / len(users)
    print("Average Loss:", total_loss / len(users))
    print("Average Accuracy:", total_accuracy / len(users))
```
## Save results
```
# Persist the cross-validation results (guarded against accidental overwrite).
if SAVE_RESULTS_K_FOLD:
    output_path = RESULTS_PATH + "results_dictionary.pkl"
    with open(output_path, 'wb') as results_file:
        pickle.dump(results, results_file)
```
## Load previous results
```
# Reload previously saved cross-validation results from disk.
input_path = RESULTS_PATH + "results_dictionary.pkl"
with open(input_path, 'rb') as results_file:
    results = pickle.load(results_file)
```
## Results Visualization
### Average Accuracy
```
# Bar chart comparing mean cross-validation accuracy across models.
labels = list(results.keys())
accuracies = [results[name]["avg_acc"] for name in labels]
positions = range(len(labels))
plt.bar(positions, np.array(accuracies) * 100, zorder=3)
plt.xticks(positions, labels)
plt.xlabel("Model")
plt.ylabel("Average Classification Accuracy (%)")
plt.title("Average Accuracy Comparison from 5-Fold Cross-Validation")
plt.grid(axis='y', zorder=0)
if SAVE_GRAPHS_AVERAGES:
    plt.savefig(RESULTS_PATH + "average_accuracy_comparison.pdf", format='pdf')
plt.show()
```
### Average Loss
```
# Bar chart comparing mean cross-validation loss across models.
labels = list(results.keys())
loss = [results[name]["avg_loss"] for name in labels]
positions = range(len(labels))
plt.bar(positions, loss, zorder=3)
plt.xticks(positions, labels)
plt.xlabel("Model")
plt.ylabel("Average Loss")
plt.title("Average Loss Comparison from 5-Fold Cross-Validation")
plt.grid(axis='y', zorder=0)
if SAVE_GRAPHS_AVERAGES:
    plt.savefig(RESULTS_PATH + "average_loss_comparison.pdf", format='pdf')
plt.show()
```
### Box Plot Comparison
```
# Collect the per-fold accuracies for each model -- skipping the "avg_*"
# summary keys, which are longer than one character -- and compare the fold
# distributions with a box plot.
separated_results = {}
for model_name, model_results in results.items():
    separated_results[model_name] = [
        fold_result["accuracy"] * 100
        for fold_label, fold_result in model_results.items()
        if len(fold_label) == 1
    ]
labels = ["2-4", "4-8", "8-16", "16-32", "32-64", "64-128"]
data_to_plot = [separated_results[label] for label in labels]
plt.boxplot(data_to_plot, labels=labels, zorder=3)
plt.title("Five-Fold Cross-Validation Distribution Comparison")
plt.xlabel("Filter Combination")
plt.ylabel("Classification Accuracy (%)")
plt.ylim(0, 100)
plt.grid(axis='y', zorder=0)
if SAVE_BOXPLOTS:
    plt.savefig(RESULTS_PATH + "boxplot_all_models.pdf", format='pdf')
```
All the models above appear to perform very similarly; however, due to its slightly higher mean and, in particular, its better minimum performance, model "8-16" has been chosen to continue.
### Comparing Accuracy Across Folds
```
# For each model, plot the per-fold (per-subject) validation accuracy.
for model_name, model_results in results.items():
    fold_accuracies = []
    fold_labels = []
    for fold_label, fold_result in model_results.items():
        if len(fold_label) == 1:  # skip the "avg_*" summary entries
            fold_accuracies.append(fold_result["accuracy"] * 100)
            fold_labels.append(fold_label)
    positions = range(len(fold_labels))
    plt.bar(positions, fold_accuracies, zorder=3)
    plt.xticks(positions, fold_labels)
    plt.xlabel("Subject (fold)")
    plt.ylabel("Classification Accuracy (%)")
    plt.title("Model: " + model_name + " Fold Accuracy Distribution")
    plt.grid(axis='y', zorder=0)
    if SAVE_GRAPHS_DISTRIBUTIONS:
        plt.savefig(RESULTS_PATH + model_name + "_fold_accuracy_distribution.pdf", format='pdf')
    plt.show()
```
| github_jupyter |
# Deploy a Trained PyTorch Model
In this notebook, we walk through the process of deploying a trained model to a SageMaker endpoint. If you recently ran [the notebook for training](get_started_mnist_deploy.ipynb) with %store% magic, the `model_data` can be restored. Otherwise, we retrieve the
model artifact from a public S3 bucket.
```
# setups
import os
import json
import boto3
import sagemaker
from sagemaker.pytorch import PyTorchModel
from sagemaker import get_execution_role, Session

# Get global config (assumed to contain at least "public_bucket" -- TODO confirm).
with open("code/config.json", "r") as f:
    CONFIG = json.load(f)

sess = Session()
role = get_execution_role()

# Restore model_data saved by the training notebook via %store, if present.
%store -r pt_mnist_model_data

try:
    pt_mnist_model_data
except NameError:
    # Fallback: the training notebook was not run in this session, so copy a
    # pretrained model from the public bucket to your default bucket.
    import json  # NOTE: redundant -- json is already imported above.
    s3 = boto3.client("s3")
    bucket = CONFIG["public_bucket"]
    key = "datasets/image/MNIST/model/pytorch-training-2020-11-21-22-02-56-203/model.tar.gz"
    s3.download_file(bucket, key, "model.tar.gz")
    # upload to default bucket
    pt_mnist_model_data = sess.upload_data(
        path="model.tar.gz", bucket=sess.default_bucket(), key_prefix="model/pytorch"
    )

print(pt_mnist_model_data)
```
## PyTorch Model Object
The `PyTorchModel` class allows you to define an environment for making inference using your
model artifact. Like `PyTorch` class we discussed
[in this notebook for training an PyTorch model](
get_started_mnist_train.ipynb), it is high level API used to set up a docker image for your model hosting service.
Once it is properly configured, it can be used to create a SageMaker
endpoint on an EC2 instance. The SageMaker endpoint is a containerized environment that uses your trained model
to make inference on incoming data via RESTful API calls.
Some common parameters used to initiate the `PyTorchModel` class are:
- entry_point: A user defined python file to be used by the inference image as handlers of incoming requests
- source_dir: The directory of the `entry_point`
- role: An IAM role to make AWS service requests
- model_data: the S3 location of the compressed model artifact. It can be a path to a local file if the endpoint
is to be deployed on the SageMaker instance you are using to run this notebook (local mode)
- framework_version: version of the PyTorch package to be used
- py_version: python version to be used
We elaborate on the `entry_point` below.
```
# Wrap the trained model artifact in a PyTorchModel so it can be deployed;
# inference.py in the code/ directory supplies the request handler functions
# (model_fn / input_fn / predict_fn / output_fn) described above.
model = PyTorchModel(
    entry_point="inference.py",
    source_dir="code",
    role=role,
    model_data=pt_mnist_model_data,
    framework_version="1.5.0",
    py_version="py3",
)
```
### Entry Point for the Inference Image
Your model artifacts pointed to by `model_data` are pulled by the `PyTorchModel` and decompressed and saved in
the docker image it defines. They become regular model checkpoint files that you would produce outside SageMaker. This means in order to use your trained model for serving,
you need to tell `PyTorchModel` class how to recover a PyTorch model from the static checkpoint.
Also, the deployed endpoint interacts with RESTful API calls, you need to tell it how to parse an incoming
request to your model.
These two instructions needs to be defined as two functions in the python file pointed by `entry_point`.
By convention, we name this entry point file `inference.py` and we put it in the `code` directory.
To tell the inference image how to load the model checkpoint, you need to implement a function called
`model_fn`. This function takes one positional argument
- `model_dir`: the directory of the static model checkpoints in the inference image.
The return of `model_fn` is an PyTorch model. In this example, the `model_fn`
looks like:
```python
def model_fn(model_dir):
model = Net().to(device)
model.eval()
return model
```
Next, you need to tell the hosting service how to handle the incoming data. This includes:
* How to parse the incoming request
* How to use the trained model to make inference
* How to return the prediction to the caller of the service
You do it by implementing 3 functions:
#### `input_fn` function
The SageMaker PyTorch model server will invoke an `input_fn` function in your inference
entry point. This function handles data decoding. The `input_fn` has the following signature:
```python
def input_fn(request_body, request_content_type)
```
The two positional arguments are:
- `request_body`: the payload of the incoming request
- `request_content_type`: the content type of the incoming request
The return of `input_fn` is an object that can be passed to `predict_fn`
In this example, the `input_fn` looks like:
```python
def input_fn(request_body, request_content_type):
assert request_content_type=='application/json'
data = json.loads(request_body)['inputs']
data = torch.tensor(data, dtype=torch.float32, device=device)
return data
```
It requires the request payload is encoded as a json string and
it assumes the decoded payload contains a key `inputs`
that maps to the input data to be consumed by the model.
#### `predict_fn`
After the inference request has been deserialized by `input_fn`, the SageMaker PyTorch
model server invokes `predict_fn` on the return value of `input_fn`.
The `predict_fn` function has the following signature:
```python
def predict_fn(input_object, model)
```
The two positional arguments are:
- `input_object`: the return value from `input_fn`
- `model`: the return value from `model_fn`
The return of `predict_fn` is the first argument to be passed to `output_fn`
In this example, the `predict_fn` function looks like
```python
def predict_fn(input_object, model):
with torch.no_grad():
prediction = model(input_object)
return prediction
```
Note that we directly feed the return of `input_fn` to `predict_fn`.
This means you should invoke the SageMaker PyTorch model server with data that
can be readily consumed by the model, i.e. normalized and has batch and channel dimension.
#### `output_fn`
After invoking `predict_fn`, the model server invokes `output_fn` for data post-process.
The `output_fn` has the following signature:
```python
def output_fn(prediction, content_type)
```
The two positional arguments are:
- `prediction`: the return value from `predict_fn`
- `content_type`: the content type of the response
The return of `output_fn` should be a byte array of data serialized to `content_type`.
In this example, the `output_fn` function looks like
```python
def output_fn(predictions, content_type):
assert content_type == 'application/json'
res = predictions.cpu().numpy().tolist()
return json.dumps(res)
```
After the inference, the function uses `content_type` to encode the
prediction into the content type of the response. In this example,
the function requires the caller of the service to accept json string.
For more info on handler functions, check the [SageMaker Python SDK document](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#process-model-output)
## Execute the inference container
Once the `PyTorchModel` class is initiated, we can call its `deploy` method to run the container for the hosting
service. Some common parameters needed to call `deploy` methods are:
- initial_instance_count: the number of SageMaker instances to be used to run the hosting service.
- instance_type: the type of SageMaker instance to run the hosting service. Set it to `local` if you want to run the hosting service on the local SageMaker instance. Local mode is typically used for debugging.
- serializer: A python callable used to serialize (encode) the request data.
- deserializer: A python callable used to deserialize (decode) the response data.
Commonly used serializers and deserialzers are implemented in `sagemaker.serializers` and `sagemaker.deserializer`
submodules of the SageMaker Python SDK.
Since in the `transform_fn` we declared that the incoming requests are json-encoded, we need use a json serializer,
to encode the incoming data into a json string. Also, since we declared the return content type to be a json string, we
need to use a json deserializer to parse the response (in this case, into an
integer representing the predicted hand-written digit).
<span style="color:red"> Note: local mode is not supported in SageMaker Studio </span>
```
from sagemaker.serializers import JSONSerializer
from sagemaker.deserializers import JSONDeserializer

# set local_mode to False if you want to deploy on a remote
# SageMaker instance
local_mode = False

# Local mode runs the serving container on this instance; otherwise use a
# dedicated ml.c4.xlarge host.
instance_type = "local" if local_mode else "ml.c4.xlarge"

predictor = model.deploy(
    initial_instance_count=1,
    instance_type=instance_type,
    serializer=JSONSerializer(),
    deserializer=JSONDeserializer(),
)
```
The `predictor` we get above can be used to make prediction requests against a SageMaker endpoint. For more
information, check [the api reference for SageMaker Predictor](
https://sagemaker.readthedocs.io/en/stable/api/inference/predictors.html#sagemaker.predictor.Predictor)
Now, let's test the endpoint with some dummy data.
```
import random
import numpy as np

# 16 random "images" shaped like a batch of MNIST inputs: (N, C, H, W),
# wrapped under the "inputs" key that input_fn expects.
dummy_data = {"inputs": np.random.rand(16, 1, 28, 28).tolist()}
```
In `transform_fn`, we declared that the parsed data is a python dictionary with a key `inputs` and its value should
be a 1D array of length 784. Hence, the definition of `dummy_data`.
```
# Send the well-formed dummy batch to the endpoint and show the raw response.
res = predictor.predict(dummy_data)
print("Predictions:", res)
```
If the input data does not look exactly like `dummy_data`, the endpoint will raise an exception. This is because
of the stringent way we defined the `transform_fn`. Let's test the following example.
```
# A flat list without the expected {"inputs": ...} wrapper -- deliberately
# malformed relative to what input_fn accepts.
dummy_data = [random.random() for _ in range(784)]
```
When the `dummy_data` is parsed in `transform_fn`, it does not have an `inputs` field, so `transform_fn` will crash.
```
# uncomment the following line to make inference on incorrectly formated input data
# res = predictor.predict(dummy_data)
```
Now, let's use real MNIST test to test the endpoint. We use helper functions defined in `code.utils` to
download MNIST data set and normalize the input data.
```
from utils.mnist import mnist_to_numpy, normalize
import random
import matplotlib.pyplot as plt
%matplotlib inline
data_dir = "/tmp/data"
X, _ = mnist_to_numpy(data_dir, train=False)
# randomly sample 16 images to inspect
mask = random.sample(range(X.shape[0]), 16)
samples = X[mask]
# plot the images
fig, axs = plt.subplots(nrows=1, ncols=16, figsize=(16, 1))
for i, splt in enumerate(axs):
splt.imshow(samples[i])
print(samples.shape, samples.dtype)
```
Before we invoke the SageMaker PyTorch model server with `samples`, we need to do
some pre-processing
- convert its data type to 32 bit floating point
- normalize each channel (only one channel for MNIST)
- add a channel dimension
```
# Normalize over the spatial axes and add the channel dimension
# ((16, 28, 28) -> (16, 1, 28, 28)) before invoking the endpoint.
samples = normalize(samples.astype(np.float32), axis=(1, 2))
res = predictor.predict({"inputs": np.expand_dims(samples, axis=1).tolist()})
```
The response is a list of probability vectors, one per sample.
```
# Each response row is a probability vector; take the argmax as the digit.
predictions = np.argmax(np.array(res, dtype=np.float32), axis=1).tolist()
print("Predicted digits: ", predictions)
```
## Test and debug the entry point before deployment
When deploying a model to a SageMaker endpoint, it is a good practice to test the entry
point. The following snippet shows you how you can test and debug the `model_fn` and
`transform_fn` you implemented in the entry point for the inference image.
```
!pygmentize code/test_inference.py
```
The `test` function simulates how the inference container works. It pulls the model
artifact and loads the model into
memory by calling `model_fn` and parse `model_dir` to it.
When it receives a request,
it calls `input_fn`, `predict_fn` and `output_fn` consecutively.
Implementing such a test function helps you debugging the entry point before put it into
the production. If `test` runs correctly, then you can be certain that if the incoming
data and its content type are what they suppose to be, then the endpoint point is going
to work as expected.
## (Optional) Clean up
If you do not plan to use the endpoint, you should delete it to free up some computation
resource. If you use local, you will need to manually delete the docker container bounded
at port 8080 (the port that listens to the incoming request).
```
import os

# Tear down the endpoint (remote mode) or remove the local serving container
# bound to port 8080 (local mode).
if local_mode:
    os.system("docker container ls | grep 8080 | awk '{print $1}' | xargs docker container rm -f")
else:
    predictor.delete_endpoint()
```
| github_jupyter |
##### Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
```
#@title Default title text
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# High Performance Monte Carlo Simulation of Ising Model on TPU Clusters
This notebook is a companion webpage for the paper: *High Performance Monte Carlo Simulation of Ising Model on TPU Clusters (Yang et al., 2019)*. See the [README.md](https://github.com/google-research/google-research/blob/master/simulation_research/ising_model/README.md) for details on how to simulate Ising model on Cloud TPU.
```
"""Ising Model MCMC Simulation on TPU.
This is the implementation of Algorithm 2: UpdateOptim in the paper.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from tensorflow.python.ops import inplace_ops
# Constants
_NANOS_PER_SECOND = 1e9
# Whether bfloat16 is used in the simulation.
USE_BFLOAT16 = True
# Lattice sub dimension, where each sub lattice is a square grid of spin values.
LATTICE_SUB_DIM = 256
# Lattice block dimensions of size 2, where each block is a grid of sub lattices
# with shape lattice_block_dims.
# Note: The TPU chips accessible from https://colab.research.google.com are
# TPUv2 (8 cores). They have less memory than TPUv3 (the chips used in the
# paper) so can not fit [224, 112], the size reported in the paper.
# `README.md` has instructions on setting up Google Cloud project to use
# TPUv3 and running larger experiments.
LATTICE_BLOCK_DIMS = [96, 48]
# Lattice super block dimension, where each super block is a square grid of
# lattice blocks.
LATTICE_SUPER_BLOCK_DIM = 2
# Cores topology of size 2, each core is one replica of the computation.
CORES_TOPOLOGY = [2, 4]
# Burn in steps in MCMC.
NUMBER_OF_BURN_IN_UPDATE = 1
# Simulation steps in MCMC.
NUMBER_OF_WHOLE_LATTICE_UPDATE = 200
# Critical temperature.
CRITICAL_TEMPERATURE = 2.26918531421
# The temperature normalized to the critical temerature. Default to 1.0.
NORMALIZED_TEMPERATURE = 1.0
# Using Convolution for nearest neighbor computation.
USE_CONV = True
# Convolution kernel channel sizes.
KERNEL_CHANNEL_SIZE = 16
KERNEL_CHANNEL_SIZE_2 = 128
CONV_LATTICE_DIMS = [(LATTICE_SUB_DIM // 2) * dim for dim in LATTICE_BLOCK_DIMS]
# END PARAMETERS
```
###TensorFlow Version
```
print(tf.__version__)


def get_inverse_temperature():
    """Return the inverse temperature beta = 1 / T used by the simulation."""
    temperature = NORMALIZED_TEMPERATURE * CRITICAL_TEMPERATURE
    return 1.0 / temperature
def grid_coordinates(computation_shape):
    """Enumerate every coordinate of a grid in row-major order.

    Args:
      computation_shape: A sequence of integers giving the shape of the grid.

    Returns:
      A numpy array with shape
      (np.prod(computation_shape), len(computation_shape)) and type np.int32.
    """
    rank = len(computation_shape)
    assert rank > 0
    axes = [np.arange(extent, dtype=np.int32) for extent in computation_shape]
    # "ij" indexing keeps the first axis varying slowest (row-major order).
    mesh = np.meshgrid(*axes, indexing="ij")
    return np.stack(mesh, axis=-1).reshape(-1, rank)
def tpu_device_assignment(computation_shape, tpu_topology):
    """Builds a DeviceAssignment that maps grid coordinates to TPU cores.

    Args:
      computation_shape: 1-D numpy array giving the logical compute grid shape.
      tpu_topology: a TPU topology object exposing `mesh_shape` and
        `mesh_rank` (presumably tf.contrib.tpu.Topology -- TODO confirm).

    Returns:
      A (DeviceAssignment, logical grid coordinates) tuple.
    """
    # This may be too restrictive, but it makes mapping onto the TPU topology
    # simple. We can use a more complicated algorithm if needed.
    if computation_shape.ndim != 1:
        raise ValueError(
            "computation_shape ({}) must be a vector".format(computation_shape))
    if np.prod(computation_shape) > np.prod(tpu_topology.mesh_shape):
        raise ValueError(
            "computation_shape ({}) does not fit in TPU mesh shape ({})".format(
                computation_shape, tpu_topology.mesh_shape))
    # Take the first prod(computation_shape) physical mesh coordinates, then
    # reshape so each replica owns exactly one core (middle axis of size 1).
    core_assignment = grid_coordinates(
        tpu_topology.mesh_shape)[0:np.prod(computation_shape), :]
    core_assignment = core_assignment.reshape((-1, 1, tpu_topology.mesh_rank))
    # External logical compute grid shape.
    compute_core_assignment = grid_coordinates(computation_shape)
    return (tf.contrib.tpu.DeviceAssignment(tpu_topology, core_assignment),
            compute_core_assignment)
def get_dtype():
    """Return the TensorFlow dtype used for spin values (bfloat16 or float32)."""
    if USE_BFLOAT16:
        return tf.bfloat16
    return tf.float32
def is_single_core(cores_topology):
    """Return True when the core topology is a single 1x1 core."""
    single_core = [1, 1]
    return cores_topology == single_core
def create_iterator(shape):
    """Iterate over all index tuples of a grid with the given shape."""
    return itertools.product(*(range(extent) for extent in shape))
def create_list(shape):
    """Create a nested list with the given shape, filled with None.

    Sub-lists are built recursively and are independent objects (no aliasing
    between rows).

    Args:
      shape: sequence of non-negative ints, e.g. [2, 3] for a 2x3 grid.

    Returns:
      A nested list of the requested shape, or None when shape is empty
      (the recursion's base case: the default element value).
    """
    if not shape:
        # Previously this was an implicit fall-through returning None; make
        # the base case explicit so the contract is obvious.
        return None
    return [create_list(shape[1:]) for _ in range(shape[0])]
class NearestNeighborCalculatorOptimConv(object):
"""Calculate the sum of nearest neighbor spin values."""
    def __init__(self):
        # Constant matrix to compute the sum of nearest neighbor spins. These
        # are formulated as the kernel, or filters, for tf.nn.conv1d and when
        # applied, it computes the sum of nearest neighbors.
        # NOTE(review): the k=-15 / k=15 off-diagonals appear to supply the
        # wrap-around contribution at the channel boundary (channel size 16);
        # confirm against KERNEL_CHANNEL_SIZE before changing.
        self._kernel_nn = tf.constant(np.expand_dims(np.stack(
            [np.eye(KERNEL_CHANNEL_SIZE, k=-15, dtype=np.float16),
             (np.eye(KERNEL_CHANNEL_SIZE, k=0, dtype=np.float16) +
              np.eye(KERNEL_CHANNEL_SIZE, k=1, dtype=np.float16)),
             np.zeros([KERNEL_CHANNEL_SIZE, KERNEL_CHANNEL_SIZE],
                      dtype=np.float16)]), axis=0), tf.bfloat16)
        # Mirror-image kernel shifting neighbors in the opposite direction.
        self._kernel_nn_t = tf.constant(np.expand_dims(np.stack(
            [np.zeros([KERNEL_CHANNEL_SIZE, KERNEL_CHANNEL_SIZE], dtype=np.float16),
             (np.eye(KERNEL_CHANNEL_SIZE, k=-1, dtype=np.float16) +
              np.eye(KERNEL_CHANNEL_SIZE, k=0, dtype=np.float16)),
             np.eye(KERNEL_CHANNEL_SIZE, k=15, dtype=np.float16)]), axis=0),
            tf.bfloat16)
        # The same pair of kernels built at the larger channel size
        # (KERNEL_CHANNEL_SIZE_2). NOTE(review): these still use offsets
        # k=-15 / k=15 rather than +/-(KERNEL_CHANNEL_SIZE_2 - 1) -- verify
        # this is intentional for the 128-channel variant.
        self._kernel_nn_2 = tf.constant(np.expand_dims(np.stack(
            [np.eye(KERNEL_CHANNEL_SIZE_2, k=-15, dtype=np.float16),
             (np.eye(KERNEL_CHANNEL_SIZE_2, k=0, dtype=np.float16) +
              np.eye(KERNEL_CHANNEL_SIZE_2, k=1, dtype=np.float16)),
             np.zeros([KERNEL_CHANNEL_SIZE_2, KERNEL_CHANNEL_SIZE_2],
                      dtype=np.float16)]), axis=0), tf.bfloat16)
        self._kernel_nn_t_2 = tf.constant(np.expand_dims(np.stack(
            [np.zeros([KERNEL_CHANNEL_SIZE_2, KERNEL_CHANNEL_SIZE_2],
                      dtype=np.float16),
             (np.eye(KERNEL_CHANNEL_SIZE_2, k=-1, dtype=np.float16) +
              np.eye(KERNEL_CHANNEL_SIZE_2, k=0, dtype=np.float16)),
             np.eye(KERNEL_CHANNEL_SIZE_2, k=15, dtype=np.float16)]), axis=0),
            tf.bfloat16)
        # If the lattice is distributed in multiple replicas, then define the
        # permutation pairs that permute the boundaries of sub-lattices across
        # replicas in all 4 directions (N/S/W/E). Those boundary spins are used
        # to compute the nearest neighbor sums of boundary spins on
        # sub-lattices. They are ignored if the lattice is updated on a single
        # core.
        tpu_x, tpu_y = CORES_TOPOLOGY
        core_ids = np.arange(tpu_x * tpu_y).reshape((tpu_x, tpu_y))
        # np.roll yields the id of the neighboring replica in each direction,
        # with periodic (toroidal) wrap-around at the edges of the core grid.
        core_ids_n = np.roll(core_ids, 1, axis=0)
        core_ids_s = np.roll(core_ids, -1, axis=0)
        core_ids_w = np.roll(core_ids, 1, axis=1)
        core_ids_e = np.roll(core_ids, -1, axis=1)
        self._permute_n = []
        self._permute_s = []
        self._permute_w = []
        self._permute_e = []
        # Each entry pairs a neighbor replica id with the local replica id;
        # presumably [source, destination] for collective_permute -- confirm
        # against the tpu_ops.collective_permute contract.
        for i, j in itertools.product(range(tpu_x), range(tpu_y)):
            self._permute_n.append([core_ids_n[i, j], core_ids[i, j]])
            self._permute_s.append([core_ids_s[i, j], core_ids[i, j]])
            self._permute_w.append([core_ids_w[i, j], core_ids[i, j]])
            self._permute_e.append([core_ids_e[i, j], core_ids[i, j]])
    def get_boundary_n(self, super_grids, i, j, black):
        """The boundary on the northern direction.

        Args:
          super_grids: nested lists of lattice tensors, indexed as
            super_grids[a][b][i][j]; the leading two indices appear to select
            the checkerboard sub-lattice pairing -- TODO confirm layout.
          i: block row index within the super block.
          j: block column index within the super block.
          black: whether the black sub-lattice of the checkerboard
            decomposition is being updated.

        Returns:
          The spin tensor forming the northern boundary of block (i, j).
        """
        # Collective permute can handle single core condition naturally.
        if black:
            if i == 0:
                # Top edge: fetch from the replica above (periodic wrap-around
                # on the core topology).
                boundary_n = tpu_ops.collective_permute(
                    super_grids[1][0][-1][j][-1, -1, :, :], self._permute_n)
            else:
                boundary_n = super_grids[1][0][i - 1][j][-1, -1, :, :]
        else:
            if i == 0:
                # In this case, its northern boundary is the southern boundary
                # of the sub-lattice in the replica above, assuming periodic
                # boundary condition on core topology.
                boundary_n = tpu_ops.collective_permute(
                    super_grids[1][1][-1][j][-1, -1, :, :], self._permute_n)
            else:
                boundary_n = super_grids[1][1][i - 1][j][-1, -1, :, :]
        return boundary_n
def get_boundary_s(self, super_grids, i, j, black):
  """Return the southern boundary spins for sub-lattice (i, j).

  The neighbor is taken from the (0, 1) color grid for black updates and
  from the (0, 0) color grid for white updates.  On the bottom row
  (i == LATTICE_SUPER_BLOCK_DIM - 1) the spins are the northern boundary
  of the sub-lattice in the replica below, fetched via collective permute
  under the periodic boundary condition on the core topology.
  """
  color_grid = super_grids[0][1] if black else super_grids[0][0]
  if i == LATTICE_SUPER_BLOCK_DIM - 1:
    # Wrap around: fetch the top row of the neighboring replica.
    return tpu_ops.collective_permute(
        color_grid[0][j][0, 0, :, :], self._permute_s)
  return color_grid[i + 1][j][0, 0, :, :]
def get_boundary_w(self, super_grids, i, j, black):
  """Return the western boundary spins for sub-lattice (i, j).

  The neighbor is taken from the (0, 1) color grid for black updates and
  from the (1, 1) color grid for white updates.  In the leftmost column
  (j == 0) the spins are the eastern boundary of the sub-lattice in the
  replica on the left, fetched via collective permute under the periodic
  boundary condition on the core topology.
  """
  color_grid = super_grids[0][1] if black else super_grids[1][1]
  if j == 0:
    # Wrap around: fetch the rightmost column of the neighboring replica.
    return tpu_ops.collective_permute(
        color_grid[i][-1][:, :, -1, -1], self._permute_w)
  return color_grid[i][j - 1][:, :, -1, -1]
def get_boundary_e(self, super_grids, i, j, black):
  """Return the eastern boundary spins for sub-lattice (i, j).

  The neighbor is taken from the (1, 0) color grid for black updates and
  from the (0, 0) color grid for white updates.  In the rightmost column
  (j == LATTICE_SUPER_BLOCK_DIM - 1) the spins are the western boundary
  of the sub-lattice in the replica on the right, fetched via collective
  permute under the periodic boundary condition on the core topology.
  """
  color_grid = super_grids[1][0] if black else super_grids[0][0]
  if j == LATTICE_SUPER_BLOCK_DIM - 1:
    # Wrap around: fetch the leftmost column of the neighboring replica.
    return tpu_ops.collective_permute(
        color_grid[i][0][:, :, 0, 0], self._permute_e)
  return color_grid[i][j + 1][:, :, 0, 0]
def sum_of_nearest_neighbors_black(self, super_grids):
  """The sum of nearest neighbor in each site on periodic boundaries.

  Computes, for every black sub-lattice (colors (0, 0) and (1, 1)), the sum
  of the four nearest-neighbor spins, using convolutions with the kernels
  built in __init__ for the interior and one-hot einsum masks to patch in
  the periodic boundary rows/columns fetched from neighboring sub-lattices
  or replicas.
  """
  sum_nn_00 = create_list([LATTICE_SUPER_BLOCK_DIM] * 2)
  sum_nn_11 = create_list([LATTICE_SUPER_BLOCK_DIM] * 2)
  for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
    # One-hot outer products place the boundary spins in the first
    # row/column (for the (0, 0) color) of the 4-D sub-lattice tensor.
    neighbor_n_00 = tf.einsum(
        'ij,kl->ijkl', tf.einsum(
            'i,j->ij', tf.one_hot(0, KERNEL_CHANNEL_SIZE_2,
                                  dtype=get_dtype()),
            tf.one_hot(0, CONV_LATTICE_DIMS[0] // KERNEL_CHANNEL_SIZE_2,
                       dtype=get_dtype())),
        self.get_boundary_n(super_grids, i, j, black=True))
    neighbor_w_00 = tf.einsum(
        'ij,kl->ijkl', self.get_boundary_w(super_grids, i, j, black=True),
        tf.einsum(
            'i,j->ij',
            tf.one_hot(0, CONV_LATTICE_DIMS[1] // KERNEL_CHANNEL_SIZE,
                       dtype=get_dtype()),
            tf.one_hot(0, KERNEL_CHANNEL_SIZE, dtype=get_dtype())))
    # For the (1, 1) color the boundary spins go into the last row/column.
    neighbor_s_11 = tf.einsum(
        'ij,kl->ijkl', tf.einsum(
            'i,j->ij',
            tf.one_hot(KERNEL_CHANNEL_SIZE_2 - 1, KERNEL_CHANNEL_SIZE_2,
                       dtype=get_dtype()),
            tf.one_hot((CONV_LATTICE_DIMS[0] // KERNEL_CHANNEL_SIZE_2) - 1,
                       CONV_LATTICE_DIMS[0] // KERNEL_CHANNEL_SIZE_2,
                       dtype=get_dtype())),
        self.get_boundary_s(super_grids, i, j, black=True))
    neighbor_e_11 = tf.einsum(
        'ij,kl->ijkl', self.get_boundary_e(super_grids, i, j, black=True),
        tf.einsum(
            'i,j->ij',
            tf.one_hot((CONV_LATTICE_DIMS[1] // KERNEL_CHANNEL_SIZE) - 1,
                       CONV_LATTICE_DIMS[1] // KERNEL_CHANNEL_SIZE,
                       dtype=get_dtype()),
            tf.one_hot(KERNEL_CHANNEL_SIZE - 1, KERNEL_CHANNEL_SIZE,
                       dtype=get_dtype())))
    # Interior neighbor sums: one convolution per white color, with the
    # 'ijkl->lkji' transposes applying the second kernel along the other
    # pair of axes.
    sum_nn_00[i][j] = (tf.nn.conv2d(
        super_grids[0][1][i][j], self._kernel_nn, padding='SAME') +
        tf.einsum('ijkl->lkji', tf.nn.conv2d(
            tf.einsum('ijkl->lkji', super_grids[1][0][i][j]),
            self._kernel_nn_2, padding='SAME')))
    sum_nn_11[i][j] = (tf.nn.conv2d(
        super_grids[1][0][i][j], self._kernel_nn_t, padding='SAME') +
        tf.einsum('ijkl->lkji', tf.nn.conv2d(
            tf.einsum('ijkl->lkji', super_grids[0][1][i][j]),
            self._kernel_nn_t_2, padding='SAME')))
    # Patch in the boundary contributions the convolutions could not see.
    sum_nn_00[i][j] += (neighbor_n_00 + neighbor_w_00)
    sum_nn_11[i][j] += (neighbor_s_11 + neighbor_e_11)
  return sum_nn_00, sum_nn_11
def sum_of_nearest_neighbors_white(self, super_grids):
  """The sum of nearest neighbor in each site on periodic boundaries.

  Mirror of sum_of_nearest_neighbors_black for the white sub-lattices
  (colors (0, 1) and (1, 0)): interior neighbor sums come from
  convolutions over the black color grids, and one-hot einsum masks patch
  in the periodic boundary rows/columns.
  """
  sum_nn_01 = create_list([LATTICE_SUPER_BLOCK_DIM] * 2)
  sum_nn_10 = create_list([LATTICE_SUPER_BLOCK_DIM] * 2)
  for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
    # Boundary spins for the (0, 1) color: northern into the first row,
    # eastern into the last column.
    neighbor_n_01 = tf.einsum(
        'ij,kl->ijkl', tf.einsum(
            'i,j->ij', tf.one_hot(0, KERNEL_CHANNEL_SIZE_2,
                                  dtype=get_dtype()),
            tf.one_hot(0, CONV_LATTICE_DIMS[0] // KERNEL_CHANNEL_SIZE_2,
                       dtype=get_dtype())),
        self.get_boundary_n(super_grids, i, j, black=False))
    neighbor_e_01 = tf.einsum(
        'ij,kl->ijkl', self.get_boundary_e(super_grids, i, j, black=False),
        tf.einsum(
            'i,j->ij',
            tf.one_hot((CONV_LATTICE_DIMS[1] // KERNEL_CHANNEL_SIZE) - 1,
                       CONV_LATTICE_DIMS[1] // KERNEL_CHANNEL_SIZE,
                       dtype=get_dtype()),
            tf.one_hot(KERNEL_CHANNEL_SIZE - 1, KERNEL_CHANNEL_SIZE,
                       dtype=get_dtype())))
    # Boundary spins for the (1, 0) color: southern into the last row,
    # western into the first column.
    neighbor_s_10 = tf.einsum(
        'ij,kl->ijkl', tf.einsum(
            'i,j->ij',
            tf.one_hot(KERNEL_CHANNEL_SIZE_2 - 1, KERNEL_CHANNEL_SIZE_2,
                       dtype=get_dtype()),
            tf.one_hot((CONV_LATTICE_DIMS[0] // KERNEL_CHANNEL_SIZE_2) - 1,
                       CONV_LATTICE_DIMS[0] // KERNEL_CHANNEL_SIZE_2,
                       dtype=get_dtype())),
        self.get_boundary_s(super_grids, i, j, black=False))
    neighbor_w_10 = tf.einsum(
        'ij,kl->ijkl', self.get_boundary_w(super_grids, i, j, black=False),
        tf.einsum(
            'i,j->ij',
            tf.one_hot(0, CONV_LATTICE_DIMS[1] // KERNEL_CHANNEL_SIZE,
                       dtype=get_dtype()),
            tf.one_hot(0, KERNEL_CHANNEL_SIZE, dtype=get_dtype())))
    # Interior neighbor sums via convolutions over the black colors.
    sum_nn_01[i][j] = (tf.nn.conv2d(
        super_grids[0][0][i][j], self._kernel_nn_t, padding='SAME') +
        tf.einsum('ijkl->lkji', tf.nn.conv2d(
            tf.einsum('ijkl->lkji', super_grids[1][1][i][j]),
            self._kernel_nn_2, padding='SAME')))
    sum_nn_10[i][j] = (tf.nn.conv2d(
        super_grids[1][1][i][j], self._kernel_nn, padding='SAME') +
        tf.einsum('ijkl->lkji',
                  tf.nn.conv2d(
                      tf.einsum('ijkl->lkji', super_grids[0][0][i][j]),
                      self._kernel_nn_t_2, padding='SAME')))
    # Patch in the boundary contributions the convolutions could not see.
    sum_nn_01[i][j] += (neighbor_n_01 + neighbor_e_01)
    sum_nn_10[i][j] += (neighbor_s_10 + neighbor_w_10)
  return sum_nn_01, sum_nn_10
class NearestNeighborCalculatorOptim(object):
  """Calculate the sum of nearest neighbor spin values.

  Matmul-based variant (cf. the convolution-based calculator above): the
  interior nearest-neighbor sums are formed by multiplying each sub-lattice
  with a banded constant matrix, and the boundary rows/columns are fetched
  from neighboring sub-lattices (or neighboring replicas, via collective
  permute) and added back through one-hot expansion tensors.
  """

  def __init__(self):
    # Constant matrix to compute the sum of nearest neighbor spins. Applying
    # it as a left operand to adding the northern/southern neighbors, as a
    # right operand to adding the western/eastern neighbors.
    grid_nn = tf.constant(
        np.eye(LATTICE_SUB_DIM // 2, k=0, dtype=np.float16) +
        np.eye(LATTICE_SUB_DIM // 2, k=1, dtype=np.float16),
        dtype=get_dtype())
    self._grid_nn = tf.broadcast_to(grid_nn, [
        LATTICE_BLOCK_DIMS[0],
        LATTICE_BLOCK_DIMS[1],
        LATTICE_SUB_DIM // 2,
        LATTICE_SUB_DIM // 2,
    ])
    # Given the spins on northern/southern or western/eastern boundaries of a
    # grid of sub-lattices, transform them into another grid of sub-lattices
    # that are added to compensate the nearest neighbor sums for spins on the
    # boundaries.
    self._grid_expand_s = tf.broadcast_to(
        tf.one_hot(0, LATTICE_SUB_DIM // 2, dtype=get_dtype()), [
            LATTICE_BLOCK_DIMS[0],
            LATTICE_BLOCK_DIMS[1],
            LATTICE_SUB_DIM // 2,
        ])
    self._grid_expand_e = tf.broadcast_to(
        tf.one_hot(
            LATTICE_SUB_DIM // 2 - 1, LATTICE_SUB_DIM // 2, dtype=get_dtype()),
        [
            LATTICE_BLOCK_DIMS[0],
            LATTICE_BLOCK_DIMS[1],
            LATTICE_SUB_DIM // 2,
        ])
    # If the lattice is distributed in multiple replicas, then define the
    # permutation pairs that permute the boundaries of sub-lattices across
    # replicas in all 4 directions. Those boundary spins are used to compute
    # the nearest neighbor sums of boundary spins on sub-lattices. They are
    # ignored if the lattice is updated on single core.
    tpu_x, tpu_y = CORES_TOPOLOGY
    core_ids = np.arange(tpu_x * tpu_y).reshape((tpu_x, tpu_y))
    core_ids_n = np.roll(core_ids, 1, axis=0)
    core_ids_s = np.roll(core_ids, -1, axis=0)
    core_ids_w = np.roll(core_ids, 1, axis=1)
    core_ids_e = np.roll(core_ids, -1, axis=1)
    self._permute_n = []
    self._permute_s = []
    self._permute_w = []
    self._permute_e = []
    for i, j in itertools.product(range(tpu_x), range(tpu_y)):
      # Each pair is [source replica, destination replica].
      self._permute_n.append([core_ids_n[i, j], core_ids[i, j]])
      self._permute_s.append([core_ids_s[i, j], core_ids[i, j]])
      self._permute_w.append([core_ids_w[i, j], core_ids[i, j]])
      self._permute_e.append([core_ids_e[i, j], core_ids[i, j]])

  def get_boundary_n(self, super_grids, i, j, single_core, black):
    """The boundary on the northern direction.

    Returns the whole-sub-lattice northern boundary column: the last row of
    the sub-lattice above (wrapped periodically, possibly across replicas)
    concatenated with this sub-lattice's own rows except the last.
    """
    if single_core:
      if black:
        boundary_n = super_grids[1][0][
            (i - 1) % LATTICE_SUPER_BLOCK_DIM][j][-1:, :, -1, :]
        boundary_n_rest = super_grids[1][0][i][j][:-1, :, -1, :]
      else:
        boundary_n = super_grids[1][1][
            (i - 1) % LATTICE_SUPER_BLOCK_DIM][j][-1:, :, -1, :]
        boundary_n_rest = super_grids[1][1][i][j][:-1, :, -1, :]
    else:
      if black:
        if i == 0:
          boundary_n = tpu_ops.collective_permute(
              super_grids[1][0][(i - 1) %
                                LATTICE_SUPER_BLOCK_DIM][j][-1:, :, -1, :],
              self._permute_n)
        else:
          boundary_n = super_grids[1][0][
              (i - 1) % LATTICE_SUPER_BLOCK_DIM][j][-1:, :, -1, :]
        boundary_n_rest = super_grids[1][0][i][j][:-1, :, -1, :]
      else:
        if i == 0:
          # In this case, its northern boundary is the southern boundary of
          # the sub-lattice in the replica above, assuming periodic boundary
          # condition on core topology.
          boundary_n = tpu_ops.collective_permute(
              super_grids[1][1][(i - 1) %
                                LATTICE_SUPER_BLOCK_DIM][j][-1:, :, -1, :],
              self._permute_n)
        else:
          boundary_n = super_grids[1][1][
              (i - 1) % LATTICE_SUPER_BLOCK_DIM][j][-1:, :, -1, :]
        boundary_n_rest = super_grids[1][1][i][j][:-1, :, -1, :]
    grid_boundary_n_ij = tf.concat([boundary_n, boundary_n_rest], axis=0)
    return grid_boundary_n_ij

  def get_boundary_s(self, super_grids, i, j, single_core, black):
    """The boundary on the southern direction.

    Mirror of get_boundary_n: the first row of the sub-lattice below is
    appended after this sub-lattice's own rows except the first.
    """
    if single_core:
      if black:
        boundary_s = super_grids[0][1][(i + 1) %
                                       LATTICE_SUPER_BLOCK_DIM][j][:1, :, 0, :]
        boundary_s_rest = super_grids[0][1][i][j][1:, :, 0, :]
      else:
        boundary_s = super_grids[0][0][(i + 1) %
                                       LATTICE_SUPER_BLOCK_DIM][j][:1, :, 0, :]
        boundary_s_rest = super_grids[0][0][i][j][1:, :, 0, :]
    else:
      if black:
        if i == LATTICE_SUPER_BLOCK_DIM - 1:
          boundary_s = tpu_ops.collective_permute(
              super_grids[0][1][(i + 1) %
                                LATTICE_SUPER_BLOCK_DIM][j][:1, :, 0, :],
              self._permute_s)
        else:
          boundary_s = super_grids[0][1][
              (i + 1) % LATTICE_SUPER_BLOCK_DIM][j][:1, :, 0, :]
        boundary_s_rest = super_grids[0][1][i][j][1:, :, 0, :]
      else:
        if i == LATTICE_SUPER_BLOCK_DIM - 1:
          # In this case, its southern boundary is the northern boundary of
          # the sub-lattice in the replica below, assuming periodic boundary
          # condition on core topology.
          boundary_s = tpu_ops.collective_permute(
              super_grids[0][0][(i + 1) %
                                LATTICE_SUPER_BLOCK_DIM][j][:1, :, 0, :],
              self._permute_s)
        else:
          boundary_s = super_grids[0][0][
              (i + 1) % LATTICE_SUPER_BLOCK_DIM][j][:1, :, 0, :]
        boundary_s_rest = super_grids[0][0][i][j][1:, :, 0, :]
    grid_boundary_s_ij = tf.concat([boundary_s_rest, boundary_s], axis=0)
    return grid_boundary_s_ij

  def get_boundary_w(self, super_grids, i, j, single_core, black):
    """The boundary on the western direction.

    The last column of the sub-lattice on the left (wrapped periodically,
    possibly across replicas) is prepended to this sub-lattice's own
    columns except the last.
    """
    if single_core:
      if black:
        boundary_w = super_grids[0][1][i][
            (j - 1) % LATTICE_SUPER_BLOCK_DIM][:, -1:, :, -1]
        boundary_w_rest = super_grids[0][1][i][j][:, :-1, :, -1]
      else:
        boundary_w = super_grids[1][1][i][
            (j - 1) % LATTICE_SUPER_BLOCK_DIM][:, -1:, :, -1]
        boundary_w_rest = super_grids[1][1][i][j][:, :-1, :, -1]
    else:
      if black:
        if j == 0:
          boundary_w = tpu_ops.collective_permute(
              super_grids[0][1][i][(j - 1) %
                                   LATTICE_SUPER_BLOCK_DIM][:, -1:, :, -1],
              self._permute_w)
        else:
          boundary_w = super_grids[0][1][i][
              (j - 1) % LATTICE_SUPER_BLOCK_DIM][:, -1:, :, -1]
        boundary_w_rest = super_grids[0][1][i][j][:, :-1, :, -1]
      else:
        if j == 0:
          # In this case, its western boundary is the eastern boundary of the
          # sub-lattice in the replica on the left, assuming periodic boundary
          # condition on core topology.
          boundary_w = tpu_ops.collective_permute(
              super_grids[1][1][i][(j - 1) %
                                   LATTICE_SUPER_BLOCK_DIM][:, -1:, :, -1],
              self._permute_w)
        else:
          boundary_w = super_grids[1][1][i][
              (j - 1) % LATTICE_SUPER_BLOCK_DIM][:, -1:, :, -1]
        boundary_w_rest = super_grids[1][1][i][j][:, :-1, :, -1]
    grid_boundary_w_ij = tf.concat([boundary_w, boundary_w_rest], axis=1)
    return grid_boundary_w_ij

  def get_boundary_e(self, super_grids, i, j, single_core, black):
    """The boundary on the eastern direction.

    Mirror of get_boundary_w: the first column of the sub-lattice on the
    right is appended after this sub-lattice's own columns except the first.
    """
    if single_core:
      if black:
        boundary_e = super_grids[1][0][i][(j + 1) %
                                          LATTICE_SUPER_BLOCK_DIM][:, :1, :, 0]
        boundary_e_rest = super_grids[1][0][i][j][:, 1:, :, 0]
      else:
        boundary_e = super_grids[0][0][i][(j + 1) %
                                          LATTICE_SUPER_BLOCK_DIM][:, :1, :, 0]
        boundary_e_rest = super_grids[0][0][i][j][:, 1:, :, 0]
    else:
      if black:
        if j == LATTICE_SUPER_BLOCK_DIM - 1:
          boundary_e = tpu_ops.collective_permute(
              super_grids[1][0][i][(j + 1) %
                                   LATTICE_SUPER_BLOCK_DIM][:, :1, :, 0],
              self._permute_e)
        else:
          boundary_e = super_grids[1][0][i][
              (j + 1) % LATTICE_SUPER_BLOCK_DIM][:, :1, :, 0]
        boundary_e_rest = super_grids[1][0][i][j][:, 1:, :, 0]
      else:
        if j == LATTICE_SUPER_BLOCK_DIM - 1:
          # In this case, its eastern boundary is the western boundary of the
          # sub-lattice in the replica on the right, assuming periodic
          # boundary condition on core topology.
          boundary_e = tpu_ops.collective_permute(
              super_grids[0][0][i][(j + 1) %
                                   LATTICE_SUPER_BLOCK_DIM][:, :1, :, 0],
              self._permute_e)
        else:
          boundary_e = super_grids[0][0][i][
              (j + 1) % LATTICE_SUPER_BLOCK_DIM][:, :1, :, 0]
        boundary_e_rest = super_grids[0][0][i][j][:, 1:, :, 0]
    grid_boundary_e_ij = tf.concat([boundary_e_rest, boundary_e], axis=1)
    return grid_boundary_e_ij

  def sum_of_nearest_neighbors_black(self, super_grids, single_core):
    """The sum of nearest neighbor in each site on periodic boundaries.

    For each black sub-lattice (colors (0, 0) and (1, 1)): the interior
    neighbor sum is a pair of banded matmuls against the white color grids,
    then the boundary spins from the four directions are expanded through
    one-hot vectors and added back.
    """
    sum_nn_00 = create_list([LATTICE_SUPER_BLOCK_DIM] * 2)
    for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
      sum_nn_00[i][j] = (
          tf.matmul(super_grids[0][1][i][j], self._grid_nn) +
          tf.matmul(self._grid_nn, super_grids[1][0][i][j], transpose_a=True))
    sum_nn_11 = create_list([LATTICE_SUPER_BLOCK_DIM] * 2)
    for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
      sum_nn_11[i][j] = (
          tf.matmul(self._grid_nn, super_grids[0][1][i][j]) +
          tf.matmul(super_grids[1][0][i][j], self._grid_nn, transpose_b=True))
    for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
      # Handle the northern/western boundary.
      grid_boundary_n_ij = self.get_boundary_n(super_grids, i, j, single_core,
                                               True)
      grid_boundary_w_ij = self.get_boundary_w(super_grids, i, j, single_core,
                                               True)
      sum_nn_00[i][j] += (
          tf.einsum('mni,mnj->mnij', self._grid_expand_s, grid_boundary_n_ij) +
          tf.einsum('mni,mnj->mnij', grid_boundary_w_ij, self._grid_expand_s))
      # Handle the southern/eastern boundary.
      grid_boundary_s_ij = self.get_boundary_s(super_grids, i, j, single_core,
                                               True)
      grid_boundary_e_ij = self.get_boundary_e(super_grids, i, j, single_core,
                                               True)
      sum_nn_11[i][j] += (
          tf.einsum('mni,mnj->mnij', self._grid_expand_e, grid_boundary_s_ij) +
          tf.einsum('mni,mnj->mnij', grid_boundary_e_ij, self._grid_expand_e))
    return sum_nn_00, sum_nn_11

  def sum_of_nearest_neighbors_white(self, super_grids, single_core):
    """The sum of nearest neighbor in each site on periodic boundaries.

    Mirror of sum_of_nearest_neighbors_black for the white sub-lattices
    (colors (0, 1) and (1, 0)).
    """
    sum_nn_01 = create_list([LATTICE_SUPER_BLOCK_DIM] * 2)
    for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
      sum_nn_01[i][j] = (
          tf.matmul(super_grids[0][0][i][j], self._grid_nn, transpose_b=True) +
          tf.matmul(self._grid_nn, super_grids[1][1][i][j], transpose_a=True))
    sum_nn_10 = create_list([LATTICE_SUPER_BLOCK_DIM] * 2)
    for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
      sum_nn_10[i][j] = (
          tf.matmul(self._grid_nn, super_grids[0][0][i][j]) +
          tf.matmul(super_grids[1][1][i][j], self._grid_nn))
    for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
      # Handle the northern/eastern boundary.
      grid_boundary_n_ij = self.get_boundary_n(super_grids, i, j, single_core,
                                               False)
      grid_boundary_e_ij = self.get_boundary_e(super_grids, i, j, single_core,
                                               False)
      sum_nn_01[i][j] += (
          tf.einsum('mni,mnj->mnij', self._grid_expand_s, grid_boundary_n_ij) +
          tf.einsum('mni,mnj->mnij', grid_boundary_e_ij, self._grid_expand_e))
      # Handle the southern/western boundary.
      grid_boundary_s_ij = self.get_boundary_s(super_grids, i, j, single_core,
                                               False)
      grid_boundary_w_ij = self.get_boundary_w(super_grids, i, j, single_core,
                                               False)
      sum_nn_10[i][j] += (
          tf.einsum('mni,mnj->mnij', self._grid_expand_e, grid_boundary_s_ij) +
          tf.einsum('mni,mnj->mnij', grid_boundary_w_ij, self._grid_expand_s))
    return sum_nn_01, sum_nn_10
def _validate_params():
  """Validate module-level simulation parameters before using them.

  Raises:
    AssertionError: if any lattice dimension, temperature, update count, or
      core-topology parameter is missing, malformed, or non-positive.
      The messages make a misconfiguration diagnosable; a bare assert
      would only report the failing line.
  """
  assert LATTICE_SUB_DIM > 0, 'LATTICE_SUB_DIM must be positive.'
  assert LATTICE_BLOCK_DIMS is not None and len(LATTICE_BLOCK_DIMS) == 2, (
      'LATTICE_BLOCK_DIMS must be a pair.')
  assert LATTICE_BLOCK_DIMS[0] > 0 and LATTICE_BLOCK_DIMS[1] > 0, (
      'LATTICE_BLOCK_DIMS entries must be positive.')
  assert LATTICE_SUPER_BLOCK_DIM > 0, (
      'LATTICE_SUPER_BLOCK_DIM must be positive.')
  assert NORMALIZED_TEMPERATURE > 0.0, (
      'NORMALIZED_TEMPERATURE must be positive.')
  assert NUMBER_OF_BURN_IN_UPDATE > 0, (
      'NUMBER_OF_BURN_IN_UPDATE must be positive.')
  assert NUMBER_OF_WHOLE_LATTICE_UPDATE > 0, (
      'NUMBER_OF_WHOLE_LATTICE_UPDATE must be positive.')
  assert CORES_TOPOLOGY is not None and len(CORES_TOPOLOGY) == 2, (
      'CORES_TOPOLOGY must be a pair.')
  assert CORES_TOPOLOGY[0] > 0 and CORES_TOPOLOGY[1] > 0, (
      'CORES_TOPOLOGY entries must be positive.')
def compute_nanoseconds_per_flip(step_time,
                                 num_steps=NUMBER_OF_WHOLE_LATTICE_UPDATE):
  """Compute the nanoseconds per flip given step_time in seconds.

  The flip count is the total number of lattice sites across every replica
  — (LATTICE_SUB_DIM * LATTICE_SUPER_BLOCK_DIM)^2 sites per block, times
  the block grid, times the core topology — multiplied by the number of
  whole-lattice sweeps.
  """
  sites_per_replica = ((LATTICE_SUB_DIM * LATTICE_SUPER_BLOCK_DIM)**2 *
                       np.prod(LATTICE_BLOCK_DIMS))
  total_flips = sites_per_replica * num_steps * np.prod(CORES_TOPOLOGY)
  return step_time * _NANOS_PER_SECOND / total_flips
# pylint: disable=unused-argument
def _grid_initializer(shape, dtype, partition_info):
  """Variable initializer: random spins drawn uniformly from {-1.0, +1.0}.

  Matches the tf.get_variable initializer signature; partition_info is
  unused (hence the pylint disable above).
  """
  grid_value_init = 2.0 * tf.cast(
      tf.random_uniform(shape) > 0.5, dtype=dtype) - 1.0
  return grid_value_init
def update_optim_conv(sweeps, fn):
  """Simulation in each replica using 'compact' representation.

  Boundary value communications are handled by collective permute. This uses
  the convolution instead of matmul for the nearest neighbor sum calculation.
  Ref: Algorithm UpdateOptim in the paper.

  Args:
    sweeps: the number of whole lattice update.
    fn: the function on a given configuration of the lattice, it takes a 2-D
      list of variables, i.e., v_ij and returns a float32.

  Returns:
    The estimated expectation of fn, i.e., <fn>
  """
  _validate_params()
  # Metropolis factor: each flip changes the energy by 2 * spin * sum_nn,
  # scaled by the inverse temperature.
  temperature_muliplier = -2 * get_inverse_temperature()
  lattice_sub_shape = [
      KERNEL_CHANNEL_SIZE_2,
      CONV_LATTICE_DIMS[0] // KERNEL_CHANNEL_SIZE_2,
      CONV_LATTICE_DIMS[1] // KERNEL_CHANNEL_SIZE,
      KERNEL_CHANNEL_SIZE
  ]
  nn_calculator = NearestNeighborCalculatorOptimConv()

  def checkerboard(k, estimation):
    """Checkerboard algorithm update."""
    with tf.variable_scope('tpu', reuse=tf.AUTO_REUSE, use_resource=True):
      # One variable per (color, color, super-block row, super-block col);
      # AUTO_REUSE makes repeated loop iterations share the same variables.
      super_grids = create_list([2] * 2 + [LATTICE_SUPER_BLOCK_DIM] * 2)
      for l, m in create_iterator([2] * 2):
        for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
          super_grids[l][m][i][j] = tf.get_variable(
              'grids_%d%d_%d%d' % (l, m, i, j),
              initializer=_grid_initializer,
              shape=lattice_sub_shape,
              dtype=tf.bfloat16)

    def update(probs, black):
      """Checkerboard algorithm update for a given color."""
      if black:
        idx = [[0, 0], [1, 1]]
        sum_nn_color = nn_calculator.sum_of_nearest_neighbors_black(
            super_grids)
      else:
        idx = [[0, 1], [1, 0]]
        sum_nn_color = nn_calculator.sum_of_nearest_neighbors_white(
            super_grids)
      assign_ops = []
      for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
        for [idx0, idx1], sum_nn in zip(idx, sum_nn_color):
          acceptance_ratio_ij = (
              temperature_muliplier * sum_nn[i][j] *
              super_grids[idx0][idx1][i][j])
          # Metropolis acceptance in the log domain: probs holds
          # log(uniform) samples.
          flips_ij = tf.cast(
              probs[idx0][idx1][i][j] < acceptance_ratio_ij,
              dtype=get_dtype())
          # Flipping a spin s is s -= 2 * s.
          assign_ops.append(super_grids[idx0][idx1][i][j].assign_sub(
              flips_ij * super_grids[idx0][idx1][i][j] *
              tf.constant(2.0, dtype=get_dtype())))
      return assign_ops

    probs = create_list([2] * 2 + [LATTICE_SUPER_BLOCK_DIM] * 2)
    for l, m in create_iterator([2] * 2):
      for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
        probs[l][m][i][j] = (
            tf.log(tf.random_uniform(lattice_sub_shape, dtype=get_dtype())))
    grid_black_update = update(probs, black=True)
    with tf.control_dependencies(grid_black_update):
      grid_white_update = update(probs, black=False)
    with tf.control_dependencies(grid_white_update):
      # Running average of fn over sweeps: avg_{k+1} = avg_k * k/(k+1)
      # + fn/(k+1).
      return k + 1, (
          estimation * tf.cast(k, dtype=tf.float32) /
          tf.cast(k + 1, dtype=tf.float32) +
          tf.cast(fn(super_grids), dtype=tf.float32) /
          tf.cast(k + 1, dtype=tf.float32))

  def while_loop(sweeps):
    _, estimation = tf.while_loop(lambda i, _: i < sweeps, checkerboard, [
        0,
        tf.constant([0.0, 0.0, 0.0], dtype=tf.float32),
    ])
    return estimation

  return while_loop(sweeps)
def update_optim(sweeps, fn, single_core):
  """Simulation in each replica using 'compact' representation.

  Boundary value communications are handled by collective permute.
  Ref: Algorithm UpdateOptim in the paper.

  Args:
    sweeps: the number of whole lattice update.
    fn: the function on a given configuration of the lattice, it takes a 2-D
      list of variables, i.e., v_ij and returns a float32.
    single_core: a bool that specify whether the lattice is updated on single
      or multiple cores.

  Returns:
    The estimated expectation of fn, i.e., <fn>
  """
  _validate_params()
  # Metropolis factor: each flip changes the energy by 2 * spin * sum_nn,
  # scaled by the inverse temperature.
  temperature_muliplier = -2 * get_inverse_temperature()
  lattice_sub_shape = [
      LATTICE_BLOCK_DIMS[0],
      LATTICE_BLOCK_DIMS[1],
      LATTICE_SUB_DIM // 2,
      LATTICE_SUB_DIM // 2,
  ]
  nn_calculator = NearestNeighborCalculatorOptim()

  def checkerboard(k, estimation):
    """Checkerboard algorithm update."""
    with tf.variable_scope('tpu', reuse=tf.AUTO_REUSE, use_resource=True):
      # One variable per (color, color, super-block row, super-block col);
      # AUTO_REUSE makes repeated loop iterations share the same variables.
      super_grids = create_list([2] * 2 + [LATTICE_SUPER_BLOCK_DIM] * 2)
      for l, m in create_iterator([2] * 2):
        for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
          super_grids[l][m][i][j] = tf.get_variable(
              'grids_%d%d_%d%d' % (l, m, i, j),
              initializer=_grid_initializer,
              shape=lattice_sub_shape,
              dtype=tf.bfloat16)

    def update(probs, black):
      """Checkerboard algorithm update for a given color."""
      if black:
        idx = [[0, 0], [1, 1]]
        sum_nn_color = \
            nn_calculator.sum_of_nearest_neighbors_black(
                super_grids, single_core)
      else:
        idx = [[0, 1], [1, 0]]
        sum_nn_color = \
            nn_calculator.sum_of_nearest_neighbors_white(
                super_grids, single_core)
      assign_ops = []
      for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
        for [idx0, idx1], sum_nn in zip(idx, sum_nn_color):
          acceptance_ratio_ij = (
              temperature_muliplier * sum_nn[i][j] *
              super_grids[idx0][idx1][i][j])
          # Metropolis acceptance in the log domain: probs holds
          # log(uniform) samples.
          flips_ij = tf.cast(
              probs[idx0][idx1][i][j] < acceptance_ratio_ij,
              dtype=get_dtype())
          # Flipping a spin s is s -= 2 * s.
          assign_ops.append(super_grids[idx0][idx1][i][j].assign_sub(
              flips_ij * super_grids[idx0][idx1][i][j] *
              tf.constant(2.0, dtype=get_dtype())))
      return assign_ops

    probs = create_list([2] * 2 + [LATTICE_SUPER_BLOCK_DIM] * 2)
    for l, m in create_iterator([2] * 2):
      for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
        probs[l][m][i][j] = (
            tf.log(tf.random_uniform(lattice_sub_shape, dtype=get_dtype())))
    grid_black_update = update(probs, black=True)
    with tf.control_dependencies(grid_black_update):
      grid_white_update = update(probs, black=False)
    with tf.control_dependencies(grid_white_update):
      # Running average of fn over sweeps: avg_{k+1} = avg_k * k/(k+1)
      # + fn/(k+1).
      return k + 1, (
          estimation * tf.cast(k, dtype=tf.float32) /
          tf.cast(k + 1, dtype=tf.float32) +
          tf.cast(fn(super_grids), dtype=tf.float32) /
          tf.cast(k + 1, dtype=tf.float32))

  def while_loop(sweeps):
    _, estimation = tf.while_loop(lambda i, _: i < sweeps, checkerboard, [
        0,
        tf.constant([0.0, 0.0, 0.0], dtype=tf.float32),
    ])
    return estimation

  return while_loop(sweeps)
def create_ising_mcmc_simulator(fn, single_core, device_assignment):
  """Ising model MCMC simulation on single or multiple TPUs.

  The whole lattice is distributed equally among TPU cores, and each TPU core
  runs one replica given cores topology `[l1, l2]`. In each core, because of
  the hard limit of protobuf (2GB), we split the sub-lattice into multi-scale
  sub-lattices as follows:

  The sub-lattice in each core is divided into a 'kxk' grid of sub-lattices,
  each sub-lattice is a tensor variable, where `k' is flag:
  lattice_super_block_dim,

  `[v_00, ........... v_{0,k-1}]`
  `[v_10, ........... v_{1,k-1}]`
  `.............................`
  `[v_{k-1,0}, ..., v_{k-1,k-1}]`

  Each v_ij is again a `m1xm2` grid of smaller sub-lattice, where `m1` and
  `m2` are provided by flag: lattice_block_dims,

  `[g_00, ............, g_{0,m2-1}]`
  `[g_10, ............, g_{1,m2-1}]`
  `................................`
  `[g_{m1-1,0}, ..., g_{m1-1,m2-1}]`

  and each g_ij is a `nxn` sub-lattice, where `n` is flag: lattice_sub_dim.
  Each g_ij is further split into 4 compact sub-lattices, i.e.,

  `g_ij_00 = g_ij[0::2, 0::2]`
  `g_ij_01 = g_ij[0::2, 1::2]`
  `g_ij_10 = g_ij[1::2, 0::2]`
  `g_ij_11 = g_ij[1::2, 1::2]`

  Where g_ij_00 and g_ij_11 are all black spins, and g_ij_01 and g_ij_10 are
  all white spins.

  Thus the whole lattice has dimensions [l1*k*m1*n, l2*k*m2*n].

  The boundaries of each sub-lattice are collectively permuted among replicas
  to calculate nearest neighbor sums, which are used to compute
  Metropolis-Hastings updates.

  Args:
    fn: the function on a given configuration of the sub_lattice on the
      replica. It takes a 2-D list of variables, i.e., v_ij, and returns a
      float32. fn must be additive in order to ensure the correctness of the
      estimation, i.e., `fn([sub_lattice_1, ..., sub_lattice_n]) = sum_{i=1}^n
      fn([sub_lattice_i])`.
    single_core: a bool that specify whether the lattice is updated on single
      or multiple cores.
    device_assignment: a `tf.contrib.tpu.DeviceAssignment` specifying the
      mapping from replica id to the TPU mesh grid coordinate.

  Returns:
    The tuple of a `int32` placeholder for the number of flips and a handle
    for the simulation computation.
  """
  # The sweep count is fed at session.run time through this placeholder.
  num_flips_placeholder = tf.placeholder(tf.int32, [])

  def compute_fn(sweeps):
    # Choose the convolution- or matmul-based update per the USE_CONV flag.
    return (update_optim_conv(sweeps, fn) if USE_CONV else
            update_optim(sweeps, fn, single_core))

  return num_flips_placeholder, tf.contrib.tpu.replicate(
      compute_fn,
      inputs=[[num_flips_placeholder] for _ in range(np.prod(CORES_TOPOLOGY))],
      device_assignment=device_assignment)
def ising_mcmc_simulation(fn, session):
  """Ising mcmc simulation.

  Initializes the TPU system, builds the replicated simulator, runs a
  burn-in phase followed by the measured phase, and prints timing
  statistics (nanoseconds per spin flip) for both.

  Args:
    fn: the function on a given configuration of the lattice. It takes a 2-D
      list of variables, i.e., v_ij and returns a float32. Refer to
      create_ising_mcmc_on_single_core for details on the variables.
    session: a function that returns a tensorflow session.

  Returns:
    The estimation of the expectation of fn.
  """
  with session() as sess:
    topology = tf.contrib.tpu.Topology(
        sess.run(tf.contrib.tpu.initialize_system()))
    device_assignment, _ = tpu_device_assignment(
        np.asarray(CORES_TOPOLOGY), topology)
    single_core = is_single_core(CORES_TOPOLOGY)
    print(sess.list_devices())
    num_flips_placeholder, compute_handle = create_ising_mcmc_simulator(
        fn, single_core, device_assignment)
    sess.run(tf.global_variables_initializer())
    # Burn-in phase: run the chain without using its measurements.
    feed_dict = {num_flips_placeholder: NUMBER_OF_BURN_IN_UPDATE}
    print('--- start burning in ---')
    burn_in_start_time = time.time()
    sess.run(compute_handle, feed_dict=feed_dict)
    burn_in_time = time.time() - burn_in_start_time
    print('--- finish burning in ---')
    print('--- burn-in time: %s seconds ---' % burn_in_time)
    # Measured phase.
    feed_dict = {num_flips_placeholder: NUMBER_OF_WHOLE_LATTICE_UPDATE}
    start_time = time.time()
    estimation_val = sess.run(compute_handle, feed_dict=feed_dict)
    step_time = time.time() - start_time
    nanos_per_flip = compute_nanoseconds_per_flip(step_time)
    print('--- run time with overhead: %s seconds ---' % step_time)
    print('--- step time with overhead: %s seconds per whole-lattice '
          'flip ---' % (step_time / NUMBER_OF_WHOLE_LATTICE_UPDATE))
    print('--- %s nanoseconds per flip (with overhead)---' % nanos_per_flip)
    # NOTE(review): the "no overhead" figure treats the burn-in run's wall
    # time as an estimate of the fixed session overhead and subtracts it,
    # dividing by the sweep-count difference — presumably a heuristic from
    # the original benchmark; confirm against the paper before relying on it.
    step_time_no_overhead = step_time - burn_in_time
    nanos_per_flip_no_overhead = compute_nanoseconds_per_flip(
        step_time_no_overhead,
        NUMBER_OF_WHOLE_LATTICE_UPDATE - NUMBER_OF_BURN_IN_UPDATE)
    print('--- run time excluding overhead: %s seconds ---' %
          step_time_no_overhead)
    print('--- step time excluding overhead: %s seconds '
          'per whole-lattice flip ---' % (step_time_no_overhead /
          (NUMBER_OF_WHOLE_LATTICE_UPDATE - NUMBER_OF_BURN_IN_UPDATE)))
    print('--- %s nanoseconds per flip (without overhead)---' %
          nanos_per_flip_no_overhead)
    return estimation_val
def get_session():
  """Open a TF session connected to the TPU worker named by $TPU_NAME."""
  resolver = TPUClusterResolver(tpu=os.environ['TPU_NAME'])
  cluster_def = resolver.cluster_spec().as_cluster_def()
  tpu_master_grpc_path = resolver.get_master()
  session_config = tf.ConfigProto(
      allow_soft_placement=True,
      isolate_session_state=True,
      cluster_def=cluster_def)
  return tf.Session(tpu_master_grpc_path, config=session_config)
def reduce_mean(super_grids):
  """Return [m, m^2, m^4]: magnetization moments of the replica's lattice.

  m is the mean spin over all sub-lattice variables: per-sub-lattice means
  are summed and divided by the number of sub-lattices (2 * 2 super-block
  colors times LATTICE_SUPER_BLOCK_DIM^2 grid cells).
  """
  mag = tf.constant(0.0, dtype=tf.float32)
  num_base = np.prod([
      2, 2, LATTICE_SUPER_BLOCK_DIM, LATTICE_SUPER_BLOCK_DIM
  ]).astype(np.float32)
  for l, m in create_iterator([2] * 2):
    for i, j in create_iterator([LATTICE_SUPER_BLOCK_DIM] * 2):
      mag += tf.cast(
          tf.reduce_mean(super_grids[l][m][i][j]), dtype=tf.float32)
  mag = mag / num_base
  mag_sq = mag * mag
  mag_4 = mag_sq * mag_sq
  return tf.stack([mag, mag_sq, mag_4])
```
Compute magnetization and Binder parameter for a given temperature
```
# Rebind the module-level temperature read by the simulation functions.
NORMALIZED_TEMPERATURE = 0.1
with tf.Graph().as_default():
  estimation_val = ising_mcmc_simulation(reduce_mean, get_session)
  # estimation_val[0][0] is [m, m^2, m^4] from reduce_mean on replica 0.
  print('estimations: %s' % estimation_val[0][0])
  print('magnetization: %s' % estimation_val[0][0][0])
  # Binder parameter: 1 - <m^4> / (3 <m^2>^2).
  print('Binder parameter: %s' % (
      1 - estimation_val[0][0][2] / (3 * estimation_val[0][0][1] ** 2)))
```
Compute magnetization and Binder parameters for a range of temperatures
```
# Sweep a range of temperatures, rebuilding the graph for each one.
results = []
for t in np.arange(0.9, 1.1, 0.1):
  # Rebind the module-level temperature read by the simulation functions.
  NORMALIZED_TEMPERATURE = t
  with tf.Graph().as_default():
    estimation_val = ising_mcmc_simulation(reduce_mean, get_session)
    print('estimations: %s' % estimation_val[0][0])
    print('magnetization: %s' % estimation_val[0][0][0])
    # Binder parameter: 1 - <m^4> / (3 <m^2>^2).
    binder_param = (1 -
        estimation_val[0][0][2] / (3 * estimation_val[0][0][1] ** 2))
    print('Binder parameter: %s' % binder_param)
    results.append((t, estimation_val[0][0][0], binder_param))
# Summary of the sweep.
for r in results:
  print('temperature: %s, magnetization: %s, Binder parameter: %s' %
        (r[0], r[1], r[2]))
```
| github_jupyter |
# Introduction
In these labs we will work with HMMs in the form of Weighted Finite State Transducers. Examples were given at the start of Lecture 5, but if you need a refresher, read this [introduction](https://github.com/ZhaoZeyu1995/asr_labs/blob/master/introduction.pdf). We will use the Python interface to the [OpenFst toolkit](http://openfst.org).
# Getting started
We start by importing the OpenFst Python wrapper:
```
import openfst_python as fst
```
Then we create tables for our symbols
```
# Symbol tables map label strings to integer indexes used on FST arcs.
input_sym = fst.SymbolTable()
output_sym = fst.SymbolTable()
input_sym.add_symbol('<eps>')    # by convention, <eps> always
                                 # has symbol zero
input_sym.add_symbol('a')        # input symbols
input_sym.add_symbol('b')
output_sym.add_symbol('<eps>')   # output symbols
output_sym.add_symbol('d')
output_sym.add_symbol('c')
```
A **SymbolTable()** is simply a table associating symbols and indexes. We add symbols to the table with the method **add_symbol()**
Now that we've got our symbol tables, we will build the FST itself:
```
# Build the transducer and attach the tables: arcs consume indexes from
# input_sym and emit indexes from output_sym.
f = fst.Fst()
f.set_input_symbols(input_sym)
f.set_output_symbols(output_sym)
```
Our FST transduces the input to the outputs, so we set the symbol tables as such. Now, we want to add a number of states:
```
# add_state() returns the new state's index; keep them in named variables.
s0 = f.add_state()
s1 = f.add_state()
s2 = f.add_state()
s3 = f.add_state()
```
The output of the **add_state()** method is just the index assigned to the state, but it can be useful to assign that index to a variable to give it a more meaningful label.
To create arcs between states, we do:
```
# Look up the integer indexes for each label.
a = input_sym.find('a')
b = input_sym.find('b')
c = output_sym.find('c')
d = output_sym.find('d')
# Arc(input index, output index, weight, destination state); weight None
# here since the example FST is unweighted.
f.add_arc(s0, fst.Arc(a, c, None, s1))
f.add_arc(s0, fst.Arc(b, d, None, s2))
f.add_arc(s1, fst.Arc(a, c, None, s3))
f.add_arc(s2, fst.Arc(b, d, None, s3))
```
The syntax for the method **add_arc** is:
**add_arc(*source state*, *arc to add*)**
while the syntax for initializing a class **Arc()** is:
**Arc(*input symbol index*, *output symbol index*, *weight*, *destination state*)**
We use the **find()** method of the symbol tables to get the index of a certain label.
Now we just add start and end states:
```
# Mark s0 as the start state and s3 as an accepting (final) state.
f.set_start(s0)
f.set_final(s3)
```
And voila, our first FST, shown in the example above, is done!
# Exercises
```
def parse_lexicon(lex_file):
    """
    Parse the lexicon file and return it in dictionary form.

    Args:
        lex_file (str): filename of lexicon file with structure '<word> <phone1> <phone2>...'
                        eg. peppers p eh p er z

    Returns:
        lex (dict): dictionary mapping words to list of phones
    """
    lex = {}  # dictionary of lexicon entries (could be memory-heavy for very large lexica)
    with open(lex_file, 'r') as f:
        for line in f:
            fields = line.split()  # split at whitespace
            if not fields:
                # skip blank lines: indexing fields[0] on an empty split would raise IndexError
                continue
            lex[fields[0]] = fields[1:]  # first field is the word, the rest are the phones
    return lex
# Build the lexicon dictionary once; later cells read the global `lex`.
lex = parse_lexicon('lexicon.txt')
# Bare expression so the notebook cell displays the parsed dictionary.
lex
```
1. Write a function that generates symbol tables from a lexicon, using the provided phonetic dictionary in **lexicon.txt** and the helper function *parse_lexicon* in cells above. Use p_1, p_2, ..., eh_1, ... to denote the phone states.
```
def generate_symbol_tables(lexicon, n=3):
    """
    Return word, phone and state symbol tables based on the supplied lexicon.

    Args:
        lexicon (dict): lexicon to use, created from the parse_lexicon() function
        n (int): number of states for each phone HMM

    Returns:
        word_table (fst.SymbolTable): table of words
        phone_table (fst.SymbolTable): table of phones
        state_table (fst.SymbolTable): table of HMM phone-state IDs
    """
    word_table = fst.SymbolTable()
    phone_table = fst.SymbolTable()
    state_table = fst.SymbolTable()

    # By OpenFst convention, <eps> must be symbol zero in every table.
    for table in (word_table, phone_table, state_table):
        table.add_symbol('<eps>')

    for word, pronunciation in lexicon.items():
        word_table.add_symbol(word)
        for phone in pronunciation:
            phone_table.add_symbol(phone)
            # One HMM-state symbol per phone state, e.g. p_1, p_2, ..., p_n.
            for state_idx in range(1, n + 1):
                state_table.add_symbol('{}_{}'.format(phone, state_idx))

    return word_table, phone_table, state_table
# Build the three symbol tables from the lexicon (default n=3 states per phone).
word_table, phone_table, state_table = generate_symbol_tables(lex)
# Write to files for autograding
word_table.write_text('word_table.txt')
phone_table.write_text('phone_table.txt')
state_table.write_text('state_table.txt')
```
2. Create functions generating WFSTs representing the following HMMs:
1. A 3-state left-to-right phone HMM with self-loops
2. A parallel-path left-to-right HMM, shown below
3. An ergodic HMM with $n$ states (you will need to think about how to handle final states)
Don't worry about the arc labels for the second two WFSTs.

```
def generate_phone_wfst(f, start_state, phone, n):
    """
    Generate a WFST representing an n-state left-to-right phone HMM.

    Args:
        f (fst.Fst()): an FST object, assumed to exist already
        start_state (int): the index of the first state, assumed to exist already
        phone (str): the phone label
        n (int): number of states of the HMM excluding start and end

    Returns:
        the final state of the FST
    """
    state = start_state
    for idx in range(1, n + 1):
        hmm_state_label = state_table.find('{}_{}'.format(phone, idx))
        # Self-loop: remain in the same HMM state.
        f.add_arc(state, fst.Arc(hmm_state_label, 0, None, state))
        # Emit the phone label only when leaving the last HMM state; every
        # earlier forward transition outputs <eps> (symbol index 0).
        # NOTE: if outputting words instead, this is the code to modify.
        emit = phone_table.find(phone) if idx == n else 0
        successor = f.add_state()
        f.add_arc(state, fst.Arc(hmm_state_label, emit, None, successor))
        state = successor
    return state
# Build a 3-state HMM for the phone 'p' and attach symbol tables so the
# states and phones render with readable labels.
f = fst.Fst()
start = f.add_state()
f.set_start(start)
last_state = generate_phone_wfst(f, start, 'p', 3)
f.set_input_symbols(state_table)
f.set_output_symbols(phone_table)
# Write to a file for autograding
f.write('phone_wfst.fst')
def generate_parallel_path_wfst(f, start_state, n):
    """
    Generate a WFST representing an n-state parallel-path left-to-right HMM.

    Args:
        f (fst.Fst()): an FST object, assumed to exist already
        start_state (int): the index of the first state, assumed to exist already
        n (int): number of states of the HMM excluding start and end

    Returns:
        the final state of the FST
    """
    current_state = start_state
    next_state = f.add_state()
    # Fix: next_next_state must be defined before the loop, otherwise the
    # n == 1 case (where the `if` branch never runs) raises NameError.
    next_next_state = next_state
    for i in range(n):
        # self-loop back to current state
        f.add_arc(current_state, fst.Arc(0, 0, None, current_state))
        # forward arc to the next state
        f.add_arc(current_state, fst.Arc(0, 0, None, next_state))
        if i != n - 1:
            # skip arc two states ahead — the "parallel path"
            next_next_state = f.add_state()
            f.add_arc(current_state, fst.Arc(0, 0, None, next_next_state))
        current_state = next_state
        next_state = next_next_state
    return current_state
# Build and display a 4-state parallel-path HMM.
f = fst.Fst()
start = f.add_state()
f.set_start(start)
last_state = generate_parallel_path_wfst(f, start, 4)
f  # bare expression so the notebook cell renders the FST
# Write to a file for autograding
f.write('parallel_path_wfst.fst')
def generate_ergodic_wfst(f, start_state, n):
    """
    Generate a WFST representing an n-state ergodic HMM.

    Args:
        f (fst.Fst()): an FST object, assumed to exist already
        start_state (int): the index of the first state, assumed to exist already
        n (int): number of states of the HMM excluding start and end

    Returns:
        the start state of the FST
    """
    # Fix: collect the actual state indices instead of assuming they are
    # 0..n — the original hard-coded raw indices, which is only correct when
    # start_state == 0 and the FST contains no other states.
    states = [start_state] + [f.add_state() for _ in range(n)]
    # Fully connect every state to every state (self-loops included): that is
    # what makes the HMM ergodic.
    for src in states:
        for dst in states:
            f.add_arc(src, fst.Arc(0, 0, None, dst))
    return start_state
# Build and display a 5-state ergodic HMM.
f = fst.Fst()
start = f.add_state()
f.set_start(start)
last_state = generate_ergodic_wfst(f, start, 5)
f  # bare expression so the notebook cell renders the FST
# Write to a file for autograding
f.write('ergodic_wfst.fst')
```
3. Write a function to generate an HMM for any word in the lexicon using $n$ states per phone by concatenating $n$-state phone HMMs. Make use of your *generate_phone_wfst()* function.
```
def generate_word_wfst(f, start_state, word, n):
    """Generate a WFST for any word in the lexicon, composed of n-state phone WFSTs.

    This will currently output phone labels.

    Args:
        f (fst.Fst()): an FST object, assumed to exist already
        start_state (int): the index of the first state, assumed to exist already
        word (str): the word to generate
        n (int): states per phone HMM

    Returns:
        the constructed WFST
    """
    # Chain one phone HMM after another: each call returns the state at
    # which the next phone HMM should begin.
    state = start_state
    for phone in lex[word]:  # raises KeyError if word is not in the lexicon
        state = generate_phone_wfst(f, state, phone, n)
    # The final state of the last phone is the final state of the word.
    f.set_final(state)
    return f
# Build the WFST for 'peppers' with 3 states per phone.
f = fst.Fst()
start = f.add_state()
f.set_start(start)
generate_word_wfst(f, start, 'peppers', 3)
f.set_input_symbols(state_table)
f.set_output_symbols(phone_table)
# We need special code to display the higher-resolution WFSTs inside Jupyter notebook
from subprocess import check_call
from IPython.display import Image
# Render via Graphviz: dump a .dot file, convert to PNG, display inline.
f.draw('tmp.dot', portrait=True)
check_call(['dot','-Tpng','-Gdpi=200','tmp.dot','-o','tmp.png'])
Image(filename='tmp.png')
# Write to a file for autograding
f.write('word_wfst.fst')
```
4. Generate an HMM that can recognise:
1. any phone contained in the lexicon
2. any sequence of phones contained in the lexicon
Think of the difference between the first and the second HMM. Again, previous functions should help in the task.
```
def generate_phone_recognition_wfst(n):
    """Generate a HMM to recognise any single phone in the lexicon.

    Args:
        n (int): states per phone HMM

    Returns:
        the constructed WFST
    """
    f = fst.Fst()

    # A single shared start state.
    start_state = f.add_state()
    f.set_start(start_state)

    # Collect the set of unique phones across all pronunciations.
    phone_set = set()
    for pronunciation in lex.values():
        phone_set.update(pronunciation)

    for phone in phone_set:
        # An empty (epsilon) arc from the shared start state into each phone
        # HMM — without it the phone HMMs would all share their first state.
        entry = f.add_state()
        f.add_arc(start_state, fst.Arc(0, 0, None, entry))
        f.set_final(generate_phone_wfst(f, entry, phone, n))

    return f
# Build the single-phone recogniser and render it with Graphviz.
f = generate_phone_recognition_wfst(3)
f.set_input_symbols(state_table)
f.set_output_symbols(phone_table)
f.draw('tmp.dot', portrait=True)
check_call(['dot','-Tpng','-Gdpi=200','tmp.dot','-o','tmp.png'])
Image(filename='tmp.png')
# Write to a file for autograding
f.write('phone_recognition_wfst.fst')
def generate_phone_sequence_recognition_wfst(n):
    """Generate a HMM to recognise any single phone sequence in the lexicon.

    Args:
        n (int): states per phone HMM

    Returns:
        the constructed WFST
    """
    f = fst.Fst()

    # A single shared start state.
    start_state = f.add_state()
    f.set_start(start_state)

    # Collect the set of unique phones across all pronunciations.
    phone_set = set()
    for pronunciation in lex.values():
        phone_set.update(pronunciation)

    for phone in phone_set:
        # Epsilon arc from the shared start state into this phone's HMM.
        entry = f.add_state()
        f.add_arc(start_state, fst.Arc(0, 0, None, entry))
        end_state = generate_phone_wfst(f, entry, phone, n)
        # Epsilon arc back to the start: this is what allows arbitrary
        # *sequences* of phones rather than a single phone.
        f.add_arc(end_state, fst.Arc(0, 0, None, start_state))
        f.set_final(end_state)

    return f
# Build the phone-sequence recogniser and render it with Graphviz.
f = generate_phone_sequence_recognition_wfst(3)
f.set_input_symbols(state_table)
f.set_output_symbols(phone_table)
f.draw('tmp.dot', portrait=True)
check_call(['dot','-Tpng','-Gdpi=200','tmp.dot','-o','tmp.png'])
Image(filename='tmp.png')
# Write to a file for autograding
f.write('phone_sequence_recognition_wfst.fst')
```
5. You have decided that the best way to start teaching a friend English is to have them learn the notorious tongue-twister *peter piper picked a peck of pickled peppers*. Write a function that creates an HMM that can recognize any sequence of these words.
```
def generate_word_sequence_recognition_wfst(n):
    """Generate a HMM to recognise any single word sequence for words in the lexicon.

    Args:
        n (int): states per phone HMM

    Returns:
        the constructed WFST
    """
    f = fst.Fst()

    # A single shared start state.
    start_state = f.add_state()
    f.set_start(start_state)

    for word, phones in lex.items():
        # Epsilon arc from the shared start state into this word's HMM.
        state = f.add_state()
        f.add_arc(start_state, fst.Arc(0, 0, None, state))
        # Concatenate the phone HMMs that spell the word; each call returns
        # the state where the next phone HMM begins.
        for phone in phones:
            state = generate_phone_wfst(f, state, phone, n)
        f.set_final(state)
        # Epsilon arc back to the start so that another word may follow.
        f.add_arc(state, fst.Arc(0, 0, None, start_state))

    return f
# Build the word-sequence recogniser and render it with Graphviz.
f = generate_word_sequence_recognition_wfst(3)
f.set_input_symbols(state_table)
f.set_output_symbols(phone_table)
f.draw('tmp.dot', portrait=True)
check_call(['dot','-Tpng','-Gdpi=200','tmp.dot','-o','tmp.png'])
Image(filename='tmp.png')
# Write to a file for autograding
f.write('word_sequence_recognition_wfst.fst')
```
# If you have more time (optional)
Probabilities in WFSTs are traditionally expressed in negative log format, that is, the weight $w$ on an arc transitioning between states $i$ and $j$ is given by $w=-\log a_{ij}$, where $a_{ij}$ is the HMM transition probability.
6. Add weights to your WFSTs corresponding to transition probabilities. Assume that the probability of a self-loop is $0.1$, and that when transitioning *between* separate phones or words, the probabilities are uniform over all transitions.
| github_jupyter |
# LEARNING
This notebook serves as supporting material for topics covered in **Chapter 18 - Learning from Examples** , **Chapter 19 - Knowledge in Learning**, **Chapter 20 - Learning Probabilistic Models** from the book *Artificial Intelligence: A Modern Approach*. This notebook uses implementations from [learning.py](https://github.com/aimacode/aima-python/blob/master/learning.py). Let's start by importing everything from the module:
```
from learning import *
from notebook import *
```
## CONTENTS
* Machine Learning Overview
* Datasets
* Iris Visualization
* Distance Functions
* Plurality Learner
* k-Nearest Neighbours
* Decision Tree Learner
* Random Forest Learner
* Naive Bayes Learner
* Perceptron
* Learner Evaluation
## MACHINE LEARNING OVERVIEW
In this notebook, we learn about agents that can improve their behavior through diligent study of their own experiences.
An agent is **learning** if it improves its performance on future tasks after making observations about the world.
There are three types of feedback that determine the three main types of learning:
* **Supervised Learning**:
In Supervised Learning the agent observes some example input-output pairs and learns a function that maps from input to output.
**Example**: Let's think of an agent to classify images containing cats or dogs. If we provide an image containing a cat or a dog, this agent should output a string "cat" or "dog" for that particular image. To teach this agent, we will give a lot of input-output pairs like {cat image-"cat"}, {dog image-"dog"} to the agent. The agent then learns a function that maps from an input image to one of those strings.
* **Unsupervised Learning**:
In Unsupervised Learning the agent learns patterns in the input even though no explicit feedback is supplied. The most common type is **clustering**: detecting potential useful clusters of input examples.
**Example**: A taxi agent would develop a concept of *good traffic days* and *bad traffic days* without ever being given labeled examples.
* **Reinforcement Learning**:
In Reinforcement Learning the agent learns from a series of reinforcements—rewards or punishments.
**Example**: Let's talk about an agent to play the popular Atari game—[Pong](http://www.ponggame.org). We will reward a point for every correct move and deduct a point for every wrong move from the agent. Eventually, the agent will figure out which of its actions prior to the reinforcement were most responsible for it.
## DATASETS
For the following tutorials we will use a range of datasets, to better showcase the strengths and weaknesses of the algorithms. The datasets are the following:
* [Fisher's Iris](https://github.com/aimacode/aima-data/blob/a21fc108f52ad551344e947b0eb97df82f8d2b2b/iris.csv): Each item represents a flower, with four measurements: the length and the width of the sepals and petals. Each item/flower is categorized into one of three species: Setosa, Versicolor and Virginica.
* [Zoo](https://github.com/aimacode/aima-data/blob/a21fc108f52ad551344e947b0eb97df82f8d2b2b/zoo.csv): The dataset holds different animals and their classification as "mammal", "fish", etc. The new animal we want to classify has the following measurements: 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 4, 1, 0, 1 (don't concern yourself with what the measurements mean).
To make using the datasets easier, we have written a class, `DataSet`, in `learning.py`. The tutorials found here make use of this class.
Let's have a look at how it works before we get started with the algorithms.
### Intro
A lot of the datasets we will work with are .csv files (although other formats are supported too). We have a collection of sample datasets ready to use [on aima-data](https://github.com/aimacode/aima-data/tree/a21fc108f52ad551344e947b0eb97df82f8d2b2b). Two examples are the datasets mentioned above (*iris.csv* and *zoo.csv*). You can find plenty of datasets online, and a good repository of such datasets is [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets.html).
In such files, each line corresponds to one item/measurement. Each individual value in a line represents a *feature* and usually there is a value denoting the *class* of the item.
You can find the code for the dataset here:
```
%psource DataSet
```
### Class Attributes
* **examples**: Holds the items of the dataset. Each item is a list of values.
* **attrs**: The indexes of the features (by default in the range of [0,f), where *f* is the number of features). For example, `item[i]` returns the feature at index *i* of *item*.
* **attrnames**: An optional list with attribute names. For example, `item[s]`, where *s* is a feature name, returns the feature of name *s* in *item*.
* **target**: The attribute a learning algorithm will try to predict. By default the last attribute.
* **inputs**: This is the list of attributes without the target.
* **values**: A list of lists which holds the set of possible values for the corresponding attribute/feature. If initially `None`, it gets computed (by the function `setproblem`) from the examples.
* **distance**: The distance function used in the learner to calculate the distance between two items. By default `mean_boolean_error`.
* **name**: Name of the dataset.
* **source**: The source of the dataset (url or other). Not used in the code.
* **exclude**: A list of indexes to exclude from `inputs`. The list can include either attribute indexes (attrs) or names (attrnames).
### Class Helper Functions
These functions help modify a `DataSet` object to your needs.
* **sanitize**: Takes as input an example and returns it with non-input (target) attributes replaced by `None`. Useful for testing. Keep in mind that the example given is not itself sanitized, but instead a sanitized copy is returned.
* **classes_to_numbers**: Maps the class names of a dataset to numbers. If the class names are not given, they are computed from the dataset values. Useful for classifiers that return a numerical value instead of a string.
* **remove_examples**: Removes examples containing a given value. Useful for removing examples with missing values, or for removing classes (needed for binary classifiers).
### Importing a Dataset
#### Importing from aima-data
Datasets uploaded on aima-data can be imported with the following line:
```
iris = DataSet(name="iris")
```
To check that we imported the correct dataset, we can do the following:
```
print(iris.examples[0])
print(iris.inputs)
```
Which correctly prints the first line in the csv file and the list of attribute indexes.
When importing a dataset, we can specify to exclude an attribute (for example, at index 1) by setting the parameter `exclude` to the attribute index or name.
```
iris2 = DataSet(name="iris",exclude=[1])
print(iris2.inputs)
```
### Attributes
Here we showcase the attributes.
First we will print the first three items/examples in the dataset.
```
print(iris.examples[:3])
```
Then we will print `attrs`, `attrnames`, `target`, `inputs`. Notice how `attrs` holds values in [0,4], but since the fourth attribute is the target, `inputs` holds values in [0,3].
```
print("attrs:", iris.attrs)
print("attrnames (by default same as attrs):", iris.attrnames)
print("target:", iris.target)
print("inputs:", iris.inputs)
```
Now we will print all the possible values for the first feature/attribute.
```
print(iris.values[0])
```
Finally we will print the dataset's name and source. Keep in mind that we have not set a source for the dataset, so in this case it is empty.
```
print("name:", iris.name)
print("source:", iris.source)
```
A useful combination of the above is `dataset.values[dataset.target]` which returns the possible values of the target. For classification problems, this will return all the possible classes. Let's try it:
```
print(iris.values[iris.target])
```
### Helper Functions
We will now take a look at the auxiliary functions found in the class.
First we will take a look at the `sanitize` function, which sets the non-input values of the given example to `None`.
In this case we want to hide the class of the first example, so we will sanitize it.
Note that the function doesn't actually change the given example; it returns a sanitized *copy* of it.
```
print("Sanitized:",iris.sanitize(iris.examples[0]))
print("Original:",iris.examples[0])
```
Currently the `iris` dataset has three classes, setosa, virginica and versicolor. We want though to convert it to a binary class dataset (a dataset with two classes). The class we want to remove is "virginica". To accomplish that we will utilize the helper function `remove_examples`.
```
iris2 = DataSet(name="iris")
iris2.remove_examples("virginica")
print(iris2.values[iris2.target])
```
We also have `classes_to_numbers`. For a lot of the classifiers in the module (like the Neural Network), classes should have numerical values. With this function we map string class names to numbers.
```
print("Class of first example:",iris2.examples[0][iris2.target])
iris2.classes_to_numbers()
print("Class of first example:",iris2.examples[0][iris2.target])
```
As you can see "setosa" was mapped to 0.
Finally, we take a look at `find_means_and_deviations`. It finds the means and standard deviations of the features for each class.
```
means, deviations = iris.find_means_and_deviations()
print("Setosa feature means:", means["setosa"])
print("Versicolor mean for first feature:", means["versicolor"][0])
print("Setosa feature deviations:", deviations["setosa"])
print("Virginica deviation for second feature:",deviations["virginica"][1])
```
## IRIS VISUALIZATION
Since we will use the iris dataset extensively in this notebook, below we provide a visualization tool that helps in comprehending the dataset and thus how the algorithms work.
We plot the dataset in a 3D space using `matplotlib` and the function `show_iris` from `notebook.py`. The function takes as input three parameters, *i*, *j* and *k*, which are indices to the iris features, "Sepal Length", "Sepal Width", "Petal Length" and "Petal Width" (0 to 3). By default we show the first three features.
```
iris = DataSet(name="iris")
show_iris()
show_iris(0, 1, 3)
show_iris(1, 2, 3)
```
You can play around with the values to get a good look at the dataset.
## DISTANCE FUNCTIONS
In a lot of algorithms (like the *k-Nearest Neighbors* algorithm), there is a need to compare items, finding how *similar* or *close* they are. For that we have many different functions at our disposal. Below are the functions implemented in the module:
### Manhattan Distance (`manhattan_distance`)
One of the simplest distance functions. It calculates the difference between the coordinates/features of two items. To understand how it works, imagine a 2D grid with coordinates *x* and *y*. In that grid we have two items, at the squares positioned at `(1,2)` and `(3,4)`. The difference between their two coordinates is `3-1=2` and `4-2=2`. If we sum these up we get `4`. That means to get from `(1,2)` to `(3,4)` we need four moves; two to the right and two more up. The function works similarly for n-dimensional grids.
```
def manhattan_distance(X, Y):
    """Return the sum of absolute per-coordinate differences between X and Y."""
    total = 0
    for a, b in zip(X, Y):
        total += abs(a - b)
    return total
distance = manhattan_distance([1,2], [3,4])
print("Manhattan Distance between (1,2) and (3,4) is", distance)
```
### Euclidean Distance (`euclidean_distance`)
Probably the most popular distance function. It returns the square root of the sum of the squared differences between individual elements of two items.
```
def euclidean_distance(X, Y):
    """Return the square root of the summed squared per-coordinate differences."""
    squared_total = sum((a - b) ** 2 for a, b in zip(X, Y))
    return math.sqrt(squared_total)
distance = euclidean_distance([1,2], [3,4])
print("Euclidean Distance between (1,2) and (3,4) is", distance)
```
### Hamming Distance (`hamming_distance`)
This function counts the number of differences between single elements in two items. For example, if we have two binary strings "111" and "011" the function will return 1, since the two strings only differ at the first element. The function works the same way for non-binary strings too.
```
def hamming_distance(X, Y):
    """Return the number of positions at which X and Y differ."""
    return sum(1 for a, b in zip(X, Y) if a != b)
distance = hamming_distance(['a','b','c'], ['a','b','b'])
print("Hamming Distance between 'abc' and 'abb' is", distance)
```
### Mean Boolean Error (`mean_boolean_error`)
To calculate this distance, we find the ratio of differing elements over all elements of two items. For example, if the two items are `(1,2,3)` and `(1,4,5)`, the ratio of differing elements to all elements is 2/3, since they differ in two out of three elements.
```
def mean_boolean_error(X, Y):
    """Return the fraction of positions at which X and Y differ."""
    mismatches = [int(a != b) for a, b in zip(X, Y)]
    return mean(mismatches)
distance = mean_boolean_error([1,2,3], [1,4,5])
print("Mean Boolean Error Distance between (1,2,3) and (1,4,5) is", distance)
```
### Mean Error (`mean_error`)
This function finds the mean difference of single elements between two items. For example, if the two items are `(1,0,5)` and `(3,10,5)`, their error distance is `(3-1) + (10-0) + (5-5) = 2 + 10 + 0 = 12`. The mean error distance therefore is `12/3=4`.
```
def mean_error(X, Y):
    """Return the mean absolute per-coordinate difference between X and Y."""
    diffs = []
    for a, b in zip(X, Y):
        diffs.append(abs(a - b))
    return mean(diffs)
distance = mean_error([1,0,5], [3,10,5])
print("Mean Error Distance between (1,0,5) and (3,10,5) is", distance)
```
### Mean Square Error (`ms_error`)
This is very similar to the `Mean Error`, but instead of calculating the difference between elements, we are calculating the *square* of the differences.
```
def ms_error(X, Y):
    """Return the mean of the squared per-coordinate differences between X and Y."""
    squared = [(a - b) * (a - b) for a, b in zip(X, Y)]
    return mean(squared)
distance = ms_error([1,0,5], [3,10,5])
print("Mean Square Distance between (1,0,5) and (3,10,5) is", distance)
```
### Root of Mean Square Error (`rms_error`)
This is the square root of `Mean Square Error`.
```
def rms_error(X, Y):
    """Return the square root of the mean squared difference between
    paired elements of X and Y (the RMS error)."""
    return math.sqrt(ms_error(X, Y))

distance = rms_error([1,0,5], [3,10,5])
# Fixed label: this is the root of the *mean square* error, not of the mean error.
print("Root of Mean Square Error Distance between (1,0,5) and (3,10,5) is", distance)
```
## PLURALITY LEARNER CLASSIFIER
### Overview
The Plurality Learner is a simple algorithm, used mainly as a baseline comparison for other algorithms. It finds the most popular class in the dataset and classifies any subsequent item to that class. Essentially, it classifies every new item to the same class. For that reason, it is not used very often, instead opting for more complicated algorithms when we want accurate classification.

Let's see how the classifier works with the plot above. There are three classes named **Class A** (orange-colored dots) and **Class B** (blue-colored dots) and **Class C** (green-colored dots). Every point in this plot has two **features** (i.e. X<sub>1</sub>, X<sub>2</sub>). Now, let's say we have a new point, a red star and we want to know which class this red star belongs to. Solving this problem by predicting the class of this new red star is our current classification problem.
The Plurality Learner will find the class most represented in the plot. ***Class A*** has four items, ***Class B*** has three and ***Class C*** has seven. The most popular class is ***Class C***. Therefore, the item will get classified in ***Class C***, despite the fact that it is closer to the other two classes.
### Implementation
Below follows the implementation of the PluralityLearner algorithm:
```
psource(PluralityLearner)
```
It takes as input a dataset and returns a function. We can later call this function with the item we want to classify as the argument and it returns the class it should be classified in.
The function first finds the most popular class in the dataset and then each time we call its "predict" function, it returns it. Note that the input ("example") does not matter. The function always returns the same class.
### Example
For this example, we will not use the Iris dataset, since each class is represented the same. This will throw an error. Instead we will use the zoo dataset.
```
# Train the baseline Plurality Learner on the zoo dataset, then classify
# one animal described by its feature vector.  The prediction ignores the
# input entirely and always returns the dataset's most common class.
zoo = DataSet(name="zoo")
pL = PluralityLearner(zoo)
print(pL([1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 4, 1, 0, 1]))
```
The output for the above code is "mammal", since that is the most popular and common class in the dataset.
## K-NEAREST NEIGHBOURS CLASSIFIER
### Overview
The k-Nearest Neighbors algorithm is a non-parametric method used for classification and regression. We are going to use this to classify Iris flowers. More about kNN on [Scholarpedia](http://www.scholarpedia.org/article/K-nearest_neighbor).

Let's see how kNN works with a simple plot shown in the above picture.
We have co-ordinates (we call them **features** in Machine Learning) of this red star and we need to predict its class using the kNN algorithm. In this algorithm, the value of **k** is arbitrary. **k** is one of the **hyper parameters** for kNN algorithm. We choose this number based on our dataset and choosing a particular number is known as **hyper parameter tuning/optimising**. We learn more about this in coming topics.
Let's put **k = 3**. It means you need to find 3-Nearest Neighbors of this red star and classify this new point into the majority class. Observe that smaller circle which contains three points other than **test point** (red star). As there are two violet points, which form the majority, we predict the class of red star as **violet- Class B**.
Similarly if we put **k = 5**, you can observe that there are three yellow points, which form the majority. So, we classify our test point as **yellow- Class A**.
In practical tasks, we iterate through a bunch of values for k (like [1, 3, 5, 10, 20, 50, 100]), see how it performs and select the best one.
### Implementation
Below follows the implementation of the kNN algorithm:
```
psource(NearestNeighborLearner)
```
It takes as input a dataset and k (default value is 1) and it returns a function, which we can later use to classify a new item.
To accomplish that, the function uses a heap-queue, where the items of the dataset are sorted according to their distance from *example* (the item to classify). We then take the k smallest elements from the heap-queue and we find the majority class. We classify the item to this class.
### Example
We measured a new flower with the following values: 5.1, 3.0, 1.1, 0.1. We want to classify that item/flower in a class. To do that, we write the following:
```
# Classify a new iris flower (sepal length/width, petal length/width)
# by majority vote among its 3 nearest neighbours in the dataset.
iris = DataSet(name="iris")
kNN = NearestNeighborLearner(iris,k=3)
print(kNN([5.1,3.0,1.1,0.1]))
```
The output of the above code is "setosa", which means the flower with the above measurements is of the "setosa" species.
## DECISION TREE LEARNER
### Overview
#### Decision Trees
A decision tree is a flowchart that uses a tree of decisions and their possible consequences for classification. At each non-leaf node of the tree an attribute of the input is tested, based on which corresponding branch leading to a child-node is selected. At the leaf node the input is classified based on the class label of this leaf node. The paths from root to leaves represent classification rules based on which leaf nodes are assigned class labels.

#### Decision Tree Learning
Decision tree learning is the construction of a decision tree from class-labeled training data. The data is expected to be a tuple in which each record of the tuple is an attribute used for classification. The decision tree is built top-down, by choosing a variable at each step that best splits the set of items. There are different metrics for measuring the "best split". These generally measure the homogeneity of the target variable within the subsets.
#### Gini Impurity
Gini impurity of a set is the probability of a randomly chosen element to be incorrectly labeled if it was randomly labeled according to the distribution of labels in the set.
$$I_G(p) = \sum{p_i(1 - p_i)} = 1 - \sum{p_i^2}$$
We select a split which minimizes the Gini impurity in child nodes.
#### Information Gain
Information gain is based on the concept of entropy from information theory. Entropy is defined as:
$$H(p) = -\sum{p_i \log_2{p_i}}$$
Information Gain is difference between entropy of the parent and weighted sum of entropy of children. The feature used for splitting is the one which provides the most information gain.
#### Pseudocode
You can view the pseudocode by running the cell below:
```
pseudocode("Decision Tree Learning")
```
### Implementation
The nodes of the tree constructed by our learning algorithm are stored using either `DecisionFork` or `DecisionLeaf` based on whether they are a parent node or a leaf node respectively.
```
psource(DecisionFork)
```
`DecisionFork` holds the attribute, which is tested at that node, and a dict of branches. The branches store the child nodes, one for each of the attribute's values. Calling an object of this class as a function with input tuple as an argument returns the next node in the classification path based on the result of the attribute test.
```
psource(DecisionLeaf)
```
The leaf node stores the class label in `result`. All input tuples' classification paths end on a `DecisionLeaf` whose `result` attribute decide their class.
```
psource(DecisionTreeLearner)
```
The implementation of `DecisionTreeLearner` provided in [learning.py](https://github.com/aimacode/aima-python/blob/master/learning.py) uses information gain as the metric for selecting which attribute to test for splitting. The function builds the tree top-down in a recursive manner. Based on the input it makes one of the four choices:
<ol>
<li>If the input at the current step has no training data we return the mode of classes of input data received in the parent step (previous level of recursion).</li>
<li>If all values in training data belong to the same class it returns a `DecisionLeaf` whose class label is the class which all the data belongs to.</li>
<li>If the data has no attributes that can be tested we return the class with highest plurality value in the training data.</li>
<li>We choose the attribute which gives the highest amount of entropy gain and return a `DecisionFork` which splits based on this attribute. Each branch recursively calls `decision_tree_learning` to construct the sub-tree.</li>
</ol>
### Example
We will now use the Decision Tree Learner to classify a sample with values: 5.1, 3.0, 1.1, 0.1.
```
iris = DataSet(name="iris")
DTL = DecisionTreeLearner(iris)
print(DTL([5.1, 3.0, 1.1, 0.1]))
```
As expected, the Decision Tree learner classifies the sample as "setosa" as seen in the previous section.
## RANDOM FOREST LEARNER
### Overview

Image via [src](https://cdn-images-1.medium.com/max/800/0*tG-IWcxL1jg7RkT0.png)
#### Random Forest
As the name of the algorithm and image above suggest, this algorithm creates a forest with a number of trees. A larger number of trees makes the forest more robust: in the random forest algorithm, the higher the number of trees in the forest, the higher the accuracy of the result. The main difference between Random Forest and Decision Trees is that finding the root node and splitting the feature nodes is done randomly.
Let's see how the Random Forest Algorithm works:
Random Forest Algorithm works in two steps, first is the creation of random forest and then the prediction. Let's first see the creation :
The first step in creation is to randomly select 'm' features out of total 'n' features. From these 'm' features calculate the node d using the best split point and then split the node into further nodes using best split. Repeat these steps until 'i' number of nodes are reached. Repeat the entire whole process to build the forest.
Now, let's see how the prediction works
Take the test features and predict the outcome for each randomly created decision tree. Calculate the votes for each prediction and the prediction which gets the highest votes would be the final prediction.
### Implementation
Below mentioned is the implementation of Random Forest Algorithm.
```
psource(RandomForest)
```
This algorithm creates an ensemble of decision trees using bagging and feature bagging. It takes 'm' examples randomly from the total number of examples and then perform feature bagging with probability p to retain an attribute. All the predictors are predicted from the DecisionTreeLearner and then a final prediction is made.
### Example
We will now use the Random Forest to classify a sample with values: 5.1, 3.0, 1.1, 0.1.
```
iris = DataSet(name="iris")
DTL = RandomForest(iris)
print(DTL([5.1, 3.0, 1.1, 0.1]))
```
As expected, the Random Forest classifies the sample as "setosa".
## NAIVE BAYES LEARNER
### Overview
#### Theory of Probabilities
The Naive Bayes algorithm is a probabilistic classifier, making use of [Bayes' Theorem](https://en.wikipedia.org/wiki/Bayes%27_theorem). The theorem states that the conditional probability of **A** given **B** equals the conditional probability of **B** given **A** multiplied by the probability of **A**, divided by the probability of **B**.
$$P(A|B) = \dfrac{P(B|A)*P(A)}{P(B)}$$
From the theory of Probabilities we have the Multiplication Rule, if the events *X* are independent the following is true:
$$P(X_{1} \cap X_{2} \cap ... \cap X_{n}) = P(X_{1})*P(X_{2})*...*P(X_{n})$$
For conditional probabilities this becomes:
$$P(X_{1}, X_{2}, ..., X_{n}|Y) = P(X_{1}|Y)*P(X_{2}|Y)*...*P(X_{n}|Y)$$
#### Classifying an Item
How can we use the above to classify an item though?
We have a dataset with a set of classes (**C**) and we want to classify an item with a set of features (**F**). Essentially what we want to do is predict the class of an item given the features.
For a specific class, **Class**, we will find the conditional probability given the item features:
$$P(Class|F) = \dfrac{P(F|Class)*P(Class)}{P(F)}$$
We will do this for every class and we will pick the maximum. This will be the class the item is classified in.
The features though are a vector with many elements. We need to break the probabilities up using the multiplication rule. Thus the above equation becomes:
$$P(Class|F) = \dfrac{P(Class)*P(F_{1}|Class)*P(F_{2}|Class)*...*P(F_{n}|Class)}{P(F_{1})*P(F_{2})*...*P(F_{n})}$$
The calculation of the conditional probability then depends on the calculation of the following:
*a)* The probability of **Class** in the dataset.
*b)* The conditional probability of each feature occurring in an item classified in **Class**.
*c)* The probabilities of each individual feature.
For *a)*, we will count how many times **Class** occurs in the dataset (aka how many items are classified in a particular class).
For *b)*, if the feature values are discrete ('Blue', '3', 'Tall', etc.), we will count how many times a feature value occurs in items of each class. If the feature values are not discrete, we will go a different route. We will use a distribution function to calculate the probability of values for a given class and feature. If we know the distribution function of the dataset, then great, we will use it to compute the probabilities. If we don't know the function, we can assume the dataset follows the normal (Gaussian) distribution without much loss of accuracy. In fact, it can be proven that any distribution tends to the Gaussian the larger the population gets (see [Central Limit Theorem](https://en.wikipedia.org/wiki/Central_limit_theorem)).
*NOTE:* If the values are continuous but use the discrete approach, there might be issues if we are not lucky. For one, if we have two values, '5.0 and 5.1', with the discrete approach they will be two completely different values, despite being so close. Second, if we are trying to classify an item with a feature value of '5.15', if the value does not appear for the feature, its probability will be 0. This might lead to misclassification. Generally, the continuous approach is more accurate and more useful, despite the overhead of calculating the distribution function.
The last one, *c)*, is tricky. If feature values are discrete, we can count how many times they occur in the dataset. But what if the feature values are continuous? Imagine a dataset with a height feature. Is it worth it to count how many times each value occurs? Most of the time it is not, since there can be miscellaneous differences in the values (for example, 1.7 meters and 1.700001 meters are practically equal, but they count as different values).
So as we cannot calculate the feature value probabilities, what are we going to do?
Let's take a step back and rethink exactly what we are doing. We are essentially comparing conditional probabilities of all the classes. For two classes, **A** and **B**, we want to know which one is greater:
$$\dfrac{P(F|A)*P(A)}{P(F)} vs. \dfrac{P(F|B)*P(B)}{P(F)}$$
Wait, **P(F)** is the same for both the classes! In fact, it is the same for every combination of classes. That is because **P(F)** does not depend on a class, thus being independent of the classes.
So, for *c)*, we actually don't need to calculate it at all.
#### Wrapping It Up
Classifying an item to a class then becomes a matter of calculating the conditional probabilities of feature values and the probabilities of classes. This is something very desirable and computationally delicious.
Remember though that all the above are true because we made the assumption that the features are independent. In most real-world cases that is not true though. Is that an issue here? Fret not, for the algorithm is very efficient even with that assumption. That is why the algorithm is called **Naive** Bayes Classifier. We (naively) assume that the features are independent to make computations easier.
### Implementation
The implementation of the Naive Bayes Classifier is split in two; *Learning* and *Simple*. The *learning* classifier takes as input a dataset and learns the needed distributions from that. It is itself split into two, for discrete and continuous features. The *simple* classifier takes as input not a dataset, but already calculated distributions (a dictionary of `CountingProbDist` objects).
#### Discrete
The implementation for discrete values counts how many times each feature value occurs for each class, and how many times each class occurs. The results are stored in a `CountingProbDist` object.
With the below code you can see the probabilities of the class "Setosa" appearing in the dataset and the probability of the first feature (at index 0) of the same class having a value of 5. Notice that the second probability is relatively small, even though if we observe the dataset we will find that a lot of values are around 5. The issue arises because the features in the Iris dataset are continuous, and we are assuming they are discrete. If the features were discrete (for example, "Tall", "3", etc.) this probably wouldn't have been the case and we would see a much nicer probability distribution.
```
# Learn the distributions needed by the discrete Naive Bayes classifier:
# the class (target) distribution and one value distribution per
# (class, attribute) pair.
dataset = iris
target_vals = dataset.values[dataset.target]   # the distinct class labels
target_dist = CountingProbDist(target_vals)    # will hold P(Class)
# One CountingProbDist per (class, attribute): will hold P(attr value | Class).
attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr])
              for gv in target_vals
              for attr in dataset.inputs}
# Count each example into the class distribution and into the value
# distribution of every one of its attributes for that class.
for example in dataset.examples:
    targetval = example[dataset.target]
    target_dist.add(targetval)
    for attr in dataset.inputs:
        attr_dists[targetval, attr].add(example[attr])
print(target_dist['setosa'])
print(attr_dists['setosa', 0][5.0])
```
First we found the different values for the classes (called targets here) and calculated their distribution. Next we initialized a dictionary of `CountingProbDist` objects, one for each class and feature. Finally, we iterated through the examples in the dataset and calculated the needed probabilities.
Having calculated the different probabilities, we will move on to the predicting function. It will receive as input an item and output the most likely class. Using the above formula, it will multiply the probability of the class appearing, with the probability of each feature value appearing in the class. It will return the max result.
```
def predict(example):
    """Return the most probable class for *example* under the discrete
    Naive Bayes model learned above (maximum posterior, unnormalised)."""
    def class_probability(targetval):
        # P(Class) times the product of P(attr value | Class) over all attributes.
        prob = target_dist[targetval]
        for attr in dataset.inputs:
            prob *= attr_dists[targetval, attr][example[attr]]
        return prob
    return argmax(target_vals, key=class_probability)

print(predict([5, 3, 1, 0.1]))
```
You can view the complete code by executing the next line:
```
psource(NaiveBayesDiscrete)
```
#### Continuous
In the implementation we use the Gaussian/Normal distribution function. To make it work, we need to find the means and standard deviations of features for each class. We make use of the `find_means_and_deviations` Dataset function. On top of that, we will also calculate the class probabilities as we did with the Discrete approach.
```
# For the continuous classifier we need, per class: the mean and standard
# deviation of every feature, plus the class distribution itself.
means, deviations = dataset.find_means_and_deviations()
target_vals = dataset.values[dataset.target]
target_dist = CountingProbDist(target_vals)    # will hold P(Class)
print(means["setosa"])            # feature means for the "setosa" class
print(deviations["versicolor"])   # feature deviations for "versicolor"
```
You can see the means of the features for the "Setosa" class and the deviations for "Versicolor".
The prediction function will work similarly to the Discrete algorithm. It will multiply the probability of the class occurring with the conditional probabilities of the feature values for the class.
Since we are using the Gaussian distribution, we will input the value for each feature into the Gaussian function, together with the mean and deviation of the feature. This will return the probability of the particular feature value for the given class. We will repeat for each class and pick the max value.
```
def predict(example):
    """Return the most probable class for *example* under the continuous
    (Gaussian) Naive Bayes model learned above."""
    def class_probability(cls):
        # Start from P(Class) and multiply in each feature's Gaussian density.
        prob = target_dist[cls]
        for attr in dataset.inputs:
            mu = means[cls][attr]
            sigma = deviations[cls][attr]
            prob = prob * gaussian(mu, sigma, example[attr])
        return prob
    return argmax(target_vals, key=class_probability)

print(predict([5, 3, 1, 0.1]))
```
The complete code of the continuous algorithm:
```
psource(NaiveBayesContinuous)
```
#### Simple
The simple classifier (chosen with the argument `simple`) does not learn from a dataset, instead it takes as input a dictionary of already calculated `CountingProbDist` objects and returns a predictor function. The dictionary is in the following form: `(Class Name, Class Probability): CountingProbDist Object`.
Each class has its own probability distribution. The classifier given a list of features calculates the probability of the input for each class and returns the max. The only pre-processing work is to create dictionaries for the distribution of classes (named `targets`) and attributes/features.
The complete code for the simple classifier:
```
psource(NaiveBayesSimple)
```
This classifier is useful when you already have calculated the distributions and you need to predict future items.
### Examples
We will now use the Naive Bayes Classifier (Discrete and Continuous) to classify items:
```
# Classify three sample flowers with the discrete Naive Bayes learner...
nBD = NaiveBayesLearner(iris, continuous=False)
print("Discrete Classifier")
print(nBD([5, 3, 1, 0.1]))
print(nBD([6, 5, 3, 1.5]))
print(nBD([7, 3, 6.5, 2]))
# ...and the same three samples with the continuous (Gaussian) learner.
nBC = NaiveBayesLearner(iris, continuous=True)
print("\nContinuous Classifier")
print(nBC([5, 3, 1, 0.1]))
print(nBC([6, 5, 3, 1.5]))
print(nBC([7, 3, 6.5, 2]))
```
Notice how the Discrete Classifier misclassified the second item, while the Continuous one had no problem.
Let's now take a look at the simple classifier. First we will come up with a sample problem to solve. Say we are given three bags. Each bag contains three letters ('a', 'b' and 'c') of different quantities. We are given a string of letters and we are tasked with finding from which bag the string of letters came.
Since we know the probability distribution of the letters for each bag, we can use the naive bayes classifier to make our prediction.
```
# Three "bags" of letters with different letter frequencies; each bag's
# CountingProbDist gives P(letter | bag) for the simple classifier below.
bag1 = 'a'*50 + 'b'*30 + 'c'*15
dist1 = CountingProbDist(bag1)
bag2 = 'a'*30 + 'b'*45 + 'c'*20
dist2 = CountingProbDist(bag2)
bag3 = 'a'*20 + 'b'*20 + 'c'*35
dist3 = CountingProbDist(bag3)
```
Now that we have the `CountingProbDist` objects for each bag/class, we will create the dictionary. We assume that it is equally probable that we will pick from any bag.
```
dist = {('First', 0.5): dist1, ('Second', 0.3): dist2, ('Third', 0.2): dist3}
nBS = NaiveBayesLearner(dist, simple=True)
```
Now we can start making predictions:
```
print(nBS('aab')) # We can handle strings
print(nBS(['b', 'b'])) # And lists!
print(nBS('ccbcc'))
```
The results make intuitive sense. The first bag has a high amount of 'a's, the second has a high amount of 'b's and the third has a high amount of 'c's. The classifier seems to confirm this intuition.
Note that the simple classifier doesn't distinguish between discrete and continuous values. It just takes whatever it is given. Also, the `simple` option on the `NaiveBayesLearner` overrides the `continuous` argument. `NaiveBayesLearner(d, simple=True, continuous=False)` just creates a simple classifier.
## PERCEPTRON CLASSIFIER
### Overview
The Perceptron is a linear classifier. It works the same way as a neural network with no hidden layers (just input and output). First it trains its weights given a dataset and then it can classify a new item by running it through the network.
Its input layer consists of the item features, while the output layer consists of nodes (also called neurons). Each node in the output layer has *n* synapses (for every item feature), each with its own weight. Then, the nodes find the dot product of the item features and the synapse weights. These values then pass through an activation function (usually a sigmoid). Finally, we pick the largest of the values and we return its index.
Note that in classification problems each node represents a class. The final classification is the class/node with the max output value.
Below you can see a single node/neuron in the outer layer. With *f* we denote the item features, with *w* the synapse weights, then inside the node we have the dot product and the activation function, *g*.

### Implementation
First, we train (calculate) the weights given a dataset, using the `BackPropagationLearner` function of `learning.py`. We then return a function, `predict`, which we will use in the future to classify a new item. The function computes the (algebraic) dot product of the item with the calculated weights for each node in the outer layer. Then it picks the greatest value and classifies the item in the corresponding class.
```
psource(PerceptronLearner)
```
Note that the Perceptron is a one-layer neural network, without any hidden layers. So, in `BackPropagationLearner`, we will pass no hidden layers. From that function we get our network, which is just one layer, with the weights calculated.
That function `predict` passes the input/example through the network, calculating the dot product of the input and the weights for each node and returns the class with the max dot product.
### Example
We will train the Perceptron on the iris dataset. Because though the `BackPropagationLearner` works with integer indexes and not strings, we need to convert class names to integers. Then, we will try and classify the item/flower with measurements of 5, 3, 1, 0.1.
```
iris = DataSet(name="iris")
iris.classes_to_numbers()
perceptron = PerceptronLearner(iris)
print(perceptron([5, 3, 1, 0.1]))
```
The correct output is 0, which means the item belongs in the first class, "setosa". Note that the Perceptron algorithm is not perfect and may produce false classifications.
## LEARNER EVALUATION
In this section we will evaluate and compare algorithm performance. The dataset we will use will again be the iris one.
```
iris = DataSet(name="iris")
```
### Naive Bayes
First up we have the Naive Bayes algorithm. First we will test how well the Discrete Naive Bayes works, and then how the Continuous fares.
```
nBD = NaiveBayesLearner(iris, continuous=False)
print("Error ratio for Discrete:", err_ratio(nBD, iris))
nBC = NaiveBayesLearner(iris, continuous=True)
print("Error ratio for Continuous:", err_ratio(nBC, iris))
```
The error for the Naive Bayes algorithm is very, very low; close to 0. There is also very little difference between the discrete and continuous version of the algorithm.
### k-Nearest Neighbors
Now we will take a look at kNN, for different values of *k*. Note that *k* should have odd values, to break any ties between two classes.
```
# Train kNN classifiers for several odd values of k and compare their
# error ratios on the (same) iris dataset.  k=1 is trivially perfect
# because every test item's nearest neighbour is itself.
kNN_1 = NearestNeighborLearner(iris, k=1)
kNN_3 = NearestNeighborLearner(iris, k=3)
kNN_5 = NearestNeighborLearner(iris, k=5)
kNN_7 = NearestNeighborLearner(iris, k=7)
print("Error ratio for k=1:", err_ratio(kNN_1, iris))
print("Error ratio for k=3:", err_ratio(kNN_3, iris))
print("Error ratio for k=5:", err_ratio(kNN_5, iris))
print("Error ratio for k=7:", err_ratio(kNN_7, iris))
```
Notice how the error became larger and larger as *k* increased. This is generally the case with datasets where classes are spaced out, as is the case with the iris dataset. If items from different classes were closer together, classification would be more difficult. Usually a value of 1, 3 or 5 for *k* suffices.
Also note that since the training set is also the testing set, for *k* equal to 1 we get a perfect score, since the item we want to classify each time is already in the dataset and its closest neighbor is itself.
### Perceptron
For the Perceptron, we first need to convert class names to integers. Let's see how it performs in the dataset.
```
iris2 = DataSet(name="iris")
iris2.classes_to_numbers()
perceptron = PerceptronLearner(iris2)
print("Error ratio for Perceptron:", err_ratio(perceptron, iris2))
```
The Perceptron didn't fare very well mainly because the dataset is not linearly separated. On simpler datasets the algorithm performs much better, but unfortunately such datasets are rare in real life scenarios.
## AdaBoost
### Overview
**AdaBoost** is an algorithm which uses **ensemble learning**. In ensemble learning the hypotheses in the collection, or ensemble, vote for what the output should be and the output with the majority votes is selected as the final answer.
AdaBoost algorithm, as mentioned in the book, works with a **weighted training set** and **weak learners** (classifiers that have about 50%+epsilon accuracy i.e slightly better than random guessing). It manipulates the weights attached to the examples that are shown to it. Importance is given to the examples with higher weights.
All the examples start with equal weights and a hypothesis is generated using these examples. Examples which are incorrectly classified, their weights are increased so that they can be classified correctly by the next hypothesis. The examples that are correctly classified, their weights are reduced. This process is repeated *K* times (here *K* is an input to the algorithm) and hence, *K* hypotheses are generated.
These *K* hypotheses are also assigned weights according to their performance on the weighted training set. The final ensemble hypothesis is the weighted-majority combination of these *K* hypotheses.
The speciality of AdaBoost is that by using weak learners and a sufficiently large *K*, a highly accurate classifier can be learned irrespective of the complexity of the function being learned or the dullness of the hypothesis space.
### Implementation
As seen in the previous section, the `PerceptronLearner` does not perform that well on the iris dataset. We'll use perceptron as the learner for the AdaBoost algorithm and try to increase the accuracy.
Let's first see what AdaBoost is exactly:
```
psource(AdaBoost)
```
AdaBoost takes as inputs: **L** and *K* where **L** is the learner and *K* is the number of hypotheses to be generated. The learner **L** takes in as inputs: a dataset and the weights associated with the examples in the dataset. But the `PerceptronLearner` does not handle weights and only takes a dataset as its input.
To remedy that we will give as input to the PerceptronLearner a modified dataset in which the examples will be repeated according to the weights associated to them. Intuitively, what this will do is force the learner to repeatedly learn the same example again and again until it can classify it correctly.
To convert `PerceptronLearner` so that it can take weights as input too, we will have to pass it through the **`WeightedLearner`** function.
```
psource(WeightedLearner)
```
The `WeightedLearner` function will then call the `PerceptronLearner`, during each iteration, with the modified dataset which contains the examples according to the weights associated with them.
### Example
We will pass the `PerceptronLearner` through `WeightedLearner` function. Then we will create an `AdaboostLearner` classifier with number of hypotheses or *K* equal to 5.
```
WeightedPerceptron = WeightedLearner(PerceptronLearner)
AdaboostLearner = AdaBoost(WeightedPerceptron, 5)
iris2 = DataSet(name="iris")
iris2.classes_to_numbers()
adaboost = AdaboostLearner(iris2)
adaboost([5, 3, 1, 0.1])
```
That is the correct answer. Let's check the error rate of adaboost with perceptron.
```
print("Error ratio for adaboost: ", err_ratio(adaboost, iris2))
```
It reduced the error rate considerably. Unlike the `PerceptronLearner`, `AdaBoost` was able to learn the complexity in the iris dataset.
| github_jupyter |
> **Copyright (c) 2020 Skymind Holdings Berhad**<br><br>
> **Copyright (c) 2021 Skymind Education Group Sdn. Bhd.**<br>
<br>
Licensed under the Apache License, Version 2.0 (the "License");
<br>you may not use this file except in compliance with the License.
<br>You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0/
<br>
<br>Unless required by applicable law or agreed to in writing, software
<br>distributed under the License is distributed on an "AS IS" BASIS,
<br>WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<br>See the License for the specific language governing permissions and
<br>limitations under the License.
<br>
<br>
**SPDX-License-Identifier: Apache-2.0**
<br>
# Introduction
This notebook is to introduce the basic of Numpy in managing arrays. Numpy is a useful library which will be used all the time during the whole course. This tutorial consists of two major parts, which are:
1. Creating NumPy Array
2. Manipulating Numpy Array
# Notebook Content
* [Creating Arrays](#Creating-Arrays)
* [0-Dimensional Arrays](#0-Dimensional-Array)
* [1-Dimensional Arrays](#1-Dimensional-Array)
* [2-Dimensional Arrays](#2-Dimensional-Array)
* [3-Dimensional Arrays](#3-Dimensional-Array)
* [Higher Dimensional Arrays](#Higher-Dimensional-Array)
* [Array of Zeros](#Array-of-Zeros)
* [Array of Ones](#Array-of-Ones)
* [Array with Specific Number](#Array-with-Specific-Number)
* [Identity Matrix](#Identity-Matrix)
* [Array with Random Values](#Array-with-Random-Values)
* [Properties of ndarray](#Properties-of-ndarray)
* [numpy.ndarray.shape](#numpy.ndarray.shape)
* [numpy.ndarray.ndim](#numpy.ndarray.ndim)
* [numpy.ndarray.size](#numpy.ndarray.size)
* [numpy.ndarray.itemsize](#numpy.ndarray.itemsize)
* [numpy.ndarray.dtype](#numpy.ndarray.dtype)
* [Access Element in Array](#Access-Element-in-Array)
* [Get the first row](#Get-the-first-row)
* [Get the first element](#Get-the-first-element)
* [Get the 2nd element in the 3rd row](#Get-the-2nd-element-in-the-3rd-row)
* [Retrieve list of elements](#Retrieve-list-of-elements)
* [Boolean array indexing](#Boolean-array-indexing)
* [Array Slicing](#Array-Slicing)
* [Math Operation in ndarray](#Math-Operation-in-ndarray)
* [Addition](#Addition)
* [Subtraction](#Subtraction)
* [Element-Wise Multiplication](#Element-Wise-Multiplication)
* [Division](#Division)
* [Matrix Multiplication](#Matrix-Multiplication)
* [Square Root](#Square-Root)
* [Sum All Elements](#Sum-All-Elements)
* [Maximum Value](#Maximum-Value)
* [Mean](#Mean)
* [Standard Deviation](#Standard-Deviation)
* [Array Manipulation](#Array-Manipulation)
* [Changing Shape](#Changing-Shape)
* [numpy.ndarray.reshape](#numpy.ndarray.reshape)
* [numpy.ndarray.flat](#numpy.ndarray.flat)
* [Transpose Operations](#Transpose-Operations)
* [numpy.ndarray.transpose](#numpy.ndarray.transpose)
* [numpy.swapaxes](#numpy.swapaxes)
* [Joining Array](#Joining-Array)
* [numpy.concatenate](#numpy.concatenate)
* [numpy.stack](#numpy.stack)
# Import Library
```
import numpy as np
SEED = 2021
np.random.seed(SEED)
```
# Creating Arrays
```
# Create ndarray
arr = np.array(['a', 'b', 'c', 'd'])
# Display array
print(arr)
# Show the datatype of arr
print(type(arr))
```
### 0 Dimensional Array
```
# Also known as scalar
scalar = np.array(100)
# Display scalar vector
print(scalar)
# Show the dimension of scalar
print("Dimension:", scalar.ndim)
```
### 1 Dimensional Array
```
# Create 1D array
D1_arr = np.array([1, 2, 3, 4, 5, 6, 7])
# Display 1D array
print(D1_arr)
# Show the dimension
print("Dimension:", D1_arr.ndim)
```
### 2 Dimensional Array
```
# Create 2D array
D2_arr = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
# Display 2D array
print(D2_arr)
# Show the dimension
print("Dimension:", D2_arr.ndim)
```
### 3 Dimensional Array
```
# Create 3D array
D3_arr = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
# Display 3D array
print(D3_arr)
# Show the dimension
print("Dimension:", D3_arr.ndim)
```
### Higher Dimensional Array
```
# Create 5 dimension array
nd_arr = np.array(['S', 'K', 'Y', 'M', 'I', 'N', 'D'], ndmin=5)
# Display 5D array
print(nd_arr)
# Show the dimension
print("Dimension:", nd_arr.ndim)
```
### Array of Zeros
```
# Required parameter shape in tuple
arr = np.zeros(shape=(2,2))
print(arr)
```
### Array of Ones
```
# Required parameter shape in tuple
arr = np.ones(shape=(3, 4))
print(arr)
```
### Array with Specific Number
```
# Required parameter shape in tuple and fill_value
arr = np.full(shape=(4, 4), fill_value=6)
print(arr)
```
### Identity Matrix
```
# Required parameter N, Number of rows
arr = np.eye(N=2)
print(arr)
```
### Array with Random Values
```
# Create a random valued array
arr = np.random.random(size=(4, 4),)
print(arr)
```
# Properties of ndarray
### numpy.ndarray.shape
Get the current shape of an array
```
arr = np.array([[1, 2, 3], [4, 5, 6]])
print("Shape:", arr.shape)
```
### numpy.ndarray.ndim
Number of array dimensions
```
print("Dimension:", arr.ndim)
```
### numpy.ndarray.size
Number of elements in the array.
```
print("Size:", arr.size)
```
### numpy.ndarray.itemsize
Length of one array element in bytes
```
print("Item Size in (byte):", arr.itemsize)
```
### numpy.ndarray.dtype
Data-type of the array’s elements
```
print("Item datatype:", arr.dtype)
```
# Access Element in Array
```
# Use index to access the element in the array
arr = np.random.random(size=(4,4))
print(arr)
```
### Get the first row
```
print("1st row:", arr[0])
```
### Get the first element
```
print("1st element:", arr[0, 0])
```
### Get the 2nd element in the 3rd row
```
print("2nd element in the 3rd row:", arr[2, 1])
```
### Retrieve list of elements
```
# Integer array indexing
print(arr[[0, 1, 2], [2, 1, 0]])
# Is similar to
print([arr[0, 2], arr[1, 1], arr[2, 0]])
rows = np.arange(4) # [0 1 2 3]
columns = [2, 3, 2, 1]
print(arr[rows, columns])
```
### Boolean array indexing
```
# Find the elements of a that are bigger than 0.5;
# this returns a numpy array of Booleans of the same
# shape as a, where each slot of bool_idx tells
# whether that element of a is > 0.5.
bool_idx = (arr > 0.5)
print(bool_idx)
# Return all elements that more than 0.5 to a list
print(arr[bool_idx])
```
# Array Slicing
```
# Create the following rank 2 array with shape (3, 4)
# [[ 1 2 3 4]
# [ 5 6 7 8]
# [ 9 10 11 12]]
a = np.array([[1,2,3,4],
[5,6,7,8],
[9,10,11,12]])
# Use slicing to pull out the subarray consisting of the first 2 rows with 2nd and 3rd columns
# b is the following array of shape (2, 2):
b = a[:2, 1:3]
print(b)
# Assign the first element in b to 10
b[0, 0] = 10
print(b)
# Two ways of accessing the data in the middle row of the array.
# Mixing integer indexing with slices yields an array of lower rank,
# while using only slices yields an array of the same rank as the
# original array:
row_r1 = a[1, :] # Rank 1 view of the second row of a
row_r2 = a[1:2, :] # Rank 2 view of the second row of a
print(row_r1, row_r1.shape)
print(row_r2, row_r2.shape)
# We can make the same distinction when accessing columns of an array:
col_r1 = a[:, 1]
col_r2 = a[:, 1:2]
print(col_r1, col_r1.shape)
print(col_r2, col_r2.shape)
```
# Math Operation in ndarray
```
# Create a random int array with size=(4,4)
x = np.random.randint(0, high=99, size=(4,4))
y = np.random.randint(0, high=99, size=(4,4))
print("x:\n", x)
print("\ny:\n", y)
```
### Addition
```
# Matrix addition operation
z = x + y
print(z)
# Addition with 1D array
arr = np.array([1, 2, 3, 4])
# Add arr to all rows in x
print(np.add(x, arr))
# Addition with scalar value (broadcast)
num = 10
# Add 10 to all elements in x
print(np.add(x, num))
```
### Subtraction
```
# Matrix subtraction operation
z = x - y
print(z)
# Subtraction with 1D array
arr = np.array([2, 4, 6, 8])
# Subtract arr to all rows in x
print(np.subtract(x, arr))
# Subtraction with scalar value
num = 20
# Subtract 20 to all element in x
print(np.subtract(x, num))
```
### Element-Wise Multiplication
```
z = x * y
print(z)
# Multiplication with 1D array
arr = [2, 3, 4, 5]
# Element wise multiplication
print(np.multiply(x, arr))
# Multiplication with scalar
num = 14
print(np.multiply(x, num))
```
### Division
```
# Element wise division
z = x / y
print(z)
# Division with 1D array
arr = [1, 2, 1, 2]
print(np.divide(x, arr))
# Division with scalar
num = 2
print(np.divide(x, num))
```
### Matrix Multiplication
```
# Using np.dot to perform matrix multiplication
# Matrix 1: shape = (3,4)
# Matrix 2: shape = (4,2)
# Shape of M1 dot M2 = (3,2)
M1 = np.random.randint(0, 9, size=(3,4))
M2 = np.random.randint(0, 19, size=(4,2))
# Matrix multiplication
ans = M1.dot(M2)
print("Answer:")
print(ans)
print("\nShape:", ans.shape)
```
### Square Root
```
print(np.sqrt(x))
```
### Sum All Elements
```
# Compute sum of all elements
print("Sum of all element in matrix x:", np.sum(x))
# Compute sum of each column
print("Sum of each column in matrix x:", np.sum(x, axis=0))
# Compute sum of each row
print("Sum of each row in matrix x:", np.sum(x, axis=1))
```
### Maximum Value
```
print(x)
# Maximum value in matrix x
print("Maximum value in matrix X:", np.max(x))
# Maximum value along the vertical axis
print("Maximum value along vertical axis(Column):", np.max(x, axis=0))
# Maximum value along the horizontal axis
print("Maximum value along the horizontal axis(Row):", np.max(x, axis=1))
```
### Mean
```
# Mean of all elements
print("Mean:", np.mean(x))
```
### Standard Deviation
```
# Compute the standard deviation
print("Standard Deviation:", np.std(x))
```
# Array Manipulation
## Changing Shape
### numpy.ndarray.reshape
Gives a new shape to an array without changing its data.
```
arr = np.random.randint(0, 9, size=(3,2))
print(arr)
# Reshape to (2,3)
print(arr.reshape((2,3)))
```
### numpy.ndarray.flat
A 1-D iterator over the array.
```
z = np.random.randint(0, 9, size=(10,10))
# Create a new copy of flatten array
print(z.flatten())
```
## Transpose Operations
### numpy.ndarray.transpose
Returns a view of the array with axes transposed.
```
arr = np.random.randint(0, 9, size=(3,2))
z = np.transpose(arr)
print(z)
print("\nShape:", z.shape, "\n")
# Similar function
print(arr.T)
```
### numpy.swapaxes
Interchange two axes of an array.
```
x = np.random.randint(0, 9, size=(4,4))
print(x)
print("Original shape:", x.shape)
# Swap between axis 0 and 1, similar to transpose
y = np.swapaxes(x, 0, 1)
print(y)
```
## Joining Array
```
x = np.random.randint(0, 9, size=(4,4))
y = np.random.randint(0, 9, size=(4,4))
print("x:")
print(x)
print("\ny:")
print(y)
```
### numpy.concatenate
Join a sequence of arrays along an existing axis.
```
# Vertically joined
print(np.concatenate((x,y), axis=0))
# Horizontally joined
print(np.concatenate((x,y), axis=1))
```
### numpy.stack
Join a sequence of arrays along a new axis.
```
# Vertically stack
print(np.stack((x,y), axis=0))
# Horizontally stack
print(np.stack((x,y), axis=1))
```
# Contributors
**Author**
<br>Chee Lam
# References
1. [Numpy Documentation](https://numpy.org/doc/stable/reference/index.html)
2. [Numpy Array Manipulation](https://www.tutorialspoint.com/numpy/numpy_array_manipulation.htm)
3. [NumPy Creating Arrays](https://www.w3schools.com/python/numpy/numpy_creating_arrays.asp)
| github_jupyter |
```
import sys
if 'google.colab' in sys.modules:
!wget https://raw.githubusercontent.com/yandexdataschool/Practical_RL/0ccb0673965dd650d9b284e1ec90c2bfd82c8a94/week08_pomdp/atari_util.py
!wget https://raw.githubusercontent.com/yandexdataschool/Practical_RL/0ccb0673965dd650d9b284e1ec90c2bfd82c8a94/week08_pomdp/env_pool.py
# If you are running on a server, launch xvfb to record game videos
# Please make sure you have xvfb installed
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import numpy as np
from IPython.core import display
import matplotlib.pyplot as plt
%matplotlib inline
```
### Kung-Fu, recurrent style
In this notebook we'll once again train an RL agent for Atari [KungFuMaster](https://gym.openai.com/envs/KungFuMaster-v0/), this time using recurrent neural networks.

```
import gym
from atari_util import PreprocessAtari
def make_env():
    """Build the KungFuMaster environment preprocessed for the agent:
    cropped, resized to 42x42, grayscale, single frame (no frame stacking)."""
    raw_env = gym.make("KungFuMasterDeterministic-v0")
    crop_fn = lambda img: img[60:-30, 15:]
    return PreprocessAtari(raw_env, height=42, width=42,
                           crop=crop_fn, color=False, n_frames=1)
env = make_env()
obs_shape = env.observation_space.shape
n_actions = env.action_space.n
print("Observation shape:", obs_shape)
print("Num actions:", n_actions)
print("Action names:", env.env.env.get_action_meanings())
s = env.reset()
for _ in range(100):
s, _, _, _ = env.step(env.action_space.sample())
plt.title('Game image')
plt.imshow(env.render('rgb_array'))
plt.show()
plt.title('Agent observation')
plt.imshow(s.reshape([42, 42]))
plt.show()
```
### POMDP setting
The Atari game we're working with is actually a POMDP: your agent needs to know timing at which enemies spawn and move, but cannot do so unless it has some memory.
Let's design another agent that has a recurrent neural net memory to solve this. Here's a sketch.

```
import torch
import torch.nn as nn
import torch.nn.functional as F
# a special module that converts [batch, channel, w, h] to [batch, units]
class Flatten(nn.Module):
    """Collapse [batch, channel, w, h] activations into [batch, units]."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
class SimpleRecurrentAgent(nn.Module):
    """A simple recurrent actor-critic agent: conv encoder -> dense -> LSTM cell,
    with separate linear heads for action logits and state value."""

    def __init__(self, obs_shape, n_actions, reuse=False):
        """Build the network.

        Args:
            obs_shape: observation shape; the conv stack is sized for (1, 42, 42).
            n_actions: number of discrete actions.
            reuse: unused; kept for interface compatibility.
        """
        super(self.__class__, self).__init__()

        self.conv0 = nn.Conv2d(1, 32, kernel_size=(3, 3), stride=(2, 2))
        self.conv1 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2))
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), stride=(2, 2))
        # nn.Flatten flattens all dims after the batch dim, same behavior as
        # the hand-rolled Flatten module used elsewhere in this notebook.
        self.flatten = nn.Flatten()

        # 42x42 input -> 20 -> 9 -> 4 spatial, so 32*4*4 = 512 features.
        self.hid = nn.Linear(512, 128)
        self.rnn = nn.LSTMCell(128, 128)

        self.logits = nn.Linear(128, n_actions)
        self.state_value = nn.Linear(128, 1)

    def forward(self, prev_state, obs_t):
        """
        Takes agent's previous hidden state and a new observation,
        returns a new hidden state and whatever the agent needs to learn.

        Args:
            prev_state: (h, c) pair of [batch, 128] tensors (tuple or list).
            obs_t: [batch, 1, 42, 42] float observation tensor.

        Returns:
            (new_state, (logits, state_value)) where new_state is an (h, c)
            tuple, logits is [batch, n_actions] and state_value is [batch, 1].
        """
        # Conv encoder -> dense -> one recurrent step -> actor & critic heads.
        x = F.relu(self.conv0(obs_t))
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.flatten(x)
        x = F.relu(self.hid(x))

        # LSTMCell requires the hidden state as a (h, c) tuple; prev_state may
        # arrive as a list (e.g. from EnvPool), so unpack explicitly.
        h_prev, c_prev = prev_state
        new_state = self.rnn(x, (h_prev, c_prev))

        # Both heads read the new hidden state h.
        h_new = new_state[0]
        logits = self.logits(h_new)
        state_value = self.state_value(h_new)

        return new_state, (logits, state_value)

    def get_initial_state(self, batch_size):
        """Return a list of agent memory states at game start. Each state is a np array of shape [batch_size, ...]"""
        return torch.zeros((batch_size, 128)), torch.zeros((batch_size, 128))

    def sample_actions(self, agent_outputs):
        """pick actions given numeric agent outputs (np arrays)"""
        logits, state_values = agent_outputs
        # dim=-1: normalize over the action dimension (an implicit dim is
        # deprecated in recent PyTorch and ambiguous for batched logits).
        probs = F.softmax(logits, dim=-1)
        return torch.multinomial(probs, 1)[:, 0].data.numpy()

    def step(self, prev_state, obs_t):
        """ like forward, but obs_t is a numpy array """
        obs_t = torch.tensor(np.asarray(obs_t), dtype=torch.float32)
        (h, c), (l, s) = self.forward(prev_state, obs_t)
        return (h.detach(), c.detach()), (l.detach(), s.detach())
n_parallel_games = 5
gamma = 0.99
agent = SimpleRecurrentAgent(obs_shape, n_actions)
state = [env.reset()]
_, (logits, value) = agent.step(agent.get_initial_state(1), state)
print("action logits:\n", logits)
print("state values:\n", value)
```
### Let's play!
Let's build a function that measures agent's average reward.
```
def evaluate(agent, env, n_games=1):
    """Plays an entire game start to end, returns session rewards."""
    rewards_per_game = []
    for _ in range(n_games):
        # fresh observation and recurrent memory for each episode
        obs = env.reset()
        memories = agent.get_initial_state(1)

        episode_reward = 0
        done = False
        while not done:
            memories, readouts = agent.step(memories, obs[None, ...])
            action = agent.sample_actions(readouts)

            obs, reward, done, info = env.step(action[0])
            episode_reward += reward

        rewards_per_game.append(episode_reward)

    return rewards_per_game
import gym.wrappers
with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor:
rewards = evaluate(agent, env_monitor, n_games=3)
print(rewards)
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from base64 import b64encode
from IPython.display import HTML
video_paths = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
video_path = video_paths[-1] # You can also try other indices
if 'google.colab' in sys.modules:
# https://stackoverflow.com/a/57378660/1214547
with video_path.open('rb') as fp:
mp4 = fp.read()
data_url = 'data:video/mp4;base64,' + b64encode(mp4).decode()
else:
data_url = str(video_path)
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(data_url))
```
### Training on parallel games
We introduce a class called EnvPool - it's a tool that handles multiple environments for you. Here's how it works:

```
from env_pool import EnvPool
pool = EnvPool(agent, make_env, n_parallel_games)
```
We are going to train our agent on a thing called __rollouts:__

A rollout is just a sequence of T observations, actions and rewards that agent took consequently.
* First __s0__ is not necessarily initial state for the environment
* Final state is not necessarily terminal
* We sample several parallel rollouts for efficiency
```
# for each of n_parallel_games, take 10 steps
rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact(10)
print("Actions shape:", rollout_actions.shape)
print("Rewards shape:", rollout_rewards.shape)
print("Mask shape:", rollout_mask.shape)
print("Observations shape: ", rollout_obs.shape)
```
# Actor-critic objective
Here we define a loss function that uses rollout above to train advantage actor-critic agent.
Our loss consists of three components:
* __The policy "loss"__
$$ \hat J = {1 \over T} \cdot \sum_t { \log \pi(a_t | s_t) } \cdot A_{const}(s,a) $$
* This function has no meaning in and of itself, but it was built such that
* $ \nabla \hat J = {1 \over N} \cdot \sum_t { \nabla \log \pi(a_t | s_t) } \cdot A(s,a) \approx \nabla E_{s, a \sim \pi} R(s,a) $
* Therefore if we __maximize__ J_hat with gradient descent we will maximize expected reward
* __The value "loss"__
$$ L_{td} = {1 \over T} \cdot \sum_t { [r + \gamma \cdot V_{const}(s_{t+1}) - V(s_t)] ^ 2 }$$
* Ye Olde TD_loss from q-learning and alike
* If we minimize this loss, V(s) will converge to $V_\pi(s) = E_{a \sim \pi(a | s)} R(s,a) $
* __Entropy Regularizer__
$$ H = - {1 \over T} \sum_t \sum_a {\pi(a|s_t) \cdot \log \pi (a|s_t)}$$
* If we __maximize__ entropy we discourage agent from predicting zero probability to actions
prematurely (a.k.a. exploration)
So we optimize a linear combination of $L_{td}$ $- \hat J$, $-H$
```
```
```
```
```
```
__One more thing:__ since we train on T-step rollouts, we can use N-step formula for advantage for free:
* At the last step, $A(s_t,a_t) = r(s_t, a_t) + \gamma \cdot V(s_{t+1}) - V(s) $
* One step earlier, $A(s_t,a_t) = r(s_t, a_t) + \gamma \cdot r(s_{t+1}, a_{t+1}) + \gamma ^ 2 \cdot V(s_{t+2}) - V(s) $
* Et cetera, et cetera. This way the agent starts training much faster, since its estimate of A(s,a) depends less on its (imperfect) value function and more on actual rewards. There's also a [nice generalization](https://arxiv.org/abs/1506.02438) of this.
__Note:__ it's also a good idea to scale rollout_len up to learn longer sequences. You may wish to set it to >=20, or to start at 10 and scale it up as training progresses.
```
def to_one_hot(y, n_dims=None):
    """Convert an integer tensor into a float one-hot matrix.

    If n_dims is omitted, it is inferred as max(y) + 1.
    """
    flat = y.to(dtype=torch.int64).view(-1, 1)
    if n_dims is None:
        n_dims = int(torch.max(flat)) + 1
    one_hot = torch.zeros(flat.size(0), n_dims)
    one_hot.scatter_(1, flat, 1)
    return one_hot
opt = torch.optim.Adam(agent.parameters(), lr=1e-5)
def train_on_rollout(states, actions, rewards, is_not_done, prev_memory_states, gamma=0.99):
    """
    Takes a sequence of states, actions and rewards produced by generate_session.
    Updates agent's weights by following the policy gradient above.
    Uses the module-level Adam optimizer `opt` and the global `agent`.

    Args:
        states: [batch_size, time, c, h, w] observations.
        actions: [batch_size, time] integer actions.
        rewards: [batch_size, time] rewards.
        is_not_done: [batch_size, time] float mask (1 while episode is running).
            NOTE(review): not used below, matching the original template —
            rollouts are treated as if they never cross episode boundaries.
        prev_memory_states: agent memory at the start of the rollout.
        gamma: discount factor.

    Returns:
        The scalar loss as a numpy value.
    """
    # shape: [batch_size, time, c, h, w]
    states = torch.tensor(np.asarray(states), dtype=torch.float32)
    actions = torch.tensor(np.array(actions), dtype=torch.int64)  # shape: [batch_size, time]
    rewards = torch.tensor(np.array(rewards), dtype=torch.float32)  # shape: [batch_size, time]
    is_not_done = torch.tensor(np.array(is_not_done), dtype=torch.float32)  # shape: [batch_size, time]
    rollout_length = rewards.shape[1] - 1

    # Unroll the agent over the rollout, collecting logits and state values.
    # Detach the incoming memory so gradients do not flow into earlier rollouts.
    memory = [m.detach() for m in prev_memory_states]

    logits = []        # logit sequence, one [batch, n_actions] entry per step
    state_values = []  # value sequence, one [batch, 1] entry per step
    for t in range(rewards.shape[1]):
        obs_t = states[:, t]

        # one recurrent step: new memory plus (logits_t, values_t)
        memory, (logits_t, values_t) = agent(memory, obs_t)

        logits.append(logits_t)
        state_values.append(values_t)

    logits = torch.stack(logits, dim=1)                            # [batch, time, n_actions]
    state_values = torch.stack(state_values, dim=1).squeeze(-1)    # [batch, time]
    probas = F.softmax(logits, dim=2)
    logprobas = F.log_softmax(logits, dim=2)

    # select log-probabilities for chosen actions, log pi(a_i|s_i)
    actions_one_hot = to_one_hot(actions, n_actions).view(
        actions.shape[0], actions.shape[1], n_actions)
    logprobas_for_actions = torch.sum(logprobas * actions_one_hot, dim=-1)

    J_hat = 0       # policy objective, accumulated over time steps
    value_loss = 0  # critic TD(0) MSE, accumulated over time steps

    # Bootstrap the return from the last predicted value (detached: it acts
    # as a constant target, not a trainable quantity).
    cumulative_returns = state_values[:, -1].detach()

    for t in reversed(range(rollout_length)):
        r_t = rewards[:, t]  # current rewards
        # current state values
        V_t = state_values[:, t]
        V_next = state_values[:, t + 1].detach()  # next state values (constant target)
        # log-probability of a_t in s_t
        logpi_a_s_t = logprobas_for_actions[:, t]

        # update G_t = r_t + gamma * G_{t+1} (n-step return)
        cumulative_returns = G_t = r_t + gamma * cumulative_returns

        # Temporal difference error (MSE for V(s)); mean, not sum.
        value_loss += torch.mean((r_t + gamma * V_next - V_t) ** 2)

        # Advantage A(s_t, a_t) with V(s_t) as baseline; detached so the
        # policy gradient does not backprop through the critic.
        advantage = (cumulative_returns - V_t).detach()

        # policy objective J_hat (maximized, hence negated in the loss)
        J_hat += torch.mean(logpi_a_s_t * advantage)

    # Entropy regularizer H = -sum_a pi(a|s) log pi(a|s), averaged over
    # batch and time; maximized to keep the policy exploratory.
    entropy_reg = -torch.mean(torch.sum(probas * logprobas, dim=-1))

    # add-up three loss components and average over time
    loss = -J_hat / rollout_length +\
        value_loss / rollout_length +\
        -0.01 * entropy_reg

    # Gradient descent step
    opt.zero_grad()
    loss.backward()
    opt.step()

    return loss.data.numpy()
# let's test it
memory = list(pool.prev_memory_states)
rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact(10)
train_on_rollout(rollout_obs, rollout_actions,
rollout_rewards, rollout_mask, memory)
```
# Train
just run train step and see if agent learns any better
```
from IPython.display import clear_output
from tqdm import trange
from pandas import DataFrame
moving_average = lambda x, **kw: DataFrame(
{'x': np.asarray(x)}).x.ewm(**kw).mean().values
rewards_history = []
for i in trange(15000):
memory = list(pool.prev_memory_states)
rollout_obs, rollout_actions, rollout_rewards, rollout_mask = pool.interact(
10)
train_on_rollout(rollout_obs, rollout_actions,
rollout_rewards, rollout_mask, memory)
if i % 100 == 0:
rewards_history.append(np.mean(evaluate(agent, env, n_games=1)))
clear_output(True)
plt.plot(rewards_history, label='rewards')
plt.plot(moving_average(np.array(rewards_history),
span=10), label='rewards ewma@10')
plt.legend()
plt.show()
if rewards_history[-1] >= 10000:
print("Your agent has just passed the minimum homework threshold")
break
```
Relax and grab some refreshments while your agent is locked in an infinite loop of violence and death.
__How to interpret plots:__
The session reward is the easy thing: it should in general go up over time, but it's okay if it fluctuates ~~like crazy~~. It's also OK if the reward doesn't increase substantially before some 10k initial steps. However, if the reward reaches zero and doesn't seem to get up over 2-3 evaluations, there's something wrong happening.
Since we use a policy-based method, we also keep track of __policy entropy__ - the same one you used as a regularizer. The only important thing about it is that your entropy shouldn't drop too low (`< 0.1`) before your agent gets the yellow belt. Or at least it can drop there, but _it shouldn't stay there for long_.
If it does, the culprit is likely:
* Some bug in entropy computation. Remember that it is $ - \sum p(a_i) \cdot log p(a_i) $
* Your agent architecture converges too fast. Increase entropy coefficient in actor loss.
* Gradient explosion - just [clip gradients](https://stackoverflow.com/a/56069467) and maybe use a smaller network
* Us. Or PyTorch developers. Or aliens. Or lizardfolk. Contact us on forums before it's too late!
If you're debugging, just run `logits, values = agent.step(batch_states)` and manually look into logits and values. This will reveal the problem 9 times out of 10: you'll likely see some NaNs or insanely large numbers or zeros. Try to catch the moment when this happens for the first time and investigate from there.
### "Final" evaluation
```
import gym.wrappers
with gym.wrappers.Monitor(make_env(), directory="videos", force=True) as env_monitor:
final_rewards = evaluate(agent, env_monitor, n_games=20)
print("Final mean reward", np.mean(final_rewards))
# Show video. This may not work in some setups. If it doesn't
# work for you, you can download the videos and view them locally.
from pathlib import Path
from base64 import b64encode
from IPython.display import HTML
video_paths = sorted([s for s in Path('videos').iterdir() if s.suffix == '.mp4'])
video_path = video_paths[-1] # You can also try other indices
if 'google.colab' in sys.modules:
# https://stackoverflow.com/a/57378660/1214547
with video_path.open('rb') as fp:
mp4 = fp.read()
data_url = 'data:video/mp4;base64,' + b64encode(mp4).decode()
else:
data_url = str(video_path)
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(data_url))
```
| github_jupyter |
# References
* Stanford NLP Lecture
* http://cs224d.stanford.edu/syllabus.html
# Data
* babi data for
* http://www.thespermwhale.com/jaseweston/
* http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz
# Memory Networks Facebook AI
## Memory Networks (2014)
## End-To-End Memory Networks
* Source Code
* https://github.com/facebook/MemNN
* Differential Version Of Memory Networks
* Two grand challenges in artificial intelligence research
* Multiple computational steps in the service of answering a question or completing a task
* Long term dependencies in sequential data
* Because the function from input to output is smooth, we can easily compute gradients and back-propagate through it.
<img src="C2Q7W6NKPYUTPK448GGVG6E7W1VXQ408.png"/>
# NTM DeepMind
## Neural Turing Machine (2014)
* Human Cognition VS Computing
* Rule-based manipulation VS Simple Program
* Short-term storage of information VS Program arguments
* -> Working Memory VS "NTM"
<img src="LKTTEJA7F9N1D58DRU1G1Q1AF3X0JUTB.png"/>
<img src="YPIE6LGFCS12CPEWOSRG7IB2GXC0C831.png"/>
* !!! bit #7, bit #8 in input are delimiter bits
* On bit #7 means "input start"
* On bit #8 means "input end and start result" such as <go> word in seq2seq
* insert zero bits in inputs while outputs are generated
<img src="B61HT3HBUA6KGAIV6NGR3HLLVRVAOAGL.png"/>
## REINFORCEMENT LEARNING NEURAL TURING MACHINES
* source code
* https://github.com/ilyasu123/rlntm
## Hybrid computing using a neural network with dynamic external memory (DNC) (2016)
* Unofficial Source code
* https://github.com/Mostafa-Samir/DNC-tensorflow
* NTM vs DNC
* Same at target level
* Implementation of addressing method + introduction of memory allocation method
* DNC is better for accuracy (comparison in bAbI task)
## Scaling Memory-Augmented Neural Networks with Sparse Reads and Writes
* SAM (Sparse Access Model)
* Upgrade NTM using ANN for large size external memory
* "Scaling" = Large size
* Approximate Nearest Neighbor (ANN) $\mathcal{O}(\log N)$ instead of linear search $\mathcal{O}(N)$
* K-Nearest Neighbor(KNN)
* K sparse number
* c.f. Sparse Differentiable Neural Computer (SDNC) is upgrade version of DNC
<img src="ENHTL692NDDOTXHP6K151W8513729XEC.png"/>
## Pointer Networks (2015)
## Ask Me Anything: Dynamic Memory Networks for Natural Language Processing (DMN, Dynamic Memory Networks) (2015)
## "Hierarchical Memory Networks" (ICLR 2017)
Reduce memory seen by softmax by expressing memory hierarchically
## "Dynamic NTM with Continuous and Discrete Addressing Schemes" ICLR 2017
Based on REINFORCE hard attention and softmax
Soft attention is used in combination
I do not really understand
## "Lie Access Neural Turing Machine" ICLR 2017
Addressing using Lie group actions
This lets the head move in a natural, continuous way
| github_jupyter |
# **Deep Learning for Detecting Pneumonia from X-ray Images**
```
import numpy as np # linear algebra
import cv2 # import cv2
from PIL import Image
from google.colab import drive
drive.mount('/content/drive')
DIR = open('/content/drive/MyDrive/Chest_Xray','r')
print(DIR)
train_folder = '/content/drive/MyDrive/Chest_Xray/train'
test_folder ='/content/drive/MyDrive/Chest_Xray/test'
val_folder ='/content/drive/MyDrive/Chest_Xray/val'
```
# DATA PREPROCESSING
```
labels = ["NORMAL", "PNEUMONIA"] # each folder has two sub folder name "PNEUMONIA", "NORMAL"
IMG_SIZE = 50 # resize image
def get_data_train(data_dir, max_per_class=1341):
    """Load grayscale chest X-ray images from data_dir/<label> folders.

    Args:
        data_dir: directory with one sub-folder per entry in the global
            `labels` list ("NORMAL", "PNEUMONIA").
        max_per_class: cap on images read per class. Defaults to 1341
            (presumably the size of the smallest class, keeping the
            classes balanced — TODO confirm against the dataset).

    Returns:
        An object-dtype numpy array of [image_array, class_index] pairs.
        dtype=object is required because the pairs mix a 2-D image with an
        int; on NumPy >= 1.24 a plain np.array(...) over such ragged data
        raises instead of silently building an object array.
    """
    data = []
    for label in labels:
        path = os.path.join(data_dir, label)
        class_num = labels.index(label)
        count = 0
        for img in os.listdir(path):
            count = count + 1
            if count > max_per_class:
                break
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                data.append([new_array, class_num])
            except Exception as e:
                # cv2.imread returns None for unreadable files, which makes
                # cv2.resize raise; report and skip such files.
                print(e)
    return np.array(data, dtype=object)
import os
train = get_data_train(train_folder)
test = get_data_train(test_folder)
val = get_data_train(val_folder)
```
# TRAIN TEST SPLIT
```
X_train = []
y_train = []
X_val = []
y_val = []
X_test = []
y_test = []
for feature, label in train:
X_train.append(feature)
y_train.append(label)
for feature, label in test:
X_test.append(feature)
y_test.append(label)
for feature, label in val:
X_val.append(feature)
y_val.append(label)
X_train = np.array(X_train) / 255
X_val = np.array(X_val) / 255
X_test = np.array(X_test) / 255
X_train = X_train.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y_train = np.array(y_train)
X_val = X_val.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y_val = np.array(y_val)
X_test = X_test.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y_test = np.array(y_test)
```
# **CONVOLUTIONAL NEURAL NETWORK BASED MODEL**
```
import tensorflow as tf
from tensorflow.keras import models
from tensorflow.keras.layers import Flatten, Conv2D, Activation, Dense, Dropout, MaxPooling2D
from tensorflow.keras.models import Sequential
# Binary CNN classifier: two conv/pool/dropout stages, then dense head.
model = Sequential()
# Stage 1: 32 filters over the 50x50x1 grayscale input.
model.add(Conv2D(32, (3, 3), padding="same", input_shape=X_train.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(2, 2))
model.add(Dropout(0.2))
# Stage 2: 64 filters, heavier dropout.
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(2, 2))
model.add(Dropout(0.4))
# Dense head tapering 128 -> 64 -> 32 -> 1.
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(32, activation="relu"))
# Single sigmoid unit: P(PNEUMONIA).
model.add(Dense(1))
model.add(Activation("sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# NOTE(review): the test set is used as validation_data, so reported
# val_accuracy is not an unbiased test estimate — consider using (X_val, y_val).
history = model.fit(X_train, y_train, epochs=12, validation_data=(X_test, y_test), shuffle=True)
import keras
model.save("cnn.h5")
model = keras.models.load_model("cnn.h5")
y_pred = model.predict(X_test)
```
## **TESTING ON UNSEEN DATA**
```
#print(labels[int(prediction[0])])
import matplotlib.pyplot as plt
img = cv2.imread("/content/test3.jpeg", cv2.IMREAD_GRAYSCALE)
#cv2.imshow("Test",img)
new_array = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
new_array = np.reshape(new_array,(1,50,50,1))
plt.imshow(img)
prediction = model.predict(new_array)
print(prediction)
if(int(prediction[0]) == 1):
print("\n\nHey! You are suffering from Pneumonia.\nSome Precautions that you can take: \n1. Consult a nearby doctor at the earliest.\n2. You must give a try for Saltwater Gargle.\n3. Drink Hot Peppermint Tea.\n4. Drink at least 8 cups of water or liquid per day. Liquids help to thin mucous and keep your fever down.\n5. Stick to your treatment plan.")
else:
print("You're out of danger! Stay Safe!")
#print(labels[int(prediction[0])])
import matplotlib.pyplot as plt
img = cv2.imread("/content/test2.jpg", cv2.IMREAD_GRAYSCALE)
#cv2.imshow("Test",img)
new_array = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
new_array = np.reshape(new_array,(1,50,50,1))
plt.imshow(img)
prediction = model.predict(new_array)
print(prediction)
if(int(prediction[0]) == 1):
print("\n\nHey! You are suffering from Pneumonia.\nSome Precautions that you can take: \n1. Consult a nearby doctor at the earliest.\n2. You must give a try for Saltwater Gargle.\n3. Drink Hot Peppermint Tea.\n4. Drink at least 8 cups of water or liquid per day. Liquids help to thin mucous and keep your fever down.\n5. Stick to your treatment plan.")
```
# **ANALYSIS OF TRAINED MODEL**
```
from sklearn.metrics import confusion_matrix
y_f= []
for i in y_pred:
if (i[0] > 0.5):
y_f.append(1)
else:
y_f.append(0)
cm = confusion_matrix(y_test, y_f)
```
# 1.CONFUSION MATRIX
```
plt.clf()
classNames = ["Normal ", "Pneumonia"]
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Wistia)
plt.title(' Confusion Matrix - Test Data')
plt.ylabel('True label')
plt.xlabel('Predicted label')
tick_marks = np.arange(len(classNames))
plt.xticks(tick_marks, classNames, rotation=45)
plt.yticks(tick_marks, classNames)
s = [['TN','FP'], ['FN', 'TP']]
for i in range(2):
for j in range(2):
plt.text(j,i, str(s[i][j])+" = "+str(cm[i][j]))
plt.show()
```
# 2.ACCURACY VS EPOCH
```
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.xlabel('epochs')
plt.ylabel('Accuracy')
plt.legend(['train_accuracy','val_accuracy'], loc=0)
plt.show()
```
# 3.LOSS VS EPOCH
```
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.xlabel('epochs')
plt.ylabel('Loss')
plt.legend(['train_loss','val_loss'], loc=0)
plt.show()
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
from IPython.display import Image
```
Chapter 9 On-policy Prediction with Approximation
=========
approximate value function: parameterized function $\hat{v}(s, w) \approx v_\pi(s)$
+ applicable to partially observable problems.
### 9.1 Value-function Approximation
$s \to u$: $s$ is the state updated and $u$ is the update target that $s$'s estimated value is shifted toward.
We use machine learning methods and pass to them the $s \to u$ of each update as a training example. Then we interpret the approximate function they produce as an estimated value function.
not all function approximation methods are equally well suited for use in reinforcement learning:
+ learn efficiently from incrementally acquired data: many traditional methods assume a static training set over which multiple passes are made.
+ are able to handle nonstationary target functions.
### 9.2 The Prediction Objective (VE)
which states we care most about: a state distribution $\mu(s) \geq 0$, $\sum_s \mu(s) = 1$.
+ Often $\mu(s)$ is chosen to be the fraction of time spent in $s$.
objective function, the Mean Squared Value Error, denoted $\overline{VE}$:
\begin{equation}
\overline{VE}(w) \doteq \sum_{s \in \mathcal{S}} \mu(s) \left [ v_\pi (s) - \hat{v}(s, w) \right ]^2
\end{equation}
where $v_\pi(s)$ is the true value and $\hat{v}(s, w)$ is the approximate value.
Note that best $\overline{VE}$ is no guarantee of our ultimate purpose: to find a better policy.
+ global optimum.
+ local optimum.
+ don't convergence, or diverge.
### 9.3 Stochastic-gradient and Semi-gradient Methods
SGD: well suited to online reinforcement learning.
\begin{align}
w_{t+1} &\doteq w_t - \frac1{2} \alpha \nabla \left [ v_\pi(S_t) - \hat{v}(S_t, w_t) \right ]^2 \\
&= w_t + \alpha \left [ \color{blue}{v_\pi(S_t)} - \hat{v}(S_t, w_t) \right ] \nabla \hat{v}(S_t, w_t) \\
&\approx w_t + \alpha \left [ \color{blue}{U_t} - \hat{v}(S_t, w_t) \right ] \nabla \hat{v}(S_t, w_t) \\
\end{align}
$S_t \to U_t$, is not the true value $v_\pi(S_t)$, but some, possibly random, approximation to it. (前面各种方法累计的value):
+ If $U_t$ is an unbiased estimate, $w_t$ is guaranteed to converge to a local optimum.
+ Otherwise, like a bootstrapping target or DP target => semi-gradient methods. (might not converge as robustly as gradient methods)
- significantly faster learning.
- enable learning to be continual and online.
state aggregation: states are grouped together, with one estimated value for each group.
### 9.4 Linear Methods
For every state $s$, there is a real-valued feature vector $x(s) \doteq (x_1(s), x_2(s), \dots, x_d(s))^T$:
\begin{equation}
\hat{v}(s, w) \doteq w^T x(s) \doteq \sum_{i=1}^d w_i x_i(s)
\end{equation}
### 9.5 Feature Construction for Linear Methods
Choosing features appropriate to the task is an important way of adding prior domain knowledge to reinforcement learning systems.
+ Polynomials
+ Fourier Basis: low dimension, easy to select, global properties
+ Coarse Coding
+ Tile Coding: convolution kernel?
+ Radial Basis Functions
### 9.6 Selecting Step-Size Parameters Manually
A good rule of thumb for setting the step-size parameter of linear SGD methods is then $\alpha \doteq (\gamma \mathbf{E}[x^T x])^{-1}$
### 9.7 Nonlinear Function Approximation: Artificial Neural Networks
+ANN, CNN
### 9.8 Least-Squares TD
$w_{TD} = A^{-1} b$: data efficient, while expensive computation
### 9.9 Memory-based Function Approximation
nearest neighbor method
### 9.10 Kernel-based Function Approximation
RBF function
### 9.11 Looking Deeper at On-policy Learning: Interest and Emphasis
more interested in some states than others:
+ interest $I_t$: the degree to which we are interested in accurately valuing the state at time $t$.
+ emphasis $M_t$:
\begin{align}
w_{t+n} & \doteq w_{t+n-1} + \alpha M_t \left [ G_{t:t+n} - \hat{v}(S_t, w_{t+n-1} \right ] \nabla \hat{v}(S_t, w_{t+n-1}) \\
M_t & = I_t + \gamma^n M_{t-n}, \qquad 0 \leq t < T
\end{align}
| github_jupyter |
# PCA-tSNE-AE
```
%matplotlib notebook
import tensorflow as tf
import math
from sklearn import datasets
from sklearn.manifold import TSNE
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
iris_dataset = datasets.load_iris()
```
## PCA
```
class TF_PCA:
    """PCA on a data matrix via TensorFlow 1.x SVD.

    fit() runs SVD once and caches U, the singular values and the
    diagonal sigma matrix.  reduce() projects the data onto the leading
    components, chosen either directly (n_dimensions) or by the fraction
    of variance to keep (keep_info).
    """

    def __init__(self, data, target=None, dtype=tf.float32):
        self.data = data
        self.target = target  # stored for the caller; unused internally
        self.dtype = dtype
        self.graph = None
        self.X = None
        self.u = None
        self.singular_values = None
        self.sigma = None

    def fit(self):
        # Build the SVD graph once, then evaluate it on self.data.
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.X = tf.placeholder(self.dtype, shape=self.data.shape)
            # Perform SVD
            singular_values, u, _ = tf.svd(self.X)
            # Create sigma matrix
            sigma = tf.diag(singular_values)
        with tf.Session(graph=self.graph) as session:
            self.u, self.singular_values, self.sigma = session.run([u, singular_values, sigma],
                                                                   feed_dict={self.X: self.data})

    def reduce(self, n_dimensions=None, keep_info=None):
        # keep_info (a fraction in (0, 1]) takes precedence over
        # n_dimensions when both are given.
        if keep_info:
            # Normalize singular values
            normalized_singular_values = self.singular_values / sum(self.singular_values)
            # Create the aggregated ladder of kept information per dimension
            ladder = np.cumsum(normalized_singular_values)
            # Get the first index which is above the given information threshold
            index = next(idx for idx, value in enumerate(ladder) if value >= keep_info) + 1
            n_dimensions = index
        with self.graph.as_default():
            # Cut out the relevant part from sigma
            sigma = tf.slice(self.sigma, [0, 0], [self.data.shape[1], n_dimensions])
            # PCA
            pca = tf.matmul(self.u, sigma)
        with tf.Session(graph=self.graph) as session:
            return session.run(pca, feed_dict={self.X: self.data})
# Fit PCA on the iris features, keeping enough components for 90% of
# the variance, and scatter-plot the projection colored by class.
tf_pca = TF_PCA(iris_dataset.data, iris_dataset.target)
tf_pca.fit()
pca = tf_pca.reduce(keep_info=0.9)  # Results in two dimensions
color_mapping = {0: sns.xkcd_rgb['bright purple'], 1: sns.xkcd_rgb['lime'], 2: sns.xkcd_rgb['ochre']}
colors = list(map(lambda x: color_mapping[x], iris_dataset.target))
plt.scatter(pca[:, 0], pca[:, 1], c=colors)
plt.show()
```
## TSNE
```
# Compare t-SNE embeddings of the iris data at three perplexity values.
plt.close()
model = TSNE(learning_rate=100, n_components=2, random_state=0, perplexity=5)
tsne5 = model.fit_transform(iris_dataset.data)
model = TSNE(learning_rate=100, n_components=2, random_state=0, perplexity=30)
tsne30 = model.fit_transform(iris_dataset.data)
model = TSNE(learning_rate=100, n_components=2, random_state=0, perplexity=50)
tsne50 = model.fit_transform(iris_dataset.data)
# Stack the three embeddings vertically in one figure; `colors` was
# computed in the PCA cell above.
plt.figure(1)
plt.subplot(311)
plt.scatter(tsne5[:, 0], tsne5[:, 1], c=colors)
plt.subplot(312)
plt.scatter(tsne30[:, 0], tsne30[:, 1], c=colors)
plt.subplot(313)
plt.scatter(tsne50[:, 0], tsne50[:, 1], c=colors)
plt.show()
```
## Auto Encoder
```
plt.close()
def batch_generator(features, batch_size=50, n_epochs=1000):
    """Yield consecutive mini-batches of `features`, for `n_epochs` passes.

    The final batch of an epoch may be shorter than `batch_size`.

    Fixes two defects of the original implementation:
    * it read the module-level global `n` instead of the length of the
      data it was actually given;
    * the last batch of every epoch was sliced as features[start:-1],
      silently dropping the final sample.
    """
    num_samples = len(features)
    for _ in range(n_epochs):
        for start in range(0, num_samples, batch_size):
            yield features[start:start + batch_size]
# Auto Encoder
class TF_AutoEncoder:
    """A one-hidden-layer autoencoder built on TensorFlow 1.x.

    fit() trains sigmoid encoder/decoder layers with Adam on squared
    reconstruction error and stores the learned encoder parameters;
    reduce() projects the features into the low-dimensional space.
    """

    def __init__(self, features, labels, dtype=tf.float32):
        # `labels` are stored but never used for training (unsupervised).
        self.features = features
        self.labels = labels
        self.dtype = dtype
        self.encoder = dict()

    def fit(self, n_dimensions):
        graph = tf.Graph()
        with graph.as_default():
            # Input variable
            X = tf.placeholder(self.dtype, shape=(None, self.features.shape[1]))
            # Network variables
            encoder_weights = tf.Variable(tf.random_normal(shape=(self.features.shape[1], n_dimensions)))
            encoder_bias = tf.Variable(tf.zeros(shape=[n_dimensions]))
            decoder_weights = tf.Variable(tf.random_normal(shape=(n_dimensions, self.features.shape[1])))
            decoder_bias = tf.Variable(tf.zeros(shape=[self.features.shape[1]]))
            # Encoder part
            encoding = tf.nn.sigmoid(tf.add(tf.matmul(X, encoder_weights), encoder_bias))
            # Decoder part
            predicted_x = tf.nn.sigmoid(tf.add(tf.matmul(encoding, decoder_weights), decoder_bias))
            # Define the cost function and optimizer to minimize squared error
            cost = tf.reduce_mean(tf.pow(tf.subtract(predicted_x, X), 2))
            optimizer = tf.train.AdamOptimizer().minimize(cost)
        with tf.Session(graph=graph) as session:
            # Initialize global variables
            session.run(tf.global_variables_initializer())
            # The stored encoder parameters are those from the last batch.
            for batch_x in batch_generator(self.features):
                self.encoder['weights'], self.encoder['bias'], _ = session.run([encoder_weights, encoder_bias, optimizer],
                                                                               feed_dict={X: batch_x})

    def reduce(self):
        # Linear projection with the learned encoder parameters.
        # NOTE(review): the sigmoid used during training is omitted here.
        return np.add(np.matmul(self.features, self.encoder['weights']), self.encoder['bias'])
# Mix the data before training
n = len(iris_dataset.data)  # module-level `n` is also read by batch_generator
random_idx = np.random.permutation(n)
features, labels = iris_dataset.data[random_idx], iris_dataset.target[random_idx]
# Create an instance and encode
tf_ae = TF_AutoEncoder(features, labels)
tf_ae.fit(n_dimensions=2)
auto_encoded = tf_ae.reduce()
# Recompute colors so they follow the shuffled labels.
colors = list(map(lambda x: color_mapping[x], labels))
plt.scatter(auto_encoded[:, 0], auto_encoded[:, 1], c=colors)
plt.show()
```
| github_jupyter |
```
import numpy as np
import tensorflow as tf
import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.misc.io.showing as showing
from lucid.misc.channel_reducer import ChannelReducer
import lucid.optvis.param as param
import lucid.optvis.objectives as objectives
import lucid.optvis.render as render
from lucid.misc.io import show, load
from lucid.misc.io.reading import read
from lucid.misc.io.showing import _image_url
import lucid.scratch.web.svelte as lucid_svelte
from lucid.misc.gradient_override import gradient_override_map
import sys
# Python 2 only: reload() as a builtin and sys.setdefaultencoding do not
# exist on Python 3 (this notebook section is Python 2 code).
reload(sys)
sys.setdefaultencoding('utf-8')
model = models.InceptionResnet2()
model.load_graphdef()
labels_str = read("/home/elebouder/TNSRVIS/lucid/labelfile.txt")
# Each line looks like "<id> <label>": strip the id, then strip a second
# prefix and replace underscores; prepend "dummy" so indexing is 1-based.
labels = [line[line.find(" "):].strip() for line in labels_str.split("\n")]
labels = [label[label.find(" "):].strip().replace("_", " ") for label in labels]
labels = ["dummy"] + labels
%%html_define_svelte GroupWidget
<div class="figure" style="width: 600px;">
<div class="outer" on:mouseover="set({pres_n: undefined})">
<img src="{{img}}">
{{#if pres_n != undefined}}
<img src="{{spatial_factors[pres_n]}}" class="overlay" >
{{/if}}
</div>
<div class="outer" on:mouseover="set({pres_n: undefined})">
<div style="width:100%; height: 100%; background-color: #000;"></div>
{{#each range(n_groups) as n}}
{{#if pres_n == undefined || pres_n == n}}
<img src="{{spatial_factors[n]}}" class="factor"
style="filter: hue-rotate({{n*360/n_groups}}deg);">
{{/if}}
{{/each}}
</div>
<br>
<br>
<div on:mouseleave="set({pres_n: undefined})">
{{#each range(n_groups) as n}}
<div class="group" style="background-color: hsl({{n*360/n_groups}}, 80%, 50%); "
on:mouseover="set({pres_n: n})">
<img src="{{group_icons[n]}}">
</div>
{{/each}}
</div>
</div>
<style>
.outer{
width: 224px;
height: 224px;
display: inline-block;
margin-right: 2px;
position: relative;
}
.outer img {
position: absolute;
left: 0px;
top: 0px;
width: 224px;
height: 224px;
image-rendering: pixelated;
}
.factor {
mix-blend-mode: lighten;
}
.overlay {
filter: grayscale(100%) brightness(3);
mix-blend-mode: multiply;
}
.group {
width: 80px;
height: 80px;
margin-right: 4px;
display: inline-block;
padding-top: 10px;
}
</style>
<script>
function range(n){
return Array(n).fill().map((_, i) => i);
}
export default {
data () {
return {
img: "",
n_groups: 1,
spatial_factors: [""],
group_icons: [""],
pres_n: undefined,
};
},
helpers: {range}
};
</script>
def raw_class_group_attr(img, layer, label, group_vecs, override=None):
    """How much did spatial positions at a given layer effect a output class?

    Returns one linear-approximation attribution score per group vector,
    or an all-zero array when no label is given.
    """
    # Set up a graph for doing attribution...
    with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        # Compute activations
        acts = T(layer).eval()
        if label is None:
            return np.zeros(acts.shape[1:-1])
        # Compute gradient of the class score w.r.t. the layer.
        # BUG FIX: both definitions of `score` were commented out, so the
        # line below raised NameError.  Restored the InceptionResnetV2
        # score (the model loaded in this notebook); the alternative for
        # GoogLeNet-style graphs was:
        #score = T("softmax2_pre_activation")[0, labels.index(label)]
        score = T("InceptionResnetV2/Logits/Predictions")[0, labels.index(label)]
        t_grad = tf.gradients([score], [T(layer)])[0]
        grad = t_grad.eval({T(layer): acts})
        # Linear approximation of effect of spatial position
        return [np.sum(group_vec * grad) for group_vec in group_vecs]
def neuron_groups(img, layer, n_groups=6, attr_classes=[]):
    """Factor a layer's activations into n_groups NMF groups and render them.

    NOTE(review): the mutable default for attr_classes is kept for
    interface compatibility; it is never mutated here.
    """
    # Compute activations
    with tf.Graph().as_default(), tf.Session():
        t_input = tf.placeholder_with_default(img, [None, None, 3])
        T = render.import_model(model, t_input, t_input)
        acts = T(layer).eval()

    # We'll use ChannelReducer (a wrapper around scikit learn's factorization tools)
    # to apply Non-Negative Matrix factorization (NMF).
    nmf = ChannelReducer(n_groups, "NMF")
    spatial_factors = nmf.fit_transform(acts)[0].transpose(2, 0, 1).astype("float32")
    channel_factors = nmf._reducer.components_.astype("float32")

    # Let's organize the channels based on their horizontal position in the image
    x_peak = np.argmax(spatial_factors.max(1), 1)
    ns_sorted = np.argsort(x_peak)
    spatial_factors = spatial_factors[ns_sorted]
    channel_factors = channel_factors[ns_sorted]

    # And create a feature visualization of each group
    param_f = lambda: param.image(80, batch=n_groups)
    obj = sum(objectives.direction(layer, channel_factors[i], batch=i)
              for i in range(n_groups))
    group_icons = render.render_vis(model, obj, param_f, verbose=False)[-1]

    # We'd also like to know about attribution.
    # First, let's turn each group into a vector over activations.
    group_vecs = [spatial_factors[i, ..., None] * channel_factors[i]
                  for i in range(n_groups)]
    attrs = np.asarray([raw_class_group_attr(img, layer, attr_class, group_vecs)
                        for attr_class in attr_classes])
    # FIX: `print attrs` is a Python 2-only statement; the call form below
    # behaves identically on both Python 2 and 3 for a single argument.
    print(attrs)

    # Let's render the visualization!
    lucid_svelte.GroupWidget({
        "img": _image_url(img),
        "n_groups": n_groups,
        "spatial_factors": [_image_url(factor[..., None] / np.percentile(spatial_factors, 99) * [1, 0, 0]) for factor in spatial_factors],
        "group_icons": [_image_url(icon) for icon in group_icons]
    })
# Run the group-factorization visualization on a chest X-ray for two
# mixed layers of InceptionResnet2, attributing to both classes.
img = load("/home/elebouder/Data/ChestXray-NIHCC/test.png")
neuron_groups(img, "InceptionResnetV2/Mixed_5b/concat", 6, ["Negative", "Positive"])
img = load("/home/elebouder/Data/ChestXray-NIHCC/test.png")
neuron_groups(img, "InceptionResnetV2/Mixed_6a/concat", 6, ["Negative", "Positive"])
```
| github_jupyter |
<img src="https://raw.githubusercontent.com/Qiskit/qiskit-tutorials/master/images/qiskit-heading.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# _*Hello Qiskit*_
Click [here](https://mybinder.org/v2/gh/QISKit/qiskit-tutorial/master?filepath=community/games/Hello_Qiskit.ipynb) to run this notebook in your browser using [Binder](https://mybinder.org).
The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.
***
### Contributors
James R. Wootton, IBM Research
***
## Level 1: Beginning with bits
This is a Jupyter notebook, in which you will run some puzzles and learn about quantum computing. Don't worry if you've never used a Jupyter notebook before. It just means you'll see lots of grey boxes with code in, like the one below. These are known as cells.
```
print("Hello! I'm a code cell")
```
You'll need to run the cells to use this tutorial. To run a cell, do the following.
* For laptops and desktops, click on the cell and press **Shift-Enter**.
* For mobile devices, tap on the icon that appears to the left of a cell.
Get started by doing this for the cell below (it will take a second or two to run).
```
# Set up: enable the interactive notebook backend and import the
# project-local game engine from game_engines/.
print('Set up started...')
%matplotlib notebook
import sys
sys.path.append('game_engines')
import hello_quantum
print('Set up complete!')
```
The rest of the cells in this notebook contain code that sets up puzzles for you to solve. To get the puzzles, just run the cells. To restart a puzzle, just rerun it.
If you want to know what the code is doing, or to create your own puzzles, see the guide [here](helper_functions/Making_your_own_hello_quantum.ipynb).
## Puzzle 1
### Intro
* Normal computers are made of bits, and quantum computers are made of qubits.
* Qubits are basically an upgraded version of bits, so let's make sure we understand basic bit-based programming first.
* The defining feature of a bit is that it has two possible output values.
* These are often called `1` and `0`, though we'll also be thinking of them as _on_ and _off_.
* We use bits represent and process information, but we typically need lots of them to do this.
* To help you understand how to manipulate bits, we'll give you one to play with.
* The simplest thing you can do to a bit (other than leave it alone) is the `NOT` gate.
* (The word 'gate' is used to describe basic computational commands)
* Try it out and see what happens.
### Exercise
* Use the `NOT` gate 3 times.
```
# Level 1, puzzle 1: one classical bit, no preset gates; the player
# applies the NOT gate on bit '0' (budget of 3 uses).
initialize = []
success_condition = {}
allowed_gates = {'0': {'NOT': 3}, '1': {}, 'both': {}}
vi = [[1], False, False]
qubit_names = {'0':'the only bit', '1':None}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
### Outro
* Here our bit was depicted by a circle that was either on (white) or off (black).
* The effect of the `NOT` gate was to turn it on and off.
* This flips our bit between `0` and `1`.
## Puzzle 2
### Intro
* Now let's do the same thing to a different bit.
* This will look the same as before. But because it is a different bit, it'll be in a different place.
### Exercise
* Turn the other bit on.
```
# Level 1, puzzle 2: NOT gate on the right-hand bit only (a budget of 0
# appears to mean unlimited uses — TODO confirm in hello_quantum).
initialize = []
success_condition = {}
allowed_gates = {'0': {}, '1': {'NOT': 0}, 'both': {}}
vi = [[], False, False]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
### Outro
* You've now mastered the NOT gate: the most basic building block of computing.
## Puzzle 3
### Intro
* To really process information stored in bits, computers need more than just `NOT`s.
* We need gates that let us manipulate some bits in a way that depends on other bits.
* The simplest example is the controlled-`NOT`, or `CNOT`.
* For this you need to choose one bit to be the _target_, and the other to be the _control_.
* The `CNOT` then does a `NOT` on the target bit, but only if the control bit is on.
### Exercise
* Use the `CNOT` to turn on the bit on the right.
* **Note**: The `CNOT` acts on both bits, but you still need to choose which will be the target bit.
```
# Level 1, puzzle 3: bit '0' starts flipped on; reach <IZ> = -1 with CNOTs.
initialize = [['x', '0']]
success_condition = {'IZ': -1.0}
allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}}
vi = [[], False, False]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
## Puzzle 4
### Intro
* Now let's try something where you'll need a couple of `CNOT`s.
### Exercise
* Use some CNOTs to turn the left bit off and the right bit on.
```
# Level 1, puzzle 4: bit '0' starts on; target <ZI> = +1 and <IZ> = -1
# (left off, right on) using CNOTs.
initialize = [['x', '0']]
success_condition = {'ZI': 1.0, 'IZ': -1.0}
allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}}
vi = [[], False, False]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
### Outro
* Well done!
* These kinds of manipulations are what all computing compiles down to.
* With more bits and a controlled-controlled-`NOT`, you can do everything from Tetris to self-driving cars.
## Puzzle 5
### Intro
* Qubits have some similarities to random bit values, so let's take a few exercises to think about randomness.
* For a bit that will give us a `0` or `1` with equal probability, we'll use a grey circle.
### Exercise
* Make the bit on the right random using a CNOT.
```
# Level 1, puzzle 5: '0' starts random (h); make the right bit's output
# random too (<IZ> = 0) with CNOTs.
initialize = [['h', '0']]
success_condition = {'IZ': 0.0}
allowed_gates = {'0': {'CNOT': 0}, '1': {'CNOT': 0}, 'both': {}}
vi = [[], False, False]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
### Outro
* Well done!
* If the left bit is off, the right bit stays off. If the left bit is on, the right bit gets switched on too.
* So the random value of the left bit is effectively copied over to the right by the `CNOT`.
* The end result was that both bits become random, but they nevertheless always output the same result.
* This is not the only way of having two random bits. We could also create cases where they are independently random, or always have different results.
* How can we keep track of how our random bits are correlated?
## Puzzle 6
### Intro
* To keep track of correlations, we'll add another circle.
* This doesn't represent a new bit. It simply tells us whether our two bits will agree or not.
* It will be off when they agree, on when they don't, and grey when they aren't correlated (so agreements and disagreements are random).
### Exercise
* Make the two bits always disagree (that means making the middle circle white).
```
# Level 1, puzzle 6: make the two bits always disagree (<ZZ> = -1).
initialize = [['h', '0']]
success_condition = {'ZZ': -1.0}
allowed_gates = {'0': {'NOT': 0, 'CNOT': 0}, '1': {'NOT': 0, 'CNOT': 0}, 'both': {}}
vi = [[], False, True]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
## Puzzle 7
### Intro
* Now you know pretty much need all you need to know about bits.
* Let's have one more exercise before we move on.
### Exercise
* Turn on the bit on the right.
```
# Level 1, puzzle 7: '1' starts random (h); turn it fully on (<IZ> = -1).
initialize = [['h', '1']]
success_condition = {'IZ': -1.0}
allowed_gates = {'0': {'NOT': 0, 'CNOT': 0}, '1': {'NOT': 0, 'CNOT': 0}, 'both': {}}
vi = [[], False, True]
qubit_names = {'0':'the bit on the left', '1':'the bit on the right'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
### Outro
* Time to move on to qubits!
## Level 2: Basic single qubit gates
## Puzzle 1
### Intro
* There are many types of variables that you can have in a computer program.
* Here we introduce you to a new one: the qubit.
* Before we explain what they are, we'll give you one to play with.
### Exercise
* Use the `x` gate 3 times, and see what happens
```
# Level 2, puzzle 1: a single qubit preset with an x; apply x (budget 3)
# to return to <ZI> = +1.
initialize = [ ["x","0"] ]
success_condition = {"ZI":1.0}
allowed_gates = { "0":{"x":3}, "1":{}, "both":{} }
vi = [[1],True,True]
qubit_names = {'0':'the only qubit', '1':None}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
## Puzzle 2
### Intro
* You have now mastered turning a circle on and off :)
* But what does that actually mean? What is it useful for? To answer that, you need to know something about qubits.
* Basically, they are quantum objects from which we can extract a simple bit value: `0` or `1`.
* There are many ways we can do this, and the result we get depends on the method we use.
* The two circles in the last puzzle represent two different ways we could get a bit out of the same qubit.
* They are usually called z measurements and x measurements, but we'll simply call them the top and bottom outputs in this tutorial.
* A black circle means that the corresponding output would give the value `0`. A white one means we'd get a `1`.
* The `x` gate has the effect of a `NOT` on the bottom output. It has no effect on the top.
### Exercise
* Turn off the bottom circle.
```
# Level 2, puzzle 2: undo the initial x so the bottom (z) output reads 0
# (<ZI> = +1).
initialize = [['x', '0']]
success_condition = {'ZI': 1.0}
allowed_gates = {'0': {'x': 0}, '1': {}, 'both': {}}
vi = [[1], True, True]
qubit_names = {'0':'the only qubit', '1':None}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
### Outro
* The process of extracting a bit from a qubit is called 'measurement'. The `measure` gate in Qiskit always extracts the bit from the bottom output.
* The top output is acquired indirectly, using the `h` gate that you will learn about soon.
## Puzzle 3
### Intro
* Now let's look at another qubit.
* This will also have its inner workings represented by two circles.
* But because it's a different qubit, these circles are in a different place.
### Exercise
* Turn the bottom circle of the other qubit off.
```
# Level 2, puzzle 3: the same task on the other qubit ('1'): <IZ> = +1.
initialize = [['x', '1']]
success_condition = {'IZ': 1.0}
allowed_gates = {'0': {}, '1': {'x': 0}, 'both': {}}
vi = [[0], True, True]
qubit_names = {'0':None, '1':'the other qubit'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
### Outro
* From now on, we'll start calling the qubits by more programming-friendly names.
* The one on the left will be `q[0]`, and the one on the right will be `q[1]`.
## Puzzle 4
### Intro
* Now it's time to try a new gate: the `h` gate.
* This swaps the two circles of the qubit that it's applied to.
* If you want to see this in a nice animated form, check out the [Hello Quantum](https://helloquantum.mybluemix.net/) app.
* But while you are here, test it out with the old trick of repeating three times.
### Exercise
* Use the `h` gate 3 times.
```
# Level 2, puzzle 4: apply h three times on q[0]; target <ZI> = 0
# (random bottom output).
initialize = []
success_condition = {'ZI': 0.0}
allowed_gates = {'0': {'h': 3}, '1': {}, 'both': {}}
vi = [[1], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
```
### Outro
* This made the bottom circle become grey, which corresponds to a random output.
* Note that you now have 'Your quantum program so far' being printed at the bottom.
* This is the actual Qiskit code to create the circuit you made in the puzzle.
* To see a circuit diagram of this, run the code cell below.
```
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 5
### Intro
* We know what `x` does to a circle that's fully off (it turns it on) or fully on (it turns it off).
* But what does it do to one of these random grey ones?
* By solving this exercise, you'll find out.
### Exercise
* Get the bottom circle fully off. You can use as many `h` gates as you like, but use `x` exactly 3 times.
```
# Level 2, puzzle 5: q[1] starts random (h); reach <IZ> = +1 using
# exactly 3 x gates plus any number of h gates, then draw the circuit.
initialize = [['h', '1']]
success_condition = {'IZ': 1.0}
allowed_gates = {'0': {}, '1': {'x': 3, 'h': 0}, 'both': {}}
vi = [[0], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* It turns out that a random result is just a random result, even if you flip it.
## Puzzle 6
### Intro
* Another important gate is called `z`.
* This works similar to `x`, except that it acts as a `NOT` on the top circle instead of the bottom.
### Exercise
* Turn the top circle off.
```
# Level 2, puzzle 6: q[0] starts with h then z; reach <XI> = +1 (top
# circle off) using z and h.
initialize = [['h', '0'], ['z', '0']]
success_condition = {'XI': 1.0}
allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {}, 'both': {}}
vi = [[1], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 7
### Intro
* The `z`, when combined with `h`, can be used to do the job of an `x`
### Exercise
* Turn on the bottom circle without using the `x` gate
```
# Level 2, puzzle 7: reach <ZI> = -1 (bottom circle on) using only z and
# h — i.e. synthesize an x from h z h.
initialize = []
success_condition = {'ZI': -1.0}
allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {}, 'both': {}}
vi = [[1], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 8
### Intro
* You might notice that the top circles are always random when the bottom circles are fully on or off.
* This is because qubits can never be simultaneously certain about each kind of output.
* If they are certain about one, the other must be random.
* Quantum computing is all about making sure that your certainty and your randomness are in the right place.
### Exercise
* Move the off to the top and the randomness to the bottom.
```
# Level 2, puzzle 8: reach <IX> = +1 using z and h on qubit '1'.
initialize = [['h', '0']]
success_condition = {'IX': 1.0}
allowed_gates = {'0': {}, '1': {'z': 0, 'h': 0}, 'both': {}}
vi = [[0], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 9
### Intro
* We can also share the limited certainty of our qubits between the two outputs.
* Then both can be mostly certain about the output they give, even if they aren't fully certain.
* In this exercise, the two circles for q[1] will start off dark grey. This means that both outputs would be highly likely, but not certain, to output a `0`.
### Exercise
* Make the two circles for q[1] both light grey. This means that they'd be highly likely, but not certain, to output a `1`.
```
# Level 2, puzzle 9: q[1] starts with ry(pi/4) (both circles dark grey);
# target <IZ> = <IX> = -0.7071 (both light grey).
initialize = [['ry(pi/4)', '1']]
success_condition = {'IZ': -0.7071, 'IX': -0.7071}
allowed_gates = {'0': {}, '1': {'z': 0, 'h': 0}, 'both': {}}
vi = [[0], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 10
### Intro
* Now you know the basic tools, you can tackle both qubits at once.
### Exercise
* Make both bottom outputs random.
```
# Level 2, puzzle 10: make both bottom outputs random (<ZI> = <IZ> = 0).
initialize = [['x', '1']]
success_condition = {'ZI': 0.0, 'IZ': 0.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, False]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* Each bottom output here would randomly output a `0` or a `1`.
* But will their outputs be correlated? Anti-correlated? Completely unrelated?
* Just as we did with bits, we'll keep track of this information with some extra circles.
## Puzzle 11
### Intro
* In this puzzle you'll see four new circles.
* You've already been introduced to the lower one in Level 1.
* It does the same job here, keeping track of whether the bottom output of one qubit will agree with the bottom output of the other.
* If the two bottom outputs would definitely agree, this circle is off (black). If they'd disagree, it's on (white).
### Exercise
* Make the bottom outputs certain to disagree.
```
# Level 2, puzzle 11: both qubits start with h; make the bottom outputs
# certain to disagree (<ZZ> = -1).
initialize = [['h','0'],['h','1']]
success_condition = {'ZZ': -1.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 12
### Intro
* The new circle at the very top has a similar job.
* It keeps track of whether the top output from one qubit would agree with the top output from the other.
### Exercise
* Make the top outputs certain to agree.
```
# Level 2, puzzle 12: make the top outputs certain to agree (<XX> = +1).
initialize = [['x','0']]
success_condition = {'XX': 1.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 13
### Intro
* The other new circles tell us whether the bottom output for one qubit would agree with the top one from the other.
### Exercise
* Make the top output for q[0] certain to disagree with the bottom output for q[1].
```
# Level 2, puzzle 13: make q[0]'s top output disagree with q[1]'s
# bottom output (<XZ> = -1).
initialize = []
success_condition = {'XZ': -1.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 14
### Intro
* Notice how the `x`, `z` and `h` gates affect the new circles.
* Specifically, the `x` gates don't just flip a single circle between black and white, but a whole column full of them.
### Exercise
* Turn the two bottom outputs on as much as you can.
```
# Goal: with only x gates, turn the two bottom outputs on as much as possible (<ZI> = <IZ> = -0.7071).
initialize = [['ry(-pi/4)', '1'], ['ry(-pi/4)','0']]
success_condition = {'ZI': -0.7071, 'IZ': -0.7071}
allowed_gates = {'0': {'x': 0}, '1': {'x': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 15
### Intro
* The `z` gates affect the top columns in a similar way.
* The `h` gate swaps the bottom and top columns.
### Exercise
* Turn off the top circles.
```
# Goal: turn off the top circles using only z and h gates (<XI> = <IX> = +1).
initialize = [['x', '1'], ['x','0']]
success_condition = {'XI':1, 'IX':1}
allowed_gates = {'0': {'z': 0, 'h': 0}, '1': {'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Level 3: Two qubit gates
## Puzzle 1
### Introduction
* In the exercises on bits, we used the `CNOT` gate.
* This can be used on qubits too!
* Since the `x` gate serves as our quantum `NOT`, we will use the `cx` gate to do a `CNOT` on qubits.
* Again, these have a _control_ and a _target_: The bottom circle of the control qubit decides whether an `x` is applied to the target qubit.
* When you apply this gate, the qubit you choose will serve as the target. The other qubit will then be the control.
### Exercise
* Use a `cx` or two to turn on the bottom circle of q[1], and turn off the bottom circle of q[0].
```
# Goal: use cx gates to turn on the bottom circle of q[1] and turn off the bottom circle of q[0].
initialize = [['x', '0']]
success_condition = {'ZI': 1.0, 'IZ': -1.0}
allowed_gates = {'0': {'cx': 0}, '1': {'cx': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 2
### Introduction
* As well as a `cx` gate, there is also the `cz`.
* This applies a `z` to the target when the bottom circle of the control is on
### Exercise
* Turn on the top circle of q[0], and turn off the bottom circle of q[1].
```
# Goal: turn on the top circle of q[0] and turn off the bottom circle of q[1] (<XI> = -1, <IZ> = +1).
initialize = [['h', '0'],['x', '1']]
success_condition = {'XI': -1.0, 'IZ': 1.0}
allowed_gates = {'0': {'h': 0, 'cz': 0}, '1': {'cx': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 3
### Introduction
* There's another way that we can explain what a `cz` is doing.
- It swaps the top circle of q[0] with the neighbouring circle above it.
- It does the same with the top circle of q[1].
* It also does something weird with the circle at the top of the grid.
### Exercise
* Do the `cz` twice with each qubit as control, and see what happens.
```
# Exploration puzzle (no win condition): apply cz twice with each qubit as control and observe the effect.
initialize = [['h', '0'],['x', '1'],['h', '1']]
success_condition = { }
allowed_gates = {'0':{'cz': 2}, '1':{'cz': 2}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* As you might have noticed, it doesn't matter which qubit you choose as control: the `cz` does the same thing in either case.
* So from now on, choosing the control qubit for the `cz` is not required.
## Puzzle 3b
### Introduction
* As mentioned earlier, the top and bottom circles correspond to two ways of getting an output from a qubit.
* There is actually also a third way, known as the y measurement.
* This tutorial ignores the y measurements, to make things a little simpler.
* The only time their absence can be noticed is when `cx` or `cz` gates are used.
* The 'something weird with the circle at the top of the grid' is not so weird when we add in a middle line of circles for the outputs of y measurements.
* Do the same puzzle as before with these new circles, and see for yourself.
### Exercise
* Do the `cz` twice and see what happens.
```
# Same exploration as before, but with mode='y' adding the middle row of y-measurement circles.
initialize = [['h', '0'],['x', '1'],['h', '1']]
success_condition = { }
allowed_gates = {'0': {}, '1': {}, 'both': {'cz': 2}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='y')
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* The circle at the top, is replaced by the one in the middle.
* It only seemed strange before because the middle rows were missing.
* By default, we'll do without these rows from now on.
* You can add them into puzzles yourself by using the `mode='y'` argument in `hello_quantum.run_game()`.
## Puzzle 4
### Introduction
* In a previous exercise, you've built an `x` from a `z` and some `h`s.
* In the same way, it's possible to build a `cx` from a `cz` and some `h`s.
### Exercise
* Turn on the bottom circle of q[1].
```
# Goal: build a cx out of a cz and h gates, turning on the bottom circle of q[1] (<IZ> = -1).
initialize = [['x', '0']]
success_condition = {'IZ': -1.0}
allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz': 0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* Unlike the `cz`, the `cx` is not symmetric.
* If you instead wanted to make a `cx` whose target was `q[0]`, you would have to do the `h`s on `q[0]` instead.
## Puzzle 5
### Introduction
* Because the `cx` isn't symmetric, let's try to interpret it 'backwards'.
* So instead of thinking of it as doing an `x` on the target depending on what the bottom circle of the control is doing...
* ... we can think of it as doing a `z` to the control depending on what the top circle of the target is doing.
* With this interpretation, it seems like it is the target doing the controlling, and the control being the target!
* In case you don't believe me, here's an exercise to test out this very property
### Exercise
* Turn on the top circle of q[0].
```
# Goal: demonstrate the 'backwards' reading of cx — turn on the top circle of q[0]
# (the success condition also requires <IX> = -1).
initialize = [['h', '0'],['h', '1']]
success_condition = {'XI': -1.0, 'IX': -1.0}
allowed_gates = {'0': {}, '1': {'z':0,'cx': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* So there's two different stories about how the `cx` works. Though they may seem to be contradictory, they are equally good descriptions.
* This is a great example of the weird and wonderful nature of quantum gates.
## Puzzle 6
### Introduction
* These two interpretations of a `cx` can help us do something pretty useful: turning one around.
* Suppose you need a `cx` with q[1] as target, but you can only do one with q[0] as target.
* Can we somehow get the effect we need?
### Exercise
* Turn on the bottom circle of q[1].
```
# Goal: turn a cx around using h gates — turn on the bottom circle of q[1] (<IZ> = -1)
# even though the available cx targets q[0].
initialize = []
success_condition = {'IZ': -1.0}
allowed_gates = {'0': {'x':0,'h':0,'cx':0}, '1': {'h':0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* If you remember anything from these exercises, it should probably be this.
* It is common for real qubit devices to limit which way around you can do the `cx`.
* So the ability to turn them around comes in very handy.
## Puzzle 7
### Introduction
* The `cz` and `cx` gates can also be used to make a `swap`.
* This does exactly what the name suggests: it swaps the states of two qubits.
### Exercise
* Swap the two qubits:
- Make the bottom circle white and the top circle grey for q[0];
- Make the bottom circle dark grey and the top circle light grey for q[1].
```
# Goal: swap the states of the two qubits using h and cz gates.
initialize = [['ry(-pi/4)','0'],['ry(-pi/4)','0'],['ry(-pi/4)','0'],['x','0'],['x','1']]
success_condition = {'ZI': -1.0,'XI':0,'IZ':0.7071,'IX':-0.7071}
allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz': 0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* Note that your solution to this puzzle might not have been a general purpose `swap`.
* Compare your solution to those for the next few puzzles, which also implement swaps.
## Puzzle 8
#### Intro
* Another puzzle based on the `swap`.
#### Exercise
* Swap the two qubits:
* Make the top circle black for q[0].
* Make the bottom white for q[1].
* And do it with 3 `cz` gates
```
# Goal: swap the two qubits using exactly 3 cz gates (plus h's); sampled with 2000 shots.
initialize = [['x','0'],['h','1']]
success_condition = {'IX':1,'ZI':-1}
allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz':3}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names,shots=2000)
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 9
#### Intro
* Another puzzle based on the `swap`.
#### Exercise
* Swap the two qubits:
* Turn off the bottom circle for q[0].
* Turn on the bottom circle q[1].
```
# Goal: swap the two qubits — bottom circle of q[0] off, bottom circle of q[1] on.
initialize = [['x','1']]
success_condition = {'IZ':1.0,'ZI':-1.0}
allowed_gates = {'0': {'h':0}, '1': {'h':0}, 'both': {'cz':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names,shots=2000)
hello_quantum.get_circuit(puzzle).draw()
```
# Level 4: Beyond Clifford gates
## Puzzle 1a
### Introduction
* The gates you've seen so far are called the 'Clifford gates'.
* They are very important for moving and manipulating information in quantum computers
* But to create algorithms that will outperform standard computers, we need more gates
* This puzzle has one for you to try. Simply do it a few times, and see if you can work out what it does.
### Exercise
* Apply `ry(pi/4)` four times to q[0].
```
# Exploration puzzle (no win condition): apply ry(pi/4) four times to q[0] and observe its effect.
initialize = []
success_condition = {}
allowed_gates = {'0': {'ry(pi/4)': 4}, '1': {}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names)
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* If you were able to work it out, you are some sort of genius!
* For those of us who are mere mortals, let's try something new to help us figure it out.
## Puzzle 1b
### Introduction
* To understand this gate, we need to slightly change the way we visualize the qubits.
* From now on, an output that is certain to give `0` will be represented by a white line rather than a white circle.
* And an output certain to give `1` will be a black line instead of a black circle.
* For a random output, you'll see a line that's part white and part black instead of a grey circle.
* Here's an old exercise to help you get used to this new visualization.
### Exercise
* Make the top outputs certain to agree.
```
# Repeat of the 'top outputs agree' puzzle, now shown with the line-based visualization (mode='line').
initialize = [['x','0']]
success_condition = {'XX': 1.0}
allowed_gates = {'0': {'x': 0, 'z': 0, 'h': 0}, '1': {'x': 0, 'z': 0, 'h': 0}, 'both': {}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 1c
### Introduction
* In this puzzle you'll see a new gate: `bloch`.
* This doesn't actually do anything to the qubits. It just draws the two lines for each qubit on top of each other.
* It also puts a point where the two lines intersect.
* Using `bloch`, you should hopefully be able to figure out how `ry(pi/4)` works.
### Exercise
* Turn the bottom line of q[0] fully on, and use the `bloch` gate.
```
# Goal: turn the bottom line of q[0] fully on (<ZI> = -1), using the 'bloch' view to see the rotation.
initialize = []
success_condition = {'ZI': -1.0}
allowed_gates = {'0': {'bloch':1, 'ry(pi/4)': 0}, '1':{}, 'both': {'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
hello_quantum.get_circuit(puzzle).draw()
```
### Outro
* As you probably noticed, this doesn't just combine the two lines for each qubit. It combines their whole columns.
* If we follow the points, the effect of `ry(pi/4)` is to rotate them.
* Each application moves it an eighth of the way around the circle, and moves the positions of the lines along with it.
* The `ry(-pi/4)` gate is the same, except the rotation is in the other direction
## Puzzle 2
### Introduction
* Now let's use these gates on the other qubit too.
### Exercise
* Turn the bottom lines fully on.
```
# Goal: use ry rotations on both qubits to turn the bottom lines fully on (<ZI> = <IZ> = -1).
initialize = [['h','0'],['h','1']]
success_condition = {'ZI': -1.0,'IZ': -1.0}
allowed_gates = {'0': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'unbloch':0}}
vi = [[], True, True]
# Fixed inconsistent label: every other puzzle in this notebook names qubit 1 'q[1]', not 'qubit1'.
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 3
### Introduction
* Here's a puzzle you could solve with a simple `cx`, or a `cz` and some `h`s.
* Unfortunately you have neither `cx` nor `h`.
* So you'll need to work out how `cz` and `ry`s can do the job.
### Exercise
* Make the bottom outputs agree.
```
# Goal: make the bottom outputs agree (<ZZ> = +1) using only cz and ry rotations (no cx or h).
initialize = [['h','0']]
success_condition = {'ZZ': 1.0}
allowed_gates = {'0': {}, '1': {'bloch':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'unbloch':0,'cz':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 4
### Introduction
* Using `x`s or `z`s you can effectively _reflect_ an `ry`, to make it move in the opposite direction.
### Exercise
* Turn the bottom outputs fully off with just one `ry(pi/4)` on each.
```
# Goal: turn the bottom outputs fully off (<ZI> = <IZ> = +1) with just one ry(pi/4) per qubit,
# using x/z gates to reflect the rotation direction.
initialize = [['ry(pi/4)','0'],['ry(pi/4)','1']]
success_condition = {'ZI': 1.0,'IZ': 1.0}
allowed_gates = {'0': {'bloch':0, 'z':0, 'ry(pi/4)': 1}, '1': {'bloch':0, 'x':0, 'ry(pi/4)': 1}, 'both': {'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
hello_quantum.get_circuit(puzzle).draw()
```
## Puzzle 5
### Introduction
* With the `ry`s, we can make conditional gates that are more interesting than just `cz` and `cx`.
* For example, we can make a controlled-`h`.
### Exercise
* Turn off the bottom output for q[1] using exactly one `ry(pi/4)` and `ry(-pi/4)` on that qubit.
```
# Goal: build a controlled-h — turn off the bottom output of q[1] (<IZ> = +1) using exactly
# one ry(pi/4) and one ry(-pi/4) on that qubit.
initialize = [['x','0'],['h','1']]
success_condition = {'IZ': 1.0}
allowed_gates = {'0': {}, '1': {'bloch':0, 'cx':0, 'ry(pi/4)': 1, 'ry(-pi/4)': 1}, 'both': {'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
hello_quantum.get_circuit(puzzle).draw()
```
## Sandbox
You now know enough basic quantum gates to build fully powerful quantum programs. You'll get a taste of this in the next Level. Until then, here are two grids with all the gates enabled, so you can have a play around.
Here's one with the line-based visualization, to help with the non-Clifford gates.
```
# Sandbox: all gates enabled, line-based visualization.
initialize = []
success_condition = {'IZ': 1.0,'IX': 1.0}
allowed_gates = {'0': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'cz':0, 'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
line_sandbox = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
hello_quantum.get_circuit(line_sandbox).draw()
```
Here is a grid with the middle lines, which describe outputs for y measurements. With this you can also try some new non-Clifford gates: 'rx(pi/4)' and 'rx(-pi/4)'.
```
# Sandbox: all gates enabled, including rx rotations, with the y-measurement rows shown (mode='y').
initialize = []
success_condition = {'IZ': 1.0,'IX': 1.0}
allowed_gates = {'0': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0, 'rx(pi/4)': 0, 'rx(-pi/4)': 0}, '1': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0, 'rx(pi/4)': 0, 'rx(-pi/4)': 0}, 'both': {'cz':0, 'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'q[0]', '1':'q[1]'}
y_sandbox = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='y')
hello_quantum.get_circuit(y_sandbox).draw()
```
## Level 5: Proving the Uniqueness of Quantum Variables
## Bell test for classical variables
Here we'll investigate how quantum variables (based on qubits) differ from standard ones (based on bits).
We'll do this by creating a pair of variables, which we will call `A` and `B`. We aren't going to put any conditions on what these can be, or how they are initialized. So there are a lot of possibilities:
* They could be any kind of variable, such as
* `int`
* `list`
* `dict`
* ...
* They could be initialized by any kind of process, such as
* left empty
* filled with a given set of values
* generated by a given random process
* independently applied to `A` and `B`
* applied to `A` and `B` together, allowing for correlations between their randomness
If the variables are initialized by a random process, it means they'll have different values every time we run our program. This is perfectly fine. The only rule we need to obey is that the process of generating the randomness is the same for every run.
We'll use the function below to set up these variables. This currently has `A` and `B` defined as to be partially correlated random floating point numbers. But you can change it to whatever you want.
```
import random
def setup_variables ():
    """Create the pair of classical variables A and B.

    The example below makes them partially correlated: both are scaled
    copies of a single shared random number. Replace the marked section
    with any process you like, as long as the same process is used on
    every run.
    """
    ### Replace this section with anything you want ###
    shared = random.random()
    A = shared * (2 / 3)
    B = shared * (1 / 3)
    ### End of section ###
    return A, B
```
Our next job is to define a hashing function. This simply needs to take one of the variables as input, and then give a bit value as an output.
This function must also be capable of performing two different types of hash. So it needs to be able to chew on a variable and spit out a bit in two different ways. Another input to the function is therefore the kind of hash we want to use.
To be consistent with the rest of the program, the two possible hash types should be called `'H'` and `'V'`. Also, the output must be in the form of a single value bit string: either `'0'` or `'1'`.
In the (fairly arbitrary) example given, the bits were created by comparing `A` and `B` to a certain value. The output is `'1'` if they are under that value, and `'0'` otherwise. The type of hash determines the value used.
```
def hash2bit ( variable, hash ):
    """Hash a variable down to a single bit, returned as the string '0' or '1'.

    Parameters
    ----------
    variable : the value to hash (a float in the example setup above).
    hash : either 'V' or 'H', selecting one of the two hash types.
        (The parameter name shadows the builtin `hash`; it is kept for
        compatibility with the rest of the notebook.)

    Returns
    -------
    str : '1' if `variable` is below the threshold for the chosen hash
        type (0.5 for 'V', 0.25 for 'H'), otherwise '0'.

    Raises
    ------
    ValueError : if `hash` is neither 'V' nor 'H'.
    """
    ### Replace this section with anything you want ###
    if hash=='V':
        bit = (variable<0.5)
    elif hash=='H':
        bit = (variable<0.25)
    else:
        # Previously an unknown hash type crashed with UnboundLocalError;
        # fail with a clear message instead.
        raise ValueError("hash type must be 'V' or 'H', got " + repr(hash))
    bit = str(int(bit))
    ### End of section ###
    return bit
```
Once these are defined, there are four quantities we wish to calculate: `P['HH']`, `P['HV']`, `P['VH']` and `P['VV']`.
Let's focus on `P['HV']` as an example. This is the probability that the bit value derived from an `'H'` type hash on `A` is different from that derived from a `'V'` type hash on `B`. We will estimate this probability by sampling many times and determining the fraction of samples for which the corresponding bit values disagree.
The other probabilities are defined similarly: `P['HH']` compares a `'H'` type hash on both `A` and `B`, `P['VV']` compares a `V` type hash on both, and `P['VH']` compares a `V` type hash on `A` with a `H` type hash on `B`.
These probabilities are calculated in the following function, which returns all the values of `P` in a dictionary. The parameter `shots` is the number of samples we'll use.
```
shots = 8192
def calculate_P ():
    """Estimate the four disagreement probabilities P['VV'], P['VH'],
    P['HV'] and P['HH'], each from `shots` freshly sampled (A, B) pairs.
    """
    def sampled_disagreement(pair):
        # One sample: fresh variables, A hashed with pair[0], B with pair[1].
        A, B = setup_variables()
        return hash2bit(A, pair[0]) != hash2bit(B, pair[1])

    P = {}
    for pair in ['VV','VH','HV','HH']:
        P[pair] = 0
        for _ in range(shots):
            P[pair] += sampled_disagreement(pair) / shots
    return P
```
Now let's actually calculate these values for the method we have chosen to set up and hash the variables.
```
# Estimate the four probabilities with the classical setup/hash functions defined above.
P = calculate_P()
print(P)
```
These values will vary slightly from one run to the next due to the fact that we only use a finite number of shots. To change them significantly, we need to change the way the variables are initiated, and/or the way the hash functions are defined.
No matter how these functions are defined, there are certain restrictions that the values of `P` will always obey.
For example, consider the case that `P['HV']`, `P['VH']` and `P['VV']` are all `0.0`. The only way that this can be possible is for `P['HH']` to also be `0.0`.
To see why, we start by noting that `P['HV']=0.0` is telling us that `hash2bit ( A, H )` and `hash2bit ( B, V )` were never different in any of the runs. So this means we can always expect them to be equal.
hash2bit ( A, H ) = hash2bit ( B, V ) (1)
From `P['VV']=0.0` and `P['VH']=0.0` we can similarly get
hash2bit ( A, V ) = hash2bit ( B, V ) (2)
hash2bit ( A, V ) = hash2bit ( B, H ) (3)
Putting (1) and (2) together implies that
hash2bit ( A, H ) = hash2bit ( A, V ) (4)
Combining this with (3) gives
hash2bit ( A, H ) = hash2bit ( B, H ) (5)
And if these values are always equal, we'll never see a run in which they are different. This is exactly what we set out to prove: `P['HH']=0.0`.
More generally, we can use the values of `P['HV']`, `P['VH']` and `P['VV']` to set an upper limit on what `P['HH']` can be. By adapting the [CHSH inequality](https://en.wikipedia.org/wiki/CHSH_inequality) we find that
$\,\,\,\,\,\,\,$ `P['HH']` $\, \leq \,$ `P['HV'] + P['VH'] + P['VV']`
This is not just a special property of `P['HH']`. It's also true for all the others: each of these probabilities cannot be greater than the sum of the others.
To test whether this logic holds, we'll see how well the probabilities obey these inequalities. Note that we might get slight violations due to the fact that the `P` values aren't exact, but are estimations made using a limited number of samples.
```
def bell_test (P):
    """Check each probability in `P` against its CHSH-style upper bound.

    No entry may exceed the sum of the other three. A verdict is printed
    for each entry; overshoots smaller than 0.1 are attributed to
    statistical noise.
    """
    total = sum(P.values())
    for hashes, value in P.items():
        # The bound for this entry is the sum of all the *other* entries.
        bound = total - value
        print(f"The upper bound for P['{hashes}'] is {bound}")
        print(f"The value of P['{hashes}'] is {value}")
        if value <= bound:
            print("The upper bound is obeyed :)\n")
        elif value - bound < 0.1:
            print("This seems to have gone over the upper bound, but only by a little bit :S\nProbably just rounding errors or statistical noise.\n")
        else:
            print("!!!!! This has gone well over the upper bound :O !!!!!\n")
# Check the estimated probabilities against the CHSH-style bounds.
bell_test(P)
```
With the initialization and hash functions provided in this notebook, the value of `P('HV')` should be pretty much the same as the upper bound. Since the numbers are estimated statistically, and therefore are slightly approximate due to statistical noise, you might even see it go a tiny bit over. But you'll never see it significantly surpass the bound.
If you don't believe me, try it for yourself. Change the way the variables are initialized, and how the hashes are calculated, and try to get one of the bounds to be significantly broken.
## Bell test for quantum variables
Now we are going to do the same thing all over again, except our variables `A` and `B` will be quantum variables. Specifically, they'll be the simplest kind of quantum variable: qubits.
When writing quantum programs, we have to set up our qubits and bits before we can use them. This is done by the function below. It defines a register of two bits, and assigns them as our variables `A` and `B`. It then sets up a register of two bits to receive the outputs, and assigns them as `a` and `b`.
Finally it uses these registers to set up an empty quantum program. This is called `qc`.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
def initialize_program ():
    """Create two qubits (A, B), two output bits (a, b), and an empty circuit.

    Returns the individual qubits, the individual classical bits, and the
    QuantumCircuit `qc` that contains both registers.
    """
    qubit = QuantumRegister(2)
    bit = ClassicalRegister(2)
    A, B = qubit[0], qubit[1]
    a, b = bit[0], bit[1]
    qc = QuantumCircuit(qubit, bit)
    return A, B, a, b, qc
```
Before we start writing the quantum program to set up our variables, let's think about what needs to happen at the end of the program. This will be where we define the different hash functions, which turn our qubits into bits.
The simplest way to extract a bit from a qubit is through the `measure` gate. This corresponds to the bottom circle of a qubit in the visualization we've been using. Let's use this as our `V` type hash.
For the output that corresponds to the top circle, there is no direct means of access. However, we can do it indirectly by first doing an `h` to swap the top and bottom circles, and then using the `measure` gate. This will be our `H` type hash.
Note that this function has more inputs than its classical counterpart. We have to tell it the `bit` on which to write the result, and the quantum program, `qc`, on which we write the gates.
```
def hash2bit ( variable, hash, bit, qc ):
    """Append gates to `qc` that hash qubit `variable` into classical `bit`.

    A 'V' hash is a plain measurement. An 'H' hash first applies an `h`
    gate (swapping the top and bottom circles) before measuring.
    """
    wants_h_basis = (hash == 'H')
    if wants_h_basis:
        qc.h(variable)
    qc.measure(variable, bit)
```
Now it's time to set up the variables `A` and `B`. To write this program, you can use the grid below. You can either follow the suggested exercise, or do whatever you like. Once you are ready, just move on. The cell containing the `setup_variables()` function, will then use the program you wrote with the grid.
Note that our choice of hash functions means that the probabilities `P['HH']`, `P['HV']`, `P['VH']` and `P['VV']` will explicitly correspond to circles on our grid. For example, the circle at the very top tells us how likely the two top outputs would be to disagree. If this is white, then `P['HH']=1` , if it is black then `P['HH']=0`.
### Exercise
* Make it so that outputs from the top circles of both qubits are most likely to disagree, whereas all other combinations of outputs are most likely to agree.
```
# Goal: prepare the Bell-test state — top outputs most likely to disagree (<XX> = -0.7071)
# while all other output combinations are most likely to agree (+0.7071).
initialize = []
success_condition = {'ZZ':+0.7071,'ZX':+0.7071,'XZ':+0.7071,'XX':-0.7071}
allowed_gates = {'0': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, '1': {'bloch':0, 'x':0, 'z':0, 'h':0, 'cx':0, 'ry(pi/4)': 0, 'ry(-pi/4)': 0}, 'both': {'cz':0, 'unbloch':0}}
vi = [[], True, True]
qubit_names = {'0':'A', '1':'B'}
puzzle = hello_quantum.run_game(initialize, success_condition, allowed_gates, vi, qubit_names, mode='line')
```
Now the program as written above will be used to set up the quantum variables.
```
import numpy as np
def setup_variables ( A, B, qc ):
    """Apply the gate sequence recorded by the puzzle above to circuit `qc`.

    `A`, `B` and `qc` are referenced by the evaluated strings —
    presumably commands like "qc.h(A)" built by `hello_quantum.run_game`;
    confirm against the hello_quantum module.
    """
    # NOTE(review): eval of strings taken from the global `puzzle` object.
    # Fine for this tutorial, but never eval untrusted input.
    for line in puzzle.program:
        eval(line)
```
The values of `P` are calculated in the function below. This is done by sending jobs to IBM via Qiskit and getting results which tell us how many of the samples gave each possible result. The results are given as a bit string, `string`, which Qiskit numbers from right to left. This means that the value of `a`, which corresponds to `bit[0]` is the first from the right
a = string[-1]
and the value of `b` is right next to it at the second from the right
b = string[-2]
The number of samples for this bit string is provided by the dictionary of results, `stats`, as `stats[string]`.
```
shots = 8192
from qiskit import execute
def calculate_P ( backend ):
    """Build one circuit per pair of hash types, run them all on `backend`,
    and return the probability that the two output bits disagree for each
    of 'VV', 'VH', 'HV' and 'HH'.
    """
    hash_pairs = ['VV','VH','HV','HH']
    circuits = {}
    # Build one program per pair: set up A and B, then hash each into its bit.
    for pair in hash_pairs:
        A, B, a, b, circuits[pair] = initialize_program()
        setup_variables(A, B, circuits[pair])
        hash2bit(A, pair[0], a, circuits[pair])
        hash2bit(B, pair[1], b, circuits[pair])
    # Submit all circuits together as a single job.
    job = execute(list(circuits.values()), backend, shots=shots)
    # Tally disagreements: Qiskit bit strings read right to left, so bit `a`
    # is string[-1] and bit `b` is string[-2].
    P = {}
    for pair in hash_pairs:
        stats = job.result().get_counts(circuits[pair])
        P[pair] = 0
        for string in stats.keys():
            if string[-1] != string[-2]:
                P[pair] += stats[string] / shots
    return P
```
Now it's time to choose and set up the actual device we are going to use. By default, we'll use a simulator. You could instead use a real cloud-based device by changing the backend accordingly.
```
from qiskit import Aer
# Use the local simulator by default; swap in a real cloud backend if desired.
device = 'qasm_simulator'
backend = Aer.get_backend(device)
# Estimate the probabilities on the chosen backend and run the Bell test.
P = calculate_P( backend )
print(P)
bell_test( P )
```
If you prepared the state suggested by the exercise, you will have found a significant violation of the upper bound for `P['HH']`. So what is going on here? The chain of logic we based the Bell test on obviously doesn't apply to quantum variables. But why?
The answer is that there is a hidden assumption in that logic. To see why, let's revisit point (4).
hash2bit ( A, H ) = hash2bit ( A, V ) (4)
Here we compare the value we'd get from an `H` type of hash of the variable `A` with that for a `V` type hash.
For classical variables, this is perfectly sensible. There is nothing stopping us from calculating both hashes and comparing the results. Even if calculating the hash of a variable changes the variable, that's not a problem. All we need to do is copy it beforehand and we can do both hashes without any issue.
The same is not true for quantum variables. The result of the hashes is not known until we actually do them. It's only then that the qubit actually decides what bit value to give. And once it decides the value for one type of hash, we can never determine what it would have decided if we had used another type of hash. We can't get around this by copying the quantum variables either, because quantum variables [cannot be copied](https://en.wikipedia.org/wiki/No-cloning_theorem). This means there is no context in which the values `hash2bit(A,H)` and `hash2bit(A,V)` are well-defined at the same time, and so it is impossible to compare them.
Another hidden assumption is that `hash2bit(A,hash)` depends only on the type of hash chosen for variable `A`, and not the one chosen for variable `B`. This is also perfectly sensible, since this is exactly the way we set up the `hash2bit()` function. However, the very fact that the upper bound was violated does seem to imply that each variable knows what hash is being done to the other, so that they can conspire to give very different behaviour when both have a `H` type hash.
Even so, the effect is subtle. It is impossible to determine which variable is affecting which: You can change the order in which the hashes are done, or [effectively do them at the same time](https://en.wikipedia.org/wiki/Loopholes_in_Bell_test_experiments#Communication,_or_locality), and you'll get the same results. This means that it is not clear whether we can say they affect each other (see [here](https://quantumcomputing.stackexchange.com/questions/2028/is-it-true-to-say-that-one-qubit-in-an-entangled-state-can-instantaneously-affec) for some discussion). But what we can say is that they are [contextual](https://en.wikipedia.org/wiki/Quantum_contextuality): to fully understand results from one variable, it is sometimes required to look at what was done to another.
All this goes to show that quantum variables don't always follow the logic we are used to. They follow different rules, the rules of quantum mechanics, which will allow us to find ways of performing computation in very different ways than we are used to.
```
# Notebook metadata: topic and gate tags for indexing.
keywords = {'Topics': ['Games', 'Bell inequality', 'Entanglement'], 'gates': ['`x`', '`z`', '`h`', '`cx`']}
```
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Deep Learning
## Project: Build a Traffic Sign Recognition Classifier
### Author: Sergey Morozov
In this notebook, a traffic sign classifier is implemented. [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset) is used to train the model. There is a [write-up](./Writeup.md) where different stages of the implementation are described including analysis of the pros and cons of the chosen approaches and suggestions for further improvements.
---
## Step 0: Load The Data
```
# Load the pickled German Traffic Sign dataset splits.
import pickle
import pandas as pd

# Paths to the pickled train / validation / test splits.
training_file = "traffic-sign-data/train.p"
validation_file = "traffic-sign-data/valid.p"
testing_file = "traffic-sign-data/test.p"

def _read_pickle(path):
    # One split per file; each is a dict holding 'features' and 'labels'.
    with open(path, mode='rb') as f:
        return pickle.load(f)

train = _read_pickle(training_file)
valid = _read_pickle(validation_file)
test = _read_pickle(testing_file)

# Unpack image features and class labels for each split.
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']

# Map numeric class ids to human-readable sign names.
sign_names = pd.read_csv('signnames.csv').to_dict(orient='index')
sign_names = {key: val['SignName'] for key, val in sign_names.items()}
```
---
## Step 1: Dataset Summary & Exploration
The pickled data is a dictionary with 4 key/value pairs:
- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES.
### A Basic Summary of the Dataset
```
import numpy as np

# --- Basic statistics of the three dataset splits ---

# How many examples each split holds.
n_train = X_train.shape[0]
n_test = X_test.shape[0]
n_valid = X_valid.shape[0]

# Per-image shape, e.g. (32, 32, 3): height, width, channels.
image_shape = X_train.shape[1:]

# Count of distinct traffic-sign categories present in the training labels.
n_classes = len(np.unique(y_train))

print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Number of validation examples =", n_valid)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
```
### An Exploratory Visualization of the Dataset
#### Number of Samples in Each Category
The categories with minimum/maximum number of samples are marked with yellow/red color correspondingly.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm

# Horizontal bar chart: number of training samples per traffic-sign category.
plt.rcdefaults()
fig, ax = plt.subplots()

# Sample count for every category id, in the order of sign_names' keys.
samples_per_category = [len(np.where(y_train==cat_id)[0]) for cat_id in sign_names.keys()]
# Human-readable bar labels: "<sign name> [ id:<category id> ]".
category_names = tuple([val + " [ id:{id} ]".format(id=key) for key,val in sign_names.items()])

# Extreme categories get highlighted: yellow = rarest, red = most common.
min_cnt = min(samples_per_category)
max_cnt = max(samples_per_category)
y_pos = np.arange(len(category_names))
rects = ax.barh(y_pos,
                samples_per_category,
                align='center',
                color=['green' if val != min_cnt and val != max_cnt \
                       else 'yellow' if val == min_cnt \
                       else 'red' for val in samples_per_category])
# setting labels for each bar
for i in range(0,len(rects)):
    # Draw the raw count just past the end of each bar, vertically centred on it.
    ax.text(int(rects[i].get_width()),
            int(rects[i].get_y()+rects[i].get_height()/2.0),
            samples_per_category[i],
            fontproperties=fm.FontProperties(size=5))
ax.set_yticks(y_pos)
ax.set_yticklabels(category_names,fontproperties=fm.FontProperties(size=5))
ax.invert_yaxis()  # show category id 0 at the top
ax.set_title('Samples per Category')
plt.show()
```
#### Random Image from Each Category
Output a sample image from each category. Note, that images will be transformed before they are passed to neural network.
```
import random
import numpy as np
import matplotlib.pyplot as plt
import math

# Visualizations will be shown in the notebook.
%matplotlib inline

# Show one randomly chosen training image from every category, 5 per row.
h_or_w = image_shape[0]
fig = plt.figure(figsize=(h_or_w,h_or_w))
for i in range(0, n_classes):
    # Indices of all training samples labelled with category i.
    samples = np.where(y_train==i)[0]
    index = random.randint(0, len(samples) - 1)
    image = X_train[samples[index]]
    ax = fig.add_subplot(math.ceil(n_classes/5), 5, i+1)
    ax.set_title(sign_names[i])
    ax.set_ylabel("id: {id}".format(id=i))
    plt.imshow(image)
plt.show()
```
----
## Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. The LeNet-5 CNN architecture is used here with minor modifications: dropout parameter added to the first fully connected layer.
### Pre-process the Data Set (normalization, grayscale, etc.)
#### Shuffle Data
```
from sklearn.utils import shuffle
# Randomize example order so later mini-batches are not class-ordered.
X_train, y_train = shuffle(X_train, y_train)
```
#### Prepare Input Images
```
import cv2
def prepare_image(image_set):
    """Convert a batch of RGB traffic-sign images to the network's input format.

    Every image is (1) min/max-normalized onto the full 0-255 range and
    (2) converted to single-channel grayscale.  Returns an int array of
    shape (len(image_set), H, W, 1).
    """
    # Output shape keeps H and W but collapses color down to one channel.
    target_shape = image_shape[0:2] + (1,)
    prepped = np.empty(shape=(len(image_set),) + target_shape, dtype=int)
    for idx, img in enumerate(image_set):
        # Stretch pixel intensities to the full 0-255 range.
        normalized = cv2.normalize(img, np.zeros(image_shape[0:2]), 0, 255, cv2.NORM_MINMAX)
        # Drop color information; shape becomes (H, W).
        grayscale = cv2.cvtColor(normalized, cv2.COLOR_RGB2GRAY)
        # Restore the trailing channel axis expected by the network.
        prepped[idx] = np.reshape(grayscale, target_shape)
    return prepped
def equalize_number_of_samples(image_set, image_labels):
    """Oversample so every category contains the same number of samples.

    Each category is repeated (cycling through its own samples) until it
    holds as many samples as the largest category found in *image_labels*,
    giving every traffic-sign class an equal probability of being seen
    during training.  The result is randomly shuffled before returning.

    Parameters:
        image_set:    np.ndarray of samples, shape (n, ...); row i pairs
                      with image_labels[i].
        image_labels: np.ndarray of integer category ids, shape (n,).

    Returns:
        (equalized_image_set, equalized_image_labels), both shuffled with
        the same permutation.
    """
    # Categories come from the labels actually passed in.
    # (Bug fix: the original indexed the global `y_train` when gathering
    # each category's indices, so calling this function with any other
    # label array silently produced wrong output.  Deriving categories
    # from `image_labels` also avoids a ZeroDivisionError when a category
    # from the global mapping has no samples here.)
    categories = np.unique(image_labels)
    num = max(len(np.where(image_labels == cat_id)[0]) for cat_id in categories)

    equalized_image_set = np.empty(shape=(num * len(categories),) + image_set.shape[1:], dtype=int)
    equalized_image_labels = np.empty(shape=(num * len(categories),), dtype=int)
    j = 0
    for cat_id in categories:
        cat_inds = np.where(image_labels == cat_id)[0]
        cat_inds_len = len(cat_inds)
        # Cycle through this category's own samples until `num` copies sit
        # in the output.
        for i in range(0, num):
            equalized_image_set[j] = image_set[cat_inds[i % cat_inds_len]]
            equalized_image_labels[j] = image_labels[cat_inds[i % cat_inds_len]]
            j += 1
    # The output is grouped by category at this point, so shuffle it.
    perm = np.random.permutation(len(equalized_image_labels))
    return equalized_image_set[perm], equalized_image_labels[perm]
# Normalize + grayscale every split.
X_train_prep = prepare_image(X_train)
X_test_prep = prepare_image(X_test)
X_valid_prep = prepare_image(X_valid)
# Oversampling (and its shuffle) is applied to the training split only.
X_train_prep, y_train_prep = equalize_number_of_samples(X_train_prep, y_train)
# we do not need to transform labels for validation and test sets
y_test_prep = y_test
y_valid_prep = y_valid
# Network input shape after preprocessing, e.g. (32, 32, 1).
image_shape_prep = X_train_prep[0].shape
```
### Model Architecture
```
# LeNet-5 architecture is used.
import tensorflow as tf
from tensorflow.contrib.layers import flatten

def LeNet(x, channels, classes, keep_prob, mu=0, sigma=0.01):
    """Build the LeNet-5 graph and return the pre-softmax logits tensor.

    Parameters:
        x:         input batch placeholder, shape (None, 32, 32, channels).
        channels:  number of input image channels (1 after grayscaling).
        classes:   number of output classes (length of the logits vector).
        keep_prob: placeholder for the dropout keep probability; dropout is
                   applied once, to the flattened conv features.
        mu, sigma: mean / stddev of the truncated-normal weight initializer.
    """
    # Arguments used for tf.truncated_normal, randomly defines variables
    # for the weights and biases for each layer
    # Layer 1: Convolutional. Input = 32x32xchannels. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, channels, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # Layer 1: Activation.
    conv1 = tf.nn.relu(conv1)
    # Layer 1: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # Layer 2: Activation.
    conv2 = tf.nn.relu(conv2)
    # Layer 2: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Layer 2: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # Dropout regularization on the flattened features (keep_prob fed at run time).
    fc0 = tf.nn.dropout(fc0, keep_prob=keep_prob)
    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # Layer 3: Activation.
    fc1 = tf.nn.relu(fc1)
    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # Layer 4: Activation.
    fc2 = tf.nn.relu(fc2)
    # Layer 5: Fully Connected. Input = 84. Output = classes (not the classic 10).
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, classes), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(classes))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
```
### Train, Validate and Test the Model
A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
#### Features and Labels
```
# x is a placeholder for a batch of input images
x = tf.placeholder(tf.float32, (None,) + image_shape_prep)
# y is a placeholder for a batch of output labels (sparse integer class ids)
y = tf.placeholder(tf.int32, (None))
# One-hot encode the labels so they match the shape of the network's logits.
one_hot_y = tf.one_hot(y, n_classes)
```
#### Training Pipeline
```
# hyperparameters of the training process
RATE = 0.0008      # Adam learning rate
EPOCHS = 30        # full passes over the equalized training set
BATCH_SIZE = 128   # mini-batch size for SGD
KEEP_PROB = 0.7    # dropout keep probability used during training
STDDEV = 0.01      # weight-init stddev forwarded to LeNet

keep_prob = tf.placeholder(tf.float32)
logits = LeNet(x, image_shape_prep[-1], n_classes, keep_prob, sigma=STDDEV)
# Softmax cross-entropy against the one-hot labels, averaged over the batch.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = RATE)
training_operation = optimizer.minimize(loss_operation)
```
#### Model Evaluation
```
# A prediction is correct when the arg-max logit matches the true class.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()

def evaluate(X_data, y_data):
    """Return the model's accuracy over X_data/y_data, computed in batches.

    Dropout is disabled (keep_prob=1) so evaluation is deterministic.
    Relies on the module-level graph tensors (x, y, keep_prob,
    accuracy_operation) and the current default TF session.
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1})
        # Weight by batch size: the final batch may be smaller than BATCH_SIZE.
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
```
#### Train the Model
```
# Train for EPOCHS passes, reporting train/validation accuracy each epoch,
# then checkpoint the final weights to ./model.ckpt.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train_prep)
    print("Training...")
    print()
    for i in range(EPOCHS):
        # Re-shuffle every epoch so mini-batches differ between epochs.
        X_train_prep, y_train_prep = shuffle(X_train_prep, y_train_prep)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train_prep[offset:end], y_train_prep[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: KEEP_PROB})
        # Train vs. validation accuracy tracks over/underfitting per epoch.
        train_accuracy = evaluate(X_train_prep, y_train_prep)
        validation_accuracy = evaluate(X_valid_prep, y_valid_prep)
        print("EPOCH {} ...".format(i+1))
        print("Train Accuracy = {:.3f}".format(train_accuracy))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './model.ckpt')
    print("Model saved")
```
#### Evaluate Trained Model Using Test Samples
```
# Restore the trained weights and measure accuracy on the held-out test set.
with tf.Session() as sess:
    saver.restore(sess, './model.ckpt')
    test_accuracy = evaluate(X_test_prep, y_test_prep)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
```
---
## Step 3: Test a Model on New Images
It is time to apply the trained model to the German traffic sign images that were obtained from the Internet.
### Load and Output the Images
```
import os
import cv2
import matplotlib.image as mpimg

# Load the web-sourced sign images and resize them to the network input size.
img_paths = os.listdir("traffic-sign-images")
images = list()
labels = list()
# read images and resize
for img_path in img_paths:
    # read image from file
    img = mpimg.imread(os.path.join("traffic-sign-images", img_path))
    img = cv2.resize(img, image_shape[0:2], interpolation=cv2.INTER_CUBIC)
    images.append(img)
    # prefix of each image name is a number of its category
    # (filenames look like "<id>-<something>", so parse up to the first dash)
    labels.append(int(img_path[0:img_path.find('-')]))
images = np.array(images)
labels = np.array(labels)

# output the resized images
h_or_w = image_shape[0]
fig = plt.figure(figsize=(h_or_w,h_or_w))
for i in range(0, len(images)):
    ax = fig.add_subplot(1, len(images), i+1)
    ax.set_title(sign_names[labels[i]])
    ax.set_ylabel("id: {id}".format(id=labels[i]))
    plt.imshow(images[i])
plt.show()
```
### Predict the Sign Type for Each Image
```
# preprocess images first
images_prep = prepare_image(images)
labels_prep = labels
# then make a prediction
with tf.Session() as sess:
    saver.restore(sess, './model.ckpt')
    # arg-max over the logits gives the predicted class id per image.
    sign_ids = sess.run(tf.argmax(logits, 1), feed_dict={x: images_prep, y: labels_prep, keep_prob: 1})
# output the results in the table
print('-' * 93)
print("| {p:^43} | {a:^43} |".format(p='PREDICTED', a='ACTUAL'))
print('-' * 93)
for i in range(len(sign_ids)):
    print('| {p:^2} {strp:^40} | {a:^2} {stra:^40} |'.format(
        p=sign_ids[i], strp=sign_names[sign_ids[i]], a=labels[i], stra=sign_names[labels[i]]))
print('-' * 93)
```
### Analyze Performance
```
# run evaluation on the new images
with tf.Session() as sess:
    saver.restore(sess, './model.ckpt')
    test_accuracy = evaluate(images_prep, labels_prep)
    print("Accuracy = {:.3f}".format(test_accuracy))
```
### Top 5 Softmax Probabilities For Each Image Found on the Web
```
# Print out the top five softmax probabilities for the predictions on
# the German traffic sign images found on the web.
with tf.Session() as sess:
    saver.restore(sess, './model.ckpt')
    # top_k.values are the probabilities, top_k.indices the class ids.
    top_k = sess.run(tf.nn.top_k(tf.nn.softmax(logits), k=5),
                     feed_dict={x: images_prep, y: labels_prep, keep_prob: 1})
print(top_k)

plt.rcdefaults()
# show histogram of top 5 softmax probabilities for each image
h_or_w = image_shape[0]
fig = plt.figure()
for i in range(0, len(images)):
    ax = fig.add_subplot(len(images), 1, i+1)
    probabilities = top_k.values[i]
    y_pos = np.arange(len(probabilities))
    ax.set_ylabel("actual id: {id}".format(id=labels[i]), fontproperties=fm.FontProperties(size=5))
    rects = ax.barh(y_pos,
                    probabilities,
                    align='center',
                    color='blue')
    # setting labels for each bar
    for j in range(0,len(rects)):
        # Annotate each bar with its exact probability value.
        ax.text(int(rects[j].get_width()),
                int(rects[j].get_y()+rects[j].get_height()/2.0),
                probabilities[j],
                fontproperties=fm.FontProperties(size=5), color='red')
    ax.set_yticks(y_pos)
    # y tick labels are the predicted class ids for this image.
    ax.set_yticklabels(top_k.indices[i], fontproperties=fm.FontProperties(size=5))
    xticks = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticks, fontproperties=fm.FontProperties(size=5))
    ax.invert_yaxis()
plt.tight_layout()
plt.show()
```
| github_jupyter |
# Day 6
### Topics
1. Bit of a review
1. Read in Image data with Stitch Image
1. Colors in images with Stitch Image
1. Goodness/badness histogram
First, import our usual things:
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```
## Bit of a review
Recall last time we played around with uploading data with Pandas and making some plots, with style!
```
gdp = pd.read_csv("https://raw.githubusercontent.com/UIUC-iSchool-DataViz/spring2020/master/week01/data/GDP.csv")
```
Our data formatting:
```
gdp['DATE'] = pd.to_datetime(gdp['DATE'])
```
We made a function to plot this dataset with different styles:
```
def make_gdp_plot(style):
    """Plot the GDP time series with the given matplotlib style name applied."""
    # The context manager scopes the style to this one figure only.
    # (Available style names: plt.style.available.)
    with plt.style.context(style):
        figure, axes = plt.subplots(figsize=(10, 8))
        axes.set_title("Style: " + style)  # show which style produced the figure
        axes.plot(gdp["DATE"], gdp["GDP"], '-')
        plt.show()
```
Now we can run our function. Let's remind ourselves of our choices:
```
plt.style.available
```
And then we made a few plots:
```
make_gdp_plot('seaborn-poster') ## change
```
Today we are going to play with an image dataset (the Stitch Image) and use it to learn about color space.
More info about image datasets can be found on Week 01 & Week 02 of the grad-level course: https://uiuc-ischool-dataviz.github.io/spring2020/
## Read in Image data with Stitch Image
Make sure you have the pillow package installed with:
```python
!conda install -c anaconda pillow
```
Or through using the Anaconda GUI.
Let's use the pillow library and its `Image` interface to load an image:
```
import PIL.Image as Image # note here we are using the Image set of functions *within* the PIL library
```
Now we'll read in image file, here I'm assuming the stitch image is stored in the same directory as this notebook:
```
im = Image.open("stitch_reworked.png", "r")
```
Note, we can take a quick look at this image:
```
im
```
The `im` variable is sort of like a "fig" object in that there is a method to display it to the screen in a Jupyter Notebook. Let's turn it into data:
```
data = np.array(im)
```
What is this data?
```
data
data.shape
```
This data is an image that is 483x430 in shape and has 4 color channels. Why 4 and not 3? The 4th channel is opacity -- how see through the image is. More on this in a moment.
Note we can also display this image with the `matplotlib` interface:
```
fig, ax = plt.subplots(1,1, figsize=(8,8))
ax.imshow(data)
plt.show()
```
We can see this way allows us to see some info about the # of pixels in the x/y directions.
Also, note that the y-direction starts at zero and *increases* going down. This is common for data in an image format.
## Colors in images with Stitch Image
One question we might ask at this point is how many unique colors are there in this image?
We could try using `np.unique` to do this for us:
```
np.unique(data)
```
Hmmm, but we know we are supposed to have channels that are like RGB triplets. By default, `np.unique` just looks for unique values across the entire data, so we have to be a little more careful. Before we get into it, what we know there are 4 channels: these are RGBA:
<img src="https://upload.wikimedia.org/wikipedia/commons/0/0b/RGBA_comp.png">
This is a nice representation of how the "A", or *alpha-channel* changes the look of the image. Here is a rainbow colormap with a checkered pattern behind it. As the alpha-channel decreases we are able to see more and more of the checkered background.
So what we *really* want to see is the number of unique RGBA combinations.
Let's first recall what the dimension of the data is:
```
data.shape
```
We might be tempted to try an "axis" argument:
```
len(np.unique(data, axis=0))
```
But that doesn't really do what we want, so we have to be a little trickier.
Let's first see how many unique values are in each channel:
```
channel_labels = ['R', 'G', 'B', 'A']
for i in range(data.shape[2]): # this loops over the last entry of the shape array, so the #4
print('channel=', channel_labels[i],
'unique values=', np.unique( data[:,:,i] ) ) # print the unique elements in each channel
```
So this is still not giving us unique combinations of colors, but it is telling us some very interesting stuff! It's saying, for example, that there are likely very few colors because there are just not that many levels of any channel (at most 3 of each). And it's telling us that there are places on the image that are either perfectly solid (the alpha channel = 255) or perfectly see-through (alpha channel = 0).
What we really want to do is change our 483x430x4 dataset into a list of RGBA combinations. We can do this with `numpy.reshape` by saying "hey, let's collapse our dataset along the 3rd dimension -- index of 2":
```
data.reshape(-1, data.shape[2])
data.reshape(-1, data.shape[2]).shape
```
Now each pixel is represented by a row and we can, FINALLY, look for unique values along this first axis:
```
np.unique(data.reshape(-1, data.shape[2]), axis=0)
```
TahDah! So, we see that there are only 4 colors in this image and 1 is just a totally transparent color -- RGBA = [0,0,0,0].
How to figure out where these colors are? Let's first start by trying to visualize where the transparent stuff is. While drawing a checkered pattern on the back of our image might be cool, we can also "set" a gray color to our background more easily and then plot on top of this with `matplotlib`.
```
fig, ax = plt.subplots(1,1, figsize=(8,8))
ax.set_facecolor('gray') # set background image
# plot on top!
ax.imshow(data)
plt.show()
```
What if I only wanted to plot the areas that are red regions? Or change the red areas to blue?
We can do this by doing something called a "boolean mask" -- this is basically making an array of booleans (True or False) that will be true ONLY when the image is transparent. Let's give this a shot:
```
image_boolean_mask = data[:,:,0] == 126 # from our understanding of RGB combinations this is the R channel that is redist
image_boolean_mask
```
So, this is very much like doing a boolean mask for one value:
```
126 == 126
```
So, let's first turn these parts of the image blue by "resetting" their values to be 255 in the blue channel and 0 otherwise.
```
data[image_boolean_mask] = [0,0,255,255]
```
Finally, let's plot!
```
fig, ax = plt.subplots(1,1, figsize=(8,8))
ax.set_facecolor('gray') # set background image
# plot on top!
ax.imshow(data)
plt.show()
```
A beautiful blue stitch!
You can also do more complex combinations of boolean searches for colors. What we'll do now is use a "user-defined" library to do some of these manipulations for us.
Make sure the `image_manipulations.py` file is in your current directory, and do:
```
from image_manipulations import my_boolean_mask
```
This is a little function I wrote myself that I stored in an external library. I put in some doc strings too so we can see a bit of info about this function:
```
my_boolean_mask?
```
Let's try it! Let's replace black lines by purple. We couldn't do this before -- why? Let's look at our data again:
```
np.unique(data.reshape(-1, data.shape[2]), axis=0)
```
If I tried to do a simple boolean mask with one of the channels for black -- [0,0,0,255] -- it would overlap with at least one more color so that won't work. The function does a full set of boolean operations to take this into account (you can check out the function yourself if you are interested):
```
myMask = my_boolean_mask(data, [0,0,0,255])
```
This time, let's not overwrite our original image. We can do this by copying the array:
```
newImageData = data.copy()
```
Now we can replace at will without changing the original data array!
```
newImageData[myMask] = [126, 0, 126, 255] # this will be a magenta looking thing!
```
Let's plot!
```
fig, ax = plt.subplots(1,1, figsize=(8,8))
ax.set_facecolor('gray') # set background image
ax.imshow(newImageData)
plt.show()
```
Cool! Now let's say we want to compare our modified image to the previous one. We can do this by making side-by-side figures with a slight modification to our matplotlib call:
```
fig, ax = plt.subplots(1,2, figsize=(16,8))
```
I haven't filled the figure with anything, just made some side-by-side plots. We can try other configurations and even call it with the number of rows and columns explicitly:
```
fig, ax = plt.subplots(nrows=3,ncols=2, figsize=(16,8))
```
Ok, let's go back to side by side plots:
```
fig, ax = plt.subplots(nrows=1,ncols=2, figsize=(16,8))
```
How do we access the different sets of axis? Let's check what "ax" is now:
```
ax
```
`ax` is actually an *array* of axes! We can access these axes the same way we would with any array:
```
ax[0]
ax[1]
```
Ok, let's actually put images on our plot:
```
fig, ax = plt.subplots(nrows=1,ncols=2, figsize=(16,8))
ax[0].set_facecolor('gray') # set background image
ax[0].imshow(data) # original
ax[1].set_facecolor('gray') # set background image
ax[1].imshow(newImageData)
plt.show()
```
We can even add labels/titles in the usual way:
```
fig, ax = plt.subplots(nrows=1,ncols=2, figsize=(16,8))
ax[0].set_facecolor('gray') # set background image
ax[0].imshow(data) # original
ax[0].set_title('Original')
ax[1].set_facecolor('gray') # set background image
ax[1].imshow(newImageData)
ax[1].set_title('Updated Image')
plt.show()
```
What if I just wanted to plot *just* the blue part and set the *rest* of the image to fully transparent?
Let's copy our dataset again:
```
onlyBlueData = data.copy()
```
And call a boolean mask for blues:
```
onlyBlueMask = my_boolean_mask(onlyBlueData, [0, 0, 255, 255])
```
What I want now is to turn all of the pixels that are *not* blue into transparent. So I do this with a "NOT-mask" which is basically a little twiddle in front of the mask instead of the mask itself:
```
onlyBlueData[~onlyBlueMask] = [0,0,0,0]
```
Let's plot and see what we did:
```
fig, ax = plt.subplots(1,1, figsize=(8,8)) # one fig again
ax.set_facecolor('gray') # set background image
ax.imshow(onlyBlueData)
plt.show()
```
## Goodness/badness histogram
Let's re-load our unaltered image and take a quick look:
```
im = Image.open("stitch_reworked.png", "r")
data = np.array(im)
fig, ax = plt.subplots(1,1, figsize=(8,8)) # one fig again
ax.set_facecolor('gray') # set background image
ax.imshow(data)
plt.show()
```
So, when we first encountered this image we really wanted to measure the goodness and badness of Stitch, and now that we have masks we can do just this! Let's grab the measurement of Stitch's goodness which is the white parts of his upper head and ears and the "bad" parts which are the maroon parts using our masking.
First, what are the different colors again?
```
np.unique(data.reshape(-1, data.shape[2]), axis=0)
```
Ok, let's grab goodness and badness:
```
goodness_mask = my_boolean_mask(data, [255, 255, 255, 255])
badness_mask = my_boolean_mask(data, [126, 22, 33, 255])
```
And let's count pixels in each group:
```
npix_good = len(data[goodness_mask])
npix_bad = len(data[badness_mask])
npix_good, npix_bad
```
We could calculate a quick percentage of Stitch's "goodness":
```
npix_good/(npix_good + npix_bad)
```
So stitch is ~23% good. But! We can also make a histogram of this using a `matplotlib` bar-chart plot:
```
fig, ax = plt.subplots(figsize=(8,8))
# we'll turn these into arrays to make our lives easier down the road
labels = np.array(['badness', 'goodness'])
values = np.array([npix_bad, npix_good])
ax.bar(labels, values)
plt.show()
```
Ok this is fine, but wouldn't it be nice to be able to color these bars ourselves? We can do this by accessing the bar-chart's colors and setting them one by one:
```
fig, ax = plt.subplots(figsize=(8,8))
# we'll turn these into arrays to make our lives easier down the road
labels = np.array(['badness', 'goodness'])
values = np.array([npix_bad, npix_good])
myBarChart = ax.bar(labels, values)
plt.show()
myBarChart
myBarChart[0]
```
This is telling us we have 2 rectangles on this plot. Let's do a for loop and use the `set_color` function to pick their colors:
```
colors = ['maroon', 'lightgray'] # set badness = maroon & goodness = light gray
# set colors for each bar individually
for i in range(len(myBarChart)):
myBarChart[i].set_color(colors[i])
```
Re-show this figure:
```
fig
```
We can also set the colors by their RGBA values instead (with a normalization to 0-1 colorspace):
```
colors = np.array([(126, 22, 33, 255), (255, 255, 255, 255)])
# set colors for each bar individually
for i in range(len(myBarChart)):
myBarChart[i].set_color(colors[i]/255)
fig
```
Ah ha! But we have an issue! When we plot white we don't see the bar anymore. We can alleviate this with adding a black line around both of our bars:
```
colors = np.array([(126, 22, 33, 255), (255, 255, 255, 255)])
# set colors for each bar individually
for i in range(len(myBarChart)):
myBarChart[i].set_color(colors[i]/255)
# for the edges
myBarChart[i].set_edgecolor('black') # because one of our colors is white
myBarChart[i].set_linewidth(2) # so we can see the outlines clearly
fig
```
Let's put all this code in one place:
```
# Bar chart of Stitch's "badness" vs "goodness" pixel counts, with each bar
# colored to match the pixels it counts.
fig, ax = plt.subplots(figsize=(8,8))
# we'll turn these into arrays to make our lives easier down the road
labels = np.array(['badness', 'goodness'])
values = np.array([npix_bad, npix_good])
# RGBA in 0-255; divided by 255 below because matplotlib expects 0-1 colors.
colors = np.array([(126, 22, 33, 255), (255, 255, 255, 255)])
myBarChart = ax.bar(labels, values)
# set colors for each bar individually
for i in range(len(myBarChart)):
    myBarChart[i].set_color(colors[i]/255)
    # for the edges
    myBarChart[i].set_edgecolor('black') # because one of our colors is white
    myBarChart[i].set_linewidth(2) # so we can see the outlines clearly
plt.show()
```
Of course, ideally, we'd like to do this for all colors in our image. We can do this with another function in `image_manipulations.py`:
```
from image_manipulations import color_components
colors, color_labels, color_rgb_labels, npix_of_a_color = color_components(data)
```
`colors` gives the array of unique colors, already scaled to 0-1
```
colors
```
`color_rgb_labels` gives the labels in terms of rgb values:
```
color_rgb_labels
```
And `npix_of_a_color` gives the number of pixels at each color:
```
npix_of_a_color
```
Which we can use with much the same code we had before to make histograms:
```
# Histogram of the image's full color distribution: one bar per unique
# color, labelled with its RGB(A) value and painted in that color.
fig, ax = plt.subplots(figsize=(8,8))
myBarChart = ax.bar(color_rgb_labels, npix_of_a_color)
# set colors for each bar individually
for i in range(len(myBarChart)):
    # `colors` from color_components() is already scaled to 0-1.
    myBarChart[i].set_color(colors[i])
    # for the edges
    myBarChart[i].set_edgecolor('black') # because one of our colors is white
    myBarChart[i].set_linewidth(2) # so we can see the outlines clearly
plt.show()
```
This is a nice histogram of the color distribution of the Stitch image showing that most of the pixels are actually transparent background!
| github_jupyter |
```
import data_structures
import utils
import filters
import labeling
import pyfolio as pf
import pandas as pd
import numpy as np
import pandas as pd
import timeit
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, classification_report, confusion_matrix, accuracy_score
from sklearn.utils import resample
from sklearn.utils import shuffle
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
%matplotlib inline
read_path = 'C:/Users/win/Desktop/afml_new_data/'
for i in os.listdir(read_path):
df = pd.read_csv(read_path + i)
break
df
# Split each 'TIMESTAMP' string ("YYYY-MM-DD HH:MM:SS...") into separate
# date ("YYYY-MM-DD") and time ("HH:MM:SS") columns.
# Vectorized .str slicing replaces the original per-row loop that called
# Series.get_value(), an API deprecated in pandas 0.21 and removed in 1.0.
df['date'] = df['TIMESTAMP'].str[0:10]
df['time'] = df['TIMESTAMP'].str[11:19]
# Drop rows before 10:01:00 on 2018-01-02 and 2018-11-15 (presumably
# delayed session opens on those dates — TODO confirm with the exchange calendar).
df_2018_01_02_drop_time = df[(df['date'] == '2018-01-02') & (df['time'] < '10:01:00')].index
df_2018_11_15_drop_time = df[(df['date'] == '2018-11-15') & (df['time'] < '10:01:00')].index
df = df.drop(index=df_2018_01_02_drop_time)
df = df.drop(index=df_2018_11_15_drop_time)
df = df.reset_index(drop=True)
# On every day, discard ticks before 09:01:00.
df = df.drop(index=df[df['time'] < '09:01:00'].index).reset_index(drop=True)
df['price'] = df['PRICE'].astype(float)
df['date_time'] = df['TIMESTAMP']
df['volume'] = df['V'].astype(int)
# Index by timestamp so duplicate ticks at the same instant can be merged.
df = df.set_index('TIMESTAMP')
df = df.sort_values(by='TIMESTAMP')
# Aggregate volume (V) and dollar volume (DV) of same-timestamp ticks...
df_v = df.groupby(df.index).sum()
# ...keep a single row per timestamp (first row seen)...
df = df.loc[~df.index.duplicated(keep='first')]
# ...and overwrite its V/DV with the aggregated totals.
df['V'] = df_v['V']
df['DV'] = df_v['DV']
df['price'] = df['price'].astype(float)
df['volume'] = df['volume'].astype(int)
df = df[['date_time', 'price', 'volume']]
# Persist the cleaned ticks, then build dollar bars from the file
# (one bar per 100M units of traded dollar value).
df.to_csv(read_path + 'TRADE_A233740_2018_dollar_bar_all.csv', index=False)
dollar_bar = data_structures.get_dollar_bars(file_path = read_path + 'TRADE_A233740_2018_dollar_bar_all.csv' , threshold=100000000)
data = dollar_bar.copy(deep=True)
# Use the bar timestamps as a DatetimeIndex for the time-based slicing below.
data.index = pd.to_datetime(data['date_time'])
data = data.drop('date_time', axis=1)
data
```
## Fit a Primary Model: Trend Following
- Based on the simple moving average cross-over strategy
```
# Compute fast/slow simple moving averages of the close price.
fast_window = 20
slow_window = 50
data['fast_mavg'] = data['close'].rolling(window=fast_window, min_periods=fast_window, center=False).mean()
data['slow_mavg'] = data['close'].rolling(window=slow_window, min_periods=slow_window, center=False).mean()
# Side of the bet: +1 (long) when fast >= slow, -1 (short) otherwise.
data['side'] = np.nan
long_signals = data['fast_mavg'] >= data['slow_mavg']
short_signals = data['fast_mavg'] < data['slow_mavg']
data.loc[long_signals, 'side'] = 1
data.loc[short_signals, 'side'] = -1
# Remove look-ahead bias by lagging the signal one bar
data['side'] = data['side'].shift(1)
# Save the raw data (NaNs kept) for the feature engineering below
raw_data = data.copy()
# Drop the NaN values (rolling-window/shift warm-up) from our data set
data.dropna(axis=0, how='any', inplace=True)
data['side'].value_counts()
```
## Filter Events: CUSUM Filter
- Predict what will happen when a CUSUM event is triggered. Use the signal from the MAvg Strategy to determine the side of the bet
```
# Compute daily volatility
daily_vol = utils.get_daily_vol(close=data['close'], lookback=5)
# Apply Symmetric CUSUM Filter and get timestamps for events
# Note: Only the CUSUM filter needs a point estimate for volatility
cusum_events = filters.cusum_filter(data['close'], threshold=daily_vol[:'2018-11-01'].mean()*0.5)
# # Compute vertical barrier
vertical_barriers = labeling.add_vertical_barrier(t_events=cusum_events, close=data['close'], num_days=1)
pt_sl = [1, 2]
min_ret = 0.005
triple_barrier_events = labeling.get_events(close=data['close'],
t_events=cusum_events,
pt_sl=pt_sl,
target=daily_vol,
min_ret=min_ret,
num_threads=3,
vertical_barrier_times=vertical_barriers,
side_prediction=data['side'])
labels = labeling.get_bins(triple_barrier_events, data['close'])
labels.side.value_counts()
```
## Results of Primary Model:
- What is the accuracy of predictions from the primary model (i.e., if the secondary model does not filter the bets)? What are the precision, recall, and F1-scores?
```
primary_forecast = pd.DataFrame(labels['bin'])
primary_forecast['pred'] = 1
primary_forecast.columns = ['actual', 'pred']
# Performance Metrics
actual = primary_forecast['actual']
pred = primary_forecast['pred']
print(classification_report(y_true=actual, y_pred=pred))
print("Confusion Matrix")
print(confusion_matrix(actual, pred))
print('')
print("Accuracy")
print(accuracy_score(actual, pred))
```
## Fit a Meta Model
- Train a random forest to decide whether to trade or not (i.e 1 or 0 respectively) since the earlier model has decided the side (-1 or 1)
- Create the following features:
- 1) Volatility
- 2) Serial Correlation
- 3) The returns at the different lags from the serial correlation
- 4) The sides from the SMavg Strategy
```
raw_data.head()
```
## Features
```
# Log Returns
raw_data['log_ret'] = np.log(raw_data['close']).diff()
# Momentum
raw_data['mom1'] = raw_data['close'].pct_change(periods=1)
raw_data['mom2'] = raw_data['close'].pct_change(periods=2)
raw_data['mom3'] = raw_data['close'].pct_change(periods=3)
raw_data['mom4'] = raw_data['close'].pct_change(periods=4)
raw_data['mom5'] = raw_data['close'].pct_change(periods=5)
# Volatility
raw_data['volatility_50'] = raw_data['log_ret'].rolling(window=50, min_periods=50, center=False).std()
raw_data['volatility_31'] = raw_data['log_ret'].rolling(window=31, min_periods=31, center=False).std()
raw_data['volatility_15'] = raw_data['log_ret'].rolling(window=15, min_periods=15, center=False).std()
# Serial Correlation (Takes about 4 minutes)
window_autocorr = 50
raw_data['autocorr_1'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=1), raw=False)
raw_data['autocorr_2'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=2), raw=False)
raw_data['autocorr_3'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=3), raw=False)
raw_data['autocorr_4'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=4), raw=False)
raw_data['autocorr_5'] = raw_data['log_ret'].rolling(window=window_autocorr, min_periods=window_autocorr, center=False).apply(lambda x: x.autocorr(lag=5), raw=False)
# Get the various log -t returns
raw_data['log_t1'] = raw_data['log_ret'].shift(1)
raw_data['log_t2'] = raw_data['log_ret'].shift(2)
raw_data['log_t3'] = raw_data['log_ret'].shift(3)
raw_data['log_t4'] = raw_data['log_ret'].shift(4)
raw_data['log_t5'] = raw_data['log_ret'].shift(5)
# Re compute sides
raw_data['side'] = np.nan
long_signals = raw_data['fast_mavg'] >= raw_data['slow_mavg']
short_signals = raw_data['fast_mavg'] < raw_data['slow_mavg']
raw_data.loc[long_signals, 'side'] = 1
raw_data.loc[short_signals, 'side'] = -1
# Remove look ahead bias
raw_data = raw_data.shift(1)
```
## Now get the data at the specified events
```
X = raw_data.loc[labels.index, :]
X.drop(['open', 'high', 'low', 'close', 'cum_vol', 'cum_dollar', 'cum_ticks','fast_mavg', 'slow_mavg',], axis=1, inplace=True)
y = labels['bin']
y
# Get features at event dates
X = raw_data.loc[labels.index, :]
# Drop unwanted columns
X.drop(['open', 'high', 'low', 'close', 'cum_vol', 'cum_dollar', 'cum_ticks','fast_mavg', 'slow_mavg',], axis=1, inplace=True)
y = labels['bin']
y.value_counts()
```
## Balance classes
```
# Split data into traing, validation and test sets
X_training_validation = X['2018-01-01':'2018-11-01']
y_training_validation = y['2018-01-01':'2018-11-01']
X_train, X_validate, y_train, y_validate = train_test_split(X_training_validation, y_training_validation, test_size=0.2, shuffle=False)
train_df = pd.concat([y_train, X_train], axis=1, join='inner')
train_df['bin'].value_counts()
# Upsample the training data to have a 50 - 50 split
# https://elitedatascience.com/imbalanced-classes
majority = train_df[train_df['bin'] == 0]
minority = train_df[train_df['bin'] == 1]
new_minority = resample(minority,
replace=True, # sample with replacement
n_samples=majority.shape[0], # to match majority class
random_state=42)
train_df = pd.concat([majority, new_minority])
train_df = shuffle(train_df, random_state=42)
train_df['bin'].value_counts()
train_df
# Create training data
y_train = train_df['bin']
X_train= train_df.loc[:, train_df.columns != 'bin']
```
## Fit a Model
```
parameters = {'max_depth':[2, 3, 4, 5, 7],
'n_estimators':[1, 10, 25, 50, 100, 256, 512],
'random_state':[42]}
def perform_grid_search(X_data, y_data):
    """Grid-search a random forest over the module-level `parameters` grid
    (4-fold CV, ROC-AUC scoring) and return the best (n_estimators, max_depth)."""
    base_model = RandomForestClassifier(criterion='entropy')
    search = GridSearchCV(base_model, parameters, cv=4, scoring='roc_auc', n_jobs=3)
    search.fit(X_data, y_data)
    # show the mean CV score of every grid point
    print(search.cv_results_['mean_test_score'])
    best = search.best_params_
    return best['n_estimators'], best['max_depth']
# extract parameters
n_estimator, depth = perform_grid_search(X_train, y_train)
c_random_state = 42
print(n_estimator, depth, c_random_state)
# Refit a new model with best params, so we can see feature importance
rf = RandomForestClassifier(max_depth=depth, n_estimators=n_estimator,
criterion='entropy', random_state=c_random_state)
rf.fit(X_train, y_train.values.ravel())
rf.predict_proba(X_train)[:, 1]
```
## Training Metrics
```
# Performance Metrics
y_pred_rf = rf.predict_proba(X_train)[:, 1]
y_pred = rf.predict(X_train)
fpr_rf, tpr_rf, _ = roc_curve(y_train, y_pred_rf)
print(classification_report(y_train, y_pred))
print("Confusion Matrix")
print(confusion_matrix(y_train, y_pred))
print('')
print("Accuracy")
print(accuracy_score(y_train, y_pred))
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
```
## Validation Metrics
```
# Meta-label
# Performance Metrics
y_pred_rf = rf.predict_proba(X_validate)[:, 1]
y_pred = rf.predict(X_validate)
fpr_rf, tpr_rf, _ = roc_curve(y_validate, y_pred_rf)
print(classification_report(y_validate, y_pred))
print("Confusion Matrix")
print(confusion_matrix(y_validate, y_pred))
print('')
print("Accuracy")
print(accuracy_score(y_validate, y_pred))
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
print(X_validate.index.min())
print(X_validate.index.max())
primary_forecast = pd.DataFrame(labels['bin'])
primary_forecast['pred'] = 1
primary_forecast.columns = ['actual', 'pred']
primary_forecast
start = primary_forecast.index.get_loc('2018-08-24 15:19:18.610000')
end = primary_forecast.index.get_loc('2018-11-01 09:01:10.373000') + 1
subset_prim = primary_forecast[start:end]
actual = subset_prim['actual']
pred = subset_prim['pred']
print(classification_report(y_true=actual, y_pred=pred))
print("Confusion Matrix")
print(confusion_matrix(actual, pred))
print('')
print("Accuracy")
print(accuracy_score(actual, pred))
# Primary model
primary_forecast = pd.DataFrame(labels['bin'])
primary_forecast['pred'] = 1
primary_forecast.columns = ['actual', 'pred']
start = primary_forecast.index.get_loc('2018-08-24 15:19:18.610000')
end = primary_forecast.index.get_loc('2018-11-01 09:01:10.373000') + 1
subset_prim = primary_forecast[start:end]
# Performance Metrics
actual = subset_prim['actual']
pred = subset_prim['pred']
print(classification_report(y_true=actual, y_pred=pred))
print("Confusion Matrix")
print(confusion_matrix(actual, pred))
print('')
print("Accuracy")
print(accuracy_score(actual, pred))
# Feature importance bar chart for the fitted random forest.
title = 'Feature Importance:'
figsize = (15, 5)
feat_imp = pd.DataFrame({'Importance': rf.feature_importances_})
feat_imp['feature'] = X.columns
# Sort ascending so barh plots the most important feature on top.
# (The original sorted descending, then re-sorted ascending, with a
# no-op `feat_imp = feat_imp` in between — collapsed to a single sort.)
feat_imp.sort_values(by='Importance', inplace=True)
feat_imp = feat_imp.set_index('feature', drop=True)
feat_imp.plot.barh(title=title, figsize=figsize)
plt.xlabel('Feature Importance Score')
plt.show()
```
## Performance Tear Sheets (In-Sample)
Without Meta Labeling
```
def get_daily_returns(intraday_returns):
    """
    Convert intraday returns into daily returns usable by pyfolio.
    The conversion is approximate: compound the intraday returns,
    sample the last cumulative level of each business day, then
    difference those daily levels back into returns.
    """
    # compound the intraday returns into a cumulative growth curve
    growth = (intraday_returns + 1).cumprod()
    # keep the last observation of every business day
    eod_levels = growth.resample('B').last()
    # forward-fill gaps, convert levels to returns, drop the leading NaN
    return eod_levels.ffill().pct_change().dropna()
valid_dates = X_validate.index
base_rets = labels.loc[valid_dates, 'ret']
primary_model_rets = get_daily_returns(base_rets)
# Set-up the function to extract the KPIs from pyfolio
perf_func = pf.timeseries.perf_stats
# Save the statistics in a dataframe
perf_stats_all = perf_func(returns=primary_model_rets,
factor_returns=None,
positions=None,
transactions=None,
turnover_denom="AGB")
perf_stats_df = pd.DataFrame(data=perf_stats_all, columns=['Primary Model'])
pf.show_perf_stats(primary_model_rets)
```
## With Meta Labeling
```
meta_returns = labels.loc[valid_dates, 'ret'] * y_pred
daily_meta_rets = get_daily_returns(meta_returns)
# Save the KPIs in a dataframe
perf_stats_all = perf_func(returns=daily_meta_rets,
factor_returns=None,
positions=None,
transactions=None,
turnover_denom="AGB")
perf_stats_df['Meta Model'] = perf_stats_all
pf.show_perf_stats(daily_meta_rets)
```
## Perform out-of-sample test
- Meta Model Metrics
```
# extarct data for out-of-sample (OOS)
X_oos = X['2018-11-02':]
y_oos = y['2018-11-02':]
# Performance Metrics
y_pred_rf = rf.predict_proba(X_oos)[:, 1]
y_pred = rf.predict(X_oos)
fpr_rf, tpr_rf, _ = roc_curve(y_oos, y_pred_rf)
print(classification_report(y_oos, y_pred))
print("Confusion Matrix")
print(confusion_matrix(y_oos, y_pred))
print('')
print("Accuracy")
print(accuracy_score(y_oos, y_pred))
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
# Primary model
primary_forecast = pd.DataFrame(labels['bin'])
primary_forecast['pred'] = 1
primary_forecast.columns = ['actual', 'pred']
subset_prim = primary_forecast['2018-11-02':]
# Performance Metrics
actual = subset_prim['actual']
pred = subset_prim['pred']
print(classification_report(y_true=actual, y_pred=pred))
print("Confusion Matrix")
print(confusion_matrix(actual, pred))
print('')
print("Accuracy")
print(accuracy_score(actual, pred))
test_dates = X_oos.index
base_rets_oos = labels.loc[test_dates, 'ret']
primary_model_rets_oos = get_daily_returns(base_rets_oos)
# Save the statistics in a dataframe
perf_stats_all = perf_func(returns=primary_model_rets_oos,
factor_returns=None,
positions=None,
transactions=None,
turnover_denom="AGB")
perf_stats_df['Primary Model OOS'] = perf_stats_all
# pf.create_returns_tear_sheet(labels.loc[test_dates, 'ret'], benchmark_rets=None)
pf.show_perf_stats(primary_model_rets_oos)
meta_returns = labels.loc[test_dates, 'ret'] * y_pred
daily_rets_meta = get_daily_returns(meta_returns)
# save the KPIs in a dataframe
perf_stats_all = perf_func(returns=daily_rets_meta,
factor_returns=None,
positions=None,
transactions=None,
turnover_denom="AGB")
perf_stats_df['Meta Model OOS'] = perf_stats_all
pf.create_returns_tear_sheet(daily_rets_meta, benchmark_rets=None)
```
| github_jupyter |
```
import sys
import os
sys.path.append(os.environ['GOTMWORK_ROOT']+'/tools', )
from gotmanalysis import *
np.seterr(all='raise')
%matplotlib inline
timetag = '20080701-20080731'
casename = 'JRA55-do_Global'
forcing_reg_type = 'BG12'
tmname = 'KPP-CVMix'
update_data = False
plot_figure = True
apply_mask = True
# check forcing_reg_type
fr_list = ['BG12', 'LF17']
if forcing_reg_type not in fr_list:
print('Forcing regime {} not supported. Stop.'.format(forcing_reg_type))
# check time tag
month_labels = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
timetag_list = ['20090101-20090131',
'20090201-20090228',
'20090301-20090331',
'20090401-20090430',
'20090501-20090531',
'20080601-20080630',
'20080701-20080731',
'20080801-20080831',
'20080901-20080930',
'20081001-20081031',
'20081101-20081130',
'20081201-20081231']
try:
idx = timetag_list.index(timetag)
except ValueError:
print('Time tag {} not supported. Stop.'.format(timetag))
mlabel = month_labels[idx]
# paths
s1data_root = os.environ['GOTMRUN_ROOT']+'/'+casename+'/VR1m_DT600s_'+timetag
s2data_root = os.environ['GOTMFIG_ROOT']+'/data/'+casename+'/VR1m_DT600s_'+timetag
fig_root = os.environ['GOTMFIG_ROOT']+'/'+casename+'/VR1m_DT600s_'+timetag
os.makedirs(s2data_root, exist_ok=True)
os.makedirs(fig_root, exist_ok=True)
# get forcing regime
basepath = s1data_root+'/'+tmname
s2data_name = s2data_root+'/data_forcing_regime_'+forcing_reg_type+'_'+tmname+'.npz'
mask_name = s2data_root+'/mask_'+tmname+'.npz'
if update_data or not os.path.isfile(s2data_name):
# update data
print('Updating data...')
loclist = sorted(os.listdir(basepath))
pathlist = [basepath+'/'+x+'/gotm_out_s1.nc' for x in loclist]
godmobj = GOTMOutputDataMap(pathlist)
forcing_regime = np.zeros(godmobj.ncase)
for i in np.arange(godmobj.ncase):
if np.mod(i, 100) == 0:
print('{:6.2f} %'.format(i/godmobj.ncase*100.0))
tmp = GOTMOutputData(godmobj._paths[i], init_time_location=False)
if forcing_reg_type == 'BG12':
forcing_regime[i] = tmp.diag_forcing_regime_BG12()
elif forcing_reg_type == 'LF17':
forcing_regime[i] = tmp.diag_forcing_regime_LF17()
gmobj = GOTMMap(data=forcing_regime, lon=godmobj.lon, lat=godmobj.lat, name='forcing_regime')
if apply_mask:
# read mask
gmobj_mask = GOTMMap().load(mask_name)
# apply mask
gmobj.masked(gmobj_mask)
# save data
gmobj.save(s2data_name)
else:
# read data
gmobj = GOTMMap().load(s2data_name)
if apply_mask:
# read mask
gmobj_mask = GOTMMap().load(mask_name)
# apply mask
gmobj.masked(gmobj_mask)
# update data
gmobj.save(s2data_name)
# plot figure
if plot_figure:
# create figure
fig = plt.figure()
fig.set_size_inches(6, 2.2)
# plot
plot_forcing_regime_map(gmobj)
# label
plt.text(40, 55, mlabel, fontsize=14, color='k', fontweight='bold', va='top',
bbox=dict(boxstyle="square",ec='k',fc='w'))
# reduce margin
plt.tight_layout()
# save figure
figname = fig_root+'/fig_forcing_regime_'+forcing_reg_type+'.png'
plt.savefig(figname, dpi = 300)
```
| github_jupyter |
# DataSynthesizer Usage (correlated attribute mode)
> This is a quick demo to use DataSynthesizer in correlated attribute mode.
### Step 1 import packages
```
import os
from time import time
from DataSynthesizer.DataDescriber import DataDescriber
from DataSynthesizer.DataGenerator import DataGenerator
from DataSynthesizer.ModelInspector import ModelInspector
from DataSynthesizer.lib.utils import read_json_file, display_bayesian_network
import pandas as pd
```
## get run time
```
def get_runtime(input_data, na_values, mode):
    """Describe + synthesize one CSV with DataSynthesizer and time both steps.

    Parameters:
        input_data: file name of a CSV inside ./data
        na_values: values to treat as missing when reading the CSV
        mode: output subfolder name (e.g. 'correlated_attribute_mode')
    Returns:
        (duration_desc, duration_tot): seconds spent on the describe step,
        and on describe + generate combined.
    """
    description_file = f'./out/{mode}/description'
    description_file = description_file + '_' + input_data.split('.')[0] + '.json'
    # NOTE(review): 'sythetic_data' is misspelled, but it is part of the
    # on-disk file name; changing it would break downstream readers.
    synthetic_data = f'./out/{mode}/sythetic_data'
    synthetic_data = synthetic_data + '_' + input_data
    input_df = pd.read_csv(os.path.join('./data', input_data), skipinitialspace=True, na_values=na_values)
    # No column is treated as a candidate key; every object-dtype column
    # is flagged as categorical.
    cols_dict = {col: False for col in input_df.columns}
    categorical_attributes = (input_df.dtypes=='O').to_dict()
    # Default values set here, change here if needed.
    # An attribute is categorical if its domain size is less than this threshold.
    # Here modify the threshold to adapt to the domain size of "education" (which is 14 in input dataset).
    threshold_value = 20
    # specify categorical attributes
    categorical_attributes = categorical_attributes # {'native-country': True}
    # specify which attributes are candidate keys of input dataset.
    candidate_keys = cols_dict # {'index': True}
    # A parameter in Differential Privacy. It roughly means that removing a row in the input dataset will not
    # change the probability of getting the same output more than a multiplicative difference of exp(epsilon).
    # Increase epsilon value to reduce the injected noises. Set epsilon=0 to turn off differential privacy.
    epsilon = 1
    # The maximum number of parents in Bayesian network, i.e., the maximum number of incoming edges.
    degree_of_bayesian_network = 2
    # Number of tuples generated in synthetic dataset.
    num_tuples_to_generate = input_df.shape[0] # Here 32561 is the same as input dataset, but it can be set to another number.
    ### Step 3 DataDescriber
    # 1. Instantiate a DataDescriber.
    # 2. Compute the statistics of the dataset.
    # 3. Save dataset description to a file on local machine.
    start = time()
    describer = DataDescriber(category_threshold=threshold_value, null_values=na_values)
    describer.describe_dataset_in_correlated_attribute_mode(dataset_file=os.path.join('./data', input_data),
                                                            epsilon=epsilon,
                                                            k=degree_of_bayesian_network,
                                                            attribute_to_is_categorical=categorical_attributes,
                                                            attribute_to_is_candidate_key=candidate_keys)
    describer.save_dataset_description_to_file(description_file)
    duration_desc = time() - start
    ### Step 4 generate synthetic dataset
    # 1. Instantiate a DataGenerator.
    # 2. Generate a synthetic dataset.
    # 3. Save it to local machine.
    generator = DataGenerator()
    generator.generate_dataset_in_correlated_attribute_mode(num_tuples_to_generate, description_file)
    generator.save_synthetic_data(synthetic_data)
    duration_tot = time() - start
    print('took {} seconds'.format(duration_tot))
    return duration_desc, duration_tot
```
### Step 2 user-defined parameteres
```
files = os.listdir('./data')
files = sorted([file for file in files if file.split('_')[0] == 'synth'])
files = sorted([file for file in files if file.split('_')[2] in ['10', '15', '25', '30']])
na_values = {}
mode = 'correlated_attribute_mode'
files
duration = []
for input_data in files:
duration_ = get_runtime(input_data, na_values, mode)
duration.append(duration_)
import numpy as np
df = pd.DataFrame(np.vstack([file.split('.csv')[0].split('_')[1:] for file in files]),
columns=['n_samples','n_features','n_informative',
'n_redundant','n_repeated','n_classes',
'ncat', 'nbins'])
for col in df.columns:
df[col] = df[col].astype(int)
#df[['duration_desc', 'duration_tot']] = np.vstack(duration)
df = df.iloc[:len(duration),:]
df[['duration_desc', 'duration_tot']] = np.vstack(duration)
df
#df.to_csv('./out/correlated_attribute_mode/duration_per_nfeat_ncat_10cls.csv', index=False)
df_old = pd.read_csv('./out/correlated_attribute_mode/duration_per_nfeat_ncat_10cls.csv')
df = pd.concat([df_old, df])
df.loc[df.ncat==0, ['n_features', 'duration_tot']] \
.sort_values('n_features') \
.multiply(1 / df.loc[df.ncat==0, ['n_features', 'duration_tot']].min()).plot(x='n_features', y='duration_tot')
df.loc[df.ncat==0, ['n_features', 'duration_tot']].sort_values('n_features')#.plot(x='n_features', y='duration_tot')
1 * (1 + .3)**(df.loc[df.ncat==0, ['n_features', 'duration_tot']].sort_values('n_features')).n_features
df.loc[df.ncat==0, ['n_features', 'duration_tot']] \
.sort_values('n_features')#\
#.multiply(1 / df.loc[df.ncat==0, ['n_features', 'duration_tot']].min())
```
| github_jupyter |
# Sparse Autoregression
Here we fit NeuralProphet to data with 5-minute resolution (daily temperatures at Yosemite).
This is a continuation of the example notebook `autoregression_yosemite_temps`, focusing on sparsity.
```
if 'google.colab' in str(get_ipython()):
!pip install git+https://github.com/ourownstory/neural_prophet.git # may take a while
#!pip install neuralprophet # much faster, but may not have the latest upgrades/bugfixes
data_location = "https://raw.githubusercontent.com/ourownstory/neural_prophet/master/"
else:
data_location = "../"
import pandas as pd
from neuralprophet import NeuralProphet, set_log_level
# set_log_level("ERROR")
df = pd.read_csv(data_location + "example_data/yosemite_temps.csv")
# df.head(3)
```
# Sparsifying the AR coefficients
The autoregression component of NeuralProphet is defined as a AR-Net ([paper](https://arxiv.org/abs/1911.12436), [github](https://github.com/ourownstory/AR-Net)).
Thus, we can set `ar_sparsity` to a number smaller one, if we like to induce sparsity in the AR coefficients.
However, fitting a model with multiple components and regularizations can be harder to fit and in some cases you may need to take manual control over the training hyperparameters.
We will start by setting a sparsity to 50%
```
m = NeuralProphet(
n_lags=6*12,
n_forecasts=3*12,
changepoints_range=0.95,
n_changepoints=30,
weekly_seasonality=False,
# batch_size=64,
# epochs=100,
# learning_rate=0.1,
ar_sparsity=0.5,
)
metrics = m.fit(df, freq='5min') # validate_each_epoch=True, plot_live_loss=True
fig_param = m.plot_parameters()
m = m.highlight_nth_step_ahead_of_each_forecast(1)
fig_param = m.plot_parameters()
m = m.highlight_nth_step_ahead_of_each_forecast(36)
fig_param = m.plot_parameters()
```
## Further reducing the non-zero AR coefficients
By setting the ar_sparsity lower, we can further reduce the number of non-zero weights.
Here we set it to 10%
```
m = NeuralProphet(
n_lags=6*12,
n_forecasts=3*12,
changepoints_range=0.95,
n_changepoints=30,
weekly_seasonality=False,
# batch_size=64,
# epochs=100,
# learning_rate=0.1,
ar_sparsity=0.1,
)
metrics = m.fit(df, freq='5min')
fig_param = m.plot_parameters()
m = m.highlight_nth_step_ahead_of_each_forecast(1)
fig_param = m.plot_parameters()
m = m.highlight_nth_step_ahead_of_each_forecast(36)
fig_param = m.plot_parameters()
```
## Extreme sparsity
The lower we set `ar_sparsity`, the fewer non-zero weights are fitted by the model. Here we set it to 1%, which should lead to a single non-zero lag.
Note: Extreme values can lead to training instability.
```
m = NeuralProphet(
n_lags=6*12,
n_forecasts=3*12,
changepoints_range=0.95,
n_changepoints=30,
weekly_seasonality=False,
# batch_size=64,
# epochs=100,
# learning_rate=0.1,
ar_sparsity=0.01,
)
metrics = m.fit(df, freq='5min')
fig_param = m.plot_parameters()
m = m.highlight_nth_step_ahead_of_each_forecast(1)
fig_param = m.plot_parameters()
m = m.highlight_nth_step_ahead_of_each_forecast(36)
fig_param = m.plot_parameters()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/zaidalyafeai/Notebooks/blob/master/Unet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Introduction
The U-Net model is a simple fully convolutional neural network that is used for binary segmentation i.e foreground and background pixel-wise classification. Mainly, it consists of two parts.
* Contracting Path: we apply a series of conv layers and downsampling layers (max-pooling) layers to reduce the spatial size
* Expanding Path: we apply a series of upsampling layers to reconstruct the spatial size of the input.
The two parts are connected using a concatenation layers among different levels. This allows learning different features at different levels. At the end we have a simple conv 1x1 layer to reduce the number of channels to 1.

# Imports
```
import numpy as np
import matplotlib.pyplot as plt
import os
from PIL import Image
import keras
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Input, Conv2DTranspose, Concatenate, BatchNormalization, UpSampling2D
from keras.layers import Dropout, Activation
from keras.optimizers import Adam, SGD
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras import backend as K
from keras.utils import plot_model
import tensorflow as tf
import glob
import random
import cv2
from random import shuffle
```
# Dataset
We will use the The Oxford-IIIT Pet Dataset. It contains 37 classes of dogs and cats with around 200 images per each class. The dataset contains labels as bounding boxes and segmentation masks. The total number of images in the dataset is a little more than 7K.

Download the images/masks and unzip the files
```
!wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz
!wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz
!tar -xvzf images.tar.gz && tar -xvzf annotations.tar.gz
!rm images/*.mat
```
Note that we have two folders. The first one, `images`, contains the raw images; the second, `annotations`, contains the segmentation masks stored as images.
# Generators
```
def image_generator(files, batch_size = 32, sz = (256, 256)):
    """Yield endless (images, masks) training batches sampled from `files`.

    Images are RGB arrays scaled to [0, 1]; masks are binary arrays of
    shape (sz[0], sz[1], 1) where 1 marks foreground (pet) pixels.
    """
    while True:
        # draw a random batch of filenames (with replacement)
        chosen = np.random.choice(files, size = batch_size)
        images, masks = [], []
        for fname in chosen:
            # masks are stored as png trimaps next to the images
            trimap = Image.open(f'annotations/trimaps/{fname[:-4]}.png')
            trimap = np.array(trimap.resize(sz))
            # collapse the trimap to binary: labels >= 2 become background (0),
            # every remaining non-zero label becomes foreground (1)
            trimap[trimap >= 2] = 0
            trimap[trimap != 0] = 1
            masks.append(trimap)
            # load the matching raw image
            img = Image.open(f'images/{fname}')
            img = img.resize(sz)
            img = np.array(img)
            # normalize channel count: grayscale -> 3 channels, RGBA -> RGB
            if len(img.shape) == 2:
                img = np.stack((img,)*3, axis=-1)
            else:
                img = img[:,:,0:3]
            images.append(img)
        # scale pixels to [0, 1] and add the trailing channel axis to masks
        batch_x = np.array(images)/255.
        batch_y = np.expand_dims(np.array(masks), 3)
        yield (batch_x, batch_y)
batch_size = 32
all_files = os.listdir('images')
shuffle(all_files)
split = int(0.95 * len(all_files))
#split into training and testing
train_files = all_files[0:split]
test_files = all_files[split:]
train_generator = image_generator(train_files, batch_size = batch_size)
test_generator = image_generator(test_files, batch_size = batch_size)
x, y= next(train_generator)
plt.axis('off')
img = x[0]
msk = y[0].squeeze()
msk = np.stack((msk,)*3, axis=-1)
plt.imshow( np.concatenate([img, msk, img*msk], axis = 1))
```
# IoU metric
The intersection over union (IoU) metric is a simple metric used to evaluate the performance of a segmentation algorithm. Given two masks $y_{true}, y_{pred}$ we evaluate
$$IoU = \frac{y_{true} \cap y_{pred}}{y_{true} \cup y_{pred}}$$
```
def mean_iou(y_true, y_pred):
    """Intersection-over-union metric for binary masks.

    Thresholds the predicted mask at 0.5 and returns
    |true AND pred| / |true OR pred|, defining IoU = 1 when the union is empty.
    """
    true_mask = y_true[:,:,:,0]
    # binarize the prediction at the usual 0.5 threshold
    pred_mask = K.cast(y_pred[:,:,:,0] > 0.5, 'float32')
    intersection = tf.count_nonzero(tf.logical_and(tf.equal(true_mask, 1), tf.equal(pred_mask, 1)))
    union = tf.count_nonzero(tf.add(true_mask, pred_mask))
    # an empty union (both masks blank) counts as a perfect match
    return tf.where(tf.equal(union, 0), 1., tf.cast(intersection/union, 'float32'))
```
# Model
```
def unet(sz = (256, 256, 3)):
    """Build and compile a U-Net for binary segmentation.

    Six encoder levels (filters doubling from 8), a bottleneck, five
    decoder levels with skip connections, and a 1x1 sigmoid output head.
    """
    inputs = Input(sz)
    x = inputs
    # --- contracting path: conv-conv, remember the skip, then downsample ---
    filters = 8
    skips = []
    for _ in range(6):
        x = Conv2D(filters, 3, activation='relu', padding='same') (x)
        x = Conv2D(filters, 3, activation='relu', padding='same') (x)
        skips.append(x)
        x = MaxPooling2D() (x)
        filters = filters*2
    # --- bottleneck, then the first upsampling + skip concatenation ---
    up_filters = 64
    level = len(skips) - 1
    x = Conv2D(filters, 3, activation='relu', padding='same') (x)
    x = Conv2D(filters, 3, activation='relu', padding='same') (x)
    x = Conv2DTranspose(up_filters, 2, strides=(2, 2), padding='same') (x)
    x = Concatenate(axis=3)([x, skips[level]])
    level = level - 1
    # --- expanding path: halve filters, upsample, concatenate the skip ---
    for _ in range(5):
        up_filters = up_filters//2
        filters = filters//2
        x = Conv2D(filters, 3, activation='relu', padding='same') (x)
        x = Conv2D(filters, 3, activation='relu', padding='same') (x)
        x = Conv2DTranspose(up_filters, 2, strides=(2, 2), padding='same') (x)
        x = Concatenate(axis=3)([x, skips[level]])
        level = level - 1
    # --- classification head: 1x1 conv down to a single sigmoid channel ---
    x = Conv2D(filters, 3, activation='relu', padding='same') (x)
    x = Conv2D(filters, 3, activation='relu', padding='same') (x)
    outputs = Conv2D(1, 1, activation='sigmoid') (x)
    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy', metrics = [mean_iou])
    return model
model = unet()
```
# Callbacks
Simple functions to save the model at each epoch and show some predictions
```
def build_callbacks():
    """Return the training callbacks: best-weights checkpointing plus the
    per-epoch plotting callback."""
    saver = ModelCheckpoint(filepath='unet.h5', verbose=0, save_best_only=True, save_weights_only=True)
    return [saver, PlotLearning()]
# inheritance for training process plot
class PlotLearning(keras.callbacks.Callback):
    """Keras callback: record metrics each epoch and visualize progress.

    After every epoch it prints the loss/IoU numbers and segments one
    random test image with the current model, showing the raw image, the
    predicted mask, and the masked image side by side.

    NOTE(review): depends on the notebook-level globals `model` and
    `test_files`, so it is only usable inside this notebook.
    """

    def on_train_begin(self, logs={}):
        # Per-epoch history accumulators. (The mutable default `logs={}` is
        # a Python foot-gun, but it is never mutated here, so it is harmless.)
        self.i = 0
        self.x = []
        self.losses = []
        self.val_losses = []
        self.acc = []
        self.val_acc = []
        #self.fig = plt.figure()
        self.logs = []

    def on_epoch_end(self, epoch, logs={}):
        # record this epoch's metrics
        self.logs.append(logs)
        self.x.append(self.i)
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.acc.append(logs.get('mean_iou'))
        self.val_acc.append(logs.get('val_mean_iou'))
        self.i += 1
        print('i=',self.i,'loss=',logs.get('loss'),'val_loss=',logs.get('val_loss'),'mean_iou=',logs.get('mean_iou'),'val_mean_iou=',logs.get('val_mean_iou'))
        #choose a random test image and preprocess (same steps as the generator)
        path = np.random.choice(test_files)
        raw = Image.open(f'images/{path}')
        raw = np.array(raw.resize((256, 256)))/255.
        raw = raw[:,:,0:3]
        #predict the mask
        pred = model.predict(np.expand_dims(raw, 0))
        #mask post-processing: threshold at 0.5, replicate to 3 channels
        msk = pred.squeeze()
        msk = np.stack((msk,)*3, axis=-1)
        msk[msk >= 0.5] = 1
        msk[msk < 0.5] = 0
        #show the mask and the segmented image
        combined = np.concatenate([raw, msk, raw* msk], axis = 1)
        plt.axis('off')
        plt.imshow(combined)
        plt.show()
```
# Training
```
# One pass over the data per epoch: steps = samples // batch_size.
train_steps = len(train_files) //batch_size
test_steps = len(test_files) //batch_size
# NOTE(review): ``fit_generator`` is deprecated in TF2 — ``model.fit`` accepts
# generators directly. Kept as-is to preserve behavior on the pinned Keras.
model.fit_generator(train_generator,
                    epochs = 30, steps_per_epoch = train_steps,validation_data = test_generator, validation_steps = test_steps,
                    callbacks = build_callbacks(), verbose = 0)
```
# Testing
```
# Download a sample image and run the trained model on it.
!wget http://r.ddmcdn.com/s_f/o_1/cx_462/cy_245/cw_1349/ch_1349/w_720/APL/uploads/2015/06/caturday-shutterstock_149320799.jpg -O test.jpg
raw = Image.open('test.jpg')
# Same preprocessing as training: resize to 256x256, scale to [0,1], keep RGB.
raw = np.array(raw.resize((256, 256)))/255.
raw = raw[:,:,0:3]
# Predict the mask
pred = model.predict(np.expand_dims(raw, 0))
# Mask post-processing: threshold at 0.5 and replicate to 3 channels
msk = pred.squeeze()
msk = np.stack((msk,)*3, axis=-1)
msk[msk >= 0.5] = 1
msk[msk < 0.5] = 0
# Show the input, the mask and the segmented image side by side
combined = np.concatenate([raw, msk, raw* msk], axis = 1)
plt.axis('off')
plt.imshow(combined)
plt.show()
```
# References
1. http://deeplearning.net/tutorial/unet.html
2. https://github.com/ldenoue/keras-unet
| github_jupyter |
# Accesibilidad rural en Asturias
Este documento explica cómo calcular la accesibilidad rural a hospitales, y otros proveedores de servicios básicos, como bancos o supermercados.
Permite responder a preguntas como:
* ¿Qué porcentaje de asturianos no puede llegar, conduciendo, a su hospital más cercano en 1 hora?
* ¿Cómo favorece la mejora de cierta carretera en el acceso más rápido a servicios básicos?
* ¿Qué pasa si cierta carretera queda bloqueada?
Para responder a estas preguntas, sólo hace falta usar software y datos libres, en concreto:
* [Open Street Map](http://osm.org). Mapa de datos libres, de donde sacamos las carreteras. Es posible conseguir los datos de carreteras oficiales del [Instituto Geográfico Nacional](http://www.ign.es/web/ign/portal), pero el programa que usaremos para obtener los tiempos de conducción usa una nomenclatura de tipos de carretera diferente.
* [Open Street Routing Map](http://osrm.org). Programa de enrutado. Utilizando como base los datos y nomenclatura de OSM es capaz de calcular la ruta más rápida según el tipo de transporte (andando, coche, bici, ...) y la velocidad típica de cada tipo de carretera (autopista, nacional, pavimentada, caleya, ...)
* [Python](). Usamos el lenguaje de programación python, y una serie de librerías dentro de ésta, para calcular todos los datos de accesibilidad.
## Obtener datos OSM
Este paso descarga todas las carreteras de Asturias, y una abundante franja alrededor (para incluir casos donde la mejor ruta para llegar desde algún punto de Asturias a otro incluye salir temporalmente a otra provincia).
### Instalar software
Además de python y jupyter para poder correr este documento, hacen falta un par de utilidades:
* osmconvert:
`wget -O - http://m.m.i24.cc/osmconvert.c | cc -x c - -lz -O3 -o osmconvert`
* osmupdate
`wget -O - http://m.m.i24.cc/osmupdate.c | cc -x c - -o osmupdate`
* [Gdal](http://www.gdal.org/)
* [docker](https://docs.docker.com/install/)
### 1. Descargar los datos de Asturias
Lo más facil es hacer un rectángulo que abarque todo asturias y un poco de Galicia, León y Cantabria. Lo suficiente para incluir las autopistas que quizás sea mejor usar en algunos casos cuando vamos de un sitio a otro de Asturias, e incluir los hospitales que puedan estar más cerca de la gente que vive cerca de la frontera.
[](https://gist.github.com/brunosan/d6be05f3a58136fe7c0e816c684235f0)
En `bash` se puede descargar todo el OSM de esta región facilmente:
```sh
wget https://gist.githubusercontent.com/brunosan/d6be05f3a58136fe7c0e816c684235f0/raw/03386f6f8734320f2bca8d46a4386109afcd0488/map.geojson
ogrinfo map.geojson map | grep Extent
BBOX="-7.822265625000001,42.69858589169842,-3.8671874999999996,43.6599240747891"
#Download the latest OSM data:
DATE=`date '+%Y-%m-%d'`
mkdir data/
cd data/
wget -P data/ -O asturias_$DATE.osm "http://overpass.osm.rambler.ru/cgi/xapi_meta?*[bbox=$BBOX]"
```
**Opcionalmente** Si espacio en disco es un problema, podemos convertir el archivo `.osm` al formato `.o5m`, que en este caso reduce el tamaño de ~1 Gb a ~100 Mb.
```sh
../osmconvert -v asturias_$DATE.osm -o=asturias_$DATE.o5m
```
**Actualizar**. OSM se actualiza constantemente. Si queremos actualizar a la última versión, en vez de descargar todos los datos de nuevo, podemos usar este comando para obtener sólo los cambios. Mucho más rápido.
```sh
../osmupdate -v asturias_$DATE.o5m asturias_${DATE}_tmp.o5m -b=$BBOX
```
### 2. Correr el enrutador
Para obtener el tiempo de viaje, en coche por ejemplo, entre cualquier par de puntos, necesitamos saber cual es la ruta más rápida. Para esto usamos el enrutador [OSRM](https://github.com/Project-OSRM/osrm-backend), con un perfil de coche.
La elección del tipo de transporte es importante porque especifica qué tipo de carreteras puede usar (por ejemplo elimina los caminos peatonales, las vías de tren, ... Además, asigna velocidades típicas en función del tipo de vía o tipo de asfalto que figure en las propiedades del archivo de OSM. Por esto llevaría mucho más tiempo usar los archivos de vías oficiales, ya que el diccionario de tipos de vías y pavimento no es el mismo que OSM; y OSRM está diseñado para usar OSM.
Con `docker` instalado, instalar y correr el enrutador son 3 líneas:
```sh
#transformar los datos de OSM, con el perfil de coche
docker run -t -v $(pwd):/data osrm/osrm-backend osrm-extract -p /opt/car.lua /data/asturias_2018-03-30.o5m
docker run -t -v $(pwd):/data osrm/osrm-backend osrm-partition /data/asturias_2018-03-30.o5m.osrm
docker run -t -v $(pwd):/data osrm/osrm-backend osrm-customize /data/asturias_2018-03-30.o5m.osrm
#Correr el servicio de rutas localmente
docker run -t -i -p 5000:5000 -v $(pwd):/data osrm/osrm-backend osrm-routed --algorithm mld /data/asturias_2018-03-30.o5m.osrm
```
Aunque el servicio ya está funcionando, también es posible usar un visualizador usual, que en este caso, funciona localmente en [http://127.0.0.1:9966](http://127.0.0.1:9966)
```sh
docker run -p 9966:9966 osrm/osrm-frontend
```
[](http://127.0.0.1:9966/?z=11¢er=43.482072%2C-5.717010&loc=43.394020%2C-5.706718&loc=43.523757%2C-6.047233&hl=en&alt=0)
**EXTRA**
OSRM es una herramienta extremadamente compleja, con muchas funcionalidades muy sencillas de usar. Una de ellas es crear una visualización de la velocidad de ruta de cada tramo de carretera, giro y paso. Además, indica --en rosa-- los tramos de carretera que no están conectados a la red general, por ejemplo por un error al introducir las trazas en OSM.
Para explorar esta vista sólo hay que visitar `http://127.0.0.1:9966/debug/`
[](http://127.0.0.1:9966/debug/?z=11¢er=43.482072%2C-5.717010&loc=43.394020%2C-5.706718&loc=43.523757%2C-6.047233&hl=en&alt=0#16.87/43.39366/-5.80220)
```
osrm_server="http://0.0.0.0:5000"
#osrm_server="https://router.project-osrm.org" #Use a OSM-based, global, car, osrm server"
# Make a simple route request between two points.
import requests #http framework to make API requests for routes
import json # handle response as json
import datetime # save timestamp
url=osrm_server+"/route/v1/driving/"
comma="%2C"
sep="%3B"
# Coordinates are stored as [lat, lon]; OSRM expects "lon,lat" in the URL.
origin=[43.394020,-5.706718]
destination=[43.523757,-6.047233]
fullurl=url+str(origin[1])+','+str(origin[0])+";"+str(destination[1])+','+str(destination[0])
response = requests.get(fullurl) #do the request
response.raise_for_status() # ensure we notice bad responses
# BUG FIX: the original line was ``print, fullurl`` — a Python 2 leftover
# that builds a throwaway tuple and prints nothing.
print(fullurl)
# http://localhost:5000/route/v1/driving/-17.418823,14.721761;-13.95813,13.480448'
# Parse the response once instead of twice.
route = json.loads(response.text)['routes'][0]
print("Entre La Corrada y Noreña hay %2.1f km y ~%2.0f minutos en coche"%
      (route['distance']/1000.,
       route['duration']/60.))
#Entre La Corrada y Noreña hay 48.8 km y ~37 minutos en coche
```
### Descargar todas los municipios de Asturias, y su población
El [Instituto Geográfico Nacional](http://www.ign.es/web/ign/portal) tiene un fichero `BD_Municipios-Entidades` que contiene una base de datos de todas las ["Entidades"](https://es.wikipedia.org/wiki/Entidad_singular_de_poblaci%C3%B3n) (ciudades, pueblos, aldeas, ...). El archivo incluye, entre otros, los campos que necesitamos (nombre, localización geográfica y población). El formato es `.obd` pero se puede transformar en `.geojson` (con LibreOffice para pasarlo a `.csv`, y Qgis para pasarlo a `.geojson`) con el que podemos trabajar más facilmente. Por conveniencia he subido los archivos transformados a este [gist](https://gist.github.com/brunosan/f52b12ccf7dfba0158b81646fcc7531c)
```
# Download the data file from the gist if it is not already cached locally.
import sys
import os
# BUG FIX: the original ``import urllib`` does not guarantee that the
# ``urllib.request`` submodule used below is loaded; import it explicitly.
import urllib.request
# Target folder
directory='./data/BD_Municipios-Entidades/'
if not os.path.exists(directory):
    os.makedirs(directory)
file='entidades_asturias.geojson'
url='https://gist.githubusercontent.com/brunosan/f52b12ccf7dfba0158b81646fcc7531c/raw/e20cd38f1d6cf1e803d27a2447f4e5610f48d9dc/entidades_asturias.geojson'
if not os.path.exists(directory+file):
    print("Descargando fichero desde gist.github")
    urllib.request.urlretrieve (url,directory+file)
```
## Calidad del enrutado
Antes de empezar a calcular accesibilidad, podemos comprobar la calidad de los datos y el enrutado. En particular:
* Quitar lugares que no tienen asignada una ubicación
* Quitar lugar sin gente
* Comprobar que nuestro enrutador llega a todas las poblaciones.
```
import geopandas as gpd

# Load the IGN places ("entidades") file for Asturias.
fname = "./data/BD_Municipios-Entidades/entidades_asturias.geojson"
df = gpd.read_file(fname)
df
# Summing the population column reveals a problem: Asturias should have ~1 million
df['POBLACION'].sum()
# The "entidades" are not mutually exclusive (e.g. Oviedo appears several times)
df[df['NOMBRE']=='Oviedo']
# 'Entidad singular' is the closest match to the unit of data we want
lugares=df[df['TIPO']=='Entidad singular']
# Some places carry no geographic location (lon/lat == 0), so drop them
sin_ubicacion=lugares[lugares['lon']==0]
print("Hay %i lugares sin ubicación, que acumulan %i habitantes:\n%s..."%
      (sin_ubicacion['NOMBRE'].count(),sin_ubicacion['POBLACION'].sum(),
       ", ".join(sin_ubicacion['NOMBRE'].values[0:20])))
lugares=lugares[lugares['lon']!=0]
lugares=lugares[lugares['lat']!=0]
# Also drop places with zero registered population
sin_poblacion=lugares[lugares['POBLACION']==0]
# BUG FIX: the original listed ``sin_ubicacion`` names here while reporting
# the ``sin_poblacion`` count — a copy-paste slip; list the right frame.
print("Hay %i lugares sin población:\n%s..."%
      (sin_poblacion['NOMBRE'].count(),
       ", ".join(sin_poblacion['NOMBRE'].values[0:30])))
lugares=lugares[lugares['POBLACION']!=0]
# Use the INE code as a stable index
lugares.set_index("CODIGOINE", inplace=True)
poblacion=lugares['POBLACION'].sum()
num=lugares['POBLACION'].count()
print("Asturias tiene %i núcleos de población, con un total de %i habitantes" %
      (num,poblacion))
```
Suponemos que todas las poblaciones tienen acceso en coche, pero los datos de OSM pueden no estar completos, o puede que realmente no se pueda llegar en coche (como Bulnes). OSRM, cuando pedimos una ruta, también devuelve la distancia entre el punto pedido y la carretera más cercana desde donde empieza la ruta. Esta distancia la llamamos "saltos"

Podemos pedir la ruta entre todos los pueblos y un mismo punto, por ejemplo el aeropuerto que de todas formas usaremos luego, para explorar si existen muchos sitios donde el "salto" inicial de la ruta es demasiado grande.
```
import requests #http framework to make requests for routes
import json # handle response as json
# NOTE(review): ``vincenty`` was removed in geopy >= 2.0; ``geodesic`` is the
# replacement — confirm the pinned geopy version before upgrading.
from geopy.distance import vincenty
import pickle

server=osrm_server+"/route/v1/driving/"
comma="%2C"
Sep="%3B"
trail=".json"
# Reference point (Oviedo), stored as [lon, lat]
oviedo=[-5.84794521331787,
        43.362579045177434]
usar_cache=1
# The full loop over all places is slow; results are cached with pickle.
if (usar_cache) and (os.path.exists('data/lugares.pkl')):
    with open('data/lugares.pkl','rb') as f: # Python 3: open(..., 'rb')
        print("Leyendo cache desde archivo")
        lugares = pickle.load(f)
        lugares
else:
    lugares["salto"]=-1
    max_salto=0
    max_salto_nombre=""
    i=0
    # Route every place to Oviedo only to obtain the snapped start point.
    for index, lugar in lugares.iterrows():
        i=i+1
        fullurl= server+str(lugar.lon)+','+str(lugar.lat)+';'+str(oviedo[1])+','+str(oviedo[0])+trail
        name= index
        response = requests.get(fullurl) # do the request
        response.raise_for_status() # ensure we notice bad responses
        # OSRM returns the snapped route start; [::-1] flips (lon,lat) -> (lat,lon)
        snap=json.loads(response.text)['waypoints'][0]['location'][::-1]
        # "salto" = distance between the place and the nearest routable road
        salto=vincenty(snap,[lugar.lat,lugar.lon]).meters
        lugares.loc[[name], ['salto']]=salto
        #print(salto,":",lugares.loc[[name], ['salto']])
        if salto>max_salto:
            max_salto=salto
            max_salto_nombre=lugar["NOMBRE"]
        if not i % 50: # only print progress every 50 places
            print("%i%% Ruta desde %.10s. Salto: %i meters. [Max %im en %s] "%
                  (int(i/lugares.shape[0]*100),lugar["NOMBRE"],
                   salto,max_salto,max_salto_nombre), end="\r")
    print("")
    # This takes long to run: save the result and prefer reading over recomputing
    with open('data/lugares.pkl', 'wb') as f:
        print("Guardando cache")
        pickle.dump(lugares, f)
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure()
# Log-log histogram of the snap distance ("salto") across all places
lugares['salto'].plot.hist(alpha=0.5,logy=True,logx=True,bins=500,xlim=[10,1e4],cumulative=False,density=False)
plt.ylabel('% de todas las poblaciones')
plt.xlabel('Distancia entre población e inicio de ruta [metros]')
plt.axvline(x=1e3)  # reference line at 1 km
plt.show()
# Places more than 1 km away from the nearest routable road
gran_salto=lugares[lugares['salto']>1e3].sort_values(by='salto',ascending=False)
print("%i poblaciones están a más de 1km de la carretera más cercana, y suman %i personas"%
      (gran_salto['NOMBRE'].count(),gran_salto['POBLACION'].sum()))
gran_salto[['NOMBRE',"POBLACION",'salto','lat','lon']]
%matplotlib notebook
from IPython.display import Image, HTML, display
# Show a satellite thumbnail (Mapbox static API) for each large-gap place
for index,lugar in gran_salto.iterrows():
    print("%s, Salto de %3.0f metros. Población: %i"%(lugar['NOMBRE'],lugar['salto'],lugar['POBLACION']))
    lat=str(lugar['lat'])
    lon=str(lugar['lon'])
    display(Image(url="https://api.mapbox.com/styles/v1/mapbox/satellite-streets-v10/static/pin-s-a+27FF00("+
                  lon+","+lat+")/"+lon+","+lat+",14/200x200?access_token=pk.eyJ1IjoiYnJ1bm9zYW4iLCJhIjoic3FUc1dJWSJ9.v0525WacYWcsHM1KtbZitg"))
```
No parece que haya errores muy grandes, pero en caso de que queramos arreglar los datos, podemos modificar OSM añadiendo las carreteras que puedan faltar, y volver a descargar los datos.
---
### Accesibilidad a destino Unico: Aeropuerto de Ranon
Una vez tenemos todos los datos necesarios, y hemos comprobado su calidad, podemos empezar a trabajar en accesibilidad. Para empezar, a destinos únicos, como el aeropuerto.
```
import numpy as np

server=osrm_server+"/table/v1/driving/"
comma="%2C"
Sep="%3B"
#**********WARNING********
# The router assumes roads with no traffic, no traffic lights and no time for
# route start/end, so results can look too optimistic — especially over short
# distances. If you want raw results, add nothing; otherwise it is a good idea
# to add a few start/end minutes plus a percentage buffer proportional to the
# travel time, which is what the two constants below do.
buffer=10/60. #10 minutes, in hours
overalpenalty=1.05 #5%
batch=500  # places per table-service request
# Single destination: the airport, stored as [lon, lat]
origen=[-6.031837463378906,
        43.55850077671243]
origen_nombre='aeropuerto'
lugares['t_'+origen_nombre]=-1
# Query the travel-time table in batches of ``batch`` places
for i in np.arange(lugares.shape[0]/batch):
    print("Doing batch %i, [%i,%i] of %i"
          %(i,batch*i,batch*(i+1),lugares.shape[0]),end='\r')
    lugares_batch=lugares.iloc[int(batch*i):].head(n=batch)
    # OSRM wants "lon,lat" pairs; the frame stores [lat, lon]
    destinos=";".join([str(i[1])+','+str(i[0]) for i in lugares_batch[['lat','lon']].values])
    destinos_nombre=lugares_batch.index
    trail=".json?sources=0"  # index 0 (the airport) is the only source
    fullurl= server+str(origen[0])+','+str(origen[1])+";"+destinos+trail
    response = requests.get(fullurl)
    response.raise_for_status()
    # durations[0] holds times from the single source; drop the 0th (source->source)
    duraciones=json.loads(response.text)['durations'][0][1:]
    # Convert seconds -> hours and apply buffer + proportional penalty
    for i in np.arange(len(duraciones)):
        lugares.loc[[destinos_nombre[i]], ['t_'+origen_nombre]]=buffer+duraciones[i]/60./60.*overalpenalty
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure()
# Distribution of travel time per place (not population-weighted)
lugares['t_'+origen_nombre].plot.hist(alpha=0.5,bins=500,cumulative=False,density=True)
plt.ylabel('% de todas las poblaciones')
plt.xlabel('Distancia a destino: '+origen_nombre+' [horas]')
plt.show()
plt.figure()
# Cumulative distribution, weighted by population
lugares['t_'+origen_nombre].plot.hist(alpha=0.5,bins=500,cumulative=True,density=True,weights=lugares['POBLACION'])
plt.ylabel('% de población')
plt.xlabel('Distancia a destino: '+origen_nombre+' [horas]')
plt.show()
def weight_array(ar, weights):
    """Expand ``ar`` so each element ``ar[i]`` is repeated ``weights[i]`` times.

    Used to compute population-weighted percentiles with ``np.percentile``.
    ``weights`` must contain non-negative integers.
    """
    # Vectorized O(total) replacement of the original Python append loop,
    # which appended one element per inhabitant.
    return np.repeat(ar, weights).tolist()
# Population-weighted travel-time deciles to the airport
for i in np.arange(10,101,10):
    print("El %i%% de la población en Asturias tarda %2.1f horas en llegar al destino: %s"%
          (i,np.percentile(weight_array(lugares['t_aeropuerto'].values, lugares['POBLACION'].values), i),origen_nombre))
```
**¿Quién tarda 3 horas?**
```
def get_route_link(origen, destino):
    """Print a link to the local OSRM frontend showing the route between
    *origen* and *destino* (both given as [lon, lat])."""
    base = " http://localhost:9966/?"
    loc_a = "loc=" + str(origen[1]) + "%2C" + str(origen[0])
    loc_b = "&loc=" + str(destino[1]) + "%2C" + str(destino[0])
    print(base + loc_a + loc_b + "&hl=en&alt=0")
# The five places farthest (> 3 h) from the airport, with a route link each
lejos=lugares[lugares['t_aeropuerto']>3].sort_values(by='t_aeropuerto',ascending=False)
for index,lugar in lejos.head(n=5).iterrows():
    print("%i personas en %s tardan %2.1f horas en llegar al destino: %s"%
          (lugar["POBLACION"],lugar["NOMBRE"],lugar["t_"+origen_nombre],origen_nombre))
    get_route_link(origen,[lugar['lon'],lugar['lat']])
```
Tiene Sentido

---
### Accesibilidad a tipo de servicio, por ejemplo hospitales
No conozco una forma de descargar la ubicación de todos los hospitales desde una fuente oficial. [El Ministerio de Sanidad tiene un listado completo](https://www.msssi.gob.es/ciudadanos/centrosCA.do), pero con la dirección en texto, no en latitud, longitud.
Para este ejemplo usaré los hospitales en OSM ([`amenity=hospital`](https://overpass-turbo.eu/s/xuR)).
[](https://gist.github.com/brunosan/522c6b59fb24b5519e7a24ecafd9df33)
```
import geopandas as gpd

# Hospitals exported from OSM (amenity=hospital)
fname = "./data/hospitales.geojson"
df = gpd.read_file(fname)
# Use polygon centroids as point coordinates
df['lon']=df.geometry.centroid.x
df['lat']=df.geometry.centroid.y
df['nombre']=df['name'].fillna(df['name_2']) # the name is inconsistently stored in two fields
hospitales=df[['nombre','lon','lat']]
hospitales
```
Para calcular el tiempo de viaje al hospital más cercano a cada localidad, sea cual sea ese hospital, lo que hacemos es **calcular el tiempo de viaje de cada localidad a todos los hospitales**, y nos quedamos con el mínimo. Es lo mismo que el caso anterior, pero en vez de seleccionar un origen, tenemos 78 orígenes (hospitales) y 5966 poblaciones.
```
# All hospital coordinates as one "lon,lat;lon,lat;..." string (OSRM order)
hospitales_loc=";".join([str(i[1])+','+str(i[0]) for i in hospitales[['lat','lon']].values])
hospitales_loc
server=osrm_server+"/table/v1/driving/"
comma="%2C"
Sep="%3B"
batch=100
origen_tipo='hospital'
# t_: travel time to the nearest hospital; m_: index of that hospital
lugares['t_'+origen_tipo]=-1
lugares['m_'+origen_tipo]=-1
for i in np.arange(lugares.shape[0]/batch):
    print("Doing batch %i, [%i,%i] of %i"
          %(i,batch*i,batch*(i+1),lugares.shape[0]),end="\r")
    lugares_batch=lugares.iloc[int(batch*i):].head(n=batch)
    destinos=";".join([str(i[1])+','+str(i[0]) for i in lugares_batch[['lat','lon']].values])
    destinos_nombre=lugares_batch.index
    # Destinations: all hospitals (first indices); sources: this batch of places
    trail=".json?destinations="+\
        ';'.join([str(x) for x in np.arange(len(hospitales))])+\
        "&sources="+\
        ';'.join([str(x) for x in np.arange(len(hospitales),len(hospitales)+len(lugares_batch))])
    fullurl= server+hospitales_loc+";"+destinos+trail
    #print(fullurl)
    response = requests.get(fullurl)
    response.raise_for_status()
    duraciones_matrix=json.loads(response.text)['durations']
    duraciones=[]
    hospital_min=[]
    # Keep, for every place, the time to (and index of) the closest hospital
    for origen in np.arange(np.shape(duraciones_matrix)[0]):
        duraciones+=[min(duraciones_matrix[origen])]
        hospital_min+=[np.argmin(duraciones_matrix[origen])]
    # Convert seconds -> hours and apply the buffer + proportional penalty
    for i in np.arange(len(duraciones)):
        lugares.loc[[destinos_nombre[i]], ['t_'+origen_tipo]]=buffer+duraciones[i]/60./60.*overalpenalty
        lugares.loc[[destinos_nombre[i]], ['m_'+origen_tipo]]=hospital_min[i]
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure()
# Distribution of travel time per place (not population-weighted)
lugares['t_'+origen_tipo].plot.hist(alpha=0.5,bins=500,cumulative=False,density=True)
plt.ylabel('% de todas las poblaciones')
plt.xlabel('Distancia a destino: '+origen_tipo+' [horas]')
plt.show()
plt.figure()
# Cumulative distribution, weighted by population
lugares['t_'+origen_tipo].plot.hist(alpha=0.5,bins=500,cumulative=True,density=True,weights=lugares['POBLACION'])
plt.ylabel('% de población')
plt.xlabel('Distancia a destino: '+origen_tipo+' [horas]')
plt.show()
# Population-weighted deciles, in minutes
for i in np.arange(10,101,10):
    print("El %i%% de la población en Asturias tarda %2.1f minutos en llegar al destino: %s"%
          (i,np.percentile(weight_array(lugares['t_'+origen_tipo].values*60, lugares['POBLACION'].values), i),origen_tipo))
# Worst five places (> 1 h), with a link to inspect the route taken
lejos=lugares[lugares['t_'+origen_tipo]>1].sort_values(by='t_'+origen_tipo,ascending=False)
for index,lugar in lejos.head(n=5).iterrows():
    print("%i personas en %s tardan %2.1f horas en llegar al destino: %s"%
          (lugar["POBLACION"],lugar["NOMBRE"],lugar["t_"+origen_tipo],origen_tipo))
    get_route_link([hospitales.loc[lugar['m_hospital']]['lon'],hospitales.loc[lugar['m_hospital']]['lat']],
                   [lugar['lon'],lugar['lat']])
lugares.to_file("data/lugares.geojson", driver="GeoJSON")
```

### Analisis de cambios
¿Qué pasa si mejoramos o empeoramos una carretera? Qué pasa si añadimos o quitamos un hospital?
Para hacer un ejemplo rápido, quitemos el hospital de Cangas de Narcea, y repitamos el estudio
```
# What-if analysis: remove the Hospital Comarcal de Narcea and recompute.
hospitales[hospitales['nombre']=='Hospital Comarcal de Narcea']
hospitales=hospitales[hospitales['nombre']!='Hospital Comarcal de Narcea']
hospitales_loc=";".join([str(i[1])+','+str(i[0]) for i in hospitales[['lat','lon']].values])
# NOTE(review): the rest of this cell is a near-verbatim copy of the previous
# hospital-accessibility cell — extracting a shared function would avoid the
# duplication.
server=osrm_server+"/table/v1/driving/"
comma="%2C"
Sep="%3B"
batch=100
origen_tipo='hospital'
lugares['t_'+origen_tipo]=-1
lugares['m_'+origen_tipo]=-1
for i in np.arange(lugares.shape[0]/batch):
    print("Doing batch %i, [%i,%i] of %i"
          %(i,batch*i,batch*(i+1),lugares.shape[0]),end="\r")
    lugares_batch=lugares.iloc[int(batch*i):].head(n=batch)
    destinos=";".join([str(i[1])+','+str(i[0]) for i in lugares_batch[['lat','lon']].values])
    destinos_nombre=lugares_batch.index
    # Destinations: the remaining hospitals; sources: this batch of places
    trail=".json?destinations="+\
        ';'.join([str(x) for x in np.arange(len(hospitales))])+\
        "&sources="+\
        ';'.join([str(x) for x in np.arange(len(hospitales),len(hospitales)+len(lugares_batch))])
    fullurl= server+hospitales_loc+";"+destinos+trail
    #print(fullurl)
    response = requests.get(fullurl)
    response.raise_for_status()
    duraciones_matrix=json.loads(response.text)['durations']
    duraciones=[]
    hospital_min=[]
    # Minimum travel time (and hospital index) per place
    for origen in np.arange(np.shape(duraciones_matrix)[0]):
        duraciones+=[min(duraciones_matrix[origen])]
        hospital_min+=[np.argmin(duraciones_matrix[origen])]
    for i in np.arange(len(duraciones)):
        lugares.loc[[destinos_nombre[i]], ['t_'+origen_tipo]]=buffer+duraciones[i]/60./60.*overalpenalty
        lugares.loc[[destinos_nombre[i]], ['m_'+origen_tipo]]=hospital_min[i]
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure()
lugares['t_'+origen_tipo].plot.hist(alpha=0.5,bins=500,cumulative=False,density=True)
plt.ylabel('% de todas las poblaciones')
plt.xlabel('Distancia a destino: '+origen_tipo+' [horas]')
plt.show()
plt.figure()
lugares['t_'+origen_tipo].plot.hist(alpha=0.5,bins=500,cumulative=True,density=True,weights=lugares['POBLACION'])
plt.ylabel('% de población')
plt.xlabel('Distancia a destino: '+origen_tipo+' [horas]')
plt.show()
# Population-weighted deciles, in minutes
for i in np.arange(10,101,10):
    print("El %i%% de la población en Asturias tarda %2.1f minutos en llegar al destino: %s"%
          (i,np.percentile(weight_array(lugares['t_'+origen_tipo].values*60, lugares['POBLACION'].values), i),origen_tipo))
lugares.to_file("data/lugares2.geojson", driver="GeoJSON")
```
Con todos los hospitales:

---
Con el Hospital Comarcal de Narcea:

**Nótese que los porcentajes de población afectados son muy pocos, son zonas poco pobladas y muy dispersas**
| github_jupyter |
# Voltammetry Simulations
From Compton *et al.* "Understanding voltammetry: simulation of electrode processes", 2014
## Cyclic Voltammogram (reversible)
```
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
%matplotlib widget
def cv_rev(sigma):
    """Simulate a reversible cyclic voltammogram for scan rate ``sigma``.

    Solves the 1-D diffusion problem with an implicit scheme (Thomas
    algorithm) and a Nernstian surface boundary condition. Two full scan
    cycles are run; only the second is recorded.

    Returns:
        (Thetas, fluxes): dimensionless potential and flux arrays of
        length ``m`` (one value per recorded timestep).
    """
    # Simulation parameters (dimensionless potential limits and grid steps)
    theta_i = 10.0
    theta_v = -10.0
    deltaX = 2e-3
    deltaTheta = 0.02
    # Derived quantities
    deltaT = deltaTheta / sigma
    maxT = 2 * np.abs(theta_v - theta_i) / sigma
    maxX = 6*np.sqrt(maxT)  # diffusion-layer extent
    n = int( maxX / deltaX ) # number of spacesteps
    m = int( maxT / deltaT ) # number of timesteps
    # Thomas coefficients ("wambda" avoids shadowing the lambda keyword)
    wambda = deltaT / (deltaX**2)
    alpha = -wambda
    beta = 2.0*wambda + 1.0
    gamma = -wambda
    # Create containers
    g_mod = np.zeros(n)
    C = np.zeros(n)      # concentration profile
    Thetas = np.zeros(m)
    fluxes = np.zeros(m)
    # Modified gamma coefficients are time-independent: compute them once
    g_mod[0] = 0 # boundary condition
    for i in range(1,n):
        g_mod[i] = gamma / (beta - g_mod[i-1] * alpha)
    # BEGIN SIMULATION
    # NOTE: the original carried dead ``i+=1``/``k+=1`` statements left over
    # from a while-loop conversion; Python for-loops rebind the index each
    # iteration, so they were inert and have been removed.
    Theta = theta_i
    for k in tqdm(range(m*2)):
        if( k%m < m / 2 ):
            Theta -= deltaTheta
        else:
            Theta += deltaTheta
        # Forward sweep - create modified deltas
        C[0] = (1.0 / (1.0 + np.exp(-Theta)))  # Nernstian surface condition
        for i in range(1,n-1):
            C[i] = (( C[i] - C[i-1] * alpha ) / ( beta - g_mod[i-1] * alpha ))
        # Back Substitution
        C[n-1] = 1.0  # bulk boundary condition
        for i in np.arange(n-2,-1,-1):
            C[i] = C[i] - g_mod[i] * C[i+1]
        # Output flux (current) via a three-point forward difference
        flux = -(-C[2] + 4*C[1] -3*C[0]) / (2*deltaX)
        if(k>=m):  # record only the second cycle
            fluxes[k%m] = flux
            Thetas[k%m] = Theta
    return Thetas, fluxes
# END SIMULATION
# Overlay voltammograms for three scan rates (sigma)
Thetas, Fluxes = cv_rev(100)
plt.plot(Thetas, Fluxes)
Thetas, Fluxes = cv_rev(1000)
plt.plot(Thetas, Fluxes)
Thetas, Fluxes = cv_rev(10000)
plt.plot(Thetas, Fluxes)
```
## Cyclic Voltammogram (irreversible)
```
def cv_irrev(K_0):
    """Simulate a cyclic voltammogram with irreversible (Butler-Volmer)
    kinetics for dimensionless rate constant ``K_0``.

    Two full scan cycles are run; only the second is recorded.
    Returns (Thetas, fluxes), each of length ``m``.
    """
    # Simulation parameters (dimensionless)
    theta_i = 10.0
    theta_v = -10.0
    sigma = 10e3
    deltaX = 2e-3
    deltaTheta = 0.02
    alpha_BV = 0.5       # Butler-Volmer transfer coefficient
    C_Abulk = 0.5        # bulk concentration of species A
    C_Bbulk = 1 - C_Abulk
    h = deltaX
    def f_BV(Theta):
        # Butler-Volmer exponential factor
        return np.exp(-alpha_BV*Theta)
    # Derived quantities
    deltaT = deltaTheta / sigma
    maxT = 2 * np.abs(theta_v - theta_i) / sigma
    maxX = 6*np.sqrt(maxT)
    n = int( maxX / deltaX ) # number of spacesteps
    m = int( maxT / deltaT ) # number of timesteps
    # Thomas coefficients, initialised at theta_i (beta/delta are recomputed
    # at each potential inside the time loop)
    alpha = 0
    beta = 1 + h*f_BV(theta_i)*K_0*(1+np.exp(theta_i))
    gamma = -1
    delta = h*f_BV(theta_i)*K_0*np.exp(theta_i)
    # Create containers
    b_mod = np.zeros(n)
    d_mod = np.zeros(n)
    g_mod = np.zeros(n)
    C_A = np.zeros(n)    # concentration profile of A
    C_B = np.zeros(n)    # concentration profile of B
    Thetas = np.zeros(m)
    fluxes = np.zeros(m)
    # Modify beta, delta coefficients
    # NOTE(review): the original comments read "boundary condition?" — the
    # role of b_mod/d_mod is unclear; they are written here but never read.
    b_mod[0] = beta
    d_mod[0] = delta
    # BEGIN SIMULATION: two cycles, second one recorded
    Theta = theta_i
    for k in tqdm(range(m*2)) :
        if( k%m < m / 2 ):
            Theta -= deltaTheta
        else:
            Theta += deltaTheta
        g_mod[0] = 0 # boundary condition
        for i in range(1,n):
            g_mod[i] = gamma / (beta - g_mod[i-1] * alpha)
            i+=1  # NOTE(review): inert — the for-loop manages ``i`` itself
        # Forward sweep - create modified deltas
        # NOTE(review): a Nernstian surface value for C_A[0] looks
        # inconsistent with irreversible kinetics — verify against the
        # reference (Compton et al.) before trusting these results.
        C_A[0] = (1.0 / (1.0 + np.exp(-Theta)))
        for i in range(1,n-1):
            # Butler-Volmer coefficients depend on the current potential
            beta = 1 + h*f_BV(Theta)*K_0*(1+np.exp(Theta))
            delta = h*f_BV(Theta)*K_0*np.exp(Theta)
            C_A[i] = C_A[i-1] * beta - delta * C_Bbulk
            C_B[i] = 1 + C_Bbulk - C_A[i]  # mass conservation
            i+=1  # NOTE(review): inert
        # Back Substitution
        C_A[n-1] = C_Abulk   # bulk boundary condition
        C_B[n-1] = 1 + C_Bbulk - C_A[n-1]
        for i in np.arange(n-2,-1,-1):
            C_A[i] = C_A[i] - g_mod[i] * C_A[i+1]
            i-=1  # NOTE(review): inert
        # Output flux (current) from the surface gradient
        flux = (C_A[1] - C_A[0]) / h
        if(k>=m):  # record only the second cycle
            fluxes[k%m] = flux
            Thetas[k%m] = Theta
        k+=1  # NOTE(review): inert
    return Thetas, fluxes
# END SIMULATION
# Example runs at other rate constants (kept for reference).
# NOTE(review): the commented calls reference ``sim``; the function defined
# above is actually named ``cv_irrev``.
# Thetas, Fluxes = sim(10)
# plt.plot(Thetas, Fluxes)
# Thetas, Fluxes = sim(100)
# plt.plot(Thetas, Fluxes)
Thetas, Fluxes = cv_irrev(.1)
plt.plot(Thetas, Fluxes)
```
| github_jupyter |
```
# initial setup
try:
# settings colab:
import google.colab
except ModuleNotFoundError:
# settings local:
%run "../../../common/0_notebooks_base_setup.py"
```
---
<img src='../../../common/logo_DH.png' align='left' width=35%/>
#### Distribución Poisson
La distribución Poisson cuenta **cantidad de eventos en un período de tiempo dado**.
Podemos pensar esta distribución como la probabilidad de que ocurra un determinado número de eventos durante cierto período de tiempo.
Sea la variable aleatoria discreta X el número de veces que determinado evento ocurre en un intervalo de tiempo o espacio.
Entonces X puede ser una variable Poisson que toma valores $x=0,1,2,…$ si cumple con las siguientes condiciones:
1. El número de eventos que ocurren en períodos de tiempo, sin superposición entre períodos, es independiente.
2. La probabilidad de exactamente un evento en un intervalo de tiempo corto de duración h=1/n es aproximadamente h*λ donde n es la cantidad de intervalos dentro del período considerado.
3. La probabilidad de exactamente dos o más eventos en un intervalo de tiempo corto es esencialmente cero.
Si se cumplen esas condiciones X es una variable aleatoria que sigue un proceso de Poisson aproximado con parámetro $\lambda > 0$ por lo que su función de probabilidad puntual es
\begin{equation}
P(X = k) = \frac{\lambda^k . e^{-\lambda}}{k!} \\
\lambda > 0, \hspace{0.2cm} k = 0, 1, 2, ...
\end{equation}
Se puede mostrar que $\lambda$ es la media y la varianza de una variable Poisson.
<div>
<div>Función de densidad de probabilidad:</div><div>
<img src="img/prob_poisson.png" width="500"/></div>
</div>
**Ejemplos**:
* X: Cantidad de pacientes que ingresan en la guardia de un hospital en una hora
* X: Cantidad de autos que pasan por una cabina de peaje
* X: Cantidad de llamados que llegan a un operador en un call center en la mañana.
---
Vamos a ver ahora cómo generar datos con esta distribución de probabilidad.
Necesitamos un generador de números aleatorios, que expone métodos para generar números aleatorios con alguna distribución de probabilidad especificada. Construimos este generador de este modo `np.random.default_rng()`
https://docs.scipy.org/doc/numpy/reference/random/generator.html
Estas son las distribuciones de probabilidad disponibles:
https://docs.scipy.org/doc/numpy/reference/random/generator.html#distributions
Vamos a generar datos con distribución empleando el método `poisson`
https://docs.scipy.org/doc/numpy/reference/random/generated/numpy.random.Generator.poisson.html#numpy.random.Generator.poisson
```
import seaborn as sns

def distribution_plotter(data, label, bins=None):
    """Plot the distribution of ``data`` as a histogram with a KDE overlay,
    titled 'Distribucion de <label>'."""
    sns.set(rc={"figure.figsize": (10, 7)})
    sns.set_style("white")
    # NOTE(review): ``sns.distplot`` is deprecated since seaborn 0.11;
    # ``histplot``/``displot`` are the modern replacements.
    dist = sns.distplot(data, bins= bins, hist_kws={'alpha':0.2}, kde_kws={'linewidth':5})
    dist.set_title('Distribucion de ' + label + '\n', fontsize=16)
import numpy as np

# Generator-based API (numpy >= 1.17): ``default_rng`` exposes ``poisson``.
random_generator = np.random.default_rng()
lambda_value = 10     # mean (and variance) of the Poisson distribution
sample_size = 10000
random_poisson_data = random_generator.poisson(lam=lambda_value, size = sample_size)
distribution_plotter(random_poisson_data, "Poisson")
```
#### Referencias
Gráficos: https://es.wikipedia.org/wiki/Distribuci%C3%B3n_de_Poisson
| github_jupyter |
```
%matplotlib inline
import numpy as np
np.random.seed(42)  # reproducible runs
%reload_ext autoreload
%autoreload 2
PROJECT_DIR = "/Users/raghav/envPython3/experiments/one_class_neural_networks/"
import sys,os
# NOTE(review): numpy is imported twice in this cell; harmless but redundant.
import numpy as np
# Make project-local packages (src.*) importable
sys.path.append(PROJECT_DIR)
```
## Obtain Training and Test Datasets
```
## Obtaining the training and testing data
%reload_ext autoreload
%autoreload 2
from src.models.RCAE import RCAE_AD

# MNIST configuration: 28x28 grayscale images, flattened to 784 features
DATASET = "mnist"
IMG_DIM= 784
IMG_HGT =28
IMG_WDT=28
IMG_CHANNEL=1
HIDDEN_LAYER_SIZE= 32
MODEL_SAVE_PATH = PROJECT_DIR + "/models/MNIST/RCAE/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/MNIST/RCAE/"
PRETRAINED_WT_PATH = ""  # empty: no pretrained weights, train from scratch
rcae = RCAE_AD(DATASET,IMG_DIM, HIDDEN_LAYER_SIZE, IMG_HGT, IMG_WDT,IMG_CHANNEL, MODEL_SAVE_PATH, REPORT_SAVE_PATH,PRETRAINED_WT_PATH)
# Sanity-check the dataset splits exposed by the RCAE_AD wrapper
print("Train Data Shape: ",rcae.data._X_train.shape)
print("Train Label Shape: ",rcae.data._y_train.shape)
print("Validation Data Shape: ",rcae.data._X_val.shape)
print("Validation Label Shape: ",rcae.data._y_val.shape)
print("Test Data Shape: ",rcae.data._X_test.shape)
print("Test Label Shape: ",rcae.data._y_test.shape)
print("===========TRAINING AND PREDICTING WITH RCAE============================")
rcae.fit_and_predict()
print("========================================================================")
```
### Pretrain Autoencoder
```
%reload_ext autoreload
%autoreload 2
DATASET = "MNIST"
IMG_DIM= 784
IMG_HGT =28
IMG_WDT=28
IMG_CHANNEL=1
HIDDEN_LAYER_SIZE= 128
MODEL_SAVE_PATH = PROJECT_DIR + "/models/MNIST/Deep_SVDD/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/MNIST/Deep_SVDD/"
PRETRAINED_WT_PATH = ""
## Prepare the data for pretraining CAE
x_train = trainX.reshape((len(trainX), 28, 28, 1))
x_trainForWtInit= x_train
test_ones = test_ones.reshape((len(test_ones), 28, 28, 1))
test_sevens = test_sevens.reshape((len(test_sevens), 28, 28, 1))
x_test = np.concatenate((test_ones,test_sevens))
print("Reshaped Training samples for CAE",x_train.shape)
print("Reshaped Testing samples for CAE",x_test.shape)
from src.models.Deep_SVDD import Deep_SVDD
deep_svdd = Deep_SVDD(DATASET,x_trainForWtInit,IMG_DIM,HIDDEN_LAYER_SIZE,IMG_HGT,IMG_WDT,IMG_CHANNEL,MODEL_SAVE_PATH,REPORT_SAVE_PATH,PRETRAINED_WT_PATH)
Deep_SVDD()
```
## Train and Test FF_NN Model Supervised Model
```
%reload_ext autoreload
%autoreload 2
IMG_DIM= 784
IMG_HGT =28
IMG_WDT=28
IMG_DEPTH=1
HIDDEN_LAYER_SIZE=196
MODEL_SAVE_PATH = PROJECT_DIR + "/models/MNIST/FF_NN/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/MNIST/FF_NN/"
print("[INFO]",train_Anomaly_X.shape[0],"Anomalous Samples Appended to training set")
data_train = np.concatenate((trainX,train_Anomaly_X),axis=0)
data_train_label = np.concatenate((trainY,train_Anomaly_Y),axis=0)
print("[INFO]",data_train.shape[0],"Training Samples Contains both 1's and 7s")
nClass =2
NUM_EPOCHS = 100
clf_FF_NN = FF_NN(IMG_DIM,HIDDEN_LAYER_SIZE,IMG_HGT,IMG_WDT,MODEL_SAVE_PATH,REPORT_SAVE_PATH)
clf_FF_NN.fit(data_train,data_train_label,NUM_EPOCHS,IMG_HGT,IMG_WDT,IMG_DEPTH,nClass)
## Predict the scores
auc_FF_NN = clf_FF_NN.score(test_ones,label_ones,test_sevens,label_sevens)
print("===========")
print("AUC: ",auc_FF_NN)
print("===========")
```
## FakeNoise FF_NN Model
```
## Fake Noise data to be generated which will be added to the training set before training
%reload_ext autoreload
%autoreload 2
IMG_DIM= 784          # flattened 28x28 image
IMG_HGT =28
IMG_WDT=28
IMG_DEPTH=1
HIDDEN_LAYER_SIZE=196
MODEL_SAVE_PATH = PROJECT_DIR + "/models/MNIST/FAKE_NOISE_FF_NN/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/MNIST/FAKE_NOISE_FF_NN/"
from src.models.Fake_Noise_FF_NN import Fake_Noise_FF_NN
## Remove the Anomalous data and instead add Noise
# NOTE(review): `createData`, `NUM_EPOCHS` and `nClass` must be defined by
# earlier cells (FF_NN cell sets NUM_EPOCHS=100, nClass=2) -- confirm order.
X_Noise,X_NoiseLabel = createData.get_FAKE_Noise_MNIST_TrainingData(trainX)
print("[INFO]",X_Noise.shape[0],"Noise Samples Appended for training set")
data_train = np.concatenate((trainX,X_Noise),axis=0)
data_train_label = np.concatenate((trainY,X_NoiseLabel),axis=0)
clf_FakeNoise_FF_NN = Fake_Noise_FF_NN(IMG_DIM,HIDDEN_LAYER_SIZE,IMG_HGT,IMG_WDT,MODEL_SAVE_PATH,REPORT_SAVE_PATH)
clf_FakeNoise_FF_NN.fit(data_train,data_train_label,NUM_EPOCHS,IMG_HGT,IMG_WDT,IMG_DEPTH,nClass)
# Predict the scores
auc_FAKENOISE_FF_NN = clf_FakeNoise_FF_NN.score(test_ones,label_ones,test_sevens,label_sevens)
print("===========")
print("AUC: ",auc_FAKENOISE_FF_NN)
print("===========")
```
## SKlearn OCSVM
```
%reload_ext autoreload
%autoreload 2
##create the classifier
from src.models.ocsvmSklearn import OCSVM
IMG_HGT =28
IMG_WDT=28
ocsvm = OCSVM(IMG_HGT,IMG_WDT)
nu= 0.01
kernel = 'linear'
clf = ocsvm.fit(trainX,nu,kernel)
res = ocsvm.score(clf,test_ones,test_sevens)
auc_OCSVM_linear = res
print("="*35)
print("AUC:",res)
print("="*35)
kernel = 'rbf'
clf = ocsvm.fit(trainX,nu,kernel)
res = ocsvm.score(clf,test_ones,test_sevens)
auc_OCSVM_rbf = res
print("="*35)
print("AUC:",res)
print("="*35)
```
## OC-NN
```
##create the classifier
## Instantiate the object and call the function
%reload_ext autoreload
%autoreload 2
# One-class neural network (OC-NN) experiment, warm-started from the
# supervised FF_NN weights saved by the earlier cell.
DATASET= "MNIST"
IMG_DIM= 784          # flattened 28x28 image
IMG_HGT =28
IMG_WDT=28
IMG_DEPTH=1
HIDDEN_LAYER_SIZE=196
nClass=2
MODEL_SAVE_PATH = PROJECT_DIR + "/models/MNIST/OC_NN/"
REPORT_SAVE_PATH = PROJECT_DIR + "/reports/figures/MNIST/OC_NN/"
PRE_TRAINED_WT_PATH = PROJECT_DIR +"/models/MNIST/FF_NN/"
from src.models.OC_NN import OC_NN
import keras
ocnn = OC_NN(DATASET,IMG_DIM,HIDDEN_LAYER_SIZE,IMG_HGT,IMG_WDT,MODEL_SAVE_PATH,REPORT_SAVE_PATH,PRE_TRAINED_WT_PATH)
nu= 0.01              # quantile parameter of the OC-NN objective
NUM_EPOCHS = 100
ocnn.fit(trainX,nu,NUM_EPOCHS,IMG_HGT,IMG_WDT,IMG_DEPTH,nClass)
res = ocnn.score(test_ones,test_sevens)
auc_OCNN = res
print("="*35)
print("AUC:",res)
print("="*35)
```
## Comparing AUC scores of various methods
```
import pandas as pd
import matplotlib.pyplot as plt
# Collect the AUC score of every method into one table and plot a bar chart.
# FIX: removed the redundant intermediate numpy `auc` array -- it merely
# duplicated `aucList` element-by-element and was never read afterwards.
aucList = [auc_FF_NN,auc_FAKENOISE_FF_NN, auc_OCSVM_linear,auc_OCSVM_rbf, auc_OCNN]
index = ['FF_NN', 'Fake_NN', 'OCSVM_L','OCSVM_rbf','OCNN']
df = pd.DataFrame({'auc': aucList}, index=index)
ax = df.plot.bar(rot=0)
plt.ylabel('AUC')
plt.xlabel('Methods')
# FIX: corrected the "Comparision" typo in the user-facing plot title.
plt.title('AUC Comparison for MNIST Dataset')
```
| github_jupyter |
```
import sys
import json
import time
from datetime import datetime
from pathlib import Path
from dateutil import parser
import requests
import fiona
import shapely.geometry as geom
import pandas as pd
import geopandas as gpd
# Project layout: this notebook lives one level below the repository root,
# so '..' is the root and '../data/' holds the datasets.
DIR = Path('..')
sys.path.append(str(DIR))
DATA_DIR = DIR/'data/'
%load_ext autoreload
%autoreload 2
def decode(encoded):
    """Decode an encoded polyline string into a list of [lon, lat] pairs.

    Uses the Google polyline algorithm with Valhalla's six-digit precision
    (1e-6). Coordinates are flipped from the encoded lat,lon order to the
    more standard lon,lat order.
    """
    precision = 1.0 / 1e6  # six degrees of precision in valhalla
    coords = []
    lat = 0
    lon = 0
    index = 0
    length = len(encoded)
    # Each point is stored as two varint-encoded deltas (lat, then lon).
    while index < length:
        deltas = []
        for _ in (0, 1):
            result = 0
            shift = 0
            # Accumulate 5-bit groups until a byte below 0x20 terminates the value.
            while True:
                byte = ord(encoded[index]) - 63
                index += 1
                result |= (byte & 0x1f) << shift
                shift += 5
                if byte < 0x20:
                    break
            # Zig-zag decode: low bit is the sign.
            deltas.append(~(result >> 1) if result & 1 else result >> 1)
        # Values are offsets from the previous point.
        lat += deltas[0]
        lon += deltas[1]
        coords.append([float('%.6f' % (lon * precision)),
                       float('%.6f' % (lat * precision))])
    return coords
def route(
        locations_gdf,            # a pair of locations in GeoDataFrame format
        mode='TRANSIT,WALK',
        trip_id = '',             # currently unused; kept for interface stability
        date_time = None,
        control_vars = None,      # a dictionary of control variables (currently unused)
        api_key = ''):
    """Query a local OpenTripPlanner instance for a trip plan.

    Routes from the first to the last geometry in ``locations_gdf`` and
    returns a GeoDataFrame with one row per itinerary leg (id, mode,
    endpoints, distance, duration, start/end time and leg geometry).
    """
    # FIX: the defaults were `date_time=datetime.now()` and
    # `control_vars=dict()`, which are evaluated once at import time
    # (stale clock, shared mutable dict). Resolve them per call instead.
    if date_time is None:
        date_time = datetime.now()
    if control_vars is None:
        control_vars = dict()
    # Convert the departure time into text.
    # FIX: the original bound the time string to `t` and then immediately
    # reassigned `t` to the destination geometry, so the query's "time"
    # field was sent a shapely Point instead of the time string.
    time_text = date_time.strftime("T%H:%M")
    date_text = date_time.strftime("%d-%b-%Y")
    # Get the origin and destination geometries from locations_gdf.
    f = locations_gdf['geometry'].iat[0]
    dest = locations_gdf['geometry'].iat[-1]
    # NOTE(review): this sends "x, y" (lon, lat); confirm the OTP router is
    # configured to accept that ordering for fromPlace/toPlace.
    f_text = "{0}, {1}".format(f.x, f.y)
    t_text = "{0}, {1}".format(dest.x, dest.y)
    # Send the query to the OTP REST API.
    url = 'http://localhost:8080/otp/routers/default/plan'
    query = {
        "fromPlace":f_text,
        "toPlace":t_text,
        "time":time_text,
        "date":date_text,
        "mode":mode,
        "maxWalkDistance":"1000",
        "arriveBy":"false",
        "wheelchair":"false",
        "locale":"en"}
    r = requests.get(url, params=query)
    # Convert the request output to a GeoDataFrame (first itinerary only).
    legs = r.json()['plan']['itineraries'][0]['legs']
    legs_list = list()
    for i, leg in enumerate(legs):
        items = [
            'from',
            'to',
            'distance',
            'duration',
            'startTime',
            'endTime',
            'mode',
            'legGeometry']
        # select only the necessary items
        l = {k: leg[k] for k in items}
        # add leg id
        l['leg_id'] = i
        # add leg geometry, decoded from the encoded polyline
        l['geometry'] = geom.LineString(decode(leg['legGeometry']['points']))
        l.pop('legGeometry', None)
        # add origin and destination stop ids, falling back to place names
        if 'stop_id' in l['from']:
            l['from_name'] = l['from']['stop_id']
        else:
            l['from_name'] = l['from']['name']
        if 'stop_id' in l['to']:
            l['to_name'] = l['to']['stop_id']
        else:
            l['to_name'] = l['to']['name']
        # replace the 'from'/'to' dicts with their point locations
        l['from'] = geom.Point(l['from']['lon'], l['from']['lat'])
        l['to'] = geom.Point(l['to']['lon'], l['to']['lat'])
        # convert to a one-row dataframe
        l_df = pd.DataFrame(pd.Series(l)).T
        # fix the field order
        field_order = [
            'leg_id',
            'mode',
            'from',
            'from_name',
            'to',
            'to_name',
            'distance',
            'duration',
            'startTime',
            'endTime',
            'geometry']
        l_df = l_df[field_order]
        legs_list.append(l_df)
    legs_df = pd.concat(legs_list).reset_index(drop=True)
    legs_gdf = gpd.GeoDataFrame(legs_df)
    return legs_gdf
# Scratch cell: repeat the leg-flattening logic on the *second* itinerary
# (index 1) of the last response, without wrapping the result.
# NOTE(review): `r` is not defined at this scope -- inside `route()` it was
# a local variable. This cell only works after an ad-hoc request binds `r`.
legs = r.json()['plan']['itineraries'][1]['legs']
legs[0]
legs_list = list()
for i, leg in enumerate(legs):
    items = [
        'from',
        'to',
        'distance',
        'duration',
        'startTime',
        'endTime',
        'mode',
        'legGeometry']
    #select only necessary items
    l = {k: leg[k] for k in items}
    #add leg id
    l['leg_id'] = i
    #add leg geometry
    l['geometry'] = geom.LineString(decode(leg['legGeometry']['points']))
    l.pop('legGeometry', None)
    #add origin and destination stops
    if 'stop_id' in l['from']:
        l['from_name']=l['from']['stop_id']
    else:
        l['from_name'] = l['from']['name']
    if 'stop_id' in l['to']:
        l['to_name']=l['to']['stop_id']
    else:
        l['to_name'] = l['to']['name']
    #fix from and to to their locations
    l['from'] = geom.Point(l['from']['lon'], l['from']['lat'])
    l['to'] = geom.Point(l['to']['lon'], l['to']['lat'])
    #convert to dataframe
    l_df = pd.DataFrame(pd.Series(l)).T
    #fix the field order
    field_order = [
        'leg_id',
        'mode',
        'from',
        'from_name',
        'to',
        'to_name',
        'distance',
        'duration',
        'startTime',
        'endTime',
        'geometry']
    l_df = l_df[field_order]
    legs_list.append(l_df)
pd.concat(legs_list).reset_index(drop=True)
url = 'http://localhost:8080/otp/routers/default/isochrone'
query = {
"fromPlace":"-36.850504993712725,174.76690292358398",
"date":"2015/12/24",
"time":"8:00am",
"date":"12-24-2015",
"mode":"TRANSIT,WALK",
"cutoffSec":"1200",
}
#params = {'json': json.dumps(query)}
%time r = requests.get(url, params=query)
r.json()
```
| github_jupyter |
# PyTorch Metric Learning
See the documentation [here](https://kevinmusgrave.github.io/pytorch-metric-learning/)
## Install the packages
```
!pip install pytorch-metric-learning
!pip install -q faiss-gpu
!git clone https://github.com/akamaster/pytorch_resnet_cifar10
```
## Import the packages
```
%matplotlib inline
from pytorch_resnet_cifar10 import resnet # pretrained models from https://github.com/akamaster/pytorch_resnet_cifar10
from pytorch_metric_learning.utils.inference import MatchFinder, InferenceModel
from pytorch_metric_learning.distances import CosineSimilarity
from pytorch_metric_learning.utils import common_functions as c_f
from torchvision import datasets, transforms
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import matplotlib.pyplot as plt
import numpy as np
```
## Create helper functions
```
def print_decision(is_match):
    """Print whether the matcher judged the pair to be the same class."""
    print("Same class" if is_match else "Different class")
# ImageNet normalization statistics used by the CIFAR transform below.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# Inverse of Normalize(mean, std): maps normalized tensors back to [0, 1]
# pixel values so they can be displayed.
inv_normalize = transforms.Normalize(
    mean= [-m/s for m, s in zip(mean, std)],
    std= [1/s for s in std]
)
def imshow(img, figsize=(8, 4)):
    """Un-normalize a CHW image tensor and display it with matplotlib."""
    restored = inv_normalize(img)
    pixels = restored.numpy()
    plt.figure(figsize=figsize)
    # matplotlib expects HWC ordering; torch tensors are CHW.
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
```
## Create the dataset and load the trained model
```
# CIFAR-10 test split, normalized with the statistics defined above.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=mean, std=std)])
dataset = datasets.CIFAR10(root="CIFAR10_Dataset", train=False, transform=transform, download=True)
# Map each class label to the indices of its samples.
labels_to_indices = c_f.get_labels_to_indices(dataset.targets)
# Load the pretrained ResNet-20 checkpoint (saved under DataParallel, hence
# the wrapper and the `module.` attribute below).
model = torch.nn.DataParallel(resnet.resnet20())
checkpoint = torch.load("pytorch_resnet_cifar10/pretrained_models/resnet20-12fca82f.th")
model.load_state_dict(checkpoint['state_dict'])
# Replace the classification head with an identity so the model emits
# embeddings instead of class logits.
model.module.linear = c_f.Identity()
model.to(torch.device("cuda"))
print("done model loading")
```
## Create the InferenceModel wrapper
```
# Pairs whose embedding cosine similarity exceeds 0.7 count as a match.
match_finder = MatchFinder(distance=CosineSimilarity(), threshold=0.7)
inference_model = InferenceModel(model, match_finder=match_finder)
# cars and frogs (CIFAR-10 labels 1 and 6)
classA, classB = labels_to_indices[1], labels_to_indices[6]
```
## Get nearest neighbors of a query
```
# create faiss index over the embeddings of the whole dataset
inference_model.train_indexer(dataset)
# get 10 nearest neighbors for one query image of each class
for img_type in [classA, classB]:
    # first sample of the class, with a leading batch dimension
    img = dataset[img_type[0]][0].unsqueeze(0)
    print("query image")
    imshow(torchvision.utils.make_grid(img))
    indices, distances = inference_model.get_nearest_neighbors(img, k=10)
    nearest_imgs = [dataset[i][0] for i in indices[0]]
    print("nearest images")
    imshow(torchvision.utils.make_grid(nearest_imgs))
```
## Compare two images of the same class
```
# compare two images of the same class; expect "Same class"
(x, _), (y, _) = dataset[classA[0]], dataset[classA[1]]
imshow(torchvision.utils.make_grid(torch.stack([x,y], dim=0)))
decision = inference_model.is_match(x.unsqueeze(0), y.unsqueeze(0))
print_decision(decision)
```
## Compare two images of different classes
```
# compare two images of a different class; expect "Different class"
(x, _), (y, _) = dataset[classA[0]], dataset[classB[0]]
imshow(torchvision.utils.make_grid(torch.stack([x,y], dim=0)))
decision = inference_model.is_match(x.unsqueeze(0), y.unsqueeze(0))
print_decision(decision)
```
## Compare multiple pairs of images
```
# compare multiple pairs of images
x = torch.zeros(20, 3, 32, 32)
y = torch.zeros(20, 3, 32, 32)
# Interleave the two classes: even slots hold class A, odd slots class B.
# `y` draws samples 20 positions further into each class index list so that
# each (x[i], y[i]) pair is a same-class pair of *different* images.
for i in range(0, 20, 2):
    x[i] = dataset[classA[i]][0]
    x[i+1] = dataset[classB[i]][0]
    y[i] = dataset[classA[i+20]][0]
    y[i+1] = dataset[classB[i+20]][0]
imshow(torchvision.utils.make_grid(torch.cat((x,y), dim=0), nrow=20), figsize=(30, 3))
decision = inference_model.is_match(x, y)
for d in decision:
    print_decision(d)
# fraction of pairs judged to be the same class
print("accuracy = {}".format(np.sum(decision)/len(x)))
```
## Compare all pairs within a batch
```
# compare all pairs within a batch (square boolean match matrix)
match_matrix = inference_model.get_matches(x)
assert match_matrix[0,0] # the 0th image should match with itself
imshow(torchvision.utils.make_grid(torch.stack((x[3],x[4]), dim=0)))
print_decision(match_matrix[3,4]) # does the 3rd image match the 4th image?
```
## Compare all pairs between queries and references
```
# compare all pairs between queries (x) and references (y)
match_matrix = inference_model.get_matches(x, y)
imshow(torchvision.utils.make_grid(torch.stack((x[6],y[6]), dim=0)))
print_decision(match_matrix[6, 6]) # does the 6th query match the 6th reference?
```
# Get results in tuple form
```
# make a new model with a stricter similarity threshold (0.95 vs 0.7 above)
match_finder = MatchFinder(distance=CosineSimilarity(), threshold=0.95)
inference_model = InferenceModel(model, match_finder=match_finder)
# get all matches as (query_index, reference_index) tuples
match_tuples = inference_model.get_matches(x, y, return_tuples=True)
print("MATCHING IMAGE PAIRS")
for i,j in match_tuples:
    print(i,j)
    imshow(torchvision.utils.make_grid(torch.stack((x[i],y[j]), dim=0)))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jgraving/deepposekit/blob/master/examples/step1_create_annotation_set.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# DeepPoseKit Step 1 - Create an annotation set
This is step 1 of the example notebooks for using DeepPoseKit. This notebook shows you how to load and sample images from a video, define a keypoint skeleton, and save the data to a file for labelling with keypoints.
**NOTE**: If you run into problems, you can help us improve DeepPoseKit by [opening an issue](https://github.com/jgraving/deepposekit/issues/new) or [submitting a pull request](https://help.github.com/en/articles/creating-a-pull-request-from-a-fork)
If you haven't already installed DeepPoseKit you can run the next cell
```
import sys
!{sys.executable} -m pip install -U deepposekit
import numpy as np
import cv2
import h5py
import matplotlib.pyplot as plt
from deepposekit.io import VideoReader, DataGenerator, initialize_dataset
from deepposekit.annotate import KMeansSampler
import tqdm
import glob
import pandas as pd
from os.path import expanduser
try:
import google.colab
IN_COLAB = True
except:
IN_COLAB = False
HOME = expanduser("~") if not IN_COLAB else '.'
```
Use the next cell to download the example data into your home directory
```
!git clone https://github.com/jgraving/deepposekit-data {HOME + '/deepposekit-data'}
```
# A note on image resolutions
Currently DeepPoseKit only supports image resolutions that can be repeatedly divided by 2. For example, all of these values are valid image resolutions for either height or width:
```
# Print valid image heights/widths: small odd multiples of powers of two
# (DeepPoseKit requires dimensions repeatedly divisible by 2).
exp = 2 ** np.arange(1, 12)
for multiplier in (1, 3, 5, 7, 11):
    print(multiplier * exp)
```
Images that do not match these resolutions must be manually resized, cropped, or padded. We are working to add automated image size adjustments.
# Open a video
The `VideoReader` class allows you to load in single video frames or batches of frames from nearly any video format.
```
# List the example videos, then read a single grayscale frame to inspect it.
videos = glob.glob(HOME + '/deepposekit-data/datasets/fly/*.avi')
videos
reader = VideoReader(HOME + '/deepposekit-data/datasets/fly/video.avi', gray=True)
frame = reader[0] # read a frame (batch of 1)
reader.close()
frame.shape
plt.figure(figsize=(5,5))
plt.imshow(frame[0,...,0])   # drop the batch and channel axes for display
plt.show()
```
# Sample video frames
This loads batches of 100 frames from the video, and then randomly samples frames from the batches to hold them in memory. You can use any method for sampling frames.
```
# Stream the video in batches of 100 frames and keep 10 random frames per
# batch, giving a manageable in-memory sample of the whole video.
reader = VideoReader(HOME + '/deepposekit-data/datasets/fly/video.avi', batch_size=100, gray=True)
randomly_sampled_frames = []
for idx in tqdm.tqdm(range(len(reader)-1)):   # skip the (possibly short) last batch
    batch = reader[idx]
    random_sample = batch[np.random.choice(batch.shape[0], 10, replace=False)]
    randomly_sampled_frames.append(random_sample)
reader.close()
randomly_sampled_frames = np.concatenate(randomly_sampled_frames)
randomly_sampled_frames.shape
```
# Apply k-means to reduce correlation
This applies the k-means algorithm to the images using `KMeansSampler` to even out sampling across the distribution of images and reduce correlation within the annotation set.
```
# Cluster the sampled frames and draw evenly from each cluster, which evens
# out the sampling across the image distribution and reduces correlation.
kmeans = KMeansSampler(n_clusters=10, max_iter=1000, n_init=10, batch_size=100, verbose=True)
kmeans.fit(randomly_sampled_frames)
kmeans.plot_centers(n_rows=2)
plt.show()
kmeans_sampled_frames, kmeans_cluster_labels = kmeans.sample_data(randomly_sampled_frames, n_samples_per_label=10)
kmeans_sampled_frames.shape
```
# Define a keypoints skeleton file
You must create a .xlsx or .csv file with keypoint names, parent relationships, and swapping relationships for bilaterally symmetric parts (only relevant if using flipping augmentations). If you leave out the `parent` and `swap` columns, then these will simply not be used for annotating data and training the model.
See example skeleton.csv files for more details
```
# Keypoint skeleton: names plus parent/swap relationships per keypoint.
skeleton = pd.read_csv(HOME + '/deepposekit-data/datasets/fly/skeleton.csv')
skeleton
```
# Initialize a new data set for annotations
You can use any method for sampling images to create a numpy array with the shape (n_images, height, width, channels) and then initialize an annotation set. Check the doc string for more details:
```
# Show the doc string, then create the HDF5 annotation set from the sampled
# frames and the skeleton definition.
initialize_dataset?
initialize_dataset(
    images=kmeans_sampled_frames,
    datapath=HOME + '/deepposekit-data/datasets/fly/example_annotation_set.h5',
    skeleton=HOME + '/deepposekit-data/datasets/fly/skeleton.csv',
    # overwrite=True # This overwrites the existing datapath
)
```
# Create a data generator
This creates a `DataGenerator` for loading annotated data. Indexing the generator returns an image-keypoints pair, which you can then visualize. Right now all the keypoints are set to zero, because they haven't been annotated.
You can also look at the doc string for more explanation:
```
DataGenerator?
data_generator = DataGenerator(HOME + '/deepposekit-data/datasets/fly/example_annotation_set.h5', mode="full")
image, keypoints = data_generator[0]
plt.figure(figsize=(5,5))
image = image[0] if image.shape[-1] is 3 else image[0, ..., 0]
cmap = None if image.shape[-1] is 3 else 'gray'
plt.imshow(image, cmap=cmap, interpolation='none')
for idx, jdx in enumerate(data_generator.graph):
if jdx > -1:
plt.plot(
[keypoints[0, idx, 0], keypoints[0, jdx, 0]],
[keypoints[0, idx, 1], keypoints[0, jdx, 1]],
'r-'
)
plt.scatter(keypoints[0, :, 0], keypoints[0, :, 1], c=np.arange(data_generator.keypoints_shape[0]), s=50, cmap=plt.cm.hsv, zorder=3)
plt.show()
```
| github_jupyter |
# The Local News Dataset
View this document on [Github](https://github.com/yinleon/LocalNewsDataset/blob/master/nbs/local_news_dataset.ipynb?flush_cache=true) | [NbViewer](https://nbviewer.jupyter.org/github/yinleon/LocalNewsDataset/blob/master/nbs/local_news_dataset.ipynb?flush_cache=true#datasheet)
by [Leon Yin](https://www.leonyin.org/)<br>
Data scientist SMaPP Lab NYU and affiliate at Data & Society.
## Table of Contents <a name='top'></a>
1. [Introduction](#intro)
2. [Tech Specs](#specs)
3. [Using the Dataset](#use)
# 1. Introduction <a name='intro'></a>
Though not a particularly noticeable part of the current discussion, 2018 has shown us that understanding "fake news", and the media (manipulation) ecosystem at large, has as much to do with local broadcasting stations as it does Alex Jones and CNN.
We saw local news outlets used as a sounding board to decry mainstream media outlets as "[fake news](https://www.youtube.com/watch?v=khbihkeOISc)." We also saw Russian trolls masquerade as local news outlets to [build trust](https://www.npr.org/2018/07/12/628085238/russian-influence-campaign-sought-to-exploit-americans-trust-in-local-news) as sleeper accounts on Twitter.
To help put the pieces of this disinformation ecosystem into context, we can refer to a 2016 [Pew Study](http://www.journalism.org/2016/07/07/trust-and-accuracy/) on Trust and Accuracy of the Modern Media Consumer which showed that 86% of survey respondents had "a lot" or "some" confidence in local news. This was more than their confidence of national media outlets, social media, and family and friends.
<a href="http://www.journalism.org/2016/07/07/the-modern-news-consumer/pj_2016-07-07_modern-news-consumer_2-01/"><img width="420" height="399" src="http://assets.pewresearch.org/wp-content/uploads/sites/13/2016/07/PJ_2016.07.07_Modern-News-Consumer_2-01.png" class="attachment-large size-large" alt="Few have a lot of confidence in information from professional news outlets or friends and family, though majorities show at least some trust in both, but social media garners less trust than either" /></a>
Social media is the least trustworthy news source according to the 4.6K respondents of the Pew study. It's important to note that this study was published before the 2016 US Presidential Election and social media platforms were not under the same scrutiny as they are today.
Perhaps the most significant finding in this study is that very few have "a lot" of trust in information from professional news outlets. Is this because so called "fake news" blurs the line between reputable and false pieces of information? Political scientist Andy Guess has shown that older (60+ yrs old) citizens are more sussceptitble to spreading links containing junk news on Facebook. Yet the mistrust economy is more than the [junk news](https://www.buzzfeednews.com/article/craigsilverman/viral-fake-election-news-outperformed-real-news-on-facebook) sites Craig Silverman analyzed when he first coined "fake news" in late 2016.
<img src='docs/media/fake_news.png' alt='Few have a lot of confidence in information from professional news outlets or friends and family, though majorities show at least some trust in both, but social media garners less trust than either'/>
In 2017, media historian Caroline Jack released a [lexicon](https://datasociety.net/output/lexicon-of-lies/) in an effort to define what was formerly referred to as "fake news," with more nuance. Jack calls this umbrella of deceptive content problematic information.
The social media scholar Alice Marwick -- who made some of the first breakthroughs in this field with [Becca Lewis](https://datasociety.net/output/media-manipulation-and-disinfo-online/), [recently reminded us that](https://www.georgetownlawtechreview.org/why-do-people-share-fake-news-a-sociotechnical-model-of-media-effects/GLTR-07-2018/) problematic information spreads not only through junk news headlines, but also through memes, videos and podcasts. What other mediums are we overlooking? As a hint, we can listen to Marwick and other researchers such as ethnographer [Francesca Tripoldi](https://datasociety.net/output/searching-for-alternative-facts/), who observe that problematic information is deeply connected to one's self-presentation and the reinforcement of group identity. So where does local news fit into this equation?
Though local news is widely viewed as a relatively trustworthy news source, its role in the current media and information landscape is not well studied. To better understand that role, I put together the <b>Local News Dataset</b> in the hopes that it will accelerate research of local news across the web.
## About the Data Set
This dataset is a machine-readable directory of state-level newspapers, TV stations and magazines. In addition to basic information such as the name of the outlet and state it is located in, all available information regarding web presence, social media (Twitter, YouTube, Facebook) and their owners is scraped, too.
The sources of this dataset are [usnpl.com](www.usnpl.com)-- newspapers and magazines by state, [stationindex.com](www.stationindex.com) -- TV stations by state and by owner, and homepages of the media corporations [Meredith](http://www.meredith.com/local-media/broadcast-and-digital), [Sinclair](http://sbgi.net/tv-channels/), [Nexstar](https://www.nexstar.tv/stations/), [Tribune](http://www.tribunemedia.com/our-brands/) and [Hearst](http://www.hearst.com/broadcasting/our-markets).
This dataset was inspired by ProPublica's [Congress API](https://projects.propublica.org/api-docs/congress-api/). I hope that this dataset will serve a similar purpose as a starting point for research and applications, as well as a bridge between datasets from social media, news articles and online communities.
While you use this dataset, if you see irregularities, questionable entries, or missing outlets please [submit an issue](https://github.com/yinleon/LocalNewsDataset/issues/new) on Github or contact me on [Twitter](https://twitter.com/LeonYin). I'd love to hear how this dataset is put to work
You can browse the dataset on [Google Sheets](https://docs.google.com/spreadsheets/d/1f3PjT2A7-qY0SHcDW30Bc_FXYC_7RxnZfCKyXpoWeuY/edit?usp=sharing)<br>
Or look at the raw dataset on [Github](https://github.com/yinleon/LocalNewsDataset/blob/master/data/local_news_dataset_2018.csv)<br>
Or just scroll down to the [tech specs](#local_news_dataset_2018)!
Happy hunting!
## Acknowledgements
I'd like to acknowledge the work of the people behind usnpl.com and stationindex.com for compiling lists of local media outlets.
Andreu Casas and Gregory Eady provided invaluable comments to improve this dataset for public release. Kinjal Dave provided much needed proofreading. The dataset was created by Leon Yin at the SMaPP Lab at NYU. Thank you Josh Tucker, Jonathan Nagler, Richard Bonneau and my colleague Nicole Baram.
## Citation
If this dataset is helpful to you please cite it as:
```
@misc{leon_yin_2018_1345145,
author = {Leon Yin},
title = {Local News Dataset},
month = aug,
year = 2018,
doi = {10.5281/zenodo.1345145},
url = {https://doi.org/10.5281/zenodo.1345145}
}
```
## License
This data is free to use, but please follow the ProPublica [Terms](#terms).
<hr>
# 2. Tech Specs <a name='specs'></a>
This section is an in-depth look at what is scraped from the web and how these pieces of disparate Internet matter come together to form the [Local News Dataset](https://github.com/yinleon/LocalNewsDataset).
<i>For those who tinker...</i><br>
The intermediates can be generated and updated:<br>
```>>> python download_data.py```<br>
The output file is created from merging and pre-processing the intermediates:<br>
```>>> python merge.py``` <br>
These [two scripts](https://github.com/yinleon/LocalNewsDataset/tree/master/py) -- and this notebook, is written in Python 3.6.5 using open sources packages listed in in [requirements.txt](https://github.com/yinleon/LocalNewsDataset/blob/master/requirements.txt).
[Top of Notebook](#top)
```
from runtimestamp.runtimestamp import runtimestamp # for reproducibility
from docs.build_docs import * # auto-generates docs
# NOTE(review): both imports are project-local helpers; `generate_docs` is
# assumed to come from docs.build_docs via the star import -- confirm.
runtimestamp('Leon')
generate_docs()
```
# 3. Using the Dataset <a name='use'></a>
Below is some starter code in Python to read the Local News Dataset from the web into a Pandas Dataframe.
[Top of Notebook](#top)
```
url = 'https://raw.githubusercontent.com/yinleon/LocalNewsDataset/master/data/local_news_dataset_2018.csv'
df_local = pd.read_csv(url)
```
If you want to use this dataset for a list of web domains, there are a few steps you'll need to take:
```
# Filter out entries without a website, entries pointing at generic platforms
# (Facebook pages, Google, Tumblr, Wordpress), and the nationwide Comet TV
# franchise, then deduplicate on domain so each domain appears once.
df_local_website = df_local[(~df_local.domain.isnull()) &
                            (df_local.domain != 'facebook.com') &
                            (df_local.domain != 'google.com') &
                            (df_local.domain != 'tumblr.com') &
                            (df_local.domain != 'wordpress.com') &
                            (df_local.domain != 'comettv.com')].drop_duplicates(subset=['domain'])
# BUGFIX: this write originally appeared *before* df_local_website was defined,
# which raises NameError. It must run after the filtering above.
df_local_website.to_csv('../data/local_news_dataset_2018_for_domain_analysis.csv', index=False)
```
We do these steps because some entries don't have websites, at least one listed website is a Facebook page, Comet TV is a nationwide franchise, and some stations share the same website.
```
df_local_website.sample(3, random_state=303)
```
For convenience this filtered dataset is available here: `https://raw.githubusercontent.com/yinleon/LocalNewsDataset/master/data/local_news_dataset_2018_for_domain_analysis.csv`
and also here:<br> `http://bit.ly/local_news_dataset_domains`
```
df_local_news_domain = pd.read_csv('http://bit.ly/local_news_dataset_domains')
df_local_news_domain.head(2)
```
If you want to get Twitter accounts for all local news stations in Kansas you can filter the dataset as follows:
```
twitter_ks = df_local[(~df_local.twitter.isnull()) &
(df_local.state == 'KS')]
twitter_ks.twitter.unique()
```
We can also get an array of all domains affiliated with Sinclair:
```
sinclair_stations = df_local[df_local.owner == 'Sinclair'].domain.unique()
sinclair_stations
```
Stay tuned for more in-depth tutorials about how this dataset can be used!
# 4. Data Sheet <a name='datasheet'></a>
In the spirit of transparency and good documentation, I am going to answer some questions for datasets proposed in the recent paper [Datasheets for Datasets](https://arxiv.org/abs/1803.09010) by Timnit Gebru, Jamie Morgenstern, Briana Vecchione, Jennifer Wortman Vaughan, Hanna Wallach, Hal Daumé III, Kate Crawford.
[Top of Notebook](#top)
### Motivation for Dataset Creation
*Why was the dataset created? (e.g., were there specific
tasks in mind, or a specific gap that needed to be filled?)*<br>
This Dataset was created to study the role of state-level local news on Twitter.<br>
We wanted to find users who follow both local news outlets and members of congress.<br>
*What (other) tasks could the dataset be used for? Are
there obvious tasks for which it should not be used?*<br>
The dataset can be used to query other social media platforms for local news outlet's social feeds.<br>
It can also serve as a list of state-level domains for link analysis. This is one use of this dataset in an upcoming report on the Internet Research Agency's use of links on Twitter.<br>
I hope that this dataset might be of interest for researchers applying to the [Social Science One and Facebook RFP](https://socialscience.one/our-facebook-partnership).
*Has the dataset been used for any tasks already? If so,
where are the results so others can compare (e.g., links to
published papers)?*<br>
A study of IRA Twitter accounts sharing national, local, and junk news articles.
*Who funded the creation of the dataset? If there is an
associated grant, provide the grant number.*<br>
The dataset was created by Leon Yin at the SMaPP Lab at NYU. For more information, please visit our [website](https://wp.nyu.edu/smapp/).
### Dataset Composition
*What are the instances? (that is, examples; e.g., documents,
images, people, countries) Are there multiple types
of instances? (e.g., movies, users, ratings; people, interactions
between them; nodes, edges)*<br>
Each instance is a local news outlet.
*Are relationships between instances made explicit in
the data (e.g., social network links, user/movie ratings, etc.)?
How many instances of each type are there?*<br>
We have relational links in this data, but that is up to you to make those connections. For counts, please refer to the spec sheet above.
*What data does each instance consist of? “Raw” data
(e.g., unprocessed text or images)? Features/attributes?*<br>
Each instance is a scraped entity from a website. There are no images involved. The metadata fields regarding state, website, and social accounts are scraped from raw HTML.
*Is there a label/target associated with instances? If the instances are related to people, are subpopulations identified
(e.g., by age, gender, etc.) and what is their distribution?*<br>
This is not a traditional supervised machine learning dataset.
*Is everything included or does the data rely on external
resources? (e.g., websites, tweets, datasets) If external
resources, a) are there guarantees that they will exist, and
remain constant, over time; b) is there an official archival
version.*<br>
The data relies on external sources! There are absolutely no guarantees that the links to Twitter, YouTube, Facebook, the source websites (where data is scraped), or the destination websites (homepages for news outlets) will remain available or constant over time.
Currently there are open source libraries -- like [TweePy](http://www.tweepy.org/), to query Twitter, and my colleague Megan Brown and I are about to release a Python wrapper for the Youtube Data API library.
*Are there licenses, fees or rights associated with
any of the data?*<br>
This dataset is free to use. We're copying terms of use from [ProPublica](https://www.propublica.org/datastore/terms)<a name='terms'></a>:
```
In general, you may use this dataset under the following terms. However, there may be different terms included for some data sets. It is your responsibility to read carefully the specific terms included with the data you download or purchase from our website.
You can’t republish the raw data in its entirety, or otherwise distribute the data (in whole or in part) on a stand-alone basis.
You can’t change the data except to update or correct it.
You can’t charge people money to look at the data, or sell advertising specifically against it.
You can’t sub-license or resell the data to others.
If you use the data for publication, you must cite Leon Yin and the SMaPP Lab.
We do not guarantee the accuracy or completeness of the data. You acknowledge that the data may contain errors and omissions.
We are not obligated to update the data, but in the event we do, you are solely responsible for checking our site for any updates.
You will indemnify, hold harmless, and defend Leon Yin and the SMaPP Lab from and against any claims arising out of your use of the data.
```
### Data Collection Process
*How was the data collected? (e.g., hardware apparatus/sensor,
manual human curation, software program,
software interface/API; how were these constructs/measures/methods
validated?)*<br>
The data was collected using 4 CPUs on the NYU HPC Prince Cluster. It was written using [custom code](https://github.com/yinleon/LocalNewsDataset/tree/master/py) that utilizes the requests, beautifulsoup, and Pandas Python libraries. For this reason no APIs are used to collect this data. Data was quality checked by exploring data in Jupyter Notebooks. It was compared to lists curated by [AgilityPR](https://www.agilitypr.com/resources/top-media-outlets/) of the top 10 newspapers by state.
*Who was involved in the data collection process?*<br>
This dataset was collected by Leon Yin.
*Over what time-frame was the data collected?* <br>
The `process_datetime` columns capture when datasets are collected. Initial development for this project began in April 2018.
*How was the data associated with each instance acquired?*<br>
Data is directly scraped from HTML, there is no inferred data. There is no information on how the sources curate their websites -- especially TVstationindex.com and USNPL.com.
*Does the dataset contain all possible instances?* <br>
This is not a sample, but the best attempt at creating a comprehensive list.
*Is there information missing from the dataset and why?* <br>
News outlets not listed in the websites we scrape, or in the custom additions JSON, are not included. We'll attempt to take requests for additions and amendments on GitHub, with the intention of creating a website with a submission form.
*Are there any known errors, sources of noise, or redundancies
in the data?*
There are possible redundancies of news outlets occurring across the websites scraped. We have measures to drop duplicates, but if we missed any please submit an error in GitHub.
### Data Preprocessing
*What preprocessing/cleaning was done?* <br>
Twitter Screen Names are extracted from URLs, states are parsed from raw HTML that usually contains a city name, there is no aggregation or engineered features.
*Was the “raw” data saved in addition to the preprocessed/cleaned
data?* <br>
The raw HTML for each site is not provided (so changes in website UI's) will crash future collection. There are no warranties for this. However the intermediate files are saved, and thoroughly documented in the [tech specs](#specs) above.
*Is the preprocessing software available?* <br>
The dataset is a standard CSV, so any relevant open source software can be used.
*Does this dataset collection/processing procedure
achieve the motivation for creating the dataset stated
in the first section of this datasheet?* <br>
The addition of Twitter Screen names makes it possible to use this data for Twitter research. The inclusion of additional fields like website, other social media platforms (Facebook, Youtube) allows for additional applications
### Dataset Distribution
*How is the dataset distributed? (e.g., website, API, etc.;
does the data have a DOI; is it archived redundantly?)* <br>
The dataset is being hosted on GitHub at the moment. It does not have a DOI (if you have suggestions on how to get one please reach out!). There are plans to migrate the dataset to its own website.
*When will the dataset be released/first distributed?* <br>
August 2018.
*What license (if any) is it distributed under?* <br>
MIT
*Are there any fees or access/export restrictions?* <br>
Not while it is on GitHub, but if its migrated elsewhere that's possible.
### Dataset Maintenance
*Who is supporting/hosting/maintaining the dataset?* <br>
The dataset is currently solely maintained by Leon Yin. This seems unsustainable, so if this project sparks an interest with you please reach out to me here: `data-smapp_lab at nyu dot edu`
*Will the dataset be updated? How often and by whom?
How will updates/revisions be documented and communicated
(e.g., mailing list, GitHub)? Is there an erratum?*<br>
The dataset can be updated locally by running the scripts in this repo. Amendments to the hosted dataset will contain a separate filepath and URL, and be documented in the README.
*If the dataset becomes obsolete how will this be communicated?*<br>
If the dataset becomes obsolete, we'll make this clear in the README in the GitHub repository (or wherever it is being hosted).
*Is there a repository to link to any/all papers/systems
that use this dataset?*<br>
There aren't any publications that use this dataset that are published. We'll keep a list on the README or the website.
*If others want to extend/augment/build on this dataset,
is there a mechanism for them to do so?* <br>
Modifications can be made by adding records to the ammendments [JSON](https://github.com/yinleon/LocalNewsDataset/blob/master/data/custom_additions.json).
### Legal & Ethical Considerations
*If the dataset relates to people (e.g., their attributes) or
was generated by people, were they informed about the
data collection?* <br>
This dataset has no people-level information. However we don't know anything about the people who generated the webpages that this dataset is built on.
*Does the dataset contain information that might be considered
sensitive or confidential?* <br>
To my knowledge there is no personally identifiable information in this dataset.
*Does the dataset contain information that might be considered
inappropriate or offensive?* <br>
I hope not!
[Top of Notebook](#top)
| github_jupyter |
```
# Logistic regression on a 4-sample toy dataset, written against the
# TensorFlow 1.x graph/session API (tf.placeholder / tf.Session).
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.datasets import load_boston
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score

# Toy training data: 4 samples, 3 features, binary labels.
X_train = np.array([[1,0.7,0.2],[.9, .1, .3], [.1, .3, .2], [0, .1, .5]])
y_train = np.array([1,1,0,0])
n = X_train.shape[1]  # number of input features

with tf.name_scope("input"):
    # define variables (placeholders fed at session run time)
    X = tf.placeholder(tf.float32, shape=[None, n], name='x')
    y = tf.placeholder(tf.float32, shape=[None, 1], name='y')
with tf.name_scope("regression"):
    # define variables (trainable weights and bias, zero-initialized)
    W = tf.Variable(tf.zeros([n,1], dtype=tf.float32), name='weights')
    b = tf.Variable(tf.zeros([1], dtype=tf.float32), name='biases')
with tf.name_scope("operations"):
    # regular calculation of pred, similar to linear regression
    st1 = tf.add(tf.matmul(X ,W), b)
    # sigmoid converts from 0 to 1
    y_pred = tf.nn.sigmoid(st1)
    # compute regular error functions (squared error, not cross-entropy)
    squared_error = tf.square(tf.subtract(y_pred, y))
    loss = tf.reduce_sum(squared_error)
    # result, needs to be 0 or 1
    res = tf.round(y_pred)

# define optimization; learning_rate is a placeholder so it can decay per epoch
learning_rate = tf.placeholder(tf.float32)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)

# tensorboard
# create a summary for the loss
tf.summary.scalar("loss", loss)
summary_op = tf.summary.merge_all()
# no need to specify graph
writer = tf.summary.FileWriter('./example', graph=tf.get_default_graph())

# run it
epochs=100
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for epoch in range(1, epochs):
    # loss, summary = sess.run([train_op, summary_op], feed_dict)
    feed_dict = {
        X: X_train,
        y: y_train.reshape(-1,1),
        learning_rate: .5/epoch  # 1/epoch learning-rate decay
    }
    # run one full-batch gradient step
    _, summary = sess.run([train_op, summary_op], feed_dict)
    # log results
    # NOTE(review): no global_step is passed here, so TensorBoard will stack
    # all summaries at step 0 -- confirm whether per-epoch steps were intended.
    writer.add_summary(summary)
    # log to console every epochs/10 epochs
    if epoch%(epochs/10) == 0:
        curr_loss, curr_W, curr_b, pred = sess.run([loss, W, b, y_pred], feed_dict)
        print(pred)
        #print(pred)
        # print("Epoch: {}, R2: {:.2f}, loss: {:.2f} ".format(epoch, r2, curr_loss))

# Final evaluation: rounded predictions (res) vs. the training labels.
feed_dict = {
    X: X_train,
    y: y_train.reshape(-1,1)
}
curr_loss, pred = sess.run([loss, res], feed_dict)
acc = accuracy_score(y_train, pred)
print("Acc: {:.2f}, loss: {:.2f} ".format(acc, curr_loss))
```
# In scikit-learn
```
# The same toy problem solved with scikit-learn's linear SVM for comparison.
# X_train / y_train / accuracy_score come from the previous cell.
from sklearn.svm import SVC

clf = SVC(kernel='linear')
clf.fit(X_train, y_train)
# Evaluate on the training set (no held-out data for this toy example).
pred = clf.predict(X_train)
acc = accuracy_score(y_train, pred)
print("Acc: {:.2f}".format(acc))
# Learned hyperplane parameters (displayed as the cell's output).
clf.coef_, clf.intercept_
```
| github_jupyter |
# Count epitope mutations by trunk status for natural populations
For a given tree, classify each node as trunk or not and count the number of epitope and non-epitope mutations. Finally, summarize the number of mutations by category of trunk and mutation.
```
from augur.distance import read_distance_map
from augur.utils import json_to_tree
import Bio.Phylo
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
%matplotlib inline
```
## Load tree data
```
# Load the auspice tree JSON and convert it into a Bio.Phylo tree object.
with open("../results/auspice/flu_natural_natural_sample_1_with_90_vpm_sliding_full_tree_2015-10-01_tree.json", "r") as fh:
    tree_json = json.load(fh)
tree = json_to_tree(tree_json)
tree
```
## Load distance map
```
# Load the HA epitope distance map (luksza.json, presumably the
# Luksza & Laessig epitope definition -- confirm against the config).
distance_map = read_distance_map("../config/distance_maps/h3n2/ha/luksza.json")
# Extract all epitope sites from the distance map, readjusting to one-based coordinates
# for comparison with one-based coordinates of amino acid mutations annotated on trees.
epitope_sites = [site + 1 for site in distance_map["map"]["HA1"].keys()]
np.array(epitope_sites)
# Human-readable listing of the sites ("a, b, ..., and z").
", ".join([str(site) for site in epitope_sites[:-1]]) + ", and " + str(epitope_sites[-1])
```
## Annotate number of epitope and non-epitope mutations per node
```
# Count epitope vs. non-epitope amino-acid mutations on every branch and
# attach the counts to each node.
for node in tree.find_clades():
    epitope_mutations = 0
    nonepitope_mutations = 0
    if len(node.aa_muts) > 0:
        for gene, muts in node.aa_muts.items():
            for mut in muts:
                # Mutations look like "K160T"; the [1:-1] slice extracts the site number.
                # Only HA1 sites can be epitope sites; everything else is non-epitope.
                if gene == "HA1" and int(mut[1:-1]) in epitope_sites:
                    epitope_mutations += 1
                else:
                    nonepitope_mutations += 1
    node.epitope_mutations = epitope_mutations
    node.nonepitope_mutations = nonepitope_mutations
# Sanity check: distinct nonzero per-branch epitope mutation counts.
set([node.epitope_mutations for node in tree.find_clades() if node.epitope_mutations > 0])
```
## Assign trunk status
[Bedford et al. 2015](http://www.nature.com.offcampus.lib.washington.edu/nature/journal/v523/n7559/pdf/nature14460.pdf) defines trunk as "all branches ancestral to viruses
sampled within 1 year of the most recent sample". The algorithm for finding the trunk based on this definition is then:
1. Select all nodes in the last year
1. Select the parent of each selected node until the root
1. Create a unique set of nodes
1. Omit all nodes from the last year since resolution of the trunk is limited (note: this step is not implemented below)
Note that this definition was based on 12 years of flu data from 2000 to 2012.
```
# Date of the most recently sampled tip in the tree.
max_date = max([tip.attr["num_date"] for tip in tree.find_clades(terminal=True)])
max_date
# Find all tips of the tree sampled within a year of the most recent sample in the tree.
recent_nodes = [node for node in tree.find_clades(terminal=True) if node.attr["num_date"] > (max_date - 1)]
len(recent_nodes)
# Find the last common ancestor of all recent nodes.
mrca = tree.common_ancestor(recent_nodes)
mrca
mrca.attr["num_date"]
# Label all nodes as not part of the trunk by default.
for node in tree.find_clades():
    node.is_trunk = False
    node.is_side_branch_ancestor = False
# Find all nodes that are ancestral to recent nodes.
# Label these ancestral nodes as part of the "trunk"
# and collect the set of distinct nodes in the trunk.
# NOTE(review): .parent is assumed to be attached by augur's json_to_tree;
# it is not a stock Bio.Phylo attribute -- confirm.
for recent_node in recent_nodes:
    current_node = recent_node.parent
    # Traverse from the current node to the tree's root.
    while current_node != tree.root:
        # Mark a node as part of the trunk if it was sampled
        # before the MRCA of all recent nodes.
        if current_node.attr["num_date"] < mrca.attr["num_date"]:
            current_node.is_trunk = True
        current_node = current_node.parent
def is_side_branch_ancestor(node):
    """Returns True if the current node belongs to a "side branch" clade
    and is the immediate descendent from a trunk.
    """
    return node.parent is not None and node.parent.is_trunk
trunk_path = [node for node in tree.find_clades(terminal=False)
              if node.is_trunk]
# Find all nodes that are not on the trunk. These are
# side branch nodes.
side_branch_nodes = [node for node in tree.find_clades(terminal=False)
                     if not node.is_trunk and node.attr["num_date"] < mrca.attr["num_date"]]
len(trunk_path)
len(side_branch_nodes)
# Find all side branch nodes whose immediate parent is on the trunk.
side_branch_ancestors = []
for node in side_branch_nodes:
    if is_side_branch_ancestor(node):
        node.is_side_branch_ancestor = True
        side_branch_ancestors.append(node)
len(side_branch_ancestors)
# Color nodes by status as on the trunk or as a side branch ancestor.
for node in tree.find_clades():
    if node.is_trunk:
        node.color = "green"
    elif node.is_side_branch_ancestor:
        node.color = "orange"
    else:
        node.color = "black"
# Draw tree with node colors instead of with node labels.
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
Bio.Phylo.draw(tree, axes=ax, label_func=lambda node: "")
```
## Annotate mutations by trunk status
```
# Build one record per internal node (trunk or side-branch) with its
# per-branch epitope / non-epitope mutation counts.
records = []
for node in tree.find_clades(terminal=False):
    # Collect records for nodes that are on the trunk or that were sampled prior
    # to the MRCA of recent nodes (i.e., side branch nodes).
    if node.is_trunk or node.attr["num_date"] < mrca.attr["num_date"]:
        records.append({
            "node": node.name,
            "branch type": "trunk" if node.is_trunk else "side branch",
            "epitope mutations": node.epitope_mutations,
            "non-epitope mutations": node.nonepitope_mutations
        })
df = pd.DataFrame(records)
df.head()
# Total mutation counts per branch type, plus the epitope:non-epitope ratio.
counts_by_trunk_status = df.groupby("branch type").aggregate({"epitope mutations": "sum", "non-epitope mutations": "sum"})
counts_by_trunk_status["epitope-to-non-epitope ratio"] = round(
    counts_by_trunk_status["epitope mutations"] / counts_by_trunk_status["non-epitope mutations"]
    , 2)
counts_by_trunk_status
# Export the summary table as LaTeX for the manuscript.
counts_by_trunk_status_table = counts_by_trunk_status.to_latex(escape=False)
with open("../manuscript/tables/mutations_by_trunk_status.tex", "w") as oh:
    oh.write(counts_by_trunk_status_table)
```
| github_jupyter |
# Imports
We will be importing the following packages:
1. numpy
2. matplotlib
3. urllib
4. tqdm
5. imageio
6. glob
7. os
8. base64
9. IPython
10. **wandb**
```
%%capture
! pip install -q imageio
! pip install --upgrade wandb
! wandb login
import numpy as np
import matplotlib.pyplot as plt
import urllib.request
# import wandb
from tqdm import tqdm
import imageio
import glob
import os
import base64
from IPython import display
np.random.seed(666)  # fixed seed for reproducible weight init and sampling
name_run = input('Enter name of the run')
# NOTE(review): "import wandb" above is commented out but wandb.init is called
# here -- this raises NameError unless wandb was imported elsewhere. Confirm.
wandb.init(entity="authors", project="rnn-viz", name=name_run)
```
# Data
We will be taking Shakespeare's work as our data. The data [url](https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt) is fetched from this [tensorflow tutorial on text generation](https://www.tensorflow.org/tutorials/text/text_generation).
```
# Download the Shakespeare corpus used for character-level training.
url = 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt'
filename = 'input.txt'
urllib.request.urlretrieve(url, filename)
```
## Data processing
In this section we read the `input.txt` file that is downloaded. Vocabulary is the unique characters in the entire text file. This is extracted so that we can generate characters with a constraint.
```
# Read the corpus, truncated to the first 30k characters to keep training fast.
text = open('input.txt','r').read()[:30000]
vocab = sorted(set(text))  # the unique characters form the model's vocabulary
text_size, vocab_size = len(text), len(vocab)
print('Number of characters: {}'.format(text_size))
print('Number of unique characters:{}'.format(vocab_size))
```
Let us look at the first 250 characters in the input text.
```
print(text[:250])
```
Here we map the unique characters to specific indices.
A -> 0
B -> 1
and so on
This process helps us in converting characters to numbers and also vice versa. The input text file which is read, is converted to numbers instead of characters. Computers are always better with numbers.
```
# Bidirectional mappings between characters and integer ids.
char_to_ix = {c:ix for ix,c in enumerate(vocab)}
ix_to_char = np.array(vocab)
# The whole corpus encoded as integer ids.
text_as_int = np.array([char_to_ix[c] for c in text])
i = 1
print('The first 5 mappings of char_to_ix')
for key, value in char_to_ix.items():
    print('{} : {}'.format(repr(key), value))
    if i == 5:
        break
    i += 1
print('The first 5 mappings of ix_to_char')
for ix, value in enumerate(ix_to_char[:5]):
    print('{} : {}'.format(ix, repr(value)))
print(text[:10])
print(text_as_int[:10])
```
# Hyperparameters
We are looking to have a hidden state of `100` dimensions. The recurrent neural network is to be unrolled for `25` time steps. The learning rate is chosen to be `0.1`.
```
hidden_size = 100 # size of hidden layer of neurons
seq_length = 25 # number of steps to unroll the RNN for
learning_rate = 1e-1
```
# Model Parameters
To get into this part let us have a look at the formulas governing the Recurrent Neural Nets.
$$
h^{l}_{t} =\tanh\begin{pmatrix}
h^{l-1}_{t}\\
h^{l}_{t-1}
\end{pmatrix}
$$
The above equation is a simple representation of the recurrence formula. This shows that the present hidden state of layer $(l)$, depends on the present hidden state of the immediate lower layer $(l-1)$ and the immediate past $(t-1)$ hidden layer of the same layer. A little nuance of the representation is that we consider $h^{0}_{t}$ as the input layer. We can write $h^{0}_{t}$ as $x_{t}$.
We can break down the above representation in the following way.
$$
raw\_h^{l}_{t} =W_{h\_prev\_layer}h^{l-1}_{t}+W_{h\_prev\_time}h^{l}_{t-1}+b_{h}\\
\boxed{h^{l}_{t} =\tanh raw\_h^{l}_{t}}\\
\boxed{y^{l+1}_{t} =W_{y}\times h^{l}_{t}+b_{y}}
$$
```
# Model parameters, initialized from a small Gaussian (scale 1e-2).
Wxh = np.random.normal(loc=0.0, scale=1e-2, size=(hidden_size, vocab_size))   # input -> hidden
Whh = np.random.normal(loc=0.0, scale=1e-2, size=(hidden_size, hidden_size))  # hidden -> hidden (recurrence)
Why = np.random.normal(loc=0.0, scale=1e-2, size=(vocab_size, hidden_size))   # hidden -> output logits
bh = np.random.normal(loc=0.0, scale=1e-2, size=(hidden_size, 1))             # hidden bias
by = np.random.normal(loc=0.0, scale=1e-2, size=(vocab_size, 1))              # output bias
print("Size of Wxh: {}".format(Wxh.shape))
print("Size of Whh: {}".format(Whh.shape))
print("Size of Why: {}".format(Why.shape))
print("Size of bh: {}".format(bh.shape))
print("Size of by: {}".format(by.shape))
def show_weights_hist(weight):
    """Plot a 100-bin histogram of all entries of *weight*, flattened to 1-D."""
    plt.hist(weight.reshape(-1),100)
    plt.xlim(-1,1)
    plt.show()
show_weights_hist(Why)
```
# Loss Function
In this section we will decipher the loss function and the back-propagation algorithm. In a recurrent sequence model, the back-propagation has a fancy term hooked to it, the **back propagation through time**.
```
def lossFun(inputs, target, hprev, hist_flag=False):
    """
    Forward/backward pass of the char-RNN over one input sequence.
    Inputs:
      inputs- A list of integer character ids for the input sequence
      target- The integer id of the single next character to predict
              (loss is computed only at the final timestep)
      hprev- The initial hidden state h[t-1]
      hist_flag- A boolean; when True, only the per-timestep dh gradients
                 are returned (used for the gradient-histogram visualization)
    Outputs:
      returns the loss, gradients on model parameters, and last hidden state;
      or, when hist_flag is True, the list of dh gradients ordered oldest
      timestep first
    """
    xs, hs= {}, {}
    hs[-1] = hprev
    # forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size,1))
        xs[t][inputs[t],0] = 1  # one-hot encode the input character
        hs[t] = np.tanh(np.matmul(Wxh, xs[t]) + np.matmul(Whh, hs[t-1]) + bh)
    # loss is computed only from the final hidden state (next-char prediction)
    y = np.matmul(Why, hs[t]) + by #projection
    p = np.exp(y) / np.sum(np.exp(y)) #probability
    loss = -np.log(p[target,0]) #softmax loss
    # backward pass (backpropagation through time)
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dy = np.copy(p)
    dy[target] -= 1  # gradient of softmax cross-entropy w.r.t. the logits
    dWhy = np.matmul(dy, hs[t].T)
    dby = dy
    dhnext = np.matmul(Why.T, dy)
    if hist_flag:
        dh_list = []
    for t in reversed(range(len(inputs))):
        if hist_flag:
            dh_list.append(dhnext)
        dh = dhnext
        dhraw = (1 - hs[t] * hs[t]) * dh  # backprop through tanh
        dbh += dhraw
        dWxh += np.matmul(dhraw, xs[t].T)
        dWhh += np.matmul(dhraw, hs[t-1].T)
        dhnext = np.matmul(Whh.T, dhraw)
    if hist_flag:
        return dh_list[::-1]
    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
```
# Text generation function
This function will be used for inference. We will provide a seed character, and will expect this function to return us with generated text of the sequence size mentioned.
```
def text_generate(inputs):
    """
    A text generation function
    Inputs:
      inputs- A list of integer character ids used as the seed/context
    Outputs:
      the integer id of the sampled next character
    """
    # forward pass from a zero hidden state over the whole context
    h = np.zeros((hidden_size,1))
    for t in range(len(inputs)):
        x = np.zeros((vocab_size,1))
        x[inputs[t],0] = 1
        h = np.tanh(np.matmul(Wxh, x) + np.matmul(Whh, h) + bh)
    y = np.matmul(Why, h) + by #projection
    p = np.exp(y) / np.sum(np.exp(y)) #probability
    # Sample (rather than argmax) from the softmax distribution:
    ix = np.random.choice(np.arange(vocab_size),p=p.ravel()) #this is to leak a little information, provides a soft bound
    return ix
def imsave(dh, name, time_step):
    '''
    Save a histogram of the gradient values for one timestep as a PNG.
    inputs:
      dh - array of dh gradients for a single timestep
      name - output filename for the image
      time_step - timestep index, shown in the plot title
    '''
    fig = plt.figure(figsize=(5,5))
    plt.hist(dh.reshape(-1),100)
    plt.title('Time Step {}'.format(time_step))
    # Fixed axes so that frames of the resulting GIF are comparable.
    plt.xlim(-1e-2, 1e-2)
    plt.ylim(0,5)
    plt.savefig(name)
    plt.close()
def create_gif(path_to_images, name_gif):
    """Stitch all images matching the glob pattern *path_to_images* into a GIF."""
    filenames = glob.glob(path_to_images)
    # NOTE(review): reverse sort makes the GIF run from the last timestep back
    # to the first -- confirm this backwards ordering is intended for the
    # backprop-through-time visualization.
    filenames = sorted(filenames,reverse=True)
    images = []
    for filename in tqdm(filenames):
        images.append(imageio.imread(filename))
    kargs = { 'duration': 0.50 }  # seconds per frame
    imageio.mimsave(name_gif, images, 'GIF', **kargs)
def show_gif(fname):
    """Embed the GIF at *fname* inline in the notebook as a base64 <img> tag."""
    with open(fname, 'rb') as gif_file:
        raw_bytes = gif_file.read()
    encoded = base64.b64encode(raw_bytes).decode('ascii')
    return display.HTML(f'<img src="data:image/gif;base64,{encoded}" />')
```
# Train
```
# Adagrad accumulators: per-parameter running sum of squared gradients.
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
num_of_batches = text_size//(seq_length+1)
# NOTE: `iter` and `input` below shadow Python builtins.
for iter in range(51):
    seq_pointer = 0
    hprev = np.zeros((hidden_size,1))  # reset hidden state each epoch
    loss_avg = 0
    # Inside an epoch
    for batch in tqdm(range(num_of_batches)):
        # seq_length input characters; the target is the character right after them.
        input = text_as_int[seq_pointer:seq_pointer+seq_length]
        target = text_as_int[seq_pointer+seq_length]
        # Create the histogram GIF (first batch of every 10th epoch only)
        if seq_pointer == 0 and iter%10 == 0:
            os.mkdir('RNN_hidden{:02d}'.format(iter))
            dh_list = lossFun(input, target, hprev,hist_flag=True)
            for time,dh in enumerate(dh_list):
                imsave(dh, 'RNN_hidden{:02d}/time{:03d}.png'.format(iter,time), time)
            create_gif('RNN_hidden{:02d}/time*.png'.format(iter), 'RNN_hidden{:02d}.gif'.format(iter))
            # wandb.log({"video": wandb.Video('RNN_hidden{:02d}.gif'.format(iter), fps=2, format="gif")})
        seq_pointer += seq_length
        # forward seq_length characters through the net and fetch gradient
        loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(input, target, hprev)
        loss_avg += loss
        # perform parameter update with Adagrad
        for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                      [dWxh, dWhh, dWhy, dbh, dby],
                                      [mWxh, mWhh, mWhy, mbh, mby]):
            mem += dparam * dparam
            param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
    # Outside an epoch
    print('Epoch: {} loss: {:0.2f}'.format(iter, loss_avg/num_of_batches))
    wandb.log({"loss":loss_avg/num_of_batches})
    # Sample 200 characters from the model, seeded with the first training sequence.
    text_input = text_as_int[0:seq_length].tolist()
    for index in range(200):
        infer_char = text_generate(text_input)
        text_input += [infer_char]
    txt = ''.join(ix_to_char[ix] for ix in text_input)
    print('----\n{}\n----'.format(txt))
```
# Vanishing and Exploding Gradients
The model has been trained. We will now try and look into the problems of a simple RNN. We will feed a single sequence to the network. The network produces the gradients for each time step. We will be plotting these gradient along the time step.
```
show_gif('RNN_hidden00.gif')
```
# Connectivity
$$
Connectivity\left( t,t^{'}\right) =\frac{\partial L_{t^{'}}}{\partial x_{t}}
$$
```
def connectivity(inputs, target, hprev):
    """Return, for each input position t, the squared gradient magnitude of
    the final-step loss with respect to the input x_t (oldest position first).

    This quantifies how far back in the sequence the loss can "see", for the
    connectivity visualization below.
    """
    xs, hs, ys, ps = {}, {}, {}, {}
    hs[-1] = hprev
    loss = 0
    connections = []  # NOTE(review): unused; the result is accumulated in `heat`
    # forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size,1))
        xs[t][inputs[t],0] = 1
        hs[t] = np.tanh(np.matmul(Wxh, xs[t]) + np.matmul(Whh, hs[t-1]) + bh)
    # loss at the final timestep only
    ys = np.matmul(Why, hs[t]) + by
    ps = np.exp(ys) / np.sum(np.exp(ys))
    loss = -np.log(ps[target,0])
    # backward pass, recording ||dL/dx_t||^2 at every timestep
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dy = np.copy(ps)
    dy[target] -= 1
    dWhy += np.matmul(dy, hs[t].T)
    dby += dy
    dhnext = np.matmul(Why.T, dy)
    heat = []
    for t in reversed(range(len(inputs))):
        dh = dhnext
        dhraw = (1 - hs[t] * hs[t]) * dh
        dbh += dhraw
        dWxh += np.matmul(dhraw, xs[t].T)
        dWhh += np.matmul(dhraw, hs[t-1].T)
        dhnext = np.matmul(Whh.T, dhraw)
        dx = np.matmul(Wxh.T, dhraw)  # gradient w.r.t. the one-hot input at t
        heat.append(np.sum(dx*dx))
    return heat[::-1]  # reorder to oldest-timestep-first
def coloring(value):
    """Map a normalized heat value to an RGB(A) background color.

    The sentinel -99 marks the target character and is drawn solid green.
    Values up to 0.5 fade in as blue; values above 0.5 fade in as red.
    """
    target_green = (0, 1, 0)
    if value == -99:
        return target_green
    return (0, 0, 1, value) if value <= 0.5 else (1, 0, 0, value - 0.5)
def draw_text(text, heat_map):
    '''
    Render each character of *text* (an iterable of characters) with a
    background color derived from the matching entry of *heat_map*
    (via coloring()). Newlines start a new row; tabs advance the x position.
    Extra heat_map entries beyond len(text) are ignored by zip.
    '''
    fig, ax = plt.subplots()
    counter_x = 0.0
    counter_y = 1.0
    for ch, heat in zip(text,heat_map):
        if ch == '\n':
            counter_x = 0.0
            counter_y -= 0.1
            continue
        if ch == '\t':
            counter_x += 0.05
            continue
        ax.text(x = 0.+counter_x,
                y = 1.+counter_y,
                s = ch,
                color = 'black',
                fontweight = 'bold',
                fontsize=10,
                backgroundcolor = coloring(heat)
                )
        counter_x += 0.05
    ax.axis('off')
    plt.show()
# Smoke test of draw_text with a simple gradient of heat values.
draw_text([0,1,2,3,4,5,6,7,8,9],[0, 0.1, 0.2, 0.3, 0.4, 0.5 ,0.6, 0.7, 0.8, 0.9])
# Connectivity of the first 100 characters to the loss at position 100.
index = 100
input = text_as_int[0:index]
target = text_as_int[index]
hprev = np.zeros((hidden_size,1))
connections = connectivity(input, target, hprev)
# Min-max normalize the heat values to [0, 1] for coloring.
mi = min(connections)
ma = max(connections)
connections = [(value-mi)/(ma-mi) for value in connections]
# The appended -99 sentinel colors the target character itself green.
draw_text(ix_to_char[text_as_int[:index+1]], connections+[-99])
```
| github_jupyter |
# Python 装饰器
装饰器(Decorators)是 Python 的一个重要部分。简单地说:他们是修改其他函数的功能的函数。他们有助于让我们的代码更简短,也更Pythonic(Python范儿)。大多数初学者不知道在哪儿使用它们,所以我将要分享下,哪些区域里装饰器可以让你的代码更简洁。 首先,让我们讨论下如何写你自己的装饰器。
这可能是最难掌握的概念之一。我们会每次只讨论一个步骤,这样你能完全理解它。
## 一切皆对象
首先我们来理解下 Python 中的函数:
```
def hi(name="yasoob"):
    # Demo: functions are objects and can be re-bound like any value.
    return "hi " + name

print(hi())
# We can even assign a function to a variable, e.g.
greet = hi
# No parentheses here: we are not *calling* hi, just binding the
# function object to the name greet. Now run it:
print(greet())
# Watch what happens when we delete the old name hi!
del hi
print(hi())     # NameError: the name 'hi' is gone (intentional demo)
print(greet())  # ...but greet still refers to the same function object
del greet
```
## 在函数中定义函数
刚才那些就是函数的基本知识了。我们来让你的知识更进一步。在 Python 中我们可以在一个函数中定义另一个函数:
```
def hi(name="yasoob"):
    # Demo: functions can be defined inside other functions.
    print("now you are inside the hi() function")
    def greet():
        return "now you are in the greet() function"
    def welcome():
        return "now you are in the welcome() function"
    print(greet())
    print(welcome())
    print("now you are back in the hi() function")

hi()
# Whenever hi() is called, greet() and welcome() run as well.
# But greet() and welcome() are not reachable outside hi(), e.g.:
greet()  # NameError (intentional demo)
```
那现在我们知道了可以在函数中定义另外的函数。也就是说:我们可以创建嵌套的函数。现在你需要再多学一点,就是函数也能返回函数。
## 从函数中返回函数
其实并不需要在一个函数里去执行另一个函数,我们也可以将其作为输出返回出来:
```
def hi(name="yasoob"):
    # Demo: a function can return one of its nested functions.
    def greet():
        return "now you are in the greet() function"
    def welcome():
        return "now you are in the welcome() function"
    if name == "yasoob":
        return greet   # no parentheses: return the function itself
    else:
        return welcome

a = hi()
print(a)
# This shows that `a` now refers to the greet() function nested in hi().
# Now try calling it:
print(a())
```
再次看看这个代码。在 if/else 语句中我们返回 greet 和 welcome,而不是 greet() 和 welcome()。为什么那样?这是因为当你把一对小括号放在后面,这个函数就会执行;然而如果你不放括号在它后面,那它可以被到处传递,并且可以赋值给别的变量而不去执行它。 你明白了吗?让我再稍微多解释点细节。
当我们写下 a = hi(),hi() 会被执行,而由于 name 参数默认是 yasoob,所以函数 greet 被返回了。如果我们把语句改为 a = hi(name = "ali"),那么 welcome 函数将被返回。我们还可以打印出 hi()(),这会输出 now you are in the greet() function。
```
a = hi(name='ali')  # non-default name, so hi() returns welcome instead
print(a)
hi()()  # call hi(), then immediately call the function it returned
```
## 将函数作为参数传给另一个函数
```
def hi():
    """Return a fixed greeting string."""
    return "hi yasoob!"


def doSomethingBeforeHi(func):
    """Print a status line first, then call *func* and print its result."""
    print("I am doing some boring work before executing hi()")
    result = func()
    print(result)


doSomethingBeforeHi(hi)
```
现在你已经具备所有必需知识,来进一步学习装饰器真正是什么了。装饰器让你在一个函数的前后去执行代码。
## 你的第一个装饰器
在上一个例子里,其实我们已经创建了一个装饰器!现在我们修改下上一个装饰器,并编写一个稍微更有用点的程序:
```
def a_new_decorator(a_func):
    # A decorator: takes a function and returns a wrapped version that
    # runs extra code before and after the original.
    def wrapTheFunction():
        print("I am doing some boring work before executing a_func()")
        a_func()
        print("I am doing some boring work after executing a_func()")
    return wrapTheFunction

def a_function_requiring_decoration():
    print("I am the function which needs some decoration to remove my foul smell")

a_function_requiring_decoration()
print(a_function_requiring_decoration.__name__)
a_function_requiring_decoration = a_new_decorator(a_function_requiring_decoration)
#now a_function_requiring_decoration is wrapped by wrapTheFunction()
a_function_requiring_decoration()
```
你看明白了吗?我们刚刚应用了之前学习到的原理。这正是 python 中装饰器做的事情!它们封装一个函数,并且用这样或者那样的方式来修改它的行为。现在你也许疑惑,我们在代码里并没有使用 @ 符号?那只是一个简短的方式来生成一个被装饰的函数。这里是我们如何使用 @ 来运行之前的代码:
```
@a_new_decorator
def a_function_requiring_decoration():
    """Hey you! Decorate me!"""
    print("I am the function which needs some decoration to "
          "remove my foul smell")

# Runs the wrapped version -- @a_new_decorator is shorthand for
# a_function_requiring_decoration = a_new_decorator(a_function_requiring_decoration)
a_function_requiring_decoration()
```
`@a_new_decorator`实际上就是`a_function_requiring_decoration = a_new_decorator(a_function_requiring_decoration)`的简写
希望你现在对 Python 装饰器的工作原理有一个基本的理解。
现在,如果我们运行如下代码会存在一个问题:
```
print(a_function_requiring_decoration.__name__)  # prints 'wrapTheFunction': the wrapper hid the original name
```
这并不是我们想要的!输出(Output)应该是"a_function_requiring_decoration"。这里的函数被wrapTheFunction替代了。它重写了我们函数的名字和注释文档(docstring)。幸运的是Python提供给我们一个简单的函数来解决这个问题,那就是functools.wraps。我们修改上一个例子来使用functools.wraps:
```
from functools import wraps

def a_new_decorator(a_func):
    @wraps(a_func)  # copies a_func's __name__, __doc__, etc. onto the wrapper
    def wrapTheFunction():
        print("I am doing some boring work before executing a_func()")
        a_func()
        print("I am doing some boring work after executing a_func()")
    return wrapTheFunction

@a_new_decorator
def a_function_requiring_decoration():
    """Hey yo! Decorate me!"""
    print("I am the function which needs some decoration to "
          "remove my foul smell")

print(a_function_requiring_decoration.__name__)  # now 'a_function_requiring_decoration' again
```
现在好多了。我们接下来学习装饰器的一些常用场景。
装饰器的蓝本规范:
```
from functools import wraps

def decorator_name(f):
    # Blueprint decorator: gate each call on the module-level flag can_run.
    @wraps(f)
    def decorated(*args, **kwargs):
        if not can_run:
            return "Function will not run"
        return f(*args, **kwargs)
    return decorated

@decorator_name
def func():
    return("Function is running")

# can_run is looked up at *call* time (late binding), so defining it
# after the decorated function is fine.
can_run = True
print(func())
can_run = False
print(func())
```
注意:@wraps接受一个函数来进行装饰,并加入了复制函数名称、注释文档、参数列表等等的功能。这可以让我们在装饰器里面访问在装饰之前的函数的属性。
## 使用场景
现在我们来看一下装饰器在哪些地方特别耀眼,以及使用它可以让一些事情管理起来变得更简单。
### 1.授权(Authorization)
装饰器能有助于检查某个人是否被授权去使用一个web应用的端点(endpoint)。它们被大量使用于Flask和Django web框架中。这里是一个例子来使用基于装饰器的授权:
```
from functools import wraps

def requires_auth(f):
    """Flask-style decorator that rejects unauthenticated requests.

    Relies on module-level `request`, `check_auth` and `authenticate`
    (as in the canonical Flask HTTP-basic-auth snippet).
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or not check_auth(auth.username, auth.password):
            # BUG FIX: the 401 response built by authenticate() must be
            # *returned*; previously execution fell through and the view
            # ran even for bad credentials.
            return authenticate()
        return f(*args, **kwargs)
    return decorated
```
### 2.日志(Logging)
日志是装饰器运用的另一个亮点。这是个例子:
```
from functools import wraps

def logit(func):
    """Decorator that logs the wrapped function's name on every call."""
    @wraps(func)
    def with_logging(*args, **kwargs):
        message = func.__name__ + " was called"
        print(message)
        return func(*args, **kwargs)
    return with_logging


@logit
def addition_func(x):
    """Do some math."""
    return x + x


result = addition_func(4)
```
我敢肯定你已经在思考装饰器的一个其他聪明用法了。
## 带参数的装饰器
来想想这个问题,难道@wraps不也是个装饰器吗?但是,它接收一个参数,就像任何普通的函数能做的那样。那么,为什么我们不也那样做呢? 这是因为,当你使用@my_decorator语法时,你是在应用一个以单个函数作为参数的一个包裹函数。记住,Python里每个东西都是一个对象,而且这包括函数!记住了这些,我们可以编写一下能返回一个包裹函数的函数。
### 在函数中嵌入装饰器
我们回到日志的例子,并创建一个包裹函数,能让我们指定一个用于输出的日志文件。
```
from functools import wraps

def logit(logfile='out.log'):
    # Decorator *factory*: logit(...) takes configuration and returns
    # the actual decorator, so the logfile can be chosen per function.
    def logging_decorator(func):
        @wraps(func)
        def wrapped_function(*args, **kwargs):
            log_string = func.__name__ + " was called"
            print(log_string)
            # Open the logfile in append mode...
            with open(logfile, 'a') as opened_file:
                # ...and write the log line to the configured file.
                opened_file.write(log_string + '\n')
            return func(*args, **kwargs)
        return wrapped_function
    return logging_decorator

@logit()
def myfunc1():
    pass

myfunc1()
# A file called out.log now exists, containing the line printed above.

@logit(logfile='func2.log')
def myfunc2():
    pass

myfunc2()
```
现在一个叫做 func2.log 的文件出现了,里面的内容就是上面的字符串
## 装饰器类
现在我们有了能用于正式环境的logit装饰器,但当我们的应用的某些部分还比较脆弱时,异常也许是需要更紧急关注的事情。比方说有时你只想打日志到一个文件。而有时你想把引起你注意的问题发送到一个email,同时也保留日志,留个记录。这是一个使用继承的场景,但目前为止我们只看到过用来构建装饰器的函数。
幸运的是,类也可以用来构建装饰器。那我们现在以一个类而不是一个函数的方式,来重新构建logit。
```
class logit(object):
    # Class-based decorator: logs each call of the wrapped function.
    _logfile = 'out.log'  # class-level default log destination

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        log_string = self.func.__name__ + " was called"
        print(log_string)
        # Open the logfile in append mode and write the line.
        with open(self._logfile, 'a') as opened_file:
            # Now we log to the specified logfile
            opened_file.write(log_string + '\n')
        # Now, send a notification (no-op here; subclasses override).
        self.notify()
        # return base func
        return self.func(*args, **kwargs)

    def notify(self):
        # logit only logs, no more
        pass
```
这个实现有一个附加优势,在于比嵌套函数的方式更加整洁,而且包裹一个函数还是使用跟以前一样的语法:
```
@logit
def myfunc1():
    pass

myfunc1()
# Now subclass logit to add e-mail functionality
# (actually sending the mail is out of scope here).
class email_logit(logit):
    '''
    A logit variant that e-mails an admin whenever the wrapped
    function is called.
    '''
    def __init__(self, func, email='admin@myproject.com'):
        self.email = email
        super(email_logit, self).__init__(func)

    def notify(self):
        # Would send an e-mail to self.email; just print instead.
        print("mail to: ", self.email)
        pass

@email_logit
def myfunc1():
    pass

myfunc1()
```
从现在起,@email_logit 将会和 @logit 产生同样的效果,但是在打日志的基础上,还会多发送一封邮件给管理员。
如果我们需要装饰器类也能接收参数,那么我们可以用logit类的__init__方法来接收参数,用__call__来接收被装饰的函数:
```
class logit2:
    """Class-based decorator *factory*: __init__ takes the configuration,
    __call__ receives the function to decorate."""

    def __init__(self, outputfile='out.log'):
        self._logfile = outputfile

    def __call__(self, func):
        def wrapper(*args, **kwargs):
            log_string = func.__name__ + " was called"
            print(log_string)
            # Open the logfile and append
            with open(self._logfile, 'a') as opened_file:
                # Now we log to the specified logfile
                opened_file.write(log_string + '\n')
            # Now, send a notification
            self.notify()
            # BUG FIX: propagate the wrapped function's return value.
            # Previously `func(*args, **kwargs)` was called without
            # `return`, so every decorated function silently returned
            # None (the class-based `logit` above does return it).
            return func(*args, **kwargs)
        return wrapper

    def notify(self):
        # logit2 only logs; subclasses may override.
        pass


@logit2()
def myfunc2():
    pass

myfunc2()

@logit2(outputfile='out3.log')
def myfunc2():
    pass

myfunc2()


class email_logit2(logit2):
    '''
    A logit2 variant that e-mails an admin when the function is called.
    '''
    def __init__(self, outputfile='out.log', email='admin@myproject.com'):
        self.email = email
        super(email_logit2, self).__init__(outputfile)

    def notify(self):
        # Would send an e-mail to self.email; just print instead.
        print("mail to: ", self.email)
        pass


@email_logit2(outputfile='emailout.log', email='admin@admin')
def myfunc2():
    pass

myfunc2()
```
## 常用装饰器
### 1. property
property是一种特殊的属性,访问它时会执行一段功能(函数)然后返回值(就是一个装饰器)
注意:被property装饰的属性会优先于对象的属性被使用,而被propery装饰的属性,分成三种:property、被装饰的函数名.setter、被装饰的函数名.deleter(都是以装饰器的形式)。
```
class room:
    """A rectangular room whose area, perimeter and volume are exposed
    as read-only computed properties."""

    def __init__(self, length, width, high):
        self.length = length  # room length
        self.width = width    # room width
        self.high = high      # room height

    @property
    def area(self):
        """Floor area: length * width."""
        return self.width * self.length

    @property
    def perimeter(self):
        """Floor perimeter: 2 * (length + width)."""
        return (self.length + self.width) * 2

    @property
    def volume(self):
        """Volume: floor area * height."""
        return self.area * self.high


r1 = room(2, 3, 4)
# Properties are read like plain attributes -- no parentheses needed;
# each access runs the getter and computes the value on the fly.
print("r1.area:", r1.area)
print("r1.perimeter:", r1.perimeter)
print("r1.volume:", r1.volume)
```
注意:此时的特性area、perimeter和volume不能被赋值。
```
r1.area = 8  # AttributeError (intentional demo): the property defines no setter
```
将一个类的函数定义成特性以后,对象再去使用的时候obj.name,根本无法察觉自己的name是执行了一个函数然后计算出来的,这种特性的使用方式遵循了统一访问的原则。
```
class people:  # a Person class
    def __init__(self, name, sex):
        self.name = name
        # This assignment is routed through the sex.setter below,
        # because the property on the class takes priority.
        self.sex = sex

    @property  # read access for sex
    def sex(self):
        return self.__sex  # the real storage (name-mangled to _people__sex)

    @sex.setter  # write access for sex
    def sex(self, value):
        if not isinstance(value, str):  # type-check before storing
            raise TypeError("性别必须是字符串类型")  # raised when value is not a str
        # Store in the double-underscore attribute; the "__" prefix
        # name-mangles it so it is hidden from casual access.
        self.__sex = value

    @sex.deleter  # delete access for sex
    def sex(self):
        del self.__sex

p1 = people("egon", "male")  # instantiate p1 (runs the setter)
print(p1.sex)        # read via the property getter
p1.sex = "female"    # write via the setter
print(p1.sex)        # read back the modified value
print(p1.__dict__)   # namespace now holds the mangled _people__sex entry
del p1.sex           # triggers the deleter
print(p1.__dict__)   # the stored sex entry is gone
p1 = people("egon", "male")  # fresh instance
dir(p1)
```
此时发现多了一个私有属性`_people__sex`,但我们在`__init__`中并没有定义类似于`__sex`的属性,这是因为self.sex = sex调用了@sex.setter装饰的类方法。
```
class people:  # same Person class, instrumented to show when the setter runs
    def __init__(self, name, sex):
        self.name = name
        self.sex = sex  # routed through the sex.setter below

    @property  # read access for sex
    def sex(self):
        return self.__sex  # the real (name-mangled) storage

    @sex.setter  # write access for sex
    def sex(self, value):
        if not isinstance(value, str):  # type-check before storing
            raise TypeError("性别必须是字符串类型")  # raised when value is not a str
        print("I'm Here!!")  # proves the setter runs during __init__
        self.__sex = value  # store in the mangled attribute

    @sex.deleter  # delete access for sex
    def sex(self):
        del self.__sex

p1 = people("egon", "male")  # prints "I'm Here!!" via the setter
```
请观察如果我们注释掉初始化方法中的self.sex = sex会发生什么:
```
class people:  # same Person class, but __init__ no longer assigns sex
    def __init__(self, name, sex):
        self.name = name
        # self.sex = sex  # commented out: the setter is never triggered now

    @property  # read access for sex
    def sex(self):
        print("I'm Here 2.")
        return self.__sex  # AttributeError if the value was never stored

    @sex.setter  # write access for sex
    def sex(self, value):
        if not isinstance(value, str):  # type-check before storing
            raise TypeError("性别必须是字符串类型")  # raised when value is not a str
        print("I'm Here!!")
        self.__sex = value  # store in the mangled attribute

    @sex.deleter  # delete access for sex
    def sex(self):
        print("I'm Dead.")
        del self.__sex

p1 = people("egon", "male")  # no "I'm Here!!" printed: the setter never runs
dir(p1)  # 'sex' still appears -- it is the property on the class, not stored data
```
显然,没有“I'm Here!!”语句被打印出来,但使用dir返回的实例p1的属性和方法列表里,还是有一个‘sex’,这是类定义中被装饰器装饰过的类方法sex,如果调用:
```
p1.sex  # prints "I'm Here 2." then raises AttributeError: _people__sex was never stored
```
显然,因为没有初始化,所以`return self.__sex`会显示没有这个属性。
```
p1.sex = 'female'  # setter runs ("I'm Here!!") and stores the value
p1.sex             # now the getter succeeds
del p1.sex         # deleter runs ("I'm Dead.") and removes the stored value
```
### 2. classmethod staticmethod
另一组常用的装饰器就是@classmethod和@staticmethod。
Python 的类方法采用装饰器@classmethod来定义,我们直接看例子。
```
class Kls(object):
    """Demo: a class method reads class-level state (the instance counter)."""

    num_inst = 0  # how many Kls objects have been constructed

    def __init__(self):
        Kls.num_inst += 1
        print(self.num_inst)

    @classmethod
    def get_no_of_instance(cls):
        """Return the current instance count; Python passes the class as cls."""
        return cls.num_inst


inst_a = Kls()
inst_b = Kls()
# A class method can be called through an instance or through the class.
print(inst_a.get_no_of_instance())
print(Kls.get_no_of_instance())
```
在上述例子中,我们需要统计类Kls实例的个数,因此定义了一个类变量num_inst来存放实例个数。通过装饰器@classmethod的使用,方法get_no_of_instance被定义成一个类方法。在调用类方法时,Python 会将类(class Kls)传递给cls,这样在get_no_of_instance内部就可以引用类变量num_inst。
由于在调用类方法时,只需要将类型本身传递给类方法,因此,既可以通过类也可以通过实例来调用类方法。
```
class Kls(object):
    num_inst = 0
    def __init__(self):
        Kls.num_inst = Kls.num_inst + 1
        print(self.num_inst)
    def get_no_of_instance(cls):
        # No @classmethod here, so 'cls' is really an ordinary 'self'.
        return cls.num_inst

ik1 = Kls()
ik2 = Kls()
print(ik1.get_no_of_instance())  # works: the *instance* is passed as cls
print(Kls.get_no_of_instance())  # TypeError: missing argument -- intentional demo
```
如果不加类方法装饰器,那么就不会自动将类型本身传递给类方法,因此会报错。
在开发中,我们常常需要定义一些方法,这些方法跟类有关,但在实现时并不需要引用类或者实例,例如,设置环境变量,修改另一个类的变量,等。这个时候,我们可以使用静态方法。
Python 使用装饰器@staticmethod来定义一个静态方法。
```
IND = 'ON'  # module-level switch consulted by the static method

class Kls(object):
    """Demo: a static method checks a module-level flag; it receives no
    implicit first argument at all."""

    def __init__(self, data):
        self.data = data

    @staticmethod
    def checkind():
        """True when the module-level switch IND is 'ON'."""
        return IND == 'ON'

    def do_reset(self):
        if not self.checkind():
            return
        print('Reset done for: %s' % self.data)

    def set_db(self):
        if not self.checkind():
            return
        print('DB connection made for: %s' % self.data)


ik1 = Kls(24)
ik1.do_reset()
ik1.set_db()
```
在代码中,我们定义了一个全局变量IND,由于IND跟类Kls相关,所以我们将方法checkind放置在类Kls中定义。方法checkind只需检查IND的值,而不需要引用类或者实例,因此,我们将方法checkind定义为静态方法。
对于静态方法,Python 并不需要传递类或者实例,因此,既可以使用类也可以使用实例来调用静态方法。
```
print(ik1.checkind())  # static method called via the instance
print(Kls.checkind())  # or via the class -- no implicit first argument either way
```
我们用代码说明实例方法,类方法,静态方法的区别。注意下述代码中方法foo,class_foo,static_foo的定义以及使用。
```
class Kls(object):
    def foo(self, x):
        # instance method: the instance is passed implicitly as self
        print('executing foo(%s,%s)' % (self, x))
    @classmethod
    def class_foo(cls, x):
        # class method: the class is passed implicitly as cls
        print('executing class_foo(%s,%s)' % (cls, x))
    @staticmethod
    def static_foo(x):
        # static method: no implicit first argument at all
        print('executing static_foo(%s)' % x)

ik = Kls()
# instance method
ik.foo(1)
print(ik.foo)  # a bound method, bound to the instance ik
print('==========================================')
# class method -- callable through the instance or the class
ik.class_foo(1)
Kls.class_foo(1)
print(ik.class_foo)  # a bound method, bound to the class Kls (not to ik)
print('==========================================')
# static method -- callable either way, bound to nothing
ik.static_foo(1)
Kls.static_foo('hi')
print(ik.static_foo)  # a plain function
```
对于实例方法,调用时会把实例ik作为第一个参数传递给self参数。因此,调用ik.foo(1)时输出了实例ik的地址。
对于类方法,调用时会把类Kls作为第一个参数传递给cls参数。因此,调用ik.class_foo(1)时输出了Kls类型信息。
前面提到,可以通过类也可以通过实例来调用类方法,在上述代码中,我们再一次进行了验证。
对于静态方法,调用时并不需要传递类或者实例。其实,静态方法很像我们在类外定义的函数,只不过静态方法可以通过类或者实例来调用而已。
值得注意的是,在上述例子中,foo只是个函数,但当调用ik.foo的时候我们得到的是一个已经跟实例ik绑定的函数。调用foo时需要两个参数,但调用ik.foo时只需要一个参数。foo跟ik进行了绑定,因此,当我们打印ik.foo时,会看到以下输出:
```
<bound method Kls.foo of <__main__.Kls object at 0x0551E190>>
```
当调用ik.class_foo时,由于class_foo是类方法,因此,class_foo跟Kls进行了绑定(而不是跟ik绑定)。当我们打印ik.class_foo时,输出:
```
<bound method type.class_foo of <class '__main__.Kls'>>
```
当调用ik.static_foo时,静态方法并不会与类或者实例绑定,因此,打印ik.static_foo(或者Kls.static_foo)时输出:
```
<function static_foo at 0x055238B0>
```
概括来说,是否与类或者实例进行绑定,这就是实例方法,类方法,静态方法的区别。
| github_jupyter |
使用ResNet改良版的“批量归一化、激活和卷积”结构
```
import time
import torch
from torch import nn, optim
import torch.nn.functional as F
import d2lzh_pytorch as d2l  # helper library from the "Dive into Deep Learning" book

# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def conv_block(in_channels, out_channels):
    """DenseNet "BN -> ReLU -> 3x3 conv" unit (the improved ResNet ordering).

    Keeps the spatial size (padding=1) and maps in_channels -> out_channels.
    """
    return nn.Sequential(
        nn.BatchNorm2d(in_channels),
        nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
    )
class DenseBlock(nn.Module):
    """Stack of conv_blocks whose outputs are concatenated channel-wise.

    Each inner block sees the original input plus everything produced so
    far, so the channel count grows by out_channels per convolution.
    """

    def __init__(self, num_convs, in_channels, out_channels):
        super(DenseBlock, self).__init__()
        blocks = []
        for idx in range(num_convs):
            blocks.append(conv_block(in_channels + idx * out_channels, out_channels))
        self.net = nn.ModuleList(blocks)
        # Channel count after the whole block: input plus all growth.
        self.out_channels = in_channels + num_convs * out_channels

    def forward(self, X):
        for layer in self.net:
            # Concatenate each layer's output with its input on the channel axis.
            X = torch.cat((X, layer(X)), dim=1)
        return X
blk = DenseBlock(2, 3, 10)  # 2 convs, 3 input channels, growth rate 10
X = torch.rand(4, 3, 8, 8)
Y = blk(X)
Y.shape  # expected: (4, 3 + 2*10, 8, 8) = (4, 23, 8, 8)
def transition_block(in_channels, out_channels):
    """Transition layer placed between dense blocks.

    A 1x1 convolution shrinks the channel count and 2x2 average pooling
    halves the spatial resolution, keeping the model size under control.
    """
    return nn.Sequential(
        nn.BatchNorm2d(in_channels),
        nn.ReLU(),
        nn.Conv2d(in_channels, out_channels, kernel_size=1),
        nn.AvgPool2d(kernel_size=2, stride=2),
    )
# Stem: 7x7 conv + BN/ReLU + max pooling, as in ResNet.
net = nn.Sequential(
    nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
    nn.BatchNorm2d(64),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)

num_channels, growth_rate = 64, 32  # num_channels tracks the current channel count
num_convs_in_dense_blocks = [4, 4, 4, 4]
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    DB = DenseBlock(num_convs, num_channels, growth_rate)
    net.add_module("DenseBlock_%d" % i, DB)
    num_channels = DB.out_channels
    # Insert a transition block (halving channels and spatial size)
    # between dense blocks, but not after the last one.
    if i != len(num_convs_in_dense_blocks) - 1:
        net.add_module("transition_block_%d" % i, transition_block(num_channels, num_channels // 2))
        num_channels = num_channels // 2

# Head: BN/ReLU, global average pooling, then a 10-way linear classifier.
net.add_module("BN", nn.BatchNorm2d(num_channels))
net.add_module("relu", nn.ReLU())
net.add_module("global_avg_pool", d2l.GlobalAvgPool2d())
net.add_module("fc", nn.Sequential(d2l.FlattenLayer(), nn.Linear(num_channels, 10)))

# Sanity check: push a dummy image through and print each top-level layer's output shape.
X = torch.rand((1, 1, 96, 96))
for name, layer in net.named_children():
    X = layer(X)
    print(name, 'output shape:\t', X.shape)

# Train on Fashion-MNIST, resized to 96x96.
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/irahulcse/Data-Science-Work-For-Quora/blob/master/Copy_of_quora.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Quora Data Framework New
```
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from wordcloud import WordCloud as wc
from nltk.corpus import stopwords
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
from pandas import get_dummies
import matplotlib as mpl
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib
import warnings
from sklearn.ensemble import RandomForestClassifier
import sklearn
import string
import scipy
import numpy
import nltk
import json
import sys
import csv
import os
nltk.download('averaged_perceptron_tagger')
nltk.download("stopwords")
```
# Version of the different libraries
```
# Print library versions for reproducibility of the notebook run.
print('matplotlib: {}'.format(matplotlib.__version__))
print('sklearn: {}'.format(sklearn.__version__))
print('scipy: {}'.format(scipy.__version__))
print('seaborn: {}'.format(sns.__version__))
print('pandas: {}'.format(pd.__version__))
print('numpy: {}'.format(np.__version__))
print('Python: {}'.format(sys.version))
```
# Getting alll the data from nltk stopwords
```
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords

# Sample text used by the tokenization/stemming/POS demos below.
data = "All work and no play makes jack dull boy. All work and no play makes jack a dull boy."
```
# Print the tokenize data
```
# Tokenize the sample text into words and into sentences.
print(word_tokenize(data))
print(sent_tokenize(data))
# (kept for reference: stop-word filtering experiment)
# stopWords=set(stopwords.words('english'))
# words=word_tokenize(data)
# wordsFiltered=[]
# for w in words:
#     if w in stopWords:
#         wordsFiltered.append(w)
# print(wordsFiltered)
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize

# Stemming demo: reduce the inflected forms to a common stem ("game").
words = ["game", "gaming", "gamed", "games"]
ps = PorterStemmer()
for word in words:
    print(ps.stem(word))

from nltk.tokenize import PunktSentenceTokenizer

# POS-tag each sentence of the sample text.
# FIX: loop variable renamed from `set`, which shadowed the builtin set
# type (used above via set(stopwords.words(...))).
sentences = nltk.sent_tokenize(data)
for sentence in sentences:
    print(nltk.pos_tag(nltk.word_tokenize(sentence)))
```
# How to make use of sns — I am not able to get it working properly
```
sns.set(style='white',context='notebook',palette="deep")
```
# EDA
## I will be going to write the diffrent exploratoion technique which can be used to explore the dataset
```
# Load the Quora train/test splits from local CSVs (machine-specific absolute paths).
train = pd.read_csv('/home/rahul/Desktop/Link to rahul_environment/Projects/Machine_Learning Projects/Quora_DataFramework/train.csv')
test = pd.read_csv('/home/rahul/Desktop/Link to rahul_environment/Projects/Machine_Learning Projects/Quora_DataFramework/test.csv')
print('shape of the train', train.shape)
print('shape of the test', test.shape)
train.size  # total number of cells (rows x columns)
type(train)  # tells us about the object type
train.describe()  # summary statistics for the numeric columns
train.sample(5)  # five random rows for a quick look
```
# Data Cleaning
# for finding that there is any kind of the null element is present or not(sum of the null values)
```
train.isnull().sum()  # count missing values per column
# If any nulls were present, dropping them would shrink the frame:
print('Before Dropping the items', train.shape)
train = train.dropna()
print('After droping', train.shape)
```
# for finding the unique items for the target with command below:
# getting all the unique from the dataset
```
train_target = train['target'].values
np.unique(train_target)  # distinct label values (presumably binary 0/1 -- verify)
train.head(5)
train.tail(5)
train.describe()
```
** Data preprocessing refers to the transformations applied to our data before feeding it to the algorithm.
Data Preprocessing is a technique that is used to convert the raw data into a clean data set. In other words, whenever the data is gathered from different sources it is collected in raw format which is not feasible for the analysis. there are plenty of steps for data preprocessing and we just listed some of them in general(Not just for Quora) :
removing Target column (id)
Sampling (without replacement)
Making part of iris unbalanced and balancing (with undersampling and SMOTE)
Introducing missing values and treating them (replacing by average values)
Noise filtering
Data discretization
Normalization and standardization
PCA analysis
Feature selection (filter, embedded, wrapper)
Etc.
now we will be going to perform some queries on the dataset**
```
train.where(train['target']==1).count()  # non-null counts restricted to target==1 rows
train[train['target']>1]  # expected empty if target is binary -- sanity check
train.where(train['target']==1).head(5)
```
** Imbalanced dataset is relevant primarily in the context of supervised machine learning involving two or more classes.
Imbalance means that the number of data points available for different the classes is different: If there are two classes, then balanced data would mean 50% points for each of the class. For most machine learning techniques, little imbalance is not a problem. So, if there are 60% points for one class and 40% for the other class, it should not cause any significant performance degradation. Only when the class imbalance is high, e.g. 90% points for one class and 10% for the other, standard optimization criteria or performance measures may not be as effective and would need modification.
Now we will be going to explore the question text**
```
question = train['question_text']
i = 0
for q in question[:5]:
    i = i + 1  # NOTE(review): counter is maintained but never used
    print("Question came from the Quora Data_set=="+q)
# Per-question word count, used by the plots below.
train["num_words"] = train["question_text"].apply(lambda x: len(str(x).split()))
```
# Some Feature Engineering
```
eng_stopwords=set(stopwords.words("english"))
print(len(eng_stopwords))
print(eng_stopwords)
```
```
print(train.columns)
train.head()
# Count plot: class balance of the target.
ax = sns.countplot(x='target', hue='target', data=train, linewidth=5, edgecolor=sns.color_palette("dark", 3))
plt.title('Is data set imbalance')
plt.show()
# NOTE(review): savefig after show() usually writes a blank figure --
# consider saving before showing.
plt.savefig('targetsetimbalance')
# Pie chart of the target distribution.
ax = train['target'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', shadow=True)
ax.set_title('target')
ax.set_ylabel('')
plt.savefig('targetdiagramforpie')
plt.show()
# cf=RandomForestClassifier(n_estimators=)
```
# Histogram
```
f,ax=plt.subplots(1,2,figsize=(20,10))
train[train['target']==0].num_words.plot.hist(ax=ax[0],bins=20,edgecolor='black',color='red')
ax[0].set_title('target=0')
x1=list(range(0,85,5))
f,ax=plt.subplots(1,2,figsize=(18,8))
train[['target','num_words']].groupby(['target']).mean().plot().bar(ax=ax[0])
ax[0].set_title('num vs target')
sns.countplot('num_words',hue='target',data=train,ax=ax[1])
ax[1].set_title('num_words:target=0 vs target=1')
plt.show()
```
```
train.hist(figsize=(15,20))  # histograms of every numeric column
plt.figure()  # NOTE(review): opens an extra empty figure; probably unnecessary
# (histogram grid for a quick distribution overview)
```
# Making the violin plot
```
# Violin plot: distribution of question word counts per target class.
sns.violinplot(data=train, x='target', y='num_words')
plt.savefig('violinplot')
```
# Making the kde plot
```
# Overlaid KDEs of num_words, one curve per target class.
# NOTE(review): FacetGrid's 'size' parameter was renamed 'height' in newer seaborn.
sns.FacetGrid(train, hue="target", size=5).map(sns.kdeplot, "num_words").add_legend()
plt.savefig('facetgrid-target')
plt.show()
```
# Box Plot
```
# Clip word counts at 60 so the box plot isn't dominated by outliers.
# FIX: use a single .loc on the DataFrame instead of chained indexing
# (train['num_words'].loc[...] = 60), which pandas may apply to a copy
# and flags with SettingWithCopyWarning.
train.loc[train['num_words'] > 60, 'num_words'] = 60
axes = sns.boxplot(x='target', y='num_words', data=train)
axes.set_xlabel('Target', fontsize=12)
axes.set_title("No of words in each class", fontsize=15)
plt.savefig('target-numwords')
plt.show()
# (kept for reference: word-cloud generation experiment)
# eng_stopwords=set(stopwords.words("english"))
# def generate_wordcloud(text):
#     wordcloud = wc(relative_scaling = 1.0,stopwords = eng_stopwords).generate(text)
#     fig,ax = plt.subplots(1,1,figsize=(10,10))
#     ax.imshow(wordcloud, interpolation='bilinear')
#     ax.axis("off")
#     ax.margins(x=0, y=0)
#     plt.show()
# text=' '.join(train.question_text)
# generate_wordcloud(text)
```
| github_jupyter |
```
import pandas as pd

df = pd.read_csv('data/tpch.csv', sep=',')
# FIX: take an explicit copy -- df.head() returns a slice of df, and
# assigning a new column on it triggers SettingWithCopyWarning and may
# not modify the frame we keep working with.
df1 = df.head(1000000).copy()
df1['gid'] = 1  # start with every row in group 1
df1

# Attributes whose per-group means drive the recursive partitioning.
paritioning_attributes = ['count_order', 'sum_base_price', 'sum_disc_price',
                          'sum_charge', 'avg_qty', 'avg_price',
                          'avg_disc', 'sum_qty']
k = len(paritioning_attributes)
# FIX: select the columns with a list -- groupby('gid')['a', 'b', ...]
# (tuple-style multi-column indexing) is deprecated in pandas.
mean_values = df1.groupby('gid')[paritioning_attributes].mean()
group_threshold = 100  # max rows allowed per group; can change this threshold
def get_group_id(group_binary_arr):
    """Interpret a sequence of 0/1 flags as a little-endian binary number.

    e.g. [0,0,0] -> 0, [1,0,0] -> 1, [0,1,0] -> 2, [0,1,1] -> 6.
    """
    return sum(bit * 2**pos for pos, bit in enumerate(group_binary_arr))
# Example group ids from the binary comparison vector (little-endian):
# [0,0,0] -> 0, [1,0,0] -> 1, [0,1,0] -> 2, [0,1,1] -> 6
# Logic to implement
# First assign all rows to group 1
# calculate mean of group for partitioning attributes ... do comparison for rows of that group to assign to new group
# increase least group id by 2^k
# do partition again for groups which has more rows then threshold.
df1 = df1.head(10000)
least_group_id = 0
while True:
    for group_id, group_rows in df1.groupby('gid'):
        print(group_id, len(group_rows))
        if len(group_rows) > group_threshold:
            mean_values = group_rows[paritioning_attributes].mean()
            for index, row in group_rows.iterrows():
                new_group_id = get_group_id(list((row[paritioning_attributes] < mean_values[paritioning_attributes]).astype(int)))
                # FIX: write back through the frame with .loc -- iterrows()
                # yields copies, so the old `row['gid'] = ...` never updated df1.
                df1.loc[index, 'gid'] = new_group_id
                print("new group id {}".format(new_group_id))
    group_sizes = [len(group_rows) for group_id, group_rows in df1.groupby('gid')]
    # FIX: `all(group_sizes) <= group_threshold` compared a bool to an int
    # and was therefore always true; test each group size individually.
    if all(size <= group_threshold for size in group_sizes):
        break
    least_group_id += 2**len(paritioning_attributes)
    # NOTE(review): least_group_id is advanced but never folded into
    # new_group_id -- confirm whether ids from successive passes should be
    # offset by it so groups from different passes stay distinct.
    print(least_group_id)
least_group_id = 0
while True:
    # FIX: removed `df1.groupby('gid').agg({'gid': })` -- the empty dict
    # value made it a SyntaxError, and its result was unused anyway.
    for group_id, group_rows in df1.groupby('gid'):
        print(group_id, len(group_rows))
        if len(group_rows) > group_threshold:
            mean_values = group_rows[paritioning_attributes].mean()
            for index, row in group_rows.iterrows():
                new_group_id = get_group_id(list((row[paritioning_attributes] < mean_values[paritioning_attributes]).astype(int)))
                # FIX: write back via .loc -- iterrows() rows are copies.
                df1.loc[index, 'gid'] = new_group_id
                print("new group id {}".format(new_group_id))
    group_sizes = [len(group_rows) for group_id, group_rows in df1.groupby('gid')]
    # FIX: compare each size to the threshold (all(...) <= n was always true).
    if all(size <= group_threshold for size in group_sizes):
        break
    least_group_id += 2**len(paritioning_attributes)
    print(least_group_id)

df1  # display the repartitioned frame
```
| github_jupyter |
# Lesson 1 - What's your pet
### Trying to pass pretrained weights into Learner created model, and making create_cnn working with custom models. I am using resnet50 only to make it comparable to the usual fastai resnet50 training scores to confirm that my method works.
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
We import all the necessary packages. We are going to work with the [fastai V1 library](http://www.fast.ai/2018/10/02/fastai-ai/) which sits on top of [Pytorch 1.0](https://hackernoon.com/pytorch-1-0-468332ba5163). The fastai library provides many useful functions that enable us to quickly and easily build neural networks and train our models.
```
from fastai import *
from fastai.vision import *

# Pin all fastai/torch work to GPU 1.
gpu_device = 1
defaults.device = torch.device(f'cuda:{gpu_device}')
torch.cuda.set_device(gpu_device)

# Download and extract the Oxford-IIIT Pet dataset.
path = untar_data(URLs.PETS); path
path.ls()
path_anno = path/'annotations'
path_img = path/'images'
```
The first thing we do when we approach a problem is to take a look at the data. We _always_ need to understand very well what the problem is and what the data looks like before we can figure out how to solve it. Taking a look at the data means understanding how the data directories are structured, what the labels are and what some sample images look like.
The main difference between the handling of image classification datasets is the way labels are stored. In this particular dataset, labels are stored in the filenames themselves. We will need to extract them to be able to classify the images into the correct categories. Fortunately, the fastai library has a handy function made exactly for this, `ImageDataBunch.from_name_re` gets the labels from the filenames using a [regular expression](https://docs.python.org/3.6/library/re.html).
```
fnames = get_image_files(path_img)
fnames[:5]
np.random.seed(2)  # reproducible train/valid split
# Class label = everything before the trailing _<number>.jpg in the filename.
pat = re.compile(r'/([^/]+)_\d+.jpg$')
```
If you're using a computer with an unusually small GPU, you may get an out of memory error when running this notebook. If this happens, click Kernel->Restart, uncomment the 2nd line below to use a smaller *batch size* (you'll learn all about what this means during the course), and try again.
```
bs = 64  # batch size; halve this if the GPU runs out of memory
```
## Training: resnet50
Now we will train in the same way as before but with one caveat: instead of using resnet34 as our backbone we will use resnet50 (resnet34 is a 34 layer residual network while resnet50 has 50 layers. It will be explained later in the course and you can learn the details in the [resnet paper](https://arxiv.org/pdf/1512.03385.pdf)).
Basically, resnet50 usually performs better because it is a deeper network with more parameters. Let's see if we can achieve a higher performance here. To help it along, let's us use larger images too, since that way the network can see more detail. We reduce the batch size a bit since otherwise this larger network will require more GPU memory.
```
# Build the databunch at 299px; halve the batch size for the bigger network.
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(),
                                   size=299, bs=bs//2).normalize(imagenet_stats)
import pretrainedmodels
pretrainedmodels.model_names
# this works
def get_model(pretrained=True, model_name = 'resnet50', **kwargs ):
    # Fetch a backbone from the pretrainedmodels zoo; load ImageNet
    # weights only when pretrained=True.
    if pretrained:
        arch = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
    else:
        arch = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained=None)
    return arch
# get_model() # uncomment if you want to see its arch
# Fastai head for 37 pet classes. nf = 2048*2 because the head starts with
# AdaptiveConcatPool2d, which concatenates AdaptiveAvgPool2d(1) and
# AdaptiveMaxPool2d(1), doubling resnet50's 2048 features.
custom_head = create_head(nf=2048*2, nc=37, ps=0.5, bn_final=False)
children(models.resnet50())[-2:]
custom_head
# Backbone (all but the last two layers) + the fastai head.
fastai_resnet50=nn.Sequential(*list(children(get_model(model_name = 'resnet50'))[:-2]),custom_head)
# NOTE: plain `Learner` does not set up transfer-learning layer groups;
# Jeremy recommends create_cnn for that (see thread below).
learn = Learner(data, fastai_resnet50, metrics=error_rate)
# https://forums.fast.ai/t/lesson-5-advanced-discussion/30865/21
# fastai_resnet50 # uncomment if you want to see its arch
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5)
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
# learn.fit_one_cycle(3, max_lr=slice(1e-6,1e-4))
learn.fit_one_cycle(1, max_lr=slice(1e-6,1e-4))
```
### Comparing the previous learn scores (which seem did not use pretrained weights) with create_cnn method.
```
fastai_resnet50=nn.Sequential(*list(children(get_model(model_name = 'resnet50'))[:-2]),custom_head)
def get_fastai_model(pretrained=True, **kwargs ):
    # Adapter so create_cnn can "construct" the model.
    # NOTE(review): the prebuilt network is returned regardless of the
    # `pretrained` flag -- confirm that is intended.
    return fastai_resnet50
# get_fastai_model() # uncomment if you want to see its arch. You can see that it is identical to model.resnet50
learn = create_cnn(data, get_fastai_model, metrics=error_rate)
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5)
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
# learn.fit_one_cycle(3, max_lr=slice(1e-6,1e-4))
learn.fit_one_cycle(1, max_lr=slice(1e-6,1e-4))
```
### Comparing the previous learn scores with the original fastai create_cnn method.
```
# Baseline for comparison: stock fastai create_cnn with torchvision resnet50.
learn = create_cnn(data,models.resnet50, metrics=error_rate)
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5)
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
# learn.fit_one_cycle(3, max_lr=slice(1e-6,1e-4))
learn.fit_one_cycle(1, max_lr=slice(1e-6,1e-4))
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
from scipy.io import wavfile, loadmat
import os
import sys
print(sys.path)
# directory where phonemes are kept, as per phon_input_directory_path
dirr = '/Users/robinson/Downloads/data/pred/20180902_171045_col8_cond_i1to5'
'''
for each phrase, get source and predicted f0 for all syllables and assemble into one sequence, along with unvoiced #
parts, and output into separate text files for us in the following code block
'''
# path to source F0 CSV input files directory
csv_input_directory_path = '/Users/robinson/Dropbox/anasynth/_data/emoVC/Olivia2006/f0_raw_phoneme'
# Olivia2006.e01.p01.i00.s01_s e.csv
csv_input_file_extension = '.csv'
# path to predicted F0 CSV input files directory
phon_input_directory_path = '/Users/robinson/Downloads/data/pred/' + dirr + '/phonemes'
# Olivia2006.e01.p01.i00.s01_s e1.csv
phon_input_file_extension = '.csv'
# path to MAT input files directory
mat_input_directory_path = '/Users/robinson/Dropbox/anasynth/_data/emoVC/Olivia2006'
# Olivia2006.e01.p01.i00.mat
mat_input_file_extension = '.mat'
# path to WAV input files directory
raw_input_directory_path = '/Users/robinson/Downloads/data/Olivia2006/Olivia2006_AUDIO/'
# Olivia2006.e01.p01.i00.1.wav
raw_input_file_extension = '.wav'
# define input file root - common to all filetypes
input_file_root = 'Olivia2006'
# define output directory and files
output_directory = os.path.join(raw_input_directory_path, 'f0compare')
if not os.path.exists(output_directory):
os.mkdir(output_directory)
# open test_log.txt and split into list of lines
f_test_log = open(os.path.join(csv_input_directory_path, 'out', 'test_log.txt'), 'r')
# f_test_log = open(os.path.join('/Users/robinson/Downloads/data/pred/20180628_151243', 'test_log.txt'), 'r') # debug
test_log = f_test_log.read().split('\n')
# define phrase range
phrase_from = 1
phrase_to = 10 #10
# define source and target emotion ranges
source_emotion_from = 1
source_emotion_to = 8 #8
# define source and target intensity ranges
source_intensity_from = 0
source_intensity_to = 0
# set sample rate
step_s = 0.005 # 5ms, which I assume is what was used to sample the file
# file counter
i = 0
# Turn interactive plotting off
plt.ioff()
# lists to store all syllables and phonemes in all files
all_phonemes = []
# for each wavfile that we want to treat..
# for each phrase
for p in range(phrase_from, phrase_to + 1):
# for each source emotion
for e_s in range(source_emotion_from, source_emotion_to + 1):
# for each source intensity
for i_s in range(source_intensity_from, source_intensity_to + 1):
# build the source file path
filename_base = ''.join([input_file_root,
'.e', format(e_s, '02d'),
'.p', format(p, '02d'),
'.i', format(i_s, '02d')])
# open the mat file
mat_filename = ''.join([filename_base, mat_input_file_extension])
mat_filepath = os.path.join(mat_input_directory_path, mat_filename)
mat_dict = loadmat(mat_filepath)
syll_label = mat_dict['syll_label']
syll_label = syll_label.reshape((syll_label.shape[1],))
# print(syll_label.shape)
# print(syll_label)
# reshape this to 2d, to preserve start/end relationship
# syll_time.shape (2, 11)
# I want syll_time.shape (11, 2) BUT with the start and end times in different 'columns' - just transpose!
syll_time = mat_dict['syll_time']
# print('syll_time.shape', syll_time.shape)
syll_time = syll_time.T
# get list of phonemes
phon_label = mat_dict['phone_label']
phon_label = phon_label.reshape((phon_label.shape[1],))
# get list of phoneme start/end times
phon_time = mat_dict['phone_time']
# print(phon_time.shape)
# print(phon_time)
# phon_time = phon_time.reshape((phon_time.shape[1], phon_time.shape[0])) # wrong!
phon_time = phon_time.T # much better
# make list of true/false to indicate vowels(true)/nonvowel(false)
vowel_phonemes = ['e~', '9~', 'a~', 'o~', 'i', 'e', 'E', 'a', 'A', 'O', 'o', 'u', 'y', '2', '9', '@']
# [a if C else b for i in items]
vowels = [True if phon[0] in vowel_phonemes else False for i, phon in enumerate(phon_label)]
# print('vowels ', vowels)
# create list to hold sequence of voiced and unvoiced f0 contour values
# all_contours = []
# create new figure for this phrase
fig = plt.figure(figsize=(16, 8))
plt.title(filename_base)
plt.xlabel('Time (s)')
plt.ylabel('Freq (Hz)')
# plt.xlim(0, 800)
plt.ylim(-20, 700)
values = np.arange(phon_label.shape[0]).tolist()
# print(values)
# print(syll_label.shape[0])
jet = plt.get_cmap('Dark2')
cNorm = colors.Normalize(vmin=0, vmax=values[-1])
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
start_source = 0
start_target = 0
colour_counter = 0
# voiced_syll_counter = 0
# dict to hold phoneme identifiers for this file (phrase)
phon_dict = {}
# for each syll in syll_label
for i, syll in enumerate(syll_label):
# if syll_label doesn't contain a '#' in it
if '#' not in syll[0]:
# print(syll[0])
# get syll_time start and end times
# print('i = ', i)
syll_start_time = syll_time[i, 0]
syll_end_time = syll_time[i, 1]
phone_id_list = []
# for each phone label in phone_label (this mat file)
for j, label in enumerate(phon_label):
# get start/end times from phone_time
phone_start_time = phon_time[j, 0]
phone_end_time = phon_time[j, 1]
# if start time => syll_start_time AND end time <= syll_end_time then this phoneme is in the syllable
if phone_start_time >= syll_start_time and phone_end_time <= syll_end_time:
# add phoneme label id to a list (to use to reference label and start/end times)
phone_id_list.append(j)
# iterate through phonemes of this syllable
for k in phone_id_list:
# for all_phon_counter, phon in enumerate(phon_label):
# check if phone_label[k][0] is in phon_dict
# if so, incremement the value and save value into phon_incval
if phon_label[k][0] in phon_dict:
phon_dict[phon_label[k][0]] = phon_dict[phon_label[k][0]] + 1
phon_incval = phon_dict[phon_label[k][0]]
# print('IN')
# if not, add it with value 1 and save value into phon_incval
else:
phon_dict[phon_label[k][0]] = 1
phon_incval = phon_dict[phon_label[k][0]]
# print('NOT')
# append the phone_label[k][0] + phon_incval to all_phonemes
# print('phone_label[k][0] = ', phone_label[k][0])
# print('phon_label = {}'.format(phone_label[k][0] + str(phon_incval)))
all_phonemes.append(phon_label[k][0] + str(phon_incval))
# print('******** SYL ********')
# build the filename of the syllable
filename_phon = ''.join([filename_base, '_',
phon_label[k][0], str(phon_incval),
csv_input_file_extension])
# build the source file path
source_file_path = os.path.join(csv_input_directory_path, filename_phon)
# print(source_file_path)
# if phoneme is not a vowel, or if phon file is not in test_log.txt (i.e. it's missing or empty),
# get the length of the nonvowel phoneme and add zeroes to all_contours list
# if not vowels[all_phon_counter] or filename_phon not in test_log:
if phon_label[k][0] not in vowel_phonemes:
# print('!!! non-vowel')
time = phon_time[k, 1] - phon_time[k, 0]
# print('time ', time)
num_zeroes = int(time // step_s)
# print('num_zeroes ', num_zeroes)
contour_source = contour_target = [0 for _ in range(num_zeroes)]
# if syllable is voiced, add its contents to the all_contours list
else:
# print('!!! vowel')
replacement = ''
with open(source_file_path) as f:
s = f.read()
s = s.replace('a', replacement)
s = s.replace('b', replacement)
s = s.replace('c', replacement)
with open(source_file_path, 'w') as f:
f.write(s)
# load the source file and extract vars
source_f0_raw = np.loadtxt(source_file_path, dtype='int')
# iterate over the numpy array, adding items to the list
contour_source = [source_f0_raw[x] for x in range(source_f0_raw.shape[0])]
# build the target file path
target_file_path = os.path.join(phon_input_directory_path, filename_phon)
# load the target file and extract vars
try:
target_f0_raw = np.loadtxt(target_file_path, dtype='int')
except:
continue
# iterate over the numpy array, adding items to the list
contour_target = [target_f0_raw[x] for x in range(target_f0_raw.shape[0])]
# # increment counter
# voiced_syll_counter += 1
# if a voiced phoneme, plot as a colour
if vowels[k]:
colorVal = scalarMap.to_rgba(values[k])
# if unvoiced, plot as grey (to show not converted
else:
# print(colors.to_rgba('grey'))
colorVal = colors.to_rgba('black')
# plot the source phoneme
plt.plot(range(start_source, start_source + len(contour_source)), contour_source, color=colorVal,
alpha=0.7, linewidth=3, label='source')
start_source += len(contour_source)
plt.plot(range(start_target, start_target + len(contour_target)), contour_target, color=colorVal,
alpha=0.7, linestyle=':', linewidth=3, label='predicted')
start_target += len(contour_target)
# add legend to each figure
# plt.legend()
plt.tight_layout()
fig.savefig(os.path.join(output_directory, filename_base + '.png'))
# close the fig so it never gets displayed - used with plt.ioff()
plt.close(fig)
# increment counters
i += 1
print('done')
```
| github_jupyter |
```
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import math
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from src.preparation import fetch_dataset
from src.processing import split_train_test_val, dataset_to_array, dataset_to_generator, make_tf_dataset
from src.modules.visualizer import do_heatmap
import numpy as np
from src.processing import make_tf_dataset, split_train_test
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Dropout, Flatten
from tensorflow.python.keras.layers import LSTM
from tensorflow.compat.v1.math import confusion_matrix
# This is the size of the window that is fed into the DNN
window_size = 15
# The number of the features present in the dataset
num_of_features = 42
# Number of distinct labels in the output
label_length = 4
# Hyperparameter that defines the number of samples to work through
# before updating the internal model parameters.
batch_size = 25
# Epochs
ep = 50

# Build the classifier: single LSTM layer -> dropout -> 4-way softmax.
model = Sequential()
model.add(LSTM(512, return_sequences=False, input_shape=(window_size, num_of_features)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(label_length, activation='softmax'))
# BUGFIX: the targets are one-hot 4-class labels with a softmax output, so the
# correct loss is categorical cross-entropy. 'binary_crossentropy' treats each
# output unit as an independent binary problem and makes Keras report a
# misleadingly inflated 'accuracy' on multi-class targets.
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy',])
def list_to_num(el):
    """Map a length-4 one-hot label list to its class index (0-3).

    Preserves the original lenient behaviour: anything that is not a valid
    one-hot vector prints a diagnostic and implicitly returns None.
    """
    one_hot_classes = ([1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1])
    for class_id, one_hot in enumerate(one_hot_classes):
        if el == one_hot:
            return class_id
    print('Error', el)
def test_model(md, dt):
    """Evaluate model `md` on dataset `dt` and print a confusion matrix of
    argmax predictions against the one-hot ground-truth labels."""
    print('\n')
    # window + batch the dataset exactly as in training
    windowed = make_tf_dataset(dt, window_size, num_of_features, label_length).batch(batch_size)
    results = md.evaluate(windowed)
    # turn each softmax row into its predicted class id
    predicted = [list(row).index(max(list(row))) for row in md.predict(windowed)]
    # ground truth (the feature matrix is unused here)
    _features, labels = dataset_to_array(dt)
    # pad predictions with class 3 so both sequences have equal length
    # (windowing drops the tail samples)
    predicted = predicted + [3] * (len(labels) - len(predicted))
    truth = [list_to_num(list(x)) for x in labels]
    # confusion matrix
    cm = confusion_matrix(truth, predicted)
    print('\n', cm)
def build_and_test(dataset, name):
    """Train the module-level `model` on `dataset`, evaluate on the validation
    split, print its confusion matrix, and save the weights to an HDF5 file."""
    # NOTE(review): this reuses the single global `model`, so successive calls
    # keep training the same weights rather than starting fresh per dataset.
    train, test, val = split_train_test_val(dataset)
    data_train = make_tf_dataset(train, window_size, num_of_features, label_length ).batch(batch_size)
    # NOTE(review): data_test is built but never used; evaluation below uses `val`.
    data_test = make_tf_dataset(test ,window_size, num_of_features, label_length).batch(batch_size)
    model.fit(data_train, epochs=ep)
    data_val = make_tf_dataset(val ,window_size, num_of_features, label_length).batch(batch_size)
    results = model.evaluate(data_val)
    test_model(model, val)
    model.save('model_{0}_{1}.h5'.format(name, ep)) # creates a HDF5 file 'my_model.h5'
# Run the full train/evaluate/save cycle on each of the three slim datasets.
dataset = fetch_dataset('dataset_a1_slim.pkl')
build_and_test(dataset, 'a1_slim')
dataset = fetch_dataset('dataset_f1_slim.pkl')
build_and_test(dataset, 'f1_slim')
dataset = fetch_dataset('dataset_g1_slim.pkl')
build_and_test(dataset, 'g1_slim')
```
| github_jupyter |
# The InterlockLedger RESTful API
This notebook will show the usage of some features of the Python client of the InterlockLedger RESTful API.
```
%load_ext autoreload
%autoreload 2
import sys
import traceback
import json
#sys.path.append('../')
from il2_rest import RestNode
from il2_rest.util import PKCS12Certificate
# Comment these two lines if you want to show InsecureRequestWarnings
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
```
## Getting an instance of a node
To use the `il2_rest` client, you need to create an instance of the `RestNode` by passing a certificate file and the address of the node (default address is `localhost`).
> The PKCS12 certificate must be already imported to the InterlockLedger node and be permissioned on the desired chain. See the InterlockLedger node manual.
With the `RestNode` class, it is possible to retrieve details of the node, such as the list of valid apps in the network, peers, mirrors and chains.
```
# Connect to a node: PKCS12 certificate plus node address/port. The certificate
# password is read from a local file so it never appears in the notebook.
address = 'minerva-data.il2.io'
port = 32094
cert_file = 'danielchino.api.pfx'
with open('password.details','r') as f :
    cert_pass = f.readline()
node = RestNode(cert_file = cert_file, cert_pass = cert_pass, port = port, address=address)
print(node.details)
```
## Exploring the chains
To see and store records and documents, you need to use an instance of the `RestChain`. You can get `RestChain` instances by retrieving the list of chains in the network:
```
# List every chain visible to this node and keep the first chain's id
# for the examples below.
chains = node.chains
for chain in chains :
    print(chain)
chain_id = chains[0].id
```
## Checking the content of a chain
By getting an instance of the `RestChain`, it is possible to retrieve and send information about the chain.
On this example, we will use the chain with the following id (`chain_id` - change this to your il2 chain).
It is possible to see the permitted apps and keys.
It is also possible to check the records stored in the chain.
```
# Inspect one chain: its active apps, permitted keys, and first few records.
chain = node.chain_by_id(chain_id)
print(chain.active_apps)
for key in chain.permitted_keys :
    print(key)
for record in chain.records(firstSerial=0, lastSerial=2).items :
    print(record)
```
## Creating a Chain
If you are using a certificate with administration privileges, it is possible to create new chains.
```
from il2_rest.models import ChainCreationModel

# Create a new chain (requires a certificate with administration privileges).
new_chain = ChainCreationModel(
    name = 'IL2App Chain',
    description = 'Chain to store person enrollments',
    managementKeyPassword = 'management_password',
    emergencyClosingKeyPassword = 'closing_password'
)
created_chain = node.create_chain(model=new_chain)
print(created_chain)
```
## Managing Keys and Apps
If you are using a certificate that is allowed to permit keys, you can permit other keys in the chain. The `permit_keys` method will return the list of permitted keys in the chain.
> To permit other keys, the certificate must be already imported to the Interlockledger node with actions for App #2 and actions 500,501.
```
from il2_rest.models import KeyPermitModel
from il2_rest.enumerations import KeyPurpose

# Permit an additional key on the chain for app #4, actions 1000 and 1001.
chain = node.chain_by_id(chain_id)
try :
    key_model = KeyPermitModel(app = 4, appActions = [1000, 1001], key_id = 'Key!MJ0kidltB324mfkiOG0aBlEocPA#SHA1',
                               name = 'documenter', publicKey = 'PubKey!KPgQEPgItqh4hu-oJtc7pSX0jp0fYEkV_kazPxOPGxiv1YX6dbt1QNb9AFb3mYpgUE9lRsehQz9Keh80K3mxdsURZbyhACRo3ljjKKBOQY4aKIIje9yPTTnJqg0XwwyBsx1zb-qEQaWm6S5HsVvMipGSfZIhgf3R2RYOvKR8zJRr7M1h7yoPN-02wzY1wubUpmpVB6aI_wAinTfUhBxKTuOkpe6M8OhPM-W4RUC-Et22Z4SzYK9-w08PULDBl3hCD2F-0K7TnQk8j-_1K0zV9bd2v0WovdjMrWUtMmWGcJ3Z2bJpB3-0e9Q_MxVw89r1nhYnj8zVf36HV8oVBZk4axWhFbTDrxADAQAB#RSA',
                               purposes = [KeyPurpose.Action, KeyPurpose.Protocol])
    # permit_keys returns the full list of keys now permitted on the chain
    keys = chain.permit_keys([key_model])
    for key in keys:
        print(key)
except :
    print(traceback.format_exc())
```
Similarly, you can permit apps using the following method. In this example we are trying to permit the app 4 (to store documents).
```
# Permit app #4 (Multi-Documents) on the chain.
chain = node.chain_by_id(chain_id)
try :
    apps = chain.permit_apps([4])
    print(apps)
except :
    # NOTE(review): bare except keeps the demo running on failure, but it also
    # swallows KeyboardInterrupt - `except Exception` would be safer.
    print(traceback.format_exc())
```
## Storing Documents
You can store a series of documents in a single record using the documents API. To do so, you need to start a transaction, then you upload the documents and finally commit the transaction.
> When you commit the documents, you will receive the locator of the documents; remember to store this locator in your system.
```
from il2_rest.models import DocumentsBeginTransactionModel
from il2_rest.models import DocumentsTransactionModel

print(str(node.documents_config))
chain = node.chain_by_id(chain_id)

# Using a DocumentsBeginTransactionModel: begin an encrypted transaction,
# add three files, then commit. Commit returns the record locator - it is
# the only handle to the stored documents, so keep it.
try :
    model = DocumentsBeginTransactionModel(chain = chain_id,
                                           comment ='Using model',
                                           encryption = 'PBKDF2-SHA256-AES128-HIGH',
                                           password = 'qwerty123456')
    resp = chain.documents_begin_transaction(model = model)
    transaction_id = resp.transactionId
    print(chain.documents_transaction_add_item(transaction_id, "item1.txt", "./test.txt"))
    print(chain.documents_transaction_add_item(transaction_id, "item2.txt", "./test2.txt", comment = "This file has a comment."))
    print(chain.documents_transaction_add_item(transaction_id, "item3.pdf", "../InterlockLedgerAPI.pdf", comment = "PDF file."))
    print(chain.documents_transaction_status(transaction_id))
    locator = chain.documents_transaction_commit(transaction_id)
    print(locator)
except :
    print(traceback.format_exc())

# Passing parameters to the documents_begin_transaction method directly
# (no model object, no encryption settings).
try :
    resp = chain.documents_begin_transaction(comment ='Using parameters')
    transaction_id = resp.transactionId
    print(chain.documents_transaction_add_item(transaction_id, "item1.txt", "./test.txt"))
    print(chain.documents_transaction_add_item(transaction_id, "item2.txt", "./test2.txt", comment = "This file has a comment."))
    print(chain.documents_transaction_status(transaction_id))
    locator = chain.documents_transaction_commit(transaction_id)
    print(locator)
except :
    print(traceback.format_exc())
```
To get information about a multi-document record, you will need to use the locator id.
With the locator id you can check the metadata of a multi-document.
Or you can download the files stored in the record.
It is possible to download a single file (indicating the file with its index), or download all files in a compressed file.
```
# Retrieve a multi-document record's metadata and download its contents,
# addressed by the locator returned at commit time.
chain = node.chain_by_id(chain_id)
resp = chain.documents_transaction_metadata(locator)
print(resp)
# Download a single file (by its index within the record)
chain.download_single_document_at(locator, 1)
# Download a compressed file with all files in the record
chain.download_documents_as_zip(locator)
```
## Storing JSON Documents
If you wish to store a generic JSON data, you can use the following script to store in a JSON document record:
```
# Store an arbitrary JSON payload as a JSON-document record.
chain = node.chain_by_id(chain_id)
json_data = {
    "field1" : 1,
    "field2" : "Test",
    "field3": [1,2,3],
    "field4" : {
        "value1" : 10,
        "value2" : 20
    }
}
resp = chain.store_json_document(json_data)
print(resp)
```
### Decrypting the JSON Document
```
# Decrypt the stored JSON with the same PKCS12 certificate used by the client.
pkcs12_cert = PKCS12Certificate(path = cert_file, password = cert_pass)
decrypted_json = resp.encryptedJson.decode_with(pkcs12_cert)
print(json.dumps(decrypted_json, indent=4))
```
## Storing Records
There is also a generic interface to store records. It is possible to store records using the model classes or using the unpacked method.
> To use this method, you will need to know the exact record's logic schema of the InterlockApp (`applicationId`) you want to store -- either JSON or bytewise.
> If you are using InterlockApps 4 (Multi-Document) or 8 (JSON Documents), we highly recommend to use the specific methods for each app.
```
from il2_rest.models import NewRecordModelAsJson
from il2_rest.models import NewRecordModel

# Generic record storage: the payload must match the target app's exact schema.
chain = node.chain_by_id(chain_id)
# 1) payload expressed as JSON
try :
    model_json = NewRecordModelAsJson(applicationId = 1, payloadTagId = 300, rec_json= {'tagId': 300,'version' : 1, 'apps': [4]})
    record_json = chain.add_record_as_json(model = model_json)
    print(record_json)
except :
    print(traceback.format_exc())
# 2) payload expressed as raw bytes (full encoded payload)
try :
    model_bytes = NewRecordModel(applicationId = 1, payloadTagId = 300, payloadBytes = bytes([248, 52, 7, 5, 0, 0, 20, 2, 1, 4]))
    record_bytes = chain.add_record(model_bytes)
    print(record_bytes)
except :
    print(traceback.format_exc())
# 3) payload as raw bytes without the envelope (unpacked)
try :
    record_unpacked = chain.add_record_unpacked(applicationId = 1, payloadTagId = 300, rec_bytes = bytes([5, 0, 0, 20, 2, 1, 4]))
    print(record_unpacked)
except :
    print(traceback.format_exc())
```
## Interlockings
It is also possible to check or force interlocks using the API.
```
from il2_rest.models import ForceInterlockModel

# Show the first existing interlock, then force a new one to a target chain.
chain = node.chains[0]
for interlock in chain.interlocks().items :
    print(interlock)
    break
force_model = ForceInterlockModel(targetChain = 'or7lzOGOvzH3GeNUTPqJI41CY0rVcEWgw6IEBmSSDxI')
interlock_model = chain.force_interlock(model = force_model)
print(interlock_model)
```
| github_jupyter |
##### Copyright 2021 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# On-Device Training with TensorFlow Lite
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/examples/on_device_training/overview"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/examples/on_device_training/overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/examples/on_device_training/overview.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/examples/on_device_training/overview.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
When deploying TensorFlow Lite machine learning model to device or mobile app, you may want to enable the model to be improved or personalized based on input from the device or end user. Using on-device training techniques allows you to update a model *without* data leaving your users' devices, improving user privacy, and without requiring users to update the device software.
For example, you may have a model in your mobile app that recognizes fashion items, but you want users to get improved recognition performance over time based on their interests. Enabling on-device training allows users who are interested in shoes to get better at recognizing a particular style of shoe or shoe brand the more often they use your app.
This tutorial shows you how to construct a TensorFlow Lite model that can be incrementally trained and improved within an installed Android app.
Note: The on-device training technique can be added to existing TensorFlow Lite implementations, provided the devices you are targeting support local file storage.
## Setup
This tutorial uses Python to train and convert a TensorFlow model before incorporating it into an Android app. Get started by installing and importing the following packages.
```
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
```
Note: The On-Device Training APIs are available in TensorFlow version 2.7 and higher.
## Classify images of clothing
This example code uses the [Fashion MNIST dataset](https://keras.io/api/datasets/fashion_mnist/) to train a neural network model for classifying images of clothing. This dataset contains 60,000 small (28 x 28 pixel) grayscale images containing 10 different categories of fashion accessories, including dresses, shirts, and sandals.
<figure>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST images">
<figcaption><b>Figure 1</b>: <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).</figcaption>
</figure>
You can explore this dataset in more depth in the [Keras classification tutorial](https://www.tensorflow.org/tutorials/keras/classification#import_the_fashion_mnist_dataset).
## Build a model for on-device training
TensorFlow Lite models typically have only a single exposed function method (or [signature](https://www.tensorflow.org/lite/guide/signatures)) that allows you to call the model to run an inference. For a model to be trained and used on a device, you must be able to perform several separate operations, including train, infer, save, and restore functions for the model. You can enable this functionality by first extending your TensorFlow model to have multiple functions, and then exposing those functions as signatures when you convert your model to the TensorFlow Lite model format.
The code example below shows you how to add the following functions to a TensorFlow model:
* `train` function trains the model with training data.
* `infer` function invokes the inference.
* `save` function saves the trainable weights into the file system.
* `restore` function loads the trainable weights from the file system.
```
# Side length of the square input images (Fashion MNIST is 28x28).
IMG_SIZE = 28

class Model(tf.Module):
  """Fashion-MNIST classifier exposing train/infer/save/restore as
  tf.function signatures so they survive conversion to TFLite and can be
  invoked on-device."""

  def __init__(self):
    self.model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(IMG_SIZE, IMG_SIZE), name='flatten'),
        tf.keras.layers.Dense(128, activation='relu', name='dense_1'),
        tf.keras.layers.Dense(10, name='dense_2')
    ])
    self.model.compile(
        optimizer='sgd',
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True))

  # The `train` function takes a batch of input images and labels.
  @tf.function(input_signature=[
      tf.TensorSpec([None, IMG_SIZE, IMG_SIZE], tf.float32),
      tf.TensorSpec([None, 10], tf.float32),
  ])
  def train(self, x, y):
    """Run one SGD step on a batch (x: images, y: one-hot labels).

    Returns a dict {'loss': scalar batch loss}.
    """
    with tf.GradientTape() as tape:
      prediction = self.model(x)
      loss = self.model.loss(y, prediction)
    gradients = tape.gradient(loss, self.model.trainable_variables)
    self.model.optimizer.apply_gradients(
        zip(gradients, self.model.trainable_variables))
    result = {"loss": loss}
    return result

  @tf.function(input_signature=[
      tf.TensorSpec([None, IMG_SIZE, IMG_SIZE], tf.float32),
  ])
  def infer(self, x):
    """Return {'output': softmax probabilities, 'logits': raw logits} for a batch."""
    logits = self.model(x)
    probabilities = tf.nn.softmax(logits, axis=-1)
    return {
        "output": probabilities,
        "logits": logits
    }

  @tf.function(input_signature=[tf.TensorSpec(shape=[], dtype=tf.string)])
  def save(self, checkpoint_path):
    """Serialize the model weights to `checkpoint_path` (TF1-format checkpoint)."""
    tensor_names = [weight.name for weight in self.model.weights]
    tensors_to_save = [weight.read_value() for weight in self.model.weights]
    tf.raw_ops.Save(
        filename=checkpoint_path, tensor_names=tensor_names,
        data=tensors_to_save, name='save')
    return {
        "checkpoint_path": checkpoint_path
    }

  @tf.function(input_signature=[tf.TensorSpec(shape=[], dtype=tf.string)])
  def restore(self, checkpoint_path):
    """Load weights from `checkpoint_path` into the model; returns the restored tensors."""
    restored_tensors = {}
    for var in self.model.weights:
      restored = tf.raw_ops.Restore(
          file_pattern=checkpoint_path, tensor_name=var.name, dt=var.dtype,
          name='restore')
      var.assign(restored)
      restored_tensors[var.name] = restored
    return restored_tensors
```
The `train` function in the code above uses the [GradientTape](https://www.tensorflow.org/api_docs/python/tf/GradientTape) class to record operations for automatic differentiation. For more information on how to use this class, see the [Introduction to gradients and automatic differentiation](https://www.tensorflow.org/guide/autodiff).
You could use the `Model.train_step` method of the keras model here instead of a from-scratch implementation. Just note that the loss (and metrics) returned by `Model.train_step` is the running average, and should be reset regularly (typically each epoch). See [Customize Model.fit](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit) for details.
Note: The weights generated by this model are serialized into a TensorFlow 1 format checkpoint file.
## Prepare the data
Get the Fashion MNIST dataset for training your model.
```
# Download Fashion MNIST: 60k training and 10k test 28x28 grayscale images.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```
### Preprocess the dataset
Pixel values in this dataset are between 0 and 255, and must be normalized to a value between 0 and 1 for processing by the model. Divide the values by 255 to make this adjustment.
```
# Scale pixel values from [0, 255] to [0.0, 1.0] as float32.
train_images = (train_images / 255.0).astype(np.float32)
test_images = (test_images / 255.0).astype(np.float32)
```
Convert the data labels to categorical values by performing one-hot encoding.
```
# One-hot encode the integer class labels (10 classes).
train_labels = tf.keras.utils.to_categorical(train_labels)
test_labels = tf.keras.utils.to_categorical(test_labels)
```
Note: Make sure you preprocess your *training* and *testing* datasets in the same way, so that your tests accurately evaluate your model's performance.
## Train the model
Before converting and setting up your TensorFlow Lite model, complete the initial training of your model using the preprocessed dataset and the `train` signature method. The following code runs model training for 100 epochs, processing batches of 100 images at a time, and displaying the loss value after every 10 epochs. Since this training run is processing quite a bit of data, it may take a few minutes to finish.
```
NUM_EPOCHS = 100
BATCH_SIZE = 100
epochs = np.arange(1, NUM_EPOCHS + 1, 1)
losses = np.zeros([NUM_EPOCHS])
m = Model()

train_ds = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
train_ds = train_ds.batch(BATCH_SIZE)

for i in range(NUM_EPOCHS):
    for x,y in train_ds:
        result = m.train(x, y)

    # NOTE: this records the loss of the *last* batch of the epoch,
    # not an epoch average.
    losses[i] = result['loss']
    if (i + 1) % 10 == 0:
        print(f"Finished {i+1} epochs")
        print(f" loss: {losses[i]:.3f}")

# Save the trained weights to a checkpoint.
m.save('/tmp/model.ckpt')

plt.plot(epochs, losses, label='Pre-training')
plt.ylim([0, max(plt.ylim())])
plt.xlabel('Epoch')
plt.ylabel('Loss [Cross Entropy]')
plt.legend();
```
Note: You should complete initial training of your model before converting it to TensorFlow Lite format, so that the model has an initial set of weights, and is able to perform reasonable inferences *before* you start collecting data and conducting training runs on the device.
## Convert model to TensorFlow Lite format
After you have extended your TensorFlow model to enable additional functions for on-device training and completed initial training of the model, you can convert it to TensorFlow Lite format. The following code converts and saves your model to that format, including the set of signatures that you use with the TensorFlow Lite model on a device: `train, infer, save, restore`.
```
SAVED_MODEL_DIR = "saved_model"

# Export the model with all four signatures so each remains callable
# from the converted TFLite model.
tf.saved_model.save(
    m,
    SAVED_MODEL_DIR,
    signatures={
        'train':
            m.train.get_concrete_function(),
        'infer':
            m.infer.get_concrete_function(),
        'save':
            m.save.get_concrete_function(),
        'restore':
            m.restore.get_concrete_function(),
    })

# Convert the model
converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL_DIR)
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # enable TensorFlow Lite ops.
    tf.lite.OpsSet.SELECT_TF_OPS  # enable TensorFlow ops.
]
# Needed so mutable (resource) variables survive conversion for on-device training.
converter.experimental_enable_resource_variables = True
tflite_model = converter.convert()
```
### Setup the TensorFlow Lite signatures
The TensorFlow Lite model you saved in the previous step contains several function signatures. You can access them through the `tf.lite.Interpreter` class and invoke each `restore`, `train`, `save`, and `infer` signature separately.
```
# Load the converted flatbuffer and grab the inference signature runner.
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
infer = interpreter.get_signature_runner("infer")
```
Compare the output of the original model, and the converted lite model:
```
# Logits for the same sample from the original model and the TFLite model.
logits_original = m.infer(x=train_images[:1])['logits'][0]
logits_lite = infer(x=train_images[:1])['logits'][0]
#@title
def compare_logits(logits):
    """Draw a grouped bar chart comparing two sets of logits.

    `logits` must be a dict with exactly two entries, each mapping a label
    (e.g. 'Original', 'Lite') to a 1-D array of logit values. Bars are
    paired per ClassID and the title reports the total absolute difference
    between the two arrays.
    """
    bar_width = 0.35
    shift = bar_width / 2
    assert len(logits) == 2
    (name_a, values_a), (name_b, values_b) = logits.items()
    plt.bar(x=np.arange(len(values_a)) - shift,
            height=values_a, width=0.35, label=name_a)
    plt.bar(x=np.arange(len(values_b)) + shift,
            height=values_b, width=0.35, label=name_b)
    plt.legend()
    plt.grid(True)
    plt.ylabel('Logit')
    plt.xlabel('ClassID')
    total_diff = np.sum(np.abs(values_a - values_b))
    plt.title(f"Total difference: {total_diff:.3g}")
compare_logits({'Original': logits_original, 'Lite': logits_lite})
```
Above, you can see that the behavior of the model is not changed by the conversion to TFLite.
## Retrain the model on a device
After converting your model to TensorFlow Lite and deploying it with your app, you can retrain the model on a device using new data and the `train` signature method of your model. Each training run generates a new set of weights that you can save for re-use and further improvement of the model, as shown in the next section.
Note: Since training tasks are resource intensive, you should consider performing them when users are not actively interacting with the device, and as a background process. Consider using the [WorkManager](https://developer.android.com/topic/libraries/architecture/workmanager) API to schedule model retraining as an asynchronous task.
On Android, you can perform on-device training with TensorFlow Lite using either Java or C++ APIs. In Java, use the `Interpreter` class to load a model and drive model training tasks. The following example shows how to run the training procedure using the `runSignature` method:
```Java
// Run the "train" signature over mini-batches for a number of epochs.
try (Interpreter interpreter = new Interpreter(modelBuffer)) {
    int NUM_EPOCHS = 100;
    int BATCH_SIZE = 100;
    int IMG_HEIGHT = 28;
    int IMG_WIDTH = 28;
    int NUM_TRAININGS = 60000;
    int NUM_BATCHES = NUM_TRAININGS / BATCH_SIZE;

    List<FloatBuffer> trainImageBatches = new ArrayList<>(NUM_BATCHES);
    List<FloatBuffer> trainLabelBatches = new ArrayList<>(NUM_BATCHES);

    // Prepare training batches.
    for (int i = 0; i < NUM_BATCHES; ++i) {
        FloatBuffer trainImages = FloatBuffer.allocateDirect(BATCH_SIZE * IMG_HEIGHT * IMG_WIDTH).order(ByteOrder.nativeOrder());
        FloatBuffer trainLabels = FloatBuffer.allocateDirect(BATCH_SIZE * 10).order(ByteOrder.nativeOrder());
        // Fill the data values...
        trainImageBatches.add(trainImages.rewind());
        // BUG FIX: the original added labels to `trainImageLabels`, a list
        // that is never declared; label buffers belong in trainLabelBatches.
        trainLabelBatches.add(trainLabels.rewind());
    }

    // Run training for a few steps.
    float[] losses = new float[NUM_EPOCHS];
    for (int epoch = 0; epoch < NUM_EPOCHS; ++epoch) {
        for (int batchIdx = 0; batchIdx < NUM_BATCHES; ++batchIdx) {
            Map<String, Object> inputs = new HashMap<>();
            inputs.put("x", trainImageBatches.get(batchIdx));
            inputs.put("y", trainLabelBatches.get(batchIdx));

            Map<String, Object> outputs = new HashMap<>();
            FloatBuffer loss = FloatBuffer.allocate(1);
            outputs.put("loss", loss);

            interpreter.runSignature(inputs, outputs, "train");

            // Record the last loss.
            if (batchIdx == NUM_BATCHES - 1) losses[epoch] = loss.get(0);
        }

        // Print the loss output for every 10 epochs.
        // BUG FIX: `loss` is scoped to the inner batch loop, so the original
        // `loss.get(0)` here did not compile; use the recorded epoch loss.
        if ((epoch + 1) % 10 == 0) {
            System.out.println(
                "Finished " + (epoch + 1) + " epochs, current loss: " + losses[epoch]);
        }
    }

    // ...
}
```
You can see a complete code example of model retraining inside an Android app in the [model personalization demo app](https://github.com/tensorflow/examples/blob/master/lite/examples/model_personalization/android/transfer_api/src/main/java/org/tensorflow/lite/examples/transfer/api/LiteMultipleSignatureModel.java).
Run training for a few epochs to improve or personalize the model. In practice, you would run this additional training using data collected on the device. For simplicity, this example uses the same training data as the previous training step.
```
train = interpreter.get_signature_runner("train")
NUM_EPOCHS = 50
BATCH_SIZE = 100
more_epochs = np.arange(epochs[-1]+1, epochs[-1] + NUM_EPOCHS + 1, 1)
more_losses = np.zeros([NUM_EPOCHS])
for i in range(NUM_EPOCHS):
for x,y in train_ds:
result = train(x=x, y=y)
more_losses[i] = result['loss']
if (i + 1) % 10 == 0:
print(f"Finished {i+1} epochs")
print(f" loss: {more_losses[i]:.3f}")
plt.plot(epochs, losses, label='Pre-training')
plt.plot(more_epochs, more_losses, label='On device')
plt.ylim([0, max(plt.ylim())])
plt.xlabel('Epoch')
plt.ylabel('Loss [Cross Entropy]')
plt.legend();
```
Above you can see that the on-device training picks up exactly where the pretraining stopped.
## Save the trained weights
When you complete a training run on a device, the model updates the set of weights it is using in memory. Using the `save` signature method you created in your TensorFlow Lite model, you can save these weights to a checkpoint file for later reuse and improve your model.
```
save = interpreter.get_signature_runner("save")
save(checkpoint_path=np.array("/tmp/model.ckpt", dtype=np.string_))
```
In your Android application, you can store the generated weights as a checkpoint file in the internal storage space allocated for your app.
```Java
// Persist the interpreter's in-memory weights via the model's "save" signature.
try (Interpreter interpreter = new Interpreter(modelBuffer)) {
    // Conduct the training jobs.

    // Export the trained weights as a checkpoint file.
    // App-internal storage (getFilesDir) keeps the checkpoint private to this app.
    File outputFile = new File(getFilesDir(), "checkpoint.ckpt");
    Map<String, Object> inputs = new HashMap<>();
    inputs.put("checkpoint_path", outputFile.getAbsolutePath());
    // "save" produces no outputs; it writes the checkpoint as a side effect.
    Map<String, Object> outputs = new HashMap<>();
    interpreter.runSignature(inputs, outputs, "save");
}
```
## Restore the trained weights
Any time you create an interpreter from a TFLite model, the interpreter will initially load the original model weights.
So after you've done some training and saved a checkpoint file, you'll need to run the `restore` signature method to load the checkpoint.
A good rule is "Anytime you create an Interpreter for a model, if the checkpoint exists, load it". If you need to reset the model to the baseline behavior, just delete the checkpoint and create a fresh interpreter.
```
another_interpreter = tf.lite.Interpreter(model_content=tflite_model)
another_interpreter.allocate_tensors()
infer = another_interpreter.get_signature_runner("infer")
restore = another_interpreter.get_signature_runner("restore")
logits_before = infer(x=train_images[:1])['logits'][0]
# Restore the trained weights from /tmp/model.ckpt
restore(checkpoint_path=np.array("/tmp/model.ckpt", dtype=np.string_))
logits_after = infer(x=train_images[:1])['logits'][0]
compare_logits({'Before': logits_before, 'After': logits_after})
```
The checkpoint was generated by training and saving with TFLite. Above you can see that applying the checkpoint updates the behavior of the model.
Note: Loading the saved weights from the checkpoint can take time, based on the number of variables in the model and the size of the checkpoint file.
In your Android app, you can restore the serialized, trained weights from the checkpoint file you stored earlier.
```Java
// Re-load previously saved weights into a fresh interpreter via the
// model's "restore" signature.
try (Interpreter anotherInterpreter = new Interpreter(modelBuffer)) {
    // Load the trained weights from the checkpoint file.
    // Must be the same path that was passed to the "save" signature earlier.
    File outputFile = new File(getFilesDir(), "checkpoint.ckpt");
    Map<String, Object> inputs = new HashMap<>();
    inputs.put("checkpoint_path", outputFile.getAbsolutePath());
    // "restore" produces no outputs; it mutates the interpreter's variables.
    Map<String, Object> outputs = new HashMap<>();
    anotherInterpreter.runSignature(inputs, outputs, "restore");
}
```
Note: When your application restarts, you should reload your trained weights prior to running new inferences.
## Run Inference using trained weights
Once you have loaded previously saved weights from a checkpoint file, running the `infer` method uses those weights with your original model to improve predictions. After loading the saved weights, you can use the `infer` signature method as shown below.
Note: Loading the saved weights is not required to run an inference, but running in that configuration produces predictions using the originally trained model, without improvements.
```
infer = another_interpreter.get_signature_runner("infer")
result = infer(x=test_images)
predictions = np.argmax(result["output"], axis=1)
true_labels = np.argmax(test_labels, axis=1)
result['output'].shape
```
Plot the predicted labels.
```
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
def plot(images, predictions, true_labels):
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(images[i], cmap=plt.cm.binary)
color = 'b' if predictions[i] == true_labels[i] else 'r'
plt.xlabel(class_names[predictions[i]], color=color)
plt.show()
plot(test_images, predictions, true_labels)
predictions.shape
```
In your Android application, after restoring the trained weights, run the inferences based on the loaded data.
```Java
// Run the "infer" signature on a small test batch and take the argmax of
// each row of logits to obtain the predicted class per test image.
try (Interpreter anotherInterpreter = new Interpreter(modelBuffer)) {
    // Restore the weights from the checkpoint file.

    int NUM_TESTS = 10;
    FloatBuffer testImages = FloatBuffer.allocateDirect(NUM_TESTS * 28 * 28).order(ByteOrder.nativeOrder());
    FloatBuffer output = FloatBuffer.allocateDirect(NUM_TESTS * 10).order(ByteOrder.nativeOrder());

    // Fill the test data.

    // Run the inference.
    Map<String, Object> inputs = new HashMap<>();
    inputs.put("x", testImages.rewind());
    Map<String, Object> outputs = new HashMap<>();
    outputs.put("output", output);
    anotherInterpreter.runSignature(inputs, outputs, "infer");
    output.rewind();

    // Process the result to get the final category values (argmax per row).
    int[] testLabels = new int[NUM_TESTS];
    for (int i = 0; i < NUM_TESTS; ++i) {
        int index = 0;
        for (int j = 1; j < 10; ++j) {
            // BUG FIX: the original assigned `index = testLabels[j]` (always 0,
            // since testLabels is not yet filled), so the argmax was never
            // tracked. The new best column is simply j.
            if (output.get(i * 10 + index) < output.get(i * 10 + j)) index = j;
        }
        testLabels[i] = index;
    }
}
```
Congratulations! You now have built a TensorFlow Lite model that supports on-device training. For more coding details, check out the example implementation in the [model personalization demo app](https://github.com/tensorflow/examples/tree/master/lite/examples/model_personalization).
If you are interested in learning more about image classification, check [Keras classification tutorial](https://www.tensorflow.org/tutorials/keras/classification) in the TensorFlow official guide page. This tutorial is based on that exercise and provides more depth on the subject of classification.
| github_jupyter |
# Feature Engineering
In this notebook, I will focus on feature engineering and seeing how that may improve the performance of the LightGBM model I am currently working on.
## Potential Models to Submit
1. LGBM, all features, OHE of cat variables, on X scores: 0.7994213376003929. Submission score: 0.7306
2. LGBM, all features, OHE of cat variables + geo_level_1_id, on X scores: 0.7989033042850949. Submission score: 0.7319
Well this isn't good... now my model evaluation technique is no longer reliable. Model 2 scored lower on my evaluation but actually higher in real-life...
```
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import pickle
import lightgbm as lgb
from pathlib import Path
from lightgbm import LGBMClassifier
from pprint import pprint
from sklearn.compose import ColumnTransformer
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import mean_squared_error, f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import KFold, train_test_split, RandomizedSearchCV
from sklearn.model_selection import cross_val_score, StratifiedKFold, GridSearchCV
############ USE FOR GOOGLE COLAB ############
# DATA_DIR = Path('/content/drive/MyDrive/Work/Delivery/Current/Earthquake_damage/data')
# SUBMISSIONS_DIR = Path('drive/MyDrive/Work/Delivery/Current/Earthquake_damage/submissions')
# MODEL_DIR = Path('/content/drive/MyDrive/Work/Delivery/Current/Earthquake_damage/models')
# MY_DATA_DIR = PATH('/content/drive/MyDrive/Work/Delivery/Current/Earthquake_damage/my_data')
# from google.colab import drive
# drive.mount('/content/drive')
#############################################
### USE FOR LOCAL JUPYTER NOTEBOOKS ###
DATA_DIR = Path('data')
SUBMISSIONS_DIR = Path('submissions')
MODEL_DIR = Path('models')
MY_DATA_DIR = Path('my_data')
#######################################
# The code runs the same if working on Jupyter or Colab, just need to change the
# dirs above
X = pd.read_csv(DATA_DIR / 'train_values.csv', index_col='building_id')
categorical_columns = X.select_dtypes(include='object').columns
bool_columns = [col for col in X.columns if col.startswith('has')]
X[categorical_columns] = X[categorical_columns].astype('category')
X[bool_columns] = X[bool_columns].astype('bool')
# X = pd.get_dummies(X)
y = pd.read_csv(DATA_DIR / 'train_labels.csv', index_col='building_id')
lgbm_hyperparams = {'num_leaves': 120,
'n_estimators': 240,
'min_child_samples': 40,
'learning_rate': 0.2,
'boosting_type': 'goss'}
sns.set()
def make_submission(pipeline, title):
    """
    Predict on the competition test set with a fitted pipeline and write
    the predictions to `submissions/<title>.csv`.

    `pipeline` must already be fitted; `title` becomes the CSV file name.
    """
    # Load the raw test features and repeat the training-time dtype prep
    # (note: will create a data preprocessing pipeline or function in future).
    features = pd.read_csv(DATA_DIR / 'test_values.csv', index_col='building_id')
    features[categorical_columns] = features[categorical_columns].astype('category')
    features[bool_columns] = features[bool_columns].astype('bool')

    # Predict with the pipeline supplied by the caller.
    preds = pipeline.predict(features)

    # Borrow the expected column name and row ordering from the sample
    # submission file so the output matches the required format exactly.
    template = pd.read_csv(DATA_DIR / 'submission_format.csv',
                           index_col='building_id')
    submission = pd.DataFrame(data=preds,
                              columns=template.columns,
                              index=template.index)
    submission.to_csv(SUBMISSIONS_DIR / f'{title}.csv')
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33,
random_state=42, stratify=y,
shuffle=True)
X.head()
```
## Function to test which categorical variables to encode
```
def lgbm_vary_categorical_variables(categorical_columns):
    """
    Build a LGBM model using the best hyperparameters we have (as of 09/02/21)
    and vary which columns are one-hot encoded.

    Pass the columns you wish to OHE as `categorical_columns`.

    Returns the fitted sklearn Pipeline so it can be passed to
    `make_submission`.
    """
    # handle_unknown='ignore' so category levels present in the submission
    # test set but unseen during fitting do not raise at predict time.
    t = [('ohe', OneHotEncoder(handle_unknown='ignore'), categorical_columns)]
    ct = ColumnTransformer(transformers=t, remainder='passthrough')
    lgbm_hyperparams = {'num_leaves': 120,
                        'n_estimators': 240,
                        'min_child_samples': 40,
                        'learning_rate': 0.2,
                        'boosting_type': 'goss'}
    steps = [('ct', ct),
             ('lgbm', LGBMClassifier(**lgbm_hyperparams))]
    pipe = Pipeline(steps)

    # BUG FIX: the original fitted on the full dataset X, so the "F1 on X_val"
    # score was computed on rows the model had already seen (leakage) — which
    # is why the hold-out score stopped agreeing with the submission score.
    # Fit on the training split only.
    pipe.fit(X_train, np.ravel(y_train))

    y_pred_X = pipe.predict(X)
    y_pred_X_val = pipe.predict(X_val)

    f1_score_X = f1_score(y, y_pred_X, average='micro')
    f1_score_X_val = f1_score(y_val, y_pred_X_val, average='micro')

    print('F1 score on X: ', f1_score_X)
    print('F1 score on X_val:', f1_score_X_val)
    return pipe
```
## Encode all categorical features as categorical and make Pipeline
```
lgbm_cat_cols = lgbm_vary_categorical_variables(categorical_columns)
make_submission(lgbm_cat_cols, '09-02 LGBM all features, lgbm_02_02 hyperparameters')
```
## Encode geo_level_1_id as categorical
```
cat_cols_plus_geo_1 = categorical_columns.insert(0, 'geo_level_1_id')
cat_cols_plus_geo_1
lgbm_plus_geo_1 = lgbm_vary_categorical_variables(cat_cols_plus_geo_1)
make_submission(lgbm_plus_geo_1, '09-02 LGBM all features, geo_level_1_id OHE, lgbm_02_02 hyperparameters')
```
## Encode first two geo_levels as categorical
```
cat_cols_plus_2_geos = ['geo_level_1_id', 'geo_level_2_id',
'land_surface_condition', 'foundation_type',
'roof_type', 'ground_floor_type', 'other_floor_type',
'position','plan_configuration', 'legal_ownership_status']
lgbm_plus_2_geos = lgbm_vary_categorical_variables(cat_cols_plus_2_geos)
```
## Encode all geo_levels as categorical
This doesn't help, actually makes it worse. Perhaps because we are splitting it into THOUSANDS of different features?
```
cat_cols_plus_all_geos = ['geo_level_1_id', 'geo_level_2_id', 'geo_level_3_id',
'land_surface_condition', 'foundation_type',
'roof_type', 'ground_floor_type', 'other_floor_type',
'position','plan_configuration', 'legal_ownership_status']
lgbm_plus_all_geos = lgbm_vary_categorical_variables(cat_cols_plus_all_geos)
```
## Geo level 1 with top 14 features
```
X_dummies = pd.get_dummies(X)
any(elem in categorical_columns for elem in top_14_features)
X_dummies[top_14_features]
top_14_features = ['geo_level_1_id', 'geo_level_2_id', 'geo_level_3_id',
'count_floors_pre_eq', 'age' , 'area_percentage' ,
'height_percentage',
'has_superstructure_mud_mortar_stone',
'has_superstructure_stone_flag',
'has_superstructure_mud_mortar_brick',
'has_superstructure_cement_mortar_brick',
'has_superstructure_timber', 'count_families',
'other_floor_type_q']
def lgbm_encoded_geo_level_variables_with_top_14_features(geo_level_id_cols, X, y):
    """
    Build LGBM classifier using the top_14_features subset and a custom
    number of columns you want to make categorical. The only columns to choose
    from are the geo_level_i_id columns (since all other categorical cols are
    not included in the top_14_features list).

    This is to test how many of the geo_level_i_id columns should be one hot
    encoded. Returns the fitted sklearn Pipeline.
    """
    # Reduce to the 14 most important features (module-level list).
    X = pd.get_dummies(X)
    X = X[top_14_features]
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33,
                                                      random_state=42, stratify=y,
                                                      shuffle=True)
    lgbm_hyperparams = {'num_leaves': 120,
                        'n_estimators': 240,
                        'min_child_samples': 40,
                        'learning_rate': 0.2,
                        'boosting_type': 'goss'}
    # handle_unknown='ignore' so unseen category levels at predict time
    # do not raise.
    t = [('ohe', OneHotEncoder(handle_unknown='ignore'), geo_level_id_cols)]
    steps = [('ct', ColumnTransformer(transformers=t, remainder='passthrough')),
             ('lgbm', LGBMClassifier(**lgbm_hyperparams))]
    pipe = Pipeline(steps)

    # BUG FIX: the original computed a train/validation split and then fitted
    # on the full X anyway, so the "F1 on X_val" score was leaked (the model
    # had already seen those rows). Fit on the training split only.
    pipe.fit(X_train, np.ravel(y_train))

    y_pred_X = pipe.predict(X)
    y_pred_X_val = pipe.predict(X_val)

    f1_score_X = f1_score(y, y_pred_X, average='micro')
    f1_score_X_val = f1_score(y_val, y_pred_X_val, average='micro')

    print('F1 score on X: ', f1_score_X)
    print('F1 score on X_val:', f1_score_X_val)
    return pipe
lgbm_encoded_geo_level_variables_with_top_14_features([], X, y)
lgbm_encoded_geo_level_variables_with_top_14_features(['geo_level_1_id'], X, y)
geo_level_id_cols = ['geo_level_1_id', 'geo_level_2_id']
lgbm_encoded_geo_level_variables_with_top_14_features(geo_level_id_cols, X, y)
geo_level_id_cols = ['geo_level_1_id', 'geo_level_2_id', 'geo_level_3_id']
lgbm_encoded_geo_level_variables_with_top_14_features(geo_level_id_cols, X, y)
```
## Boxplots
```
df = X.join(y)
for col in df.select_dtypes('number'):
sns.boxplot(x=df[col])
plt.show()
for col in df.select_dtypes('number').columns[:-1]:
sns.boxplot(x='damage_grade', y=col, data=df)
plt.show()
```
## Checking FIs without OHE geo_level_1_id
```
X_dummies = pd.get_dummies(X)
model = LGBMClassifier(**lgbm_hyperparams)
model.fit(X_dummies, np.ravel(y))
s_fi_dummies = pd.Series(data=model.feature_importances_,
index=X_dummies.columns,
name='FI')
# s_fi_dummies.sort_values(ascending=False).to_csv(MY_DATA_DIR / 'FIs_ohe_just_categorical_vars.csv')
s_fi_dummies
```
## Checking Feature Importances After geo_level_1_id is encoded
TL;DR
Encoding `geo_level_1_id` and `geo_level_2_id` produce some useful features (some as high as 600 feature importance!). Now it's time to explore how many of those we should keep.
From EDA we can see that some of the geo_level_1_id locations have a LOT of predictive power. Perhaps one reason for a lack of lift in the models is that we have introduced loads of unhelpful features.
So,
1. Build a model with geo_level_1_id encoded and check all the feature importances
2. What happens to the features when we apply OHE? How can I accurately select the most important ones afterwards? How do I know which features are which? One easier way would be to use the pd.get_dummies() function.
2. Remove all the features with 0 importance, then iteratively remove more and more features until I find an optimal amount (based on the feature importances).
```
cat_cols_plus_geo_1 = categorical_columns.insert(0, 'geo_level_1_id')
cat_cols_plus_geo_1
X.loc[:, 'geo_level_1_id'] = X.loc[:, 'geo_level_1_id'].astype('category')
X_ohe = pd.get_dummies(X)
lgbm_hyperparams = {'num_leaves': 120,
'n_estimators': 240,
'min_child_samples': 40,
'learning_rate': 0.2,
'boosting_type': 'goss'}
model = LGBMClassifier(**lgbm_hyperparams)
model.fit(X_ohe, np.ravel(y))
model.feature_importances_
s_fi = pd.Series(data=model.feature_importances_,
index=X_ohe.columns,
name='FI')
s_fi.sort_values(ascending=False).to_csv(MY_DATA_DIR / 'feature_importances_geo_level_1_id_ohe.csv')
```
## FIs with first 2 geo_level_i_id columns OHE'd
Probably too many
```
X_geo_categorical = X.copy()
X_geo_categorical.iloc[:, [0, 1]] = X_geo_categorical.iloc[:, [0, 1]].astype('category')
X_geo_categorical_dummies = pd.get_dummies(X_geo_categorical)
len(X_geo_categorical_dummies.columns)
def fit_and_save_feature_importances(df, title):
    """
    Fit an LGBM classifier on `df` against the module-level target `y` and
    write its feature importances to `my_data/<title>.csv`, sorted descending.
    """
    clf = LGBMClassifier(**lgbm_hyperparams)
    clf.fit(df, np.ravel(y))
    importances = pd.Series(data=clf.feature_importances_,
                            index=df.columns,
                            name='FI')
    importances.sort_values(ascending=False).to_csv(MY_DATA_DIR / f'{title}.csv')
fit_and_save_feature_importances(X_geo_categorical_dummies,
'FIs_geo_levels_1+2_OHE')
```
## Feature Selection using SelectFromModel
We've seen that encoding geo_levels 1 and 2 into categorical variables leads to many valuable features being created. Let's build a suite of models now with both geo_level_1 and geo_levels 1+2 encoded and see which models give the best score when trained on X and evaluted on both X and X_val
Let's see how we get on
```
X_geo_level_1_ohe = X.copy()
X_geo_level_1_ohe.iloc[:, 0] = X_geo_level_1_ohe.iloc[:, 0].astype('category')
X_geo_level_1_ohe = pd.get_dummies(X_geo_level_1_ohe)
X_geo_levels_1_and_2_ohe = X.copy()
X_geo_levels_1_and_2_ohe.iloc[:, [0, 1]] = X_geo_levels_1_and_2_ohe.iloc[:, [0, 1]]\
.astype('category')
X_geo_levels_1_and_2_ohe = pd.get_dummies(X_geo_levels_1_and_2_ohe)
def feature_selection_with_FI(df, thresh='mean'):
    """
    Fit a SelectFromModel -> LGBM pipeline on `df` and print the micro F1
    obtained with feature-importance threshold `thresh`.

    NOTE(review): the score is computed on the same data the pipeline is
    fitted on, so it measures in-sample fit rather than generalisation.
    """
    # Want to modify the threshold and see how it influences results —
    # can this be done with GridSearchCV?
    selection_model = LGBMClassifier(**lgbm_hyperparams)
    final_model = LGBMClassifier(**lgbm_hyperparams)
    fi_selector = SelectFromModel(estimator=selection_model, threshold=thresh)
    pipe = Pipeline([('selector', fi_selector),
                     ('lgbm', final_model)])
    pipe.fit(df, np.ravel(y))
    in_sample_pred = pipe.predict(df)
    in_sample_f1 = f1_score(y, in_sample_pred, average='micro')
    print('Threshold:', thresh)
    print('F1 score: ', in_sample_f1)
thresholds = [0, 100, 200, 300, 400, 500, 600, 1000]
for thresh in thresholds:
feature_selection_with_FI(X_geo_level_1_ohe, thresh=thresh)
```
From this it looks like having more features is better but this could just be because it is learning the mapping. Let's try it with GridSearch and see what scores I get based on each.
```
lgbm_selector = LGBMClassifier(**lgbm_hyperparams)
lgbm = LGBMClassifier(**lgbm_hyperparams)
# Want to modify the threshold and see how it influences results
# can this be done with GridSearchCV?
selector = SelectFromModel(estimator=lgbm_selector)
steps = [('selector', selector),
('lgbm', lgbm)]
pipe = Pipeline(steps)
thresholds = [0, 50, 100, 200, 300, 400, 500, 600, 1000]
param_grid = {'selector__threshold': thresholds}
gs = GridSearchCV(pipe, param_grid, scoring='f1_micro', n_jobs=-1,
cv=5, verbose=10)
lgbm_geo_level_1_feature_selected = gs.fit(X_geo_level_1_ohe, np.ravel(y))
# It got 0.7303 upon submission (this is great that this score is trustworthy!)
lgbm_geo_level_1_feature_selected.best_score_
lgbm_geo_level_1_feature_selected.best_params_
pd.DataFrame(lgbm_geo_level_1_feature_selected.cv_results_)
```
Based off of these scores, lets select thresholds 200 and 0 and submit scores and see what we get.
```
def make_submission(pipeline, title):
    """
    Given a trained pipeline object, predict on the submission test set
    ('test_values.csv') and write the predictions to a CSV in the
    submissions folder, named after `title`.

    (Re-definition of the earlier helper so this section of the notebook
    can be run standalone.)
    """
    # Read in the raw test features and apply the same dtype preprocessing
    # used at training time.
    raw = pd.read_csv(DATA_DIR / 'test_values.csv', index_col='building_id')
    raw[categorical_columns] = raw[categorical_columns].astype('category')
    raw[bool_columns] = raw[bool_columns].astype('bool')

    preds = pipeline.predict(raw)

    # Use the sample submission to get the required column name and row order.
    template = pd.read_csv(DATA_DIR / 'submission_format.csv',
                           index_col='building_id')
    out = pd.DataFrame(data=preds,
                       columns=template.columns,
                       index=template.index)
    out.to_csv(SUBMISSIONS_DIR / f'{title}.csv')
# Build the submission manually (the generic make_submission helper cannot be
# used here because the test set needs geo_level_1_id one-hot encoded first).
test_values = pd.read_csv(DATA_DIR / 'test_values.csv', index_col='building_id')
test_values[categorical_columns] = test_values[categorical_columns].astype('category')
test_values[bool_columns] = test_values[bool_columns].astype('bool')

# OHE geo_level_1_id column
test_values.iloc[:, 0] = test_values.iloc[:, 0].astype('category')
# NOTE(review): pd.get_dummies on the test set can yield a different column
# set/order than the training frame if category levels differ — verify the
# dummy columns align with those the model was fitted on.
test_values = pd.get_dummies(test_values)

# Add in custom model
predictions = lgbm_geo_level_1_feature_selected.predict(test_values)

# Sample submission supplies the expected column name and row ordering.
submission_format = pd.read_csv(DATA_DIR / 'submission_format.csv',
                                index_col='building_id')
my_submission = pd.DataFrame(data=predictions,
                             columns=submission_format.columns,
                             index=submission_format.index)

# Change title
title = 'LGBM lgbm_02_02 hyperparams - geo_level_1_id OHE, feature importance threshold=0'
my_submission.to_csv(SUBMISSIONS_DIR / f'{title}.csv')
```
| github_jupyter |
# Computer Lab 4: Forecasting Daily Data using Facebook Prophet
**In this practical you will learn:**
* How to wrangle time series data into `Prophet` format
* How to fit a `Prophet` model to a time series
* How to obtain a point forecast and prediction intervals using `Prophet`
* How to model special calender events.
* How to use `Prophet`'s built in diagnostic and cross validation tools
---
* Video to introduce the notebook: https://bit.ly/intro_prophet
> Note that in this video I use a slightly older version of Prophet. The only material difference is that prior to version 1.0 (that we are using) you imported from `fbprophet` instead of `prophet`!
---
## Standard Imports
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
```
## FB Prophet Imports
If you are using the provided conda environment `hds_forecast` you will already have `fbprophet` installed.
> At the time of writing a slightly older version of Prophet is also installed by default in Google Colab. For this version you will need to use `import fbprophet` instead of `import prophet`
```
from prophet import Prophet
import prophet
# should be version 1.0
prophet.__version__
```
## forecast-tools imports
```
# if running in Google Colab install forecast-tools
if 'google.colab' in sys.modules:
!pip install forecast-tools
from forecast_tools.model_selection import auto_naive
from forecast_tools.metrics import mean_absolute_error
```
# ARIMA imports
At the end of the notebook we will compare Prophet's performance to ARIMA
```
# if running in Google Colab install pmdarima
if 'google.colab' in sys.modules:
!pip install pmdarima
from pmdarima import auto_arima, ARIMA
from pmdarima.model_selection import RollingForecastCV, cross_val_score
```
# Data - ED reattendances
We will use an time series of patients that reattend an ED within 7 days. The data are held at the **daily** level.
The data is held in the file `ed_reattends_day.csv`
# Exercise 1: Read in and adjust
**Task:**
* Load `ed_reattends_day.csv` (url provided below)
* Plot the data
* Name the `pd.DataFrame` you create `y_train`
**Hints**
* The data is stored in UK day first format.
* Remember to set the freq of the DataTimeIndex
**Questions**:
* How would you describe the time series?
```
# your code here ...
url = 'https://raw.githubusercontent.com/health-data-science-OR/' \
+ 'hpdm097-datasets/master/ed_reattends_day.csv'
```
# Exercise 2: Wrangle the data into Prophet Format
A `Prophet` model requires the data in a `pd.DataFrame` with two columns: 'ds' - the datetimestamp and `y` the observations used in training (the index is an arbitrary numeric value). But your current (and standard form) dataframe has a datetimeindex
**Task:**
* Code a function that converts the training data into the correct format for Prophet.
* The end product should be a dataframe with 2 columns (in the following order)
* 'ds': the date
* 'y': the number of reattends on that day.
* Convert your data
```
# your code goes here...
```
# Exercise 3: Fit and predict using a basic Prophet model
Fitting a basic Prophet model is relatively straightforward. We need to create a `Prophet` object
```python
model = Prophet()
```
Unlike ARIMA where we specified the prediction interval width at prediction time, with Prophet we need to specify the interval width as we create the model. We do this using the parameter `interval_width`. By default this produces a 80\% prediction interval. Note we specify a $1-\alpha$ interval width (unlike in ARIMA where we specified $\alpha$)
```python
model = Prophet(interval_width=0.95)
```
and call `.fit()` passing in the training data.
```python
model.fit(y_train)
```
## Exercise 3.1 Fitting
**Task:**
* Fit a basic `Prophet` model to the training data
* Set the model up to produce 95\% prediction intervals
```
# your code here ...
```
# Exercise 3.2 Forecasting
There are two steps to making a forecast with a fitted `Prophet` model. You must first create a future dataframe. This is a `pd.DataFrame` with a single column 'ds' that runs from the start of the training period until the end of the training period + a horizon. Prophet makes this easy by including a `make_future_dataframe()` method.
```python
future = model.make_future_dataframe(periods=28)
```
Once we have the future dataframe we can pass that to the predict method.
```python
prophet_forecast = model.predict(future)
```
This returns a `pd.DataFrame` that contains both the in-sample fitted values and the out of sample forecasts. It contains all of the components of the fitted model. For example the last 5 rows of a forecast are:
| | ds | trend | yhat_lower | yhat_upper | trend_lower | trend_upper | additive_terms | additive_terms_lower | additive_terms_upper | weekly | weekly_lower | weekly_upper | yearly | yearly_lower | yearly_upper | multiplicative_terms | multiplicative_terms_lower | multiplicative_terms_upper | yhat |
|-----:|:--------------------|--------:|-------------:|-------------:|--------------:|--------------:|-----------------:|-----------------------:|-----------------------:|----------:|---------------:|---------------:|---------:|---------------:|---------------:|-----------------------:|-----------------------------:|-----------------------------:|--------:|
| 1629 | 2018-09-18 00:00:00 | 187.353 | 163.694 | 236.271 | 186.22 | 188.383 | 11.0807 | 11.0807 | 11.0807 | -2.83494 | -2.83494 | -2.83494 | 13.9156 | 13.9156 | 13.9156 | 0 | 0 | 0 | 198.433 |
| 1630 | 2018-09-19 00:00:00 | 187.305 | 157.7 | 226.979 | 186.153 | 188.356 | 5.91039 | 5.91039 | 5.91039 | -7.49286 | -7.49286 | -7.49286 | 13.4032 | 13.4032 | 13.4032 | 0 | 0 | 0 | 193.215 |
| 1631 | 2018-09-20 00:00:00 | 187.257 | 150.343 | 224.271 | 186.088 | 188.328 | 0.527382 | 0.527382 | 0.527382 | -12.3179 | -12.3179 | -12.3179 | 12.8452 | 12.8452 | 12.8452 | 0 | 0 | 0 | 187.785 |
| 1632 | 2018-09-21 00:00:00 | 187.209 | 156.873 | 226.588 | 186.023 | 188.3 | 3.90459 | 3.90459 | 3.90459 | -8.34281 | -8.34281 | -8.34281 | 12.2474 | 12.2474 | 12.2474 | 0 | 0 | 0 | 191.114 |
| 1633 | 2018-09-22 00:00:00 | 187.162 | 169.299 | 241.272 | 185.954 | 188.272 | 15.9777 | 15.9777 | 15.9777 | 4.36149 | 4.36149 | 4.36149 | 11.6162 | 11.6162 | 11.6162 | 0 | 0 | 0 | 203.139 |
We can then plot the forecast using Prophet's built-in functionality
```python
model.plot(prophet_forecast);
```
In the plot you will see
* Black dots - these represent the actual observations (training data)
* Blue line - this represents the point forecast
* Light blue shaded region - this is the 95\% prediction interval
**Task:**
* Use your Prophet model to make a 84 day prediction.
* Plot the prediction
```
# your code here ...
```
# Exercise 4: Plot the model components
To decompose the Prophet model and see the trend and seasonal components you can call:
```python
model.plot_components(forecast)
```
**Task:**
* Plot the Prophet model's trend and seasonal components.
**Questions:**
* What observations can you make about the weekly (i.e. days of the week) and yearly (i.e months of the year) seasonality?
```
# your code here...
```
# Exercise 5: Adding standard holidays
As this is ED reattendance data at the daily level, it is likely that we will be seeing some calender day/holiday effects. Prophet has been designed to deal with 'holidays'. Effectively a series of binary variables are added for each holiday.
Prophet has a number of standard holidays built in by country. To add them in for 'England' you would use the following code:
```python
model = Prophet(interval_width=0.95)
model.add_country_holidays(country_name='England')
model.fit(y_train)
```
Note that you call `add_country_holidays` **before** you fit the model. This is because it is a command telling Prophet what you would like to fit.
To see which holidays have been fitted you can call:
```python
model.train_holiday_names.to_list()
```
When you plot the components of the model a new panel will be added for holidays. It can sometimes be a bit difficult to see which holidays are having which effect, so you can plot individual holidays as follows:
```python
from prophet.plot import plot_forecast_component
plot_forecast_component(model, prophet_forecast, 'Christmas Day');
```
**Task:**
* Modify your code to include holidays for 'England'
* Make an 84 day forecast
* Plot the model components.
* Explore the holidays
**Questions:**
* What do you notice about Christmas day, Easter Monday and Good Friday? Are there any other days that appear important?
**Hints:**
* Take a look at the new forecast dataframe. This includes new columns with the effect of each `holidays`.
```
# your code here ...
```
# Exercise 6: Cross Validation
FBProphet provides its own cross validation procedures in the `diagnostics` module. Two key functions are the `cross_validation` and `performance_metrics` functions.
```python
from prophet.diagnostics import cross_validation, performance_metrics
```
Prophet uses a rolling origin forecast type procedure for cross validation (but rather than working forwards Prophet works backwards from the final fold). The `cross_validation` function requires some parameters to be **time delta** compatible. This means that we need to pass in a `str` such as `"7 days"`
To use the `cross_validation` function with your model to do a CV where the initial length of the training set is 1095 days (3 years), the step size (period) is 28 days and the forecast horizon is 84 days we would use the following code:
```python
df_cv = cross_validation(model=model, initial='1095 days', period='28 days',
horizon='84 days')
```
The variable `df_cv` is of type `pandas.DataFrame`. It contains all of the predictions and actual values by date and cut off. In our example above the first row will be day 366; the next 83 rows will be all part of the same cross-validation fold. See below for example, note that all the select rows (0, 1 and 83) have the same **cut-off** - this the **forecast origin** of the cross validation fold.
```python
df_cv.iloc[[0, 1, 83]]
```
| | ds | yhat | yhat_lower | yhat_upper | y | cutoff |
|---:|:--------------------|--------:|-------------:|-------------:|----:|:--------------------|
| 0 | 2017-04-09 00:00:00 | 215.292 | 180.674 | 249.161 | 241 | 2017-04-08 00:00:00 |
| 1 | 2017-04-10 00:00:00 | 222.806 | 187.367 | 255.534 | 259 | 2017-04-08 00:00:00 |
| 83 | 2017-07-01 00:00:00 | 234.83 | 200.091 | 270.619 | 221 | 2017-04-08 00:00:00 |
If we then look at rows 84 and 85 we see that the cutoff (forecast origin) has rolled forward. It is equal to the previous cut-off + 28 days (period in prophet terminology).
```python
df_cv.iloc[[84, 85]]
```
| | ds | yhat | yhat_lower | yhat_upper | y | cutoff |
|---:|:--------------------|--------:|-------------:|-------------:|----:|:--------------------|
| 84 | 2017-05-07 00:00:00 | 226.971 | 192.175 | 260.083 | 217 | 2017-05-06 00:00:00 |
| 85 | 2017-05-08 00:00:00 | 234.145 | 198.177 | 269.963 | 213 | 2017-05-06 00:00:00 |
So in essence `df_cv` is a record of all of the cross-validation folds stacked on top of each other. This is useful, because it means we have all of the data to calculate any forecast error measure we would like over any sub-forecast horizon. Prophet provides the function `performance_metrics` and the plotting function `fbprophet.plot.plot_cross_validation_metric` to automate this for you. For example:
**Note: mdape is median absolute percentage error**
```python
df_p = performance_metrics(df_cv)
df_p.head()
```
| | horizon | mse | rmse | mae | mape | mdape | coverage |
|---:|:-----------------|--------:|--------:|--------:|----------:|----------:|-----------:|
| 0 | 9 days 00:00:00 | 387.234 | 19.6783 | 15.6597 | 0.0798998 | 0.0632103 | 0.91453 |
| 1 | 10 days 00:00:00 | 355.57 | 18.8566 | 14.9167 | 0.0775167 | 0.0632103 | 0.931013 |
| 2 | 11 days 00:00:00 | 317.469 | 17.8176 | 13.8917 | 0.0726941 | 0.0535132 | 0.945665 |
| 3 | 12 days 00:00:00 | 311.386 | 17.6461 | 13.8011 | 0.0719934 | 0.0535132 | 0.954212 |
| 4 | 13 days 00:00:00 | 321.142 | 17.9204 | 13.908 | 0.0719082 | 0.0575246 | 0.948718 |
---
**Task:**
* Run Prophet's built in cross validation
* Use an initial training period of 1095 days, period=28 and horizon of '84 days'
* Use Prophet's built in `cross_validation` and `performance_metrics` functions
```
# your code here...
```
# Exercise 7: Is Prophet better than ARIMA?
**Task:**
* Choose and fit an ARIMA model to the dataset.
* Use cross-validation of the 84 day MAE to compare the performance of ARIMA and Prophet.
**Questions**
* Which of the two methods would you choose in practice?
* What else might you do?
**Hints**:
* See Introduction to ARIMA notebook for a reminder about how to use ARIMA models in Python
* This is daily level data. For ARIMA this is `m=7`
* Remember that Prophet and ARIMA use data in different formats.
* It might take a few minutes for `auto_arima` to return results. Once it has write down the order of the model so that you can easily recreate it with a `ARIMA` object.
* ARIMA fitting is not always stable for daily level data and procedures may output some warnings. Types of warning you may encounter are:
**Convergence warning**:
Where the Maximum likelihood optimisation has failed to converge. If you get a convergence warning when fitting `ARIMA` to your chosen specification then try increasing the parameter `maxiter` (default=50) to 100 or more in the `auto_arima` function and `ARIMA` constructor method.
**Non-invertible starting MA parameters warning**.
Details of what is meant by an invertible MA process can be found [here](https://otexts.com/fpp2/MA.html). For our practical purposes the chances are if you see this then when using `auto_arima` and it is unlikely that this model will be selected by the procedure. There is a good chance the warning will also be encountered in cross validation when the model is using less data. Its worth knowing that `statsmodels` will still fit a model in these circumstances.
* If you wish you can suppress the warnings in `auto_arima` by setting `suppress_warnings=True`
* It will also take a few minutes for ARIMA cross validation to run.
```
# your code here
```
| github_jupyter |
```
%matplotlib inline
import matplotlib.pyplot as plt
import random
def choose(n, k):
    """Return all length-n binary lists containing exactly k ones.

    Each result is an indicator vector marking which of the n positions
    are "on".  The guard on k is a bug fix: the original recursed forever
    for k > n (or negative k) because the n == k base case was never hit.
    """
    if k < 0 or k > n:
        return []
    if n == k:
        return [[1] * k]
    # Either position 0 is off (still need k ones from the other n-1) ...
    subsets = [[0] + rest for rest in choose(n - 1, k)]
    # ... or position 0 is on (need k-1 ones from the other n-1).
    if k > 0:
        subsets += [[1] + rest for rest in choose(n - 1, k - 1)]
    return subsets


def graham_sloane_codes(n):
    """Return the length-n, weight-4 codewords whose index-weighted sum
    is congruent to 0 mod n (a Graham-Sloane constant-weight code)."""
    # n is the length of each codeword; the number of on bits is fixed at 4.
    def code_sum(codeword):
        return sum(i * c for i, c in enumerate(codeword)) % n
    return [c for c in choose(n, 4) if code_sum(c) == 0]
from numpy.random import permutation, rand, normal
from numpy import ones, zeros, concatenate, array, float
from numpy.random import poisson
from pandas import DataFrame, concat
from skimage.filters import gaussian
p = {'N_high':4, #number of on bits (not used with current codebook)
'N_barcode':8, #length of barcode
'N_flour':200, #mean number of flourophores per transcripts - depends on amplification strategy (e.g HCR, bDNA)
'N_photons_per_flour':50, #mean number of photons per flourophore - depends on exposure time, bleaching rate of dye
'N_photon_background':1000, #mean number of background photons per pixel - depends on tissue clearing and autoflourescence
'detection_efficiency':.25, #quantum efficiency of the camera detector units number of electrons per photon
'N_background_electrons':1, #camera read noise per pixel in units electrons
'N_spots':100, #number of RNA puncta
'N_size':100, #height and width of image in pixel units
'psf':2, #standard devitation of gaussian in pixel units
'graylevel' : 37000.0/2**16, #dynamic range of camera sensor 37,000 assuming a 16-bit AD converter
'bits': 16, #16-bit AD converter
'dimension': 2, # dimension of data, 2 for planar, 3 for volume
'N_planes': 20, # number of z planes, only used if dimension greater than 3
'psf_z':4 #standard devitation of gaussian in pixel units for z dim
}
codebook = graham_sloane_codes(p['N_barcode'])
def generate_spot(p):
    """Draw one synthetic RNA spot as a single-row DataFrame: a random
    position, a random gene from the module-level `codebook`, and a
    per-bit photon count for that gene's barcode."""
    loc = rand(p['dimension'])
    gene_idx = random.choice(range(len(codebook)))
    code = array(codebook[gene_idx])
    # Photon yield per barcode bit: Poisson fluorophore count times a
    # Poisson per-fluorophore photon draw, gated by whether the bit is on.
    emitted = [poisson(p['N_photons_per_flour']) * poisson(p['N_flour']) * bit
               for bit in code]
    return DataFrame({'position': [loc], 'barcode': [code],
                      'photons': [emitted], 'gene': gene_idx})
# right now there is no jitter on positions of the spots, we might want to make it a vector
spots = concat([generate_spot(p) for i in range(p['N_spots'])])
if p['dimension'] == 2:
image = zeros((p['N_barcode'], p['N_size'], p['N_size'],))
for s in spots.itertuples():
image[:, int(p['N_size']*s.position[0]), int(p['N_size']*s.position[1])] = s.photons
image_with_background = image + poisson(p['N_photon_background'], size = image.shape)
filtered = array([gaussian(im, p['psf']) for im in image_with_background])
else:
image = zeros((p['N_barcode'], p['N_planes'], p['N_size'], p['N_size'],))
for s in spots.itertuples():
image[:, int(p['N_planes']*s.position[0]), int(p['N_size']*s.position[1]), int(p['N_size']*s.position[2])] = s.photons
image_with_background = image + poisson(p['N_photon_background'], size = image.shape)
filtered = array([gaussian(im, (p['psf_z'], p['psf'], p['psf'])) for im in image_with_background])
filtered = filtered*p['detection_efficiency'] + normal(scale=p['N_background_electrons'], size=filtered.shape)
signal = array([(x/p['graylevel']).astype(int).clip(0, 2**p['bits']) for x in filtered])
plt.imshow(signal[7])
spots
```
| github_jupyter |
Copyright (c) Microsoft Corporation. All rights reserved.
# Tutorial (part 2): Use automated machine learning to build your regression model
This tutorial is **part two of a two-part tutorial series**. In the previous tutorial, you [prepared the NYC taxi data for regression modeling](regression-part1-data-prep.ipynb).
Now, you're ready to start building your model with Azure Machine Learning service. In this part of the tutorial, you will use the prepared data and automatically generate a regression model to predict taxi fare prices. Using the automated ML capabilities of the service, you define your machine learning goals and constraints, launch the automated machine learning process and then allow the algorithm selection and hyperparameter-tuning to happen for you. The automated ML technique iterates over many combinations of algorithms and hyperparameters until it finds the best model based on your criterion.
In this tutorial, you learn how to:
> * Setup a Python environment and import the SDK packages
> * Configure an Azure Machine Learning service workspace
> * Auto-train a regression model
> * Run the model locally with custom parameters
> * Explore the results
> * Register the best model
If you don’t have an Azure subscription, create a [free account](https://aka.ms/AMLfree) before you begin.
> Code in this article was tested with Azure Machine Learning SDK version 1.0.0
## Prerequisites
> * [Run the data preparation tutorial](regression-part1-data-prep.ipynb)
> * Automated machine learning configured environment e.g. Azure notebooks, Local Python environment or Data Science Virtual Machine. [Setup](https://docs.microsoft.com/azure/machine-learning/service/samples-notebooks) automated machine learning.
### Import packages
Import Python packages you need in this tutorial.
```
import azureml.core
import pandas as pd
from azureml.core.workspace import Workspace
import logging
```
### Configure workspace
Create a workspace object from the existing workspace. A `Workspace` is a class that accepts your Azure subscription and resource information, and creates a cloud resource to monitor and track your model runs. `Workspace.from_config()` reads the file **aml_config/config.json** and loads the details into an object named `ws`. `ws` is used throughout the rest of the code in this tutorial.
Once you have a workspace object, specify a name for the experiment and create and register a local directory with the workspace. The history of all runs is recorded under the specified experiment.
```
ws = Workspace.from_config()
# choose a name for the run history container in the workspace
experiment_name = 'automated-ml-regression'
# project folder
project_folder = './automated-ml-regression'
import os
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
pd.set_option('display.max_colwidth', -1)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
```
## Explore data
Utilize the data flow object created in the previous tutorial. Open and execute the data flow and review the results.
```
import azureml.dataprep as dprep
file_path = os.path.join(os.getcwd(), "dflows.dprep")
package_saved = dprep.Package.open(file_path)
dflow_prepared = package_saved.dataflows[0]
dflow_prepared.get_profile()
```
You prepare the data for the experiment by adding columns to `dflow_X` to be features for our model creation. You define `dflow_y` to be our prediction value; cost.
```
dflow_X = dflow_prepared.keep_columns(['pickup_weekday','pickup_hour', 'distance','passengers', 'vendor'])
dflow_y = dflow_prepared.keep_columns('cost')
```
### Split data into train and test sets
Now you split the data into training and test sets using the `train_test_split` function in the `sklearn` library. This function segregates the data into the x (features) data set for model training and the y (values to predict) data set for testing. The `test_size` parameter determines the percentage of data to allocate to testing. The `random_state` parameter sets a seed to the random generator, so that your train-test splits are always deterministic.
```
from sklearn.model_selection import train_test_split
x_df = dflow_X.to_pandas_dataframe()
y_df = dflow_y.to_pandas_dataframe()
x_train, x_test, y_train, y_test = train_test_split(x_df, y_df, test_size=0.2, random_state=223)
# flatten y_train to 1d array
y_train.values.flatten()
```
You now have the necessary packages and data ready for auto training for your model.
## Automatically train a model
To automatically train a model:
1. Define settings for the experiment run
1. Submit the experiment for model tuning
### Define settings for autogeneration and tuning
Define the experiment parameters and models settings for autogeneration and tuning. View the full list of [settings](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train).
|Property| Value in this tutorial |Description|
|----|----|---|
|**iteration_timeout_minutes**|10|Time limit in minutes for each iteration|
|**iterations**|30|Number of iterations. In each iteration, the model trains with the data with a specific pipeline|
|**primary_metric**|spearman_correlation | Metric that you want to optimize.|
|**preprocess**| True | True enables experiment to perform preprocessing on the input.|
|**verbosity**| logging.INFO | Controls the level of logging.|
|**n_cross_validations**|5|Number of cross validation splits|
```
automl_settings = {
"iteration_timeout_minutes" : 10,
"iterations" : 30,
"primary_metric" : 'spearman_correlation',
"preprocess" : True,
"verbosity" : logging.INFO,
"n_cross_validations": 5
}
from azureml.train.automl import AutoMLConfig
# local compute
automated_ml_config = AutoMLConfig(task = 'regression',
debug_log = 'automated_ml_errors.log',
path = project_folder,
X = x_train.values,
y = y_train.values.flatten(),
**automl_settings)
```
### Train the automatic regression model
Start the experiment to run locally. Pass the defined `automated_ml_config` object to the experiment, and set the output to `true` to view progress during the experiment.
```
from azureml.core.experiment import Experiment
experiment=Experiment(ws, experiment_name)
local_run = experiment.submit(automated_ml_config, show_output=True)
```
## Explore the results
Explore the results of automatic training with a Jupyter widget or by examining the experiment history.
### Option 1: Add a Jupyter widget to see results
Use the Jupyter notebook widget to see a graph and a table of all results.
```
from azureml.widgets import RunDetails
RunDetails(local_run).show()
```
### Option 2: Get and examine all run iterations in Python
Alternatively, you can retrieve the history of each experiment and explore the individual metrics for each iteration run.
```
children = list(local_run.get_children())
metricslist = {}
for run in children:
properties = run.get_properties()
metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
metricslist[int(properties['iteration'])] = metrics
rundata = pd.DataFrame(metricslist).sort_index(1)
rundata
```
## Retrieve the best model
Select the best pipeline from our iterations. The `get_output` method on `local_run` returns the best run and the fitted model for the last fit invocation. There are overloads on `get_output` that allow you to retrieve the best run and fitted model for any logged metric or a particular iteration.
```
best_run, fitted_model = local_run.get_output()
print(best_run)
print(fitted_model)
```
## Register the model
Register the model in your Azure Machine Learning Workspace.
```
description = 'Automated Machine Learning Model'
tags = None
local_run.register_model(description=description, tags=tags)
print(local_run.model_id) # Use this id to deploy the model as a web service in Azure
```
## Test the best model accuracy
Use the best model to run predictions on the test data set. The function `predict` uses the best model, and predicts the values of y (trip cost) from the `x_test` data set. Print the first 10 predicted cost values from `y_predict`.
```
y_predict = fitted_model.predict(x_test.values)
print(y_predict[:10])
```
Create a scatter plot to visualize the predicted cost values compared to the actual cost values. The following code uses the `distance` feature as the x-axis, and trip `cost` as the y-axis. The first 100 predicted and actual cost values are created as separate series, in order to compare the variance of predicted cost at each trip distance value. Examining the plot shows that the distance/cost relationship is nearly linear, and the predicted cost values are in most cases very close to the actual cost values for the same trip distance.
```
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(14, 10))
ax1 = fig.add_subplot(111)
distance_vals = [x[4] for x in x_test.values]
y_actual = y_test.values.flatten().tolist()
ax1.scatter(distance_vals[:100], y_predict[:100], s=18, c='b', marker="s", label='Predicted')
ax1.scatter(distance_vals[:100], y_actual[:100], s=18, c='r', marker="o", label='Actual')
ax1.set_xlabel('distance (mi)')
ax1.set_title('Predicted and Actual Cost/Distance')
ax1.set_ylabel('Cost ($)')
plt.legend(loc='upper left', prop={'size': 12})
plt.rcParams.update({'font.size': 14})
plt.show()
```
Calculate the `root mean squared error` of the results. Use the `y_test` dataframe, and convert it to a list to compare to the predicted values. The function `mean_squared_error` takes two arrays of values, and calculates the average squared error between them. Taking the square root of the result gives an error in the same units as the y variable (cost), and indicates roughly how far your predictions are from the actual value.
```
from sklearn.metrics import mean_squared_error
from math import sqrt
rmse = sqrt(mean_squared_error(y_actual, y_predict))
rmse
```
Run the following code to calculate MAPE (mean absolute percent error) using the full `y_actual` and `y_predict` data sets. This metric calculates an absolute difference between each predicted and actual value, sums all the differences, and then expresses that sum as a percent of the total of the actual values.
```
sum_actuals = sum_errors = 0
for actual_val, predict_val in zip(y_actual, y_predict):
abs_error = actual_val - predict_val
if abs_error < 0:
abs_error = abs_error * -1
sum_errors = sum_errors + abs_error
sum_actuals = sum_actuals + actual_val
mean_abs_percent_error = sum_errors / sum_actuals
print("Model MAPE:")
print(mean_abs_percent_error)
print()
print("Model Accuracy:")
print(1 - mean_abs_percent_error)
```
## Next steps
In this automated machine learning tutorial, you:
> * Configured a workspace and prepared data for an experiment
> * Trained using an automated regression model locally with custom parameters
> * Explored and reviewed training results
> * Registered the best model
[Deploy your model](02.deploy-models.ipynb) with Azure Machine Learning.
| github_jupyter |
# Generating Skewed Data for Prediction
This notebook helps generating skewed data based on the [covertype](https://archive.ics.uci.edu/ml/datasets/covertype) dataset from UCI Machine Learning Repository. The generated data is then used to simulate online prediction request workload to a deployed model version on the AI Platform Prediction.
The notebook covers the following steps:
1. Download the data
2. Define dataset metadata
3. Sample unskewed data points
4. Prepare skewed data points
5. Simulate serving workload to AI Platform Prediction
## Setup
### Install packages and dependencies
```
!pip install -U -q google-api-python-client
!pip install -U -q pandas
```
### Setup your GCP Project
```
PROJECT_ID = 'sa-data-validation'
BUCKET = 'sa-data-validation'
REGION = 'us-central1'
!gcloud config set project $PROJECT_ID
```
### Authenticate your GCP account
This is required if you run the notebook in Colab
```
try:
from google.colab import auth
auth.authenticate_user()
print("Colab user is authenticated.")
except: pass
```
### Import libraries
```
import os
from tensorflow import io as tf_io
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
```
### Define constants
You can change the default values for the following constants
```
LOCAL_WORKSPACE = './workspace'
LOCAL_DATA_DIR = os.path.join(LOCAL_WORKSPACE, 'data')
LOCAL_DATA_FILE = os.path.join(LOCAL_DATA_DIR, 'train.csv')
BQ_DATASET_NAME = 'data_validation'
BQ_TABLE_NAME = 'covertype_classifier_logs'
MODEL_NAME = 'covertype_classifier'
VERSION_NAME = 'v1'
MODEL_OUTPUT_KEY = 'probabilities'
SIGNATURE_NAME = 'serving_default'
```
## 1. Download Data
The covertype dataset is preprocessed, split, and uploaded to the `gs://workshop-datasets/covertype` public GCS location.
We use this version of the preprocessed dataset in this notebook. For more information, see [Cover Type Dataset](https://github.com/GoogleCloudPlatform/mlops-on-gcp/tree/master/datasets/covertype)
```
if tf_io.gfile.exists(LOCAL_WORKSPACE):
print("Removing previous workspace artifacts...")
tf_io.gfile.rmtree(LOCAL_WORKSPACE)
print("Creating a new workspace...")
tf_io.gfile.makedirs(LOCAL_WORKSPACE)
tf_io.gfile.makedirs(LOCAL_DATA_DIR)
!gsutil cp gs://workshop-datasets/covertype/data_validation/training/dataset.csv {LOCAL_DATA_FILE}
!wc -l {LOCAL_DATA_FILE}
data = pd.read_csv(LOCAL_DATA_FILE)
print("Total number of records: {}".format(len(data.index)))
data.sample(10).T
```
## 2. Define Metadata
```
HEADER = ['Elevation', 'Aspect', 'Slope','Horizontal_Distance_To_Hydrology',
'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points', 'Wilderness_Area', 'Soil_Type',
'Cover_Type']
TARGET_FEATURE_NAME = 'Cover_Type'
FEATURE_LABELS = ['0', '1', '2', '3', '4', '5', '6']
NUMERIC_FEATURE_NAMES = ['Aspect', 'Elevation', 'Hillshade_3pm',
'Hillshade_9am', 'Hillshade_Noon',
'Horizontal_Distance_To_Fire_Points',
'Horizontal_Distance_To_Hydrology',
'Horizontal_Distance_To_Roadways','Slope',
'Vertical_Distance_To_Hydrology']
CATEGORICAL_FEATURE_NAMES = ['Soil_Type', 'Wilderness_Area']
FEATURE_NAMES = CATEGORICAL_FEATURE_NAMES + NUMERIC_FEATURE_NAMES
HEADER_DEFAULTS = [[0] if feature_name in NUMERIC_FEATURE_NAMES + [TARGET_FEATURE_NAME] else ['NA']
for feature_name in HEADER]
NUM_CLASSES = len(FEATURE_LABELS)
for feature_name in CATEGORICAL_FEATURE_NAMES:
data[feature_name] = data[feature_name].astype(str)
```
## 3. Sampling Normal Data
```
normal_data = data.sample(2000)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 10))
normal_data['Elevation'].plot.hist(bins=15, ax=axes[0][0], title='Elevation')
normal_data['Aspect'].plot.hist(bins=15, ax=axes[0][1], title='Aspect')
normal_data['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[1][0], title='Wilderness Area')
normal_data[TARGET_FEATURE_NAME].value_counts(normalize=True).plot.bar(ax=axes[1][1], title=TARGET_FEATURE_NAME)
```
## 4. Prepare Skewed Data
We are going to introduce the following skews to the data:
1. **Numerical Features**
* *Elevation - Feature Skew*: Convert the unit of measure from meters to kilometers for 1% of the data points
* *Aspect - Distribution Skew*: Decrease the value by randomly from 1% to 50%
2. **Categorical Features**
* *Wilderness_Area - Feature Skew*: Adding a new category "Others" for 1% of the data points
* *Wilderness_Area - Distribution Skew*: Increase the frequency of "Cache" and "Neota" values by 25%
```
skewed_data = data.sample(1000)
```
### 4.1 Skewing numerical features
#### 4.1.1 Elevation Feature Skew
```
ratio = 0.1
size = int(len(skewed_data.index) * ratio)
indexes = np.random.choice(skewed_data.index, size=size, replace=False)
skewed_data['Elevation'][indexes] = skewed_data['Elevation'][indexes] // 1000
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 5))
normal_data['Elevation'].plot.hist(bins=15, ax=axes[0], title='Elevation - Normal')
skewed_data['Elevation'].plot.hist(bins=15, ax=axes[1], title='Elevation - Skewed')
```
#### 4.1.2 Aspect Distribution Skew
```
skewed_data['Aspect'] = skewed_data['Aspect'].apply(
lambda value: int(value * np.random.uniform(0.5, 0.99))
)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 5))
normal_data['Aspect'].plot.hist(bins=15, ax=axes[0], title='Aspect - Normal')
skewed_data['Aspect'].plot.hist(bins=15, ax=axes[1], title='Aspect - Skewed')
```
### 4.2 Skew categorical features
#### 4.2.1 Wilderness Area Feature Skew
Adding a new category "Others"
```
skewed_data['Wilderness_Area'] = skewed_data['Wilderness_Area'].apply(
lambda value: 'Others' if np.random.uniform() <= 0.1 else value
)
```
#### 4.2.2 Wilderness Area Distribution Skew
```
skewed_data['Wilderness_Area'] = skewed_data['Wilderness_Area'].apply(
lambda value: 'Neota' if value in ['Rawah', 'Commanche'] and np.random.uniform() <= 0.25 else value
)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 5))
normal_data['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[0], title='Wilderness Area - Normal')
skewed_data['Wilderness_Area'].value_counts(normalize=True).plot.bar(ax=axes[1], title='Wilderness Area - Skewed')
```
## 5. Simulating serving workload
### 5.1 Implement the model API client
```
import googleapiclient.discovery
import numpy as np
service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME, VERSION_NAME)
print("Service name: {}".format(name))
def caip_predict(instance):
    """Send one instance to the deployed AI Platform model version and
    return the predicted class label(s), decoded via FEATURE_LABELS.

    Raises RuntimeError if the prediction service reports an error.
    """
    body = {
        'signature_name': SIGNATURE_NAME,
        'instances': [instance],
    }
    response = service.projects().predict(name=name, body=body).execute()
    if 'error' in response:
        raise RuntimeError(response['error'])
    # Each prediction carries a probability vector under MODEL_OUTPUT_KEY;
    # argmax over it selects the winning class index.
    prob_vectors = [pred[MODEL_OUTPUT_KEY] for pred in response['predictions']]
    return [FEATURE_LABELS[int(np.argmax(probs))] for probs in prob_vectors]
import time
def simulate_requests(data_frame):
    """Replay every row of `data_frame` as an online prediction request.

    For each row: drop the target column, wrap every feature value in a
    one-element list (the serving signature expects batched values), and
    call `caip_predict`.  Prints a dot per request, and a progress line
    plus a short throttle pause after every 100 requests.
    """
    print("Simulation started...")
    print("---------------------")
    print("Number of instances: {}".format(len(data_frame.index)))
    sent = 0
    for _, row in data_frame.iterrows():
        instance = dict(row)
        instance.pop(TARGET_FEATURE_NAME)
        for key, value in instance.items():
            instance[key] = [value]
        caip_predict(instance)  # prediction result is not needed here
        sent += 1
        print(".", end='')
        # Bug fix: the original tested (i + 1) % 100 after incrementing i,
        # so it announced "Sent 100 requests." after only 99 requests.
        if sent % 100 == 0:
            print()
            print("Sent {} requests.".format(sent))
            time.sleep(0.5)
    print("")
    print("-------------------")
    print("Simulation finished.")
```
### 5.2 Simulate AI Platform Prediction requests
```
simulate_requests(normal_data)
simulate_requests(skewed_data)
```
| github_jupyter |
<div style="text-align: right">Peter Norvig, 12 Feb 2016<br>Revised 17 Feb 2018</div>
# A Concrete Introduction to Probability (using Python)
In 1814, Pierre-Simon Laplace [wrote](https://en.wikipedia.org/wiki/Classical_definition_of_probability):
>*Probability theory is nothing but common sense reduced to calculation. ... [Probability] is thus simply a fraction whose numerator is the number of favorable cases and whose denominator is the number of all the cases possible ... when nothing leads us to expect that any one of these cases should occur more than any other.*

<center><a href="https://en.wikipedia.org/wiki/Pierre-Simon_Laplace">Pierre-Simon Laplace</a><br>1814</center>
Laplace nailed it. To untangle a probability problem, all you have to do is define exactly what the cases are, and carefully count the favorable and total cases. Let's be clear on our vocabulary words:
- **[Trial](https://en.wikipedia.org/wiki/Experiment_(probability_theory%29):**
A single occurrence with an outcome that is uncertain until we observe it.
<br>*For example, rolling a single die.*
- **[Outcome](https://en.wikipedia.org/wiki/Outcome_(probability%29):**
A possible result of a trial; one particular state of the world. What Laplace calls a **case.**
<br>*For example:* `4`.
- **[Sample Space](https://en.wikipedia.org/wiki/Sample_space):**
The set of all possible outcomes for the trial.
<br>*For example,* `{1, 2, 3, 4, 5, 6}`.
- **[Event](https://en.wikipedia.org/wiki/Event_(probability_theory%29):**
A subset of outcomes that together have some property we are interested in.
<br>*For example, the event "even die roll" is the set of outcomes* `{2, 4, 6}`.
- **[Probability](https://en.wikipedia.org/wiki/Probability_theory):**
As Laplace said, the probability of an event with respect to a sample space is the "number of favorable cases" (outcomes from the sample space that are in the event) divided by the "number of all the cases" in the sample space (assuming "nothing leads us to expect that any one of these cases should occur more than any other"). Since this is a proper fraction, probability will always be a number between 0 (representing an impossible event) and 1 (representing a certain event).
<br>*For example, the probability of an even die roll is 3/6 = 1/2.*
This notebook will explore these concepts in a concrete way using Python code. The code is meant to be succint and explicit, and fast enough to handle sample spaces with millions of outcomes. If you need to handle trillions, you'll want a more efficient implementation. I also have [another notebook](http://nbviewer.jupyter.org/url/norvig.com/ipython/ProbabilityParadox.ipynb) that covers paradoxes in Probability Theory.
# `P` is for Probability
The code below implements Laplace's quote directly: *Probability is thus simply a fraction whose numerator is the number of favorable cases and whose denominator is the number of all the cases possible.*
```
from fractions import Fraction
def P(event, space):
    "The probability of an event, given a sample space."
    wins = favorable(event, space)
    return Fraction(cases(wins), cases(space))


# Outcomes that belong to both the event and the sample space.
favorable = set.intersection
# Counting the cases in a set is just taking its size.
cases = len
```
# Warm-up Problem: Die Roll
What's the probability of rolling an even number with a single six-sided fair die? Mathematicians traditionally use a single capital letter to denote a sample space; I'll use `D` for the die:
```
D = {1, 2, 3, 4, 5, 6} # a sample space
even = { 2, 4, 6} # an event
P(even, D)
```
Good to confirm what we already knew. We can explore some other events:
```
prime = {2, 3, 5, 7, 11, 13}
odd = {1, 3, 5, 7, 9, 11, 13}
P(odd, D)
P((even | prime), D) # The probability of an even or prime die roll
P((odd & prime), D) # The probability of an odd prime die roll
```
# Card Problems
Consider dealing a hand of five playing cards. An individual card has a rank and suit, like `'J♥'` for the Jack of Hearts, and a `deck` has 52 cards:
```
suits = u'♥♠♦♣'
ranks = u'AKQJT98765432'
deck = [r + s for r in ranks for s in suits]
len(deck)
```
Now I want to define `Hands` as the sample space of all 5-card combinations from `deck`. The function `itertools.combinations` does most of the work; we then concatenate each combination into a space-separated string:
```
import itertools
def combos(items, n):
"All combinations of n items; each combo as a space-separated str."
return set(map(' '.join, itertools.combinations(items, n)))
Hands = combos(deck, 5)
len(Hands)
```
There are too many hands to look at them all, but we can sample:
```
import random
random.sample(Hands, 7)
random.sample(deck, 7)
```
Now we can answer questions like the probability of being dealt a flush (5 cards of the same suit):
```
flush = {hand for hand in Hands if any(hand.count(suit) == 5 for suit in suits)}
P(flush, Hands)
```
Or the probability of four of a kind:
```
four_kind = {hand for hand in Hands if any(hand.count(rank) == 4 for rank in ranks)}
P(four_kind, Hands)
```
# Urn Problems
Around 1700, Jacob Bernoulli wrote about removing colored balls from an urn in his landmark treatise *[Ars Conjectandi](https://en.wikipedia.org/wiki/Ars_Conjectandi)*, and ever since then, explanations of probability have relied on [urn problems](https://www.google.com/search?q=probability+ball+urn). (You'd think the urns would be empty by now.)

<center><a href="https://en.wikipedia.org/wiki/Jacob_Bernoulli">Jacob Bernoulli</a><br>1700</center>
For example, here is a three-part problem [adapted](http://mathforum.org/library/drmath/view/69151.html) from mathforum.org:
> *An urn contains 6 blue, 9 red, and 8 white balls. We select six balls at random. What is the probability of each of these outcomes:*
> - *All balls are red*.
> - *3 are blue, 1 is red, and 2 are white*.
- *Exactly 4 balls are white*.
We'll start by defining the contents of the urn. A `set` can't contain multiple objects that are equal to each other, so I'll call the blue balls `'B1'` through `'B6'`, rather than trying to have 6 balls all called `'B'`:
```
def balls(color, n):
    "A set of n numbered balls of the given color, e.g. {'B1', 'B2', ...}."
    numbered = set()
    for i in range(1, n + 1):
        numbered.add(color + str(i))
    return numbered
urn = balls('B', 6) | balls('R', 9) | balls('W', 8)
urn
```
Now we can define the sample space, `U6`, as the set of all 6-ball combinations:
```
U6 = combos(urn, 6)
random.sample(U6, 5)
```
Define `select` such that `select('R', 6)` is the event of picking 6 red balls from the urn:
```
def select(color, n, space=U6):
    "The subset of the sample space with exactly `n` balls of given `color`."
    return {hand for hand in space if hand.count(color) == n}
```
Now I can answer the three questions:
```
P(select('R', 6), U6)
P(select('B', 3) & select('R', 1) & select('W', 2), U6)
P(select('W', 4), U6)
```
## Urn problems via arithmetic
Let's verify these calculations using basic arithmetic, rather than exhaustive counting. First, how many ways can I choose 6 out of 9 red balls? It could be any of the 9 for the first ball, any of 8 remaining for the second, and so on down to any of the remaining 4 for the sixth and final ball. But we don't care about the *order* of the six balls, so divide that product by the number of permutations of 6 things, which is 6!, giving us
9 × 8 × 7 × 6 × 5 × 4 / 6! = 84. In general, the number of ways of choosing *c* out of *n* items is (*n* choose *c*) = *n*! / ((*n* - *c*)! × c!).
We can translate that to code:
```
from math import factorial

def choose(n, c):
    "Number of ways to choose c items from a list of n items: n! / ((n-c)! c!)."
    numerator = factorial(n)
    denominator = factorial(c) * factorial(n - c)
    return numerator // denominator
choose(9, 6)
```
Now we can verify the answers to the three problems. (Since `P` computes a ratio and `choose` computes a count,
I multiply the left-hand-side by `N`, the length of the sample space, to make both sides be counts.)
```
N = len(U6)
N * P(select('R', 6), U6) == choose(9, 6)
N * P(select('B', 3) & select('W', 2) & select('R', 1), U6) == choose(6, 3) * choose(8, 2) * choose(9, 1)
N * P(select('W', 4), U6) == choose(8, 4) * choose(6 + 9, 2) # (6 + 9 non-white balls)
```
We can solve all these problems just by counting; all you ever needed to know about probability problems you learned from Sesame Street:

<center><a href="https://en.wikipedia.org/wiki/Count_von_Count">The Count</a><br>1972—</center>
# Non-Equiprobable Outcomes
So far, we have accepted Laplace's assumption that *nothing leads us to expect that any one of these cases should occur more than any other*.
In real life, we often get outcomes that are not equiprobable--for example, a loaded die favors one side over the others. We will introduce three more vocabulary items:
* [Frequency](https://en.wikipedia.org/wiki/Frequency_%28statistics%29): a non-negative number describing how often an outcome occurs. Can be a count like 5, or a ratio like 1/6.
* [Distribution](http://mathworld.wolfram.com/StatisticalDistribution.html): A mapping from outcome to frequency of that outcome. We will allow sample spaces to be distributions.
* [Probability Distribution](https://en.wikipedia.org/wiki/Probability_distribution): A probability distribution
is a distribution whose frequencies sum to 1.
I could implement distributions with `Dist = dict`, but instead I'll make `Dist` a subclass `collections.Counter`:
```
from collections import Counter

class Dist(Counter):
    "A Distribution: a mapping of {outcome: frequency} pairs."
```
Because a `Dist` is a `Counter`, we can initialize it in any of the following ways:
```
# A set of equiprobable outcomes:
Dist({1, 2, 3, 4, 5, 6})
# A collection of outcomes, with repetition indicating frequency:
Dist('THHHTTHHT')
# A mapping of {outcome: frequency} pairs:
Dist({'H': 5, 'T': 4})
# Keyword arguments:
Dist(H=5, T=4) == Dist({'H': 5}, T=4) == Dist('TTTT', H=5)
```
Now I will modify the code to handle distributions.
Here's my plan:
- Sample spaces and events can both be specified as either a `set` or a `Dist`.
- The sample space can be a non-probability distribution like `Dist(H=50, T=50)`; the results
will be the same as if the sample space had been a true probability distribution like `Dist(H=1/2, T=1/2)`.
- The function `cases` now sums the frequencies in a distribution (it previously counted the length).
- The function `favorable` now returns a `Dist` of favorable outcomes and their frequencies (not a `set`).
- I will redefine `Fraction` to use `"/"`, not `fractions.Fraction`, because frequencies might be floats.
- `P` is unchanged.
```
def cases(outcomes):
    "The total frequency of all the outcomes."
    distribution = Dist(outcomes)
    return sum(distribution.values())

def favorable(event, space):
    "A distribution of outcomes from the sample space that are in the event."
    distribution = Dist(space)
    return Dist({outcome: distribution[outcome]
                 for outcome in distribution
                 if outcome in event})

def Fraction(n, d):
    "Plain division, since frequencies may be floats."
    return n / d
```
For example, here's the probability of rolling an even number with a crooked die that is loaded to prefer 6:
```
Crooked = Dist({1: 0.1, 2: 0.1, 3: 0.1, 4: 0.1, 5: 0.1, 6: 0.5})
P(even, Crooked)
```
As another example, an [article](http://people.kzoo.edu/barth/math105/moreboys.pdf) gives the following counts for two-child families in Denmark, where `GB` means a family where the first child is a girl and the second a boy (I'm aware that not all births can be classified as the binary "boy" or "girl," but the data was reported that way):
GG: 121801 GB: 126840
BG: 127123 BB: 135138
```
DK = Dist(GG=121801, GB=126840,
BG=127123, BB=135138)
first_girl = {'GG', 'GB'}
P(first_girl, DK)
second_girl = {'GG', 'BG'}
P(second_girl, DK)
```
This says that the probability of a girl is somewhere between 48% and 49%. The probability of a girl is very slightly higher for the second child.
Given the first child, are you more likely to have a second child of the same sex?
```
same = {'GG', 'BB'}
P(same, DK)
```
Yes, but only by about 0.3%.
# Predicates as events
To calculate the probability of an even die roll, I originally said
even = {2, 4, 6}
But that's inelegant—I had to explicitly enumerate all the even numbers from one to six. If I ever wanted to deal with a twelve or twenty-sided die, I would have to go back and redefine `even`. I would prefer to define `even` once and for all like this:
```
def even(n):
    "True iff n is an even number."
    return not n % 2
```
Now in order to make `P(even, D)` work, I'll allow an `Event` to be either a collection of outcomes or a `callable` predicate (that is, a function that returns true for outcomes that are part of the event). I don't need to modify `P`, but `favorable` will have to convert a callable `event` to a `set`:
```
def favorable(event, space):
    "A distribution of outcomes from the sample space that are in the event."
    if callable(event):
        # A predicate event: collect the outcomes for which it holds.
        event = {outcome for outcome in space if event(outcome)}
    distribution = Dist(space)
    return Dist({outcome: distribution[outcome]
                 for outcome in distribution
                 if outcome in event})
favorable(even, D)
P(even, D)
```
I'll define `die` to make a sample space for an *n*-sided die:
```
def die(n):
    "Sample space for rolling one fair n-sided die."
    return {side for side in range(1, n + 1)}
favorable(even, die(12))
P(even, die(12))
P(even, die(2000))
P(even, die(2001))
```
We can define more interesting events using predicates; for example we can determine the probability that the sum of rolling *d* 6-sided dice is prime:
```
def sum_dice(d):
    "Distribution of the sum of rolling d six-sided dice."
    return Dist(sum(roll) for roll in itertools.product(D, repeat=d))

def is_prime(n):
    "True iff n is a prime number (checked by trial division)."
    return n > 1 and all(n % i for i in range(2, n))
for d in range(1, 9):
p = P(is_prime, sum_dice(d))
print("P(is_prime, sum_dice({})) = {}".format(d, round(p, 3)))
```
# Fermat and Pascal: The Unfinished Game
<table>
<tr><td><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/9/98/Pierre_de_Fermat2.png/140px-Pierre_de_Fermat2.png"><center><a href="https://en.wikipedia.org/wiki/Pierre_de_Fermat">Pierre de Fermat</a><br>1654
<td><img src="https://www.umass.edu/wsp/images/pascal.jpg"><center><a href="https://en.wikipedia.org/wiki/Blaise_Pascal">Blaise Pascal</a><br>1654
</table>
Consider a gambling game consisting of tossing a coin repeatedly. Player H wins the game as soon as a total of 10 heads come up, and T wins if a total of 10 tails come up before H wins. If the game is interrupted when H has 8 heads and T has 7 tails, how should the pot of money (which happens to be 100 Francs) be split? Here are some proposals, and arguments against them:
- It is uncertain, so just split the pot 50-50.
<br>*No, because surely H is more likely to win.*
- In proportion to each player's current score, so H gets a 8/(8+7) share.
<br>*No, because if the score was 0 heads to 1 tail, H should get more than 0/1.*
- In proportion to how many tosses the opponent needs to win, so H gets 3/(3+2).
<br>*This seems better, but no, if H is 9 away and T is only 1 away from winning, then it seems that giving H a 1/10 share is too much.*
In 1654, Blaise Pascal and Pierre de Fermat corresponded on this problem, with Fermat [writing](http://mathforum.org/isaac/problems/prob1.html):
>Dearest Blaise,
>As to the problem of how to divide the 100 Francs, I think I have found a solution that you will find to be fair. Seeing as I needed only two points to win the game, and you needed 3, I think we can establish that after four more tosses of the coin, the game would have been over. For, in those four tosses, if you did not get the necessary 3 points for your victory, this would imply that I had in fact gained the necessary 2 points for my victory. In a similar manner, if I had not achieved the necessary 2 points for my victory, this would imply that you had in fact achieved at least 3 points and had therefore won the game. Thus, I believe the following list of possible endings to the game is exhaustive. I have denoted 'heads' by an 'h', and tails by a 't.' I have starred the outcomes that indicate a win for myself.
> h h h h * h h h t * h h t h * h h t t *
> h t h h * h t h t * h t t h * h t t t
> t h h h * t h h t * t h t h * t h t t
> t t h h *   t t h t   t t t h   t t t t
>I think you will agree that all of these outcomes are equally likely. Thus I believe that we should divide the stakes by the ratio 11:5 in my favor, that is, I should receive (11/16)×100 = 68.75 Francs, while you should receive 31.25 Francs.
>I hope all is well in Paris,
>Your friend and colleague,
>Pierre
Pascal agreed with this solution, and [replied](http://mathforum.org/isaac/problems/prob2.html) with a generalization that made use of his previous invention, Pascal's Triangle. There's even [a book](https://smile.amazon.com/Unfinished-Game-Pascal-Fermat-Seventeenth-Century/dp/0465018963?sa-no-redirect=1) about it.
We can solve the problem with the tools we have:
```
def win_unfinished_game(h, t):
    "The probability that H will win the unfinished game, given the number of points needed by H and T to win."
    return P(at_least(h, 'h'), finishes(h, t))

def at_least(n, item):
    "The event of getting at least n instances of item in an outcome."
    def predicate(outcome):
        return outcome.count(item) >= n
    return predicate

def finishes(h, t):
    "All finishes of a game where player H needs h points to win and T needs t."
    # After h + t - 1 more tosses, one player must have reached their total.
    remaining = h + t - 1
    return set(itertools.product('ht', repeat=remaining))
```
We can generate the 16 equiprobable finishes that Pierre wrote about:
```
finishes(2, 3)
```
And we can find the 11 of them that are favorable to player `H`:
```
favorable(at_least(2, 'h'), finishes(2, 3))
```
Finally, we can answer the question:
```
100 * win_unfinished_game(2, 3)
```
We agree with Pascal and Fermat; we're in good company!
# Newton's Answer to a Problem by Pepys
<table>
<tr><td><img src="http://scienceworld.wolfram.com/biography/pics/Newton.jpg"><center><a href="https://en.wikipedia.org/wiki/Isaac_Newton">Isaac Newton</a><br>1693</center>
<td><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Samuel_Pepys_portrait.jpg/148px-Samuel_Pepys_portrait.jpg"><center><a href="https://en.wikipedia.org/wiki/Samuel_Pepys">Samuel Pepys</a><br>1693</center>
</table>
Let's jump ahead from 1654 all the way to 1693, [when](http://fermatslibrary.com/s/isaac-newton-as-a-probabilist) Samuel Pepys wrote to Isaac Newton posing the problem:
> Which of the following three propositions has the greatest chance of success?
1. Six fair dice are tossed independently and at least one “6” appears.
2. Twelve fair dice are tossed independently and at least two “6”s appear.
3. Eighteen fair dice are tossed independently and at least three “6”s appear.
Newton was able to answer the question correctly (although his reasoning was not quite right); let's see how we can do. Since we're only interested in whether a die comes up as "6" or not, we can define a single die like this:
```
die6 = Dist({6: 1/6, '-': 5/6})
```
Next we can define the joint distribution formed by combining two independent distributions like this:
```
def joint(A, B, combine='{}{}'.format):
    """The joint distribution of two independent distributions.
    Result is all entries of the form {'ab': frequency(a) * frequency(b)}"""
    product = Dist()
    for a in A:
        for b in B:
            product[combine(a, b)] = A[a] * B[b]
    return product
joint(die6, die6)
```
And the joint distribution from rolling *n* dice:
```
def dice(n, die):
    "Joint probability distribution from rolling `n` dice."
    if n == 1:
        return die
    # Combine one die with the distribution of the remaining n - 1 dice.
    return joint(die, dice(n - 1, die))
dice(4, die6)
```
Now we are ready to determine which proposition is more likely to have the required number of sixes:
```
P(at_least(1, '6'), dice(6, die6))
P(at_least(2, '6'), dice(12, die6))
P(at_least(3, '6'), dice(18, die6))
```
We reach the same conclusion Newton did, that the best chance is rolling six dice.
# More Urn Problems: M&Ms and Bayes
Here's another urn problem (actually a "bag" problem) [from](http://allendowney.blogspot.com/2011/10/my-favorite-bayess-theorem-problems.html) prolific Python/Probability pundit [Allen Downey](http://allendowney.blogspot.com/):
> The blue M&M was introduced in 1995. Before then, the color mix in a bag of plain M&Ms was (30% Brown, 20% Yellow, 20% Red, 10% Green, 10% Orange, 10% Tan). Afterward it was (24% Blue , 20% Green, 16% Orange, 14% Yellow, 13% Red, 13% Brown).
A friend of mine has two bags of M&Ms, and he tells me that one is from 1994 and one from 1996. He won't tell me which is which, but he gives me one M&M from each bag. One is yellow and one is green. What is the probability that the yellow M&M came from the 1994 bag?
To solve this problem, we'll first create distributions for each bag: `bag94` and `bag96`:
```
bag94 = Dist(brown=30, yellow=20, red=20, green=10, orange=10, tan=10)
bag96 = Dist(blue=24, green=20, orange=16, yellow=14, red=13, brown=13)
```
Next, define `MM` as the joint distribution—the sample space for picking one M&M from each bag. The outcome `'94:yellow 96:green'` means that a yellow M&M was selected from the 1994 bag and a green one from the 1996 bag. In this problem we don't get to see the actual outcome; we just see some evidence about the outcome, that it contains a yellow and a green.
```
MM = joint(bag94, bag96, '94:{} 96:{}'.format)
MM
```
We observe that "One is yellow and one is green":
```
def yellow_and_green(outcome):
    "True iff the outcome mentions both a yellow and a green M&M."
    return all(color in outcome for color in ('yellow', 'green'))
favorable(yellow_and_green, MM)
```
Given this observation, we want to know "What is the probability that the yellow M&M came from the 1994 bag?"
```
def yellow94(outcome):
    "True iff the yellow M&M came from the 1994 bag in this outcome."
    return outcome.find('94:yellow') != -1
P(yellow94, favorable(yellow_and_green, MM))
```
So there is a 74% chance that the yellow comes from the 1994 bag.
Answering this question was straightforward: just like all the other probability problems, we simply create a sample space, and use `P` to pick out the probability of the event in question, given what we know about the outcome.
But in a sense it is curious that we were able to solve this problem with the same methodology as the others: this problem comes from a section titled **My favorite Bayes's Theorem Problems**, so one would expect that we'd need to invoke Bayes Theorem to solve it. The computation above shows that that is not necessary.

<center><a href="https://en.wikipedia.org/wiki/Thomas_Bayes">Rev. Thomas Bayes</a><br>1701-1761
</center>
Of course, we *could* solve it using Bayes Theorem. Why is Bayes Theorem recommended? Because we are asked about the probability of an outcome given the evidence—the probability the yellow came from the 94 bag, given that there is a yellow and a green. But the problem statement doesn't directly tell us the probability of that outcome given the evidence; it just tells us the probability of the evidence given the outcome.
Before we see the colors of the M&Ms, there are two hypotheses, `A` and `B`, both with equal probability:
A: first M&M from 94 bag, second from 96 bag
B: first M&M from 96 bag, second from 94 bag
P(A) = P(B) = 0.5
Then we get some evidence:
E: first M&M yellow, second green
We want to know the probability of hypothesis `A`, given the evidence:
P(A | E)
That's not easy to calculate (except by enumerating the sample space, which our `P` function does). But Bayes Theorem says:
P(A | E) = P(E | A) * P(A) / P(E)
The quantities on the right-hand-side are easier to calculate:
P(E | A) = 0.20 * 0.20 = 0.04
P(E | B) = 0.10 * 0.14 = 0.014
P(A) = 0.5
P(B) = 0.5
P(E) = P(E | A) * P(A) + P(E | B) * P(B)
= 0.04 * 0.5 + 0.014 * 0.5 = 0.027
And we can get a final answer:
P(A | E) = P(E | A) * P(A) / P(E)
= 0.04 * 0.5 / 0.027
= 0.7407407407
You have a choice: Bayes Theorem allows you to do less calculation at the cost of more algebra; that is a great trade-off if you are working with pencil and paper. Enumerating the sample space allows you to do less algebra at the cost of more calculation; usually a good trade-off if you have a computer. But regardless of the approach you use, it is important to understand Bayes theorem and how it works.
There is one important question that Allen Downey does not address: *would you eat twenty-year-old M&Ms*?
😨
<hr>
# Simulation
Sometimes it is inconvenient, difficult, or even impossible to explicitly enumerate a sample space. Perhaps the sample space is infinite, or perhaps it is just very large and complicated (perhaps with a bunch of low-probability outcomes that don't seem very important). In that case, we might feel more confident in writing a program to *simulate* a random outcome. *Random sampling* from such a simulation
can give an accurate estimate of probability.
# Simulating Monopoly
<center>[Mr. Monopoly](https://en.wikipedia.org/wiki/Rich_Uncle_Pennybags)<br>1940—
Consider [problem 84](https://projecteuler.net/problem=84) from the excellent [Project Euler](https://projecteuler.net), which asks for the probability that a player in the game Monopoly ends a roll on each of the squares on the board. To answer this we need to take into account die rolls, chance and community chest cards, and going to jail (from the "go to jail" space, from a card, or from rolling doubles three times in a row). We do not need to take into account anything about acquiring properties or exchanging money or winning or losing the game, because these events don't change a player's location.
A game of Monopoly can go on forever, so the sample space is infinite. Even if we limit the sample space to say, 1000 rolls, there are $21^{1000}$ such sequences of rolls, and even more possibilities when we consider drawing cards. So it is infeasible to explicitly represent the sample space. There are techniques for representing the problem as
a Markov decision problem (MDP) and solving it, but the math is complex (a [paper](https://faculty.math.illinois.edu/~bishop/monopoly.pdf) on the subject runs 15 pages).
The simplest approach is to implement a simulation and run it for, say, a million rolls. Here is the code for a simulation:
```
from collections import deque as Deck  # a Deck of community chest or chance cards

# The Monopoly board, as specified by https://projecteuler.net/problem=84
(GO, A1, CC1, A2, T1, R1, B1, CH1, B2, B3,
 JAIL, C1, U1, C2, C3, R2, D1, CC2, D2, D3,
 FP, E1, CH2, E2, E3, R3, F1, F2, U2, F3,
 G2J, G1, G2, CC3, G3, R4, CH3, H1, T2, H2) = board = range(40)

# A card is either a square (go there), a set of squares (advance to the
# nearest one), -3 (go back 3 spaces), or None (no change to location).
CC_deck = Deck([GO, JAIL] + 14 * [None])
CH_deck = Deck([GO, JAIL, C1, E3, H2, R1, -3, {U1, U2}]
               + 2 * [{R1, R2, R3, R4}] + 6 * [None])

def monopoly(rolls):
    """Simulate given number of dice rolls of a Monopoly game,
    and return the counts of how often each square is visited."""
    counts = [0] * len(board)
    doubles = 0  # Number of consecutive doubles rolled
    random.shuffle(CC_deck)
    random.shuffle(CH_deck)
    goto(GO)
    for _ in range(rolls):
        d1, d2 = random.randint(1, 6), random.randint(1, 6)
        doubles = (doubles + 1 if d1 == d2 else 0)
        goto(here + d1 + d2)
        if here == G2J or doubles == 3:  # "Go to Jail" square, or three doubles in a row
            goto(JAIL)
            doubles = 0
        elif here in (CC1, CC2, CC3):    # Landed on a Community Chest square
            do_card(CC_deck)
        elif here in (CH1, CH2, CH3):    # Landed on a Chance square
            do_card(CH_deck)
        counts[here] += 1                # A turn ends here; record the visit
    return counts

def goto(square):
    "Update 'here' to be this square (the modulo handles passing GO)."
    global here
    here = square % len(board)

def do_card(deck):
    "Take the top card from deck and do what it says."
    card = deck.popleft()        # The top card
    deck.append(card)            # Move top card to bottom of deck
    if card is None:             # Don't move
        pass
    elif card == -3:             # Go back 3 spaces
        goto(here - 3)
    elif isinstance(card, set):  # Advance to the nearest railroad or utility in the set
        next1 = min({place for place in card if place > here} or card)
        goto(next1)
    else:                        # Go to destination named on card
        goto(card)
```
Let's run the simulation for a million dice rolls:
```
counts = monopoly(10**6)
```
And print a table of square names and their percentages:
```
property_names = """
GO, A1, CC1, A2, T1, R1, B1, CH1, B2, B3,
JAIL, C1, U1, C2, C3, R2, D1, CC2, D2, D3,
FP, E1, CH2, E2, E3, R3, F1, F2, U2, F3,
G2J, G1, G2, CC3, G3, R4, CH3, H1, T2, H2""".replace(',', ' ').split()

# Print each square's visit percentage, most-visited first.
total = sum(counts)
for count, name in sorted(zip(counts, property_names), reverse=True):
    print('{:4} {:.2%}'.format(name, count / total))
```
There is one square far above average: `JAIL`, at a little over 6%. There are four squares far below average: the three chance squares, `CH1`, `CH2`, and `CH3`, at around 1% (because 10 of the 16 chance cards send the player away from the square), and the "Go to Jail" square, which has a frequency of 0 because you can't end a turn there. The other squares are around 2% to 3% each, which you would expect, because 100% / 40 = 2.5%.
# The Central Limit Theorem
We have covered the concept of *distributions* of outcomes. You may have heard of the *normal distribution*, the *bell-shaped curve.* In Python it is called `random.normalvariate` (also `random.gauss`). We can plot it with the help of the `repeated_hist` function defined below, which samples a distribution `n` times and displays a histogram of the results. (*Note:* in this section I am using "distribution" to mean a function that, each time it is called, returns a random sample from a distribution. I am not using it to mean a mapping of type `Dist`.)
```
%matplotlib inline
import matplotlib.pyplot as plt
from statistics import mean
from random import normalvariate, triangular, choice, vonmisesvariate, uniform
def normal(mu=0, sigma=1): return random.normalvariate(mu, sigma)
def repeated_hist(dist, n=10**6, bins=100):
"Sample the distribution n times and make a histogram of the results."
samples = [dist() for _ in range(n)]
plt.hist(samples, bins=bins, density=True)
plt.title('{} (μ = {:.1f})'.format(dist.__name__, mean(samples)))
plt.grid(axis='x')
plt.yticks([], '')
plt.show()
# Normal distribution
repeated_hist(normal)
```
Why is this distribution called *normal*? The **Central Limit Theorem** says that it is the ultimate limit of other distributions, as follows (informally):
- Gather *k* independent distributions. They need not be normal-shaped.
- Define a new distribution to be the result of sampling one number from each of the *k* independent distributions and adding them up.
- As long as *k* is not too small, and the component distributions are not super-pathological, then the new distribution will tend towards a normal distribution.
Here's a simple example: summing ten independent die rolls:
```
def sum10dice():
    "The sum of ten independent rolls of a fair six-sided die."
    total = 0
    for _ in range(10):
        total += random.randint(1, 6)
    return total
repeated_hist(sum10dice, bins=range(10, 61))
```
As another example, let's take just *k* = 5 component distributions representing the per-game scores of 5 basketball players, and then sum them together to form the new distribution, the team score. I'll be creative in defining the distributions for each player, but [historically accurate](https://www.basketball-reference.com/teams/GSW/2016.html) in the mean for each distribution.
```
# Per-game scoring distributions for five players; the trailing comment is
# each player's historical mean. (Expressions kept exactly as before so the
# random draws are unchanged.)
def SC(): return max(0, normal(12.1, 3) + 3 * triangular(1, 13, 4))                      # 30.1
def KT(): return max(0, triangular(8, 22, 15.3) + choice((0, 3 * triangular(1, 9, 4))))  # 22.1
def DG(): return max(0, vonmisesvariate(30, 2) * 3.08)                                   # 14.0
def HB(): return max(0, choice((normal(6.7, 1.5), normal(16.7, 2.5))))                   # 11.7
def BE(): return max(0, normal(17, 3) + uniform(0, 40))                                  # 37.0

team = (SC, KT, DG, HB, BE)

def Team(team=team):
    "The team score: the sum of one sampled score per player."
    return sum(player() for player in team)

for player in team:
    repeated_hist(player, bins=range(70))
```
We can see that none of the players have a distribution that looks like a normal distribution: `SC` is skewed to one side (the mean is 5 points to the right of the peak); the three next players have bimodal distributions; and `BE` is too flat on top.
Now we define the team score to be the sum of the *k* = 5 players, and display this new distribution:
```
repeated_hist(Team, bins=range(50, 180))
```
Sure enough, this looks very much like a normal distribution. The **Central Limit Theorem** appears to hold in this case. But I have to say: "Central Limit" is not a very evocative name, so I propose we re-name this as the **Strength in Numbers Theorem**, to indicate the fact that if you have a lot of numbers, you tend to get the expected result.
# Conclusion
We've had an interesting tour and met some giants of the field: Laplace, Bernoulli, Fermat, Pascal, Bayes, Newton, ... even Mr. Monopoly and The Count.
The conclusion is: be methodical in defining the sample space and the event(s) of interest, and be careful in counting the number of outcomes in the numerator and denominator. and you can't go wrong. Easy as 1-2-3.
<hr>
# Appendix: Continuous Sample Spaces
Everything up to here has been about discrete, finite sample spaces, where we can *enumerate* all the possible outcomes.
But a reader asked about *continuous* sample spaces, such as the space of real numbers. The principles are the same: probability is still the ratio of the favorable cases to all the cases, but now instead of *counting* cases, we have to (in general) compute integrals to compare the sizes of cases.
Here we will cover a simple example, which we first solve approximately by simulation, and then exactly by calculation.
## The Hot New Game Show Problem: Simulation
Oliver Roeder posed [this problem](http://fivethirtyeight.com/features/can-you-win-this-hot-new-game-show/) in the 538 *Riddler* blog:
>Two players go on a hot new game show called *Higher Number Wins.* The two go into separate booths, and each presses a button, and a random number between zero and one appears on a screen. (At this point, neither knows the other’s number, but they do know the numbers are chosen from a standard uniform distribution.) They can choose to keep that first number, or to press the button again to discard the first number and get a second random number, which they must keep. Then, they come out of their booths and see the final number for each player on the wall. The lavish grand prize — a case full of gold bullion — is awarded to the player who kept the higher number. Which number is the optimal cutoff for players to discard their first number and choose another? Put another way, within which range should they choose to keep the first number, and within which range should they reject it and try their luck with a second number?
We'll use this notation:
- **A**, **B**: the two players.
- *A*, *B*: the cutoff values they choose: the lower bound of the range of first numbers they will accept.
- *a*, *b*: the actual random numbers that appear on the screen.
For example, if player **A** chooses a cutoff of *A* = 0.6, that means that **A** would accept any first number greater than 0.6, and reject any number below that cutoff. The question is: What cutoff, *A*, should player **A** choose to maximize the chance of winning, that is, maximize P(*a* > *b*)?
First, simulate the number that a player with a given cutoff gets (note that `random.random()` returns a float sampled uniformly from the interval [0..1]):
```
number = random.random

def strategy(cutoff):
    "Play the game with given cutoff, returning the first or second random number."
    first = number()
    if first > cutoff:
        return first      # Keep the first number: it beats the cutoff
    return number()       # Otherwise discard it and keep a second draw
strategy(.5)
```
Now compare the numbers returned with a cutoff of *A* versus a cutoff of *B*, and repeat for a large number of trials; this gives us an estimate of the probability that cutoff *A* is better than cutoff *B*:
```
def Pwin(A, B, trials=20000):
    "The probability that cutoff A wins against cutoff B, estimated by simulation."
    outcomes = (strategy(A) > strategy(B) for _ in range(trials))
    return mean(outcomes)
Pwin(0.6, 0.9)
```
Now define a function, `top`, that considers a collection of possible cutoffs, estimates the probability of each cutoff winning against each other cutoff, and returns a list of the `N` top cutoffs (the ones that defeated the most opponent cutoffs), along with the number of opponents each defeats:
```
def top(N, cutoffs):
    "Return the N best cutoffs and the number of opponent cutoffs they beat."
    wins = Counter()
    for A, B in itertools.combinations(cutoffs, 2):
        wins[A if Pwin(A, B) > 0.5 else B] += 1
    return wins.most_common(N)
from numpy import arange
top(10, arange(0.5, 1.0, 0.01))
```
We get a good idea of the top cutoffs, but they are close to each other, so we can't quite be sure which is best, only that the best is somewhere around 0.60. We could get a better estimate by increasing the number of trials, but that would consume more time.
## The Hot New Game Show Problem: Exact Calculation
More promising is the possibility of making `Pwin(A, B)` an exact calculation. But before we get to `Pwin(A, B)`, let's solve a simpler problem: assume that both players **A** and **B** have chosen a cutoff, and have each received a number above the cutoff. What is the probability that **A** gets the higher number? We'll call this `Phigher(A, B)`. We can think of this as a two-dimensional sample space of points in the (*a*, *b*) plane, where *a* ranges from the cutoff *A* to 1 and *b* ranges from the cutoff B to 1. Here is a diagram of that two-dimensional sample space, with the cutoffs *A*=0.5 and *B*=0.6:
<img src="http://norvig.com/ipython/probability2da.jpg" width=413>
The total area of the sample space is 0.5 × 0.4 = 0.20, and in general it is (1 - *A*) · (1 - *B*). What about the favorable cases, where **A** beats **B**? That corresponds to the shaded triangle below:
<img src="http://norvig.com/ipython/probability2d.jpg" width=413>
The area of a triangle is 1/2 the base times the height, or in this case, 0.4<sup>2</sup> / 2 = 0.08, and in general, (1 - *B*)<sup>2</sup> / 2. So in general we have:
Phigher(A, B) = favorable / total
favorable = ((1 - B) ** 2) / 2
total = (1 - A) * (1 - B)
Phigher(A, B) = (((1 - B) ** 2) / 2) / ((1 - A) * (1 - B))
Phigher(A, B) = (1 - B) / (2 * (1 - A))
And in this specific case we have:
A = 0.5; B = 0.6
favorable = 0.4 ** 2 / 2 = 0.08
total = 0.5 * 0.4 = 0.20
Phigher(0.5, 0.6) = 0.08 / 0.20 = 0.4
But note that this only works when the cutoff *A* ≤ *B*; when *A* > *B*, we need to reverse things. That gives us the code:
```
def Phigher(A, B):
    "Probability that a uniform sample from [A..1] exceeds one from [B..1]."
    if A > B:
        # Symmetric case: swap roles and take the complement.
        return 1 - Phigher(B, A)
    # For A <= B: favorable triangle (1-B)^2/2 over rectangle (1-A)(1-B).
    return (1 - B) / (2 * (1 - A))

Phigher(0.5, 0.6)
```
We're now ready to tackle the full game. There are four cases to consider, depending on whether **A** and **B** each get a first number that is above or below their cutoff choices:
| first *a* | first *b* | P(*a*, *b*) | P(A wins | *a*, *b*) | Comment |
|:-----:|:-----:| ----------- | ------------- | ------------ |
| *a* > *A* | *b* > *B* | (1 - *A*) · (1 - *B*) | Phigher(*A*, *B*) | Both above cutoff; both keep first numbers |
| *a* < *A* | *b* < *B* | *A* · *B* | Phigher(0, 0) | Both below cutoff, both get new numbers from [0..1] |
| *a* > *A* | *b* < *B* | (1 - *A*) · *B* | Phigher(*A*, 0) | **A** keeps number; **B** gets new number from [0..1] |
| *a* < *A* | *b* > *B* | *A* · (1 - *B*) | Phigher(0, *B*) | **A** gets new number from [0..1]; **B** keeps number |
For example, the first row of this table says that the event of both first numbers being above their respective cutoffs has probability (1 - *A*) · (1 - *B*), and if this does occur, then the probability of **A** winning is Phigher(*A*, *B*).
We're ready to replace the old simulation-based `Pwin` with a new calculation-based version:
```
def Pwin(A, B):
    "With what probability does cutoff A win against cutoff B?"
    # Weight each first-draw case by its probability, then by A's chance of
    # ending higher in that case. (Order matters for exact float equality.)
    cases = [((1-A) * (1-B), Phigher(A, B)),   # both above cutoff: both keep
             (A * B,         Phigher(0, 0)),   # both below: both redraw from [0..1]
             ((1-A) * B,     Phigher(A, 0)),   # A keeps; B redraws
             (A * (1-B),     Phigher(0, B))]   # A redraws; B keeps
    return sum(p * ph for p, ph in cases)

Pwin(0.5, 0.6)
```
`Pwin` relies on a lot of algebra. Let's define a few tests to check for obvious errors:
```
def test():
    "Sanity checks for Phigher and Pwin; returns 'ok' if every assertion passes."
    assert Phigher(.5, .6) == 0.4
    assert Phigher(.6, .5) == 0.6
    assert Phigher(0.5, 0.5) == Phigher(0.75, 0.75) == Phigher(0, 0) == 0.5
    assert Pwin(0.5, 0.5) == Pwin(0.75, 0.75) == 0.5
    return 'ok'

test()
```
Let's repeat the calculation with our new, exact `Pwin`:
```
top(10, arange(0.5, 1.0, 0.01))
```
It is good to see that the simulation and the exact calculation are in rough agreement; that gives me more confidence in both of them. We see here that 0.62 defeats all the other cutoffs, and 0.61 defeats all cutoffs except 0.62. The great thing about the exact calculation code is that it runs fast, regardless of how much accuracy we want. We can zero in on the range around 0.6:
```
top(10, arange(0.5, 0.7, 0.001))
```
This says 0.618 is best, better than 0.620. We can get even more accuracy:
```
top(10, arange(0.617, 0.619, 0.000001))
```
So 0.618034 is best. Does that number [look familiar](https://en.wikipedia.org/wiki/Golden_ratio)? Can we prove that it is what I think it is?
To understand the strategic possibilities, it is helpful to draw a 3D plot of `Pwin(A, B)` for values of *A* and *B* between 0 and 1:
```
import numpy as np
from mpl_toolkits.mplot3d.axes3d import Axes3D
def map2(fn, A, B):
    "Apply fn elementwise to two 2D arrays, returning a nested list."
    return [[fn(a, b) for a, b in zip(Arow, Brow)]
            for Arow, Brow in zip(A, B)]
cutoffs = arange(0.00, 1.00, 0.02)
A, B = np.meshgrid(cutoffs, cutoffs)
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax.set_xlabel('A')
ax.set_ylabel('B')
ax.set_zlabel('Pwin(A, B)')
ax.plot_surface(A, B, map2(Pwin, A, B));
```
What does this [Pringle of Probability](http://fivethirtyeight.com/features/should-you-shoot-free-throws-underhand/) show us? The highest win percentage for **A**, the peak of the surface, occurs when *A* is around 0.5 and *B* is 0 or 1. We can confirm that, finding the maximum `Pwin(A, B)` for many different cutoff values of `A` and `B`:
```
cutoffs = (set(arange(0.00, 1.00, 0.01)) |
set(arange(0.500, 0.700, 0.001)) |
set(arange(0.61803, 0.61804, 0.000001)))
def Pwin_summary(A, B): return [Pwin(A, B), 'A:', A, 'B:', B]
max(Pwin_summary(A, B) for A in cutoffs for B in cutoffs)
```
So **A** could win 62.5% of the time if only **B** would choose a cutoff of 0. But, unfortunately for **A**, a rational player **B** is not going to do that. We can ask what happens if the game is changed so that player **A** has to declare a cutoff first, and then player **B** gets to respond with a cutoff, with full knowledge of **A**'s choice. In other words, what cutoff should **A** choose to maximize `Pwin(A, B)`, given that **B** is going to take that knowledge and pick a cutoff that minimizes `Pwin(A, B)`?
```
max(min(Pwin_summary(A, B) for B in cutoffs)
for A in cutoffs)
```
And what if we run it the other way around, where **B** chooses a cutoff first, and then **A** responds?
```
min(max(Pwin_summary(A, B) for A in cutoffs)
for B in cutoffs)
```
In both cases, the rational choice for both players is a cutoff of 0.618034, which corresponds to the "saddle point" in the middle of the plot. This is a *stable equilibrium*; consider fixing *B* = 0.618034, and notice that if *A* changes to any other value, we slip off the saddle to the right or left, resulting in a worse win probability for **A**. Similarly, if we fix *A* = 0.618034, then if *B* changes to another value, we ride up the saddle to a higher win percentage for **A**, which is worse for **B**. So neither player will want to move from the saddle point.
The moral for continuous spaces is the same as for discrete spaces: be careful about defining your sample space; measure carefully, and let your code take care of the rest.
| github_jupyter |
# Setting up a non-equilibrium dynamic simulation on quantum hardware
___
This notebook executes the example outlined in Sec. 7.2.
## Overview
In this notebook, we will learn how to design a quantum circuit in Qiskit to simulate a quantum quench and run the circuit on the IBM's Quantum Computer and quantum simulator. [Qiskit](https://qiskit.org/) is a python library developed by IBM for designing quantum circuits and interfacing with their quantum computer over the cloud. You can get started right away by creating an [IBM Q Experience](https://quantum-computing.ibm.com/login) account. The Hamiltonian of interest is a one-dimensional(1D) antiferromagnetic (AF) Heisenberg model (equation 18 in the Review paper):
$H = J \sum_{i=1} \{\sigma_{i}^x \sigma_{i+1}^x + \sigma_{i}^y \sigma_{i+1}^y + g\sigma_{i}^z \sigma_{i+1}^z \}$
To perform a quantum quench, we initialize the qubits into the ground state of an initial Hamiltonian and then evolve the system under a final Hamiltonian. In this example, we set $J=1$ and take the initial Hamiltonian to be the limit of $g \rightarrow \infty$. The ground state of this Hamiltonian is the Néel state, given by $|\psi_0\rangle = |\uparrow \downarrow \uparrow ... \downarrow\rangle$. Thus, the first part of our quantum circuit needs to initialize the qubits into this state, which simply requires applying the $X$-gate on alternating qubits.
Next, we need to evolve the system under the final Hamiltonian. In this example, we perform three different quenches by setting $J=1$ and setting $g$ to three different values: g={0,0.2,4.0}. In order to simulate the evolution of the qubits under this Hamiltonian, we must apply the time-evolution operator $U(t)$ to the qubits, which for a time-independent Hamiltonian is given by:
$U(t) = e^{-iHt/\hbar}$
Since exponentiation of the Hamiltonian is difficult, we use the Trotter decomposition. To perform the Trotter decomposition, the Hamiltonian must be divided into components that are each efficiently diagonalizable on their own:
$H = H_x + H_y + H_z $
Then, the time evolution operator can be approximated as:
$U(n \Delta t) = \prod_{j=0}^{n-1} e^{-iH_{x}\Delta t/\hbar}e^{-iH_{y}\Delta t/\hbar} e^{-iH_{z}\Delta t/\hbar}$
Finally, the observable of interest is the staggered magnetization, given by
$m_s(t) = \frac{1}{N}\sum_i (-1)^i \langle\sigma_{i}^{z}(t)\rangle$
Fortunately, this only requires measuring the expectation of the Pauli-Z operator, which can be derived from an ensemble of measurements of the qubits in the computational basis, and thus requires no extra gates. It only requires a small amount of post-processing from qubit measurements.
First we import necessary libraries and define our system parameters, and then we outline how to build up the quantum circuit and run it below.
```
#import necessary libraries
import numpy as np
import qiskit as qk
from qiskit.tools.monitor import job_monitor
from qiskit import Aer, IBMQ, execute
import matplotlib.pyplot as plt
#Simulation parameters
#define Heisenberg model parameters
N = 7 #number of spins/qubits
J = 1.0
g = 0.2 #we will vary this parameter
delta_t = 0.025 #time-step size
num_steps = 10 #number of time-steps, we start with a small number for speed of demonstration
shots = 1024 #number of shots for circuit execution
```
## Create initial state preparation circuit
```
#create the initial state preparation (ISP) circuit
#Prepare the Neel state |up,down,up,down,...> (ground state of the g -> infinity
#Hamiltonian): qubits start in |0> ("up"), so flip the odd-indexed qubits.
ISP_circuit = qk.QuantumCircuit(N)
ISP_circuit.x(1)  # flip qubit 1 to "down"
ISP_circuit.x(3)  # flip qubit 3
ISP_circuit.x(5)  # flip qubit 5 (odd sites 1,3,5 for the N=7 chain set above)
#draw the circuit
ISP_circuit.draw()
```
## Create the time-evolution circuit
```
#create the circuit to execute the time-evolution operator for a given time-step
def evolution_circuit(num_time_steps, J, g, N):
    """Build the Trotterized time-evolution circuit for `num_time_steps` steps.

    Each Trotter step applies exp(-i Hx dt) exp(-i Hy dt) exp(-i Hz dt) across
    every neighboring qubit pair, with each two-qubit factor realized as a
    basis change around a CNOT-RZ-CNOT core.
    NOTE(review): reads the global `delta_t` for the step size -- confirm it
    is defined before this function is called.
    """
    hbar = 0.658212  # eV*fs
    circ = qk.QuantumCircuit(N)
    # Rotation angles for the RZ core of each Trotter factor.
    theta_x = 2.0 * J * delta_t / hbar
    theta_y = 2.0 * J * delta_t / hbar
    theta_z = 2.0 * J * g * delta_t / hbar
    for _ in range(num_time_steps):
        # XX term: Hadamards rotate X into the Z basis on both qubits.
        for q in range(N - 1):
            circ.h(q)
            circ.h(q + 1)
            circ.cx(q, q + 1)
            circ.rz(theta_x, q + 1)
            circ.cx(q, q + 1)
            circ.h(q)
            circ.h(q + 1)
        # YY term: RX(-pi/2) rotates Y into the Z basis, undone afterwards.
        for q in range(N - 1):
            circ.rx(-np.pi / 2, q)
            circ.rx(-np.pi / 2, q + 1)
            circ.cx(q, q + 1)
            circ.rz(theta_y, q + 1)
            circ.cx(q, q + 1)
            circ.rx(np.pi / 2, q)
            circ.rx(np.pi / 2, q + 1)
        # ZZ term needs no basis change.
        for q in range(N - 1):
            circ.cx(q, q + 1)
            circ.rz(theta_z, q + 1)
            circ.cx(q, q + 1)
    return circ
#draw circuit for time-step 1
circ = evolution_circuit(1,J,g,N)
circ.draw()
```
## Compose the final circuits
For the dynamic simulation, we will need one circuit for every time-step. Each circuit should have the same initial state preparation concatenated with the evolution circuit for the associated length of time simulated for the given time step. Finally, measurement of each qubit should be added to each circuit.
```
#Create set of final circuits for quantum quench simulations
#One circuit per time-step i: state preparation, then i Trotter steps,
#then measurement of every qubit into its classical bit.
circuits = []
for i in range(0, num_steps+1):
    total_circ = qk.QuantumCircuit(N,N)  # N qubits, N classical bits
    total_circ.compose(ISP_circuit, inplace=True)
    total_circ.compose(evolution_circuit(i,J,g,N), inplace=True)
    total_circ.measure(range(N),range(N))
    circuits.append(total_circ)
#draw the circuit for the first time-step
circuits[1].draw()
```
## Run Circuits
In order to run the circuits on the quantum processor or simulator, you will need to connect with IBM's server. To do so, you will need you API_Token, which you can find by logging in to your IBM Q Experience account under "My Account".
The first time you run this notebook, you should run the save_account function with your token as the argument. This saves your account credentials in '~/.qiskit/qiskitrc', so on all future runs of this notebook you will only need to load your account with the load_account function. You can delete this file with delete_account if you need to regenerate a token.
```
#qk.IBMQ.save_account('here') ## only run once!
#qk.IBMQ.delete_accounts() ## only run if you need to use a new token
qk.IBMQ.load_account()
```
Next, we need to choose a backend on which to run our quantum circuits.
We can either run on a quantum simulator or a real quantum processor. You can only run on a quantum processor your account has access to, so be sure to check!
```
#Show available backends
provider = qk.IBMQ.get_provider(group='open')
provider.backends()
#To run on the quantum computer, assign a quantum computer of your choice as the backend
#backend = provider.get_backend('ibmq_ourense')
#Or if you wish to run on the quantum simulator, select the QasmSimulator from the Aer provider
backend = Aer.get_backend('qasm_simulator')
```
Given a backend, we can now transpile our circuits so they may run on our chosen backend.
```
#Transpile the circuits
circs = qk.transpile(circuits, backend=backend, optimization_level=3)
#Run job according to backend, either a quantum processor OR the quantum simulator
########Uncomment this section to run on quantum processor#########
#quantum computer execution
#job = qk.execute(circ, backend=backend, shots=shots)
#job_monitor(job)
#results = job.result()
####################################################################
#Line below executes simulation on quantum simulator execution
results = execute(circs, backend).result()
```
## Post-Processing Results
Finally, some post-processing is required to compute the staggered magnetization from the measurement results from the quantum backend. A measured value of $0$ for a given qubit maps to a value of $+1$ for the expectation value of the Pauli-Z operator, while a measured value of $1$ for a given qubit maps to a value of $-1$ for the expectation value of the Pauli-Z operator. Recall the staggered magnetization is computed from the expectation values of the Pauli-Z operator for each qubit.
```
#define function to compute staggered magnetization
def staggered_magnetization(result: dict, shots: int):
    """Compute the shot-averaged staggered magnetization from measurement counts.

    Args:
        - result (dict): bitstring -> count, as returned by get_counts
        - shots (int): total number of shots

    Return:
        - average staggered magnetization (float)
    """
    total = 0.0
    for spin_str, count in result.items():
        # '0' -> +1, '1' -> -1 for <sigma_z>, with an alternating site sign.
        signed = [((-1) ** i) * (1 - 2 * float(s))
                  for i, s in enumerate(spin_str)]
        total += (sum(signed) / len(signed)) * count
    return total / shots
#Post-processing results
avg_sm = []
for c in circs:
result_dict = results.get_counts(c)
avg_sm.append(staggered_magnetization(result_dict, shots))
#Plot results
plt.plot(range(num_steps+1), avg_sm)
plt.show()
```
## Reproduce plot in Figure 6 in Review paper
To reproduce the plot in Figure 6, we must run the simulation three separate times for the three different values of $g$: $g=0$, $g=0.2$, and $g=4.0$, and increase the number of time-steps to 100.
<span style="color:red">**Note that circuit creation and transpilation may take some time as we are using 100 time-steps!**</span>
```
g=0
num_steps=100
#create and traspile circuits for g=0
circuits_g0 = []
for i in range(0, num_steps+1):
total_circ = qk.QuantumCircuit(N,N)
total_circ.compose(ISP_circuit, inplace=True)
total_circ.compose(evolution_circuit(i,J,g,N), inplace=True)
total_circ.measure(range(N),range(N))
circuits_g0.append(total_circ)
print('Circuits created for g=0')
circs_g0 = qk.transpile(circuits_g0, backend=backend, optimization_level=3)
print('Circuits transpiled for g=0')
#create and traspile circuits for g=0.2
g=0.2
circuits_g02 = []
for i in range(0, num_steps+1):
total_circ = qk.QuantumCircuit(N,N)
total_circ.compose(ISP_circuit, inplace=True)
total_circ.compose(evolution_circuit(i,J,g,N), inplace=True)
total_circ.measure(range(N),range(N))
circuits_g02.append(total_circ)
print('Circuits created for g=0.2')
circs_g02 = qk.transpile(circuits_g02, backend=backend, optimization_level=3)
print('Circuits transpiled for g=0.2')
#create and traspile circuits for g=4.0
g=4.0
circuits_g4 = []
for i in range(0, num_steps+1):
total_circ = qk.QuantumCircuit(N,N)
total_circ.compose(ISP_circuit, inplace=True)
total_circ.compose(evolution_circuit(i,J,g,N), inplace=True)
total_circ.measure(range(N),range(N))
circuits_g4.append(total_circ)
print('Circuits created for g=4')
circs_g4 = qk.transpile(circuits_g4, backend=backend, optimization_level=3)
print('Circuits transpiled for g=4')
#Execute all simulations
results0 = execute(circs_g0, backend).result()
results02 = execute(circs_g02, backend).result()
results4 = execute(circs_g4, backend).result()
#Post-process all results
avg_sm0 = []
for c in circs_g0:
result_dict = results0.get_counts(c)
avg_sm0.append(staggered_magnetization(result_dict, shots))
avg_sm02 = []
for c in circs_g02:
result_dict = results02.get_counts(c)
avg_sm02.append(staggered_magnetization(result_dict, shots))
avg_sm4 = []
for c in circs_g4:
result_dict = results4.get_counts(c)
avg_sm4.append(staggered_magnetization(result_dict, shots))
#Plot the results
plt.figure(figsize=(6,4))
plt.plot(avg_sm0, label="$g=0$")
plt.plot(avg_sm02, label="$g=0.2$")
plt.plot(avg_sm4, label="$g=4.0$")
plt.hlines(0, 0, 150, linestyles='dashed')
plt.ylim(-0.6,1.0)
plt.xlim(0,100)
#plt.legend()
plt.xlabel("Simulation Timestep", fontsize=12, fontname="Times New Roman")
plt.yticks([-0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1.0], fontsize=12, fontname="Times New Roman")
plt.xticks(fontsize=12, fontname="Times New Roman")
plt.ylabel("Staggered Magnetization", fontsize=12, fontname="Times New Roman")
plt.show()
#Use line below to save figuge as a PNG file
#plt.savefig("staggered_mag.png")
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import numpy as np
e_set_cgan = np.load('e_set_cgan.npy')*100
e_set_cvae = np.concatenate(([0],np.load('e_set_cvae.npy')*100))
e_single_cgan = np.load('e_single_cgan.npy')*100
e_single_cvae = np.concatenate(([0],np.load('e_single_cvae.npy')*100))
y_set_cgan = np.load('y_set_cgan.npy')*100
y_set_cvae = np.concatenate(([0],np.load('y_set_cvae.npy')*100))
y_single_cgan = np.load('y_single_cgan.npy')*100
y_single_cvae = np.concatenate(([0],np.load('y_single_cvae.npy')*100))
```
Earlier, this slot was used for AIS. We have changed it to the reconstruction attack, which is not applicable to GANs.
```
y_single_cvae
e_set_cgan[1] = 0
e_single_cgan[1] = 0
y_set_cgan[1] = 0
y_single_cgan[1] = 0
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams.update({'font.size': 16})
f = plt.figure(figsize=(7, 7))
ax = plt.subplot()
names = ['White Box', 'Reconstruction Attack','MC PCA', 'MC CHIST']
N = len(names)
ind = np.arange(N)
offset = 0.2
width = 0.4
rects1 = ax.bar(ind-offset, y_single_cgan, width=width, label='DCGAN')
rects1 = ax.bar(ind+offset, y_single_cvae, width=width, label='VAE')
plt.errorbar(ind-offset, y_single_cgan, e_single_cgan, color='k', linestyle='None', capsize=7)
plt.errorbar(ind+offset, y_single_cvae, e_single_cvae, color='k', linestyle='None', capsize=7)
plt.ylim(ymin=45)
plt.axhline(y=50, color='k', linestyle='--')
plt.ylabel('Accuracy Single (\%)', fontsize='large')
ax.set_xticks(ind)
plt.setp(ax.get_xticklabels(), rotation='90')
ax.set_xticklabels(names)
plt.legend()
plt.savefig('CIFAR_GAN_VAE_Single.pgf', bbox_inches="tight")
#############################################################################################################
f = plt.figure(figsize=(7, 7))
ax = plt.subplot()
names = ['White Box', 'Reconstruction Attack','MC PCA', 'MC CHIST']
N = len(names)
ind = np.arange(N)
offset = 0.2
width = 0.4
rects1 = ax.bar(ind-offset, y_set_cgan, width=width, label='DCGAN')
rects1 = ax.bar(ind+offset, y_set_cvae, width=width, label='VAE')
plt.errorbar(ind-offset, y_set_cgan, e_set_cgan, color='k', linestyle='None', capsize=7)
plt.errorbar(ind+offset, y_set_cvae, e_set_cvae, color='k', linestyle='None', capsize=7)
plt.ylim(ymin=45)
plt.axhline(y=50, color='k', linestyle='--')
plt.ylabel('Accuracy Set (\%)', fontsize='large')
ax.set_xticks(ind)
plt.setp(ax.get_xticklabels(), rotation='90')
ax.set_xticklabels(names)
plt.legend()
plt.savefig('CIFAR_GAN_VAE_Set.pgf', bbox_inches="tight")
print(y_set_cgan)
print(y_set_cvae)
print(y_single_cvae)
print(e_single_cvae)
print(y_set_cvae)
print(e_set_cvae)
```
| github_jupyter |
```
!date
#Download datasets
import requests
import os
from tqdm import tnrange, tqdm_notebook
def download_file(doi,ext):
    """Download the media file behind a DataCite DOI into the working directory.

    Args:
        doi: DataCite DOI string (e.g. '10.22002/D1.2065'); the local file is
            named after the DOI suffix.
        ext: extension to append to the file name (e.g. '.gz').

    Returns:
        The local file name (returned even if the download did not complete).
    """
    # Resolve the DOI to the actual media URL via the DataCite API.
    url = 'https://api.datacite.org/dois/'+doi+'/media'
    r = requests.get(url).json()
    netcdf_url = r['data'][0]['attributes']['url']
    r = requests.get(netcdf_url,stream=True)
    #Set file name
    fname = doi.split('/')[-1]+ext
    #Download file with progress bar
    if r.status_code == 403:
        # NOTE(review): only prints a warning -- execution falls through and may
        # still attempt the download below; confirm this is intended.
        print("File Unavailable")
    if 'content-length' not in r.headers:
        print("Did not get file")
    else:
        with open(fname, 'wb') as f:
            total_length = int(r.headers.get('content-length'))
            # Progress bar sized in 1 KiB chunks.
            pbar = tnrange(int(total_length/1024), unit="B")
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    pbar.update()
                    f.write(chunk)
    return fname
#10x VMH data
#metadata.csv
download_file('10.22002/D1.2065','.gz')
#tenx.mtx (log counts)
download_file('10.22002/D1.2072','.gz')
#SMART-seq VMH data
#metadata.csv
download_file('10.22002/D1.2067','.gz')
#smartseq.mtx (log counts)
download_file('10.22002/D1.2071','.gz')
#MERFISH data
#metadata.csv
download_file('10.22002/D1.2063','.gz')
#counts.h5ad
download_file('10.22002/D1.2064','.gz')
os.system("gunzip *.gz")
os.system("mv D1.2065 tenxmetadata.csv")
os.system("mv D1.2072 tenx.mtx")
os.system("mv D1.2067 smartmetadata.csv")
os.system("mv D1.2071 smartseq.mtx")
os.system("mv D1.2063 metadata.csv")
os.system("mv D1.2064 counts.h5ad")
#Read in files from NCBI GEO
os.system("wget --quiet https://ftp.ncbi.nlm.nih.gov/geo/series/GSE149nnn/GSE149372/suppl/GSE149372_GSM5068636-GSM5068641_scRNA.MetaData.105.csv.gz")
os.system("wget --quiet https://ftp.ncbi.nlm.nih.gov/geo/series/GSE149nnn/GSE149372/suppl/GSE149372_GSM5068636-GSM5068641_scRNA.normalized.assay105.csv.gz")
os.system("wget --quiet https://ftp.ncbi.nlm.nih.gov/geo/series/GSE149nnn/GSE149372/suppl/GSE149372_GSM5068636-GSM5068641_scRNA.integrated.scaled.assay105.csv.gz")
os.system("wget --quiet https://ftp.ncbi.nlm.nih.gov/geo/series/GSE149nnn/GSE149372/suppl/GSE149372_scRNA.MetaData.85.csv.gz")
os.system("wget --quiet https://ftp.ncbi.nlm.nih.gov/geo/series/GSE149nnn/GSE149372/suppl/GSE149372_scRNA.normalized.assay85.csv.gz")
#Read in files from NCBI GEO for Integrate Utero E8.5 (will only use Ex-utero data)
#Read in files from NCBI GEO
os.system("wget --quiet https://ftp.ncbi.nlm.nih.gov/geo/series/GSE149nnn/GSE149372/suppl/GSE149372_scRNA.MetaData.85.csv.gz")
os.system("wget --quiet https://ftp.ncbi.nlm.nih.gov/geo/series/GSE149nnn/GSE149372/suppl/GSE149372_scRNA.normalized.assay85.csv.gz")
os.system("git clone https://github.com/hhcho/densvis.git")
%cd ./densvis/densne/
!g++ sptree.cpp densne.cpp densne_main.cpp -o den_sne -O2
import densne
%cd ../../
!gunzip *.gz
!git clone https://github.com/pachterlab/CBP_2021.git
%cd ./CBP_2021/scripts
!pip3 install --quiet torch
!pip3 install --quiet anndata
!pip3 install --quiet matplotlib
!pip3 install --quiet scikit-learn
!pip3 install --quiet torchsummary
!pip install --quiet scanpy==1.7.0rc1
!pip3 install --quiet umap-learn
# !pip install tbb
```
## **Install Packages**
```
import networkx as nx
import anndata
import pandas as pd
import numpy as np
import visualizations as vis
import tools as tl
import random
from sklearn.decomposition import TruncatedSVD
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.neighbors import NeighborhoodComponentsAnalysis, NearestNeighbors
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import scale
import torch
import time
import scanpy as sc
import seaborn as sns
import umap
from scipy import stats
import scipy.io as sio
import matplotlib
import matplotlib.patches as patches
matplotlib.rc('axes',edgecolor='black')
%matplotlib inline
sc.set_figure_params(dpi=125)
#sns.set_style('white')
sc.set_figure_params(dpi=125)
```
## **Find KNN Jaccard Distances with Ambient Space in Latent Spaces**
```
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams['axes.linewidth'] = 0.1
state = 42
ndims = 2
data_path = '../..'
pcs = 50
n_latent = 50
```
### **Make 15/50/100D Recon MCML, 15/50/100D PCA, t-SNE/UMAP from all PCAs**
```
def getJac(orig_indices, latents, latentLab, n_neighbors=30):
    """Jaccard overlap of each embedding's KNN graph against a reference KNN graph.

    Args:
        orig_indices: neighbor indices of the reference (ambient) space, as
            returned by tl.getNeighbors.
        latents: list of embedding matrices (cells x dims).
        latentLab: one label per embedding, same length as latents.
        n_neighbors: neighborhood size for each embedding's KNN graph.

    Returns:
        DataFrame with one row per cell per embedding: column 'x' holds the
        per-cell Jaccard value, column 'latent' the embedding label.
    """
    emb = []
    xs = []
    knnDF = pd.DataFrame()
    for p in range(len(latents)):
        i = latents[p]
        l = latentLab[p]
        ind = tl.getNeighbors(i, n_neigh=n_neighbors, p=1)
        x = tl.getJaccard(orig_indices, ind)
        xs += x
        emb += [l] * len(x)
        print(l)
        # Reuse x rather than recomputing tl.getJaccard: the original called it
        # twice per embedding just to print the mean, doubling the most
        # expensive step. The printed value is identical.
        print(np.mean(x))
    knnDF['x'] = xs
    knnDF['latent'] = emb
    return knnDF
def latentComp(scaled_mat, log_mat, ndims=2, pcs=[15,50,100], rounds = 3):
    """ Compute latent space representations as baseline for reconstruction abilities

    For each of `rounds` repeats: embed `scaled_mat` with PCA at each
    dimensionality in `pcs`, plus t-SNE and UMAP of each PCA, plus a 2D PCA.
    KNN Jaccard values are measured against the KNN graph of `log_mat`
    (the ambient log-count space).
    NOTE(review): mutable default pcs=[15,50,100] -- never mutated here, but
    worth confirming callers don't rely on a fresh list.
    """
    reducer = umap.UMAP(n_components = ndims) # random_state = state
    #densUMAP = umap.UMAP(n_components = ndims,densmap=True)
    tsne = TSNE(n_components = ndims)
    latents = []
    latentLab = []
    latentType = []  # accumulated but never returned -- kept for parity
    # Reference KNN graph in the ambient (log counts) space.
    orig_indices = tl.getNeighbors(log_mat, n_neigh = 30,p=1)
    for i in range(rounds):
        for j in pcs:
            tsvd = TruncatedSVD(n_components=j)
            x_pca = tsvd.fit_transform(scaled_mat)
            pcaUMAP = reducer.fit_transform(x_pca)
            #pcaDensUMAP = densUMAP.fit_transform(x_pca)
            pcaTSNE = tsne.fit_transform(x_pca)
            latents += [x_pca,pcaTSNE, pcaUMAP]
            latentLab += ['PCA '+str(j)+'D','PCA '+str(j)+'D t-SNE','PCA '+str(j)+'D UMAP']
            latentType += [str(j)+'D','2D','2D']
        # NOTE(review): these 't-SNE 2D'/'UMAP 2D' entries are fit on x_pca
        # from the LAST value in pcs (the largest PCA), while 'PCA 2D' is a
        # fresh 2-component fit -- confirm that pairing is intended.
        pcaTSNE = tsne.fit_transform(x_pca)
        pcaUMAP = reducer.fit_transform(x_pca)
        tsvd = TruncatedSVD(n_components=2)
        x_pca = tsvd.fit_transform(scaled_mat)
        latents += [x_pca, pcaTSNE, pcaUMAP]
        latentLab += ['PCA 2D','t-SNE 2D','UMAP 2D']
        latentType += ['2D','2D','2D']
    df = getJac(orig_indices,latents, latentLab, 30)
    return df
def latentPCAComp(scaled_mat, ndims=2, pcs=[15,50,100], rounds = 3):
    """ Compute latent space representations as baseline for reconstruction abilities

    For each PCA dimensionality in `pcs`, the reference KNN graph is that PCA
    space itself, and `rounds` repeated t-SNE/UMAP fits of the PCA are scored
    against it.
    NOTE(review): `latents`/`latentLab` keep accumulating across values of j,
    so each getJac call also re-scores the embeddings from EARLIER j values
    against the current j's reference graph -- confirm this is intended.
    """
    reducer = umap.UMAP(n_components = ndims) # random_state = state
    #densUMAP = umap.UMAP(n_components = ndims,densmap=True)
    tsne = TSNE(n_components = ndims)
    latents = []
    latentLab = []
    latentType = []  # accumulated but never used
    frames = []
    for j in pcs:
        tsvd = TruncatedSVD(n_components=j)
        x_pca = tsvd.fit_transform(scaled_mat)
        # Reference graph: neighbors within this PCA space.
        orig_indices = tl.getNeighbors(x_pca, n_neigh = 30,p=1)
        for i in range(rounds):
            pcaUMAP = reducer.fit_transform(x_pca)
            #pcaDensUMAP = densUMAP.fit_transform(x_pca)
            pcaTSNE = tsne.fit_transform(x_pca)
            latents += [pcaTSNE, pcaUMAP]
            latentLab += ['PCA '+str(j)+'D t-SNE','PCA '+str(j)+'D UMAP']
            latentType += ['2D','2D']
        frames += [getJac(orig_indices,latents, latentLab, 30)]
    res = pd.concat(frames)
    return res
```
### **Read in Data**
Read in 10x VMH data
```
count_mat = sio.mmread(data_path+'/tenx.mtx')
count_mat.shape
#Center and scale log-normalized data
scaled_mat = scale(count_mat)
meta = pd.read_csv(data_path+'/tenxmetadata.csv',index_col = 0)
meta.head()
meta.sex_label.value_counts()[0]/meta.sex_label.value_counts()[1]
lab1 = list(meta.cluster)
lab2 = list(meta.sex_label)
lab3 = list(meta.sample_name)
allLabs = np.array([lab1,lab2])
nanLabs = np.array([[np.nan]*len(lab1)])
#Shuffled labels for over-fitting check
shuff_lab1 = random.sample(lab1, len(lab1))
shuff_lab2 = random.sample(lab2, len(lab2))
shuff_allLabs = np.array([shuff_lab1,shuff_lab2])
clus_colors = list(pd.unique(meta.cluster_color))
sex_colors = ['#F8C471','#abacb7']
#Get KNN Jaccard distances
tenxAmb = latentComp(scaled_mat,count_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(tenxAmb.head())
tenxPCAAmb = latentPCAComp(scaled_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(tenxPCAAmb.head())
!mkdir allOuts
tenxPCAAmb.to_csv('allOuts/tenxPCAAmb.csv')
tenxAmb.to_csv('allOuts/tenxAmb.csv')
```
**Read in SMART-Seq VMH data**
```
count_mat = sio.mmread(data_path+'/smartseq.mtx')
count_mat.shape
#Center and scale log-normalized data
scaled_mat = scale(count_mat)
meta = pd.read_csv(data_path+'/smartmetadata.csv',index_col = 0)
meta.head()
print(meta.sex_label.value_counts()[0]/meta.sex_label.value_counts()[1])
print(meta.sex_label.value_counts()[1])
lab1 = list(meta.smartseq_cluster)
lab2 = list(meta.sex_label)
lab3 = list(meta.medical_cond_label)
allLabs = np.array([lab1,lab2])
nanLabs = np.array([[np.nan]*len(lab1)])
#Shuffled labels for over-fitting check
shuff_lab1 = random.sample(lab1, len(lab1))
shuff_lab2 = random.sample(lab2, len(lab2))
shuff_allLabs = np.array([shuff_lab1,shuff_lab2])
clus_colors = list(pd.unique(meta.smartseq_cluster_color))
sex_colors = ['#abacb7','#F8C471']
#Get KNN Jaccard distances
smartAmb = latentComp(scaled_mat,count_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(smartAmb.head())
smartPCAAmb = latentPCAComp(scaled_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(smartPCAAmb.head())
smartPCAAmb.to_csv('allOuts/smartPCAAmb.csv')
smartAmb.to_csv('allOuts/smartAmb.csv')
```
**Read in MERFISH Data**
```
counts = anndata.read(data_path+'/counts.h5ad')
print(counts)
cellMeta = pd.read_csv(data_path+'/metadata.csv')
print(cellMeta.head())
cellMeta.columns
choice = np.unique(cellMeta.slice_id)[7] #7
choice
counts.obs['slice'] = pd.Categorical(cellMeta.slice_id)
counts.obs['type'] = pd.Categorical(cellMeta.subclass)
counts.obs['x'] = list(cellMeta.center_x)
counts.obs['y'] = list(cellMeta.center_y)
sub = counts[counts.obs['slice'].isin([choice])]
print(sub)
colors = np.random.rand(len(sub.obs['type']),3)
nanLabs = np.array([[np.nan]*len(sub.obs['type'])])
labs = np.array([list(sub.obs['type'])])
labs_cont = np.array([list(sub.obs['x']),list(sub.obs['y'])])
```
Make log-normalized, and scaled count matrices (separately)
```
orig_mat = sub.X
log_mat = np.log1p(sub.X)
sc.pp.log1p(sub)
#Center scale
sc.pp.scale(sub, max_value=10)
scaled_mat = sub.X
#Get KNN Jaccard distances
merAmb = latentComp(scaled_mat,log_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(merAmb.head())
merPCAAmb = latentPCAComp(scaled_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(merPCAAmb.head())
merPCAAmb.to_csv('allOuts/merPCAAmb.csv')
merAmb.to_csv('allOuts/merAmb.csv')
```
**Read in Ex-Utero E8.5 Data**
```
cell_types = {'0': 'Blood', '1': 'Mixed Mesoderm', '2': 'Foregut', '3': 'Cardiac', '4': 'Mid Hind Brain',
'5': 'Endothelial', '6': 'Presomitic Mesoderm', '7':'Amnion', '8':'Extra-Embryonic Endoderm', '9':'Pharyngeal Mesoderm',
'10': 'Extra-Embryonic Ectoderm', '11': 'Blood' , '12':'Placodes', '13': 'Neural Tube', '14':'Extra-Embryonic Mesoderm',
'15':'Somitic Mesoderm', '16':'Neural Crest', '17':'Amnion', '18':'Mid Hind Gut'}
```
Reading in log-normalized counts only
```
# CSV is genes x cells, hence the transpose (.T) when building the AnnData below
counts = pd.read_csv(data_path+'/GSE149372_scRNA.normalized.assay85.csv',index_col=0)
print(counts.head())
cellMeta = pd.read_csv(data_path+'/GSE149372_scRNA.MetaData.85.csv')
print(cellMeta.head())
#Center and scale log-normalized data
count_mat = counts.values.T
adata = anndata.AnnData(X = count_mat)
adata.obs_names = list(counts.columns)
adata.var_names = list(counts.index)
adata.obs['Utero'] = pd.Categorical(cellMeta.Utero)
adata.obs['Cluster'] = pd.Categorical(cellMeta.seurat_clusters)
adata
# Split ex-utero vs in-utero cells
exAdata = adata[adata.obs['Utero']=='ExUt']
inAdata = adata[adata.obs['Utero']=='InUt']
exAdata
#In paper, use top 2000 HVGs and 15 PCs
def scaleAdata(adata):
    """Select top-2000 highly variable genes, then center/scale an AnnData.

    Stashes the pre-scaling matrix in ``adata.obsm['log']`` and returns the
    HVG-subset, scaled object (values clipped at 10 SDs).
    """
    sc.pp.filter_cells(adata, min_counts=0)
    sc.pp.filter_genes(adata, min_counts=0)
    sc.pp.highly_variable_genes(adata, n_top_genes=2000)
    # .copy() materializes the AnnData view so the in-place writes below
    # (obsm assignment, sc.pp.scale) do not trigger view/copy warnings or
    # touch the parent object.
    adata = adata[:, adata.var['highly_variable']].copy()
    adata.obsm['log'] = adata.X
    sc.pp.scale(adata, max_value=10)
    return adata
# HVG-select and scale each subset independently
adata = scaleAdata(adata)
exAdata = scaleAdata(exAdata)
inAdata = scaleAdata(inAdata)
# Analyze the ex-utero subset
toUseAdata = exAdata
count_mat = toUseAdata.obsm['log']
scaled_mat = toUseAdata.X
#Set colors for cell clusters
colors = np.random.rand(len(toUseAdata.obs['Cluster']),3)
labs = np.array([list(toUseAdata.obs['Cluster']),list(toUseAdata.obs['Utero'])])
# adata.obs['Cluster'].value_counts()
#Get KNN Jaccard distances
exAmb = latentComp(scaled_mat,count_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(exAmb.head())
exPCAAmb = latentPCAComp(scaled_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(exPCAAmb.head())
exAmb.to_csv('allOuts/exAmb.csv')
exPCAAmb.to_csv('allOuts/exPCAAmb.csv')
```
Read in Integrated Utero E10.5 Data
```
def readLgMat(fname, data_path='../..'):
    """Read a large CSV count matrix in 1000-row chunks.

    All data columns are down-cast to float16 to limit memory; the first
    (index) column is kept as strings. Returns the concatenated DataFrame.
    """
    full_path = data_path + fname
    # Peek at the header only to discover the column names
    header_cols = pd.read_csv(full_path, nrows=0).columns
    dtypes = {'Unnamed: 0': str}
    for col in header_cols:
        if col not in dtypes:
            dtypes[col] = np.float16
    reader = pd.read_csv(full_path, chunksize=1000, index_col=0, dtype=dtypes)
    return pd.concat(reader)
```
*Read in Seurat-Integrated, 'Variance-Stabilized' and Scaled counts*
```
# fname = '/GSE149372_GSM5068636-GSM5068641_scRNA.integrated.scaled.assay105.csv' #'/GSE149372_GSM5068636-GSM5068641_scRNA.normalized.assay105.csv'
# Chunked read of the Seurat-integrated, already-scaled matrix
counts = readLgMat('/GSE149372_GSM5068636-GSM5068641_scRNA.integrated.scaled.assay105.csv')
counts.head()
cellMeta = pd.read_csv(data_path+'/GSE149372_GSM5068636-GSM5068641_scRNA.MetaData.105.csv')
print(cellMeta.head())
#Center and scale data
count_mat = counts.values.T
adata = anndata.AnnData(X = count_mat)
adata.obs_names = list(counts.columns)
adata.var_names = list(counts.index)
adata.obs['Utero'] = pd.Categorical(cellMeta.Utero)
adata.obs['Cluster'] = pd.Categorical(cellMeta['integrated_snn_res.0.3'])
adata
#In paper, use top 2000 HVGs and 15 PCs
adata.obsm['log'] = adata.X #Data is already scaled so 'log' here is not just log-normalized
sc.pp.scale(adata, max_value=10) #Already scaled
adata
count_mat = adata.obsm['log']
scaled_mat = adata.X
#Set colors for cell clusters
colors = np.random.rand(len(adata.obs['Cluster']),3)
labs = np.array([list(adata.obs['Cluster']),list(adata.obs['Utero'])])
```
*Original log-normalized counts (post-Seurat integration)*
```
#GSE149372_GSM5068636-GSM5068641_scRNA.normalized.assay105.csv, Only Log-normalized data, not integrated
counts2_orig = readLgMat('/GSE149372_GSM5068636-GSM5068641_scRNA.normalized.assay105.csv')
# Restrict to the genes kept in the integrated object above
counts2 = counts2_orig.loc[list(adata.var_names)]
counts2.shape
counts2_orig.shape
#Center and scale log-normalized data
count_mat_orig = counts2.values.T
adata2 = anndata.AnnData(X = count_mat_orig)
adata2.obs_names = list(counts2.columns)
adata2.var_names = list(counts2.index)
adata2.obs['Utero'] = pd.Categorical(cellMeta.Utero)
adata2.obs['Cluster'] = pd.Categorical(cellMeta['integrated_snn_res.0.3'])
adata2
adata2.obsm['log'] = adata2.X #Data matrix is log-normalized
adata2.raw = adata2.copy()
sc.pp.scale(adata2, max_value=10)
adata2
count_mat_orig = adata2.obsm['log']
scaled_mat_orig = adata2.X
#Get KNN Jaccard distances
# NOTE(review): `scaled_mat` below comes from the *integrated* adata of the
# previous cell while the log matrix is `count_mat_orig` from adata2 —
# confirm mixing the two objects is intended (scaled_mat_orig is unused).
e105Amb = latentComp(scaled_mat,count_mat_orig, ndims=2, pcs=[15,50,100], rounds = 3)
print(e105Amb.head())
e105PCAAmb = latentPCAComp(scaled_mat, ndims=2, pcs=[15,50,100], rounds = 3)
print(e105PCAAmb.head())
e105Amb.to_csv('allOuts/e105Amb.csv')
e105PCAAmb.to_csv('allOuts/e105PCAAmb.csv')
# !zip -r ./allOuts.zip allOuts
# # from google.colab import files
# # files.download("/content/allOuts.zip")
```
| github_jupyter |
# Computer Vision
In this notebook we're going to cover the basics of computer vision using CNNs. So far we've explored using CNNs for text but their initial origin began with computer vision tasks.
<img src="figures/cnn_cv.png" width=650>
# Configuration
```
# Experiment configuration; file paths are expanded under save_dir later on
config = {
    "seed": 1234,
    "cuda": True,                     # request GPU; check_cuda() falls back to CPU
    "data_url": "data/surnames.csv",  # NOTE(review): leftover from a text notebook — appears unused here
    "data_dir": "cifar10",            # where per-class PNG directories are written
    "shuffle": True,
    "train_size": 0.7,
    "val_size": 0.15,
    "test_size": 0.15,
    "vectorizer_file": "vectorizer.json",
    "model_file": "model.pth",
    "save_dir": "experiments",
    "num_epochs": 5,
    "early_stopping_criteria": 5,     # epochs without val-loss improvement before stopping
    "learning_rate": 1e-3,
    "batch_size": 128,
    "fc": {                           # fully-connected head hyperparameters
        "hidden_dim": 100,
        "dropout_p": 0.1
    }
}
```
# Set up
```
# Load PyTorch library
#!pip3 install torch
import os
import json
import numpy as np
import time
import torch
import uuid
```
### Components
```
def set_seeds(seed, cuda):
    """Seed the NumPy and PyTorch RNGs for reproducibility.

    Args:
        seed (int): seed value for all generators.
        cuda (bool): when True, also seed every CUDA device.
    """
    for seeder in (np.random.seed, torch.manual_seed):
        seeder(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    print ("==> 🌱 Set NumPy and PyTorch seeds.")
def generate_unique_id():
    """Return a unique experiment id of the form "<epoch_seconds>_<uuid1>"."""
    unique_id = "{}_{}".format(int(time.time()), uuid.uuid1())
    print ("==> 🔑 Generated unique id: {0}".format(unique_id))
    return unique_id
def create_dirs(dirpath):
    """Create *dirpath* (and any missing parents) if it does not exist.

    Fixed: passes exist_ok=True so a concurrent creator (or a directory
    appearing between the exists() check and makedirs()) no longer raises
    FileExistsError.
    """
    if not os.path.exists(dirpath):
        os.makedirs(dirpath, exist_ok=True)
        print ("==> 📂 Created {0}".format(dirpath))
def check_cuda(cuda):
    """Resolve the torch device, falling back to CPU when CUDA is unavailable.

    Args:
        cuda (bool): whether the caller wants a GPU.
    Returns:
        torch.device: "cuda" only if requested AND available, else "cpu".
    """
    use_cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print ("==> 💻 Device: {0}".format(device))
    return device
```
### Operations
```
# Set seeds for reproducability
set_seeds(seed=config["seed"], cuda=config["cuda"])
# Generate unique experiment ID
config["experiment_id"] = generate_unique_id()
# Create experiment directory
config["save_dir"] = os.path.join(config["save_dir"], config["experiment_id"])
create_dirs(dirpath=config["save_dir"])
# Expand file paths to store components later
config["vectorizer_file"] = os.path.join(config["save_dir"], config["vectorizer_file"])
config["model_file"] = os.path.join(config["save_dir"], config["model_file"])
print ("Expanded filepaths: ")
print ("{}".format(config["vectorizer_file"]))
print ("{}".format(config["model_file"]))
# Save config (done BEFORE the non-JSON-serializable torch device is added)
config_fp = os.path.join(config["save_dir"], "config.json")
with open(config_fp, "w") as fp:
    json.dump(config, fp)
# Check CUDA
config["device"] = check_cuda(cuda=config["cuda"])
```
# Load data
We are going to get CIFAR10 data which contains images from ten unique classes. Each image has length 32, width 32 and three color channels (RGB). We are going to save these images in a directory. Each image will have its own directory (name will be the class).
```
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
import tensorflow as tf
```
### Components
```
def get_data():
    """Get CIFAR10 data.

    Downloads CIFAR10 via tf.keras and merges the train and test partitions
    into one image array X and one flat label vector y; the notebook redoes
    its own train/val/test split later with split_data().
    """
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    X = np.vstack([x_train, x_test])
    # Labels arrive as shape (n, 1); squeeze to a 1-D vector
    y = np.vstack([y_train, y_test]).squeeze(1)
    print ("==> 🌊 Downloading Cifar10 data using TensorFlow.")
    return X, y
def create_class_dirs(data_dir, classes):
    """Create the root data directory plus one subdirectory per class name.

    Args:
        data_dir (str): root directory for the image dataset.
        classes (dict): index -> class-name mapping; names become dirs.
    """
    create_dirs(dirpath=data_dir)
    for class_name in classes.values():
        create_dirs(dirpath=os.path.join(data_dir, class_name))
def visualize_samples(data_dir, classes):
    """Visualize sample images for
    each class.

    Shows exactly one image per class (the first image file found in each
    class directory), laid out in a single row of subplots.
    """
    # Visualize some samples
    num_samples = len(classes)
    for i, _class in enumerate(classes.values()):
        for file in os.listdir(os.path.join(data_dir, _class)):
            if file.endswith((".png", ".jpg", ".jpeg")):
                plt.subplot(1, num_samples, i+1)
                plt.title("{0}".format(_class))
                img = Image.open(os.path.join(data_dir, _class, file))
                plt.imshow(img)
                plt.axis("off")
                break  # only the first matching file per class
def img_to_array(fp):
    """Convert an image file to a float32 NumPy array.

    Fixed: uses a context manager so PIL's lazily-opened file handle is
    closed deterministically (Image.open keeps the file open until GC
    otherwise, which can exhaust descriptors over ~60k images); also fixes
    the "Conver" docstring typo.
    """
    with Image.open(fp) as img:
        # np.asarray forces the pixel data to load before the file closes
        array = np.asarray(img, dtype="float32")
    return array
def load_data(data_dir, classes):
    """Load data into Pandas DataFrame.

    Walks <data_dir>/<class>/ for image files and returns a DataFrame with
    one row per image: 'image' (float32 HxWxC array) and 'category'
    (class-name string).
    """
    # Load data from files
    data = []
    for i, _class in enumerate(classes.values()):
        for file in os.listdir(os.path.join(data_dir, _class)):
            if file.endswith((".png", ".jpg", ".jpeg")):
                full_filepath = os.path.join(data_dir, _class, file)
                data.append({"image": img_to_array(full_filepath), "category": _class})
    # Load to Pandas DataFrame
    df = pd.DataFrame(data)
    print ("==> 🖼️ Image dimensions: {0}".format(df.image[0].shape))
    print ("==> 🍣 Raw data:")
    print (df.head())
    return df
```
### Operations
```
# Get CIFAR10 data
X, y = get_data()
print ("X:", X.shape)
print ("y:", y.shape)
# Classes
classes = {0: 'plane', 1: 'car', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog',
           6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'}
# Create image directories
create_class_dirs(data_dir=config["data_dir"], classes=classes)
# Save one PNG per image; the {0:02d} pad simply grows past two digits for
# large i, so filenames remain unique
for i, (image, label) in enumerate(zip(X, y)):
    _class = classes[label]
    im = Image.fromarray(image)
    im.save(os.path.join(config["data_dir"], _class, "{0:02d}.png".format(i)))
# Visualize each class
visualize_samples(data_dir=config["data_dir"], classes=classes)
# Load data into DataFrame
df = load_data(data_dir=config["data_dir"], classes=classes)
```
# Split data
Split the data into train, validation and test sets where each split has similar class distributions.
```
import collections
```
### Components
```
def split_data(df, shuffle, train_size, val_size, test_size):
    """Split rows into stratified train/val/test partitions per category.

    Args:
        df (pd.DataFrame): must contain a ``category`` column.
        shuffle (bool): shuffle items within each category before splitting.
        train_size (float): fraction assigned to 'train'.
        val_size (float): fraction assigned to 'val'.
        test_size (float): kept for interface compatibility; the test split
            actually receives everything left after train+val (including
            rounding leftovers), so this value is informational only.
    Returns:
        pd.DataFrame: the rows with an added ``split`` column.
    """
    # Group rows by category so every split preserves the class balance
    by_category = collections.defaultdict(list)
    for _, row in df.iterrows():
        by_category[row.category].append(row.to_dict())
    print ("\n==> 🛍️ Categories:")
    for category in by_category:
        print ("{0}: {1}".format(category, len(by_category[category])))
    # Assign a split label to each item, category by category
    final_list = []
    for _, item_list in sorted(by_category.items()):
        if shuffle:
            np.random.shuffle(item_list)
        n = len(item_list)
        n_train = int(train_size*n)
        n_val = int(val_size*n)
        # (removed unused n_test local — the tail slice defines the test set)
        for item in item_list[:n_train]:
            item['split'] = 'train'
        for item in item_list[n_train:n_train+n_val]:
            item['split'] = 'val'
        for item in item_list[n_train+n_val:]:
            item['split'] = 'test'
        final_list.extend(item_list)
    # df with split datasets
    split_df = pd.DataFrame(final_list)
    print ("\n==> 🖖 Splits:")
    print (split_df["split"].value_counts())
    return split_df
```
### Operations
```
# Split data
split_df = split_data(
df=df, shuffle=config["shuffle"],
train_size=config["train_size"],
val_size=config["val_size"],
test_size=config["test_size"])
```
# Vocabulary
Create vocabularies for the image classes.
### Components
```
class Vocabulary(object):
    """Bidirectional token <-> index mapping, optionally with an <UNK> token."""
    def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
        """
        Args:
            token_to_idx (dict|None): pre-existing token->index mapping.
            add_unk (bool): register ``unk_token`` and remember its index.
            unk_token (str): token used for unknown lookups.
        """
        # Token to index
        if token_to_idx is None:
            token_to_idx = {}
        self.token_to_idx = token_to_idx
        # Index to token (inverse mapping, kept in sync by add_token)
        self.idx_to_token = {idx: token
                             for token, idx in self.token_to_idx.items()}
        # Add unknown token
        self.add_unk = add_unk
        self.unk_token = unk_token
        if self.add_unk:
            self.unk_index = self.add_token(self.unk_token)
    def to_serializable(self):
        """Return a JSON-serializable dict of constructor arguments."""
        return {'token_to_idx': self.token_to_idx,
                'add_unk': self.add_unk, 'unk_token': self.unk_token}
    @classmethod
    def from_serializable(cls, contents):
        """Rebuild a Vocabulary from to_serializable() output."""
        return cls(**contents)
    def add_token(self, token):
        """Add *token* if new and return its (possibly existing) index."""
        if token in self.token_to_idx:
            index = self.token_to_idx[token]
        else:
            index = len(self.token_to_idx)
            self.token_to_idx[token] = index
            self.idx_to_token[index] = token
        return index
    def add_tokens(self, tokens):
        """Add several tokens; return their indices in order.

        Bug fix: the original subscripted the bound method
        (``self.add_token[token]``), which raised TypeError — the method
        must be called.
        """
        return [self.add_token(token) for token in tokens]
    def lookup_token(self, token):
        """Return the index for *token*; falls back to unk_index if enabled."""
        if self.add_unk:
            index = self.token_to_idx.get(token, self.unk_index)
        else:
            index = self.token_to_idx[token]
        return index
    def lookup_index(self, index):
        """Return the token stored at *index*; raise KeyError if absent."""
        if index not in self.idx_to_token:
            raise KeyError("the index (%d) is not in the Vocabulary" % index)
        return self.idx_to_token[index]
    def __str__(self):
        return "<Vocabulary(size=%d)>" % len(self)
    def __len__(self):
        return len(self.token_to_idx)
```
### Operations
```
# Vocabulary instance
category_vocab = Vocabulary(add_unk=False)
for index, row in df.iterrows():
category_vocab.add_token(row.category)
print (category_vocab) # __str__
print (len(category_vocab)) # __len__
index = category_vocab.lookup_token("bird")
print (index)
print (category_vocab.lookup_index(index))
```
# Sequence vocabulary
We will also create a vocabulary object for the actual images. It will store the means and standard deviations for each image channel (RGB), which we will use later on for normalizing our images with the Vectorizer.
```
from collections import Counter
import string
```
### Components
```
class SequenceVocabulary(Vocabulary):
    """Stores per-channel (RGB) mean/std statistics of the train split.

    NOTE(review): this subclass never calls ``super().__init__``, so the
    token-lookup machinery inherited from Vocabulary is unusable on these
    instances; only the channel statistics are actually used (by
    ImageVectorizer). The inherited ``from_serializable`` still works
    because it just does ``cls(**contents)``.
    """
    def __init__(self, train_means, train_stds):
        # train_means / train_stds: per-channel [R, G, B] floats
        self.train_means = train_means
        self.train_stds = train_stds
    def to_serializable(self):
        """Return a JSON-serializable dict of constructor arguments."""
        contents = {'train_means': self.train_means,
                    'train_stds': self.train_stds}
        return contents
    @classmethod
    def from_dataframe(cls, df):
        """Compute channel means/stds over images in the 'train' split only."""
        train_data = df[df.split == "train"]
        means = {0:[], 1:[], 2:[]}
        stds = {0:[], 1:[], 2:[]}
        for image in train_data.image:
            for dim in range(3):
                means[dim].append(np.mean(image[:, :, dim]))
                stds[dim].append(np.std(image[:, :, dim]))
        # Mean of the per-image statistics (not a pooled mean over all pixels)
        train_means = np.array((np.mean(means[0]), np.mean(means[1]),
                                np.mean(means[2])), dtype="float64").tolist()
        train_stds = np.array((np.mean(stds[0]), np.mean(stds[1]),
                               np.mean(stds[2])), dtype="float64").tolist()
        return cls(train_means, train_stds)
    def __str__(self):
        return "<SequenceVocabulary(train_means: {0}, train_stds: {1}>".format(
            self.train_means, self.train_stds)
```
### Operations
```
# Create SequenceVocabulary instance
image_vocab = SequenceVocabulary.from_dataframe(split_df)
print (image_vocab) # __str__
```
# Vectorizer
The vectorizer will normalize our images using the vocabulary.
### Components
```
class ImageVectorizer(object):
    """Normalizes images channel-wise and converts them to CHW layout."""
    def __init__(self, image_vocab, category_vocab):
        self.image_vocab = image_vocab        # holds train_means / train_stds
        self.category_vocab = category_vocab  # category <-> index mapping
    def vectorize(self, image):
        """Return a normalized (3, H, W) copy of an (H, W, 3) image."""
        normalized = np.copy(image)  # never mutate the caller's array / the df
        means = self.image_vocab.train_means
        stds = self.image_vocab.train_stds
        for channel in range(3):
            normalized[:, :, channel] = (
                (normalized[:, :, channel] - means[channel]) / stds[channel])
        # (H, W, 3) -> (3, H, W), matching PyTorch's channels-first layout
        return np.transpose(normalized, (2, 0, 1))
    @classmethod
    def from_dataframe(cls, df):
        """Build both vocabularies from a split DataFrame."""
        image_vocab = SequenceVocabulary.from_dataframe(df)
        category_vocab = Vocabulary(add_unk=False)
        for category in sorted(set(df.category)):
            category_vocab.add_token(category)
        return cls(image_vocab, category_vocab)
    @classmethod
    def from_serializable(cls, contents):
        """Rebuild a vectorizer from to_serializable() output."""
        return cls(
            image_vocab=SequenceVocabulary.from_serializable(contents['image_vocab']),
            category_vocab=Vocabulary.from_serializable(contents['category_vocab']))
    def to_serializable(self):
        """Return a JSON-serializable dict of both vocabularies."""
        return {'image_vocab': self.image_vocab.to_serializable(),
                'category_vocab': self.category_vocab.to_serializable()}
```
### Operations
```
# Vectorizer instance
vectorizer = ImageVectorizer.from_dataframe(split_df)
print (vectorizer.image_vocab)
print (vectorizer.category_vocab)
print (vectorizer.category_vocab.token_to_idx)
image_vector = vectorizer.vectorize(split_df.iloc[0].image)
print (image_vector.shape)
```
# Dataset
The Dataset will create vectorized data from the data.
```
import random
from torch.utils.data import Dataset, DataLoader
```
### Components
```
class ImageDataset(Dataset):
    """PyTorch Dataset over the image DataFrame with named splits."""
    def __init__(self, df, vectorizer, infer=False):
        """
        Args:
            df (pd.DataFrame): rows with 'image', 'category', 'split' columns.
            vectorizer (ImageVectorizer): normalizes images, maps categories.
            infer (bool): when True, only the 'infer' split is exposed and no
                class weights are computed.
        """
        self.df = df
        self.vectorizer = vectorizer
        # Data splits
        if not infer:
            self.train_df = self.df[self.df.split=='train']
            self.train_size = len(self.train_df)
            self.val_df = self.df[self.df.split=='val']
            self.val_size = len(self.val_df)
            self.test_df = self.df[self.df.split=='test']
            self.test_size = len(self.test_df)
            self.lookup_dict = {'train': (self.train_df, self.train_size),
                                'val': (self.val_df, self.val_size),
                                'test': (self.test_df, self.test_size)}
            self.set_split('train')
            # Class weights (inverse frequency, ordered by vocab index so the
            # weight tensor lines up with model output logits)
            class_counts = df.category.value_counts().to_dict()
            def sort_key(item):
                return self.vectorizer.category_vocab.lookup_token(item[0])
            sorted_counts = sorted(class_counts.items(), key=sort_key)
            frequencies = [count for _, count in sorted_counts]
            self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
        else:
            self.infer_df = self.df[self.df.split=="infer"]
            self.infer_size = len(self.infer_df)
            self.lookup_dict = {'infer': (self.infer_df, self.infer_size)}
            self.set_split('infer')
    @classmethod
    def load_dataset_and_make_vectorizer(cls, df):
        """Build a fresh vectorizer from the train split only."""
        train_df = df[df.split=='train']
        return cls(df, ImageVectorizer.from_dataframe(train_df))
    @classmethod
    def load_dataset_and_load_vectorizer(cls, df, vectorizer_filepath):
        """Pair *df* with a previously saved vectorizer."""
        vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
        return cls(df, vectorizer)
    @staticmethod
    def load_vectorizer_only(vectorizer_filepath):
        """Deserialize an ImageVectorizer from a JSON file.

        Fixed: declared @staticmethod — the original plain def treated the
        path as an implicit first argument and only worked because it was
        always accessed through the class object, never an instance.
        """
        with open(vectorizer_filepath) as fp:
            return ImageVectorizer.from_serializable(json.load(fp))
    def save_vectorizer(self, vectorizer_filepath):
        """Serialize this dataset's vectorizer to a JSON file."""
        with open(vectorizer_filepath, "w") as fp:
            json.dump(self.vectorizer.to_serializable(), fp)
    def set_split(self, split="train"):
        """Point __len__/__getitem__ at the named split."""
        self.target_split = split
        self.target_df, self.target_size = self.lookup_dict[split]
    def __str__(self):
        return "<Dataset(split={0}, size={1})".format(
            self.target_split, self.target_size)
    def __len__(self):
        return self.target_size
    def __getitem__(self, index):
        """Return one vectorized sample: {'image': CHW array, 'category': int}."""
        row = self.target_df.iloc[index]
        image_vector = self.vectorizer.vectorize(row.image)
        category_index = self.vectorizer.category_vocab.lookup_token(row.category)
        return {'image': image_vector,
                'category': category_index}
    def get_num_batches(self, batch_size):
        """Number of full batches available in the current split."""
        return len(self) // batch_size
    def generate_batches(self, batch_size, shuffle=True, drop_last=True, device="cpu"):
        """Yield DataLoader batches with every tensor moved to *device*."""
        dataloader = DataLoader(dataset=self, batch_size=batch_size,
                                shuffle=shuffle, drop_last=drop_last)
        for data_dict in dataloader:
            out_data_dict = {}
            for name, tensor in data_dict.items():
                out_data_dict[name] = tensor.to(device)
            yield out_data_dict
def sample(dataset):
    """Print a random sample from *dataset* as a sanity check.

    Fixed: random.randint is inclusive on both ends, so the original
    ``randint(0, len(dataset))`` could index one past the end and raise
    IndexError; randrange excludes the upper bound.
    """
    sample_idx = random.randrange(len(dataset))
    sample = dataset[sample_idx]
    print ("\n==> 🔢 Dataset:")
    print ("Random sample: {0}".format(sample))
    print ("Unvectorized category: {0}".format(
        dataset.vectorizer.category_vocab.lookup_index(sample['category'])))
```
### Operations
```
# Load dataset and vectorizer
dataset = ImageDataset.load_dataset_and_make_vectorizer(split_df)
dataset.save_vectorizer(config["vectorizer_file"])
vectorizer = dataset.vectorizer
print (dataset.class_weights)
# Sample checks
sample(dataset=dataset)
```
# Model
Basic CNN architecture for image classification.
```
import torch.nn as nn
import torch.nn.functional as F
```
### Components
```
class ImageModel(nn.Module):
    """Two conv/pool stages followed by a two-layer classifier head.

    Expects (N, 3, 32, 32) input; the hard-coded 20*5*5 flatten size comes
    from two 5x5 valid convolutions each followed by 2x2 max pooling.
    """
    def __init__(self, num_hidden_units, num_classes, dropout_p):
        super(ImageModel, self).__init__()
        # 3 input channels -> 10 filters -> 20 filters, both 5x5 kernels
        self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv_dropout = nn.Dropout2d(dropout_p)
        self.fc1 = nn.Linear(20*5*5, num_hidden_units)
        self.dropout = nn.Dropout(dropout_p)
        self.fc2 = nn.Linear(num_hidden_units, num_classes)
    def forward(self, x, apply_softmax=False):
        """Return class logits, or probabilities when apply_softmax=True."""
        # Stage 1: conv (N,10,28,28) -> pool (N,10,14,14) -> relu
        z = F.relu(F.max_pool2d(self.conv1(x), 2))
        # Stage 2: conv (N,20,10,10) -> dropout -> pool (N,20,5,5) -> relu
        z = F.relu(F.max_pool2d(self.conv_dropout(self.conv2(z)), 2))
        # Flatten for the fully-connected head
        z = z.view(-1, 20*5*5)
        z = self.dropout(F.relu(self.fc1(z)))
        y_pred = self.fc2(z)
        if apply_softmax:
            y_pred = F.softmax(y_pred, dim=1)
        return y_pred
def initialize_model(config, vectorizer):
    """Construct an ImageModel from the experiment config and vectorizer.

    The number of output classes is taken from the category vocabulary so
    the head always matches the data.
    """
    print ("\n==> 🚀 Initializing model:")
    fc_cfg = config["fc"]
    model = ImageModel(num_hidden_units=fc_cfg["hidden_dim"],
                       num_classes=len(vectorizer.category_vocab),
                       dropout_p=fc_cfg["dropout_p"])
    print (model.named_modules)
    return model
```
### Operations
```
# Initializing model
model = initialize_model(config=config, vectorizer=vectorizer)
```
# Training
Training operations for image classification.
```
import torch.optim as optim
```
### Components
```
def compute_accuracy(y_pred, y_target):
    """Return the percentage of rows whose argmax prediction equals the target.

    Args:
        y_pred (Tensor): (N, C) class scores or logits.
        y_target (Tensor): (N,) integer class labels.
    """
    _, predictions = torch.max(y_pred, dim=1)
    n_correct = torch.eq(predictions, y_target).sum().item()
    return n_correct / len(predictions) * 100
def update_train_state(model, train_state):
    """Log epoch metrics, checkpoint on improvement, update early stopping.

    Returns the (mutated) train_state dict.

    Fixed: ``early_stopping_best_val`` was never updated after an
    improvement, so the comparison stayed against the initial 1e8 sentinel —
    the patience counter could never advance and early stopping never fired.
    The best value is now recorded at epoch 0 and on every improvement.
    """
    # Verbose epoch summary
    print ("[EPOCH]: {0} | [LR]: {1} | [TRAIN LOSS]: {2:.2f} | [TRAIN ACC]: {3:.1f}% | [VAL LOSS]: {4:.2f} | [VAL ACC]: {5:.1f}%".format(
        train_state['epoch_index'], train_state['learning_rate'],
        train_state['train_loss'][-1], train_state['train_acc'][-1],
        train_state['val_loss'][-1], train_state['val_acc'][-1]))
    # Save one model at least
    if train_state['epoch_index'] == 0:
        torch.save(model.state_dict(), train_state['model_filename'])
        train_state['stop_early'] = False
        train_state['early_stopping_best_val'] = train_state['val_loss'][-1]
    # Save model if performance improved
    elif train_state['epoch_index'] >= 1:
        loss_t = train_state['val_loss'][-1]
        # If loss did not improve on the best seen so far
        if loss_t >= train_state['early_stopping_best_val']:
            train_state['early_stopping_step'] += 1
        # Loss improved: checkpoint and reset patience
        else:
            torch.save(model.state_dict(), train_state['model_filename'])
            train_state['early_stopping_best_val'] = loss_t
            train_state['early_stopping_step'] = 0
        # Stop early ?
        train_state['stop_early'] = train_state['early_stopping_step'] \
            >= train_state['early_stopping_criteria']
    return train_state
class Trainer(object):
    """Runs the train / validation / test loops for an image classifier.

    Owns the optimizer, LR scheduler, class-weighted loss, and a
    ``train_state`` dict used for logging, checkpointing and early stopping
    (consumed by ``update_train_state``).
    """
    def __init__(self, dataset, model, model_file, device, shuffle,
                 num_epochs, batch_size, learning_rate, early_stopping_criteria):
        self.dataset = dataset
        self.class_weights = dataset.class_weights.to(device)
        self.model = model.to(device)
        self.device = device
        self.shuffle = shuffle
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        # Weight the loss by inverse class frequency to offset imbalance
        self.loss_func = nn.CrossEntropyLoss(self.class_weights)
        self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)
        # Halve the LR after `patience` epochs without val-loss improvement
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer=self.optimizer, mode='min', factor=0.5, patience=1)
        # Bookkeeping consumed by update_train_state()
        self.train_state = {
            'done_training': False,
            'stop_early': False,
            'early_stopping_step': 0,
            'early_stopping_best_val': 1e8,
            'early_stopping_criteria': early_stopping_criteria,
            'learning_rate': learning_rate,
            'epoch_index': 0,
            'train_loss': [],
            'train_acc': [],
            'val_loss': [],
            'val_acc': [],
            'test_loss': -1,
            'test_acc': -1,
            'model_filename': model_file}
    def run_train_loop(self):
        """Train for num_epochs, validating each epoch; may stop early."""
        print ("==> 🏋 Training:")
        for epoch_index in range(self.num_epochs):
            self.train_state['epoch_index'] = epoch_index
            # --- Train split ---
            # initialize batch generator, set loss and acc to 0, set train mode on
            self.dataset.set_split('train')
            batch_generator = self.dataset.generate_batches(
                batch_size=self.batch_size, shuffle=self.shuffle,
                device=self.device)
            running_loss = 0.0
            running_acc = 0.0
            self.model.train()
            for batch_index, batch_dict in enumerate(batch_generator):
                # zero the gradients
                self.optimizer.zero_grad()
                # compute the output
                y_pred = self.model(batch_dict['image'])
                # compute the loss
                loss = self.loss_func(y_pred, batch_dict['category'])
                loss_t = loss.item()
                # incremental (running) mean of the per-batch loss
                running_loss += (loss_t - running_loss) / (batch_index + 1)
                # compute gradients using loss
                loss.backward()
                # use optimizer to take a gradient step
                self.optimizer.step()
                # compute the accuracy
                acc_t = compute_accuracy(y_pred, batch_dict['category'])
                running_acc += (acc_t - running_acc) / (batch_index + 1)
            self.train_state['train_loss'].append(running_loss)
            self.train_state['train_acc'].append(running_acc)
            # --- Validation split ---
            # NOTE(review): consider wrapping this pass in torch.no_grad() —
            # gradients are not needed for evaluation.
            self.dataset.set_split('val')
            batch_generator = self.dataset.generate_batches(
                batch_size=self.batch_size, shuffle=self.shuffle, device=self.device)
            running_loss = 0.
            running_acc = 0.
            self.model.eval()
            for batch_index, batch_dict in enumerate(batch_generator):
                # compute the output
                y_pred = self.model(batch_dict['image'])
                # compute the loss
                loss = self.loss_func(y_pred, batch_dict['category'])
                loss_t = loss.to("cpu").item()
                running_loss += (loss_t - running_loss) / (batch_index + 1)
                # compute the accuracy
                acc_t = compute_accuracy(y_pred, batch_dict['category'])
                running_acc += (acc_t - running_acc) / (batch_index + 1)
            self.train_state['val_loss'].append(running_loss)
            self.train_state['val_acc'].append(running_acc)
            # Log, checkpoint, and update the early-stopping state
            self.train_state = update_train_state(model=self.model, train_state=self.train_state)
            self.scheduler.step(self.train_state['val_loss'][-1])
            if self.train_state['stop_early']:
                break
    def run_test_loop(self):
        """Evaluate the current model on the test split and record metrics."""
        # initialize batch generator, set loss and acc to 0; set eval mode on
        self.dataset.set_split('test')
        batch_generator = self.dataset.generate_batches(
            batch_size=self.batch_size, shuffle=self.shuffle, device=self.device)
        running_loss = 0.0
        running_acc = 0.0
        self.model.eval()
        for batch_index, batch_dict in enumerate(batch_generator):
            # compute the output
            y_pred = self.model(batch_dict['image'])
            # compute the loss
            loss = self.loss_func(y_pred, batch_dict['category'])
            loss_t = loss.item()
            running_loss += (loss_t - running_loss) / (batch_index + 1)
            # compute the accuracy
            acc_t = compute_accuracy(y_pred, batch_dict['category'])
            running_acc += (acc_t - running_acc) / (batch_index + 1)
        self.train_state['test_loss'] = running_loss
        self.train_state['test_acc'] = running_acc
        # Verbose
        print ("==> 💯 Test performance:")
        print ("Test loss: {0:.2f}".format(self.train_state['test_loss']))
        print ("Test Accuracy: {0:.1f}%".format(self.train_state['test_acc']))
def plot_performance(train_state, save_dir, show_plot=True):
    """ Plot loss and accuracy.

    Draws per-epoch train/val loss and accuracy side by side, saves the
    figure to <save_dir>/performance.png, and optionally displays it.
    """
    # Figure size
    plt.figure(figsize=(15,5))
    # Plot Loss
    plt.subplot(1, 2, 1)
    plt.title("Loss")
    plt.plot(train_state["train_loss"], label="train")
    plt.plot(train_state["val_loss"], label="val")
    plt.legend(loc='upper right')
    # Plot Accuracy
    plt.subplot(1, 2, 2)
    plt.title("Accuracy")
    plt.plot(train_state["train_acc"], label="train")
    plt.plot(train_state["val_acc"], label="val")
    plt.legend(loc='lower right')
    # Save figure
    plt.savefig(os.path.join(save_dir, "performance.png"))
    # Show plots
    if show_plot:
        print ("==> 📈 Metric plots:")
        plt.show()
def save_train_state(train_state, save_dir):
    """Mark training as finished and persist train_state as JSON in save_dir."""
    train_state["done_training"] = True
    out_path = os.path.join(save_dir, "train_state.json")
    with open(out_path, "w") as fp:
        json.dump(train_state, fp)
    print ("==> ✅ Training complete!")
```
### Operations
```
# Training
trainer = Trainer(
dataset=dataset, model=model, model_file=config["model_file"],
device=config["device"], shuffle=config["shuffle"],
num_epochs=config["num_epochs"], batch_size=config["batch_size"],
learning_rate=config["learning_rate"],
early_stopping_criteria=config["early_stopping_criteria"])
trainer.run_train_loop()
# Plot performance
plot_performance(train_state=trainer.train_state,
save_dir=config["save_dir"], show_plot=True)
# Test performance
trainer.run_test_loop()
# Save all results
save_train_state(train_state=trainer.train_state, save_dir=config["save_dir"])
```
~60% test performance for our CIFAR10 dataset is not bad but we can do way better.
# Transfer learning
In this section, we're going to use a pretrained model that performs very well on a different dataset. We're going to take the architecture and the initial convolutional weights from the model to use on our data. We will freeze the initial convolutional weights and fine tune the later convolutional and fully-connected layers.
Transfer learning works here because the initial convolution layers act as excellent feature extractors for common spatial features that are shared across images regardless of their class. We're going to leverage these large, pretrained models' feature extractors for our own dataset.
```
!pip install torchvision
from torchvision import models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
print (model_names)
model_name = 'vgg19_bn'
vgg_19bn = models.__dict__[model_name](pretrained=True) # Set false to train from scratch
print (vgg_19bn.named_parameters)
```
The VGG model we chose has a `features` and a `classifier` component. The `features` component is composed of convolution and pooling layers which act as feature extractors. The `classifier` component is composed on fully connected layers. We're going to freeze most of the `feature` component and design our own FC layers for our CIFAR10 task. You can access the default code for all models at `/usr/local/lib/python3.6/dist-packages/torchvision/models` if you prefer cloning and modifying that instead.
### Components
```
class ImageModel(nn.Module):
    """Pretrained conv feature extractor plus a custom fully-connected head.

    The head assumes the extractor emits 512 features per sample (as
    VGG19-bn's ``features`` does on 32x32 inputs).
    """
    def __init__(self, feature_extractor, num_hidden_units,
                 num_classes, dropout_p):
        """
        Args:
            feature_extractor (nn.Module): frozen/finetuned conv backbone.
            num_hidden_units (int): unused; kept for interface compatibility
                with the scratch ImageModel (the 250/100 head is fixed).
            num_classes (int): size of the final output layer.
            dropout_p (float): unused; the head keeps its fixed 0.5 dropout.
        """
        super(ImageModel, self).__init__()
        # Pretrained feature extractor (conv + pooling layers)
        self.feature_extractor = feature_extractor
        # FC head. Fixed: the final layer now uses num_classes instead of a
        # hard-coded 10, so the model generalizes beyond CIFAR10 (callers in
        # this notebook pass 10, so behavior is unchanged here).
        self.classifier = nn.Sequential(
            nn.Linear(512, 250, bias=True),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(250, 100, bias=True),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(100, num_classes, bias=True),
        )
    def forward(self, x, apply_softmax=False):
        """Return class logits, or probabilities when apply_softmax=True."""
        # Feature extractor
        z = self.feature_extractor(x)
        z = z.view(x.size(0), -1)  # flatten per sample
        # FC head
        y_pred = self.classifier(z)
        if apply_softmax:
            y_pred = F.softmax(y_pred, dim=1)
        return y_pred
def initialize_model(config, vectorizer, feature_extractor):
    """Construct a transfer-learning ImageModel around *feature_extractor*.

    The number of output classes comes from the category vocabulary so the
    head always matches the data.
    """
    print ("\n==> 🚀 Initializing model:")
    fc_cfg = config["fc"]
    model = ImageModel(feature_extractor=feature_extractor,
                       num_hidden_units=fc_cfg["hidden_dim"],
                       num_classes=len(vectorizer.category_vocab),
                       dropout_p=fc_cfg["dropout_p"])
    print (model.named_modules)
    return model
```
### Operations
```
# Initializing model
model = initialize_model(config=config, vectorizer=vectorizer,
feature_extractor=vgg_19bn.features)
# Finetune last few conv layers and FC layers
for i, param in enumerate(model.feature_extractor.parameters()):
if i < 36:
param.requires_grad = False
else:
param.requires_grad = True
# Training
trainer = Trainer(
dataset=dataset, model=model, model_file=config["model_file"],
device=config["device"], shuffle=config["shuffle"],
num_epochs=config["num_epochs"], batch_size=config["batch_size"],
learning_rate=config["learning_rate"],
early_stopping_criteria=config["early_stopping_criteria"])
trainer.run_train_loop()
# Plot performance
plot_performance(train_state=trainer.train_state,
save_dir=config["save_dir"], show_plot=True)
# Test performance
trainer.run_test_loop()
# Save all results
save_train_state(train_state=trainer.train_state, save_dir=config["save_dir"])
```
Much better performance! If you let it train long enough, we'll actually reach ~95% accuracy :)
## Inference
```
from pylab import rcParams
rcParams['figure.figsize'] = 2, 2
```
### Components
```
class Inference(object):
    """Runs a trained model over a dataset and ranks category probabilities."""
    def __init__(self, model, vectorizer, device="cpu"):
        self.model = model.to(device)
        self.vectorizer = vectorizer
        self.device = device
    def predict_category(self, dataset):
        """Return every category with its softmax probability, best first.

        Expects the dataset to fit in a single batch (batch_size is the
        full dataset length) and reports the first sample's ranking.
        """
        batch_generator = dataset.generate_batches(
            batch_size=len(dataset), shuffle=False, device=self.device)
        self.model.eval()
        for batch_index, batch_dict in enumerate(batch_generator):
            # Probabilities for every class, sorted descending via topk
            y_pred = self.model(batch_dict['image'], apply_softmax=True)
            num_categories = len(self.vectorizer.category_vocab)
            y_prob, indices = torch.topk(y_pred, k=num_categories)
            probabilities = y_prob.detach().to('cpu').numpy()[0]
            indices = indices.detach().to('cpu').numpy()[0]
            results = [
                {'category': self.vectorizer.category_vocab.lookup_index(idx),
                 'probability': prob}
                for prob, idx in zip(probabilities, indices)]
        return results
```
### Operations
```
# Load vectorizer
with open(config["vectorizer_file"]) as fp:
vectorizer = ImageVectorizer.from_serializable(json.load(fp))
# Load the model
model = initialize_model(config=config, vectorizer=vectorizer, feature_extractor=vgg_19bn.features)
model.load_state_dict(torch.load(config["model_file"]))
# Initialize
inference = Inference(model=model, vectorizer=vectorizer, device=config["device"])
# Get a sample
sample = split_df[split_df.split=="test"].iloc[0]
plt.imshow(sample.image)
plt.axis("off")
print ("Actual:", sample.category)
# Inference
category = list(vectorizer.category_vocab.token_to_idx.keys())[0] # random filler category
infer_df = pd.DataFrame([[sample.image, category, "infer"]], columns=['image', 'category', 'split'])
infer_dataset = ImageDataset(df=infer_df, vectorizer=vectorizer, infer=True)
results = inference.predict_category(dataset=infer_dataset)
results
```
# TODO
- segmentation
- interpretability via activation maps
- processing images of different sizes
| github_jupyter |
```
import numpy as np
import pandas as pd
```
# *Series
```
lbl = ['x','y','z']
my_data = [11,22,33]
arr = np.array(my_data)
d = {'a':10, 'b':20,'c':30}
```
## Creating pandas series
### 1) Using Python List
```
pd.Series(my_data) #it looks like numpy array but it has index
pd.Series(my_data,lbl) # setting label as index
```
### 2) Using Numpy Array
```
pd.Series(arr)
```
### 3) Using Python Dictionary
```
pd.Series(d) # it automatically takes key as index
## series can almost hold any type object for eg. as below
lbl
pd.Series(lbl)
pd.Series([sum,print,len])
## Accessing values using index/key
series1 = pd.Series([1,2,3,4], ['India','SriLanka','Bhutan','Nepal'])
series2 = pd.Series([1,2,3,4],['India','Afganistan','Bhutan','Myanmar'])
series1['Nepal']
series2[2]
series1 + series2
```
# *DataFrame
```
import pandas as pd
import numpy as np
from numpy.random import randn
np.random.seed(101)
df = pd.DataFrame(randn(5,4),['a','b','c','d','e'],['w','x','y','z'])
df
### accessing columns
df['w']
type(df['w'])
type(df)
df.w #in sql way
df.isnull()
df.keys()
df[['w','z']]
```
### adding new column
```
df['new'] = df['w'] + df['y']
df
```
### dropping a column
```
df.drop('new', axis=1)
df
df.drop('new', axis=1, inplace=True)
df
df.drop('e') # by default axis=0, so this drops the row labelled 'e'
df.shape
df[['x','y','z']]
## Rows
df
df.loc['b']
df.iloc[2] # integer based even your index is not an integer
```
### dealing with subset
```
df
df.loc['d','y']
df.loc[['b','c'],['x','z']]
df
df>0
df[df>0]
df['w']>0
df[df['w']>0]
df[df['z']<0]
df[df['w']>0][['y','x']]
# OR divide in chuncks
# boolseries = df['w']>0
# rslt =df[boolseries]
# mycols = ['y','x']
# rslt[mycols]
## multiple condition
df[(df['w']>0) & (df['y']<1)]
df
```
### set & reset index
```
### reset index
df.reset_index()
## set index
newindex = 'MH MP KL UP AP'.split()
newindex
df['states'] = newindex
df
df.set_index('states')
```
### Multiple Index
```
# index level
first = ['G1','G1','G1','G2','G2','G2']
second = [1,2,3,1,2,3]
hr_index = list(zip(first , second))
hr_index = pd.MultiIndex.from_tuples(hr_index)
list(zip(first , second))
hr_index
mul_df = pd.DataFrame(randn(6,2),hr_index,['A','B'])
mul_df
mul_df.loc['G1']
mul_df.loc['G1'].loc[2]
# set index name
mul_df.index.names
mul_df.index.names = ['Group','No']
mul_df
mul_df.loc['G1'].loc[3]['B']
# Cross-Section accessing/grabing
mul_df.xs(1,level='No') # it'll return row 1 in both group G1 & G2 which is difficult in case of loc()
```
# Handling Missing Data
```
import numpy as np
import pandas as pd
d = {'A': [8,9,np.nan], 'B': [5,np.nan,np.nan], 'C': [4,5,6]}
df = pd.DataFrame(d)
df
```
### dropna()
```
## pandas will drop any rows or columns if they have null values you can use axis to specify row(axis=0) or column(axis=1)
df.dropna()
df.dropna(axis=1)
df.dropna(thresh=2) ### keeps rows having at least 2 non-NA values; drops the rest
```
### fillna()
```
## can “fill in” NA values with non-NA data in a couple of ways, which we illustrate
df.fillna(value="Fill Value")
df['A'].fillna(value=df['A'].mean()) ## fill missing value in it's mean in column 'A'
df['B'].fillna(value=df['B'].mean())
```
## Groupby
```
## Groupby allows you to group together rows based off a column and perform an aggregate function on them
import numpy as np
import pandas as pd
data = {'Company': ['Ggl','FB','IBM','APL','MSFT','AMZN'],
'Person' : ['Rahul','Dheeraj','Shaam','Shundar','Chandu','Vivek'],
'Sales' : [200,120,310,220,250,125]
}
data
df = pd.DataFrame(data)
df
byComp = df.groupby('Company')
byComp
byComp.mean()
byComp.std()
byComp.sum()
byComp.sum().loc['FB']
# OR
df.groupby('Company').sum().loc['FB']
byComp.count()
#OR
df.groupby('Company').count()
byComp.max()
#OR
df.groupby('Company').max()
byComp.min()
#OR
df.groupby('Company').min()
df.groupby('Company').describe()
df.groupby('Company').describe().transpose()
df.groupby('Company').describe().transpose()['Ggl']
```
## Merging, Joining and Concatenating
```
df1 = pd.DataFrame({'A':['A0','A1','A2','A3'],
'B':['B0','B1','B2','B3'],
'C':['C0','C1','C2','C3'],
'D':['D0','D1','D2','D3']
},
index = [0,1,2,3])
df2 = pd.DataFrame({'A':['A4','A5','A6','A7'],
'B':['B4','B5','B6','B7'],
'C':['C4','C5','C6','C7'],
'D':['D4','D5','D6','D7']
},
index = [4,5,6,7])
df3 = pd.DataFrame({'A':['A8','A9','A10','A11'],
'B':['B8','B9','B10','B11'],
'C':['C8','C9','C10','C11'],
'D':['D8','D9','D10','D11']
},
index = [8,9,10,11])
df1
df2
df3
```
### Concatenation
```
# you can use ps.concat and pass in a list of DataFrames to concatenate together. Dimension should match along the axis you are concatenating
pd.concat([df1,df2,df3]) # along rows
pd.concat([df1,df2,df3], axis=1) # along column
### Another Dataframe
left = pd.DataFrame({'key':['K0','K1','K2','K3'],
'A':['A0','A1','A2','A3'],
'B':['B0','B1','B2','B3']
})
right = pd.DataFrame({'key':['K0','K1','K2','K3'],
'C':['C0','C1','C2','C3'],
'D':['D0','D1','D2','D3']
})
left
right
```
### Merging
```
# The merge function allows you to merge DataFrames together using a similar logic as merging SQL Tables together, For example:
pd.merge(left,right,how='inner',on='key')
# or to show more complicated example
left = pd.DataFrame({'key1':['K0','K1','K2','K3'],
'key2':['K0','K1','K0','K1'],
'A':['A0','A1','A2','A3'],
'B':['B0','B1','B2','B3']
})
right = pd.DataFrame({'key1':['K0','K1','K2','K3'],
'key2':['K0','K0','K0','K0'],
'C':['C0','C1','C2','C3'],
'D':['D0','D1','D2','D3']
})
pd.merge(left,right, on=['key1','key2'])
pd.merge(left,right,how='outer',on=['key1','key2'])
pd.merge(left,right,how='right',on=['key1','key2'])
```
### Joining
```
## Joining is a convenient method for combining the columns of two potentially differently-indexed DataFrames into a single result DataFrame
left = pd.DataFrame({'A':['A0','A1','A2'],
'B':['B0','B1','B2'],
},index=['K0','K1','K2'])
right = pd.DataFrame({'C':['C0','C2','C3'],
'D':['D0','D2','D3'],
},index=['K0','K2','K3'])
left.join(right)
left.join(right, how='outer')
```
| github_jupyter |
# Data description & Problem statement:
This data set contains a total 5820 evaluation scores provided by students from Gazi University in Ankara (Turkey). There is a total of 28 course specific questions and additional 5 attributes. Please check the description at: http://archive.ics.uci.edu/ml/datasets/turkiye+student+evaluation
* Dataset is imbalanced. The data has 5820 rows and 33 variables.
* This is a classification problem. The classification goal is to predict number of times the student is taking this course: 0 (passed) and >0 (failed).
# Workflow:
- Load the dataset, and define the required functions (e.g. for detecting the outliers)
- Data Cleaning/Wrangling: Manipulate outliers, missing data or duplicate values, Encode categorical variables, etc.
- Split data into training & test parts (utilize the training part for training and test part for the final evaluation of model)
# Model Training:
- Train an ensemble of Deep Neural Network models by Keras/Tensorflow, and finally aggregate the results (Note: I've utilized SMOTE technique via imblearn toolbox to synthetically over-sample the minority category and even the dataset imbalances.)
# Model Evaluation:
- Evaluate the Neural Network model on Test Dataset, by calculating:
- AUC score
- Confusion matrix
- ROC curve
- Precision-Recall curve
- Average precision
```
import keras
import sklearn
import tensorflow as tf
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
import random as rn
%matplotlib inline
import os
os.environ['PYTHONHASHSEED'] = '0'
# for reproducible results:
np.random.seed(42)
rn.seed(42)
tf.set_random_seed(42)
from keras import backend as K
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import warnings
warnings.filterwarnings("ignore")
df=pd.read_csv('C:/Users/rhash/Documents/Datasets/wine quality/winequality-red.csv', sep=';')
df['quality']=df['quality'].map({3:'L', 4:'L', 5:'L', 6:'L', 7:'H', 8:'H'})
df['quality']=df['quality'].map({'L':0, 'H':1})
# To Shuffle the data:
np.random.seed(42)
df=df.reindex(np.random.permutation(df.index))
df.reset_index(inplace=True, drop=True)
df.head(5)
df.info()
# Removes outliers (all rows) by one of Z-score, MAD or IQR-based approaches:
def remove_outliers(df, name, thresh=3, method="Z_Score"):
    """Drop (in place) every row of `df` that is an outlier in any listed column.

    Args:
        df: DataFrame filtered in place.
        name: list of column names to scan for outliers.
        thresh: standard-deviation multiplier (used by "Z_Score" only).
        method: one of "Z_Score", "MAD" or "IQR".

    Returns:
        None; outlier rows are removed from `df` in place.
    """
    rows_to_drop = []
    for col in name:  # renamed from `name` to avoid shadowing the parameter
        if method == "Z_Score":
            # Flag values at least `thresh` sample standard deviations from the mean.
            drop_rows = df.index[(np.abs(df[col] - df[col].mean())
                                  >= (thresh * df[col].std()))]
        elif method == "MAD":
            # Modified z-score (Iglewicz & Hoaglin), fixed cutoff 3.5.
            median = np.median(df[col], axis=0)
            mad = np.median(np.abs(df[col] - median), axis=0)
            modified_z_score = 0.6745 * (df[col] - median) / mad
            # Fix: compare |modified z|; the original compared the signed score,
            # so low-side outliers were never removed.
            drop_rows = df.index[np.abs(modified_z_score) >= 3.5]
        elif method == "IQR":
            # Classic 1.5*IQR fences around the interquartile range.
            quartile_1, quartile_3 = np.percentile(df[col], [25, 75])
            iqr = np.abs(quartile_3 - quartile_1)
            lower_bound = quartile_1 - (iqr * 1.5)
            upper_bound = quartile_3 + (iqr * 1.5)
            drop_rows = df.index[(df[col] > upper_bound) | (df[col] < lower_bound)]
        rows_to_drop.extend(list(drop_rows))
    # Deduplicate first: a row can be flagged by several columns.
    df.drop(np.array(list(set(rows_to_drop))), axis=0, inplace=True)
remove_outliers(df, ['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
'pH', 'sulphates', 'alcohol'], thresh=9)
X=df[['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
'pH', 'sulphates', 'alcohol']]
y=df['quality']
from sklearn.preprocessing import StandardScaler, MinMaxScaler
scalor_X=MinMaxScaler().fit(X)
X=scalor_X.transform(X)
# we build a hold_out dataset for the final validation:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Resampling:
from imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler
#X_r, y_r = SMOTE().fit_sample(X_train, y_train)
X_r, y_r = RandomOverSampler(random_state=0).fit_sample(X_train, y_train)
from keras.utils import to_categorical
y_r=to_categorical(y_r)
y_test=to_categorical(y_test)
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report, f1_score
class EarlyStopByAUC(keras.callbacks.Callback):
    """Keras callback that stops training once validation ROC-AUC reaches a target.

    After every epoch the ROC-AUC is computed on the validation data, appended
    to the module-level list ``L`` (the training curve collected by this
    notebook), and training is stopped when the score reaches ``value``.
    """

    def __init__(self, value=0, verbose=0):
        # Fix: the original called super(keras.callbacks.Callback, self),
        # which skips Callback.__init__ entirely.
        super(EarlyStopByAUC, self).__init__()
        self.value = value    # AUC threshold that triggers early stopping
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        # Fix: use self.model (bound by Keras during fit) rather than the
        # global `model`; also avoid the mutable default argument logs={}.
        score = roc_auc_score(self.validation_data[1],
                              self.model.predict_proba(self.validation_data[0]))
        # NOTE(review): relies on a module-level list L — confirm it is
        # (re)initialized before each call to fit().
        L.append(score)
        if score >= self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping Threshold" % epoch)
            self.model.stop_training = True
# KNN with Cross-Validation:
from sklearn.metrics import roc_auc_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.regularizers import l2, l1
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.utils.class_weight import compute_sample_weight
np.random.seed(42)
rn.seed(42)
tf.set_random_seed(42)
model = Sequential()
model.add(Dense(300, input_dim=X.shape[1], activation='relu', kernel_initializer = 'uniform',
activity_regularizer=l2(0.001))) # Hidden 1
model.add(Dropout(0.2))
model.add(Dense(400, activation='relu', kernel_initializer = 'uniform',
activity_regularizer=l2(0.001))) # Hidden 2
model.add(Dropout(0.2))
model.add(Dense(y_r.shape[1], activation='softmax', kernel_initializer='uniform')) # Output
L=[]
model.compile(loss='categorical_crossentropy', optimizer='adam')
monitor = EarlyStopByAUC(value =0.95, verbose =1) #EarlyStopping(monitor='loss', min_delta=0.001, patience=5, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath="best_weights.hdf5", verbose=0, save_best_only=True) # save best model
history=model.fit(X_r,y_r, epochs=100, batch_size=16, validation_data=(X_test, y_test), callbacks=[monitor, checkpointer], verbose=0)
model.load_weights('best_weights.hdf5')
# Measure this fold's accuracy
auc_test=roc_auc_score(y_test, model.predict_proba(X_test))
auc_train=roc_auc_score(to_categorical(y_train), model.predict_proba(X_train))
print('Training auc score: ', auc_train, "\n")
print('Validation auc score: ', auc_test)
# list all data in history
#print(history.history.keys())
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
from sklearn.metrics import roc_curve, auc, confusion_matrix, classification_report
# Plot a confusion matrix.
# cm is the confusion matrix, names are the names of the classes.
def plot_confusion_matrix(cm, names, title='Confusion matrix', cmap=plt.cm.Blues):
# Renders the 2-D confusion matrix `cm` as a heatmap with the class labels
# `names` on both axes. Draws onto the current matplotlib figure; the caller
# is expected to call plt.show() afterwards.
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
# Colorbar gives the value scale (rates when cm has been row-normalized).
plt.colorbar()
tick_marks = np.arange(len(names))
# One tick per class on each axis; x labels rotated for readability.
plt.xticks(tick_marks, names, rotation=45)
plt.yticks(tick_marks, names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
class_names=["0", "1"]
# Compute confusion matrix
cm = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(model.predict(X_test), axis=1))
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
# Normalize the confusion matrix by row (i.e by the number of samples in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, class_names, title='Normalized confusion matrix')
plt.show()
# Classification report:
report=classification_report(np.argmax(y_test, axis=1), np.argmax(model.predict(X_test), axis=1))
print(report)
# ROC curve & auc:
from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, average_precision_score
fpr, tpr, thresholds=roc_curve(np.array(y_test[:, 1]), model.predict_proba(X_test)[:, 1] , pos_label=1)
roc_auc=roc_auc_score(np.array(y_test), model.predict_proba(X_test))
plt.figure()
plt.step(fpr, tpr, color='darkorange', lw=2, label='ROC curve (auc = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', alpha=0.4, lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve')
plt.legend(loc="lower right")
plt.plot([cm_normalized[0,1]], [cm_normalized[1,1]], 'or')
plt.show()
# Precision-Recall trade-off:
precision, recall, thresholds=precision_recall_curve(y_test[:, 1], model.predict_proba(X_test)[:, 1], pos_label=1)
ave_precision=average_precision_score(y_test, model.predict_proba(X_test))
plt.step(recall, precision, color='navy')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0, 1.001])
plt.ylim([0, 1.02])
plt.title('Precision-Recall curve: AP={0:0.2f}'.format(ave_precision))
plt.plot([cm_normalized[1,1]], [cm[1,1]/(cm[1,1]+cm[0,1])], 'ob')
plt.show()
```
| github_jupyter |
<a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width="400" align="center"></a>
<h1 align="center"><font size="5">CONTENT-BASED FILTERING</font></h1>
Recommendation systems are a collection of algorithms used to recommend items to users based on information taken from the user. These systems have become ubiquitous, and can be commonly seen in online stores, movies databases and job finders. In this notebook, we will explore Content-based recommendation systems and implement a simple version of one using Python and the Pandas library.
### Table of contents
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ol>
<li><a href="#ref1">Acquiring the Data</a></li>
<li><a href="#ref2">Preprocessing</a></li>
<li><a href="#ref3">Content-Based Filtering</a></li>
</ol>
</div>
<br>
<a id="ref1"></a>
# Acquiring the Data
To acquire and extract the data, simply run the following Bash scripts:
Dataset acquired from [GroupLens](http://grouplens.org/datasets/movielens/). Lets download the dataset. To download the data, we will use **`!wget`** to download it from IBM Object Storage.
__Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)
```
!wget -O moviedataset.zip https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/moviedataset.zip
print('unziping ...')
!unzip -o -j moviedataset.zip
```
Now you're ready to start working with the data!
<a id="ref2"></a>
# Preprocessing
First, let's get all of the imports out of the way:
```
#Dataframe manipulation library
import pandas as pd
#Math functions, we'll only need the sqrt function so let's import only that
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
Now let's read each file into their Dataframes:
```
#Storing the movie information into a pandas dataframe
movies_df = pd.read_csv('movies.csv')
#Storing the user information into a pandas dataframe
ratings_df = pd.read_csv('ratings.csv')
#Head is a function that gets the first N rows of a dataframe. N's default is 5.
movies_df.head()
```
Let's also remove the year from the __title__ column by using pandas' replace function and store in a new __year__ column.
```
#Using regular expressions to find a year stored between parentheses
#We specify the parantheses so we don't conflict with movies that have years in their titles
movies_df['year'] = movies_df.title.str.extract('(\(\d\d\d\d\))',expand=False)
#Removing the parentheses
movies_df['year'] = movies_df.year.str.extract('(\d\d\d\d)',expand=False)
#Removing the years from the 'title' column
movies_df['title'] = movies_df.title.str.replace('(\(\d\d\d\d\))', '')
#Applying the strip function to get rid of any ending whitespace characters that may have appeared
movies_df['title'] = movies_df['title'].apply(lambda x: x.strip())
movies_df.head()
```
With that, let's also split the values in the __Genres__ column into a __list of Genres__ to simplify future use. This can be achieved by applying Python's split string function on the correct column.
```
#Every genre is separated by a | so we simply have to call the split function on |
movies_df['genres'] = movies_df.genres.str.split('|')
movies_df.head()
```
Since keeping genres in a list format isn't optimal for the content-based recommendation system technique, we will use the One Hot Encoding technique to convert the list of genres to a vector where each column corresponds to one possible value of the feature. This encoding is needed for feeding categorical data. In this case, we store every different genre in columns that contain either 1 or 0. 1 shows that a movie has that genre and 0 shows that it doesn't. Let's also store this dataframe in another variable since genres won't be important for our first recommendation system.
```
#Copying the movie dataframe into a new one since we won't need to use the genre information in our first case.
moviesWithGenres_df = movies_df.copy()
#For every row in the dataframe, iterate through the list of genres and place a 1 into the corresponding column
for index, row in movies_df.iterrows():
for genre in row['genres']:
moviesWithGenres_df.at[index, genre] = 1
#Filling in the NaN values with 0 to show that a movie doesn't have that column's genre
moviesWithGenres_df = moviesWithGenres_df.fillna(0)
moviesWithGenres_df.head()
```
Next, let's look at the ratings dataframe.
```
ratings_df.head()
```
Every row in the ratings dataframe has a user id associated with at least one movie, a rating and a timestamp showing when they reviewed it. We won't be needing the timestamp column, so let's drop it to save on memory.
```
#Drop removes a specified row or column from a dataframe
ratings_df = ratings_df.drop('timestamp', 1)
ratings_df.head()
```
<a id="ref3"></a>
# Content-Based recommendation system
Now, let's take a look at how to implement __Content-Based__ or __Item-Item recommendation systems__. This technique attempts to figure out what a user's favourite aspects of an item is, and then recommends items that present those aspects. In our case, we're going to try to figure out the input's favorite genres from the movies and ratings given.
Let's begin by creating an input user to recommend movies to:
Notice: To add more movies, simply increase the amount of elements in the __userInput__. Feel free to add more in! Just be sure to write it in with capital letters and if a movie starts with a "The", like "The Matrix" then write it in like this: 'Matrix, The' .
```
userInput = [
{'title':'Breakfast Club, The', 'rating':5},
{'title':'Toy Story', 'rating':3.5},
{'title':'Jumanji', 'rating':2},
{'title':"Pulp Fiction", 'rating':5},
{'title':'Akira', 'rating':4.5}
]
inputMovies = pd.DataFrame(userInput)
inputMovies
```
#### Add movieId to input user
With the input complete, let's extract the input movie's ID's from the movies dataframe and add them into it.
We can achieve this by first filtering out the rows that contain the input movie's title and then merging this subset with the input dataframe. We also drop unnecessary columns for the input to save memory space.
```
#Filtering out the movies by title
inputId = movies_df[movies_df['title'].isin(inputMovies['title'].tolist())]
#Then merging it so we can get the movieId. It's implicitly merging it by title.
inputMovies = pd.merge(inputId, inputMovies)
#Dropping information we won't use from the input dataframe
inputMovies = inputMovies.drop('genres', 1).drop('year', 1)
#Final input dataframe
#If a movie you added in above isn't here, then it might not be in the original
#dataframe or it might spelled differently, please check capitalisation.
inputMovies
```
We're going to start by learning the input's preferences, so let's get the subset of movies that the input has watched from the Dataframe containing genres defined with binary values.
```
#Filtering out the movies from the input
userMovies = moviesWithGenres_df[moviesWithGenres_df['movieId'].isin(inputMovies['movieId'].tolist())]
userMovies
```
We'll only need the actual genre table, so let's clean this up a bit by resetting the index and dropping the movieId, title, genres and year columns.
```
#Resetting the index to avoid future issues
userMovies = userMovies.reset_index(drop=True)
#Dropping unnecessary columns to save memory and to avoid issues
userGenreTable = userMovies.drop('movieId', 1).drop('title', 1).drop('genres', 1).drop('year', 1)
userGenreTable
```
Now we're ready to start learning the input's preferences!
To do this, we're going to turn each genre into weights. We can do this by using the input's reviews and multiplying them into the input's genre table and then summing up the resulting table by column. This operation is actually a dot product between a matrix and a vector, so we can simply accomplish by calling Pandas's "dot" function.
```
inputMovies['rating']
#Dot produt to get weights
userProfile = userGenreTable.transpose().dot(inputMovies['rating'])
#The user profile
userProfile
```
Now, we have the weights for every of the user's preferences. This is known as the User Profile. Using this, we can recommend movies that satisfy the user's preferences.
Let's start by extracting the genre table from the original dataframe:
```
#Now let's get the genres of every movie in our original dataframe
genreTable = moviesWithGenres_df.set_index(moviesWithGenres_df['movieId'])
#And drop the unnecessary information
genreTable = genreTable.drop('movieId', 1).drop('title', 1).drop('genres', 1).drop('year', 1)
genreTable.head()
genreTable.shape
```
With the input's profile and the complete list of movies and their genres in hand, we're going to take the weighted average of every movie based on the input profile and recommend the top twenty movies that most satisfy it.
```
#Multiply the genres by the weights and then take the weighted average
recommendationTable_df = ((genreTable*userProfile).sum(axis=1))/(userProfile.sum())
recommendationTable_df.head()
#Sort our recommendations in descending order
recommendationTable_df = recommendationTable_df.sort_values(ascending=False)
#Just a peek at the values
recommendationTable_df.head()
```
Now here's the recommendation table!
```
#The final recommendation table
movies_df.loc[movies_df['movieId'].isin(recommendationTable_df.head(20).keys())]
```
### Advantages and Disadvantages of Content-Based Filtering
##### Advantages
* Learns user's preferences
* Highly personalized for the user
##### Disadvantages
* Doesn't take into account what others think of the item, so low quality item recommendations might happen
* Extracting data is not always intuitive
* Determining what characteristics of the item the user dislikes or likes is not always obvious
<h2>Want to learn more?</h2>
IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a>
Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a>
<h3>Thanks for completing this lesson!</h3>
<h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a></h4>
<p><a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>
<hr>
<p>Copyright © 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
| github_jupyter |
# Importing Necessary Modules
```
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use("ggplot")
plt.rcParams['figure.figsize'] = (12, 8)
import seaborn as sns
sns.set(style='whitegrid', color_codes=True)
import warnings
warnings.filterwarnings('ignore')
from sklearn.feature_selection import chi2,f_classif, mutual_info_classif, SelectKBest
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import StratifiedKFold
from yellowbrick.model_selection import CVScores
from sklearn.metrics import confusion_matrix
```
# Load Dataset
```
df = pd.read_csv('../../datasets/PCOS_clean_data_without_infertility.csv')
df.head(12)
df.info()
X = df.drop(["PCOS (Y/N)",
"Blood Group",
"Height(Cm)",
"Pregnant(Y/N)",
"PRG(ng/mL)",
"RR (breaths/min)",
"No. of aborptions",
"FSH/LH",
"I beta-HCG(mIU/mL)",
"II beta-HCG(mIU/mL)",
"TSH (mIU/L)",
"FSH(mIU/mL)",
"LH(mIU/mL)",
"Waist:Hip Ratio",
"PRL(ng/mL)",
"BP _Diastolic (mmHg)",
"BP _Systolic (mmHg)",
"Reg.Exercise(Y/N)",
"RBS(mg/dl)"
],axis=1)
y = df[["PCOS (Y/N)"]]
```
# Data Augmentation
## Resampling on Complete Dataset
```
from imblearn.combine import SMOTEENN
resample = SMOTEENN(sampling_strategy="auto", random_state =0)
X, y = resample.fit_sample(X, y)
```
## Splitting
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0, stratify= y)
X_train, X_dev, y_train, y_dev = train_test_split(X_train, y_train, test_size=0.15, random_state=0, stratify= y_train)
X_train.shape
X_test.shape
X_dev.shape
```
## Scaling
```
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler
scaler = MinMaxScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_train = pd.DataFrame(X_train)
X_dev = scaler.transform(X_dev)
X_dev = pd.DataFrame(X_dev)
X_test = scaler.transform(X_test)
X_test = pd.DataFrame(X_test)
# Setting Column Names from dataset
X_train.columns = X.columns
X_test.columns = X.columns
X_dev.columns = X.columns
```
# CNN
```
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
# Reshape each 2-D feature matrix to (samples, features, 1) so it can feed
# the Conv layers below.
# Fix: the original reshaped X_test and X_dev with a trailing dimension of 2,
# which both mismatches the element count (np.reshape raises ValueError) and
# is inconsistent with X_train's trailing dimension of 1.
X_train = X_train.to_numpy().reshape(X_train.shape[0], X_train.shape[1], 1)
X_test = X_test.to_numpy().reshape(X_test.shape[0], X_test.shape[1], 1)
X_dev = X_dev.to_numpy().reshape(X_dev.shape[0], X_dev.shape[1], 1)
from keras.utils import to_categorical
y_train = to_categorical(y_train, 2)
y_test = to_categorical(y_test, 2)
y_dev = to_categorical(y_dev, 2)
X_train.shape[:]
model = models.Sequential()
model.add(layers.Conv2D(128, (3), activation='relu', input_shape=X_train.shape[1:]))
model.add(layers.Conv2D(64, (3), activation='relu'))
model.add(layers.Conv2D(32, (3), activation='relu'))
model.add(layers.Conv1D(64, (3), activation='relu'))
model.add(layers.Conv1D(64, (5), activation='relu'))
model.add(layers.Conv1D(32, (5), activation='relu'))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(2, activation='sigmoid'))
model.summary()
callbacks = [
tf.keras.callbacks.EarlyStopping(patience=50, monitor='val_loss', mode='min'),
tf.keras.callbacks.TensorBoard(log_dir='logs')]
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['AUC','accuracy', 'Precision', 'Recall'])
history = model.fit(X_train,y_train, epochs=200, validation_data= (X_dev,y_dev));
plt.plot(history.history['accuracy'], label='Accuracy')
plt.plot(history.history['val_accuracy'], label = 'Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Scores')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label = 'Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper right')
model.evaluate(X_test, y_test, verbose=1)
```
| github_jupyter |
### Step 1 Download and prepare data
```
# Directory that will hold the WikiText-2 corpus used for BERT pretraining.
DATA_DIR = 'PATH_TO_THE_DATA_DIR'
# This example is for demonstration purposes
# Please refer to the corresponding NLP tutorial on NeMo documentation
# Download and extract WikiText-2 into DATA_DIR (IPython shell escape).
! bash get_wkt2.sh $DATA_DIR
# verify data is there
! ls -l $DATA_DIR/wikitext-2
# Prepare tokenization model
# Trains a SentencePiece vocabulary/model from the training split; produces
# the tokenizer.model file loaded in Step 2.
! python create_vocab.py --train_path=$DATA_DIR/wikitext-2/train.txt
```
### Step 2 - import necessary packages, define hyperparameters, create tokenizer instance
```
import os
import torch
import nemo
from nemo import logging
import nemo.collections.nlp as nemo_nlp
from nemo.collections.nlp.callbacks.lm_bert_callback import eval_iter_callback, \
    eval_epochs_done_callback
from nemo.utils.lr_policies import CosineAnnealing

# --- Pretraining hyperparameters (bert-base sized encoder, shrunk for demo) ---
BATCHES_PER_STEP = 1          # gradient-accumulation batches per optimizer step
BATCH_SIZE = 64               # training batch size
BATCH_SIZE_EVAL = 16          # evaluation batch size
D_MODEL = 768                 # transformer hidden size
D_INNER = 3072                # feed-forward (intermediate) layer size
HIDDEN_ACT = "relu"           # activation inside the transformer blocks
LEARNING_RATE = 0.0001
LR_WARMUP_PROPORTION = 0.05   # fraction of total steps spent warming up the LR
MASK_PROBABILITY = 0.15       # MLM token-masking rate
MAX_SEQ_LENGTH = 128          # maximum tokens per input sequence
NUM_EPOCHS = 1
NUM_HEADS = 12                # attention heads per layer
# Note that for Demo purposes NUM_EPOCHS above is just one epoch and the
# encoder is reduced to a single transformer layer here.
NUM_LAYERS = 1
OPTIMIZER = "adam_w"
# Instantiate neural factory with supported backend
neural_factory = nemo.core.NeuralModuleFactory(
    # If you're training with multiple GPUs, you should handle this value with
    # something like argparse. See examples/nlp/bert_pretraining.py for an example.
    local_rank=None,
    # If you're training with mixed precision, this should be set to mxprO1 or mxprO2.
    # See https://nvidia.github.io/apex/amp.html#opt-levels for more details.
    optimization_level=nemo.core.Optimization.mxprO1,
    # If you're training with multiple GPUs, this should be set to
    # nemo.core.DeviceType.AllGpu
    placement=nemo.core.DeviceType.GPU)
# tokenizer.model file was created during Step 1
tokenizer = nemo_nlp.data.SentencePieceTokenizer(model_path="tokenizer.model")
# Register BERT's special tokens (e.g. [CLS], [SEP], [MASK]) with the tokenizer.
special_tokens = nemo_nlp.data.get_bert_special_tokens('bert')
tokenizer.add_special_tokens(special_tokens)
```
#### Instantiate necessary neural modules
```
# Randomly-initialized BERT encoder; dimensions mirror bert-base except for
# the reduced NUM_LAYERS set above.
bert_model = nemo_nlp.nm.trainables.huggingface.BERT(
    vocab_size=tokenizer.vocab_size,
    num_hidden_layers=NUM_LAYERS,
    hidden_size=D_MODEL,
    num_attention_heads=NUM_HEADS,
    intermediate_size=D_INNER,
    max_position_embeddings=MAX_SEQ_LENGTH,
    hidden_act=HIDDEN_ACT
)
# Masked Language Modeling Loss
# Token-level classifier over the full vocabulary for the MLM objective.
mlm_classifier = nemo_nlp.nm.trainables.BertTokenClassifier(D_MODEL,
                                                            num_classes=tokenizer.vocab_size,
                                                            activation=HIDDEN_ACT,
                                                            log_softmax=True)
mlm_loss = nemo_nlp.nm.losses.SmoothedCrossEntropyLoss()
# Next Sentence Prediction Loss
# Sequence-level binary classifier (is sentence B the true next sentence?).
nsp_classifier = nemo_nlp.nm.trainables.SequenceClassifier(D_MODEL,
                                                           num_classes=2,
                                                           num_layers=2,
                                                           activation='tanh',
                                                           log_softmax=False)
nsp_loss = nemo.backends.pytorch.common.CrossEntropyLossNM()
# Combined training objective: MLM loss + NSP loss.
bert_loss = nemo.backends.pytorch.common.LossAggregatorNM(num_inputs=2)
# Data layers stream masked sentence pairs from the WikiText-2 splits.
train_data_layer = nemo_nlp.nm.data_layers.BertPretrainingDataLayer(
    tokenizer=tokenizer,
    dataset=os.path.join(DATA_DIR, "wikitext-2", "train.txt"),
    max_seq_length=MAX_SEQ_LENGTH,
    mask_probability=MASK_PROBABILITY,
    batch_size=BATCH_SIZE,
    shuffle=True
)
eval_data_layer = nemo_nlp.nm.data_layers.BertPretrainingDataLayer(
    tokenizer=tokenizer,
    dataset=os.path.join(DATA_DIR, "wikitext-2", "valid.txt"),
    max_seq_length=MAX_SEQ_LENGTH,
    mask_probability=MASK_PROBABILITY,
    batch_size=BATCH_SIZE_EVAL,
    shuffle=False
)
```
### Step 3 - Describe training and evaluation DAGs
```
# Training DAG: data layer -> shared BERT encoder -> (MLM head, NSP head)
# -> aggregated loss.
input_data = train_data_layer()
hidden_states = bert_model(input_ids=input_data.input_ids,
                           token_type_ids=input_data.input_type_ids,
                           attention_mask=input_data.input_mask)
mlm_logits = mlm_classifier(hidden_states=hidden_states)
t_mlm_loss = mlm_loss(logits=mlm_logits, labels=input_data.output_ids, output_mask=input_data.output_mask)
nsp_logits = nsp_classifier(hidden_states=hidden_states)
t_nsp_loss = nsp_loss(logits=nsp_logits, labels=input_data.labels)
loss = bert_loss(loss_1=t_mlm_loss, loss_2=t_nsp_loss)
# Evaluation DAG: same modules (weights shared), fed from the eval data layer.
input_data_eval = eval_data_layer()
e_hidden_states = bert_model(input_ids=input_data_eval.input_ids,
                             token_type_ids=input_data_eval.input_type_ids,
                             attention_mask=input_data_eval.input_mask)
e_mlm_logits = mlm_classifier(hidden_states=e_hidden_states)
e_mlm_loss = mlm_loss(logits=e_mlm_logits, labels=input_data_eval.output_ids, output_mask=input_data_eval.output_mask)
e_nsp_logits = nsp_classifier(hidden_states=e_hidden_states)
e_nsp_loss = nsp_loss(logits=e_nsp_logits, labels=input_data_eval.labels)
# NOTE(review): e_loss is built but not passed to the evaluator callback
# below, which tracks the two component losses instead.
e_loss = bert_loss(loss_1=e_mlm_loss, loss_2=e_nsp_loss)
# Log the combined training loss as training progresses.
callback_loss = nemo.core.SimpleLossLoggerCallback(
    tensors=[loss],
    print_func=lambda x: logging.info("Loss: {:.3f}".format(x[0].item())))
train_data_size = len(train_data_layer)
# If you're training on multiple GPUs, this should be
# train_data_size / (batch_size * batches_per_step * num_gpus)
steps_per_epoch = int(train_data_size / (BATCHES_PER_STEP * BATCH_SIZE))
# Run evaluation once per epoch.
callback_eval = nemo.core.EvaluatorCallback(
    eval_tensors=[e_mlm_loss, e_nsp_loss],
    user_iter_callback=eval_iter_callback,
    user_epochs_done_callback=eval_epochs_done_callback,
    eval_step=steps_per_epoch)
# Cosine LR decay with linear warm-up over LR_WARMUP_PROPORTION of all steps.
lr_policy = CosineAnnealing(NUM_EPOCHS * steps_per_epoch,
                            warmup_ratio=LR_WARMUP_PROPORTION)
neural_factory.train(tensors_to_optimize=[loss],
                     lr_policy=lr_policy,
                     callbacks=[callback_loss, callback_eval],
                     batches_per_step=BATCHES_PER_STEP,
                     optimizer=OPTIMIZER,
                     optimization_params={
                         "batch_size": BATCH_SIZE,
                         "num_epochs": NUM_EPOCHS,
                         "lr": LEARNING_RATE,
                         "betas": (0.95, 0.98),
                         "grad_norm_clip": None
                     })
```
| github_jupyter |
## Data curation example
This notebook provides two data curation examples using data produced in the PV lab.
### 1. perovskite process data
The first example is the processing conditions for solution-synthesizing perovskite materials. We want to understand the relationship between the crystal dimensionality of the perovskite and the process conditions.
Let's take a look at the data first
```
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# Load the perovskite process conditions (first column is the row index).
process_data = pd.read_excel('process.xlsx', index_col=0)
# Temporarily widen the display so all 104 rows can be inspected, then
# restore the default truncated view.
pd.set_option('display.max_rows', 104)
process_data
pd.set_option('display.max_rows', 10)
process_data.shape
```
We have 104 data points with different processing conditions. However, the inputs are presented either as strings or as dates. We also have missing labels (NaN). How do we convert this into a machine-readable format?
First, we remove the data points that are not labelled.
```
# Missing-value count per column before cleaning.
process_data.isnull().sum(axis=0)
# Drop unlabelled rows (note the trailing space in the 'Phase ' column name).
process_data = process_data.dropna(subset=['Phase '])
# Re-count to confirm the label column no longer has missing entries.
process_data.isnull().sum(axis=0)
```
Assign input and output
```
# Target: crystal phase. Features: every column between the first and last.
y = process_data['Phase ']
# BUG FIX: take an explicit copy — the factorize loop that follows mutates X
# column-by-column, and mutating a slice of process_data triggers pandas'
# SettingWithCopyWarning and can silently fail to write.
X = process_data.iloc[:, 1:-1].copy()
```
We can factorize the strings and dates to convert them to numbers, even for the NaNs. There are a number of methods to deal with missing data. In this case, we treat missing data as one categorical variable. Other methods include using the average, nearest neighbours, or zero-filling NaNs. You can refer to this [tutorial](https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html)
```
X
# Integer-encode every column; pandas' factorize assigns -1 to NaN, so
# missing data becomes its own category.
for column in X.columns:
    X[column] = pd.factorize(X[column])[0]
```
The NaNs are assigned to -1 using the factorize function from Pandas.
```
X
```
Now both input and output is machine readable, we can train a classifier to map process conditions to perovskite's phase. We first standardize the input data with zero mean and unit variance.
```
# Standardize features to zero mean / unit variance.
stdsc=StandardScaler()
X_std=stdsc.fit_transform(X)
X_std
# Hold out a third of the data for testing. NOTE(review): no random_state is
# fixed here, so the split (and the scores below) vary between runs.
X_train, X_test, y_train, y_test = train_test_split( X_std, y, test_size=0.33)
feat_labels = X.columns
# Random forest classifier mapping process conditions to perovskite phase.
forest=RandomForestClassifier(n_estimators=1000,n_jobs=-1)
forest.fit(X_train,y_train)
# Rank features by impurity-based importance, highest first.
importances=forest.feature_importances_
indices=np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
    print ('%2d) %-*s %f'%(f,30,feat_labels[indices[f]],importances[indices[f]]))
coefs=forest.feature_importances_
feat_labels = X.columns
# make importances relative to max importance
feature_importance = abs(100.0 * (coefs / abs(coefs).max()))
# Plot the ten most important features as a horizontal bar chart.
sorted_idx = np.argsort(feature_importance)[-10:]
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos,feat_labels[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
print ('RF train Accuracy:%f'%forest.score(X_train,y_train))
print ('RF test Accuracy:%f'%forest.score(X_test,y_test))
```
### 2. Silver nanoparticle process data
The second example for data curation is the processing conditions for solution-synthesizing AgNPs. In this case study, the inputs are nicely arranged in numerical format. However, the output is an absorption spectrum. We need to convert the output into scalar values.
Let's take a look at the data first
```
raw_input = pd.read_excel('AgNPs.xlsx','Sheet1')
raw_spectra = pd.read_excel('AgNPs.xlsx','Sheet2',index_col=0)
raw_input
```
We can remove certain features that are not useful
```
# Drop the first three (non-feature) columns; keep only process parameters.
raw_input = raw_input.drop(raw_input.columns[[0,1,2]],axis=1)
raw_input
raw_spectra
# Preview the first ten measured absorption spectra.
plt.plot(raw_spectra.iloc[:,0:10])
plt.xlabel('wavelength (nm)')
plt.ylabel ('intensity (a.u)')
# Target spectrum to match (wavelength in column 0, intensity in column 2).
raw_target = pd.read_excel('AgNPs.xlsx','Target')
plt.plot(raw_target.iloc[:,0],raw_target.iloc[:,2])
```
To convert the output to a scalar value, we define a loss function that measures how similar the measured spectrum is to the target spectrum. We use the product of cosine similarity and a scaling function.
```
from sklearn.metrics.pairwise import cosine_similarity
#scaler the target spectra from 0 to 1
scaler = MinMaxScaler()
def step_int(x):
    """Scaling coefficient for a spectrum's peak intensity.

    Returns 1 for peaks in the acceptable band [0.7, 1.2], a linear ramp
    x/0.7 for weak peaks in (0, 0.7), and 0 otherwise (saturated peaks
    above 1.2, or non-positive / absent signal).

    BUG FIX: the original left `y` unassigned for x <= 0, raising
    UnboundLocalError on flat or empty spectra; such inputs now get weight 0.
    """
    if x > 1.2:
        return 0
    if 0.7 <= x <= 1.2:
        return 1
    if 0 < x < 0.7:
        return x / 0.7
    # x <= 0: no usable signal -> zero weight.
    return 0
def spectra_loss_function(spectra, target_spec_norm):
    """Score every measured spectrum against the normalized target spectrum.

    Per-column loss = 1 - cosine_similarity(normalized spectrum, target)
    weighted by step_int(peak intensity), so off-target shapes and weak or
    saturated peaks are both penalized. Relies on the module-level `scaler`
    (MinMaxScaler) and `step_int` helper.
    """
    values = spectra.values
    similarities = []
    for col in range(values.shape[1]):
        column = values[:, col]
        # Weight by peak height: saturated or weak peaks score lower.
        peak_weight = step_int(column.max())
        # Normalize the measured spectrum to [0, 1] before comparing shapes.
        normed = scaler.fit_transform(column.reshape(-1, 1))
        cos_sim = cosine_similarity(target_spec_norm.T, normed.T)
        similarities.append((cos_sim * peak_weight)[0])
    # Convert similarity (1 = identical) into a loss (0 = identical).
    return 1 - np.array(similarities)
```
The target spectrum and the measured spectrum have different resolutions. We can use interpolation to unify the range
```
import scipy.interpolate as interp
# Measurement grid: 380-800 nm at 1 nm resolution.
wave = np.arange(380,801,1)
# Linearly interpolate the target spectrum onto the measurement grid so both
# spectra share the same wavelength axis.
f = interp.interp1d(raw_target.iloc[:,0],raw_target.iloc[:,2],kind='slinear')
target_spec = f(wave)
# Normalize the target to [0, 1], then score every measured spectrum.
scaler = MinMaxScaler()
target_spec_norm = scaler.fit_transform(target_spec.reshape(-1,1))
loss = spectra_loss_function (raw_spectra, target_spec_norm)
```
Now the output becomes a single scalar value
```
X= raw_input
y = loss
y
```
We can plot the spectra that has the lowest loss
```
# Compare the best (lowest-loss) and worst (highest-loss) measured spectra
# against the normalized target.
a = np.argmin(loss)
b= np.argmax(loss)
plt.plot(raw_spectra.iloc[:,a], label ='lowest loss spectra')
plt.plot(raw_spectra.iloc[:,b],label ='highest loss spectra')
plt.plot(wave,target_spec_norm, label ='target')
plt.xlabel ('wavelength(nm)')
plt.ylabel ('intensites (a.u)')
plt.legend()
```
With 5D input and 1D output, we can train a regression model to map the processing conditions for AgNP to its spectral similarity value. This will be covered in the following lectures.
| github_jupyter |
# COMP 135 Fall 2019: HW1 STARTER
# Setup comp135_env package imports
```
import os
import numpy as np
import sklearn.neighbors
import sklearn.tree
from matplotlib import pyplot as plt
import seaborn as sns
%matplotlib inline
```
# Setup student-defined imports
```
from LeastSquaresLinearRegression import LeastSquaresLinearRegressor
from evaluate_perf_metrics import (
calc_perf_metric__absolute_error, calc_perf_metric__squared_error,
calc_perf_metrics_for_regressor_on_dataset)
```
# Load dataset
```
## TODO load all data (train/valid/test) into x_tr_NF, x_va_NF, x_te_NF, ...
y_tr_N = np.loadtxt('data_abalone/y_train.csv', delimiter=',', skiprows=1)
x_tr_NF = np.loadtxt('data_abalone/x_train.csv', delimiter=',', skiprows=1)
```
# 1a : Abalone histograms of response variable 'rings'
#### 1a(i): Produce one figure with three subplots, showing histograms of $y$ from train/valid/test
```
fig_h, axes_arr = plt.subplots(nrows=3, ncols=1, sharex=True)
## TODO plot histograms on the axes
## e.g. sns.distplot(y_tr_N, kde=False, rug=True, ax=axes_arr[0]);
```
#### 1a(ii): Describe the **train** distribution. Unimodal or multimodal? What shape? Are there noticeable outliers?
**TODO ANSWER HERE**
#### 1a(iii): Quantify train's descriptive statistics.
```
### TODO CODE HERE
```
# 1b : Scatterplots of 'rings' vs 'diam' and 'rings' vs 'shucked'
#### **1b(i):** Create figure with two subplots: scatter plot of `diam_mm` vs `rings` and scatter of `shucked_weight_g` vs `rings`.
```
## TODO CODE HERE
```
#### **1b(ii):** Describe the trends you see between diameter and rings in a few sentences.
**TODO ANSWER HERE**
#### 1b(iii): Describe the trends you see between shucked weight and rings.
**TODO ANSWER HERE**
# Setup code for 1c
```
## Baseline regressor: ignores the features entirely and always predicts
## the mean of the training targets.
class MeanPredictor():
    def __init__(self):
        # Constant prediction; populated by fit().
        self.yhat = None
    def fit(self, x_tr_NF, y_tr_N):
        """Memorize the mean of the training targets (features unused)."""
        self.yhat = np.mean(y_tr_N)
    def predict(self, x_NF):
        """Return the memorized mean regardless of the input features."""
        return self.yhat
## Baseline regressor: ignores the features entirely and always predicts
## the median of the training targets.
class MedianPredictor():
    def __init__(self):
        # Constant prediction; populated by fit().
        self.yhat = None
    def fit(self, x_tr_NF, y_tr_N):
        """Memorize the median of the training targets (features unused)."""
        self.yhat = np.median(y_tr_N)
    def predict(self, x_NF):
        """Return the memorized median regardless of the input features."""
        return self.yhat
mean_value_predictor = MeanPredictor()
## TODO fit the predictor, like mean_value_predictor.fit(x_tr_N2, y_tr_N)
## TODO evaluate predictions on train, valid, and test
median_value_predictor = MedianPredictor()
## TODO fit the predictor
## TODO evaluate predictions on train, valid, and test
```
# 1c : Results Table for Abalone MSE
#### **1c:** Make a table of the **mean-squared-error** for each of the MeanPredictor and MedianPredictor predictors when evaluated on all 3 dataset splits (training, validation, and test).
**Mean Squared Error:**
| split | guess-mean | guess-median |
| ----- | ----------- | ------------ |
| train | | |
| valid | | |
| test | | |
# Model fitting code for 1d
```
linear_regressor_2feats = LeastSquaresLinearRegressor()
# TODO fit and evaluate
linear_regressor_8feats = LeastSquaresLinearRegressor()
# TODO fit and evaluate
```
# 1d : Results Table for Mean Squared Error on Abalone
### **1d(i)** and **1d(ii)** Add results to the table
**Mean Squared Error:**
| split | guess mean | guess median | linear regr (2 feats) | linear regr (8 feats)
| ----- | ----------- | ------------ | --------------------- | ---------------------
| train |
| valid |
| test |
### **1d(iii):** Does using more features seem worthwhile? Do you think the improvement on the test data is significant? Why or why not?
# 1e : Model selection for K-Nearest Neighbor Regressor
```
# Sweep candidate K values for the k-NN regressor.
param_name = 'n_neighbors'
param_list = [1, 3, 5, 7, 11, 21, 41, 61, 81, 101, 201, 401, 801] # TODO ADD N
# Keep only values below total training size
# NOTE(review): until N (the training-set size) is appended per the TODO
# above, this filter compares the list against its own last element and
# therefore removes nothing.
param_list = [p for p in param_list if p <= param_list[-1]]
train_mse_list = []
valid_mse_list = []
test_mse_list = []
# Brute-force Euclidean k-NN for each candidate K.
for n_neighbors in param_list:
    knn_regr = sklearn.neighbors.KNeighborsRegressor(
        n_neighbors=n_neighbors,
        metric='euclidean',
        algorithm='brute')
    # TODO fit and predict and track performance metric values in the lists
```
#### **1e(i):** Make a line plot for mean-squared-error (MSE) vs $K$ on the validation set
```
# TODO
```
#### **1e(ii):** Which value do you recommend?
```
# TODO
```
#### **1e(iii):** Cumulative results table with K-Nearest Neighbor
**Mean Squared Error:**
| split | guess mean | guess median | linear regr (2 feats) | linear regr (8 feats) | k-NN (8 feats) |
| ----- | ----------- | ------------ | --------------------- | --------------------- | ----- |
| train |
| valid |
| test |
# <a name="problem-1-g"> 1g: Analyzing Residuals </a>
Bonus points possible. Not a required question. Feel free to skip
```
# TODO compute the predicted y values for linear regr and kNN
```
#### **1f(i):** Plot scatters of y vs yhat for linear regression and the best k-NN regressor
```
fig_h, ax_grid = plt.subplots(nrows=1, ncols=2, sharex=True, sharey=True)
plt.xlim([0, 26]); plt.ylim([0, 26]);
# ax_grid[0].plot(y_va_N, linear_yhat_va_N, 'k.', alpha=0.2);
ax_grid[0].set_title('Linear Regr.'); plt.xlabel('true y'); plt.ylabel('predicted y');
# ax_grid[1].plot(y_va_N, knn_yhat_va_N, 'k.', alpha=0.2);
plt.title('k-NN Regr.'); plt.xlabel('true y'); plt.ylabel('predicted y');
```
#### **1f(ii):** What kinds of systematic errors does each method make? What should be done about these?
TODO ANSWER HERE
# Problem 2 : Analysis of Doctor Visits
```
# TODO load data here
```
# 2a : Baseline predictions
#### **2a(i):** Given stakeholder's preferences, which error metric is most appropriate and why?
Because errors should scale linearly, we should use the *mean absolute error* metric.
If we used mean squared error, an error of 2 would cost 4x an error of 1.
```
mean_value_predictor = MeanPredictor()
## TODO fit and predict...
median_value_predictor = MedianPredictor()
## TODO fit and predict...
```
#### 2a(ii) : Results Table for Doctor Visits with Mean Absolute Error
**Mean Absolute Error:**
| split | guess-mean | guess-median |
| ----- | ----------- | ------------ |
| train |
| valid |
| test |
# Setup code for 2b
```
linear_regressor_2feats = LeastSquaresLinearRegressor()
# TODO fit and predict
linear_regressor_10feats = LeastSquaresLinearRegressor()
# TODO fit and predict
```
**2b(i) and 2b(ii):** Add LR to Results Table for MAE on DoctorVisits
**Mean Absolute Error:**
| split | guess-mean | guess-median | linear regr (2 feats) | linear regr (10 feats) |
| ----- | ----------- | ------------ | --------------------- | ---------------------- |
| train |
| valid |
| test |
**2b(iii):** Does using more features seem worthwhile? Why or why not?
# 2c : DecisionTreeRegressor
```
# Sweep the leaf-size regularizer for the decision tree regressor.
param_name = 'min_samples_leaf'
param_list = [1, 2, 3, 4, 5, 10, 20, 50, 100, 200, 500, 1000] # TODO add size of training set
train_mae_list = []
valid_mae_list = []
test_mae_list = []
# Fixed random_state keeps tie-breaking reproducible across runs.
for param in param_list:
    tree_regr = sklearn.tree.DecisionTreeRegressor(
        min_samples_leaf=param,
        random_state=42)
    # Fit, predict, and track performance metrics...
```
#### 2c(i): Line plot of mean absolute error vs min_samples_leaf
```
# TODO plot results
```
#### **2c(ii):** Which value of min_samples_leaf would you recommend?
TODO
#### 2c(iii): Add a column to the results table for MAE on DoctorVisits
**Mean Absolute Error:**
| split | guess-mean | guess-median | linear regr (2 feats) | linear regr (10 feats) | decision tree
| ----- | ----------- | ------------ | --------------------- | ---------------------- | --- |
| train |
| valid |
| test |
# 2d : DecisionTreeRegressor with MAE Training Criterion
```
train_mae_list = []
valid_mae_list = []
test_mae_list = []
for param in param_list:
tree_regr = sklearn.tree.DecisionTreeRegressor(
criterion='mae', # USE MEAN ABSOLUTE ERROR here
min_samples_leaf=param,
random_state=42)
# TODO fit, predict, and track performance metrics
```
#### 2d(i): Line plot of mean absolute error vs min_samples_leaf
```
# TODO
```
#### 2d(ii): Which value would you recommend?
```
# TODO
```
#### Setup for 2d(iii)
#### 2d(iii): Add a column to the results table for MAE on DoctorVisits
**Mean Absolute Error:**
| split | guess-mean | guess-median | linear regr (2 feats) | linear regr (10 feats) | decision tree (MSE) | decision tree (MAE)
| ----- | ----------- | ------------ | --------------------- | ---------------------- | --- | --- |
| train |
| valid |
| test |
# Problem 3: Concept questions
# 3a: Limits of $K$-NN
**Question**: When $K$ equals the total training set size $N$, the $K$-nearest-neighbor regression algorithm approaches the behavior of which other regression method discussed here?
#### 3a Answer:
TODO
# 3b: Modifications of $K$-NN
**Question**: Suppose in problem 2, when trying to minimize *mean absolute error* on heldout data, that instead of a DecisionTreeRegressor, we had used a $K$-NN regressor with Euclidean distance (as in Problem 1f).
Would we expect $K$-NN with large $K$ to always beat the strongest constant-prediction baseline (e.g. guess-median or guess-mean)?
To get better MAE values using a nearest-neighbor like approach, should we change the distance function used to compute neighbors? Would we need to change some other step of the $K$-NN prediction process?
#### 3b Answer:
TODO
# 3c: Linear Regression with Categorical Features
**Question:** Your colleague trains a linear regression model on a subset of the DoctorVisits data using only the `has_medicaid` and `has_private_insurance` features. Thus, all features in the vector have a binary categorical type and can be represented via a redundant one-hot encoding.
To your dismay, you discover that your colleague failed to include a bias term (aka intercept term) when training the weights. You recall from class that including a bias term can be important.
To be concrete, you wish each example $x_i$ was represented as a (bias-included) vector:
$$
x_i = [
\texttt{has_medicaid}
\quad \texttt{has_private_insurance}
\quad 1
] \quad \quad \quad ~
$$
However, your colleague used the following representation:
$$
\tilde{x}_i = [
\texttt{has_medicaid}
\quad \texttt{not(has_medicaid)}
\quad \texttt{has_private_insurance}
\quad \texttt{not(has_private_insurance)}
]
$$
Your colleague has delivered to you a length-4 feature vector $\tilde{w}$ for the 4 features above, but then left for vacation without giving you access to the training data.
Can you manipulate the $\tilde{w}$ vector to estimate an appropriate $w$ and $b$ such that for all possible inputs $x_i$:
$$
w^T x_i + b = \tilde{w}^T \tilde{x}_i
$$
#### 3c Answer:
TODO
| github_jupyter |
```
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import ZeroPadding2D
from keras import backend as K
import json
from collections import OrderedDict
def format_decimal(arr, places=6):
    """Round each element of *arr* to *places* decimal places.

    Returns a plain list of floats; keeps the exported JSON fixtures
    compact and reproducible.
    """
    scale = 10 ** places
    return [round(value * scale) / scale for value in arr]
DATA = OrderedDict()
```
### ZeroPadding2D
**[convolutional.ZeroPadding2D.0] padding (1,1) on 3x5x2 input, data_format='channels_last'**
```
# Case 0: symmetric (1, 1) padding, channels_last, 3x5x2 input.
data_in_shape = (3, 5, 2)
pad = ZeroPadding2D(padding=(1, 1), data_format='channels_last')
inp = Input(shape=data_in_shape)
model = Model(inputs=inp, outputs=pad(inp))

# Seeded random input in [-1, 1) so the fixture is reproducible.
np.random.seed(250)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape

data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['convolutional.ZeroPadding2D.0'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
```
**[convolutional.ZeroPadding2D.1] padding (1,1) on 3x5x2 input, data_format='channels_first'**
```
# Case 1: symmetric (1, 1) padding, channels_first, 3x5x2 input.
data_in_shape = (3, 5, 2)
pad = ZeroPadding2D(padding=(1, 1), data_format='channels_first')
inp = Input(shape=data_in_shape)
model = Model(inputs=inp, outputs=pad(inp))

# Seeded random input in [-1, 1) so the fixture is reproducible.
np.random.seed(251)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape

data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['convolutional.ZeroPadding2D.1'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
```
**[convolutional.ZeroPadding2D.2] padding (3,2) on 2x6x4 input, data_format='channels_last'**
```
# Case 2: asymmetric (3, 2) padding, channels_last, 2x6x4 input.
data_in_shape = (2, 6, 4)
pad = ZeroPadding2D(padding=(3, 2), data_format='channels_last')
inp = Input(shape=data_in_shape)
model = Model(inputs=inp, outputs=pad(inp))

# Seeded random input in [-1, 1) so the fixture is reproducible.
np.random.seed(252)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape

data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['convolutional.ZeroPadding2D.2'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
```
**[convolutional.ZeroPadding2D.3] padding (3,2) on 2x6x4 input, data_format='channels_first'**
```
# Case 3: asymmetric (3, 2) padding, channels_first, 2x6x4 input.
data_in_shape = (2, 6, 4)
pad = ZeroPadding2D(padding=(3, 2), data_format='channels_first')
inp = Input(shape=data_in_shape)
model = Model(inputs=inp, outputs=pad(inp))

# Seeded random input in [-1, 1) so the fixture is reproducible.
np.random.seed(253)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape

data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['convolutional.ZeroPadding2D.3'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
```
**[convolutional.ZeroPadding2D.4] padding ((1,2),(3,4)) on 2x6x4 input, data_format='channels_last'**
```
# Case 4: per-side ((top=1, bottom=2), (left=3, right=4)) padding,
# channels_last, 2x6x4 input.
data_in_shape = (2, 6, 4)
pad = ZeroPadding2D(padding=((1, 2), (3, 4)), data_format='channels_last')
inp = Input(shape=data_in_shape)
model = Model(inputs=inp, outputs=pad(inp))

# Seeded random input in [-1, 1) so the fixture is reproducible.
np.random.seed(254)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape

data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['convolutional.ZeroPadding2D.4'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
```
**[convolutional.ZeroPadding2D.5] padding 2 on 2x6x4 input, data_format='channels_last'**
```
# Case 5: scalar padding 2 (applied to all four sides), channels_last,
# 2x6x4 input.
data_in_shape = (2, 6, 4)
pad = ZeroPadding2D(padding=2, data_format='channels_last')
inp = Input(shape=data_in_shape)
model = Model(inputs=inp, outputs=pad(inp))

# Seeded random input in [-1, 1) so the fixture is reproducible.
np.random.seed(255)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape

data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)

DATA['convolutional.ZeroPadding2D.5'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
```
### export for Keras.js tests
```
print(json.dumps(DATA))
```
| github_jupyter |
```
from collections import defaultdict, Counter
from itertools import zip_longest
import json
import os
import re
import sys
import urllib
import numpy as np
import requests
from consequence_prediction.vep_mapping_pipeline.consequence_mapping import *
%matplotlib inline
import matplotlib.pyplot as plt
from eva_cttv_pipeline.clinvar_xml_io.clinvar_xml_io import *
```
## VEP handling: HGVS
```
def try_to_vep(hgvs, long=False):
    """Attempts to get consequences from VEP for HGVS expression."""
    vep_url = ('https://rest.ensembl.org/vep/human/hgvs/'
               f'{urllib.parse.quote(hgvs)}?content-type=application/json')
    resp = requests.get(vep_url).json()
    # VEP returns a dict with an 'error' key on failure, a list of results on success.
    if 'error' in resp:
        print('ERROR:', resp['error'])
    else:
        print('SUCCESS:', resp[0]['most_severe_consequence'])
    if long:
        print(json.dumps(resp, indent=4, sort_keys=True))
    print('=====')
# Probe which HGVS flavours the VEP hgvs endpoint accepts.
# Non-range cases
try_to_vep('NC_000011.10:g.17605796dup')
try_to_vep('NC_000011.10:g.17605796C>T')
# Range must be less than 5kb
try_to_vep('NC_000011.10:g.17605796_17612832del')
try_to_vep('NC_000011.10:g.17605796_17608796del')  # modified to be <5kb
try_to_vep('NC_000011.10:g.17605796_17605797insT')
# Amazingly, definite intronic range also seems to work
try_to_vep('LRG_214t1:c.889-1633_7395-667del')
try_to_vep('NM_000267.3:c.7000-499_7908-479del')
# No support for uncertainty
try_to_vep('NC_000011.10:g.(17605790_17605796)_(17612832_1761283)del')
try_to_vep('NC_000011.10:g.(?_17605796)_(17612832_?)del')
# I forgot this case in the other notebook, probably safe to say we can't do much with it
try_to_vep('NC_000011.10:g.?_17612832del')
# weird stuff
try_to_vep('U43746.1:n.9877-68_9877-65delTTAC')
try_to_vep('NP_000393.4:p.Leu91del')
try_to_vep('NC_012920.1:m.8350_12450del')
try_to_vep('NR_027760.2:n.609G>A')
```
### Summary
To work with VEP hgvs endpoint:
* g, c, p sequence types
* must be GRCh38
* size less than 5 kb
* about 3738 of these in ClinVar (known min span)
* no uncertainty
## VEP handling: region
This is the endpoint used by the code currently, see [here](https://github.com/EBIvariation/eva-opentargets/blob/master/consequence_prediction/vep_mapping_pipeline/consequence_mapping.py#L57).
Example query from VEP API docs, more documentation on input formatting [here](https://m.ensembl.org/info/docs/tools/vep/vep_formats.html):
`https://rest.ensembl.org/vep/human/region/7:100318423-100321323:1/DUP?content-type=application/json`
`{chr}:{start}-{end}:{strand}/{INS|DEL|DUP|TDUP}` (TDUP = tandem duplication (?))
Notes
* region can be larger than 5 kb
* not sure can do multiple regions at once from API docs
* needs (exact) genomic coordinates
```
# bulk query is possible as well
# example from above: NC_000011.10:g.17605796_17612832del
# bulk query is possible as well
# example from above: NC_000011.10:g.17605796_17612832del
# Identifier format consumed by query_vep: "<seq> <start> <end> <type> + <original hgvs>"
variants = ['NC_000011.10 17605796 17612832 DEL + NC_000011.10:g.17605796_17612832del']
results = query_vep(variants, VEP_SHORT_QUERY_DISTANCE)
results
```
## Biomart
Query biomart using region to get genes affected (q: is this different from the above region request?)
```
# query in the code doesn't quite work as chromosomal_region isn't an attribute
# value is similar format to VEP region endpoint: chr:start:end:strand
# but need to map NC_000011.10 -> 11
# query in the code doesn't quite work as chromosomal_region isn't an attribute
# value is similar format to VEP region endpoint: chr:start:end:strand
# but need to map NC_000011.10 -> 11
# Biomart XML query: list Ensembl gene IDs overlapping a fixed region on chr 11.
query = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Query>
<Query virtualSchemaName = "default" formatter = "TSV" header = "0" uniqueRows = "0" count = "" datasetConfigVersion = "0.6" >
<Dataset name = "hsapiens_gene_ensembl" interface = "default" >
<Filter name = "chromosomal_region" value = "11:17605796:17612832:1"/>
<Attribute name = "ensembl_gene_id" />
</Dataset>
</Query>
""".replace('\n', '')
url = f"http://www.ensembl.org/biomart/martservice?query={query}"
resp = requests.get(url)
# same example as used for VEP region
print(resp.text)
```
Notes
* similar to procedure used in repeat expansion pipeline (in particular know how to do in bulk)
* needs chromosome
* no functional consequences just gene targets
## Target genes from HGVS spans
```
# Load the local ClinVar extract used throughout this section.
PROJECT_ROOT = '/home/april/projects/opentargets/complex-events'
# dump of all records with no functional consequences and no complete coordinates
# uses June consequence pred + ClinVar 6/26/2021
no_consequences_path = os.path.join(PROJECT_ROOT, 'no-conseq_no-coords.xml.gz')
dataset = ClinVarDataset(no_consequences_path)
# --- HGVS span parsing -------------------------------------------------------
# Regex fragments below are built with rf-strings: the originals were plain
# f-strings, so escapes like \. raised invalid-escape warnings.
# Accession-like token, e.g. "NC_000011.10" or "LRG_214t1".
sequence_identifier = r'[a-zA-Z0-9_.]+'
# Genomic-coordinate prefix: "<accession>:g."
genomic_sequence = rf'^({sequence_identifier}):g\.'
# only INS, DEL, DUP supported by VEP
variant_type_regex = {
    re.compile(rf'{genomic_sequence}.*?del(?!ins).*?'): 'DEL',
    re.compile(rf'{genomic_sequence}.*?dup.*?'): 'DUP',
    re.compile(rf'{genomic_sequence}.*?(?<!del)ins.*?'): 'INS',
}
# Definite span "start_end"; variable span "(x_start)_(end_y)" with optional '?'.
def_range = r'([0-9]+)_([0-9]+)'
var_range = r'\([0-9?]+_([0-9]+)\)_\(([0-9]+)_[0-9?]+\)'
ch = r'[^?_+-]'  # characters that cannot start another range component
def_span_regex = re.compile(rf'{genomic_sequence}{ch}*?{def_range}{ch}*?$')
var_span_regex = re.compile(rf'{genomic_sequence}{ch}*?{var_range}{ch}*?$')


def hgvs_to_vep_identifier(hgvs):
    """Parse a genomic HGVS expression into a VEP region identifier.

    Returns a string "<seq> <start> <end> <DEL|DUP|INS> + <hgvs>" suitable for
    the VEP region endpoint, or None when the expression cannot be parsed,
    uses an unsupported accession, or has an inverted span.
    """
    m = def_span_regex.match(hgvs) or var_span_regex.match(hgvs)
    if not m:
        return None
    seq = m.group(1)
    # not everything accepted by VEP, for now we'll be lazy
    if not seq.startswith(('NC', 'LRG', 'NW', 'AC')):
        return None
    start = int(m.group(2))
    end = int(m.group(3))
    # allow start == end + 1 (insertion-style coordinates); reject wider inversions
    if start > end + 1:
        return None
    variant_type = next((label for regex, label in variant_type_regex.items()
                         if regex.match(hgvs)), None)
    if not variant_type:
        return None
    return f'{seq} {start} {end} {variant_type} + {hgvs}'
def extract_genes(vep_results):
    """Group the distinct affected Ensembl gene IDs by variant identifier."""
    genes_by_variant = defaultdict(list)
    for vep_result in vep_results:
        unique_genes = {c['gene_id']
                        for c in vep_result.get('transcript_consequences', [])}
        genes_by_variant[vep_result['id']].extend(unique_genes)
    return genes_by_variant
def extract_most_severe_consequence(vep_results):
    """Map each variant identifier to its most severe consequence term.

    Missing consequences map to the empty string; duplicate identifiers keep
    the last result seen, matching plain dict assignment.
    """
    return {result['id']: result.get('most_severe_consequence', '')
            for result in vep_results}
def grouper(iterable, n):
    """Split *iterable* into consecutive groups of at most *n* items.

    The final group is not padded: the None fill values that zip_longest adds
    are stripped from each group.  (The previous version tested the whole
    tuple against None — tuples are never None, so padding Nones leaked into
    the last batch that was then sent to VEP.)
    """
    args = [iter(iterable)] * n
    return [[item for item in group if item is not None]
            for group in zip_longest(*args, fillvalue=None)]
def get_vep_results(dataset, limit=None, verbose=False):
    """Collect raw VEP results for all current-assembly HGVS expressions in *dataset*.

    Each genomic HGVS is parsed into a region identifier, then VEP is queried
    in batches of 200.  Returns the combined list of VEP result dicts.
    NOTE(review): reads the private ``measure._hgvs_elems`` attribute of
    ClinVar measures — confirm against clinvar_xml_io before refactoring.
    """
    n = 0
    hgvs_list = []
    for record in dataset:
        if not record.measure or not record.measure.hgvs:
            continue
        # must be GRCh38 for VEP to work
        def current_hgvs(measure):
            # keep only expressions not flagged as a "previous" assembly
            return [elem.text for elem in measure._hgvs_elems if 'previous' not in elem.attrib['Type'].lower()]
        # NB. might double count records
        hgvs_list.extend([h for h in current_hgvs(record.measure) if h is not None])
        n += 1
        if limit and n > limit:
            break
    print(f'{n} records processed')
    print(f'{len(hgvs_list)} HGVS expressions')
    variants = [hgvs_to_vep_identifier(hgvs) for hgvs in hgvs_list]
    variants = [v for v in variants if v]  # v is None if it couldn't be parsed
    print(f'{len(variants)} parsed into chrom/start/end/type')
    # VEP only accepts batches of 200
    i = 0
    vep_results = []
    for group in grouper(variants, n=200):
        vep_results.extend(query_vep(variants=group, search_distance=VEP_SHORT_QUERY_DISTANCE))
        i += 1
        if verbose:
            print(f'Done with batch {i}')
    return vep_results
def most_severe_consequence(dataset, limit=None):
    """Return {variant identifier: most severe consequence} for mappable records."""
    consequences = extract_most_severe_consequence(get_vep_results(dataset, limit))
    print(f'{len(consequences)} successfully mapped by VEP')
    return consequences
# Run the full pipeline and plot how often each consequence term occurs.
hgvs_to_consequence = most_severe_consequence(dataset)
consequence_counts = Counter(hgvs_to_consequence.values())
plt.figure(figsize=(15,10))
plt.xticks(rotation='vertical')
plt.title(f'Most severe consequence')
plt.bar(consequence_counts.keys(), consequence_counts.values())
consequence_counts
def gene_counts(dataset, limit=None):
    """Return ({variant id: [gene ids]}, [genes-per-variant counts])."""
    hgvs_to_genes = extract_genes(get_vep_results(dataset, limit))
    print(f'{len(hgvs_to_genes)} successfully mapped by VEP')
    counts = [len(genes) for genes in hgvs_to_genes.values()]
    return hgvs_to_genes, counts
hgvs_to_genes, counts = gene_counts(dataset)
# Summary statistics for the genes-per-variant distribution.
print('Mean:', np.mean(counts))
print('Median:', np.median(counts))
print('Min:', np.min(counts))
print('Max:', np.max(counts))
MAX_REASONABLE_COUNT = 50
# Drop extreme outliers so the histogram stays readable.
smaller_counts = [x for x in counts if x < MAX_REASONABLE_COUNT]
print(len(smaller_counts))
plt.figure(figsize=(15,10))
plt.grid(visible=True)
plt.title(f'Number of target genes (less than {MAX_REASONABLE_COUNT})')
# first array is counts per bin
# second array is left edges of bins, plus last right edge
plt.hist(smaller_counts, bins=min(100, MAX_REASONABLE_COUNT))
```
### Summary
* out of 11,000 with known minimum span, 6270 successfully mapped by VEP
* haven't fully tested how many HGVS are handled by VEP "out of the box"
### Questions
* check hgvs is always on the + strand
* how important is it to actually get the variant type, if we have chrom/start/end?
* related, can we do anything about delins?
* what to do with sequences that aren't accepted by VEP as-is?
- OK for VEP: `NC`, `LRG`, `NW`, `AC`
- not OK:
- `NG_*` genomic region - could map (?)
- `hg38.chrX` and similar - could parse
- `AF287270` - genomic region also?
- `CM000663.1`
- `L78833.1`
For OT
* should these have a different consequence type, as we do for repeat expansion?
* what's the cutoff we should use?
* dealing with uncertainty
| github_jupyter |
```
from tensorflow.keras.layers import Conv2D, Flatten, MaxPooling2D, Dense, \
DepthwiseConv2D, Input, ReLU, AvgPool2D, BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.preprocessing import image_dataset_from_directory
# Local dataset root: train/ and validation/ directories, one subfolder per class.
path_to_dataset = '/Users/george/Documents/OpenMV/image_classification_with_openmv_camera/dataset/'
PATH = os.path.join(os.path.dirname(path_to_dataset))
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
BATCH_SIZE = 32
IMG_SIZE = (32, 32)
train_dataset = image_dataset_from_directory(train_dir,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             image_size=IMG_SIZE)
validation_dataset = image_dataset_from_directory(validation_dir,
                                                  shuffle=True,
                                                  batch_size=BATCH_SIZE,
                                                  image_size=IMG_SIZE)
# Tiny fully-connected classifier over raw 32x32 RGB pixels (2 classes).
# NOTE(review): the final 2-unit layer uses sigmoid, so it emits probabilities,
# not logits — the compile step's from_logits flag must agree with this.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(32, 32, 3)),
    tf.keras.layers.Dense(16, activation=tf.nn.relu),
    tf.keras.layers.Dense(8, activation=tf.nn.relu),
    tf.keras.layers.Dense(2, activation=tf.nn.sigmoid)
])
model.summary()
# Carve 20% of the validation batches off as a held-out test set.
val_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)
class_names = train_dataset.class_names
# Preview the first nine training images with their class labels.
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
base_learning_rate = 0.0001
# `lr` is a deprecated alias in tf.keras optimizers — use `learning_rate`.
# The model's last Dense layer applies a sigmoid, so its outputs are already
# probabilities, not logits; from_logits must therefore be False (the original
# from_logits=True applied a second softmax-on-probabilities and mis-scaled
# the loss).
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
initial_epochs = 10
# Baseline metrics before any training, for comparison with the curves below.
loss0, accuracy0 = model.evaluate(validation_dataset)
history = model.fit(train_dataset,
                    epochs=initial_epochs,
                    validation_data=validation_dataset)
# Collect and plot the first-stage training curves.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# Train 10 more epochs, resuming the epoch counter where the first fit()
# stopped so the concatenated curves below line up.
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs
history_fine = model.fit(train_dataset,
                         epochs=total_epochs,
                         initial_epoch=history.epoch[-1],
                         validation_data=validation_dataset)
# Append the new metrics to the first-stage curves.
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
# Plot the combined curves, marking where the second training stage began.
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
         plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
         plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
loss, accuracy = model.evaluate(test_dataset)
print('Test accuracy :', accuracy)
# Retrieve a batch of images from the test set
image_batch, label_batch = test_dataset.as_numpy_iterator().next()
# The model ends in a 2-unit sigmoid layer, so predict_on_batch() returns one
# score per class for every image.  The previous code flattened those scores,
# re-applied a sigmoid and thresholded at 0.5 — a recipe for a single-logit
# binary model that here produced twice as many "predictions" as images.
# Take the argmax over the class axis instead.
predictions = np.argmax(model.predict_on_batch(image_batch), axis=-1)
print('Predictions:\n', predictions)
print('Labels:\n', label_batch)
# Show the first nine test images with their predicted class names.
plt.figure(figsize=(10, 10))
for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(image_batch[i].astype("uint8"))
    plt.title(class_names[predictions[i]])
    plt.axis("off")
def representative_dataset():
    """Yield 100 random float32 samples shaped like the model input (1, 32, 32, 3).

    Used by the TFLite converter to calibrate quantization ranges.
    """
    for _ in range(100):
        sample = np.random.rand(1, 32, 32, 3).astype(np.float32)
        yield [sample]
model.save('mouse_model.h5')
# Full-integer post-training quantization: the representative dataset lets the
# converter calibrate activation ranges; int8 in/out matches the OpenMV target.
converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file('mouse_model.h5')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8  # or tf.uint8
converter.inference_output_type = tf.int8  # or tf.uint8
tflite_quant_model = converter.convert()
# Use a context manager so the file handle is flushed and closed (the original
# open(...).write(...) leaked the handle).
with open("mouse_quantized.tflite", "wb") as model_file:
    model_file.write(tflite_quant_model)
ls
```
| github_jupyter |
# Exercises 06 - Strings and Dictionaries
## 0. Length of Strings
Let's start with a string lightning round to warm up. What are the lengths of the strings below?
For each of the five strings below, predict what `len()` would return when passed that string. Use the variable `length` to record your answer.
```
a = ""
length = -1  # len(a) == 0: the empty string has no characters
b = "it's ok"
length = -1  # len(b) == 7: the apostrophe and the space each count as one character
c = 'it\'s ok'
length = -1  # len(c) == 7: \' is an escaped single quote — a single character
d = """hey"""
length = -1  # len(d) == 3: triple quotes delimit the string but are not part of it
e = '\n'
length = -1  # len(e) == 1: \n is one newline character
```
## 1. Check the Zip Code
There is a saying that *\"Data scientists spend 80% of their time cleaning data, and 20% of their time complaining about cleaning data.\"* Let's see if you can write a function to help clean US zip code data. Given a string, it should return whether or not that string represents a valid zip code. For our purposes, a valid zip code is any string consisting of exactly 5 digits.
HINT: `str` has a method that will be useful here. Use `help(str)` to review a list of string methods.
```
def is_valid_zip(zip_code):
    """Returns whether the input string is a valid (5 digit) zip code.

    The stub previously returned None; a valid zip is exactly five
    characters, all of which are digits.
    """
    # str.isdigit() rejects signs, spaces and decimal points, and is False
    # for the empty string, so the length check plus isdigit() is sufficient.
    return len(zip_code) == 5 and zip_code.isdigit()
```
## 2. Searching a Word
A researcher has gathered thousands of news articles. But she wants to focus her attention on articles including a specific word. Complete the function below to help her filter her list of articles.
Your function should meet the following criteria
- Do not include documents where the keyword string shows up only as a part of a larger word. For example, if she were looking for the keyword “closed”, you would not include the string “enclosed.”
- She does not want you to distinguish upper case from lower case letters. So the phrase “Closed the case.” would be included when the keyword is “closed”
- Do not let periods or commas affect what is matched. “It is closed.” would be included when the keyword is “closed”. But you can assume there are no other types of punctuation.
*HINT*: Some methods that may be useful here: `str.split()`, `str.strip()`, `str.lower()`
```
def word_search(doc_list, keyword):
    """
    Takes a list of documents (each document is a string) and a keyword.
    Returns list of the index values into the original list for all documents
    containing the keyword.

    Matching is case-insensitive, ignores leading/trailing periods and commas
    on each word, and only matches whole words (so 'closed' does not match
    'enclosed').

    Example:
    doc_list = ["The Learn Python Challenge Casino.", "They bought a car", "Casinoville"]
    >>> word_search(doc_list, 'casino')
    >>> [0]
    """
    matches = []
    target = keyword.lower()
    for index, doc in enumerate(doc_list):
        # Normalise each token: strip surrounding punctuation, lowercase.
        tokens = [token.strip('.,').lower() for token in doc.split()]
        if target in tokens:
            matches.append(index)
    return matches
```
## 3. Searching Multiple Words
Now the researcher wants to supply multiple keywords to search for. Complete the function below to help her.
(You're encouraged to use the `word_search` function you just wrote when implementing this function. Reusing code in this way makes your programs more robust and readable - and it saves typing!)
```
def multi_word_search(doc_list, keywords):
    """
    Takes list of documents (each document is a string) and a list of keywords.
    Returns a dictionary where each key is a keyword, and the value is a list of indices
    (from doc_list) of the documents containing that keyword

    >>> doc_list = ["The Learn Python Challenge Casino.", "They bought a car and a casino", "Casinoville"]
    >>> keywords = ['casino', 'they']
    >>> multi_word_search(doc_list, keywords)
    {'casino': [0, 1], 'they': [1]}
    """
    # Tokenise every document once (strip surrounding periods/commas,
    # lowercase) so each keyword lookup is a cheap set-membership test.
    token_sets = [{token.strip('.,').lower() for token in doc.split()}
                  for doc in doc_list]
    return {keyword: [i for i, tokens in enumerate(token_sets)
                      if keyword.lower() in tokens]
            for keyword in keywords}
```
# Keep Going 💪
| github_jupyter |
# <center><b>Oil Data Quality Index</b></center>
### <center>First Draft<br><br>January 2015<br>Author: James L. Makela</center>
## <u>1. Background</u>
The Adios3 Oil Library will accept data on crude oils and refined products provided they contain a minimum required set of data. Additional missing data will be generated by approximation formulas according to the <b><i>Oil Properties Estimation</i></b> document.
It is reasonable to propose that the more measured data that an oil record has, the better job we will do when estimating missing oil properties. <i>(Ideally, we would not need to estimate anything, but simply use measured values)</i>
So in addition to requiring a minimum set of measured data, we will try to assess an oil record's <b>quality index</b>. The quality index is a numeric score that we will use to represent an oil's "fitness", or to put it another way, how well we expect to be able to calculate reasonable estimates of the missing oil properties.
```
%pylab inline
import numpy as np
import oil_library
from oil_library.models import Oil, ImportedRecord, KVis
session = oil_library._get_db_session()
# these are some reasonable samples of oil records in the oil library
# (a mix of crudes and condensates with varying data completeness)
ans_mp = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'ALASKA NORTH SLOPE (MIDDLE PIPELINE)').one()
ans_2002 = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'ALASKA NORTH SLOPE (2002)').one()
ans_ps9 = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'ALASKA NORTH SLOPE-PUMP STATION #9, BP').one()
arabian_hvy = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'ARABIAN HEAVY, AMOCO').one()
borholla = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'BORHOLLA').one()
lls_bp = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'LIGHT LOUISIANNA SWEET, BP').one()
benin = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'BENIN RIVER, CHEVRON').one()
empire = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'EMPIRE ISLAND, AMOCO').one()
sajaa = session.query(ImportedRecord).filter(ImportedRecord.oil_name == 'SAJAA CONDENSATE, BP').one()
```
## <u>2. Minimum Data Required</u>
As previously mentioned, we only accept oil records that contain a minimum required set of data. Otherwise, we will not process the record.
| Type | Density (or API) | Viscosity | Distillation Cuts |
| ---- |:----------------:|:---------:|:-----------------:|
| Crude | Yes | Yes | No |
| Refined Product | Yes | Yes | Yes (at least 3) |
## <u>3. Calculating the Quality Index</u>
### <u>3.1 The General Scoring Method</u>
The total score of the oil will be an aggregation of 1 or more tests which will result in a quality index. A quality index $Q$ is defined with a range:
$$
\begin{align}
0 &\leq Q \leq 1
\end{align}
$$
This score can (naively) be considered a single test, but is more likely to be a compilation of scores of multiple sub-tests. In turn, each sub-test could be considered in a similar fashion, making a tree structure of tests of which the leaf-level tests contain only a single test. So we will define the terminology of our testing processes as either <b>'Aggregate'</b> or <b>'Atomic'</b>.
### <u>3.2 Atomic Test Score</u>
The assumption for an atomic test method is that it is testing only one thing. As such, the results of an atomic test method will be:
$$
\begin{align}
Q &= 0 \qquad \qquad \text{(the test failed)} \cr
Q &= 1 \qquad \qquad \text{(the test passed)} \cr
\end{align}
$$
### <u>3.3 Aggregate Test Score</u>
The assumption for an aggregate test method is that it is testing a number of things. These things are assumed to be a collection of sub-tests which will each return a score $Q_i$. An aggregate test will return an ordinary or weighted average of the collection of sub-test scores $\bar{Q}$, with the same numeric range as $Q$.
$$
\begin{align}
n &= \text{the number of sub-tests} \cr
Q_i &= \text{a sub-test result indexed by } i \cr
w_i &= \text{the weighted value of a sub-test indexed by } i \cr
&\quad \text{for an ordinary average, the weights will be } [1, 1, \ldots] \cr
\cr
\bar{Q} &= {\sum\limits_{i=1}^{n}{w_i \cdot Q_i} \over \sum\limits_{i=1}^n{w_i}} \cr
\end{align}
$$
```
def aggregate_score(Q_i, w_i=None):
    """Return the weighted mean of sub-test scores Q_i.

    When w_i is None every sub-test gets an equal weight, yielding a plain
    average.
    """
    scores = np.asarray(Q_i)
    weights = np.ones(scores.shape) if w_i is None else np.asarray(w_i)
    return np.sum(weights * scores) / np.sum(weights)
# Sanity checks for aggregate_score; expected values are noted inline.
Q_i = [1.0, 1.0, 1.0, 0.0]
w_i = [1.0, 1.0, 1.0, 1.0]
print aggregate_score(Q_i) # simple average (0.75)
print aggregate_score(Q_i, w_i) # explicit simple average (0.75)
Q_i = [1.0, 1.0, 1.0, 1.0]
w_i = [3.0, 1.0, 1.0, 1.0]
print aggregate_score(Q_i, w_i) # weighted average (1.0)
Q_i = [1.0, 0.0, 0.0, 0.0]
w_i = [3.0, 1.0, 1.0, 1.0]
print aggregate_score(Q_i, w_i) # weighted average (0.5)
Q_i = [0.0, 1.0, 0.0, 0.0]
w_i = [3.0, 1.0, 1.0, 1.0]
print aggregate_score(Q_i, w_i) # weighted average (1/6 or 0.1666)
```
## <u>4. The Imported Oil Record Tests</u>
### <u>4.1 Oil Demographics</u>
We would like to gauge the richness of the demographic data in the record. These are the text fields that describe the oil record, and if the need arises, they can help us to investigate the source of the oil data found in the record.
The demographic fields will be tested simply for their presence. For now, we will not place special importance on any particular demographic field, so a simple average will be used for scoring the multiple fields.
The demographic fields to be tested are:
- reference
<i>(<b>Note</b>: in future versions we may want to add source quality flag, and age.)</i>
```
def score_demographics(imported_rec):
    """Equal-weight aggregate over presence checks of demographic text fields.

    Currently only the 'reference' field is tested; each present field scores
    1.0, each missing field 0.0.
    """
    fields = ('reference',)
    scores = [1.0 if getattr(imported_rec, field) is not None else 0.0
              for field in fields]
    return aggregate_score(scores)
# Spot-check demographic scores over the sample records.
for ir in (ans_mp, ans_ps9, arabian_hvy, borholla,
           lls_bp, benin, empire, sajaa):
    print ('Oil: {}, Demographics Score: {}'
           .format(ir.oil_name, score_demographics(ir)))
```
### <u>4.2 Oil API</u>
An imported oil record is required by us to have either an API density value or a set of densities measured at reference temperatures. So this may seem like an unnecessary test. However we would surmise that a record that has both types of density information is of better quality than a record that contains only one or the other. So for this reason we test the oil for an API value and return an atomic score.
```
def score_api(imported_rec):
    """Atomic test: 1.0 if the record carries an API gravity value, else 0.0."""
    return 0.0 if imported_rec.api is None else 1.0
# Spot-check API scores over the sample records.
for ir in (ans_mp, ans_ps9, arabian_hvy, borholla,
           lls_bp, benin, empire, sajaa):
    print ('Oil: {}, API Score: {}'
           .format(ir.oil_name, score_api(ir)))
```
### <u>4.3 Oil Densities</u>
An imported oil record can contain 0 to 4 density values that are measured at different reference temperatures. In addition, we will consider the oil's API as a density at $15^\circ C$. We will give an atomic pass/fail score to each density measurement set that is found in the oil record.
Right now, we are testing that the oil's existing density attributes:
- contain a valid numeric density value and...
- contain a valid numeric temperature
<i>(<b>Note</b>: For the future, we could test that density is in a reasonable range for oils, that the temperature is in a reasonable kelvin range, maybe a couple of other things)</i>
If any density attribute fails any one of the density testing criteria, it is given a score of 0.<br>
We would surmise that the more distinct measurements we have, the better we would be able to estimate oil density at an arbitrary temperature, and so the oil record would have a better quality. I believe that we should place the biggest weight on the first density, and the weights of all successive densities should diminish exponentially as in the following series:
$$
\begin{align}
w_i &= \left[0.5, 0.25, 0.125, \ldots \frac{1}{2^n}\right] \cr
\end{align}
$$
We would like to see 4 valid density measurements present in our oil record, so if it has less than that, we would assign a score $Q = 0$ for any missing density up to that number.
```
def score_density_rec(density_rec):
    """Atomic test: the density value and its reference temperature are both present."""
    has_value = density_rec.kg_m_3 is not None
    has_temperature = density_rec.ref_temp_k is not None
    return 1.0 if (has_value and has_temperature) else 0.0
def score_densities(imported_rec):
    """Exponentially-weighted score over up to four density measurement sets.

    If no measurement sits at ~15 C (288.0/288.15 K), the API value stands in
    for that slot.  Missing measurements (fewer than four) score 0.0.
    """
    scores = [score_density_rec(d) for d in imported_rec.densities]
    has_15c = any(np.isclose(d.ref_temp_k, [288.0, 288.15]).any()
                  for d in imported_rec.densities)
    if not has_15c:
        scores.append(score_api(imported_rec))
    # We have a maximum number of 4 density field sets in our flat file;
    # pad any shortfall with failing scores (no-op when already >= 4).
    scores.extend([0.0] * (4 - len(scores)))
    # weights decay as 1/2, 1/4, 1/8, ...; doubling the last weight makes the
    # series sum to 1.0
    w_i = 1.0 / (2.0 ** (np.arange(len(scores)) + 1))
    w_i[-1] = w_i[-2]
    return aggregate_score(scores, w_i)
# Spot-check density scores, showing the underlying data for each record.
for ir in (ans_mp, ans_2002, ans_ps9, arabian_hvy, borholla,
           lls_bp, benin, empire, sajaa):
    print 'Oil: {}'.format(ir.oil_name)
    print '\tAPI: {}'.format(ir.api)
    print '\tDensities: {}'.format(ir.densities)
    print ('\tScore: {}'
           .format(score_densities(ir)))
```
### <u>4.4 Oil Pour Point</u>
An oil has two pour point values. The notion of pour point is defined as simply the temperature at which the oil enters its solid phase or "stops pouring". However, the paraffins in the oil have a tendency to form crystalline structures over time which will in turn elevate the pour point temperature. So a maximum pour point is measured for the case in which the oil has stayed at a constant temperature for a while, and a minimum pour point value is often measured for the case that the oil was recently heated, breaking down the crystalline structures. The "freshly heated" pour point is expected to be lower than the pour point of older oil kept at a constant temperature.
So in the interest of evaluating the quality of an oil record, we would definitely like to see at least one valid pour point value. But the presence of a second (min) value may simply indicate an oil with a lot of paraffins. It's unclear whether we should assign extra credit for having two values.
At this point in time, we are assigning a score for both a minimum and maximum value, and we will apply the following weights:
- pour_point_max_k: 2
- pour_point_min_k: 1
This indicates that we want to at least see the maximum pour point temperature, and it will account for the majority of scoring for this test. But we want to give half credit for having a minimum pour point.
```
def score_pour_point_min(imported_rec):
    """Atomic test: the minimum pour point temperature is present."""
    return 0.0 if imported_rec.pour_point_min_k is None else 1.0


def score_pour_point_max(imported_rec):
    """Atomic test: the maximum pour point temperature is present."""
    return 0.0 if imported_rec.pour_point_max_k is None else 1.0


def score_pour_point(imported_rec):
    """Weighted aggregate: max pour point (weight 2) plus min pour point (weight 1)."""
    sub_scores = [score_pour_point_max(imported_rec),
                  score_pour_point_min(imported_rec)]
    return aggregate_score(sub_scores, [2.0, 1.0])
# Spot-checks covering all three pour point data situations.
print score_pour_point(borholla) # no pour point data
print score_pour_point(arabian_hvy) # max, but not min
print score_pour_point(ans_mp) # both max and min
```
### <u>4.5 Oil Flash Point</u>
An oil record contains a flash point minimum and maximum value. It is a bit unclear to me what the distinction between the minimum and maximum is. However, there are two possibilities, which I will explain.
A <b>minimum flash point</b> is defined as the minimum temperature at which an oil or fuel product will ignite on application of an ignition source under specified conditions.
The <b>fire point</b> of an oil or fuel product is the temperature at which the vapor produced by that product will continue to burn for at least 5 seconds after ignition by an open flame. So at the flash point, which would be a lower temperature, a substance will ignite briefly, but vapor might not be produced at a high enough rate to sustain the fire. The fire point can be estimated to be roughly $10^\circ C$ higher than flash point.
Ok, based on this assessment of flash point and fire point, I looked at the source data for the oil records, and for the vast majority of records that contain both values, the values were nearly identical, which indicates to me we are not dealing with a fire point value stored as a maximum flash point.
There was a small handful of records that had a maximum flash point significantly higher than the minimum, which could indicate that it is a fire point.
I believe that we can consider both the minimum and maximum values as being a flash point, and that if we have at least one value, we probably have sufficient data quality. So the rules are:
- if we have no flash point, min or max, then $Q = 0$
- if we have a minimum flash point, then $Q = 1$
- if we have a maximum flash point, then $Q = 1$
- if we have both a maximum and minimum flash point, then $Q = 1$
```
def score_flash_point(imported_rec):
    """Atomic test: at least one flash point (min or max) is present."""
    has_any = (imported_rec.flash_point_min_k is not None or
               imported_rec.flash_point_max_k is not None)
    return 1.0 if has_any else 0.0
# Spot-check flash point scores over the sample records.
for ir in (ans_mp, ans_ps9, arabian_hvy, borholla,
           lls_bp, benin, empire, sajaa):
    print ('Oil: {}, \tFlash Point Score: {}'
           .format(ir.oil_name, score_flash_point(ir)))
```
### <u>4.6 Oil SARA Fractions</u>
The sub-compounds that make up an oil have been categorized, at least traditionally, by organic chemists as saturates, aromatics, resins, and asphaltenes. This group of four chemical categories is known as SARA. And an imported oil record may (or may not) contain measured fractional values for them. If it does, then we would say that the record has better data quality, since we have a reasonable reference to double check the veracity of the SARA component estimations that we perform.
No particular SARA value is perceived to have a more important role than any other, so we will evaluate them with equal weights when evaluating the score for the existence of SARA fractions.
```
def score_sara_saturates(imported_rec):
    """Atomic test: the saturates fraction is present."""
    return 0.0 if imported_rec.saturates is None else 1.0


def score_sara_aromatics(imported_rec):
    """Atomic test: the aromatics fraction is present."""
    return 0.0 if imported_rec.aromatics is None else 1.0


def score_sara_resins(imported_rec):
    """Atomic test: the resins fraction is present."""
    return 0.0 if imported_rec.resins is None else 1.0


def score_sara_asphaltenes(imported_rec):
    """Atomic test: the asphaltenes fraction is present."""
    return 0.0 if imported_rec.asphaltenes is None else 1.0


def score_sara_fractions(imported_rec):
    """Equal-weight aggregate of the four SARA presence tests."""
    sub_tests = (score_sara_saturates, score_sara_aromatics,
                 score_sara_resins, score_sara_asphaltenes)
    return aggregate_score([sub_test(imported_rec) for sub_test in sub_tests])
# Spot-checks spanning none / one / three of the four SARA fractions.
print score_sara_fractions(ans_mp) # no SARA fractions
print score_sara_fractions(lls_bp) # Asphaltenes only
print score_sara_fractions(benin) # Saturates, Aromatics, Asphaltenes, no Resins
```
### <u>4.7 Oil Emulsion Constants</u>
An imported oil record contains a minimum and maximum value for emulsion constant.
After a discussion with Bill & Chris we have decided to go with the following weights for our emulsion properties.
- water_content_emulsion: weight = 2
- emuls_constant_min: weight = 3
- emuls_constant_max: weight = 0
```
def score_water_content_emulsion(imported_rec):
    # Full credit when a water-content-of-emulsion value is present.
    return float(imported_rec.water_content_emulsion is not None)
def score_emulsion_constant_min(imported_rec):
    # Full credit when a minimum emulsion constant is present.
    return float(imported_rec.emuls_constant_min is not None)
def score_emulsion_constant_max(imported_rec):
    # Full credit when a maximum emulsion constant is present.
    return float(imported_rec.emuls_constant_max is not None)
def score_emulsion_constants(imported_rec):
    """Weighted aggregate of the emulsion-property scores.

    Per the weighting agreed above: water content weighs 2, the minimum
    constant weighs 3, and the maximum constant weighs 0 (so it is
    deliberately excluded from the aggregation).
    """
    scores = [score_water_content_emulsion(imported_rec),
              score_emulsion_constant_min(imported_rec)]
    weights = [2.0, 3.0]
    return aggregate_score(scores, weights)
# Spot-check the emulsion score.  Converted from Python-2-only `print x`
# statements to print() calls, valid on both Python 2 and 3.
print(score_emulsion_constants(ans_mp))  # no emulsion constant
print(score_emulsion_constants(empire))  # both min & max
```
### <u>4.8 Interfacial Tensions</u>
An oil record contains values for oil/water and oil/seawater interfacial tensions measured at a reference temperature. So the check we need to perform is an atomic score of each measured value and its associated reference temperature.
We will score each measurement set as such:
- if the measurement and temperature are valid numeric values, then $Q = 1$
- else $Q = 0$
No particular interfacial tension value is perceived to have a more important role than the other, so they will be evaluated with an equally weighted score.
```
def score_oil_water_tension(imported_rec):
    """Score the oil/water interfacial tension measurement set.

    Valid (1.0) only when both the tension value and its reference
    temperature are present; 0.0 otherwise.
    """
    has_value = imported_rec.oil_water_interfacial_tension_n_m is not None
    has_temp = imported_rec.oil_water_interfacial_tension_ref_temp_k is not None
    return 1.0 if has_value and has_temp else 0.0
def score_oil_seawater_tension(imported_rec):
    """Score the oil/seawater interfacial tension measurement set.

    Valid (1.0) only when both the tension value and its reference
    temperature are present; 0.0 otherwise.
    """
    has_value = imported_rec.oil_seawater_interfacial_tension_n_m is not None
    has_temp = imported_rec.oil_seawater_interfacial_tension_ref_temp_k is not None
    return 1.0 if has_value and has_temp else 0.0
def score_interfacial_tensions(imported_rec):
    """Equally weighted aggregate of the two interfacial tension scores."""
    tension_scores = [score_oil_water_tension(imported_rec),
                      score_oil_seawater_tension(imported_rec)]
    return aggregate_score(tension_scores)
# Spot-check the interfacial tension score.  Converted from Python-2-only
# `print x` statements to print() calls, valid on both Python 2 and 3.
print(score_interfacial_tensions(lls_bp))  # no interfacial tensions
print(score_interfacial_tensions(empire))  # only oil/seawater
print(score_interfacial_tensions(ans_mp))  # both oil/water and oil/seawater
```
### <u>4.9 Oil Viscosities</u>
An oil record can contain measurement data for up to 6 kinematic viscosities and 6 dynamic viscosities, each with an associated measurement reference temperature. So we need to perform an atomic score of each measured value and its associated reference temperature.
We will score each measurement set as such:
- if the measurement and temperature are valid numeric values, then $Q = 1$
- else $Q = 0$
No particular viscosity measurement is perceived to be more important than the other.
But it is often the case that a dynamic viscosity exists with a redundant reference temperature to that of a kinematic viscosity measurement. In that case, we will count the kinematic viscosity as a unique measurement and ignore the dynamic measurement.
We would surmise that the more distinct measurements we have, the better we would be able to estimate oil viscosity at an arbitrary temperature, and so the oil record would have a better quality. So I believe that we should place the biggest weight on the first viscosity, and the weights of all successive viscosities should diminish exponentially as in the following series:
$$
\begin{align}
w_i &= \left[0.5, 0.25, 0.125, \ldots \frac{1}{2^n}\right] \cr
\end{align}
$$
We would also like to see at least 4 valid viscosity measurements present in our oil record, so if it has less than that, we would assign a score $Q = 0$ for any missing viscosity up to that number.<br>
In addition, any viscosity measurement that exists for the record, but does not have a passing score should be counted even if the total number of viscosities exceeds 4. The reasoning for this is that bad data is just as relevant as missing data.
```
def score_single_viscosity(viscosity_rec):
    """Score one viscosity measurement set (kinematic or dynamic).

    Kinematic records carry the value in `m_2_s`; dynamic records have no
    `m_2_s` attribute and carry `kg_ms` instead.  Valid (1.0) only when
    both the value and its reference temperature are present.
    """
    ref_temp = viscosity_rec.ref_temp_k
    if hasattr(viscosity_rec, 'm_2_s'):
        value = viscosity_rec.m_2_s
    else:
        value = viscosity_rec.kg_ms
    return 1.0 if (value is not None and ref_temp is not None) else 0.0
def score_viscosities(imported_rec):
    """Aggregate quality score over the record's viscosity measurements.

    Kinematic and dynamic viscosities are merged, keeping only the first
    measurement seen at each reference temperature (kinematic wins ties
    because it is iterated first).
    """
    seen_temps = set()
    unique_viscosities = []
    for visc in imported_rec.kvis + imported_rec.dvis:
        if visc.ref_temp_k in seen_temps:
            continue
        seen_temps.add(visc.ref_temp_k)
        unique_viscosities.append(visc)
    scores = [score_single_viscosity(v) for v in unique_viscosities]
    # Pad with zero scores up to the required minimum of 4 measurements.
    scores.extend([0.0] * max(0, 4 - len(scores)))
    # Exponentially decaying weights 1/2, 1/4, ...; the final weight is
    # repeated so the whole series sums to exactly 1.0.
    w_i = 1.0 / (2.0 ** (np.arange(len(scores)) + 1))
    w_i[-1] = w_i[-2]
    return aggregate_score(scores, w_i)
# Show each record's raw viscosity lists and its aggregate score.
# Converted from Python-2-only `print` statements to print() calls.
for ir in (ans_mp, arabian_hvy, borholla, lls_bp, benin, empire):
    print(ir.kvis, ir.dvis)
    print(score_viscosities(ir))
    print()
```
### <u>4.10 Oil Distillation Cuts</u>
An oil record can contain measurement data for up to 15 distillation cuts, each with an associated vapor temperature, liquid temperature, and a cumulative fractional value representing the portion of oil that is evaporated at that temperature. So we need to perform an aggregate score of each cut.
For each individual cut it is essential that it have at least a distilled fraction, otherwise it is not valid.
And we would prefer a vapor temperature to be present, but we could still make use of a liquid temperature if it doesn't exist. So we will determine a cut to be valid if it has either of those temperatures.
The score for each individual valid cut will be performed as follows:
- if there is no evaporated fraction then $Q = 0$
- otherwise:
- if there is a vapor temperature, then $Q = 1$
- otherwise, if there is a liquid temperature only then $Q = 0.8$
- otherwise, $Q = 0$
We would surmise that the more distinct measurements we have, the better we would be able to estimate our oil distillation curve, and so the oil record would have a better quality. I believe that we should place the biggest weight on the first cut, and the weights of all successive cuts should diminish exponentially as in the following series:
$$
\begin{align}
w_i &= \left[0.5, 0.25, 0.125, \ldots \frac{1}{2^n}\right] \cr
\end{align}
$$
We would also like to see at least 10 valid distillation cuts present in our oil record, so if it has less than that, we would assign a score $Q = 0$ for any missing distillation cut up to that number.<br>
In addition, any cut that exists for the record, but does not have a passing score should be counted even if the total number of cuts exceeds 10. The reasoning for this is that bad data is just as relevant as missing data.
<i>
(<b>Note</b>: in the future we could be a bit more discerning of this data. We could, for instance, exclude any cuts for which the distillation fraction does not increase with an increasing temperature.)
</i>
```
def cut_has_vapor_temp(cut_rec):
    # 1.0 when a vapor temperature was recorded for this cut.
    return 1.0 if cut_rec.vapor_temp_k is not None else 0.0
def cut_has_liquid_temp(cut_rec):
    # 1.0 when a liquid temperature was recorded for this cut.
    return 1.0 if cut_rec.liquid_temp_k is not None else 0.0
def cut_has_fraction(cut_rec):
    # 1.0 when a cumulative distilled fraction was recorded for this cut.
    return 1.0 if cut_rec.fraction is not None else 0.0
def score_cut(cut_rec):
    """Score one distillation cut.

    A cut is worthless without a distilled fraction.  Given one, a vapor
    temperature earns full credit (1.0) and a liquid temperature alone
    earns partial credit (0.8).
    """
    if cut_has_fraction(cut_rec) != 1.0:
        return 0.0
    if cut_has_vapor_temp(cut_rec) == 1.0:
        return 1.0
    if cut_has_liquid_temp(cut_rec) == 1.0:
        return 0.8
    return 0.0
def score_cuts(imported_rec):
    """Aggregate quality score over the record's distillation cuts."""
    scores = [score_cut(c) for c in imported_rec.cuts]
    # Pad with zero scores up to the required minimum of 10 cuts.
    scores.extend([0.0] * max(0, 10 - len(scores)))
    # Exponentially decaying weights 1/2, 1/4, ...; the final weight is
    # repeated so the whole series sums to exactly 1.0.
    w_i = 1.0 / (2.0 ** (np.arange(len(scores)) + 1))
    w_i[-1] = w_i[-2]
    return aggregate_score(scores, w_i)
# Summarize each record's cuts and its aggregate cuts score.
# Converted from Python-2-only `print` statements to print() calls.
for ir in (ans_mp, benin, ans_ps9, arabian_hvy, sajaa, borholla):
    print('name = ', ir.oil_name)
    print('\tnum_cuts: {}'.format(len(ir.cuts)))
    print('\tCuts that have vapor temp: {}'
          .format(np.sum([(c.vapor_temp_k is not None)
                          for c in ir.cuts])))
    print('\tCuts Score: {}'.format(score_cuts(ir)))
    print()
```
### <u>4.11 Oil Toxicities</u>
An oil record can contain up to 6 sets of toxicity information. These are separated into two groups of three items each; Effective Concentration (EC) and Lethal Concentration (LC).<br>
We don't currently use this information in our models, but it is conceivable that it might be useful in the future. So we will describe a scoring method for this information, but the bar for success will be low.
The effective concentration data set will include the name of the species of animal, and a number of concentrations necessary for immobilization of 50% of the population of that animal after a period of exposure. The exposure times are 24, 48, and 96 hours.
Similarly, the lethal concentration data set will include the name of the species of animal, and a number of concentrations necessary to cause death of 50% of the population of that animal after a period of exposure. The exposure times are 24, 48, and 96 hours.
Our test of an individual toxicity set will simply be the presence of a species, and at least one concentration value. If it satisfies that requirement, it will get a score of $Q = 1$. And we will only need to see one toxicity set to pass with a total score of $Q = 1$.
<i>
(<b>Note</b>: I can't find any oils with toxicities anymore. Either the filemaker export process is broken, or we have decided not to include this information anymore.)
</i>
```
def score_single_toxicity(tox_rec):
    """Score one toxicity set: pass (1.0) if the species is identified
    and at least one exposure-time concentration is present."""
    any_concentration = (tox_rec.after_24h is not None or
                         tox_rec.after_48h is not None or
                         tox_rec.after_96h is not None)
    if tox_rec.species is not None and any_concentration:
        return 1.0
    return 0.0
def score_toxicities(imported_rec):
    """A single passing toxicity set earns the record the full score."""
    for tox in imported_rec.toxicities:
        if score_single_toxicity(tox) == 1.0:
            return 1.0
    return 0.0
# Show each record's toxicity sets and the resulting score.
# Converted from Python-2-only `print` statements to print() calls.
for ir in (ans_mp, benin):
    print('name = ', ir.oil_name)
    print('toxicities = ', ir.toxicities)
    print(score_toxicities(ir))
    print()
```
## <u>5. The Final Score of an Imported Oil Record</u>
The final score of an imported oil record will be an aggregation of the resulting scores of the individual tests described above. We will use a weighted average, and the weights will be tailored to the perceived importance of each test.
The perceived importance of the individual tests are certainly debatable. For now, here is a current list of the individual tests here with their weighted importance.
| Test | Weight | Cumulative |
| ---- |:------:|:----------:|
| Densities | 5 | 5 |
| Viscosities | 5 | 10 |
| SARA Fractions | 5 | 15 |
| Distillation Cuts | 10 | 25 |
| Interfacial Tensions | 3 | 28 |
| Pour Point | 2 | 30 |
| Demographics | 1 | 31 |
| Flash Point | 1 | 32 |
| Emulsion Constants | 1 | 33 |
| Toxicities | 0 | 33 |
Note: api and density taken together. api = density at 15C total weight 5
```
def score_imported_oil(imported_rec):
    """Overall record quality: weighted aggregate of the individual tests.

    Weights follow the importance table above; toxicities carry zero
    weight and are therefore omitted from the aggregation.
    """
    weighted_scores = [(score_densities(imported_rec), 5.0),
                       (score_viscosities(imported_rec), 5.0),
                       (score_sara_fractions(imported_rec), 5.0),
                       (score_cuts(imported_rec), 10.0),
                       (score_interfacial_tensions(imported_rec), 3.0),
                       (score_pour_point(imported_rec), 2.0),
                       (score_demographics(imported_rec), 1.0),
                       (score_flash_point(imported_rec), 1.0),
                       (score_emulsion_constants(imported_rec), 1.0)]
    scores, weights = zip(*weighted_scores)
    return aggregate_score(scores, weights)
# Report the final quality score for every sample record.
# Converted from Python-2-only `print` statements to print() calls.
for ir in (ans_mp, ans_ps9, arabian_hvy, borholla,
           lls_bp, benin, empire, sajaa):
    print('Oil: {}, Score: {}'.format(ir.oil_name,
                                      score_imported_oil(ir)))
```
| github_jupyter |
Assignment
In this assignment, students will have to use the badges dataset from the UCI repository
and pick a vectorizer of their choice to structure the data. Next they have to apply decision
tree classifier by trying different maximum depths for the tree and evaluating last 10 instances to verify results.
1. Title: ML94/COLT94 Badge Problem
2. Source Information
-- Creator: Haym Hirsh, after an idea by Rob Schapire
-- Donor: Haym Hirsh (hirsh@cs.rutgers.edu)
-- Date: September, 1994
3. Past Usage:
Every pre-registered attendee at the 1994 Machine Learning
Conference and 1994 Computational Learning Theory Conference received
a badge labeled with a "+" or "-". The labeling was due to some
function known only to the badge generator (Haym Hirsh), and it
depended only on the attendee's name. The goal for conference
attendees was to identify the unknown function used to generate the
+/- labeling.
4. Relevant Information:
Part of the problem in using an automated program to discover the
unknown target function is to decide how to encode names such that
the program can be used. The data below are presented in the form
of a +/- label followed by the person's name. It is up to the
learning-system user to decide how to convert this data into something
usable by the system (e.g., what attributes to use if your favorite
learner requires feature-vector data).
5. Number of Instances: 294
6. Number of Attributes: N/A
7. Attribute Information: N/A
8. Missing Attribute Values: N/A
9. Class Distribution: 210 positives, 84 negatives
```
# Load the UCI badges data: one instance per line, "<label> <name>".
# Use a context manager so the file handle is closed deterministically.
with open("Datasets/badges.data") as f:
    corpus = f.read()

from sklearn.tree import DecisionTreeClassifier

docs = corpus.split('\n')

X, y = [], []
for doc in docs:
    # Skip blank lines (e.g. the trailing newline); otherwise an empty
    # instance with an empty label is appended.
    if not doc:
        continue
    l = doc[:1]   # badge label: '+' or '-'
    i = doc[2:]   # attendee name
    X.append(i)
    y.append(l)

from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer()
matrix_X = vec.fit_transform(X)

from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
# Train on all but the last four of the 294 instances.
knn.fit(matrix_X[:290], y[:290])

# predicted labels of the last four documents
# (the original cell sliced a single row, contradicting this comment)
print(knn.predict(matrix_X[290:]))
# prediction probability of the two labels for each of the last four documents
print(knn.predict_proba(matrix_X[290:]))
```
## Assignments
```
# Assignment solution: vectorize the badges data with TF-IDF and classify
# with a decision tree, evaluating the last 10 instances.
with open("Datasets/badges.data") as f:
    corpus = f.read()

from sklearn.tree import DecisionTreeClassifier

docs = corpus.split("\n")  # split on line terminations

x = []  # names of the people
y = []  # badge labels
for doc in docs:
    if not doc:
        continue  # skip blank lines such as the trailing newline
    l = doc[:1]   # badge label: '+' or '-'
    i = doc[2:]   # person's name
    x.append(i)
    y.append(l)

from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer()
matrix_x = vec.fit_transform(x)

dtc = DecisionTreeClassifier(max_depth=5)
# Train on all but the last 10 of the 294 instances, then verify the
# predictions on the held-out final 10 (as the assignment asks).
dtc.fit(matrix_x[:284], y[:284])

# NOTE: the original cell referenced the undefined names `matrix_X`
# (the vectorized matrix here is `matrix_x`) and `Des` (in a stray
# `help(Des)` call, removed).
print("Decision Tree Classifier , label " + str(dtc.predict(matrix_x[284:])))
print("Decision Tree Classifier , prob " + str(dtc.predict_proba(matrix_x[284:])))
```
| github_jupyter |
# Building a case study: vegetation analysis
<img align="middle" src="../_static/session_5/01_vegetation_analysis_ch.PNG" alt="Screenshot from the crop health notebook." width="600">
*The Crop Health notebook investigated in Session 1 is an example of a case study. We now have enough knowledge of the Sandbox to build a similar comprehensive vegetation analysis.*
## Overview
In the [last session](../session_4/03_calculate_ndvi_part_2.ipynb), we calculated the Normalised Difference Vegetation Index (NDVI) as a method of measuring green vegetation over an area of interest.
The index made the Earth observation data easier to analyse, and allowed comparison across time periods. In our NDVI map of a cropping field, we could see there were seasonal variations over the quarterly geomedians. However, that information by itself provides little context for further use. Why are we looking at this field? Do the results fit what we were expecting to see? What can we use this information for?
How you analyse Digital Earth Africa data will depend on **what you want to know**. Now that we know more about the strengths and limitations of vegetation indices, we can use them to form the basis of a complete case study that informs a user of a real-world outcome.
This session will show you how to extend dataset calculations into a meaningful report. This section outlines some common goals of vegetation analysis, and provides references to examples of case study notebooks. The following sections will walk through a tutorial on how to construct a notebook on vegetation change detection.
## The significance of vegetation
<img align="middle" src="../_static/session_5/01_vegetation_analysis_crop.PNG" alt="A crop field in Africa." width="600">
*Food security is a major focus of the UN's Sustainable Development Goals (SDGs). The SDGs are a key tenet of the Digital Earth Africa program.* [[Image source](https://rcmrd.org/servir-regional-cropland-assessment-and-monitoring-service)]
Growing things are a cornerstone of life on Earth. Monitoring vegetation can give us insight into:
* **Agriculture:** including cropping, seasonal patterns and changes in land use
* **Environment health and industry:** including forestry, logging and mining
* **Climate:** including desertification and drought
## Real world example notebooks
Several case studies based on vegetation indices have already been made using the Digital Earth Africa Sandbox. These case study notebooks can be found in the Sandbox folder called **Real_world_examples**.
The **Real_world_examples** folder contains many case studies; the notebooks related to vegetation are summarised below.
* `Crop_health.ipynb`
This notebook draws upon the crop health app function to calculate NDVI for selected fields. By comparing two polygons within the area, it is possible to assess the fields for crop health and productivity over time.
Session 1 used this crop health notebook as an example of Sandbox capability.
* `Vegetation_change_detection.ipynb`
This notebook visualises vegetation change over time. Areas where vegetation has increased compared to a baseline time are marked in blue, while areas where vegetation has decreased are output as red. This change is indexed using either NDVI, or the Enhanced Vegetation Index (EVI). The notebook allows the user to choose which index is being used.
The exercise in this session, Session 5, will contain instructions to create a similar notebook.
## Structuring a case study
If you opened up some of the real world example notebooks, you might notice they are more complicated than the exercises in this training course. However, they all have the same structure.
Each notebook you have made from the training course exercises, as well as each real world example notebook, consist of six fundamental components.
1. Load packages and functions
2. Connect to the datacube
3. Define the area of interest using longitude and latitude
4. Load data from the datacube
5. Perform analysis or calculation on the data (such as calculating an index)
6. Plot the results
The **Real_world_examples** notebooks go a few steps further:
* Frame the analysis in the context of a question or purpose — this is normally introduced at the start of the notebook
* Extend results analysis and discussion
<img align="middle" src="../_static/session_5/01_vegetation_analysis_casestudy.PNG" alt="Components of a " width="600">
## Conclusion
With some understanding of the context behind spatial data analysis, we are now ready to create our own case study notebook. This week's exercise is about incorporating the additional two steps (analysis purpose, and additional results discussion) to make a well-rounded case study.
| github_jupyter |
### Dimensional analysis
The fastest way to get into the unit system is to explore the quantities that live in the `yt.units` namespace:
```
# Compound units built from base symbols compare equal to named units:
# kg*m**2/s**2 is a joule.
from yt.units import meter, gram, kilogram, second, joule
print (kilogram*meter**2/second**2 == joule)
print (kilogram*meter**2/second**2)
# Short aliases are available too: kg*m**2/s**3 is a watt.
from yt.units import m, kg, s, W
kg*m**2/s**3 == W
# Multiplying a plain number by a unit symbol produces a quantity.
from yt.units import kilometer
three_kilometers = 3*kilometer
print (three_kilometers)
# Arithmetic between compatible units: compare the two sums to see how
# the displayed unit is chosen; dividing kg by g gives a pure ratio.
from yt.units import gram, kilogram
print (gram+kilogram)
print (kilogram+gram)
print (kilogram/gram)
```
These unit symbols are all instances of a new class we've added to yt 3.0, `YTQuantity`. `YTQuantity` is useful for storing a single data point.
```
# The unit symbols imported above are instances of YTQuantity.
type(kilogram)
```
We also provide `YTArray`, which can store arrays of quantities:
```
# Multiplying a list by a unit symbol produces a YTArray of quantities.
arr = [3,4,5]*kilogram
print (arr)
print (type(arr))
```
### Creating arrays and quantities
Most people will interact with the new unit system using `YTArray` and `YTQuantity`. These are both subclasses of numpy's fast array type, `ndarray`, and can be used interchangeably with other NumPy arrays. These new classes make use of the unit system to append unit metadata to the underlying `ndarray`. `YTArray` is intended to store array data, while `YTQuantity` is intended to store scalars in a particular unit system.
There are two ways to create arrays and quantities. The first is to explicitly create it by calling the class constructor and supplying a unit string:
```
# Explicit construction: pass the values and a unit string to YTArray.
from yt.units.yt_array import YTArray
sample_array = YTArray([1,2,3], 'g/cm**3')
print (sample_array)
```
The unit string can be an arbitrary combination of metric unit names. Just a few examples:
```
# Construct random-valued quantities across a variety of unit strings,
# grouped by dimension (length, time, mass, energy, temperature).
from yt.units.yt_array import YTQuantity
from yt.units import kboltz
from numpy.random import random
import numpy as np
print ("Length:")
print (YTQuantity(random(), 'm'))
print (YTQuantity(random(), 'cm'))
print (YTQuantity(random(), 'Mpc'))
print (YTQuantity(random(), 'AU'))
print ('')
print ("Time:")
print (YTQuantity(random(), 's'))
print (YTQuantity(random(), 'min'))
print (YTQuantity(random(), 'hr'))
print (YTQuantity(random(), 'day'))
print (YTQuantity(random(), 'yr'))
print ('')
print ("Mass:")
print (YTQuantity(random(), 'g'))
print (YTQuantity(random(), 'kg'))
print (YTQuantity(random(), 'Msun'))
print ('')
print ("Energy:")
print (YTQuantity(random(), 'erg'))
print (YTQuantity(random(), 'g*cm**2/s**2'))
print (YTQuantity(random(), 'eV'))
print (YTQuantity(random(), 'J'))
print ('')
print ("Temperature:")
print (YTQuantity(random(), 'K'))
# An energy in eV divided by Boltzmann's constant, converted to CGS.
print ((YTQuantity(random(), 'eV')/kboltz).in_cgs())
```
Dimensional arrays and quantities can also be created by multiplication with another array or quantity:
```
# Implicit construction: multiply a scalar by an imported unit symbol.
from yt.units import kilometer
print (kilometer)
three_kilometers = 3*kilometer
print (three_kilometers)
```
When working with a YTArray with complicated units, you can use `unit_array` and `unit_quantity` to conveniently apply units to data:
```
# A 20-element random array with compound units (erg/s), used by the
# unit_quantity / unit_array examples that follow in this notebook.
test_array = YTArray(np.random.random(20), 'erg/s')
print (test_array)
```
`unit_quantity` returns a `YTQuantity` with a value of 1.0 and the same units as the array it is attached to.
```
# unit_quantity: a scalar 1.0 carrying the array's units.
print (test_array.unit_quantity)
```
`unit_array` returns a `YTArray` with the same units and shape as the array it is attached to, and with all values set to 1.0.
```
# unit_array: an all-ones array with the same shape and units.
print (test_array.unit_array)
```
These are useful when doing arithmetic:
```
# Use the helpers to add dimensionally consistent offsets to the array.
print (test_array + 1.0*test_array.unit_quantity)
print (test_array + np.arange(20)*test_array.unit_array)
```
For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`. You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity.
```
# uq and ua are short aliases for unit_quantity and unit_array.
print (test_array.uq)
print (test_array.unit_quantity == test_array.uq)
from numpy import array_equal
print (test_array.ua)
print (array_equal(test_array.ua, test_array.unit_array))
```
Unit metadata is encoded in the `units` attribute that hangs off of `YTArray` or `YTQuantity` instances:
```
# Inspect the units attribute and its dimensions for two unit symbols.
from yt.units import kilometer, erg
print ("kilometer's units:", kilometer.units)
print ("kilometer's dimensions:", kilometer.units.dimensions)
print ('')
print ("erg's units:", erg.units)
print ("erg's dimensions: ", erg.units.dimensions)
```
### Arithmetic with `YTQuantity` and `YTArray`
Of course it wouldn't be very useful if all we could do is create data with units. The real power of the new unit system is that we can add, subtract, multiply, and divide using quantities and dimensional arrays:
```
# Addition of mixed-but-compatible units; in_units converts explicitly.
a = YTQuantity(3, 'cm')
b = YTQuantity(3, 'm')
print (a+b)
print (b+a)
print ('')
print ((a+b).in_units('ft'))
# Division and multiplication produce derived units; in_cgs / in_mks
# convert the result into the CGS / MKS unit systems.
a = YTQuantity(42, 'mm')
b = YTQuantity(1, 's')
print (a/b)
print ((a/b).in_cgs())
print ((a/b).in_mks())
print ((a/b).in_units('km/s'))
print ('')
print (a*b)
print ((a*b).in_cgs())
print ((a*b).in_mks())
# Force = mass * acceleration, convertible to dynes.
m = YTQuantity(35, 'g')
a = YTQuantity(9.8, 'm/s**2')
print (m*a)
print ((m*a).in_units('dyne'))
# Physical constants are quantities too.
from yt.units import G, kboltz
print ("Newton's constant: ", G)
print ("Newton's constant in MKS: ", G.in_mks(), "\n")
print ("Boltzmann constant: ", kboltz)
print ("Boltzmann constant in MKS: ", kboltz.in_mks())
# (G * rho)**(-1/2): a timescale built from G and a mass density.
rho = YTQuantity(1, 'g/cm**3')
t_ff = (G*rho)**(-0.5)
print (t_ff)
```
An exception is raised if we try to do a unit operation that doesn't make any sense:
```
# Adding a length to an energy is dimensionally invalid and raises
# YTUnitOperationError; print the error message instead of crashing.
from yt.utilities.exceptions import YTUnitOperationError
a = YTQuantity(3, 'm')
b = YTQuantity(5, 'erg')
try:
    print (a+b)
except YTUnitOperationError as e:
    print (e)
```
A plain `ndarray` or a `YTArray` created with empty units is treated as a dimensionless quantity and can be used in situations where unit consistency allows it to be used:
```
# Plain ndarrays and YTArrays created without units act as
# dimensionless factors in unit-aware arithmetic.
a = YTArray([1.,2.,3.], 'm')
b = np.array([2.,2.,2.])
print ("a: ", a)
print ("b: ", b)
print ("a*b: ", a*b)
c = YTArray([2,2,2])
print ("c: ", c)
print ("a*c: ", a*c)
```
### Saving and Loading `YTArray`s to/from disk
`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.
#### HDF5
To write to HDF5, use `write_hdf5`:
```
# Write two arrays to the same HDF5 file under separate dataset names.
my_dens = YTArray(np.random.random(10), 'Msun/kpc**3')
my_temp = YTArray(np.random.random(10), 'K')
my_dens.write_hdf5("my_data.h5", dataset_name="density")
my_temp.write_hdf5("my_data.h5", dataset_name="temperature")
```
Where we used the `dataset_name` keyword argument to create a separate dataset for each array in the same file.
We can use the `from_hdf5` classmethod to read the data back in:
```
# Round-trip: read the density dataset back and compare with the original.
read_dens = YTArray.from_hdf5("my_data.h5", dataset_name="density")
print (read_dens)
print (my_dens)
```
We can use the `info` keyword argument to `write_hdf5` to write some additional data to the file, which will be stored as attributes of the dataset:
```
# Entries of the info dict are stored as HDF5 attributes of the dataset.
my_vels = YTArray(np.random.normal(10), 'km/s')
info = {"source":"galaxy cluster","user":"jzuhone"}
my_vels.write_hdf5("my_data.h5", dataset_name="velocity", info=info)
```
If you want to read/write a dataset from/to a specific group within the HDF5 file, use the `group_name` keyword:
```
# group_name places the dataset inside a (possibly nested) HDF5 group.
my_vels.write_hdf5("data_in_group.h5", dataset_name="velocity", info=info, group_name="/data/fields")
```
where we have used the standard HDF5 slash notation for writing a group hierarchy (e.g., group within a group):
#### ASCII
To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:
```
# Save three arrays as tab-separated ASCII columns with a header/footer.
import yt
a = YTArray(np.random.random(size=10), "cm")
b = YTArray(np.random.random(size=10), "g")
c = YTArray(np.random.random(size=10), "s")
yt.savetxt("my_data.dat", [a,b,c], header='My cool data', footer='Data is over', delimiter="\t")
```
The file we wrote can then be easily used in other contexts, such as plotting in Gnuplot, or loading into a spreadsheet, or just for causal examination. We can quickly check it here:
```
%%bash
# Display the ASCII file: header, unit subheader, data columns, footer.
more my_data.dat
```
You can see that the header comes first, and then right before the data we have a subheader marking the units of each column. The footer comes after the data.
`yt.loadtxt` can be used to read the same data with units back in, or read data that has been generated from some other source. Just make sure it's in the format above. `loadtxt` can also selectively read from particular columns in the file with the `usecols` keyword argument:
```
# Read back only columns 1 and 2 (the b and c arrays) with their units,
# and compare against the originals.
bb, cc = yt.loadtxt("my_data.dat", usecols=(1,2), delimiter="\t")
print (bb)
print (b)
print ('')
print (cc)
print (c)
```
| github_jupyter |
# 3.4.4 Least Angle Regression
Least angle regression (LAR) uses a similar strategy to Forward stepwise regression, but only enters "as much" of a predictor as it deserves.
**Algorithm 3.2**
1. Standardize the predictors to have mean zero and unit norm. Start with the residual $\mathbf{r} = \mathbf{y} - \mathbf{\overline{y}}$ and $\beta_1,...,\beta_p = 0$
2. Find the predictor $\mathbf{x}_j$ most correlated with $\mathbf{r}$.
3. Move $\beta_j$ from 0 towards its least-squares coefficient $\langle \mathbf{x}_j, \mathbf{r} \rangle$, until some other competitor $\mathbf{x}_k$ has as much correlation with the current residual as does $\mathbf{x}_j$.
4. Move $\beta_j$ and $\beta_k$ in the direction defined by their joint least squares coefficient of the current residual on $\langle \mathbf{x}_j, \mathbf{x}_k \rangle$, until some other competitor $\mathbf{x}_l$ has as much correlation with the current residual.
5. Continue in this way until all $p$ predictors have been entered. After min(N - 1, p) steps, we arrive at the full least-squares solution.
Suppose at the beginning of the kth step:
- $\mathcal{A}_k$ is the active set of variables
- $\beta_{\mathcal{A}_k}$ be the coefficients
- $\mathbf{r}_k=\mathbf{y} - \mathbf{X}_{\mathcal{A}_k}\beta_{\mathcal{A}_k}$ is the current residual,
then the direction for this step is (3.55):
$$\delta_k = (\mathbf{X}_{\mathcal{A}_k}^T\mathbf{X}_{\mathcal{A}_k})^{-1}\mathbf{X}_{\mathcal{A}_k}^T\mathbf{r}_k$$
The coefficient profile then evolves as $\beta_{\mathcal{A}_k}(\alpha)=\beta_{\mathcal{A}_k} + \alpha \cdot \delta_k$ and the fit vector evolves as $\hat{f}_k(\alpha)=\hat{f}_k + \alpha \cdot \mathbf{u}_k$
```
import numpy as np
import pandas as pd
from scipy import stats
# Load the prostate dataset (tab-separated; first column is the row index).
df = pd.read_csv('../data/prostate/prostate.data', delimiter='\t', index_col=0)
# 'train' flags the train/test split; 'lpsa' is the response variable.
mask_train = df.pop('train')
df_y = df.pop('lpsa')
train_x = df[mask_train == 'T']
train_y = df_y[mask_train == 'T']
# Algorithm 3.2, step 1: standardize the predictors to zero mean and
# unit norm, and center the response.
train_x_centered = train_x - train_x.mean(axis = 0)
train_x_centered /= np.linalg.norm(train_x_centered, axis=0)
train_y_centered = train_y - train_y.mean()
def lars(X, y):
    """Least Angle Regression (Algorithm 3.2).

    X is an (n, p) predictor matrix whose columns are standardized to
    zero mean and unit norm; y is the centered response.  Returns the
    pair (fit vector, coefficient vector) after all p predictors have
    entered the active set.
    """
    n_samples, n_predictors = X.shape
    fit = np.zeros_like(y)          # current fit vector (mu)
    coef = np.zeros(n_predictors)   # coefficient estimates (beta)
    for _ in range(n_predictors):
        # Correlation of each predictor with the current residual.
        corr = X.T @ (y - fit)
        abs_corr = np.abs(corr)
        max_corr = abs_corr.max()
        # Active set: predictors tied (within tolerance) for the
        # maximal absolute correlation.
        active = np.isclose(abs_corr, max_corr)
        signs = np.where(corr[active] > 0, 1, -1)
        X_active = signs * X[:, active]
        # Equiangular direction u over the active set.
        G = X_active.T @ X_active
        Ginv = np.linalg.inv(G)
        A = Ginv.sum() ** (-0.5)
        w = A * Ginv.sum(axis = 1)
        u = X_active @ w
        # Default step carries us to the full least-squares solution.
        step = max_corr / A
        if not np.all(active):
            a = X.T @ u
            inactive = np.invert(active)
            corr_out = corr[inactive]
            a_out = a[inactive]
            # Smallest non-negative step at which some inactive
            # predictor ties the active set's correlation.
            candidates = np.concatenate([(max_corr - corr_out) / (A - a_out),
                                         (max_corr + corr_out) / (A + a_out)])
            step = candidates[candidates >= 0].min()
        fit += step * u
        coef[active] += step * signs
    return fit, coef
# Fit LAR on the standardized training data.  Uses .to_numpy(): the old
# DataFrame.as_matrix() accessor was deprecated in pandas 0.23 and
# removed in pandas 1.0.  Prints converted to the Python-3 function form.
y_fit, beta = lars(train_x_centered.to_numpy(), train_y_centered.to_numpy())
train_error = np.mean((y_fit - train_y_centered) ** 2)
print('Beta: ', beta)
print('train error: ', train_error)
```
**Algorithm 3.2a**
4a. If a non-zero coefficient hits zero, drop its variable from the active set of variables and recompute the current joint least squares direction.
The LAR(lasso) algorithm is extremely efficient, requiring the same order of computation as that of a single least squares fit using the p predictors.
**Heuristic argument why LAR and Lasso are similar**
Suppose $\mathcal{A}$ is the active set of variables at some stage. We can express as (3.56):
$$\mathbf{x}_j^T(\mathbf{y}-\mathbf{X}\beta)=\lambda \cdot s_j, j \in \mathcal{A}$$
also $|\mathbf{x}_j^T(\mathbf{y}-\mathbf{X}\beta)| \le \lambda, j \notin \mathcal{A}$. Now consider the lasso criterion (3.57):
$$R(\beta)=\frac{1}{2}||\mathbf{y}-\mathbf{X}\beta||_2^2 + \lambda||\beta||_1$$
Let $\mathcal{B}$ be the active set of variables in the solution for a given value of $\lambda$, and $R(\beta)$ is differentiable, and the stationarity conditions give (3.58):
$$\mathbf{x}_j^T(\mathbf{y}-\mathbf{X}\beta)=\lambda \cdot sign(\beta_j), j \in \mathcal{B}$$
Comparing (3.56) and (3.58), we see that they are identical only if the sign of $\beta_j$ matches the sign of the inner product. That is why the LAR algorithm and lasso start to differ when an active coefficient passes through zero; the stationarity conditions for the non-active variables require that (3.59):
$$|\mathbf{x}_j^T(\mathbf{y}-\mathbf{X}\beta)|\le \lambda, j \notin \mathcal{B}$$
# Degrees-of-Freedom Formula for LAR and Lasso
We define the degrees of freedom of the fitted vector $\hat{y}$ as:
$$
df(\hat{y})=\frac{1}{\sigma^2}\sum_{i=1}^N Cov(\hat{y}_i,y_i)
$$
This makes intuitive sense: the harder that we fit to the data, the larger this covariance and hence $df(\hat{\mathbf{y}})$.
| github_jupyter |
<table>
<tr align=left><td><img align=left src="./images/CC-BY.png">
<td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Kyle T. Mandli</td>
</table>
```
from __future__ import print_function, division
```
Note to lecturers: This notebook is designed to work best as a classic Jupyter Notebook with nbextensions
* hide_input: to hide selected python cells particularly for just plotting
* RISE: Interactive js slide presentations
# Discussion 1: Introduction to Python
So you want to code in Python? We will do some basic manipulations and demonstrate some of the basics of the notebook interface that we will be using extensively throughout the course.
### Objectives:
* Provide overview of simplest data types and flow control available in Python 3
* Provide a few practice problems
* set up for Homework 0 to debug the homework submission system and introduce working with Jupyter notebooks
### Topics:
- Math
- Variables
- Lists
- Control flow
- Coding style
- Other data structures
- IPython/Jupyter notebooks
There is considerable online documentation and tutorials for python.
### Other intros:
- [Basic Python](https://docs.python.org/3/tutorial/introduction.html)
- [Software Carpentry - Programming in Python](http://swcarpentry.github.io/python-novice-inflammation/)
## Python Math
Lets start with some basic operations:
```
2 + 2
32 - (4 + 2)**2
1 / 2
4.0 + 4.0**(3.0 / 2.0)
```
Good practice to just add a decimal after any number you really want to treat as a `float`.
Additional types of numbers include `complex`, `Decimal` and `Fraction`.
```
3+5j
print(1j**2)
```
Note that to use "named" functions such as `sqrt` or `sin` we need to `import` a module so that we have access to those functions. When you `import` a module (or package) in Python we are asking Python to go look for the code that is named and make them active in our workspace (also called a namespace in more general parlance). Here is an example where we use Python's builtin `math` module:
```
import math
math.sqrt(4)
math.sin(math.pi / 2.0)
math.exp(-math.pi / 4.0)
```
Note that in order to access these functions we need to prepend the `math.` to the functions and the constant $\pi$. We can forgo this and import all of what `math` holds if we do the following:
```
from math import *
sin(pi / 2.0)
```
Notes:
* `import *` is discouraged, particularly if you only need a few functions or you will be mixing with other modules that define `sin`, for example `numpy.sin()` and `math.sin()` have somewhat different functionality.
* if you only want a few functions from math use
`from math import sin, cos`
* many of these functions always return a `float` number regardless of their input.
## Variables
Assign variables like you would in any other language:
```
num_students = 120
room_capacity = 120
(room_capacity - num_students) / room_capacity * 100.0
```
As indicated in the previous section, there are many different data types. For example, a variable could be an integer, a floating point number, a string, or numerous other basic types. Python will determine the data type based on how you enter it.
In the following example, three different variables are defined, and the type associated with each variable is printed.
```
number = 5
ratio = 0.15
description = "The ratio is"
doit = False
print(number,type(number))
print(ratio,type(ratio))
print(description,type(description))
print(doit,type(doit))
```
The data type can be explicitly defined using the float() and int() commands.
```
number = int(5.)
ratio = float(0.15)
print(number,type(number))
print(ratio,type(ratio))
```
Note: if you are testing for the type of a python object you should use `isinstance`
```
x = 'True'
if isinstance(x,str):
print('{} is a string'.format(x))
else:
print('{} is not a string'.format(x))
```
One thing to be careful about is that Python is case sensitive.
```
N = 20
n = 10
print(N,n)
```
## Lists
One of the most useful data structures in Python is the `list`.
```
grades = [90.0, 67.0, 85.0, 76.0, 98.0, 70.0]
```
Lists are defined with square brackets and delineated by commas. Note that there is another data type called `sequences` denoted by `( )` which are immutable (cannot be changed) once created. Lets try to do some list manipulations with our list of grades above.
Access a single value in a list
```
print(grades)
grades[-1]
```
Note that Python is 0 indexed, i.e. the first value in the list is accessed by `0`. Reverse indexing is done using negative values, starting from -1, which corresponds to the last element.
```
grades[-1]
```
Find the length of a list
```
len(grades)
```
There are multiple ways to append values into a list.
```
print(grades)
grades = grades + [62.0, 82.0, 59.0]
print(grades)
grades.append(88.0)
print(grades)
```
You can use the standard indexing method shown above to change a value within the array.
```
grades[1] = 68.0
print(grades)
```
Slicing is another important operation
```
print(grades)
grades[2:5]
grades[0:4]
grades[:4]
grades[4:]
```
Note that the range of values does not include the last indexed! This is important to remember for more than lists but we will get to that later.
```
grades[4:11]
```
Another property of lists is that you can put different types in them at the same time. This can be important to remember if you may have both `int` and `float` types.
```
remember = [int("2"), 2, 2.0, "2.0"]
print(remember)
```
A list can also hold any data type or data structure inside it, for example a list inside a list (referred to as nested lists) is helpful in defining matrices (although we will find a better way to do this later).
```
matrix_a = [[1],[2],[3]]
remember[0] / 1
remember[1] / 1
remember[2] / 1
```
Finally, one of the more useful list creation functions is `range` which creates a list with the bounds requested. This creates a special type within Python, but it acts like an array.
```
values = range(3,7)
print(values,type(values))
print(values[0],values[1],values[2],values[-1])
for i in range(3, 7):
print(i)
```
## Comments
Comments can be added to code using the `#` character. Anything after `#` on a line is ignored.
```
# Set up the parameters associated with the partition
N = 10 # Number of partitions to use
b = 1.0 # The right endpoint of the interval
a = 0.0 # The left endpoint of the interval
delta_x = (b-a)/float(N) # The width of each interval
print("The interval width is {0}".format(delta_x))
```
The python style guide [PEP 8](http://www.python.org/dev/peps/pep-0008) however discourages in-line comments
## Control Flow
In this section a number of different ways to control and define which commands are executed are given. The commands include conditional expressions like 'if' blocks that decide individual sets of commands to execute. It also includes 'for' loops which define a sequence of commands to execute in order. Finally, the 'while' loop is given which will loop through a set of commands until some condition is met.
### `if`
This is the basic logical control. A set of instructions is executed if a given condition is met. Note that Python decides what set of commands to execute based on how the code is indented. The '{' and '}' characters have a very different meaning in Python than in C, C++, or Java.
Note: See full list of operators supported in python [here](https://www.tutorialspoint.com/python/python_basic_operators.htm)
```
x = 5
if x > 5:
itsBig = True
print("x is greater than 5")
elif x < 5:
itsBig = False
print("x is less than 5")
else:
itsBig = not True
print("x is equal to 5")
print("The value of itsBig is {0}".format(itsBig))
```
### `for` loops
The `for` statements provide the most common type of loops in Python. The idea is that a set of commands will be repeated for a fixed number of times. The command requires a variable that can be iterated over, and each time the loop repeats a new value from the variable is used. For example, if an array is given the `for` loop will iterate over each value within the array. (there is also a `while` construct).
```
accumulator = 0
for i in range(-10, 5, 3):
accumulator += 1
print(i)
print("The number of times the loop repeated is {0}".format(accumulator))
```
### iterating over lists
It is often useful to iterate over members of lists directly
```
for animal in ['cat', 'dog', 'chinchilla']:
print(animal)
```
`enumerate` is also a very useful builtin when you need both an index and a list member
```
for (i, animal) in enumerate(['cat', 'dog', 'chinchilla']):
if i%2 == 0:
print(i, animal)
```
The above can be written in a single line and also save the outputs to a new list, by using list comprehension
```
animal_new = [ animal.capitalize() for animal in ['cat', 'dog', 'chinchilla']]
print(animal_new)
range_new = [ i for i in range(3,7)]
print(range_new)
type(range(3,7))
x = range_new.copy()
print(x)
x[2] = 12
print(x)
print(range_new)
```
### A quick exercise:
do you remember the list remember (which is a list of different types)? write a one line list comprehension to return a list of types in remember
```
types = [ type(i) for i in remember]
types
print(remember)
print(types)
```
Related to the `for` statement are the control statements `break` and `continue`. Ideally we can create a loop with logic that can avoid these but sometimes code can be more readable with judicious use of these statements. This is especially true for `while` loops and separate checks have to be made for iteration counts.
```
# For each n in [2, 10), print a factorization witness if n is composite,
# otherwise report it as prime.  Demonstrates `break` inside a nested loop.
for n in range(2, 10):
    is_prime = True
    for x in range(2, n):
        if n % x == 0:
            # Use integer division: with `n / x` Python 3 prints a float,
            # e.g. "8 equals 2 * 4.0" instead of "8 equals 2 * 4".
            print(n, 'equals', x, '*', n // x)
            is_prime = False
            break
    if is_prime:
        print("%s is a prime number" % (n))
```
### The `while` Loop
The set of commands in a while loop are executed while a given condition is True.
```
# Count `top` down while counting `bottom` up until they cross.
# NOTE: the original initialized top=0, bottom=10, so `top > bottom` was
# False on entry and the loop body never ran -- defeating the demo's point
# of showing a while loop executing while its condition is True.
top = 10
bottom = 0
while (top > bottom):
    print("top: {0}, bottom: {1}".format(top,bottom))
    top -= 1
    bottom += 1
```
The `pass` statement might appear fairly useless as it simply does nothing but can provide a stub to remember to come back and implement something.
```
def my_func(x):
# Remember to implement this later!
pass
```
### Defining Functions
The last statement above defines a function in Python with an argument called `x`. Functions can be defined and do lots of different things, here are a few examples.
```
def my_print_function(x):
print(x)
my_print_function(3)
def my_add_function(a, b):
return(a + b)
my_add_function(3.0, 5.0)
```
A variable can be given a default value while defining the function, this value remains unchanged unless the user specifies a different value
```
def my_crazy_function(a, b, c=1.0):
d = a + b**c
return d
my_crazy_function(2.0, 3.0), my_crazy_function(2.0, 3.0, 2.0), my_crazy_function(2.0, 3.0, c=2.0)
def my_other_function(a, b, c=1.0):
return a + b, a + b**c, a + b**(3.0 / 7.0)
x,y,z = my_other_function(2.0, 3.0, c=2.0)
print(x)
```
Let's try writing a bit more of a complex (and useful) function. The Fibonacci sequence is formed by adding the previous two numbers of the sequence to get the next value (starting with `[0, 1]`).
```
def fibonacci(n):
    """Return a list of the Fibonacci sequence up to n (inclusive).

    Parameters
    ----------
    n : int
        Inclusive upper bound for the values in the sequence.

    Returns
    -------
    list of int
        Fibonacci numbers starting from [0, 1], none greater than n.
    """
    values = [0, 1]
    # Test the *next* value before appending it.  The original tested
    # values[-1] after appending, so the returned list always overshot the
    # bound (e.g. fibonacci(100) ended with 144), contradicting "up to n".
    while values[-1] + values[-2] <= n:
        values.append(values[-1] + values[-2])
    print(values)
    return values

fibonacci(100)
```
There are several other important data structures that are useful in python including
* tuples/sequences
* sets
* **dictionaries**
you can read more about them [here](https://docs.python.org/3/tutorial/datastructures.html)
## Exception Handling
Python has a very rich syntax for handling errors and exceptions which, if used sparingly can be useful when you want to fail gracefully or give the user more information about where and how a function fails.
```
def isstring(x):
    """Check whether x is a string.

    Parameters
    ----------
    x : any python object

    Returns
    -------
    bool
        True if x is a string.  (The function never returns False: for a
        non-string it raises instead -- the original docstring's "False
        otherwise" was wrong, and its trailing `return False` was
        unreachable dead code after the raise.)

    Raises
    ------
    TypeError
        if x is not a string
    """
    if isinstance(x, str):
        print('{} is a string'.format(x))
        return True
    raise TypeError('{} is not a string'.format(x))
x = 5.
isstring(x)
```
If you want to catch an exception and continue execution you can use the `try`-`except` syntax
```
print(remember)
for r in remember:
try:
isstring(r)
except TypeError as err:
print(err)
```
### help(), ? and `tab` are your friends
Use the `help()` function or a `?` at the end of a function to see the respective function's documentation. One could also use tab key to autocomplete and view the list of available functions.
```
help(isstring)
import numpy
x = numpy.array(range(3))
x
x.max?
```
## Coding Style
It is very important to write readable and understandable code.
This is a practical matter.
There are times when you have to go back and make changes to code you have not used in a long time. More importantly, coding is a shared activity, and if your code is not readable then it is not of any use.
Here are a few things to keep in mind while programming in and out of this class, we will work on this actively as the semester progresses as well. The standard for which Python program are written to is called [PEP 8](http://www.python.org/dev/peps/pep-0008) and contains the following basic guidelines:
- Use 4-space indentation, no tabs
- Wrap lines that exceed 80 characters
- Use judicious use of blank lines to separate out functions, classes, and larger blocks of contained code
- Comment! Also, put comments on their own line when possible
- Use `docstrings` (function descriptions)
- Use spaces around operators and after commas, `a = f(1, 2) + g(3, 4)`
- Name your classes and functions consistently.
- Use `CamelCase` for classes
- Use `lower_case_with_underscores` for functions and variables
- When in doubt be verbose with your comments and names of variables, functions, and classes
### Peer Review
To help all of us learn from each other what coding styles are easier to read we should be doing peer-reviews of the coding portions of the assignments. After the first assignment is turned in we will review a general template for code review. Please be as thorough and helpful as you can!
### Example: why is this actually a poor piece of code?
```
# NOTE(review): deliberately-poor example kept verbatim -- it is the subject
# of the "why is this actually a poor piece of code?" exercise above.
def isstring(x):
    """ function to check if x is a string
    Parameters
    ----------
    x : any python object
    Returns
    -------
    bool
        True if x is a string, False otherwise.
    Raises
    ------
    TypeError
        if x is not a string
    """
    # Things to spot: the docstring promises "False otherwise" but the code
    # raises instead, and printing is a surprising side effect for a predicate.
    if isinstance(x, str):
        print('{} is a string'.format(x))
        return True
    else:
        raise TypeError('{} is not a string'.format(x))
        return False  # unreachable -- the raise above always exits first
```
## Jupyter Notebooks
We will use a lot of Jupyter notebooks in this class for both class notes (what you are looking at now) and for turning in homework. The Jupyter notebook allows for the inline inclusion of a number of different types of input, the most critical will be
- Code (python or otherwise) and
- Markdown which includes
- $\LaTeX$,
- HTML
Jupyter notebooks allow us to organize and comment on our efforts together along with writing active documents that can be modified in-situ to our work. This can lead to better practice of important ideas such as reproducibility in our work.
## Debugging
(based on Jessica Hamrick's [debugging demo](https://github.com/jhamrick/nbgrader-demo))
Debugging is one of the most critical tools we have at our disposal. Apart from standard inspection approaches (`print` statements) the Jupyter notebook has a number of ways to debug as well.
Jupyter notebooks provide an interface to the python debugger `pdb`. The easiest way to use this functionality is by using the "magic" `%pdb` at the top of your notebook which will allow cause the notebook to jump into the python debugger anytime an exception is reached. This will allow you to step through your code and figure out what is wrong. If you want to step through code or just activate the trace back for the current cell use the `%debug` magic.
```
# for inline plotting in the notebook
%matplotlib inline
# debugger: %pdb makes the notebook drop into pdb whenever an exception is raised
%pdb
import numpy
import matplotlib.pyplot as plt
def plot_log():
    # NOTE(review): this cell is intentionally buggy -- it exists so the
    # %pdb/%debug walkthrough has an error to inspect.  Do not "fix" it.
    figure, axis = plt.subplots(2, 1)
    x = numpy.linspace(1, 2, 10)
    axis[0].plot(x, numpy.log(x)) # <-- this line is wrong in multiple ways
    plt.show()
plot_log() # Call the function, generate plot
```
Paths to debugging
1. Check the traceback
1. Use the `%debug` magic
1. `print` statements
1. Try a more informative IDE like [PyCharm](https://www.jetbrains.com/pycharm/)
and if all else fails
1. Copy and paste your error message into Google to see if anyone else has experienced similar problems. You'd be surprised how often this works!
2. Search [StackOverflow](https://stackoverflow.com/questions/tagged/python)
3. Consult fellow classmates
4. Consult the TA's and Professor (absolute last resort ;^)
### Don't forget to have fun...
Debugging is puzzle solving...the better you get at it, the better you can manage the frustration of numerical methods
| github_jupyter |
```
%matplotlib inline
```
# Visualizing the stock market structure
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
## Learning a graph structure
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
## Clustering
We use clustering to group together quotes that behave similarly. Here,
amongst the `various clustering techniques <clustering>` available
in the scikit-learn, we use `affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
## Embedding in 2D space
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use `manifold` techniques to retrieve 2D
embedding.
## Visualization
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
```
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import pandas as pd
from sklearn import cluster, covariance, manifold
print(__doc__)
# #############################################################################
# Retrieve the data from Internet
# The data is from 2003 - 2008. This is reasonably calm: (not too long ago so
# that we get high-tech firms, and before the 2008 crash). This kind of
# historical data can be obtained from APIs like the quandl.com and
# alphavantage.co ones.
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'McDonald\'s',
'PEP': 'Pepsi',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas Instruments',
'XRX': 'Xerox',
'WMT': 'Wal-Mart',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(sorted(symbol_dict.items())).T
# URL template for the per-symbol CSV files.  It is loop-invariant, so it is
# built once here instead of being re-created on every iteration as before.
url = ('https://raw.githubusercontent.com/scikit-learn/examples-data/'
       'master/financial-data/{}.csv')

quotes = []
for symbol in symbols:
    # progress messages go to stderr so they do not pollute captured stdout
    print('Fetching quote history for %r' % symbol, file=sys.stderr)
    quotes.append(pd.read_csv(url.format(symbol)))

# Stack the per-symbol series into (n_symbols, n_days) price arrays
close_prices = np.vstack([q['close'] for q in quotes])
open_prices = np.vstack([q['open'] for q in quotes])

# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
# #############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphicalLassoCV()

# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery.
# After the transpose, X has shape (n_days, n_symbols): one column per stock.
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)

# #############################################################################
# Cluster using affinity propagation.
# labels[i] is the cluster index assigned to symbol i.
_, labels = cluster.affinity_propagation(edge_model.covariance_,
                                         random_state=0)
n_labels = labels.max()
for i in range(n_labels + 1):
    print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))

# #############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
    n_components=2, eigen_solver='dense', n_neighbors=6)
# embedding has shape (2, n_symbols): one (x, y) column per stock
embedding = node_position_model.fit_transform(X.T).T
# #############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')

# Display a graph of the partial correlations: rescale the precision matrix
# by its diagonal so entry (i, j) becomes the partial correlation of i and j
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
# keep only the strongest links: upper triangle (each pair once), |value| > 0.02
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)

# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
            cmap=plt.cm.nipy_spectral)

# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
#            linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
            for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)            # edge color encodes |partial correlation|
lc.set_linewidths(15 * values)  # edge width encodes the strength as well
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
        zip(names, labels, embedding.T)):

    # signed distances from this node to all nodes along each axis;
    # the node itself is masked out with a dummy value of 1
    dx = x - embedding[0]
    dx[index] = 1
    dy = y - embedding[1]
    dy[index] = 1
    # direction of the nearest neighbor along each axis: the label is then
    # pushed away from that neighbor to reduce overlap
    this_dx = dx[np.argmin(np.abs(dy))]
    this_dy = dy[np.argmin(np.abs(dx))]
    if this_dx > 0:
        horizontalalignment = 'left'
        x = x + .002
    else:
        horizontalalignment = 'right'
        x = x - .002
    if this_dy > 0:
        verticalalignment = 'bottom'
        y = y + .002
    else:
        verticalalignment = 'top'
        y = y - .002
    plt.text(x, y, name, size=10,
             horizontalalignment=horizontalalignment,
             verticalalignment=verticalalignment,
             bbox=dict(facecolor='w',
                       edgecolor=plt.cm.nipy_spectral(label / float(n_labels)),
                       alpha=.6))

# pad the axes limits so markers and labels are not clipped at the border
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
         embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
         embedding[1].max() + .03 * embedding[1].ptp())

plt.show()
| github_jupyter |
This notebook is part of the $\omega radlib$ documentation: https://docs.wradlib.org.
Copyright (c) 2018, $\omega radlib$ developers.
Distributed under the MIT License. See LICENSE.txt for more info.
# Converting Reflectivity to Rainfall
Reflectivity (Z) and precipitation rate (R) can be related in form of a power law $Z=a \cdot R^b$. The parameters ``a`` and ``b`` depend on the type of precipitation (i.e. drop size distribution and water temperature). $\omega radlib$ provides a couple of functions that could be useful in this context.
```
import wradlib as wrl
import matplotlib.pyplot as pl
import warnings
warnings.filterwarnings('ignore')
try:
get_ipython().magic("matplotlib inline")
except:
pl.ion()
import numpy as np
```
The following example demonstrates the steps to convert from the common unit *dBZ* (decibel of the reflectivity factor *Z*) to rainfall intensity (in the unit of mm/h). This is an array of typical reflectivity values (**unit: dBZ**)
```
dBZ = np.array([20., 30., 40., 45., 50., 55.])
print(dBZ)
```
Convert to reflectivity factor Z (**unit**: $mm^6/m^3$):
```
Z = wrl.trafo.idecibel(dBZ)
print(Z)
```
Convert to rainfall intensity (**unit: mm/h**) using the Marshall-Palmer Z(R) parameters:
```
R = wrl.zr.z_to_r(Z, a=200., b=1.6)
print(np.round(R, 2))
```
Convert to rainfall depth (**unit: mm**) assuming a rainfall duration of five minutes (i.e. 300 seconds)
```
depth = wrl.trafo.r_to_depth(R, 300)
print(np.round(depth, 2))
```
## An example with real radar data
The following example is based on observations of the DWD C-band radar on mount Feldberg (SW-Germany).
The figure shows a 15 minute accumulation of rainfall which was produced from three consecutive radar
scans at 5 minute intervals between 17:30 and 17:45 on June 8, 2008.
The radar data are read using [wradlib.io.read_dx](https://docs.wradlib.org/en/latest/generated/wradlib.io.radolan.read_dx.html) function which returns an array of dBZ values and a metadata dictionary (see also [Reading-DX-Data](../fileio/wradlib_reading_dx.ipynb#Reading-DX-Data)). The conversion is carried out the same way as in the example above. The plot is produced using
the function [wradlib.vis.plot_ppi](https://docs.wradlib.org/en/latest/generated/wradlib.vis.plot_ppi.html).
```
def read_data(dtimes):
    """Read raw DX sweeps from radar Feldberg for a list of datetime strings.

    Parameters
    ----------
    dtimes : list of str
        Timestamps as they appear in the DX file names (e.g. "0806021735").

    Returns
    -------
    numpy.ndarray
        Array of shape (len(dtimes), 360, 128) with one dBZ sweep
        (360 azimuths x 128 range bins) per timestamp.
    """
    data = np.empty((len(dtimes), 360, 128))
    for i, dtime in enumerate(dtimes):
        f = wrl.util.get_wradlib_data_file(
            'dx/raa00-dx_10908-{0}-fbg---bin.gz'.format(dtime))
        # read_dx returns (array, metadata); the metadata is not needed here,
        # so discard it instead of binding it to an unused `attrs` variable.
        data[i], _ = wrl.io.read_dx(f)
    return data
```
Read data from radar Feldberg for three consecutive 5 minute intervals and compute the accumulated rainfall depth.
```
# Read
dtimes = ["0806021735","0806021740","0806021745"]
dBZ = read_data(dtimes)
# Convert to rainfall intensity (mm/h)
Z = wrl.trafo.idecibel(dBZ)
R = wrl.zr.z_to_r(Z, a=200., b=1.6)
# Convert to rainfall depth (mm)
depth = wrl.trafo.r_to_depth(R, 300)
# Accumulate 15 minute rainfall depth over all three 5 minute intervals
accum = np.sum(depth, axis=0)
```
Plot PPI of 15 minute rainfall depth
```
pl.figure(figsize=(10,8))
ax, cf = wrl.vis.plot_ppi(accum, cmap="viridis")
pl.xlabel("Easting from radar (km)")
pl.ylabel("Northing from radar (km)")
pl.title("Radar Feldberg\n15 min. rainfall depth, 2008-06-02 17:30-17:45 UTC")
cb = pl.colorbar(cf, shrink=0.8)
cb.set_label("mm")
pl.xlim(-128,128)
pl.ylim(-128,128)
pl.grid(color="grey")
```
| github_jupyter |
# Abstract classes and Interfaces
추상 클래스와 인터페이스
---
## Abstract classes
추상 클래스는 계층구조를 잡기 위해서만 정의된 클래스로
- `abstract` 키워드를 사용
- 직접 new로 생성자를 호출해 객체를 만들 수 없다 (인터페이스와 마찬가지)
- 추상클래스의 특징
- 멤버변수와 생성자를 정의할 수 있음!!! (인터페이스와의 차이점)
- 추상메소드(내용이 정의되지 않은 메소드)를 선언할 수 있음
- 이것은 인터페이스와 마찬가지임
- 내용이 있는 메소드도 정의 가능하긴 함 (인터페이스도 최근 자바 버전에서는 디폴트 메소드 정의 가능)
- 템플릿 메소드: 추상클래스에서 내용이 있는 메소드를 유용하게 활용하는 방식 (교재에서 찾아보기)
```
포유류
|
개
/ \
푸들 시바
```
쫑쫑이(푸들), 해피(시바), 토토(그냥 개 잡종? 잘 모름)
```
abstract class Mammal { // abstract base class for mammals
    int lifeSpan; // life span in years
    Mammal(int lifeSpan) { this.lifeSpan = lifeSpan; }
}
class Dog extends Mammal {
    Dog() { super(13); } // a dog's life span is roughly 13 years
}
class Poodle extends Dog {
}
class Shiba extends Dog {
}
Dog 쫑쫑이 = new Poodle();
Dog 해피 = new Shiba();
Dog 개 = new Dog();
Mammal m = new Mammal(); // calling new on the abstract Mammal class directly makes no sense (compile error)
```
----
## Interfaces
박쥐는 조류가 아닌 포유류이다. 그런데 조류인 갈매기처럼 박쥐도 날개로 나는 동작을 한다.
생물 분류도를 따라 대략 다음과 같은 클래스 계층 구조로 표시해 볼 수 있다. 끝에 C자를 붙인 것은 클래스임을 나타내기 위해서이다.
```
척추동물C
/ \
조류C 포유류C
| |
갈매기C 박쥐C
```
```
abstract class Vertebrete { // vertebrate class
}
abstract class Aves // bird class
    extends Vertebrete {
}
class Seagull // seagull class
    extends Aves {
}
abstract class Mammal // mammal class
    extends Vertebrete {
}
class Bat // bat class
    extends Mammal {
}
그런데 난다(fly)는 메소드는 어디에 배치하는 것이 좋을까?
대부분의 조류가 날 수 있기 때문에 일단 조류에 배치하는 것은 적절하다.
타조, 펭귄 등 일부 날지 못하는 새의 경우에 한해서만 예외를 발생시키도록 오버라이딩하면 된다.
하지만 조류가 아닌 포유류 중에도 박쥐는 날개로 날 수 있는 동물이다.
물론 조류의 fly 메소드와는 별도로 이름만 같은 fly 메소드를 박쥐 클래스에 작성할 수는 있다.
하지만 그렇게 할 경우 갈매기 등의 조류와 박쥐를 일괄적으로 처리하면서 fly를 동적 바인딩할 수가 없다.
날개로 날 수 있는 동물로만 이루어진 데이터 구조를 순회하며 fly 메소드를 호출한다던가 하는 방식의 Java 프로그램을 작성하기 곤란하다는 말이다.
그렇다고 조류와 박쥐를 포함하는 포유류의 공통 상위 클래스인 척추동물에 fly 메소드를 두게 설계하는 것은 무리다.
조류처럼 대부분 다 날 수 있는 동물 분류도 있겠지만 척추동물 중에는 날지 못하는 종들이 상당히 많기 때문이다.
특히 포유류의 경우 날지 못하는 동물이 대부분이다.
이렇게 이미 설계된 계층구조의 분류를 따르지 않지만 공통점이 있는 다른 특징(주로 공통적인 동작/상호작용 등)을
Java와 같은 언어에서는 **인터페이스**로 표현한다.
난다(fly)는 메소드를 가진 인터페이스를 `난다I`라고 부르기로 한다면,
앞서 클래스 계층 구조에서 조류 클래스와 박쥐 클래스가 `난다I`를 구현한다는 설계까지 포함해 아래와 같이 나타낼 수 있다.
```
척추동물C
난다I. / \
`.조류C 포유류C
| | .난다I
갈매기C 박쥐C.'
```
어떤 클래스가 상위(부모) 클래스의 특징을 물려받는다는 의미의 *클래스 상속*을 `extends`라는 키워드로 표현하는 것을 지금까지 다룬 예제에서도 보았다.
그림으로 나타낼 때 주로 실선 또는 실선으로 된 화살표로 클래스 상속 관계를 표현한다.
어떤 클래스가 인터페이스의 특징을 만족한다는 의미의 *인터페이스 구현*은 `implements`라는 키워드로 표현한다.
그림으로 나타낼 때 주로 점선 또는 점선으로 된 화살표로 인터페이스 구현 관계를 표현한다.
*인터페이스 구현* 대신 *인터페이스 상속*이라고 말하는 경우도 있다.
```
interface Flying { // interface for animals that fly
    public void fly ();
}
abstract class Vertebrete { // vertebrate class
}
abstract class Aves // bird class
    extends Vertebrete
    implements Flying {
    @Override
    public void fly() { System.out.println("푸드덕! 푸드덕!"); }
}
class Seagull // seagull class
    extends Aves {
}
abstract class Mammal // mammal class
    extends Vertebrete {
}
class Bat // bat class
    extends Mammal
    implements Flying {
    @Override
    public void fly() { System.out.println("바드덕? 바드덕?"); }
}
// Define the array animals of objects implementing the Flying interface
Flying[] animals = { new Seagull(), new Bat(), new Seagull() };
for (Flying a : animals) a.fly();
```
Java는 하나의 클래스가 여러 부모 클래스의 특징을 한꺼번에 물려받는 *다중 클래스 상속*(multiple class inheritance)을 허용하지 않으므로 인터페이스가 매우 요긴하게 활용된다.
참고로 클래스와 달리 인터페이스의 경우는 하나의 클래스가 여러 인터페이스의 특징을 동시에 만족하는 *다중 인터페이스 구현*(multiple interface implementation)이 가능하다.
예를 들면 알을 낳는 동물의 특징을 `산란I`라는 인터페이스를 통해 나타낸다면 `조류C`의 경우 `난다I`와 `산란I` 인터페이스를 동시에 구현하도록 설계할 수 있다는 말이다.
위의 그림에 `산란I`클래스와 포유류 중에 특이하게 알을 낳는 오리너구리에 대한 클래스를 추가한 계층구조를 아래와 같은 그림으로 나타내 보았다.
```
_ 척추동물C _
_/ \_
난다I,산란I. / \
`.조류C 포유류C
/ 난다I. / \ .산란I
갈매기C `.박쥐C 오리너구리C.'
```
```
interface Flying { // 날아다니는 동물에 대한 인터페이스
public void fly ();
}
interface EggLaying { // 알을 낳는 동물에 대한 인터페이스
public void layEggs ();
}
class Vertebrete { // 척추동물 클래스
}
class Aves // 조류 클래스
extends Vertebrete
implements Flying, EggLaying { // 다중 인터페이 구현 가능
@Override
public void fly() { System.out.println("푸드덕! 푸드덕!"); }
@Override
public void layEggs() { System.out.println("새알 퐁퐁"); }
}
class Seagull // 갈매기 클래스
extends Aves {
}
class Mammal // 포유류 클래스
extends Vertebrete {
}
class Bat // 박쥐 클래스
extends Mammal
implements Flying {
@Override
public void fly() { System.out.println("바드덕? 바드덕?"); }
}
class Duckbill // 오리너구리 클래스
extends Mammal
implements EggLaying {
@Override
public void layEggs() { System.out.println("오리너구리 퐁퐁"); }
}
// Flying 인터페이스를 구현하는 오브젝트로 이루어 배열 fanimals 정의
Flying[] fanimals = { new Seagull(), new Bat(), new Seagull() };
for (var a : fanimals) a.fly();
// EggLaying 인터페이스를 구현하는 오브젝트로 이루어진 배열 eanimals 정의
EggLaying[] eanimals = { new Seagull(), new Duckbill(), new Seagull() };
for (var a : eanimals) a.layEggs();
```
## Interface hierarchy
인터페이스도 계층 구조를 이루도록 설계할 수 있다.
Java에서 클래스는 다중 상속이 되지 않는 반면 인터페이스는 다중상속이 가능하다.
```
interface Gasoline {
void reFuel(); // 주유구에 연료 주유
}
interface Electric {
void reCharge(); // 충전구에 전기 충전
}
interface PluginHybrid // 충전구, 주유구 둘다 있는 인터페이스
extends Gasoline, Electric {
}
```
| github_jupyter |
<h1 style='color: green; font-size: 36px; font-weight: bold;'>Data Science - Regressão Linear</h1>
# <font color='red' style='font-size: 30px;'>Conhecendo o Dataset</font>
<hr style='border: 2px solid red;'>
## Importando bibliotecas
```
import pandas as pd
import numpy as np
```
## Bibliotecas opcionais
```
import warnings
warnings.filterwarnings('ignore') # ou warnings.filterwarnings(action='once')
```
## O Dataset e o Projeto
<hr>
### Fonte: https://www.kaggle.com/greenwing1985/housepricing
### Descrição:
<p style='font-size: 18px; line-height: 2; margin: 10px 50px; text-align: justify;'>Nosso objetivo neste exercício é criar um modelo de machine learning, utilizando a técnica de Regressão Linear, que faça previsões sobre os preços de imóveis a partir de um conjunto de características conhecidas dos imóveis.</p>
<p style='font-size: 18px; line-height: 2; margin: 10px 50px; text-align: justify;'>Vamos utilizar um dataset disponível no Kaggle que foi gerado por computador para treinamento de machine learning para iniciantes. Este dataset foi modificado para facilitar o nosso objetivo, que é fixar o conhecimento adquirido no treinamento de Regressão Linear.</p>
<p style='font-size: 18px; line-height: 2; margin: 10px 50px; text-align: justify;'>Siga os passos propostos nos comentários acima de cada célula e bons estudos.</p>
### Dados:
<ul style='font-size: 18px; line-height: 2; text-align: justify;'>
<li><b>precos</b> - Preços do imóveis</li>
<li><b>area</b> - Área do imóvel</li>
<li><b>garagem</b> - Número de vagas de garagem</li>
<li><b>banheiros</b> - Número de banheiros</li>
<li><b>lareira</b> - Número de lareiras</li>
<li><b>marmore</b> - Se o imóvel possui acabamento em mármore branco (1) ou não (0)</li>
<li><b>andares</b> - Se o imóvel possui mais de um andar (1) ou não (0)</li>
</ul>
## Leitura dos dados
O dataset está na pasta "Dados" com o nome "HousePrices_HalfMil.csv" e usa como separador ";".
```
dados = pd.read_csv('../Dados/HousePrices_HalfMil.csv', sep=';')
```
## Visualizar os dados
```
dados
```
## Verificando o tamanho do dataset
```
dados.shape
```
# <font color='red' style='font-size: 30px;'>Análises Preliminares</font>
<hr style='border: 2px solid red;'>
## Estatísticas descritivas
```
dados.describe().round(2)
```
## Matriz de correlação
<p style='font-size: 18px; line-height: 2; margin: 10px 50px; text-align: justify;'>O <b>coeficiente de correlação</b> é uma medida de associação linear entre duas variáveis e situa-se entre <b>-1</b> e <b>+1</b> sendo que <b>-1</b> indica associação negativa perfeita e <b>+1</b> indica associação positiva perfeita.</p>
### Observe as correlações entre as variáveis:
<ul style='font-size: 16px; line-height: 2; text-align: justify;'>
<li>Quais são mais correlacionadas com a variável dependente (Preço)?</li>
<li>Qual o relacionamento entre elas (positivo ou negativo)?</li>
<li>Existe correlação forte entre as variáveis explicativas?</li>
</ul>
```
dados.corr().round(4)
```
# <font color='red' style='font-size: 30px;'>Comportamento da Variável Dependente (Y)</font>
<hr style='border: 2px solid red;'>
# Análises gráficas
<img width='700px' src='../Dados/img/Box-Plot.png'>
## Importando biblioteca seaborn
```
import seaborn as sns
```
## Configure o estilo e cor dos gráficos (opcional)
```
# palette -> Accent, Accent_r, Blues, Blues_r, BrBG, BrBG_r, BuGn, BuGn_r, BuPu, BuPu_r, CMRmap, CMRmap_r, Dark2, Dark2_r, GnBu, GnBu_r, Greens, Greens_r, Greys, Greys_r, OrRd, OrRd_r, Oranges, Oranges_r, PRGn, PRGn_r, Paired, Paired_r, Pastel1, Pastel1_r, Pastel2, Pastel2_r, PiYG, PiYG_r, PuBu, PuBuGn, PuBuGn_r, PuBu_r, PuOr, PuOr_r, PuRd, PuRd_r, Purples, Purples_r, RdBu, RdBu_r, RdGy, RdGy_r, RdPu, RdPu_r, RdYlBu, RdYlBu_r, RdYlGn, RdYlGn_r, Reds, Reds_r, Set1, Set1_r, Set2, Set2_r, Set3, Set3_r, Spectral, Spectral_r, Wistia, Wistia_r, YlGn, YlGnBu, YlGnBu_r, YlGn_r, YlOrBr, YlOrBr_r, YlOrRd, YlOrRd_r, afmhot, afmhot_r, autumn, autumn_r, binary, binary_r, bone, bone_r, brg, brg_r, bwr, bwr_r, cividis, cividis_r, cool, cool_r, coolwarm, coolwarm_r, copper, copper_r, cubehelix, cubehelix_r, flag, flag_r, gist_earth, gist_earth_r, gist_gray, gist_gray_r, gist_heat, gist_heat_r, gist_ncar, gist_ncar_r, gist_rainbow, gist_rainbow_r, gist_stern, gist_stern_r, gist_yarg, gist_yarg_r, gnuplot, gnuplot2, gnuplot2_r, gnuplot_r, gray, gray_r, hot, hot_r, hsv, hsv_r, icefire, icefire_r, inferno, inferno_r, jet, jet_r, magma, magma_r, mako, mako_r, nipy_spectral, nipy_spectral_r, ocean, ocean_r, pink, pink_r, plasma, plasma_r, prism, prism_r, rainbow, rainbow_r, rocket, rocket_r, seismic, seismic_r, spring, spring_r, summer, summer_r, tab10, tab10_r, tab20, tab20_r, tab20b, tab20b_r, tab20c, tab20c_r, terrain, terrain_r, viridis, viridis_r, vlag, vlag_r, winter, winter_r
sns.set_palette("Accent")
# style -> white, dark, whitegrid, darkgrid, ticks
sns.set_style("darkgrid")
```
## Box plot da variável *dependente* (y)
### Avalie o comportamento da distribuição da variável dependente:
<ul style='font-size: 16px; line-height: 2; text-align: justify;'>
<li>Parecem existir valores discrepantes (outliers)?</li>
<li>O box plot apresenta alguma tendência?</li>
</ul>
https://seaborn.pydata.org/generated/seaborn.boxplot.html?highlight=boxplot#seaborn.boxplot
```
ax = sns.boxplot(data=dados['precos'], orient='v', width=0.2)
ax.figure.set_size_inches(12, 6)
ax.set_title('Preço dos Imóveis', fontsize=20)
ax.set_ylabel('$', fontsize=16)
ax
```
## Investigando a variável *dependente* (y) juntamente com outras característica
Faça um box plot da variável dependente em conjunto com cada variável explicativa (somente as categóricas).
### Avalie o comportamento da distribuição da variável dependente com cada variável explicativa categórica:
<ul style='font-size: 16px; line-height: 2; text-align: justify;'>
<li>As estatísticas apresentam mudança significativa entre as categorias?</li>
<li>O box plot apresenta alguma tendência bem definida?</li>
</ul>
### Box-plot (Preço X Garagem)
```
ax = sns.boxplot(y='precos', x='garagem', data=dados, orient='v', width=0.5)
ax.figure.set_size_inches(12, 6)
ax.set_title('Preço dos Imóveis', fontsize=20)
ax.set_ylabel('$', fontsize=16)
ax.set_xlabel('Número de Vagas de Garagem', fontsize=16)
ax
```
### Box-plot (Preço X Banheiros)
```
ax = sns.boxplot(y='precos', x='banheiros', data=dados, orient='v', width=0.5)
ax.figure.set_size_inches(12, 6)
ax.set_title('Preço dos Imóveis', fontsize=20)
ax.set_ylabel('$', fontsize=16)
ax.set_xlabel('Número de Banheiros', fontsize=16)
ax
```
### Box-plot (Preço X Lareira)
```
ax = sns.boxplot(y='precos', x='lareira', data=dados, orient='v', width=0.5)
ax.figure.set_size_inches(12, 6)
ax.set_title('Preço dos Imóveis', fontsize=20)
ax.set_ylabel('$', fontsize=16)
ax.set_xlabel('Número de Lareiras', fontsize=16)
ax
```
### Box-plot (Preço X Acabamento em Mármore)
```
ax = sns.boxplot(y='precos', x='marmore', data=dados, orient='v', width=0.5)
ax.figure.set_size_inches(12, 6)
ax.set_title('Preço dos Imóveis', fontsize=20)
ax.set_ylabel('$', fontsize=16)
ax.set_xlabel('Acabamento em Mármore', fontsize=16)
ax
```
### Box-plot (Preço X Andares)
```
ax = sns.boxplot(y='precos', x='andares', data=dados, orient='v', width=0.5)
ax.figure.set_size_inches(12, 6)
ax.set_title('Preço dos Imóveis', fontsize=20)
ax.set_ylabel('$', fontsize=16)
ax.set_xlabel('Mais de um Andar', fontsize=16)
ax
```
## Distribuição de frequências da variável *dependente* (y)
Construa um histograma da variável dependente (Preço).
### Avalie:
<ul style='font-size: 16px; line-height: 2; text-align: justify;'>
<li>A distribuição de frequências da variável dependente parece ser assimétrica?</li>
<li>É possível supor que a variável dependente segue uma distribuição normal?</li>
</ul>
https://seaborn.pydata.org/generated/seaborn.distplot.html?highlight=distplot#seaborn.distplot
```
ax = sns.distplot(dados['precos'])
ax.figure.set_size_inches(12, 6)
ax.set_title('Distribuição de Frequências', fontsize=20)
ax.set_ylabel('Frequências', fontsize=16)
ax.set_xlabel('$', fontsize=16)
ax
```
## Gráficos de dispersão entre as variáveis do dataset
## Plotando o pairplot fixando somente uma variável no eixo y
https://seaborn.pydata.org/generated/seaborn.pairplot.html?highlight=pairplot#seaborn.pairplot
Plote gráficos de dispersão da variável dependente contra cada variável explicativa. Utilize o pairplot da biblioteca seaborn para isso.
Plote o mesmo gráfico utilizando o parâmetro kind='reg'.
### Avalie:
<ul style='font-size: 16px; line-height: 2; text-align: justify;'>
<li>É possível identificar alguma relação linear entre as variáveis?</li>
<li>A relação é positiva ou negativa?</li>
<li>Compare com os resultados obtidos na matriz de correlação.</li>
</ul>
```
ax = sns.pairplot(dados, y_vars='precos', x_vars=['area', 'garagem', 'banheiros', 'lareira', 'marmore', 'andares'])
ax.fig.suptitle('Dispersão entre as Variáveis', fontsize=20, y=1.05)
ax
ax = sns.pairplot(dados, y_vars='precos', x_vars=['area', 'garagem', 'banheiros', 'lareira', 'marmore', 'andares'], kind='reg')
ax.fig.suptitle('Dispersão entre as Variáveis', fontsize=20, y=1.05)
ax
```
# <font color='red' style='font-size: 30px;'>Estimando um Modelo de Regressão Linear</font>
<hr style='border: 2px solid red;'>
## Importando o *train_test_split* da biblioteca *scikit-learn*
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
```
from sklearn.model_selection import train_test_split
```
## Criando uma Series (pandas) para armazenar a variável dependente (y)
```
y = dados['precos']
```
## Criando um DataFrame (pandas) para armazenar as variáveis explicativas (X)
```
X = dados[['area', 'garagem', 'banheiros', 'lareira', 'marmore', 'andares']]
```
## Criando os datasets de treino e de teste
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=2811)
```
## Importando *LinearRegression* e *metrics* da biblioteca *scikit-learn*
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
https://scikit-learn.org/stable/modules/classes.html#regression-metrics
```
from sklearn.linear_model import LinearRegression
from sklearn import metrics
```
## Instanciando a classe *LinearRegression()*
```
modelo = LinearRegression()
```
## Utilizando o método *fit()* para estimar o modelo linear utilizando os dados de TREINO (y_train e X_train)
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.fit
```
modelo.fit(X_train, y_train)
```
## Obtendo o coeficiente de determinação (R²) do modelo estimado com os dados de TREINO
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.score
### Avalie:
<ul style='font-size: 16px; line-height: 2; text-align: justify;'>
<li>O modelo apresenta um bom ajuste?</li>
<li>Você lembra o que representa o R²?</li>
<li>Qual medida podemos tomar para melhorar essa estatística?</li>
</ul>
```
print('R² = {}'.format(modelo.score(X_train, y_train).round(2)))
```
## Gerando previsões para os dados de TESTE (X_test) utilizando o método *predict()*
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.predict
```
y_previsto = modelo.predict(X_test)
```
## Obtendo o coeficiente de determinação (R²) para as previsões do nosso modelo
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html#sklearn.metrics.r2_score
```
print('R² = %s' % metrics.r2_score(y_test, y_previsto).round(2))
```
# <font color='red' style='font-size: 30px;'>Obtendo Previsões Pontuais</font>
<hr style='border: 2px solid red;'>
## Criando um simulador simples
Crie um simulador que gere estimativas de preço a partir de um conjunto de informações de um imóvel.
```
area=38
garagem=2
banheiros=4
lareira=4
marmore=0
andares=1
entrada=[[area, garagem, banheiros, lareira, marmore, andares]]
print('$ {0:.2f}'.format(modelo.predict(entrada)[0]))
```
# <font color='red' style='font-size: 30px;'>Métricas de Regressão</font>
<hr style='border: 2px solid red;'>
## Métricas da regressão
<hr>
fonte: https://scikit-learn.org/stable/modules/model_evaluation.html#regression-metrics
Algumas estatísticas obtidas do modelo de regressão são muito úteis como critério de comparação entre modelos estimados e de seleção do melhor modelo, as principais métricas de regressão que o scikit-learn disponibiliza para modelos lineares são as seguintes:
### Erro Quadrático Médio
Média dos quadrados dos erros. Ajustes melhores apresentam $EQM$ mais baixo.
$$EQM(y, \hat{y}) = \frac 1n\sum_{i=0}^{n-1}(y_i-\hat{y}_i)^2$$
### Raíz do Erro Quadrático Médio
Raíz quadrada da média dos quadrados dos erros. Ajustes melhores apresentam $\sqrt{EQM}$ mais baixo.
$$\sqrt{EQM(y, \hat{y})} = \sqrt{\frac 1n\sum_{i=0}^{n-1}(y_i-\hat{y}_i)^2}$$
### Coeficiente de Determinação - R²
O coeficiente de determinação (R²) é uma medida resumida que diz quanto a linha de regressão ajusta-se aos dados. É um valor entre 0 e 1.
$$R^2(y, \hat{y}) = 1 - \frac {\sum_{i=0}^{n-1}(y_i-\hat{y}_i)^2}{\sum_{i=0}^{n-1}(y_i-\bar{y}_i)^2}$$
## Obtendo métricas para o modelo
```
# Compute the regression metrics on the evaluation data.
# The raw (unrounded) mean squared error is computed ONCE and reused below;
# the original cell called metrics.mean_squared_error() twice for no benefit,
# and deriving REQM from the unrounded value avoids compounding rounding error.
eqm_raw = metrics.mean_squared_error(y_test, y_previsto)

EQM = eqm_raw.round(2)            # Erro Quadrático Médio
REQM = np.sqrt(eqm_raw).round(2)  # Raíz do Erro Quadrático Médio
R2 = metrics.r2_score(y_test, y_previsto).round(2)

# assemble the three metrics into a one-column summary table for display
pd.DataFrame([EQM, REQM, R2], ['EQM', 'REQM', 'R²'], columns=['Métricas'])
```
# <font color='red' style='font-size: 30px;'>Salvando e Carregando o Modelo Estimado</font>
<hr style='border: 2px solid red;'>
## Importando a biblioteca pickle
```
import pickle
```
## Salvando o modelo estimado
```
# Persist the trained LinearRegression model to disk with pickle.
# The "with" statement guarantees the file handle is closed even if
# pickle.dump() raises, unlike the manual open()/close() it replaces.
with open('modelo_preço', 'wb') as output:
    pickle.dump(modelo, output)
```
### Em um novo notebook/projeto Python
<h4 style='color: blue; font-weight: normal'>In [1]:</h4>
```sh
import pickle
modelo = open('modelo_preço','rb')
lm_new = pickle.load(modelo)
modelo.close()
area = 38
garagem = 2
banheiros = 4
lareira = 4
marmore = 0
andares = 1
entrada = [[area, garagem, banheiros, lareira, marmore, andares]]
print('$ {0:.2f}'.format(lm_new.predict(entrada)[0]))
```
<h4 style='color: red; font-weight: normal'>Out [1]:</h4>
```
$ 46389.80
```
| github_jupyter |
<img align="right" style="max-width: 200px; height: auto" src="cfds_logo.png">
### Lab 06 - "Supervised Machine Learning - k Nearest Neighbor Classification"
Chartered Financial Data Scientist (CFDS), Autumn Term 2020
In the last lab, you got your hands dirty with supervised learning by using the Gaussian Naive-Bayes (GNB) classifier. You learned how to train a model and to evaluate and interpret its results. In this lab, we will look at another popular algorithm, namely the **k Nearest-Neighbors (kNN)** classifier.
The *discriminative* **k Nearest-Neighbors (kNN)** classifier is a simple, easy to understand, versatile, but powerful machine learning algorithm. Until recently (prior to the advent of deep learning approaches) it was used in a variety of applications such as finance, healthcare, political science, handwriting detection, image recognition and video recognition, e.g. in credit ratings, financial institutes used kNN to predict the solvency of customers.
This classification technique is part of the **discriminative** type of classifiers, which can be distinguished from the **generative** type as shown in the following illustration:
<img align="center" style="max-width: 600px; height: auto" src="supervisedlearning.png">
(Inspired by: 'Machine Learning - A Probabilistic Perspective', Kevin P. Murphy)
As always, pls. don't hesitate to ask all your questions either during the lab, post them in our NextThought lab discussion forum (https://financial-data-science.nextthought.io), or send us an email (using our fds.ai email addresses).
### Lab Objectives:
After today's lab you should be able to:
> 1. Know how to setup a **notebook or "pipeline"** that solves a simple supervised classification task.
> 2. Recognize the distinct **data elements** (features and labels) needed to train and evaluate a supervised machine learning classifier.
> 3. Understand how a Gaussian **k Nearest-Neighbor (kNN)** classifier can be trained and evaluated.
> 4. Know how to use Python's sklearn library to **train** and **evaluate** arbitrary classifiers.
> 5. Understand how to **evaluate** and **interpret** the classification results.
Before we start let's watch a motivational video:
```
from IPython.display import YouTubeVideo
# Microsoft: "AI for Health Program"
# YouTubeVideo('ii-FfE-7C-k', width=800, height=600)
```
### Setup of the Analysis Environment
Similar to the previous labs, we need to import a couple of Python libraries that allow for data analysis and data visualization. In this lab will use the `Pandas`, `Numpy`, `Scikit-Learn`, `Matplotlib` and the `Seaborn` library. Let's import the libraries by the execution of the statements below:
```
# import the numpy, scipy and pandas data science library
import pandas as pd
import numpy as np
# import sklearn data and data pre-processing libraries
from sklearn import datasets
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# import sklearn naive.bayes and k-nearest neighbor classifier library
from sklearn.neighbors import KNeighborsClassifier
# import sklearn classification evaluation library
from sklearn import metrics
from sklearn.metrics import classification_report, confusion_matrix
# import matplotlib data visualization library
import matplotlib.pyplot as plt
import seaborn as sns
```
Enable inline Jupyter notebook plotting:
```
%matplotlib inline
```
Use the 'Seaborn' plotting style in all subsequent visualizations:
```
plt.style.use('seaborn')
```
Set random seed of all our experiments - this insures reproducibility.
```
random_seed = 42
```
## 1. k Nearest-Neighbor (kNN) Classification
Now, let's have a closer look into the non-parametric method used for supervised classification tasks, referred to as the **k Nearest-Neighbors (kNN)** algorithm. As you learned in the lecture, in k-NN classification the output of the classifier is a class membership.
Prior to running **k Nearest-Neighbor (kNN)** classification let's briefly revisit the distinct steps of the algorithm as discussed in the lecture:
<img align="center" style="max-width: 700px; height: auto" src="knn.png">
(Courtesy: Intro to AI & ML lecture, Prof. Dr. Borth, University of St. Gallen)
An object is classified by a majority vote of its neighbors, with the object being assigned to the class most common among its k nearest neighbors (k is a positive integer, typically small). If k = 1, then the object is simply assigned to the class of that single nearest neighbor.
### 1.1. Dataset Download and Data Assessment
Let's try the k Nearest-Neighbour algorithm using the delicious **Wine Dataset**! It is a classic and straightforward multi-class classification dataset.
<img align="center" style="max-width: 600px; height: auto" src="wine_dataset.jpg">
(Source: https://www.empirewine.com)
The data is the results of a chemical analysis of wines grown in the same region in Italy by three different cultivators (types). The dataset consists in total of **178 wines** as well as their corresponding **13 different measurements** taken for different constituents found in the three types of wine. Please, find below the list of the individual measurements (features):
>- `Alcohol`
>- `Malic acid`
>- `Ash`
>- `Alcalinity of ash`
>- `Magnesium`
>- `Total phenols`
>- `Flavanoids`
>- `Nonflavanoid phenols`
>- `Proanthocyanins`
>- `Color intensity`
>- `Hue`
>- `OD280/OD315 of diluted wines`
>- `Proline`
Further details on the dataset can be obtained from the following publication: *Forina, M. et al, PARVUS - "An Extendible Package for Data Exploration, Classification and Correlation.", Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy.*
Let's load the dataset and conduct a preliminary data assessment:
```
wine = datasets.load_wine()
```
Print and inspect feature names of the dataset:
```
wine.feature_names
```
Print and inspect the class names of the dataset:
```
wine.target_names
```
Print and inspect the top 10 feature rows of the dataset:
```
pd.DataFrame(wine.data, columns=wine.feature_names).head(10)
```
Print and inspect the top 10 labels of the dataset:
```
pd.DataFrame(wine.target).head(10)
```
Determine and print the feature dimensionality of the dataset:
```
wine.data.shape
```
Determine and print the label dimensionality of the dataset:
```
wine.target.shape
```
Plot the data distributions of the distinct features:
```
# prepare the dataset to be plotable using seaborn:
# convert the raw feature matrix to a pandas DataFrame
wine_plot = pd.DataFrame(wine.data, columns=wine.feature_names)

# add the class labels as an extra column so seaborn can color by class
wine_plot['class'] = wine.target

# plot a pairplot of the distinct feature distributions;
# NOTE: sns.pairplot() is a figure-level function that always creates its
# own figure, so the original plt.figure(figsize=(10, 10)) call only
# produced an empty stray figure and has been removed
sns.pairplot(wine_plot, diag_kind='hist', hue='class');
```
### 1.2. Dataset Pre-Processing
#### 1.2.1. Feature Re-Scaling
Observing the features values of the **Wine Dataset** we will notice that their respective value ranges vary widely. This results in a major challenge for distance based machine learning algorithms such as the **k Nearest-Neighbor** classifier. The **k Nearest-Neighbour** classifier calculates the distance between two observations using a distance measure such as the **Euclidean** or **Manhattan** distance.
If one of the features exhibits a wide range of values, the calculated distance will be governed by this particular feature. Therefore, the range of all features needs to be **re-scaled** or **normalized** to a value range beween in $[0,1]$ or $[-1,1]$ so that each feature contributes approximately proportionately to the final distance.
One widley used method of feature re-scaling is referred to as **Min-Max Normalization** and is given by:
$$x'={\frac {x-{\text{min}}(x)}{{\text{max}}(x)-{\text{min}}(x)}}$$
Let's re-scale the distinct feature values of the **Wine Dataset** using **Min-Max Normalization** using the `MinMaxScaler` class of the `sklearn` library:
```
# init the min-max scaler
scaler = MinMaxScaler(feature_range=(0, 1), copy=True)
# min-max normalize the distinct feature values
wine_data_norm = scaler.fit_transform(wine.data)
```
Print and inspect the top 10 feature rows of the normalized dataset:
```
pd.DataFrame(wine_data_norm, columns=wine.feature_names).head(10)
```
Ok, we can observe that all features values have been re-scaled. Let's also statistically validate this observation and determine if all feature values have been re-scaled to a value range between in $[0,1]$:
```
pd.DataFrame(wine_data_norm, columns=wine.feature_names).describe()
```
Looks great. All feature values are indeed in a range between $[0,1]$. Let's also visualize the re-scaled feature values and inspect their distributions:
```
# prepare the min-max normalized dataset to be plotable using seaborn:
# convert the normalized feature matrix to a pandas DataFrame
wine_plot = pd.DataFrame(wine_data_norm, columns=wine.feature_names)

# add the class labels as an extra column so seaborn can color by class
wine_plot['class'] = wine.target

# plot a pairplot of the distinct (normalized) feature distributions;
# NOTE: sns.pairplot() is a figure-level function that always creates its
# own figure, so the original plt.figure(figsize=(10, 10)) call only
# produced an empty stray figure and has been removed
sns.pairplot(wine_plot, diag_kind='hist', hue='class');
```
Excellent, the characteristics of the distinct feature value distributions remained unchanged.
#### 1.2.2. Extraction of Training- and Evaluation-Dataset
To understand and evaluate the performance of any trained **supervised machine learning** model, it is good practice to divide the dataset into a **training set** (the fraction of data records solely used for training purposes) and a **evaluation set** (the fraction of data records solely used for evaluation purposes). Please note, the **evaluation set** will never be shown to the model as part of the training process. All of this is exactly what we did in the prior **Gaussian Naive-Bayes** lab.
<img align="center" style="max-width: 500px; height: auto" src="trainevaldataset.png">
We set the fraction of testing records to **30%** of the original dataset:
```
eval_fraction = 0.3
```
Randomly split the **Wine Dataset** into training set and evaluation set using sklearn's `train_test_split` function:
```
# 70% training and 30% evaluation
X_train, X_eval, y_train, y_eval = train_test_split(wine_data_norm, wine.target, test_size=eval_fraction, random_state=random_seed, stratify=None)
```
Evaluate the training set dimensionality:
```
X_train.shape, y_train.shape
```
Evaluate the evaluation set dimensionality:
```
X_eval.shape, y_eval.shape
```
### 1.3. k Nearest-Neighbor (kNN) Classification
There is again a Python implementation of the **k Nearest-Neighbor (kNN)** classifier available in the `Scikit-Learn` library (https://scikit-learn.org) which we can use off the shelf. Please note, for each classifier available in the `Scikit-Learn` library, a designated and detailed documentation is provided. It often also includes a couple of practical examples and use cases. The documentation of the **k Nearest-Neighbor (kNN)** classifier can be obtained from the following url:
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
#### 1.3.1. Nearest Neighbors Classification, k=4
Set the number of neighbors `k` to be considered in the classification of each sample:
```
k_nearest_neighbors = 4
```
Please, recall that we discussed two distinct distance measures in the lecture to calculate the distance between an observation $x$ and it's $k$-nearest-neighbors $x'_{j}$ in a $n$-dimensonal feature space:
**Manhattan distance ("L1-norm"):** $$ D(x, x')=\|\sum^k_{j=1}\sum^n_{i=1}(x_{i} - x'_{j,i})\|$$
**Euclidian distance ("L2-norm"):** $$ D(x, x')=\sqrt{\sum^k_{j=1}\sum^n_{i=1}(x_{i} - x'_{j,i})^2}$$
where the index $j$ denotes the number of $k$-nearest-neighbors and the index $i$ denotes the $i$-th feature of a single nearest neighbor $x_j$. Since the 13 features of the Wine dataset consist of continuous features we will use the Euclidean distance as the distance metric in our kNN classification:
```
distance_metric = 'euclidean'
```
Init the **kNN classifier** of Python's `Scikit-Learn` library of data science algorithms:
```
knn = KNeighborsClassifier(n_neighbors=k_nearest_neighbors, metric=distance_metric)
```
Train the k-NN classifier using the training dataset:
```
knn.fit(X_train, y_train);
```
Utilize the trained model to predict the response for the evaluation dataset:
```
y_pred = knn.predict(X_eval)
```
Let's have a look at the predicted class labels:
```
y_pred
```
As well as the true class labels:
```
y_eval
```
Determine **prediction accuracy** of the trained model on the evaluation dataset:
```
print("Accuracy, k=4: ", metrics.accuracy_score(y_eval, y_pred))
```
Determine and plot the **confusion matrix** of the individual predictions:
```
# determine the prediction confusion matrix
mat = confusion_matrix(y_eval, y_pred)
```
Visualize the **confusion matrix** of the individual predictions determined by the **k=4 Nearest-Neighbor** classifier:
```
# init the plot
plt.figure(figsize=(5, 5))
# plot confusion matrix heatmap
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False, cmap='YlOrRd_r', xticklabels=wine.target_names, yticklabels=wine.target_names)
# add plot axis labels
plt.xlabel('[true label]', fontsize=14)
plt.ylabel('[predicted label]', fontsize=14)
# add plot title
plt.title('Confusion Matrix - k-NN, k=4', fontsize=14);
```
Remember that as part of the lecture you learned about several measures to evaluate the quality of a retrieval system, namely **Precision**, **Recall** and **F1-Score**. Let's briefly revisit their definition and subsequently calculate those measures based on the confusion matrix above:
>- The **Precision**, denoted by Precision $=\frac{TP}{TP + FP}$, is the probability that a retrieved document is relevant.
>- The **Recall**, denoted by Recall $=\frac{TP}{TP + FN}$, is the probability that a relevant document is retrieved.
>- The **F1-Score**, denoted by F1-Score $= 2 \cdot \frac{Precision \cdot Recall}{Precision + Recall}$, combines precision and recall is the harmonic mean of both measures.
```
print(classification_report(y_eval, y_pred))
```
#### 1.3.2. Nearest Neighbors Classification, k=8
Set the number of neighbors `k` to be considered in the classification of each sample:
```
k_nearest_neighbors = 8
```
Init the **k-NN classifier** of Python's `sklearn` library of data science algorithms:
```
knn = KNeighborsClassifier(n_neighbors=k_nearest_neighbors, metric=distance_metric)
```
Train the k-NN classifier using the training dataset:
```
knn.fit(X_train, y_train);
```
Utilize the trained model to predict the response for the evaluation dataset:
```
y_pred = knn.predict(X_eval)
```
Determine **prediction accuracy** of the trained model on the evaluation dataset:
```
print("Accuracy, k=8: ", metrics.accuracy_score(y_eval, y_pred))
```
Determine and plot the **confusion matrix** of the individual predictions:
```
# determine the prediction confusion matrix
mat = confusion_matrix(y_eval, y_pred)
```
Visualize the **confusion matrix** of the individual predictions determined by the **k=8 Nearest-Neighbor** classifier:
```
# init the plot
plt.figure(figsize=(5, 5))
# plot confusion matrix heatmap
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False, cmap='YlOrRd_r', xticklabels=wine.target_names, yticklabels=wine.target_names)
# add plot axis labels
plt.xlabel('[true label]', fontsize=14)
plt.ylabel('[predicted label]', fontsize=14)
# add plot title
plt.title('Confusion Matrix - k-NN, k=8', fontsize=14);
```
Determine and plot a comprehensive **classification report** of the individual predictions:
```
# collect classification report
report = classification_report(y_eval, y_pred)
# print classification report
print(report)
```
#### 1.3.3. Finding the optimal k of the kNN classifier
Until now we have investigated the Euclidean distance based kNN classifier for two distinct values of $k=4$ and $k=8$. However the question remains: can we locate a value for k that yields an even higher classification accuracy?
Let's therefore investigate the classification performance of a wider range of distinct $k$ values and in particular compare the corresponding classification accuracy. We will do so in the following by specifying a range of distinct $k$ values ranging from $k=1, ..., 50$:
```
# try k=1 through k=50 to be evaluated
k_range = range(1, 51)
```
To run the kNN classification experiments for different values of $k$ we will define a python loop. The loop iterates over the range of distinct k's and conducts the model training (using the training data) and evaluation (using the evaluation data). The classification accuracy for each $k$ value will be collected and stored in a designated list of accuracy scores:
```
# init the list of accuracy scores obtained on the evaluation data,
# one entry per candidate k in k_range
eval_accuracy_scores = []

# iterate over the distinct k values
for k in k_range:

    # init a fresh k-NN classifier for the current k,
    # using the Euclidean distance as before
    knn = KNeighborsClassifier(n_neighbors=k, metric='euclidean')

    # train the k-NN classifier on the training data
    knn.fit(X_train, y_train)

    # predict the class labels of the held-out evaluation data
    # (the original loop also predicted on X_train each iteration,
    # but that result was never used — the redundant computation,
    # which roughly doubled the loop's runtime, has been removed)
    y_eval_pred = knn.predict(X_eval)

    # determine the classification accuracy on the evaluation data
    accuracy = metrics.accuracy_score(y_eval, y_eval_pred)

    # collect the classification accuracy of the current k
    eval_accuracy_scores.append(accuracy)
```
Visualizing the collected classification accuracy scores of the distinct $k$ values:
```
# prepare plot
fig = plt.figure()
ax = fig.add_subplot(111)
# plot the classification accuracy of distinct k's
ax.plot(range(1, len(eval_accuracy_scores)+1), eval_accuracy_scores, color='darkred', marker='o')
# add grid
ax.grid(linestyle='dotted')
# add axis range and legends
ax.set_xlabel("[$k$-Nearest-Neighbors]", fontsize=14)
ax.set_ylabel("[% classification accuracy]", fontsize=14)
# add plot title
ax.set_title('k-NN Classification Accuracy', fontsize=14);
```
Alright, we can nicely observe that a consistently high classification accuracy on the held-out evaluation dataset is achieved for $k=19, 23,$ and $29$. One of these $k$ values is probably what you want to use for inference when rolling the model out to production.
### Exercises:
We recommend you to try the following exercises as part of the lab:
**1. Train, evaluate and plot the prediction accuracy of the k=1,...,40 Nearest Neighbor models.**
> Write a Python loop that trains and evaluates the prediction accuracy of all k-Nearest Neighbor parametrizations ranging from k=1,...,40. Collect and print the prediction accuracy of each model respectively and compare the results. Plot the prediction accuracy collected for each model above. The plot should display the distinct values of k at the x-axis and the corresponding model prediction accuracy on the y-axis. What kind of behaviour in terms of prediction accuracy can be observed with increasing k?
```
# ***************************************************
# INSERT YOUR CODE HERE
# ***************************************************
```
**2. Train, evaluate and plot the prediction accuracy of the k=1,...,40 Nearest Neighbor models without re-scaling the individual feature values.**
> Write a Python loop that trains and evaluates the prediction accuracy of all k-Nearest Neighbor parametrizations ranging from k=1,...,40. Collect and print the prediction accuracy of each model respectively and compare the results. Plot the prediction accuracy collected for each model above. The plot should display the distinct values of k at the x-axis and the corresponding model prediction accuracy on the y-axis. What kind of behaviour in terms of prediction accuracy can be observed with increasing k? What do you observe when comparing the results of the non re-scaled with the results obtained for the scaled features?
```
# ***************************************************
# INSERT YOUR CODE HERE
# ***************************************************
```
**3. Train, evaluate and plot the prediction accuracy of the k=1,...,40 Nearest Neighbor models using the "Manhattan" distance.**
> Write a Python loop that trains and evaluates the prediction accuracy of all k-Nearest Neighbor parametrizations ranging from k=1,...,40 using the "Manhattan" instead of the "Euclidean" distance metric. Collect and print the prediction accuracy of each model respectively and compare the results. Plot the prediction accuracy collected for each model above. The plot should display the distinct values of k at the x-axis and the corresponding model prediction accuracy on the y-axis. What kind of behaviour in terms of prediction accuracy can be observed with increasing k? What do you observe when comparing the results obtained for the "Manhattan" distance with the ones obtained for the "Euclidean" distance?
```
# ***************************************************
# INSERT YOUR CODE HERE
# ***************************************************
```
### Lab Summary:
In this lab, a step-by-step introduction to **k Nearest-Neighbor (kNN)** classification is presented. The code and exercises presented in this lab may serve as a starting point for more complex and tailored programs.
You may want to execute the content of your lab outside of the Jupyter notebook environment, e.g. on a compute node or a server. The cell below converts the lab notebook into a standalone and executable python script. Pls. note that to convert the notebook, you need to install Python's **nbconvert** library and its extensions:
```
# installing the nbconvert library
!pip install nbconvert
!pip install jupyter_contrib_nbextensions
```
Let's now convert the Jupyter notebook into a plain Python script:
```
!jupyter nbconvert --to script cfds_lab_06.ipynb
```
| github_jupyter |
```
import os
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import pickle
from PIL import Image
names = open("image_names.txt", "w")
image_names = os.listdir('../data/ALLSTIMULI/train/')[:-3]
for i in range(len(image_names)):
names.write(image_names[i][:-5]+'\n')
names.close()
names = open("image_names.txt", "r")
img_names = names.readlines()
for i in range(len(img_names)):
img_names[i]=img_names[i][:-1]
loc_data_xy={}
for name in img_names:
locpath = '../data/loc_data/train/' + name
f = open(locpath,'rb')
loc_dict = pickle.load(f)
loc_data_xy[name] = np.array(loc_dict['barycenters'])
def show_landmarks(image, landmarks):
    """Show an image with its saccade landmarks overlaid as red dots.

    Args:
        image: array-like image accepted by plt.imshow.
        landmarks: (N, 2) array of (x, y) landmark coordinates.
    """
    plt.imshow(image)
    plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
    #plt.pause(0.001)  # pause a bit so that plots are updated
```
# Dataset class
```
class SaccadeLandmarksDataset(Dataset):
    """Saccade Landmarks dataset: pairs each image with its saccade barycenters."""

    def __init__(self, loc_dict, img_dir, transform=None):
        """
        Args:
            loc_dict (dict): Mapping from image name to saccade locations
                (barycenters), as loaded from the location files.
            img_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.loc_dict = loc_dict
        self.img_dir = img_dir
        self.transform = transform

    def __len__(self):
        return len(self.loc_dict)

    def __getitem__(self, idx):
        # NOTE(review): os.listdir returns entries in arbitrary order and the
        # "+2" offset presumably skips two non-image entries — fragile; a
        # sorted, filtered file list built once in __init__ would be safer.
        img_name = os.listdir(self.img_dir)[idx + 2]
        img_path = os.path.join(self.img_dir, img_name)
        image = io.imread(img_path)
        # image files are assumed to have a 5-character suffix (e.g. ".jpeg")
        name = img_name[:-5]
        landmarks = self.loc_dict[name]
        landmarks = np.array([landmarks])
        landmarks = landmarks.reshape(-1, 2)  # one (x, y) pair per row
        sample = {'image': image, 'landmarks': landmarks}
        if self.transform:
            sample = self.transform(sample)
        return sample
```
# Transforms
```
class RandomSaccadeTo(object):
    """Pick one saccade target at random and recenter the image on it.

    The image is cyclically shifted (np.roll) so that the chosen landmark
    lands at the image center. Returns a dict with the shifted image
    ('image') and the chosen target position ('pos').
    """

    def __call__(self, sample):
        image, landmarks = sample['image'], sample['landmarks']

        # draw one landmark uniformly at random
        nb_sac = len(landmarks)
        sac_num = np.random.randint(nb_sac)
        sac = landmarks[sac_num]

        # landmarks are stored as (x, y) while image axes are (row, col),
        # hence sac[1] shifts axis 0 and sac[0] shifts axis 1
        N_X, N_Y = image.shape[:2]
        image_roll = np.copy(image)
        image_roll = np.roll(image_roll, N_X // 2 - sac[1], axis=0)
        image_roll = np.roll(image_roll, N_Y // 2 - sac[0], axis=1)

        return {'image': image_roll, 'pos': sac}
class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        # swap color axis because
        # numpy image: H x W x C
        # torch image: C x H x W
        image_tens = sample['image'].transpose((2, 0, 1))
        return {'image': torch.FloatTensor(image_tens), 'pos': sample['pos']}
```
### Adapted cropped pyramid (squeezed tensor)
```
from PYramid import cropped_pyramid
class CroppedPyramid(object):
    """Build a cropped multiscale pyramid from the (recentred) image tensor."""

    def __init__(self, width, base_levels, color=True, do_mask=False, verbose=False):
        # parameters are forwarded verbatim to cropped_pyramid()
        self.width = width
        self.base_levels = base_levels
        self.color = color
        self.do_mask = do_mask
        self.verbose = verbose

    def __call__(self, sample):
        # cropped_pyramid expects a batch dimension, hence unsqueeze(0)
        img_crop, level_size = cropped_pyramid(sample['image'].unsqueeze(0),
                                               width=self.width,
                                               base_levels=self.base_levels,
                                               color=self.color,
                                               do_mask=self.do_mask,
                                               verbose=self.verbose,
                                               squeeze=True)
        return {'img_crop': img_crop, 'level_size': level_size, 'pos': sample['pos']}
```
### LogGaborTransform
```
width=32
base_levels=2
from LogGabor import LogGabor
from PYramid import local_filter
from PYramid import get_K
from PYramid import log_gabor_transform
n_sublevel = 2
n_azimuth = 12
n_theta = 12
n_phase = 2
pe = {'N_X': width, 'N_Y': width, 'do_mask': False, 'base_levels':
base_levels, 'n_theta': 24, 'B_sf': 0.6, 'B_theta': np.pi/12 ,
'use_cache': True, 'figpath': 'results', 'edgefigpath':
'results/edges', 'matpath': 'cache_dir', 'edgematpath':
'cache_dir/edges', 'datapath': 'database/', 'ext': '.pdf', 'figsize':
14.0, 'formats': ['pdf', 'png', 'jpg'], 'dpi': 450, 'verbose': 0}
lg = LogGabor(pe)
K = get_K(width=width,
n_sublevel = n_sublevel,
n_azimuth = n_azimuth,
n_theta = n_theta,
n_phase = n_phase,
r_min = width/6,
r_max = width/3,
log_density_ratio = 2,
verbose=False)
class LogGaborTransform(object):
    """Apply a log-Gabor filter bank to a cropped image pyramid."""

    def __init__(self, K=K, color=True, verbose=False):
        # K defaults to the module-level filter bank built by get_K() above,
        # but is stored per instance so an alternative bank can be injected.
        self.K = K
        self.color = color      # NOTE(review): currently unused in __call__
        self.verbose = verbose  # NOTE(review): currently unused in __call__

    def __call__(self, sample):
        # bug fix: use the instance's filter bank (self.K) instead of the
        # module-level global K, so a K passed to __init__ is honored
        log_gabor_coeffs = log_gabor_transform(sample['img_crop'].unsqueeze(0), self.K)
        return {'img_gabor': log_gabor_coeffs, 'K': self.K}
```
### ComplexModulus
# Compose transforms
### transforms.Compose
```
composed_transform = transforms.Compose([RandomSaccadeTo(),
ToTensor(),
CroppedPyramid(width, base_levels), LogGaborTransform()])
saccade_dataset = SaccadeLandmarksDataset(loc_dict=loc_data_xy,
img_dir='../data/ALLSTIMULI/',
transform=composed_transform)
```
# Iterating through the dataset
```
# Helper function to show a batch
def show_landmarks_batch(sample_batched):
    """Show image grids for a batch of samples, one figure per pyramid level."""
    # iterate from the coarsest (5) down to the finest (1) pyramid level
    for level in range(5, 0, -1):
        plt.figure()
        images_batch = sample_batched['img_crop'][:, level, :, :, :]
        grid = utils.make_grid(images_batch)
        # tensor is C x H x W; transpose back to H x W x C for imshow
        plt.imshow(grid.numpy().transpose((1, 2, 0)).clip(0, 255).astype('uint8'))
        plt.title('Batch from dataloader, level=' + str(level))
n_sublevel = 2
n_azimuth = 12
n_theta = 12
n_phase = 2
n_levels = 6 #int(np.log(np.max((N_X, N_Y))/width)/np.log(base_levels)) + 1
n_eccentricity = 2
n_color = 3
print(n_levels * n_color * n_eccentricity * n_azimuth * n_theta * n_phase)
class AutoEncoder(nn.Module):
    """Single-hidden-layer linear autoencoder over log-Gabor coefficient vectors."""

    def __init__(self, in_chan=n_levels * n_color * n_eccentricity * n_azimuth * n_theta * n_phase, out_chan=100):
        super(AutoEncoder, self).__init__()
        self.encoder = Encoder(in_chan=in_chan, out_chan=out_chan)
        self.decoder = Decoder(in_chan=out_chan, out_chan=in_chan)

    def forward(self, x, **kargs):
        code = self.encoder(x)    # compress the input to the latent code
        out = self.decoder(code)  # reconstruct the input from the code
        return out
class Encoder(nn.Module):
    """Encoder: a single linear projection down to the latent code."""

    def __init__(self, in_chan, out_chan):
        super(Encoder, self).__init__()
        self.enc = nn.Linear(in_chan, out_chan)

    def forward(self, x):
        return self.enc(x)
class Decoder(nn.Module):
    """Decoder: a single linear projection back to the input dimension."""

    def __init__(self, in_chan, out_chan):
        super(Decoder, self).__init__()
        self.dec = nn.Linear(in_chan, out_chan)

    def forward(self, x):
        return self.dec(x)
autoenc = AutoEncoder()
autoenc.encoder.enc
autoenc.encoder.enc.weight
autoenc.decoder.dec
batch_size=15
n_epoch = 20
optimizer = optim.Adam(autoenc.parameters(), lr = 1e-4)
criterion = nn.MSELoss() #loss = criterion(outputs, inputs)
dataloader = DataLoader(saccade_dataset, batch_size=batch_size,
shuffle=True, num_workers=0)
loss_list = []

for epoch in range(n_epoch):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(dataloader):
        # flatten each log-Gabor tensor into one feature vector per sample
        inputs = data['img_gabor'].view(-1, n_levels * n_color * n_eccentricity * n_azimuth * n_theta * n_phase)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = autoenc(inputs)
        loss = criterion(outputs, inputs)
        loss.backward()
        optimizer.step()

        # print statistics on every mini-batch
        running_loss += loss.item()
        if True:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss))
            loss_list.append(running_loss)
            running_loss = 0.0

# bug fix: 'str' + list raised TypeError; convert the list explicitly
print('Finished Training ' + str(loss_list))
```
| github_jupyter |
```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('../data/csvs/dkpes.csv')
df.head(10)
y = df['Signal-inhibition'].values
fgroup_cols = ['3-Keto', '3-Hydroxy', '12-Keto',
'12-Hydroxy', '19-Methyl', '18-Methyl', 'Sulfate-Ester',
'Sulfate-Oxygens', 'C4-C5-DB', 'C6-C7-DB', 'Sulfur']
X = df[fgroup_cols].values
fig, ax = plt.subplots(1, 2, figsize=(10, 3))
ax[0].hist(y, bins=np.arange(0, 1.1, 0.1))
ax[0].set_ylabel('Molecule count')
ax[0].set_xlabel('Signal inhibition')
ax[1].scatter(y, df['TanimotoCombo'].values)
ax[1].set_ylabel('Tanimoto similarity')
ax[1].set_xlabel('Signal inhibition')
plt.show()
print('The top 10 most active compounds:')
df['index'][:10]
print('The top 10 least active compounds:')
df['index'][-10:]
fig, ax = plt.subplots(1, 2, figsize=(10, 3))
ax[0].imshow(X[:10], cmap='binary')
ax[0].set_title('10 most active molecules')
plt.sca(ax[0])
plt.xticks(range(len(fgroup_cols)),
fgroup_cols, rotation='vertical')
plt.yticks(range(10), df['index'][:10])
ax[1].imshow(X[-10:], cmap='binary')
ax[1].set_title('10 least active molecules')
plt.sca(ax[1])
plt.xticks(range(len(fgroup_cols)),
fgroup_cols, rotation='vertical')
plt.yticks(range(10), df['index'][-10:])
plt.show()
y_binary = np.where(y >= 0.6, 1, 0)
np.sum(y_binary)
```
- install graphviz (http://www.graphviz.org)
```
tree.DecisionTreeClassifier?
import pydotplus
from sklearn import tree
from IPython.display import Image
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, y_binary)
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=fgroup_cols,
class_names=['non-active', 'active'],
filled=True, rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf("tree.pdf")
Image(graph.create_png())
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=1000,
random_state=0,
n_jobs=-1)
forest.fit(X, y_binary)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
feature_labels = np.array(fgroup_cols)
for f in range(X.shape[1]):
print("%2d) %-*s %f" % (f + 1, 30,
feature_labels[indices[f]],
importances[indices[f]]))
plt.bar(range(X.shape[1]),
importances[indices],
align='center')
plt.xticks(range(X.shape[1]),
feature_labels[indices], rotation=90)
plt.xlim([-1, X.shape[1]])
plt.ylabel('Relative feature importance')
plt.tight_layout()
#plt.savefig('./random_forest.png', dpi=300)
plt.show()
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
sfs = SFS(classifier,
k_features=1,
forward=False,
floating=False,
scoring='accuracy',
verbose=0,
cv=5)
sfs = sfs.fit(X, y_binary)
fig1 = plot_sfs(sfs.get_metric_dict(), kind='std_err')
plt.ylim([0.5, 1])
plt.ylabel('Accuracy')
plt.xlabel('Number of features in the selected subset')
plt.grid()
plt.show()
sfs.subsets_[2]
sfs.subsets_[2]['feature_idx']
feature_labels[list(sfs.subsets_[2]['feature_idx'])]
```
| github_jupyter |
# Setup
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import (datasets,
dummy,
metrics,
model_selection as skms,
multiclass as skmulti,
naive_bayes,
neighbors,
preprocessing as skpre)
import warnings
warnings.filterwarnings("ignore")
np.random.seed(42)
```
# Multi-class Metric Averages
```
iris = datasets.load_iris()
tts = skms.train_test_split(iris.data, iris.target,
test_size=.33, random_state=21)
(iris_train_ftrs, iris_test_ftrs,
iris_train_tgt, iris_test_tgt) = tts
iris_preds = (neighbors.KNeighborsClassifier()
.fit(iris_train_ftrs, iris_train_tgt)
.predict(iris_test_ftrs))
print(metrics.classification_report(iris_test_tgt,
iris_preds))
# verify sums-across-rows
cm = metrics.confusion_matrix(iris_test_tgt, iris_preds)
print(cm)
print("row counts equal support:", cm.sum(axis=1))
macro_prec = metrics.precision_score(iris_test_tgt,
iris_preds,
average='macro')
print("macro:", macro_prec)
cm = metrics.confusion_matrix(iris_test_tgt, iris_preds)
print(cm)
n_labels = len(iris.target_names)
# remember:
# precision is about column of our confusion matrix
# diagonal is where we are correct
# 'macro' means "average over all classes"
# each_class_precision(correct / full column) [ add, divide n --> average]
manual_macro_prec = (np.diag(cm) / cm.sum(axis=0)).sum() / n_labels
print(manual_macro_prec)
print("should equal 'macro avg':", macro_prec == manual_macro_prec)
micro_prec = metrics.precision_score(iris_test_tgt,
iris_preds,
average='micro')
print("micro:", micro_prec)
cm = metrics.confusion_matrix(iris_test_tgt, iris_preds)
print(cm)
# 'micro' means over every prediction
# (3 in the confusion matrix means 3 predictions with that right/wrongness)
# TP.sum() / (TP&FP).sum() -->
# all correct / all preds
manual_micro_prec = np.diag(cm).sum() / cm.sum()
print(manual_micro_prec)
print("should equal avg='micro':", micro_prec==manual_micro_prec)
cr = metrics.classification_report(iris_test_tgt,
iris_preds,
digits=3)
print(cr)
# can get precision class-column with
# metrics.precision_score(actual, predicted, average=None)
# can get averages with average='macro'/'weighted'
# note: weighted is macro, but instead of dividing evenly
# (a + b + c) / 3
# it is weighted by occurance (support)
# a * (18/50) + b * (17/50) + c * (15/50)
```
# Multi-Class AUC: One-Versus-Rest
```
checkout = [0, 50, 100]
print("Original Encoding")
print(iris.target[checkout])
# instead of one target-label,
# create distinct target-label column for each target class
# (am i this or not?)
print("'Multi-label' Encoding")
print(skpre.label_binarize(iris.target, classes=[0,1,2])[checkout])
iris_multi_tgt = skpre.label_binarize(iris.target,
classes=[0,1,2])
# im --> "iris multi"
(im_train_ftrs, im_test_ftrs,
im_train_tgt, im_test_tgt) = skms.train_test_split(iris.data,
iris_multi_tgt,
test_size=.33,
random_state=21)
# knn wrapped up in one-versus-rest (3 classifiers)
knn = neighbors.KNeighborsClassifier(n_neighbors=5)
ovr_knn = skmulti.OneVsRestClassifier(knn)
pred_probs = (ovr_knn.fit(im_train_ftrs, im_train_tgt)
.predict_proba(im_test_ftrs))
# make ROC plots
lbl_fmt = "Class {} vs Rest (AUC = {:.2f})"
fig,ax = plt.subplots(figsize=(8,4))
for cls in [0,1,2]:
fpr, tpr, _ = metrics.roc_curve(im_test_tgt[:,cls],
pred_probs[:,cls])
label = lbl_fmt.format(cls, metrics.auc(fpr,tpr))
ax.plot(fpr, tpr, 'o--', label=label)
ax.legend()
ax.set_xlabel("FPR")
ax.set_ylabel("TPR");
# plot one-vs-rest precision-recall curves with their AUCs
fig, ax = plt.subplots(figsize=(6, 3))
for cls in [0, 1, 2]:
    prc = metrics.precision_recall_curve
    precision, recall, _ = prc(im_test_tgt[:, cls],
                               pred_probs[:, cls])
    prc_auc = metrics.auc(recall, precision)
    # bug fix: the label had mismatched parentheses ("(AUC) = ...)")
    label = "Class {} vs Rest (AUC = {:.2f})".format(cls, prc_auc)
    ax.plot(recall, precision, 'o--', label=label)
ax.legend()
ax.set_xlabel('Recall')
ax.set_ylabel('Precision');
```
# Multi-Class AUC: The Hand and Till Method
```
# pseudo-code
# 1. train a model
# 2. get classification scores for each example
# 3. create a blank table for each pair of classes
# (how tough is this pair of classes to distinguish with *this* classifier)
# auc is not symmetric b/c
# 4. for each pair (c_1, c_2) of classes:
# a. find AUC of c_1 against c_2 (c_1 POS, c_2 NEG)
# b. find AUC of c_2 against c_1 (c_2 POS, c_1 NEG)
# c. entry for c_1, c_2 is average of those AUCs
# 5. final value is average of the entries in the table
from mlwpy_video_extras import hand_and_till_M_statistic
knn = neighbors.KNeighborsClassifier()
knn.fit(iris_train_ftrs, iris_train_tgt)
test_probs = knn.predict_proba(iris_test_ftrs)
hand_and_till_M_statistic(iris_test_tgt, test_probs)
fig,ax = plt.subplots(1,1,figsize=(3,3))
htm_scorer = metrics.make_scorer(hand_and_till_M_statistic,
needs_proba=True)
cv_auc = skms.cross_val_score(knn,
iris.data, iris.target,
scoring=htm_scorer, cv=10)
sns.swarmplot(cv_auc, orient='v')
ax.set_title('10-Fold H&T Ms');
```
# Cumulative Response and Lift Curves
```
is_versicolor = iris.target == 1
tts_oc = skms.train_test_split(iris.data, is_versicolor,
test_size=.33, random_state = 21)
(oc_train_ftrs, oc_test_ftrs,
oc_train_tgt, oc_test_tgt) = tts_oc
# build, fit, predict (probability scores) for NB model
gnb = naive_bayes.GaussianNB()
prob_true = (gnb.fit(oc_train_ftrs, oc_train_tgt)
.predict_proba(oc_test_ftrs)[:,1]) # [:,1]=="True"
# what is the location of the "most likely true example"?
# negate b/c we want big values first
myorder = np.argsort(-prob_true)
# cumulative sum then to percent (last value is total)
realpct_myorder = oc_test_tgt[myorder].cumsum()
realpct_myorder = realpct_myorder / realpct_myorder[-1]
# convert counts of data into percents
N = oc_test_tgt.size
xs = np.linspace(1/N,1,N)
print(myorder[:3], realpct_myorder[:3])
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(8,4))
fig.tight_layout()
# cumulative response
ax1.plot(xs, realpct_myorder, 'r.')
ax1.plot(xs, xs, 'b-')
ax1.axes.set_aspect('equal')
ax1.set_title("Cumulative Response")
ax1.set_ylabel("Percent of Actual Hits")
ax1.set_xlabel("Percent Of Population\n" +
"Starting with Highest Predicted Hits")
# lift
# replace divide by zero with 1.0
ax2.plot(xs, realpct_myorder / np.where(xs > 0, xs, 1))
ax2.set_title("Lift Versus Random")
ax2.set_ylabel("X-Fold Improvement") # not cross-fold!
ax2.set_xlabel("Percent Of Population\n" +
"Starting with Highest Predicted Hits")
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right');
```
# Case Study: A Classifier Comparison
```
# classifiers under comparison; keys are the display labels
# bug fix: the k values were swapped relative to their labels
# ('3-NN' had n_neighbors=10 and '10-NN' had n_neighbors=3)
classifiers = {'base'  : dummy.DummyClassifier(strategy='most_frequent'),
               'gnb'   : naive_bayes.GaussianNB(),
               '3-NN'  : neighbors.KNeighborsClassifier(n_neighbors=3),
               '10-NN' : neighbors.KNeighborsClassifier(n_neighbors=10)}
# define the one_class iris problem so we don't have random ==1 around
iris_onec_ftrs = iris.data
iris_onec_tgt = iris.target==1
msrs = ['accuracy', 'precision', 'roc_auc']
fig, axes = plt.subplots(len(msrs), 1, figsize=(8, 3*len(msrs)))
fig.tight_layout()
for mod_name, model in classifiers.items():
# abbreviate
cvs = skms.cross_val_score
cv_results = {msr:cvs(model, iris_onec_ftrs, iris_onec_tgt,
scoring=msr, cv=10) for msr in msrs}
for ax, msr in zip(axes, msrs):
msr_results = cv_results[msr]
my_lbl = "{:12s} {:.3f} {:.2f}".format(mod_name,
msr_results.mean(),
msr_results.std())
ax.plot(msr_results, 'o--', label=my_lbl)
ax.set_title(msr)
ax.legend(loc='lower left', ncol=2)
fig, axes = plt.subplots(2,2, figsize=(4,4), sharex=True, sharey=True)
fig.tight_layout()
for ax, (mod_name, model) in zip(axes.flat, classifiers.items()):
preds = skms.cross_val_predict(model,
iris_onec_ftrs, iris_onec_tgt,
cv=10)
cm = metrics.confusion_matrix(iris_onec_tgt, preds)
sns.heatmap(cm, annot=True, ax=ax,
cbar=False, square=True, fmt="d")
ax.set_title(mod_name)
axes[1,0].set_xlabel('Predicted')
axes[1,1].set_xlabel('Predicted')
axes[0,0].set_ylabel('Actual')
axes[1,0].set_ylabel('Actual');
fig, ax = plt.subplots(1, 1, figsize=(6,4))
cv_prob_true = {} # store these for use in next cell
for mod_name, model in classifiers.items():
cv_probs = skms.cross_val_predict(model,
iris_onec_ftrs, iris_onec_tgt,
cv=10, method='predict_proba')
cv_prob_true[mod_name] = cv_probs[:,1]
fpr, tpr, thresh = metrics.roc_curve(iris_onec_tgt,
cv_prob_true[mod_name])
auc = metrics.auc(fpr, tpr)
ax.plot(fpr, tpr, 'o--', label="{:7s}{}".format(mod_name, auc))
ax.set_title('ROC Curves')
ax.legend();
fig, (ax1,ax2) = plt.subplots(1, 2, figsize=(10,5))
N = len(iris_onec_tgt)
xs = np.linspace(1/N,1,N)
ax1.plot(xs, xs, 'b-')
for mod_name in classifiers:
# negate b/c we want big values first
myorder = np.argsort(-cv_prob_true[mod_name])
# cumulative sum then to percent (last value is total)
realpct_myorder = iris_onec_tgt[myorder].cumsum()
realpct_myorder = realpct_myorder / realpct_myorder[-1]
ax1.plot(xs, realpct_myorder, '.', label=mod_name)
ax2.plot(xs,
realpct_myorder / np.where(xs > 0, xs, 1),
label=mod_name)
ax1.legend()
ax2.legend()
ax1.set_title("Cumulative Response")
ax2.set_title("Lift versus Random");
```
| github_jupyter |
# Long Short-Term Memory (LSTM) Model
---
Lets first create a tiny LSTM network sample to understand the architecture of LSTM networks.
We need to import the necessary modules for our code. We need <b><code>numpy</code></b> and <b><code>tensorflow</code></b>, obviously. Additionally, we can import directly the <b><code>tensorflow.contrib.rnn</code></b> model, which includes the function for building RNNs.
```
import numpy as np
import tensorflow as tf
sess = tf.Session()
```
We want to create a network that has only one LSTM cell. We have to pass 2 elements to LSTM, the <b>prv_output</b> and <b>prv_state</b>, so called, <b>h</b> and <b>c</b>. Therefore, we initialize a state vector, <b>state</b>. Here, <b>state</b> is a tuple with 2 elements, each one is of size [1 x 4], one for passing prv_output to next time step, and another for passing the prv_state to next time stamp.
```
LSTM_CELL_SIZE = 4 # output size (dimension), which is same as hidden size in the cell
lstm_cell = tf.contrib.rnn.BasicLSTMCell(LSTM_CELL_SIZE, state_is_tuple=True)
state = (tf.zeros([1,LSTM_CELL_SIZE]),)*2
state
```
Let define a sample input. In this example, batch_size = 1, and seq_len = 6:
```
sample_input = tf.constant([[3,2,2,2,2,2]],dtype=tf.float32)
print (sess.run(sample_input))
```
Now, we can pass the input to lstm_cell, and check the new state:
```
with tf.variable_scope("LSTM_sample1"):
output, state_new = lstm_cell(sample_input, state)
sess.run(tf.global_variables_initializer())
print (sess.run(state_new))
```
As we can see, the states has 2 parts, the new state c, and also the output h. Lets check the output again:
```
print (sess.run(output))
```
<hr>
<a id="stacked_ltsm"></a>
<h2>Stacked LSTM</h2>
What about if we want to have a RNN with stacked LSTM? For example, a 2-layer LSTM. In this case, the output of the first layer will become the input of the second.
Lets start with a new session:
```
sess = tf.Session()
input_dim = 6
```
Lets create the stacked LSTM cell:
```
cells = []
```
Creating the first layer LTSM cell.
```
LSTM_CELL_SIZE_1 = 4 #4 hidden nodes
cell1 = tf.contrib.rnn.LSTMCell(LSTM_CELL_SIZE_1)
cells.append(cell1)
```
Creating the second layer LTSM cell.
```
LSTM_CELL_SIZE_2 = 5 #5 hidden nodes
cell2 = tf.contrib.rnn.LSTMCell(LSTM_CELL_SIZE_2)
cells.append(cell2)
```
To create a multi-layer LTSM we use the <b>tf.contrib.rnnMultiRNNCell</b> function, it takes in multiple single layer LTSM cells to create a multilayer stacked LTSM model.
```
stacked_lstm = tf.contrib.rnn.MultiRNNCell(cells)
```
Now we can create the RNN from <b>stacked_lstm</b>:
```
# Batch size x time steps x features.
data = tf.placeholder(tf.float32, [None, None, input_dim])
output, state = tf.nn.dynamic_rnn(stacked_lstm, data, dtype=tf.float32)
```
Lets say the input sequence length is 3, and the dimensionality of the inputs is 6. The input should be a Tensor of shape: [batch_size, max_time, dimension], in our case it would be (2, 3, 6)
```
#Batch size x time steps x features.
sample_input = [[[1,2,3,4,3,2], [1,2,1,1,1,2],[1,2,2,2,2,2]],[[1,2,3,4,3,2],[3,2,2,1,1,2],[0,0,0,0,3,2]]]
sample_input
```
we can now send our input to network, and check the output:
```
output
sess.run(tf.global_variables_initializer())
sess.run(output, feed_dict={data: sample_input})
```
As you see, the output is of shape (2, 3, 5), which corresponds to our 2 batches, 3 elements in our sequence, and the dimensionality of the output which is 5.
<br>
## Thanks for reading :)
Created by [Saeed Aghabozorgi](https://www.linkedin.com/in/saeedaghabozorgi/) and modified by [Tarun Kamboj](https://www.linkedin.com/in/kambojtarun/).
| github_jupyter |
# Week 3 - For Loops (part 2) and Conditionals (part 1)
## The following play critical roles:
### 1. For Loops - iterating through data.
### 2. Conditional Statements - adding logic to our statements.
## 1. For Loops
### For Loops are your best friend - most used Python expression for journalists:
### Iterate over:
* data stored in a list and run some calculation on each value;
* a list of URLs and visit each site to scrape data;
* data stored in dictionary keys and values and return what you are looking for;
Let's take **For Loops** for test drive:
```
## Use this list of CEO salaries from 1985
ceo_salaries_1985 = [150_000, 201_000, 110_000, 75_000, 92_000, 55_000]

## Print each salary with in the following format:
## "A CEO earned [some value] in 1985."
for salary in ceo_salaries_1985:
    print(f"A CEO earned ${salary:,} in 1985")

## Now update each salary to 2019 dollars.
## Print the following info:
## "A CEO's salary of [1985 salary] in 1985 is worth [updated salary] in 2019 dollars."
## The CPI for 1985 is 107.6
## The 2019 CPI is 255.657
## The formula is: updated_salary = (oldSalary/oldCPI) * currentCPI
for salary in ceo_salaries_1985:
    updated_salary = (salary / 107.6) * 255.657
    print(f"A CEO's salary of ${salary:,} in 1985 is worth ${updated_salary:,.0f} in 2019 dollars.")
```
## For Loops through multiple but related lists
```
## You scrape a site and each datapoint is stored in different lists
firstNames = ["Irene", "Ursula", "Elon", "Tim"]
lastNames = ["Rosenfeld", "Burns", "Musk", "Cook"]
titles = ["Chairman and CEO", "Chairman and CEO", "CEO", "CEO"]
companies = ["Kraft Foods", "Xerox", "Tesla", "Apple"]
industries = ["Food and Beverage", "Process and Document Management", "Auto Manufacturing", "Consumer Technology"]

## How do you align each item from the different lists that belong together?
## zip() pairs up the i-th entry of every list
for (fname, lname, rank, co, field) in zip(firstNames, lastNames, titles, companies, industries):
    print(f"Name: {lname}, {fname} ")
    print(f"Title: {rank}")
    print(f"Company: {co}")
    print(f"Industry: {field}")
    print("\n")

## We don't just want to print the items.
## Let's move into a dictionary called bio_dict and then
## place in a list of dicts called bio_list.
bio_list = []
for (fname, lname, rank, co, field) in zip(firstNames, lastNames, titles, companies, industries):
    bio_dict = {"first_name": fname, "last_name": lname, "title": rank, "company": co, "industry": field}
    bio_list.append(bio_dict)

print(bio_list)
```
## For Loops within For Loops
## For Loops through Dictionaries
```
## You have a list of CEO salaries from 1969.
sals_1969 = [47_000, 65_000, 39_000, 96_000]

## We need the value of these salaries updated for every decade till 2019
## Here are the CPIs for each decade in a list of dictionaries from 1969 to 2019.
decades_cpi = [
    {"year": 1979, "cpi": 72.6},
    {"year": 1989, "cpi": 124},
    {"year": 1999, "cpi": 166.6},
    {"year": 2009, "cpi": 214.537},
    {"year": 2019, "cpi": 255.657}
]

## Show the contents of this list of dictionaries
decades_cpi

## What datatype is decades_cpi
type(decades_cpi)

## Check what type of data each list item is within decades_cpi
for item in decades_cpi:
    print(type(item))

## Print out each value in this format:
## "key --> value"
for period in decades_cpi:
    for key, value in period.items():
        print(f"{key} --> {value}")
```
### The key alternates between the strings "year" and "cpi" in this loop.
### How do we actually target the values for "year" and "cpi" and place them in our calculations?
```
for period in decades_cpi:
    # each `period` is a dict like {"year": 1979, "cpi": 72.6};
    # .get() pulls out the two values we need for the calculation
    that_year = period.get("year")
    old_cpi = period.get("cpi")
    print(f"{that_year} -----> {old_cpi}")
```
## We can now add these variables into our calculations:
```
## Loop through each salary and update its value for each decade
CPI_1969 = 36.7

for sal in sals_1969:
    for period in decades_cpi:
        cpi = period.get("cpi")
        that_year = period.get("year")
        updated_sal = (sal / CPI_1969) * cpi
        print(f"A salary of ${sal:,} in 1969 is worth ${updated_sal:,.0f} in {that_year}")
    # separator between salaries (printed once per outer iteration)
    print("*********************************************************")
```
## 2. Conditional Statements
```
## create a list of 10 random numbers anywhere from -100 to 100
## name the list numbers
import random

numbers = random.sample(range(-100, 100), 10)
numbers

## create conditional statements that tell us if the last number and the
## penultimate number are positive or negative, then print a sentence:
## "The last number [what is it?] is [positive or negative] while
##  the penultimate number [what is it?] is [negative or positive]."
# NOTE(review): a value of exactly 0 is reported as "negative" by this > 0 test
if numbers[-2] > 0:
    almost_last_status = "positive"
else:
    almost_last_status = "negative"

if numbers[-1] > 0:
    last_status = "positive"
else:
    last_status = "negative"

print(f"The last number ({numbers[-1]}) is {last_status} while \
the second to last number({numbers[-2]}) is {almost_last_status}")
```
## Conditionals at work:
<img src="support_files/scraped-etf.PNG" style="width: 50%;">
To produce the chart above, we had to scrape the numbers from text published in articles. The numbers, however, were in string format with words like "million", "billion" and "trillion" describing their magnitude:
<img src="support_files/magnitude.png" style="width: 100%;">
In order to run calculations on them, we had to convert them to floating point numbers.
Take the excerpt list below called big_numbers and convert each item in the list to a floating point number. You will have to use basic math and conditional statements.
```
# Magnitude-suffixed amounts scraped from article text; the exercise is to
# convert each to a float (e.g. "130.4 million" -> 130_400_000.0) using
# basic math and conditional statements.
big_numbers = [
    "130.4 million",
    "67.2 million",
    "125.4 million",
    "5.04 million",
    "1.3 billion",
    "2.2 trillion",
    "7.2 million",
    "3.1 billion"
]
big_numbers
```
| github_jupyter |
The following cell should always be the first coding cell of your python notebooks
```
# Prompt for the student's NETID and derive the submission file name from it.
# NOTE(review): raw_input is Python 2 only — under Python 3 this must be input().
student_id = raw_input('Please enter your NETID (e.g. ydubief)')
print(student_id)
assignment_name = 'HW1_'+student_id
"""
importing the necessary libraries, do not modify
"""
%matplotlib inline
# plots graphs within the notebook
%config InlineBackend.figure_format='svg' # render inline figures as SVG (vector) rather than raster PNG
from IPython.display import display,Image, Latex
from __future__ import division
from sympy.interactive import printing
# Render sympy expressions with MathJax in notebook output.
printing.init_printing(use_latex='mathjax')
from IPython.display import display,Image, Latex
from IPython.display import clear_output
# SchemDraw draws circuit/schematic diagrams.
import SchemDraw as schem
import SchemDraw.elements as e
import matplotlib.pyplot as plt
import numpy as np
import math
import scipy.constants as sc
import sympy as sym
from IPython.core.display import HTML
def header(text):
    """Return *text* wrapped in an HTML <h4> heading (raw markup string)."""
    return '<h4>' + str(text) + '</h4>'
def box(text):
    """Render *text* inside a dotted-border, padded <div> (IPython HTML object)."""
    markup = '<div style="border:1px dotted black;padding:2em;">' + str(text) + '</div>'
    return HTML(markup)
def nobox(text):
    """Render *text* as a plain HTML paragraph (IPython HTML object)."""
    markup = '<p>' + str(text) + '</p>'
    return HTML(markup)
def addContent(raw_html):
    """Append *raw_html* to the module-level htmlContent accumulator."""
    global htmlContent
    htmlContent = htmlContent + raw_html
class PDF(object):
    """Rich-display wrapper for a PDF file in Jupyter.

    Implements IPython's rich-repr protocol: the notebook shows an <iframe>
    (HTML frontends) or an \\includegraphics directive (LaTeX export).
    """

    def __init__(self, pdf, size=(200, 200)):
        self.pdf = pdf    # path/URL of the PDF document
        self.size = size  # (width, height) of the HTML iframe

    def _repr_latex_(self):
        return r'\includegraphics[width=1.0\textwidth]{{{0}}}'.format(self.pdf)

    def _repr_html_(self):
        return '<iframe src={0} width={1[0]} height={1[1]}></iframe>'.format(self.pdf, self.size)
class ListTable(list):
    """Overridden list class which takes a 2-dimensional list of
    the form [[1,2,3],[4,5,6]], and renders an HTML Table in
    IPython Notebook."""

    def _repr_html_(self):
        parts = ["<table>"]
        for row in self:
            parts.append("<tr>")
            parts.extend("<td>{0}</td>".format(col) for col in row)
            parts.append("</tr>")
        parts.append("</table>")
        return ''.join(parts)
# Default font settings for matplotlib text calls (used as a fontdict).
font = {'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 18,
}
# Temperature-scale conversion helpers (Celsius/Kelvin/Fahrenheit).
# NOTE(review): scipy.constants.constants.C2K etc. were removed in SciPy 1.0 —
# confirm the pinned SciPy version or migrate to scipy.constants.convert_temperature.
from scipy.constants.constants import C2K
from scipy.constants.constants import K2C
from scipy.constants.constants import F2K
from scipy.constants.constants import K2F
from scipy.constants.constants import C2F
from scipy.constants.constants import F2C
```
<h3> Heat loss through a single-pane window</h3>
The rear window of an automobile is defogged by attaching a thin, transparent, film-type heating element to its inner surface. By electrically heating this element, a uniform heat flux may be established at the inner surface.
(a) For 4-mm-thick window glass, determine the electrical power required per unit window area to maintain an inner surface temperature of $15^\circ C$ when the interior air temperature and convection coefficient are $T_{\infty.i}= 25^\circ C$ and $h_i=10 W/m^2.K$, while the exterior (ambient) air temperature and convection coefficient are $T_{\infty.o}=-10^\circ C$ and $h_o=65 W/m^2.K$.
(b) In practice $T_{\infty.o}$ and $h_o$ vary according to weather conditions and car speed. For values of $h_o=2,20,65,100\ W/m^2.K$, determine and plot the electrical power requirement as a function of $T_{\infty.o}$ for $-30\leq T_{\infty.o}\leq 0^\circ C$. From your results, what can you conclude about the need for heater operation at low values of $h_o$? How is this conclusion affected by the value of $T_{\infty.o}$? If $h_o \propto V^n$, where $V$ is the vehicle speed and $n$ is a positive exponent, how does the vehicle speed affect the need for heater operation?
The thermal conductivity of this glass is $1.4 W/m.K$
## Assumptions
Steady state, 1D conduction, thermal resistance of the heating element is negligible. Negligible heat transfer by radiation.
## Parameters
```
L =0.004 #m
k_glass = 1.4 #W/m.K thermal conductivity of glass
T_inf_in = 25 #C
T_inf_out = -10 #C
h_in = 65.
h_out = 65.
T_s_i = 15 #C
!ipython nbconvert --to html ME144-HW1.ipynb --output $assignment_name
```
| github_jupyter |
# EDA
Exploratory Data Analysis adalah proses yang memungkinkan analyst memahami isi data yang digunakan, mulai dari distribusi, frekuensi, korelasi dan lainnya.
Dalam proses ini pemahaman konteks data juga diperhatikan karena akan menjawab masalah - masalah dasar.
## 1. Import Libraries
Import library yang akan digunakan
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import string
```
## 2. Load Dataset
Load dataset hasil Crawling dengan menggunakan `tweepy` sebelumnya
```
# Load Dataset
# Three crawl batches (tweepy output) covering different date ranges;
# they are concatenated into a single frame in a later cell.
data1 = pd.read_csv('../data/Crawling Twitter Jakarta 26 - 27.csv')
data2 = pd.read_csv('../data/Crawling Twitter Jakarta 25 - 23.csv')
data3 = pd.read_csv('../data/Crawling Twitter Jakarta 22 - 19 setengah.csv')
```
**Dataset info**
Menampilkan banyak data dan `Dtype` tiap kolomnya.
```
# Show row counts and column dtypes for each crawl batch, blank line between.
for frame in (data1, data2, data3):
    frame.info()
    print()
```
## 3. Merge Dataset
Menyatukan dataset yang terpisah
```
# Merge Info
# Concatenate the three crawl batches row-wise. Note the index is not reset,
# so positional labels repeat across batches.
data = pd.concat([data1,data2,data3])
data.info()
```
## 4. EDA
Melakukan `Exploratory Data Analysis` pada data.
## 4.1. Tweet perhari
Mengecek banyaknya tweet perharinya
```
# Count tweets per calendar day.
data['Tanggal'] = pd.to_datetime(data['Tanggal'])
tph = data['Tweets'].groupby(data['Tanggal'].dt.date).count()
frek = tph.values

# Map weekday() numbers to Indonesian day names and build "date\nday"
# tick labels for the plot in the next cell.
day_name = {0: 'Senin', 1: 'Selasa', 2: 'Rabu', 3: 'Kamis',
            4: 'Jumat', 5: "Sabtu", 6: 'Minggu'}
hari = [f'{d}\n{day_name[d.weekday()]}' for d in tph.index]
```
**Plotting** (Menampilkan hasil `EDA` lewat visual / Visualisasi Data)
```
# Line plot of daily tweet counts with a value label at each point.
plt.figure(figsize = (10,10))
sns.lineplot(range(len(frek)), frek)
# Hand-tuned label offsets so annotations don't overlap the line:
# some points are labelled below (v - 1000), some above (v + 400),
# the rest slightly to the right of the point.
for i, v in enumerate(frek.tolist()):
    if i == 0 or i==2 or i ==4 or i == len(tph.values)-2:
        plt.text(i-.25, v - 1000, str(v),fontsize=11)
    elif i == 1 or i == 3 or i==6 or i == len(tph.values)-1:
        plt.text(i-.25, v + 400, str(v),fontsize=11)
    else :
        plt.text(i+.07, v, str(v),fontsize=11)
plt.title('Banyak Tweet per Hari',fontsize=20)
# Use the "date\nday-name" labels built in the previous cell as x ticks.
plt.xticks(range(len(tph.values)), hari, rotation=45)
plt.xlabel('Tanggal',fontsize=16)
plt.ylabel('Frekuensi',fontsize=16)
plt.show()
```
**Insight**
Dapat dilihat jika jumlah tweet berada pada puncaknya di hari Sabtu dan Senin. Hal yang cukup mengejutkan yaitu terjadi penurunan jumlah tweet yang signifikan pada hari minggu.
## 4.2. Tweet perjam
Sekarang akan dilihat banyaknya tweet perjamnya.
```
# Count tweets per hour, one Series per day.
# tph.index holds the distinct dates in ascending order; for each date except
# the last, take timestamps in [date_i, date_i+1); the final date takes the
# open-ended tail. NOTE(review): index 0 is skipped — presumably the first
# crawl day is partial; confirm against the crawl windows.
tpj = []
for i in range(1,len(tph.index)) :
    if i != len(tph.index)-1 :
        tpj.append(data['Tanggal'][(data['Tanggal'] >= str(tph.index[i])) & (data['Tanggal']<str(tph.index[i+1]))])
    else :
        tpj.append(data['Tanggal'][data['Tanggal']>=str(tph.index[i])])
# Reduce each day's timestamps to counts grouped by hour of day (0-23).
tpj = [x.groupby(x.dt.hour).count() for x in tpj]
```
**Plotting** (Menampilkan hasil `EDA` lewat visual / Visualisasi Data)
```
# One hourly line plot per day, laid out on a 2x4 grid of subplots.
fig, axes = plt.subplots(nrows=2, ncols=4,figsize=(20,10))
for i in range(len(tpj)):
    # i//4 selects the subplot row, i%4 the column.
    sns.lineplot(tpj[i].index.tolist(),tpj[i].values,ax=axes[i//4,i%4])
    # hari[i+1]: the per-hour series start at the second day (see previous cell).
    axes[i//4,i%4].set_title(f'{hari[i+1]}')
    axes[i//4,i%4].set(xlabel = 'Jam', ylabel = 'Frekuensi')
plt.tight_layout()
#fig.suptitle('Banyak Tweet per Jam',fontsize=24)
plt.show()
```
**Insight**
Dapat dilihat bahwa user optimal melakukan tweet pada pukul 10 - pukul 15, selanjutnya akan terjadi penurunan jumlah tweet pada pukul 15 sampai dengan pukul 20. Selanjutnya jumlah tweet kembali naik pada pukul 20 dan kemudian menurun pada pukul 21 / 22.
## 4.3. Perbandingan Tweet dan Retweet
Akan dilihat perbandingan antara jumlah tweet dan retweet yang ada.
```
# Tally how many rows are tweets vs. retweets (grouped by the boolean flag).
retweet_col = data['Retweet Status']
r_stat = retweet_col.groupby(retweet_col).count()
temp = r_stat.values
```
**Plotting** (Menampilkan hasil `EDA` lewat visual / Visualisasi Data)
```
# Plotting Pie
def func(pct, allvals):
    """Pie-slice label: the percentage (1 decimal place) above the absolute count."""
    total = np.sum(allvals)
    absolute = int(pct / 100. * total)
    return "{:.1f}%\n{:d}".format(pct, absolute)
# Pie chart of the tweet/retweet split; the first wedge is exploded slightly.
# assumes the groupby ordering puts non-retweets (False) first, matching the
# ['Tweet','Retweet'] labels — TODO confirm against r_stat's index.
plt.figure(figsize = (8,8))
plt.pie(temp,explode=(0.1,0),labels=['Tweet','Retweet'],shadow=True,colors=['#A3FBFF','#ADFFA3'],
        autopct=lambda pct: func(pct, temp),startangle=90)
plt.title('Perbandingan Jumlah Tweet dan Retweet',fontsize=18)
plt.axis('equal')  # equal aspect ratio so the pie renders as a circle
plt.legend(fontsize=11)
plt.show()
```
## 4.4. Hashtag terbanyak
Dilihat hashtag terbanyak.
```
# Count how often each hashtag occurs across all tweets.
# The 'Hashtags' column holds string renderings of hashtag lists; strip
# punctuation, lower-case, and split into individual tags.
# Fixes vs. the original: the `x != []` guard was always true (x is a string),
# non-string entries (e.g. NaN) would crash .translate, and counting via
# `freks[temp.index(i)]` was accidentally O(n^2) — collections.Counter keeps
# the same first-seen ordering with O(1) updates.
from collections import Counter

hashtag = data['Hashtags'].tolist()
counts = Counter()
for x in hashtag:
    if isinstance(x, str):  # skip NaN/non-string entries
        tokens = x.translate(str.maketrans('', '', string.punctuation)).lower().split()
        counts.update(tokens)
hashtag_ = pd.DataFrame({'Hashtag': list(counts.keys()), 'Frekuensi': list(counts.values())})
hashtag_ = hashtag_.sort_values(by='Frekuensi', ascending=False)
```
**Plotting** (Menampilkan hasil `EDA` lewat visual / Visualisasi Data)
```
# Bar chart of the 20 most frequent hashtags, count labels above the bars.
hmm = hashtag_.head(20)
plt.figure(figsize = (10,10))
sns.barplot(x = hmm['Hashtag'],y = hmm['Frekuensi'])
for i, v in enumerate(hmm['Frekuensi'].tolist()):
    # Shift the label left in proportion to its digit count to keep it centred.
    plt.text(i-len(str(v))/10, v + 50, str(v),fontsize=10)
plt.title('Hashtag Terbanyak',fontsize=20)
plt.xticks(rotation=90)
plt.xlabel('Hashtag',fontsize=16)
plt.ylabel('Frekuensi',fontsize=16)
plt.show()
```
## 4.5. Source (Device) Terbanyak
Dilihat Source/Device terbanyak yang digunakan oleh user.
```
# Count tweets per client application ("Source") and sort by frequency.
counts = data['Source'].groupby(data['Source']).count()
source = (pd.DataFrame({'Source': counts.index.tolist(),
                        'Frekuensi': counts.values})
            .sort_values(by='Frekuensi', ascending=False))
```
**Plotting** (Menampilkan hasil `EDA` lewat visual / Visualisasi Data)
```
# Bar chart of the 20 most used sources (devices/clients), count labels above bars.
hm = source.head(20)
plt.figure(figsize = (10,10))
sns.barplot(x = hm['Source'],y = hm['Frekuensi'])
for i, v in enumerate(hm['Frekuensi'].tolist()):
    # Shift the label left in proportion to its digit count to keep it centred.
    plt.text(i-len(str(v))/10, v + 1000, str(v),fontsize=10)
plt.title('Source Terbanyak',fontsize=20)
plt.xticks(rotation=90)
plt.xlabel('Source',fontsize=16)
plt.ylabel('Frekuensi',fontsize=16)
plt.show()
```
| github_jupyter |
... ***CURRENTLY UNDER DEVELOPMENT*** ...
## HyCReWW runup estimation
inputs required:
* Nearshore reconstructed historical storms
* Nearshore reconstructed simulated storms
* Historical water levels
* Synthetic water levels
in this notebook:
* HyCReWW runup estimation of historical and synthetic events
* Extreme value analysis and validation
### Workflow:
<div>
<img src="resources/nb02_04.png" width="400px">
</div>
**HyCReWW** provides wave-driven run-up estimations along coral reef-lined shorelines under a wide range of fringing reef morphologies and offshore forcing characteristics. The metamodel is based on two models: (a) a full factorial design of recent XBeach Non-Hydrostatic simulations under different reef configurations and offshore wave and water level conditions (Pearson et al, 2017); and (b) Radial Basis Functions (RBFs) for approximating the non-linear function of run-up for the set of multivariate parameters:
Runup = RBF($\eta_0$, $H_0$, $H_0/L_0$, $\beta_f$, $W_{reef}$, $\beta_b$, $c_f$)
Where the hydrodynamic variables are the offshore water level ($\eta_0$), significant wave height ($H_0$), and wave steepness ($H_0/L_0$); the reef morphologic parameters include fore reef slope ($\beta_f$), reef flat width ($W_{reef}$), beach slope ($\beta_b$), and seabed roughness ($c_f$). $L_0$ is the deep-water wave length, $L_0 = gT_p^2/(2\pi)$, and $T_p$ is the peak period. Beach crest elevation ($z_b$) was fixed at a height of 30 m to focus on run-up as a proxy for coastal inundation.
<img src="resources/nb02_04_profile.png">
```
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# common
import os
import os.path as op
# pip
import numpy as np
import pandas as pd
import xarray as xr
from scipy.interpolate import griddata
# DEV: override installed teslakit
import sys
sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..'))
# teslakit
from teslakit.database import Database
from teslakit.rbf import RBF_Interpolation, RBF_Reconstruction
from teslakit.mda import Normalize, MaxDiss_Simplified_NoThreshold, nearest_indexes
from teslakit.plotting.extremes import Plot_ReturnPeriodValidation
```
## Database and Site parameters
```
# --------------------------------------
# Teslakit database
# Root folder of the local TeslaKit data tree (machine-specific path).
p_data = r'/Users/nico/Projects/TESLA-kit/TeslaKit/data'
db = Database(p_data)
# set site
# All subsequent Load_*/Save_* calls resolve paths relative to the 'ROI' site.
db.SetSite('ROI')
```
## HyCReWW - RBFs configuration
runup has been calculated for a total of 15 scenarios (hs, hs_lo) and a set of reef characteristics
```
# 15 scenarios of runup model execution
# RBF wave conditions: 5 significant wave heights (1-5 m) crossed with
# 3 wave-steepness values — the offshore forcing of the pre-run XBeach cases.
rbf_hs = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5]
rbf_hs_lo = [0.005, 0.025, 0.05, 0.005, 0.025, 0.05, 0.005, 0.025, 0.05, 0.005, 0.025, 0.05, 0.005, 0.025, 0.05]
# load trained RBF coefficients and variables min. and max. limits
var_lims, rbf_coeffs = db.Load_HYCREWW()
# reef characteristics
# Fixed reef morphology for this site: fore-reef slope, beach slope,
# reef-flat width (m) and seabed friction coefficient.
reef_cs = {
    'rslope': 0.0505,
    'bslope': 0.1667,
    'rwidth': 250,
    'cf': 0.0105,
}
# rbf variables names: level is our teslakit input data
rbf_vns = ['level', 'rslope', 'bslope', 'rwidth', 'cf']
```
## HyCReWW methodology library
```
def HyCReWW_RU(df):
    '''
    Calculates runup using HyCReWW RBFs (level, reef variables)
    and a linear interpolation (hs, hs_lo2) to input dataset

    Module-level inputs:
    var_lims   - HyCReWW variables min and max limits
    rbf_coeffs - HyCReWW rbf coefficients
    reef_cs    - reef characteristics
    rbf_vns    - rbf variables

    df - input pandas.dataframe (time,), vars: level, hs, tp, dir, hs_lo2

    Returns an xarray.Dataset with variable 'runup' on df's time index
    (NaN where any input fell outside the RBF training limits).

    NOTE(review): mutates *df* in place by adding the reef-characteristic
    columns — pass a copy if the caller's frame must stay clean.
    '''
    # 1. Prepare input data
    # -----------------------------------------------------------------
    # add reef characteristics to input dataset
    for p in reef_cs.keys(): df[p] = reef_cs[p]

    # filter data: keep rows where all variables are strictly inside limits
    lp = []
    for vn in var_lims.keys():
        ps = (df[vn] > var_lims[vn][0]) & (df[vn] < var_lims[vn][1])
        lp.append(ps)
    ix_in = np.where(np.all(lp, axis=0))[0]

    # select dataset to interpolate at RBFs
    ds_in = df.iloc[ix_in]
    ds_rbf_in = ds_in[rbf_vns]

    # 2. Calculate RUNUP with input LEVEL for the 15 RBF scenarios
    # -----------------------------------------------------------------
    # parameters: all 5 rbf variables are scalar, none directional
    ix_sc = [0, 1, 2, 3, 4]
    ix_dr = []
    minis = [var_lims[x][0] for x in rbf_vns]
    maxis = [var_lims[x][1] for x in rbf_vns]

    # Normalize data to the RBF training range
    ds_nm ,_ ,_ = Normalize(ds_rbf_in.values, ix_sc, ix_dr, minis=minis, maxis=maxis)

    # RBF interpolate level for the 15 scenarios -> ru_z has shape (15, n_rows)
    aux_1 = []
    for rc in rbf_coeffs:
        ro = RBF_Interpolation(rc['constant'], rc['coeff'], rc['nodes'], ds_nm.T)
        aux_1.append(ro)
    ru_z = np.array(aux_1)

    # 3. interpolate RUNUP for input WAVES with the 15 RBF scenarios
    # -----------------------------------------------------------------
    # RU linear interpolation (15 sets: hs, hs_lo -> runup)
    #ru_in = np.zeros(ds_in.shape[0]) * np.nan
    #for c, (_, r) in enumerate(ds_in.iterrows()):
    #    ru_in[c] = griddata((rbf_hs, rbf_hs_lo), ru_z[:,c], (r['hs'], r['hs_lo2']), method='linear')

    # RU linear interpolation (15 sets: hs, hs_lo -> runup) (*faster than loop)
    # Each column of `inp` packs the 15 scenario runups plus that row's (hs, hs_lo2).
    def axis_ipl_rbfs(inp):
        return griddata((rbf_hs, rbf_hs_lo), inp[:15], (inp[15], inp[16]), method='linear')

    inp = np.concatenate((ru_z, ds_in[['hs', 'hs_lo2']].T))
    ru_in = np.apply_along_axis(axis_ipl_rbfs, 0, inp)

    # 4. Prepare output
    # -----------------------------------------------------------------
    # add level to run_up (total water level at the shoreline)
    ru_in = ru_in + ds_in['level']

    # return runup aligned with df's full index; out-of-limits rows stay NaN
    ru_out = np.zeros(len(df.index)) * np.nan
    ru_out[ix_in] = ru_in

    xds_ru = xr.Dataset({'runup': (('time',), ru_out)}, coords={'time': df.index})

    return xds_ru
```
## HyCReWW MDA-RBF statistical wrap
```
def mdarbf_HyCReWW(dataset):
    '''
    Solves HyCReWW methodology using a MDA-RBFs statistical wrap.
    This results in a substantial reduce in computational cost.

    A statistical representative subset will be selected with the MaxDiss
    algorithm from the input dataset; this subset is solved with the full
    HyCReWW methodology, and the (subset, runup) pairs are used to fit
    Radial Basis Functions with which the entire dataset is reconstructed.

    dataset - pandas.DataFrame with columns hs, hs_lo2, level, tp, dir.
    Returns an xarray.Dataset with 'runup' on dataset's original index.

    NOTE(review): dropna(inplace=True) below mutates the caller's frame;
    base_dataset (a copy) preserves the original index for the output.
    '''
    base_dataset = dataset.copy()

    # 1. MaxDiss
    # -----------------------------------------------------------------
    vns_mda = ['hs', 'hs_lo2','level'] # variables used at classification
    n_subset = 100                     # size of the representative subset
    ix_scalar = [0, 1, 2]              # all three variables are scalar
    ix_directional = []

    # remove nan data from input dataset
    dataset.dropna(inplace=True)

    # data for MDA
    data = dataset[vns_mda]

    # MDA algorithm: pick n_subset maximally dissimilar rows
    sel = MaxDiss_Simplified_NoThreshold(data.values[:], n_subset, ix_scalar, ix_directional)
    subset = pd.DataFrame(data=sel, columns=vns_mda)

    # fill subset variables (tp, dir) from the nearest original rows
    ix_n = nearest_indexes(subset[vns_mda].values[:], data.values[:], ix_scalar, ix_directional)
    vns_fill = ['tp', 'dir']
    for vn in vns_fill:
        subset[vn] = dataset[vn].iloc[ix_n].values[:]

    # calculate runup with HyCReWW (full methodology, subset only)
    ru_sel = HyCReWW_RU(subset)
    target = ru_sel.runup.to_dataframe()

    # clean subset variables added by HyCReWW_RU
    subset.drop(columns=['rslope', 'bslope', 'rwidth', 'cf'], inplace=True)

    # clean nans from runup target and input subset (out-of-limits rows)
    ix_rm = np.where(np.isnan(target.values))[0]
    subset.drop(index=ix_rm, inplace=True)
    target.drop(index=ix_rm, inplace=True)

    # 2. RBF RunUp Reconstruction
    # -----------------------------------------------------------------
    vs_recon = ['hs', 'hs_lo2','level']
    subset_r = subset[vs_recon]
    dataset_r = base_dataset[vs_recon] # to maintain input indexes and put nan where there is no output
    ix_scalar_subset = [0, 1, 2]
    ix_scalar_target = [0]

    # fit RBFs on (subset -> runup) and evaluate them over the whole dataset
    recon = RBF_Reconstruction(
        subset_r.values, ix_scalar_subset, [],
        target.values, ix_scalar_target, [],
        dataset_r.values
    )

    xds_ru = xr.Dataset({'runup': (('time',), recon.squeeze())}, coords={'time': base_dataset.index})

    return xds_ru
```
## HyCReWW RBF Interpolation: Historical
```
# Load complete historical data and nearshore waves
# offshore level
level = db.Load_HIST_OFFSHORE(vns=['level'], decode_times=True)
# nearshore waves
waves = db.Load_HIST_NEARSHORE(vns=['Hs', 'Tp', 'Dir'], decode_times=True)
waves["time"] = waves["time"].dt.round("H") # fix waves times: round to nearest hour
# use same time for nearshore calculations
level = level.sel(time=waves.time)
# prepare data for HyCReWW
waves = waves.rename_vars({"Hs": "hs", "Tp": "tp", 'Dir':'dir'}) # rename vars
waves['hs_lo2'] = waves['hs']/(1.5613*waves['tp']**2) # calc. hs_lo2 (1.5613 ≈ g/2π, deep-water wavelength factor)
waves['level'] = level['level'] # add level
dataset = waves[['hs', 'tp', 'dir', 'level', 'hs_lo2']].to_dataframe()
# calculate runup with HyCReWW
#ru_hist = HyCReWW_RU(dataset)
# calculate runup with HyCReWW MDA-RBF wrap (faster statistical approximation)
ru_hist = mdarbf_HyCReWW(dataset)
# store historical runup
db.Save_HIST_NEARSHORE(ru_hist)
```
## HyCREWW RBF Interpolation: Simulation
```
# offshore level
level = db.Load_SIM_OFFSHORE_all(vns=['level'], decode_times=False)
# nearshore waves
waves = db.Load_SIM_NEARSHORE_all(vns=['Hs', 'Tp', 'Dir', 'max_storms'], decode_times=False)
# prepare data for hycreww
waves = waves.rename_vars({"Hs": "hs", "Tp": "tp", 'Dir':'dir'}) # rename vars
waves['hs_lo2'] = waves['hs']/(1.5613*waves['tp']**2) # calc. hs_lo2 (1.5613 ≈ g/2π)
waves['level'] = level['level'] # add level
# fix simulation times (cftimes)
tmpt = db.Load_SIM_NEARSHORE_all(vns=['Hs'], decode_times=True, use_cftime=True)
waves['time'] = tmpt['time']
# iterate simulations: solve and store runup one simulation at a time
for n in waves.n_sim:
    waves_n = waves.sel(n_sim=int(n))
    dataset = waves_n[['hs', 'tp', 'dir', 'level', 'hs_lo2']].to_dataframe()
    # calculate runup with HyCReWW
    #ru_sim_n = HyCREWW_RU(dataset)
    # calculate runup with HyCReWW MDA-RBF wrap
    ru_sim_n = mdarbf_HyCReWW(dataset)
    # store simulation runup
    db.Save_SIM_NEARSHORE(ru_sim_n, int(n))
    print('simulation {0} processed.'.format(int(n)))
```
## Methodology Validation: Annual Maxima
```
# load all simulations
ru_sims = db.Load_SIM_NEARSHORE_all(vns=['runup'], decode_times=True, use_cftime=True)
# compare historical and simulations runup annual maxima
hist_A = ru_hist['runup'].groupby('time.year').max(dim='time')
sim_A = ru_sims['runup'].groupby('time.year').max(dim='time')
# Return Period historical vs. simulations (extreme-value validation plot)
Plot_ReturnPeriodValidation(hist_A, sim_A.transpose());
```
| github_jupyter |
```
from utils import *
import tensorflow as tf
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# model_selection has provided train_test_split since 0.18 (drop-in).
from sklearn.model_selection import train_test_split
import time

# Load the labelled text corpus from ./data (one sub-folder per class).
trainset = sklearn.datasets.load_files(container_path = 'data', encoding = 'UTF-8')
trainset.data, trainset.target = separate_dataset(trainset,1.0)
print (trainset.target_names)
print (len(trainset.data))
print (len(trainset.target))

# One-hot encode the integer class labels.
ONEHOT = np.zeros((len(trainset.data),len(trainset.target_names)))
ONEHOT[np.arange(len(trainset.data)),trainset.target] = 1.0

# 80/20 train/test split; raw targets and one-hot targets stay aligned.
train_X, test_X, train_Y, test_Y, train_onehot, test_onehot = train_test_split(trainset.data,
                                                                               trainset.target,
                                                                               ONEHOT, test_size = 0.2)

# Build the word vocabulary over the whole corpus.
concat = ' '.join(trainset.data).split()
vocabulary_size = len(list(set(concat)))
data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)
print('vocab from size: %d'%(vocabulary_size))
print('Most common words', count[4:10])
print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]])

# Special token ids assigned by build_dataset.
GO = dictionary['GO']
PAD = dictionary['PAD']
EOS = dictionary['EOS']
UNK = dictionary['UNK']
class Model:
    """Multi-layer vanilla-RNN text classifier (TensorFlow 1.x graph mode).

    Embeds token-id sequences, runs them through stacked BasicRNNCells, and
    classifies from the last time step with a linear layer + hinge loss.
    """

    def __init__(self, size_layer, num_layers, embedded_size,
                 dict_size, dimension_output, learning_rate):
        # Factory for one RNN cell; `reuse` allows variable sharing.
        def cells(reuse=False):
            return tf.nn.rnn_cell.BasicRNNCell(size_layer,reuse=reuse)

        # X: (batch, time) int token ids; Y: (batch, n_classes) one-hot labels.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.float32, [None, dimension_output])
        # Trainable embedding table, uniform init in [-1, 1).
        encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, embedded_size], -1, 1))
        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
        rnn_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
        outputs, _ = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded, dtype = tf.float32)
        # Output projection from the state at the last time step.
        W = tf.get_variable('w',shape=(size_layer, dimension_output),initializer=tf.orthogonal_initializer())
        b = tf.get_variable('b',shape=(dimension_output),initializer=tf.zeros_initializer())
        self.logits = tf.matmul(outputs[:, -1], W) + b
        self.cost = tf.losses.hinge_loss(logits = self.logits, labels = self.Y)
        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)
        correct_pred = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Hyperparameters.
size_layer = 128
num_layers = 2
embedded_size = 128
dimension_output = len(trainset.target_names)
learning_rate = 1e-3
maxlen = 50       # sequences padded/truncated to 50 tokens
batch_size = 128

# Build a fresh graph and session; +4 covers the GO/PAD/EOS/UNK tokens.
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(size_layer,num_layers,embedded_size,vocabulary_size+4,dimension_output,learning_rate)
sess.run(tf.global_variables_initializer())

# Early stopping: quit after 5 consecutive epochs without valid-acc improvement.
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 5, 0, 0, 0
while True:
    lasttime = time.time()
    if CURRENT_CHECKPOINT == EARLY_STOPPING:
        print('break epoch:%d\n'%(EPOCH))
        break
    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
    # Training pass (full batches only; the remainder is dropped).
    for i in range(0, (len(train_X) // batch_size) * batch_size, batch_size):
        batch_x = str_idx(train_X[i:i+batch_size],dictionary,maxlen)
        acc, loss, _ = sess.run([model.accuracy, model.cost, model.optimizer],
                                feed_dict = {model.X : batch_x, model.Y : train_onehot[i:i+batch_size]})
        train_loss += loss
        train_acc += acc
    # Validation pass (no optimizer op).
    for i in range(0, (len(test_X) // batch_size) * batch_size, batch_size):
        batch_x = str_idx(test_X[i:i+batch_size],dictionary,maxlen)
        acc, loss = sess.run([model.accuracy, model.cost],
                             feed_dict = {model.X : batch_x, model.Y : test_onehot[i:i+batch_size]})
        test_loss += loss
        test_acc += acc
    # Average the per-batch sums.
    train_loss /= (len(train_X) // batch_size)
    train_acc /= (len(train_X) // batch_size)
    test_loss /= (len(test_X) // batch_size)
    test_acc /= (len(test_X) // batch_size)
    if test_acc > CURRENT_ACC:
        print('epoch: %d, pass acc: %f, current acc: %f'%(EPOCH,CURRENT_ACC, test_acc))
        CURRENT_ACC = test_acc
        CURRENT_CHECKPOINT = 0
    else:
        CURRENT_CHECKPOINT += 1
    print('time taken:', time.time()-lasttime)
    print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'%(EPOCH,train_loss,
                                                                                             train_acc,test_loss,
                                                                                             test_acc))
    EPOCH += 1

# Final per-class report over the whole test split.
logits = sess.run(model.logits, feed_dict={model.X:str_idx(test_X,dictionary,maxlen)})
print(metrics.classification_report(test_Y, np.argmax(logits,1), target_names = trainset.target_names))
```
| github_jupyter |
```
import torch
import os
# Restrict this process to GPU 0.
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
from tqdm.auto import tqdm
import numpy as np
from matplotlib import pyplot as plt
from torch import nn
import gin
from functools import partial
# Allow gin.bind_parameter calls outside a config file (notebook usage).
gin.enter_interactive_mode()
@gin.configurable
def get_xy(h=10, w=10, s=1000):
    """Sample s one-hot images of shape (h, w).

    Each image is float32 zeros with a single 1.0 at a uniformly random
    (row, col). Returns (images, (row_indices, col_indices)).
    """
    rows = np.random.choice(h, s, replace=True)
    cols = np.random.choice(w, s, replace=True)
    imgs = np.zeros((s, h, w), dtype=np.float32)
    imgs[np.arange(s), rows, cols] = 1.0
    return imgs, (rows, cols)
# Problem size: 10x10 grid, 1000 samples; bound as gin defaults for get_xy.
h, w, s = 10, 10, 1000
gin.bind_parameter('get_xy.h', h)
gin.bind_parameter('get_xy.w', w)
gin.bind_parameter('get_xy.s', s)
X, (xs, ys) = get_xy()
# Sanity check: display one random sample, titled with its hot-pixel coordinates.
idx = np.random.choice(s)
plt.title(f"{xs[idx]} {ys[idx]}")
plt.imshow(X[idx])
import torch
from torch import nn
import numpy as np
import gin
@gin.configurable
class PolyFracAct(nn.Module):
    """Polynomial activation function. y = poly(x, w1)/poly(x, w2), w1, w2 is learned.

    One independent rational cubic per feature; the ratio is then squashed
    through tanh. Coefficients are stored in `a` with shape
    (max_degree + 1, features, 2) — last axis 0 = numerator, 1 = denominator.
    """
    # Tells the surrounding framework this activation needs the feature count.
    GIVE_N_FEATURES = True

    def __repr__(self, *args, **kwargs):
        orig = super(PolyFracAct, self).__repr__(*args, **kwargs)
        return f"{orig} max_degree={self.max_degree} features={self.features}"

    def __init__(self, features=None, max_degree=3):
        super(PolyFracAct, self).__init__()
        # order: constant, x, x^2, ...
        self.max_degree = max_degree
        # A shape tuple/list is collapsed to its total element count.
        if isinstance(features, list) or isinstance(features, tuple):
            features = np.prod(features)
        self.features = features
        self.orig_act = nn.Tanh()
        # Small random init keeps the initial ratio well-conditioned.
        init = np.random.randn(*(max_degree + 1, features, 2)).astype(np.float32) / 10.
        # init[1, :, :] = 1.0
        # init[2, :, :] = 0.0001
        self.a = nn.Parameter(torch.from_numpy(init), requires_grad=True)

    def forward(self, x):
        xshape = x.shape
        x = x.flatten(start_dim=1)
        assert x.shape[1] == self.features, (x.shape, self.features)
        # if self.orig_act is not None:
        #     x = self.orig_act(x)
        # The explicit cubic expansion below only covers degree 3.
        assert self.max_degree == 3
        out1 = x * self.a[1, :, 0] + x ** 2 * self.a[2, :, 0] + x ** 3 * self.a[3, :, 0] + self.a[0, :, 0]
        out2 = x * self.a[1, :, 1] + x ** 2 * self.a[2, :, 1] + x ** 3 * self.a[3, :, 1] + self.a[0, :, 1]
        out = out1 / out2
        x = out.view(*xshape)
        # Final tanh squashes the rational output into (-1, 1).
        if self.orig_act is not None:
            x = self.orig_act(x)
        return x
# Activation/layer factories used by the Decoder/Reconstructor below.
act_cls = lambda **kwargs: nn.Tanh()
from sparse_causal_model_learner_rl.trainable.quadratic_neuron import Quadratic
from sparse_causal_model_learner_rl.trainable.poly_activation import PolyAct
# act_cls = partial(PolyAct, orig_act_cls=lambda: (lambda x: x)) # nn.Tanh
# Final choice: learned rational-polynomial activation, plain linear layers.
act_cls = PolyFracAct
layer_cls = nn.Linear
# layer_cls = Quadratic
class Decoder(nn.Module):
    """Map a flattened (h, w) one-hot image to a 4-dimensional code.

    A single linear layer; h and w are module-level globals.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        self.fc = nn.Linear(in_features=h * w, out_features=4)

    def forward(self, x):
        flat = x.flatten(start_dim=1)
        return self.fc(flat)
class Reconstructor(nn.Module):
    """Invert the decoder: expand a 4-dim code back to an (h, w) image.

    Two layers, each followed by a learnable activation (act_cls);
    h, w, layer_cls and act_cls are module-level globals.
    """

    def __init__(self):
        super(Reconstructor, self).__init__()
        self.fc1 = layer_cls(in_features=4, out_features=10 * h * w)
        self.fc2 = nn.Linear(in_features=10 * h * w, out_features=h * w)
        self.act1 = act_cls(features=10 * h * w)
        self.act2 = act_cls(features=h * w)

    def forward(self, z):
        hidden = self.act1(self.fc1(z))
        flat = self.act2(self.fc2(hidden))
        return flat.view(flat.shape[0], h, w)
# Instantiate the auto-encoder pair and peek at the activation coefficients.
rec = Reconstructor()
dec = Decoder()
rec.act1.a
data = torch.from_numpy(X)

def loss():
    """Mean per-sample squared reconstruction error of rec(dec(data))."""
    # softmaxed = torch.nn.Softmax(1)(rec(dec(data)).flatten(start_dim=1))
    # answers = torch.argmax(data.flatten(start_dim=1), dim=1)
    # ce = torch.nn.CrossEntropyLoss()
    # loss = ce(softmaxed, answers)
    # delta = (rec(dec(data)) - data).flatten(start_dim=1)
    # std = data.std(0, keepdim=True).flatten(start_dim=1)
    # std = torch.where(std < 1e-6, torch.ones_like(std), std)
    # delta = delta.pow(2)
    # delta = delta / std.pow(2)
    # loss = delta.sum(1).mean(0)
    delta = (rec(dec(data)) - data).flatten(start_dim=1)
    loss = delta.pow(2).sum(1).mean(0)
    return loss

# Optimize both modules jointly.
opt = torch.optim.Adam(list(rec.parameters()) + list(dec.parameters()),
                       lr=1e-3)
losses = []
for i in tqdm(range(10000)):
    # Resample a fresh batch every 10 steps.
    if i % 10 == 0:
        X, (xs, ys) = get_xy()
        data = torch.from_numpy(X)
    opt.zero_grad()
    l = loss()
    l.backward()
    opt.step()
    losses.append(l.item())

# Training curve on a log scale.
plt.plot(losses)
plt.yscale('log')

# Qualitative check: original vs. reconstruction for 3 random samples.
X, (xs, ys) = get_xy()
data = torch.from_numpy(X)
Xrec = rec(dec(data))
for _ in range(3):
    idx = np.random.choice(s)
    plt.subplot(1, 2, 1)
    plt.title(f"{xs[idx]} {ys[idx]}")
    plt.imshow(X[idx])
    plt.subplot(1, 2, 2)
    xrecnp = Xrec.detach().cpu().numpy()[idx]
    plt.imshow(xrecnp, vmin=0, vmax=1)# >= np.max(xrecnp))
    plt.show()
plt.imshow
# Inspect one neuron's quadratic weight matrix (only valid when layer_cls is Quadratic).
plt.imshow(rec.fc1.W[7, :, :].detach().numpy())

# Plot the learned rational activation of feature `idx` over tanh-squashed inputs.
xs = np.linspace(-10, 10, 100)
idx = 1
xs_ = np.tanh(xs)
pows = [np.power(xs_, i) for i in range(4)]
xs_pows1 = rec.act1.a[:, idx:idx+1, 0].detach().cpu().numpy() * np.array(pows)
xs_pows2 = rec.act1.a[:, idx:idx+1, 1].detach().cpu().numpy() * np.array(pows)
p1 = np.sum(xs_pows1, axis=0)
p2 = np.sum(xs_pows2, axis=0)
p = p1 / p2
# p = np.tanh(p)
plt.plot(xs, p)
rec.act1.a[:, idx:idx+1, 0], rec.act1.a[:, idx:idx+1, 1]
rec.act1.a[:, idx:idx+1, 0].detach().cpu().numpy().shape, np.array(pows).shape
| github_jupyter |
# Snowflake
Notebook to demonstrate how to use the Snowflake Spark connector from FinSpace.
## Objective
For every table found in a snowflake database, create a FinSpace dataset and populate an associated attribute set with the values necessary to retrieve the table of data from snowflake directly (catalog, schema, and table names). The attribute set also contains an additional attribute of type category named 'Source' that identifies the data as coming from Snowflake as well, this can be used in the FinSpace for browsing (the Category 'Source' appears in the browse menu).
## Outline
- In FinSpace: Create an attribute set in FinSpace to hold meta-data about the table's location in snowflake
- e.g. values necessary for the spark query to load the snowflake table
- Values: catalog (database), schema, and the table name
- In this notebook, that attribute set's name is 'Snowflake Table Attributes' and searched for in the attribute sets
- Given a snowflake database's name....
- get all tables in snowflake and their schemas
- for each table...
- populate the finspace attribute set (defined above) with the meta-data about the table (catalog, schema, and table name)
- translate the Snowflake schema into a FinSpace schema
- create a FinSpace dataset (tabular, give schema), associate the populated attribute set to the created dataset
## References
- [Spark Connector](https://docs.snowflake.com/en/user-guide/spark-connector.html)
- [Using the Connector with Python](https://docs.snowflake.com/en/user-guide/spark-connector-use.html#using-the-connector-with-python)
- [All Options](https://docs.snowflake.com/en/user-guide/spark-connector-use.html#label-spark-options)
- [Spark Key Pair](https://docs.snowflake.com/en/user-guide/spark-connector-use.html#using-key-pair-authentication-key-pair-rotation)
- [Spark OAuth](https://docs.snowflake.com/en/user-guide/spark-connector-use.html#using-external-oauth)
### Meta-Data Calls to Snowflake
To get a list of the databases in the account you can use
```
SHOW DATABASES;
```
Or query INFORMATION_SCHEMA in any database with
```
SELECT * from <any_database_name>.INFORMATION_SCHEMA.DATABASES;
```
To get table, view and column information it is highly recommended to limit the scope to the database of interest. Some customers have extremely large schemas that can get time consuming to retrieve all the tables and columns in the account.
Examples
```
SHOW TABLES < or COLUMNS> IN DATABASE <database_name>;
Select * from <database_name>.INFORMATION_SCHEMA.COLUMNS;
```
Considerations of using INFORMATION_SCHEMA vs SHOW
https://docs.snowflake.com/en/sql-reference/info-schema.html#considerations-for-replacing-show-commands-with-information-schema-views
https://docs.snowflake.com/en/sql-reference/info-schema.html#general-usage-notes
```
%local
from aws.finspace.cluster import FinSpaceClusterManager
# if this was already run, no need to run again
if 'finspace_clusters' not in globals():
finspace_clusters = FinSpaceClusterManager()
finspace_clusters.auto_connect()
else:
print(f'connected to cluster: {finspace_clusters.get_connected_cluster_id()}')
```
## Configure Spark for Snowflake
This ensures the cluster gets the maven packages deployed to it so the cluster can communicate with Snowflake. The '-f' argument below will force any running spark session to restart.
```
%%configure -f
{
    "conf": {
        "spark.jars.packages": "net.snowflake:snowflake-jdbc:3.13.5,net.snowflake:spark-snowflake_2.11:2.9.0-spark_2.4"
    }
}
%local
# Read Snowflake connection settings from a local ini file and push them to the
# Spark cluster so the connector options can be built remotely.
import configparser

# read the config
config = configparser.ConfigParser()
config.read("snowflake.ini")

# values from config
snowflake_user=config['snowflake']['user']
snowflake_password=config['snowflake']['password']
snowflake_account=config['snowflake']['account']
snowflake_database=config['snowflake']['database']
snowflake_warehouse=config['snowflake']['warehouse']

# NOTE(review): this prints the password in clear text to the notebook output —
# consider removing before sharing the notebook
print(f"""snowflake_user={snowflake_user}
snowflake_password={snowflake_password}
snowflake_account={snowflake_account}
snowflake_database={snowflake_database}
snowflake_warehouse={snowflake_warehouse}
""")

# copy each local variable into the remote Spark session
%send_to_spark -i snowflake_user
%send_to_spark -i snowflake_password
%send_to_spark -i snowflake_account
%send_to_spark -i snowflake_database
%send_to_spark -i snowflake_warehouse

# Snowflake options for the spark data source
# username and password should be protected, admittedly in the clear for convenience
sfOptions = {
    "sfURL" : f"{snowflake_account}.snowflakecomputing.com",
    "sfUser" : snowflake_user,
    "sfPassword" : snowflake_password,
    "sfDatabase" : snowflake_database,
    "sfWarehouse" : snowflake_warehouse,
    "autopushdown": "on",
    "keep_column_case": "on"
}

# class name for the snowflake spark data source
SNOWFLAKE_SOURCE_NAME = "net.snowflake.spark.snowflake"
```
# Python Helper Classes
These are the FinSpace helper classes found in the finspace samples and examples github
```
# %load finspace.py
import datetime
import time
import boto3
import os
import pandas as pd
import urllib
from urllib.parse import urlparse
from botocore.config import Config
from boto3.session import Session
# Base FinSpace class
class FinSpace:
    """
    Helper around the boto3 'finspace-data' client.

    Wraps the classification (taxonomy), attribute set (dataset type),
    permission group, dataset, changeset, view, S3 and cluster APIs used by
    the notebooks in this repository. Most methods are one-call conveniences
    that unwrap the interesting portion of each API response.
    """

    def __init__(
            self,
            config=Config(retries={'max_attempts': 3, 'mode': 'standard'}),
            boto_session: Session = None,
            dev_overrides: dict = None,
            service_name = 'finspace-data'):
        """
        To configure this class object, simply instantiate with no-arg if hitting prod endpoint, or else override it:
        e.g.
        `hab = FinSpaceAnalyticsManager(region_name = 'us-east-1',
            dev_overrides = {'hfs_endpoint': 'https://39g32x40jk.execute-api.us-east-1.amazonaws.com/alpha'})`
        """
        self.hfs_endpoint = None
        self.region_name = None

        # dev overrides win over any provided boto session information
        if dev_overrides is not None:
            if 'hfs_endpoint' in dev_overrides:
                self.hfs_endpoint = dev_overrides['hfs_endpoint']
            if 'region_name' in dev_overrides:
                self.region_name = dev_overrides['region_name']
        else:
            if boto_session is not None:
                self.region_name = boto_session.region_name
            else:
                # fall back to the EC2 instance metadata service for the region
                self.region_name = self.get_region_name()

        self.config = config

        self._boto3_session = boto3.session.Session(region_name=self.region_name) if boto_session is None else boto_session

        print(f"service_name: {service_name}")
        print(f"endpoint: {self.hfs_endpoint}")
        print(f"region_name: {self.region_name}")

        self.client = self._boto3_session.client(service_name, endpoint_url=self.hfs_endpoint, config=self.config)

    @staticmethod
    def get_region_name():
        # query the EC2 instance metadata service for this host's region
        req = urllib.request.Request("http://169.254.169.254/latest/meta-data/placement/region")
        with urllib.request.urlopen(req) as response:
            return response.read().decode("utf-8")

    # --------------------------------------
    # Utility Functions
    # --------------------------------------

    @staticmethod
    def get_list(all_list: dir, name: str):
        """
        Search for name found in the all_list dir and return that list of things.
        Removes repetitive code found in functions that call boto apis then search for the expected returned items

        :param all_list: list of things to search
        :type: dir:

        :param name: name to search for in all_lists
        :type: str

        :return: list of items found in name
        """
        r = []

        # if the given name is found, add each of its items to the result list
        if name in all_list:
            for s in all_list[name]:
                r.append(s)

        # return the list
        return r

    # --------------------------------------
    # Classification Functions
    # --------------------------------------

    def list_classifications(self):
        """
        Return list of all classifications

        :return: all classifications
        """
        all_list = self.client.list_classifications(sort='NAME')
        return self.get_list(all_list, 'classifications')

    def classification_names(self):
        """
        Get the classifications names

        :return list of classifications names only
        """
        classification_names = []
        all_classifications = self.list_classifications()
        for c in all_classifications:
            classification_names.append(c['name'])
        return classification_names

    def classification(self, name: str):
        """
        Exact (case-insensitive) name search for a classification of the given name.
        Returns None when no classification matches.

        :param name: name of the classification to find
        :type: str
        """
        all_classifications = self.list_classifications()
        existing_classification = next((c for c in all_classifications if c['name'].lower() == name.lower()), None)
        if existing_classification:
            return existing_classification

    def describe_classification(self, classification_id: str):
        """
        Calls the describe classification API function and only returns the taxonomy portion of the response.

        :param classification_id: the GUID of the classification to get description of
        :type: str
        """
        resp = None
        # classifications are 'taxonomies' in the underlying API
        taxonomy_details_resp = self.client.describe_taxonomy(taxonomyId=classification_id)
        if 'taxonomy' in taxonomy_details_resp:
            resp = taxonomy_details_resp['taxonomy']
        return (resp)

    def create_classification(self, classification_definition):
        """Create a classification (taxonomy) from the given definition; returns the new taxonomy's ID."""
        resp = self.client.create_taxonomy(taxonomyDefinition=classification_definition)
        taxonomy_id = resp["taxonomyId"]
        return (taxonomy_id)

    def delete_classification(self, classification_id):
        """Delete the classification (taxonomy); returns True on success, else the raw response."""
        resp = self.client.delete_taxonomy(taxonomyId=classification_id)
        if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
            return resp
        return True

    # --------------------------------------
    # Attribute Set Functions
    # --------------------------------------

    def list_attribute_sets(self):
        """
        Get list of all dataset_types in the system

        :return: list of dataset types
        """
        resp = self.client.list_dataset_types()
        results = resp['datasetTypeSummaries']
        # follow pagination tokens to collect the full list
        while "nextToken" in resp:
            resp = self.client.list_dataset_types(nextToken=resp['nextToken'])
            results.extend(resp['datasetTypeSummaries'])
        return (results)

    def attribute_set_names(self):
        """
        Get the list of all dataset type names

        :return list of all dataset type names
        """
        dataset_type_names = []
        all_dataset_types = self.list_dataset_types()
        for c in all_dataset_types:
            dataset_type_names.append(c['name'])
        return dataset_type_names

    def attribute_set(self, name: str):
        """
        Exact (case-insensitive) name search for a dataset type of the given name.
        Returns None when no dataset type matches.

        :param name: name of the dataset type to find
        :type: str
        """
        all_dataset_types = self.list_dataset_types()
        existing_dataset_type = next((c for c in all_dataset_types if c['name'].lower() == name.lower()), None)
        if existing_dataset_type:
            return existing_dataset_type

    def describe_attribute_set(self, attribute_set_id: str):
        """
        Calls the describe dataset type API function and only returns the dataset type portion of the response.

        :param attribute_set_id: the GUID of the dataset type to get description of
        :type: str
        """
        resp = None
        dataset_type_details_resp = self.client.describe_dataset_type(datasetTypeId=attribute_set_id)
        if 'datasetType' in dataset_type_details_resp:
            resp = dataset_type_details_resp['datasetType']
        return (resp)

    def create_attribute_set(self, attribute_set_def):
        """Create an attribute set (dataset type) from the given definition; returns its ID."""
        resp = self.client.create_dataset_type(datasetTypeDefinition=attribute_set_def)
        att_id = resp["datasetTypeId"]
        return (att_id)

    def delete_attribute_set(self, attribute_set_id: str):
        """Delete the attribute set; returns True on success, else the raw response."""
        # NOTE(review): the other attribute-set helpers call *_dataset_type APIs;
        # confirm the client actually exposes delete_attribute_set
        resp = self.client.delete_attribute_set(attributeSetId=attribute_set_id)
        if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
            return resp
        return True

    def associate_attribute_set(self, att_name: str, att_values: list, dataset_id: str):
        """
        Associate the named attribute set to the dataset and populate it with att_values.
        Any existing association is dropped first (best effort).
        """
        # get the attribute set by name, will need its id
        att_set = self.attribute_set(att_name)

        # get the dataset's information, will need the arn
        dataset = self.describe_dataset_details(dataset_id=dataset_id)

        # disassociate any existing relationship
        try:
            self.client.dissociate_dataset_from_dataset_type(datasetArn=dataset['arn'],
                                                             datasetTypeId=att_set['id'])
        except:
            # best effort: nothing was associated yet
            print("Nothing to disassociate")

        self.client.associate_dataset_with_dataset_type(datasetArn=dataset['arn'], datasetTypeId=att_set['id'])
        ret = self.client.update_dataset_type_context(datasetArn=dataset['arn'], datasetTypeId=att_set['id'],
                                                      values=att_values)
        return ret

    # --------------------------------------
    # Permission Group Functions
    # --------------------------------------

    def list_permission_groups(self, max_results: int):
        """List up to max_results permission groups of the environment."""
        all_perms = self.client.list_permission_groups(MaxResults=max_results)
        return (self.get_list(all_perms, 'permissionGroups'))

    def permission_group(self, name):
        """Exact (case-insensitive) name search for a permission group; None when not found."""
        all_groups = self.list_permission_groups(max_results = 100)
        existing_group = next((c for c in all_groups if c['name'].lower() == name.lower()), None)
        if existing_group:
            return existing_group

    def describe_permission_group(self, permission_group_id: str):
        """Describe a permission group; returns only the 'permissionGroup' portion of the response."""
        resp = None
        perm_resp = self.client.describe_permission_group(permissionGroupId=permission_group_id)
        if 'permissionGroup' in perm_resp:
            resp = perm_resp['permissionGroup']
        return (resp)

    # --------------------------------------
    # Dataset Functions
    # --------------------------------------

    def describe_dataset_details(self, dataset_id: str):
        """
        Calls the describe dataset details API function and only returns the dataset details portion of the response.

        :param dataset_id: the GUID of the dataset to get description of
        :type: str
        """
        resp = None
        dataset_details_resp = self.client.describe_dataset_details(datasetId=dataset_id)
        if 'dataset' in dataset_details_resp:
            resp = dataset_details_resp["dataset"]
        return (resp)

    def create_dataset(self, name: str, description: str, permission_group_id: str, dataset_permissions: [], kind: str,
                       owner_info, schema):
        """
        Create a dataset

        Warning, dataset names are not unique, be sure to check for the same name dataset before creating a new one

        :param name: Name of the dataset
        :type: str

        :param description: Description of the dataset
        :type: str

        :param permission_group_id: permission group for the dataset
        :type: str

        :param dataset_permissions: permissions for the group on the dataset

        :param kind: Kind of dataset, choices: TABULAR
        :type: str

        :param owner_info: owner information for the dataset

        :param schema: Schema of the dataset

        :return: the dataset_id of the created dataset
        """
        # convert the plain permission-name list into the API's expected structure
        if dataset_permissions:
            request_dataset_permissions = [{"permission": permissionName} for permissionName in dataset_permissions]
        else:
            request_dataset_permissions = []

        response = self.client.create_dataset(name=name,
                                              permissionGroupId = permission_group_id,
                                              datasetPermissions = request_dataset_permissions,
                                              kind=kind,
                                              description = description.replace('\n', ' '),
                                              ownerInfo = owner_info,
                                              schema = schema)
        return response["datasetId"]

    def ingest_from_s3(self,
                       s3_location: str,
                       dataset_id: str,
                       change_type: str,
                       wait_for_completion: bool = True,
                       format_type: str = "CSV",
                       format_params: dict = {'separator': ',', 'withHeader': 'true'}):
        """
        Creates a changeset and ingests the data given in the S3 location into the changeset

        :param s3_location: the source location of the data for the changeset, will be copied into the changeset
        :stype: str

        :param dataset_id: the identifier of the containing dataset for the changeset to be created for this data
        :type: str

        :param change_type: What is the kind of changetype? "APPEND", "REPLACE" are the choices
        :type: str

        :param wait_for_completion: Boolean, should the function wait for the operation to complete?
        :type: str

        :param format_type: format type, CSV, PARQUET, XML, JSON
        :type: str

        :param format_params: dictionary of format parameters
        :type: dict

        :return: the id of the changeset created
        """
        create_changeset_response = self.client.create_changeset(
            datasetId=dataset_id,
            changeType=change_type,
            sourceType='S3',
            sourceParams={'s3SourcePath': s3_location},
            formatType=format_type.upper(),
            formatParams=format_params
        )

        changeset_id = create_changeset_response['changeset']['id']

        if wait_for_completion:
            self.wait_for_ingestion(dataset_id, changeset_id)
        return changeset_id

    def describe_changeset(self, dataset_id: str, changeset_id: str):
        """
        Function to get a description of the given changeset for the given dataset

        :param dataset_id: identifier of the dataset
        :type: str

        :param changeset_id: the identifier of the changeset
        :type: str

        :return: all information about the changeset, if found
        """
        describe_changeset_resp = self.client.describe_changeset(datasetId=dataset_id, id=changeset_id)
        return describe_changeset_resp['changeset']

    def create_as_of_view(self, dataset_id: str, as_of_date: datetime, destination_type: str,
                          partition_columns: list = [], sort_columns: list = [], destination_properties: dict = {},
                          wait_for_completion: bool = True):
        """
        Creates an 'as of' static view up to and including the requested 'as of' date provided.

        :param dataset_id: identifier of the dataset
        :type: str

        :param as_of_date: as of date, will include changesets up to this date/time in the view
        :type: datetime

        :param destination_type: destination type
        :type: str

        :param partition_columns: columns to partition the data by for the created view
        :type: list

        :param sort_columns: column to sort the view by
        :type: list

        :param destination_properties: destination properties
        :type: dict

        :param wait_for_completion: should the function wait for the system to create the view?
        :type: bool

        :return str: GUID of the created view if successful
        """
        create_materialized_view_resp = self.client.create_materialized_snapshot(
            datasetId=dataset_id,
            asOfTimestamp=as_of_date,
            destinationType=destination_type,
            partitionColumns=partition_columns,
            sortColumns=sort_columns,
            autoUpdate=False,
            destinationProperties=destination_properties
        )

        view_id = create_materialized_view_resp['id']

        if wait_for_completion:
            self.wait_for_view(dataset_id=dataset_id, view_id=view_id)
        return view_id

    def create_auto_update_view(self, dataset_id: str, destination_type: str,
                                partition_columns=[], sort_columns=[], destination_properties={},
                                wait_for_completion=True):
        """
        Creates an auto-updating view of the given dataset

        :param dataset_id: identifier of the dataset
        :type: str

        :param destination_type: destination type
        :type: str

        :param partition_columns: columns to partition the data by for the created view
        :type: list

        :param sort_columns: column to sort the view by
        :type: list

        :param destination_properties: destination properties
        :type: str

        :param wait_for_completion: should the function wait for the system to create the view?
        :type: bool

        :return str: GUID of the created view if successful
        """
        create_materialized_view_resp = self.client.create_materialized_snapshot(
            datasetId=dataset_id,
            destinationType=destination_type,
            partitionColumns=partition_columns,
            sortColumns=sort_columns,
            autoUpdate=True,
            destinationProperties=destination_properties
        )

        view_id = create_materialized_view_resp['id']

        if wait_for_completion:
            self.wait_for_view(dataset_id=dataset_id, view_id=view_id)
        return view_id

    def wait_for_ingestion(self, dataset_id: str, changeset_id: str, sleep_sec=10):
        """
        function that will continuously poll the changeset creation to ensure it completes or fails before returning.

        :param dataset_id: GUID of the dataset
        :type: str

        :param changeset_id: GUID of the changeset
        :type: str

        :param sleep_sec: seconds to wait between checks
        :type: int
        """
        while True:
            status = self.describe_changeset(dataset_id=dataset_id, changeset_id=changeset_id)['status']
            if status == 'SUCCESS':
                print(f"Changeset complete")
                break
            elif status == 'PENDING' or status == 'RUNNING':
                # message says PENDING for both in-flight states
                print(f"Changeset status is still PENDING, waiting {sleep_sec} sec ...")
                time.sleep(sleep_sec)
                continue
            else:
                raise Exception(f"Bad changeset status: {status}, failing now.")

    def wait_for_view(self, dataset_id: str, view_id: str, sleep_sec=10):
        """
        function that will continuously poll the view creation to ensure it completes or fails before returning.

        :param dataset_id: GUID of the dataset
        :type: str

        :param view_id: GUID of the view
        :type: str

        :param sleep_sec: seconds to wait between checks
        :type: int
        """
        while True:
            list_views_resp = self.client.list_materialization_snapshots(datasetId=dataset_id, maxResults=100)
            matched_views = list(filter(lambda d: d['id'] == view_id, list_views_resp['materializationSnapshots']))

            if len(matched_views) != 1:
                size = len(matched_views)
                raise Exception(f"Unexpected error: found {size} views that match the view Id: {view_id}")

            status = matched_views[0]['status']
            if status == 'SUCCESS':
                print(f"View complete")
                break
            elif status == 'PENDING' or status == 'RUNNING':
                print(f"View status is still PENDING, continue to wait till finish...")
                time.sleep(sleep_sec)
                continue
            else:
                raise Exception(f"Bad view status: {status}, failing now.")

    def list_changesets(self, dataset_id: str):
        """List all changesets of the dataset (follows pagination), sorted by create timestamp."""
        resp = self.client.list_changesets(datasetId=dataset_id, sortKey='CREATE_TIMESTAMP')
        results = resp['changesets']
        while "nextToken" in resp:
            resp = self.client.list_changesets(datasetId=dataset_id, sortKey='CREATE_TIMESTAMP',
                                               nextToken=resp['nextToken'])
            results.extend(resp['changesets'])
        return (results)

    def list_views(self, dataset_id: str, max_results=50):
        """List the materialized views (snapshots) of the dataset (follows pagination)."""
        resp = self.client.list_materialization_snapshots(datasetId=dataset_id, maxResults=max_results)
        results = resp['materializationSnapshots']
        while "nextToken" in resp:
            resp = self.client.list_materialization_snapshots(datasetId=dataset_id, maxResults=max_results,
                                                              nextToken=resp['nextToken'])
            results.extend(resp['materializationSnapshots'])
        return (results)

    def list_datasets(self, max_results: int):
        """List up to max_results datasets of the environment."""
        all_datasets = self.client.list_datasets(maxResults=max_results)
        return (self.get_list(all_datasets, 'datasets'))

    def list_dataset_types(self):
        """List all dataset types (attribute sets), sorted by name (follows pagination)."""
        resp = self.client.list_dataset_types(sort='NAME')
        results = resp['datasetTypeSummaries']
        while "nextToken" in resp:
            resp = self.client.list_dataset_types(sort='NAME', nextToken=resp['nextToken'])
            results.extend(resp['datasetTypeSummaries'])
        return (results)

    @staticmethod
    def get_execution_role():
        """
        Convenience function from SageMaker to get the execution role of the user of the sagemaker studio notebook

        :return: the ARN of the execution role in the sagemaker studio notebook
        """
        import sagemaker as sm
        e_role = sm.get_execution_role()
        return (f"{e_role}")

    def get_user_ingestion_info(self):
        """Return the caller's ingestion information from the service."""
        return (self.client.get_user_ingestion_info())

    def upload_pandas(self, data_frame: pd.DataFrame):
        """Write the pandas dataframe as parquet to the user's INGESTION working location; returns that location."""
        import awswrangler as wr
        resp = self.client.get_working_location(locationType='INGESTION')
        upload_location = resp['s3Uri']
        wr.s3.to_parquet(data_frame, f"{upload_location}data.parquet", index=False, boto3_session=self._boto3_session)
        return upload_location

    def ingest_pandas(self, data_frame: pd.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):
        """Upload the pandas dataframe and create a changeset of change_type in the dataset."""
        print("Uploading the pandas dataframe ...")
        upload_location = self.upload_pandas(data_frame)

        print("Data upload finished. Ingesting data ...")
        return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='PARQUET')

    def read_view_as_pandas(self, dataset_id: str, view_id: str):
        """
        Returns a pandas dataframe the view of the given dataset. Views in FinSpace can be quite large, be careful!

        :param dataset_id:
        :param view_id:

        :return: Pandas dataframe with all data of the view
        """
        import awswrangler as wr  # use awswrangler to read the table

        # @todo: switch to DescribeMateriliazation when available in HFS
        views = self.list_views(dataset_id=dataset_id, max_results=50)
        filtered = [v for v in views if v['id'] == view_id]

        if len(filtered) == 0:
            raise Exception('No such view found')
        if len(filtered) > 1:
            raise Exception('Internal Server error')

        view = filtered[0]

        # 0. Ensure view is ready to be read
        if (view['status'] != 'SUCCESS'):
            status = view['status']
            print(f'view run status is not ready: {status}. Returning empty.')
            return

        glue_db_name = view['destinationTypeProperties']['databaseName']
        glue_table_name = view['destinationTypeProperties']['tableName']

        # determine if the table has partitions first, different way to read is there are partitions
        p = wr.catalog.get_partitions(table=glue_table_name, database=glue_db_name, boto3_session=self._boto3_session)

        # partition filter that accepts any partitioned row set
        def no_filter(partitions):
            if len(partitions.keys()) > 0:
                return True
            return False

        df = None

        if len(p) == 0:
            df = wr.s3.read_parquet_table(table=glue_table_name, database=glue_db_name,
                                          boto3_session=self._boto3_session)
        else:
            spath = wr.catalog.get_table_location(table=glue_table_name, database=glue_db_name,
                                                  boto3_session=self._boto3_session)
            cpath = wr.s3.list_directories(f"{spath}/*", boto3_session=self._boto3_session)

            read_path = f"{spath}/"

            # just one? Read it
            if len(cpath) == 1:
                read_path = cpath[0]

            df = wr.s3.read_parquet(read_path, dataset=True, partition_filter=no_filter,
                                    boto3_session=self._boto3_session)

        # Query Glue table directly with wrangler
        return df

    @staticmethod
    def get_schema_from_pandas(df: pd.DataFrame):
        """
        Returns the FinSpace schema columns from the given pandas dataframe.

        :param df: pandas dataframe to interrogate for the schema

        :return: FinSpace column schema list
        """
        # for translation to FinSpace's schema
        # 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'
        DoubleType = "DOUBLE"
        FloatType = "FLOAT"
        DateType = "DATE"
        StringType = "STRING"
        IntegerType = "INTEGER"
        LongType = "BIGINT"
        BooleanType = "BOOLEAN"
        TimestampType = "DATETIME"

        hab_columns = []

        for name in dict(df.dtypes):
            p_type = df.dtypes[name]

            # pandas dtype -> FinSpace type; anything unmapped falls back to STRING
            switcher = {
                "float64": DoubleType,
                "int64": IntegerType,
                "datetime64[ns, UTC]": TimestampType,
                "datetime64[ns]": DateType
            }

            habType = switcher.get(str(p_type), StringType)

            hab_columns.append({
                "dataType": habType,
                "name": name,
                "description": ""
            })

        return (hab_columns)

    @staticmethod
    def get_date_cols(df: pd.DataFrame):
        """
        Returns which are the data columns found in the pandas dataframe.
        Pandas does the hard work to figure out which of the columns can be considered to be date columns.

        :param df: pandas dataframe to interrogate for the schema

        :return: list of column names that can be parsed as dates by pandas
        """
        date_cols = []

        for name in dict(df.dtypes):
            p_type = df.dtypes[name]

            if str(p_type).startswith("date"):
                date_cols.append(name)

        return (date_cols)

    def get_best_schema_from_csv(self, path, is_s3=True, read_rows=500, sep=','):
        """
        Uses multiple reads of the file with pandas to determine schema of the referenced files.
        Files are expected to be csv.

        :param path: path to the files to read
        :type: str

        :param is_s3: True if the path is s3; False if filesystem
        :type: bool

        :param read_rows: number of rows to sample for determining schema

        :param sep:

        :return dict: schema for FinSpace
        """
        #
        # best efforts to determine the schema, sight unseen
        import awswrangler as wr

        # 1: get the base schema
        df1 = None
        if is_s3:
            df1 = wr.s3.read_csv(path, nrows=read_rows, sep=sep)
        else:
            df1 = pd.read_csv(path, nrows=read_rows, sep=sep)

        num_cols = len(df1.columns)

        # with number of columns, try to infer dates
        df2 = None
        if is_s3:
            df2 = wr.s3.read_csv(path, parse_dates=list(range(0, num_cols)), infer_datetime_format=True,
                                 nrows=read_rows, sep=sep)
        else:
            df2 = pd.read_csv(path, parse_dates=list(range(0, num_cols)), infer_datetime_format=True, nrows=read_rows,
                              sep=sep)

        date_cols = self.get_date_cols(df2)

        # with dates known, parse the file fully
        df = None
        if is_s3:
            df = wr.s3.read_csv(path, parse_dates=date_cols, infer_datetime_format=True, nrows=read_rows, sep=sep)
        else:
            df = pd.read_csv(path, parse_dates=date_cols, infer_datetime_format=True, nrows=read_rows, sep=sep)

        schema_cols = self.get_schema_from_pandas(df)

        return (schema_cols)

    def s3_upload_file(self, source_file: str, s3_destination: str):
        """
        Uploads a local file (full path) to the s3 destination given (expected form: s3://<bucket>/<prefix>/).
        The filename will have spaces replaced with _.

        :param source_file: path of file to upload

        :param s3_destination: full path to where to save the file
        :type: str
        """
        hab_s3_client = self._boto3_session.client(service_name='s3')

        o = urlparse(s3_destination)
        bucket = o.netloc
        prefix = o.path.lstrip('/')

        fname = os.path.basename(source_file)

        hab_s3_client.upload_file(source_file, bucket, f"{prefix}{fname.replace(' ', '_')}")

    def list_objects(self, s3_location: str):
        """
        lists the objects found at the s3_location. Strips out the boto API response header,
        just returns the contents of the location. Internally uses the list_objects_v2.

        :param s3_location: path, starting with s3:// to get the list of objects from
        :type: str
        """
        o = urlparse(s3_location)
        bucket = o.netloc
        prefix = o.path.lstrip('/')

        results = []

        hab_s3_client = self._boto3_session.client(service_name='s3')
        paginator = hab_s3_client.get_paginator('list_objects_v2')
        pages = paginator.paginate(Bucket=bucket, Prefix=prefix)

        for page in pages:
            if 'Contents' in page:
                results.extend(page['Contents'])

        return (results)

    def list_clusters(self, status: str = None):
        """
        Lists current clusters and their statuses

        :param status: status to filter for

        :return dict: list of clusters
        """
        resp = self.client.list_clusters()

        clusters = []

        if 'clusters' not in resp:
            return (clusters)

        for c in resp['clusters']:
            if status is None:
                clusters.append(c)
            else:
                if c['clusterStatus']['state'] in status:
                    clusters.append(c)

        return (clusters)

    def get_cluster(self, cluster_id):
        """
        Return the cluster record for the given cluster id, or None if not found.

        :param cluster_id: cluster id
        """
        clusters = self.list_clusters()

        for c in clusters:
            if c['clusterId'] == cluster_id:
                return (c)

        return (None)

    def update_cluster(self, cluster_id: str, template: str):
        """
        Resize the given cluster to desired template

        :param cluster_id: cluster id

        :param template: target template to resize to
        """
        cluster = self.get_cluster(cluster_id=cluster_id)

        # no-op if the cluster is already on the requested template
        if cluster['currentTemplate'] == template:
            print(f"Already using template: {template}")
            return (cluster)

        self.client.update_cluster(clusterId=cluster_id, template=template)

        return (self.get_cluster(cluster_id=cluster_id))

    def wait_for_status(self, clusterId: str, status: str, sleep_sec=10, max_wait_sec=900):
        """
        Function polls service until cluster is in desired status.

        :param clusterId: the cluster's ID

        :param status: desired status for cluster to reach

        :param sleep_sec: seconds between polls

        :param max_wait_sec: give up after this many seconds of waiting
        """
        total_wait = 0

        while True and total_wait < max_wait_sec:
            resp = self.client.list_clusters()

            this_cluster = None

            # is this the cluster?
            for c in resp['clusters']:
                if clusterId == c['clusterId']:
                    this_cluster = c

            if this_cluster is None:
                print(f"clusterId:{clusterId} not found")
                return (None)

            this_status = this_cluster['clusterStatus']['state']

            if this_status.upper() != status.upper():
                print(f"Cluster status is {this_status}, waiting {sleep_sec} sec ...")
                time.sleep(sleep_sec)
                total_wait = total_wait + sleep_sec
                continue
            else:
                return (this_cluster)

    def get_working_location(self, locationType='SAGEMAKER'):
        """Return the s3Uri of the user's working location of the given type (None if absent)."""
        resp = None
        location = self.client.get_working_location(locationType=locationType)
        if 's3Uri' in location:
            resp = location['s3Uri']
        return (resp)
# %load finspace_spark.py
import datetime
import time
import boto3
from botocore.config import Config
# FinSpace class with Spark bindings
class SparkFinSpace(FinSpace):
    """
    FinSpace helper specialized for use on a FinSpace Spark cluster:
    adds Spark DataFrame upload/ingest and reading of views via the Glue catalog.
    """
    import pyspark  # class-level import so the pyspark type hints below resolve

    def __init__(
            self,
            spark: pyspark.sql.session.SparkSession = None,
            config = Config(retries = {'max_attempts': 0, 'mode': 'standard'}),
            dev_overrides: dict = None
    ):
        FinSpace.__init__(self, config=config, dev_overrides=dev_overrides)
        self.spark = spark  # used on Spark cluster for reading views, creating changesets from DataFrames

    def upload_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame):
        """Write the Spark DataFrame as parquet to the user's ingestion path; returns that path."""
        resp = self.client.get_user_ingestion_info()
        upload_location = resp['ingestionPath']
        # data_frame.write.option('header', 'true').csv(upload_location)
        data_frame.write.parquet(upload_location)
        return upload_location

    def ingest_dataframe(self, data_frame: pyspark.sql.dataframe.DataFrame, dataset_id: str, change_type: str, wait_for_completion=True):
        """Upload the Spark DataFrame and create a changeset of change_type in the dataset."""
        print("Uploading data...")
        upload_location = self.upload_dataframe(data_frame)

        print("Data upload finished. Ingesting data...")
        return self.ingest_from_s3(upload_location, dataset_id, change_type, wait_for_completion, format_type='parquet', format_params={})

    def read_view_as_spark(
            self,
            dataset_id: str,
            view_id: str
    ):
        """Return the given view of the dataset as a Spark DataFrame (read through the Glue catalog)."""
        # TODO: switch to DescribeMatz when available in HFS
        views = self.list_views(dataset_id=dataset_id, max_results=50)
        filtered = [v for v in views if v['id'] == view_id]

        if len(filtered) == 0:
            raise Exception('No such view found')
        if len(filtered) > 1:
            raise Exception('Internal Server error')

        view = filtered[0]

        # 0. Ensure view is ready to be read
        if (view['status'] != 'SUCCESS'):
            status = view['status']
            print(f'view run status is not ready: {status}. Returning empty.')
            return

        glue_db_name = view['destinationTypeProperties']['databaseName']
        glue_table_name = view['destinationTypeProperties']['tableName']

        # Query Glue table directly with catalog function of spark
        return self.spark.table(f"`{glue_db_name}`.`{glue_table_name}`")

    def get_schema_from_spark(self, data_frame: pyspark.sql.dataframe.DataFrame):
        """Translate a Spark DataFrame's schema into FinSpace column definitions."""
        from pyspark.sql.types import StructType
        # for translation to FinSpace's schema
        # 'STRING'|'CHAR'|'INTEGER'|'TINYINT'|'SMALLINT'|'BIGINT'|'FLOAT'|'DOUBLE'|'DATE'|'DATETIME'|'BOOLEAN'|'BINARY'
        DoubleType = "DOUBLE"
        FloatType = "FLOAT"
        DateType = "DATE"
        StringType = "STRING"
        IntegerType = "INTEGER"
        LongType = "BIGINT"
        BooleanType = "BOOLEAN"
        TimestampType = "DATETIME"

        hab_columns = []

        items = [i for i in data_frame.schema]

        # NOTE(review): DoubleType maps to FLOAT and LongType to INTEGER here
        # (a narrowing); confirm this is intentional rather than DOUBLE/BIGINT
        switcher = {
            "BinaryType" : StringType,
            "BooleanType" : BooleanType,
            "ByteType" : IntegerType,
            "DateType" : DateType,
            "DoubleType" : FloatType,
            "IntegerType" : IntegerType,
            "LongType" : IntegerType,
            "NullType" : StringType,
            "ShortType" : IntegerType,
            "StringType" : StringType,
            "TimestampType" : TimestampType,
        }

        for i in items:
            # print( f"name: {i.name} type: {i.dataType}" )
            habType = switcher.get( str(i.dataType), StringType)

            hab_columns.append({
                "dataType" : habType,
                "name" : i.name,
                "description" : ""
            })

        return( hab_columns )
# initialize the FinSpace helper bound to this cluster's Spark session
# ('spark' is provided by the notebook's Spark runtime)
finspace = SparkFinSpace(spark=spark)
```
# Import All Tables from a Snowflake Database
Here we import the technical meta data from snowflake into FinSpace. The meta-data of the table in Snowflake is put into a FinSpace attribute set that is then associated with the FinSpace dataset that represents a table in Snowflake.
Each FinSpace dataset represents a table in the given Snowflake database.
It is assumed that you have an attribute set in FinSpace named 'Snowflake Table Attributes' that has been configured to contain 3 string values (named Catalog, Schema, and Table) and a Category named 'Source' that have a valued named 'Snowflake'.
You will also need to know the permission group ID to which you will entitle the dataset; the example below grants all entitlements on the datasets to the group you identify.
To get a group's ID, you will need to extract it from the group's FinSpace page, for example navigate to a user group's details page, then extract the bolded portion of the URL:
https://FINSPACE_ENVIRONMENT_ID.us-east-1.amazonfinspace.com/userGroup/m20knhL976XFrB8JhW7ryg
in the above URL example the group id is: **m20knhL976XFrB8JhW7ryg**
```
# Snowflake database to create datasets for each tables from
dbName = 'DEV_DB'

# Attribute set's name in FinSpace
att_name = 'Snowflake Table Attributes'

# User Group to grant access to the dataset
# NOTE(review): must be filled in with a real permission group id before running
group_id = ''

# get all the tables in a database
#-------------------------------------------
tablesDF = ( spark.read.format(SNOWFLAKE_SOURCE_NAME)
    .options(**sfOptions)
    .option("query", f"select * from {dbName}.information_schema.tables where table_schema <> 'INFORMATION_SCHEMA'")
    .load()
).cache()

# get all the schemas of the tables in the database
#---------------------------------------------------
schemaDF = ( spark.read.format(SNOWFLAKE_SOURCE_NAME)
    .options(**sfOptions)
    .option("query", f"select * from {dbName}.information_schema.columns where table_schema <> 'INFORMATION_SCHEMA'")
    .load()
).cache()
```
# Utility Functions
These functions help with translating Snowflake Schema to FinSpace schema and parsing the returns from FinSpace.
```
#
# Utility functions
# ------------------------------------------------------------
def find_by_key_value(l, key, v):
    """Return the first mapping in *l* whose entry at *key* equals *v*, else None."""
    matches = (item for item in l if item[key] == v)
    return next(matches, None)
def get_field_by_name(f_list, title, name='name'):
    """Find the field definition whose 'title' equals *title* and return its *name* entry.

    Returns None when no field with that title exists in *f_list*.
    """
    field = find_by_key_value(f_list, 'title', title)
    return None if field is None else field[name]
def get_field_values(f_list, field):
    """Return the 'values' list of the first entry in *f_list* whose 'field' matches.

    Returns None (explicitly) when no entry matches — same as the implicit
    fall-through of a plain search loop.
    """
    for entry in f_list:
        if entry['field'] == field:
            return entry['values']
    return None
def get_finspace_schema(table_schema_pdf):
    """Translate a Snowflake information_schema.columns frame into a FinSpace schema dict.

    Expects the pandas frame to carry COLUMN_NAME, COMMENT and DATA_TYPE
    columns. Unmapped Snowflake types fall back to STRING; a missing comment
    becomes an empty description. primaryKeyColumns is always left empty.
    """
    # Snowflake DATA_TYPE -> FinSpace column type
    type_map = {
        'DATE': 'DATE',
        'FLOAT': 'FLOAT',
        'NUMBER': 'DOUBLE',
        'TEXT': 'STRING',
        'BOOLEAN': 'BOOLEAN',
        'TIMESTAMP_LTZ': 'DATETIME',
        'TIMESTAMP_NTZ': 'DATETIME',
    }

    columns = []
    for _, row in table_schema_pdf.iterrows():
        comment = row['COMMENT']
        columns.append({
            'dataType': type_map.get(str(row['DATA_TYPE']), 'STRING'),
            'name': row['COLUMN_NAME'],
            'description': '' if comment is None else comment,
        })

    return {'primaryKeyColumns': [], 'columns': columns}
def get_snowflake_query(dataset_id, att_name):
    """Build the fully qualified "catalog"."schema"."table" string for a dataset.

    Reads the Snowflake attribute set *att_name* associated with the FinSpace
    dataset *dataset_id*. Prints a diagnostic and returns None when the
    attribute set, the dataset, or the association cannot be resolved.
    """
    attr_set = finspace.attribute_set(att_name)
    if attr_set is None:
        print(f'Did not find the attribute set with name: {att_name}')
        return None

    # get the dataset details
    details = finspace.client.describe_dataset_details(datasetId=dataset_id)
    if details is None:
        print(f'Did not find the dataset with id: {dataset_id}')
        return None

    # locate this attribute set among the dataset's associated type contexts
    assoc = find_by_key_value(details['datasetTypeContexts'], 'id', attr_set['id'])
    if assoc is None:
        print(f'Did not find the attribute set with name: {att_name} in the dataset with id: {dataset_id}')
        return None

    field_defs = assoc['definition']['fields']
    field_values = assoc['values']

    # resolve the populated value for each of the three identifying fields
    parts = []
    for title in ('Catalog', 'Schema', 'Table'):
        field_id = get_field_by_name(field_defs, title)
        parts.append(get_field_values(field_values, field_id)[0])

    return '.'.join(f'"{p}"' for p in parts)
```
# Get the Attribute Set
The snowflake attribute set must be retrieved by name, and then its identifiers are used when populating the attribute set for association to the datasets. We need the exact IDs of the fields, not just their names.
```
import pprint

# Get the attribute set
sfAttrSet = finspace.attribute_set(att_name)

att_def = None
att_fields = None

# get the fields of the attribute set; if 'definition' or 'fields' is
# absent from the response, att_def / att_fields remain None
att_resp = finspace.describe_attribute_set(sfAttrSet['id'])
if 'definition' in att_resp:
    att_def = att_resp['definition']
    if 'fields' in att_def:
        att_fields = att_def['fields']

# The fields we will populate
pprint.pprint(att_fields, width=100)
```
## Snowflake Source
One of the fields in the Snowflake attribute set identifies the Source as Snowflake; this is done through a classification whose values/identifiers need to be extracted from FinSpace and then used to populate the Snowflake attribute set and associate it with the FinSpace datasets that are to be created.
```
# get the key for snowflake in the classification 'Source'
classification_name = 'Source'
classification_value = 'Snowflake'

classification_resp = finspace.classification(classification_name)
classification_fields = finspace.describe_classification(classification_resp['id'])

# walk the classification's nodes to find the key of the 'Snowflake' value
classification_key = None
for n in classification_fields['definition']['nodes']:
    if n['fields']['name'] == classification_value:
        classification_key = n['key']

# this is the key for source in the Category
print(f'''Classification: {classification_name}
Value: {classification_value}
Key: {classification_key}''')

# all the tables into a pandas dataframe to then iterate on
tablesDF.select('TABLE_CATALOG', 'TABLE_SCHEMA', 'TABLE_NAME', 'ROW_COUNT', 'COMMENT').show(10, False)

# Permissions to grant the above group for the created dataset
basicPermissions = [
    "ViewDatasetDetails",
    "ReadDatasetData",
    "AddDatasetData",
    "CreateSnapshot",
    "EditDatasetMetadata",
    "ManageDatasetPermissions",
    "DeleteDataset"
]

# All datasets have ownership
basicOwnerInfo = {
    "phoneNumber" : "12125551000",
    "email" : "jdoe@amazon.com",
    "name" : "Jane Doe"
}

# all the tables into a pandas dataframe to then iterate on
tablesPDF = tablesDF.select('TABLE_CATALOG', 'TABLE_SCHEMA', 'TABLE_NAME', 'ROW_COUNT', 'COMMENT').toPandas()
c = 0  # counter of processed tables

#create=False
create=True  # dry-run switch: False prints what would be created without calling FinSpace

# For each table, create a dataset with the necessary attribute set populated and associated to the dataset
for index, row in tablesPDF.iterrows():
    c = c + 1

    catalog = row.TABLE_CATALOG
    schema = row.TABLE_SCHEMA
    table = row.TABLE_NAME

    # Attributes and their populated values
    att_values = [
        { 'field' : get_field_by_name(att_fields, 'Catalog'), 'type' : get_field_by_name(att_fields, 'Catalog', 'type')['name'], 'values' : [ catalog ] },
        { 'field' : get_field_by_name(att_fields, 'Schema'), 'type' : get_field_by_name(att_fields, 'Schema', 'type')['name'], 'values' : [ schema ] },
        { 'field' : get_field_by_name(att_fields, 'Table'), 'type' : get_field_by_name(att_fields, 'Table', 'type')['name'], 'values' : [ table ] },
        { 'field' : get_field_by_name(att_fields, 'Source'), 'type' : get_field_by_name(att_fields, 'Source', 'type')['name'], 'values' : [ classification_key ] },
    ]

    # get this table's schema from Snowflake
    tableSchemaPDF = schemaDF.filter(schemaDF.TABLE_NAME == table).filter(schemaDF.TABLE_SCHEMA == schema).select('ORDINAL_POSITION', 'COLUMN_NAME', 'IS_NULLABLE', 'DATA_TYPE', 'COMMENT').orderBy('ORDINAL_POSITION').toPandas()
    print(tableSchemaPDF)

    # translate Snowflake schema to FinSpace Schema
    fs_schema = get_finspace_schema(tableSchemaPDF)

    # name and description of the dataset to create; prefer the table's own
    # COMMENT as the description when one is present
    name = f'{table}'
    description = f'Snowflake table from catalog: {catalog}'
    if row.COMMENT is not None:
        description = row.COMMENT

    print(f'name: {name}')
    print(f'description: {description}')
    print("att_values:")
    for i in att_values:
        print(i)
    print("schema:")
    for i in fs_schema['columns']:
        print(i)

    if (create):
        # create the dataset
        dataset_id = finspace.create_dataset(
            name = name,
            description = description,
            permission_group_id = group_id,
            dataset_permissions = basicPermissions,
            kind = "TABULAR",
            owner_info = basicOwnerInfo,
            schema = fs_schema
        )

        print(f'Created, dataset_id: {dataset_id}')

        # give the service time to finish creating the dataset before
        # associating the attribute set
        time.sleep(20)

        # associate tha attributes to the dataset
        if (att_name is not None and att_values is not None):
            print(f"Associating values to attribute set: {att_name}")
            finspace.associate_attribute_set(att_name=att_name, att_values=att_values, dataset_id=dataset_id)
```
| github_jupyter |
```
import sys
from multiprocessing import Pool, cpu_count
import pandas as pd
import numpy as np
from fuzzywuzzy import process
sys.path.append('../src')
from my_aws import S3
# Pull the two source CSVs from the movie-torrents S3 bucket via the
# project's S3 helper (imported from ../src/my_aws above)
s3_connect = S3()

# OMDB-scraped torrent data
key_tor = 'OMDB_Torrents.csv'
bucket = 'movie-torrents'
df_tor = s3_connect.get_data(key_tor, bucket)

# The-Numbers budget data
key_num = 'TheNumbers_budgets.csv'
bucket = 'movie-torrents'
df_num = s3_connect.get_data(key_num, bucket)

# Title lists consumed by the fuzzy matcher below
numbers_titles = df_num['title'].tolist()
torrent_titles = df_tor['Title'].tolist()
def fuzzy_match(search_title):
    """Fuzzy-match *search_title* against the module-level numbers_titles list.

    Returns a dict holding the queried title, the best-matching title, and the
    match score reported by fuzzywuzzy's extractOne.
    """
    title, score = process.extractOne(search_title, numbers_titles)
    return {'RowTitle': search_title, 'FuzzTitle': title, 'FuzzPercent': score}
```
# Resource heavy - long running
```
# Fan the fuzzy matching out across all CPU cores (resource heavy, long running)
worker_pool = Pool(cpu_count())
%time fuzz_results = worker_pool.map(fuzzy_match, torrent_titles)
worker_pool.close()
worker_pool.join()

# Put results into a dataframe and rename columns
df_fuzz = pd.DataFrame(fuzz_results)
df_fuzz = df_fuzz[['RowTitle','FuzzTitle','FuzzPercent']]
df_fuzz.columns = ['Title', 'FuzzTitle', 'FuzzPercent']

# Append to torrent dataframe
df = pd.merge(df_tor, df_fuzz, how='inner', on='Title')
df.drop_duplicates(subset='Title', inplace=True)

# Drop rows where match was below 95%
df = df[df['FuzzPercent'] >= 95]

# remove where no torrent counts were received from any source
# (a temporary tuple column lets all six count columns be compared at once)
df['CheckTup'] = list(zip(df['Kat_Count'].tolist(), df['Pirate_Count'].tolist(), df['Extra_Count'].tolist(),
                          df['Torrentz_Count'].tolist(), df['Torrentz_Ver_Count'].tolist(), df['Zoogle_Ver_Count'].tolist()))
df = df[df['CheckTup'] != ('Fail','Fail','Fail','Fail','Fail','Fail')].reset_index(drop=True)
del df['CheckTup']

# replace Fail, None, N and NaN with 0 - remove >, and <
int_cols = ['Metascore', 'Runtime', 'imdbRating','imdbVotes', 'Kat_Count', 'Pirate_Count',
            'Extra_Count', 'Torrentz_Count', 'Torrentz_Ver_Count', 'Zoogle_Ver_Count']
for col in int_cols:
    df[col] = df[col].replace(['Fail', 'None', 'N', 'NaN'], '0')
    df[col] = df[col].apply(lambda x: str(x).replace('>','').replace('<','').replace(',',''))
    df[col] = df[col].replace(np.nan, 0)
    df[col] = df[col].fillna(value=0)
    # errors='coerce' turns anything still unparseable into NaN, which the
    # final fillna maps back to 0
    df[col] = pd.to_numeric(df[col], errors='coerce', downcast='integer')
    df[col] = df[col].fillna(value=0)

# fill in remaining NaN's with blanks
df.fillna(value='', inplace=True)

df_num.columns = ['FuzzTitle', 'ReleaseDate', 'ProductionBudget', 'DomesticBudget', 'WorldGross']

# merge data frames
data_df = pd.merge(df, df_num, how='left', on='FuzzTitle')
data_df = data_df.drop_duplicates(subset='imdbID')
print(len(data_df))

# backfill missing OMDB release dates from The-Numbers data, then drop the
# merge-helper columns
data_df['Released'] = pd.to_datetime(data_df['Released'])
data_df['ReleaseDate'] = pd.to_datetime(data_df['ReleaseDate'])
data_df.loc[data_df['Released'].isnull(), 'Released'] = data_df['ReleaseDate']
del data_df['ReleaseDate']
del data_df['FuzzTitle']
del data_df['FuzzPercent']

# sum torrent counts
data_df['Total_Torrents'] = data_df[['Kat_Count','Pirate_Count','Extra_Count','Torrentz_Count',
                                     'Torrentz_Ver_Count','Zoogle_Ver_Count']].sum(axis=1)

data_df.reset_index(drop=True, inplace=True)
data_df.info()
```
| github_jupyter |
## Введение в Pandas
### Основы
Библиотека **Pandas** основана на Numpy и применяется для обработки и анализа данных, представленных в виде больших разнородных таблиц. Как правило, реальные данные хранятся не в виде массивов чисел, а в виде структурированного набора данных (*sql, csv, json, excel, xml* и т.д.). Для обработки, отображения, преобразования и любых манипуляций с этими данными и применяется Pandas.
Библиотека Pandas основана на понятиях **Series** и **DataFrame**. Это специальные структуры данных, причем *Series* - это одномерная структура, а *DataFrame* - двумерная структура данных. Посмотрим, как ими пользоваться на примерах ниже.
Импортируем библиотеки в проект:
```
import numpy as np
import pandas as pd
%matplotlib inline
pd.__version__
```
## Data Series
Pandas **Series** - это одномерный индексированный массив данных, который может хранить данные любого типа (числа, строки, объекты python). В отличие однородного массива numpy, в котором представленные данные имеют один тип, в Series тип данных может быть произвольный (гетерогенный массив). Pandas Series также очень похож на словарь.
Простой способ создать Series:
```python
s = pd.Series(data, index)
```
где *data*:
- список, словарь, кортеж,
- numpy массив,
- скалярный тип данных (integer, string, ...)
и *index* - массив индексов (может быть как числом, так и любым другим значением).
Особенности:
- Если *data* - массив numpy, то длина index должна совпадать с длиной массива данных.
- Если *index* не обозначен, то он создается автоматически в виде набора чисел: `0, ..., len(data)-1`
Создадим простейший объект pandas Series:
```
d = pd.Series([0, 1, 2, 3, 4, 5, 6, 7])
d
```
Значения и индексы можно получить с помощью следующих атрибутов:
```
d.values # get data values
d.index # get data indexes
```
Заметим, что pandas Series, как и массивы numpy, имеют атрибут типа хранимых значений `dtype`.
Значения *Series* представляют собой массив numpy, поэтому к ним применимы любые операции и преобразования из библиотеки numpy (математические операции, функции, методы Numpy).
Series можно создать с помощью словаря `dict` или явного указания значений и индексов:
```
# Example with a dictionary:
pd.Series({'a':1, 'b':2, 'c':3, 'd':4})
# Create series with data and index:
d = pd.Series(data=[1, 3, 5, np.nan, 6], index=['a', 'b', 'c', 'd', 'e'])
'a' in d # Check index
'z' in d # Check index
```
Также можно создать Series с помощью скалярной величины (константы). В таком случае элементы вектора инициализируются константным значением:
```
pd.Series(data=7, index=range(5))
```
### Индексация и доступ к элементам
Доступ к значениям в pandas Series производится аналогично массивам в numpy, даже если индексы представлены не как вектор целых чисел. Например:
```
d = pd.Series(data=7, index=['a', 'b', 'c', 'd', 'e'])
d[0:4] # Get slice of pandas Series by index
```
Тот же самый срез можно получить по именованым значениям индексов:
```
d['a':'d'] # Get slice of pandas Series by named index
```
В pandas Series можно добавлять новые элементы или изменять существующие. Также доступно множественное присваивание:
```
d['f'] = 10 # Add new element
d['a':'d'] = -1 # Assign new value
d
```
### Фильтрация значений
Аналогично numpy, с помощью булевых масок и выражений возможна фильтрация значений в Series:
```
d[d > 0] # Get values from pandas Series
d[d < 0] # Get values from pandas Series
d > 0 # Get boolean mask of pandas Series
```
У объекта Series и его индекса есть атрибут `name`, задающий имя объекту и индексу.
```
d.name = 'num' # data name
d.index.name = 'let' # index name
d
```
Pandas позволяет динамически менять индекс Series. Для этого необходимо присвоить атрибуту index новые значения.
```
d.index = ['A', 'B', 'C', 'D', 'E', 'F'] # New index
d
```
Дополнительные примеры использования pandas Series:
```
# Create pandas series from list comprehension
pd.Series([i**2 for i in range(7)])
# Create series with numpy random array:
N = 8 # Number of samples
np.random.seed(5) # Random seed
pd.Series([np.random.randint(-N, N) for x in range(N)])
```
## DataFrame
Pandas **DataFrame** - можно представить как таблицу данных. Это многомерный массив, включающий в себя возможности и особенности привычных таблиц (Excel) и реляционных баз данных (SQL). В таблице присутствуют строки и столбцы. **Столбцы** в DataFrame - объекты Series, **строки** которых являются их непосредственными элементами. Иными словами, DataFrame это набор Series.
```
# Create dict of two pandas Series
# Create dict of two pandas Series of different lengths; when combined into a
# DataFrame, indices missing from a Series become NaN in its column
d = {
    'one': pd.Series([1, 2, 3], index=['a', 'b', 'c']),
    'two': pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
}

# Create pandas DataFrame
df = pd.DataFrame(d)
df
```
Как видно, пропущенные значения имеют тип NaN.
Доступ к именам строк с помощью атрибута `index`
```
df.index
```
Доступ к именам столбцов с помощью атрибута `columns`
```
df.columns
```
Доступ ко всем значениям таблицы с помощью атрибута `values`
```
df.values
```
### Добавление и удаление столбцов
Добавление столбца в таблицу и инициализация его каким-либо значением:
```
df['three'] = 0
df
```
Добавление столбца в таблицу. Элементы задаются из списка:
```
df['four'] = [i for i in range(4)]
df
```
Добавление столбца неполного размера. Столбец дополняется до необходимой длины, а элементы столбца инициализируются значением по умолчанию `NaN`:
```
df['one_tr'] = df['one'][:1]
df
```
Создание таблицы с помощью словаря `dict`:
```
ddict = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20, 'd': 30}]
pd.DataFrame(data=ddict, index=['one', 'two'])
```
Удаление столбца с помощью `del`
```
del df['four']
df
```
Удаление столбца с помощью метода `pop()`
```
df.pop('two')
df
```
Создадим простейшую таблицу на базе словаря. Для этого определим одномерные списки, которые будут выступать в роли pandas Series.
```
# Xilinx 7-series FPGA families (row index) and resource categories (columns)
fpgas = ['Spartan-7', 'Artix-7', 'Kintex-7', 'Virtex-7']
options = [
    'LCells',
    'BRAMs',
    'DSPs',
    'Transceivers',
    'Memory Speed',
    'Pins'
]
# per-family resource figures, index-aligned with fpgas
lcells = [102, 215, 478, 1955]
brams = [4.2, 13.0, 34.0, 68.0]
dsps = [160, 740, 1920, 3600]
trns = [np.NaN, 16, 32, 96]  # Spartan-7 has no transceivers -> NaN
mems = [800, 1066, 1866, 1866]
pins = [400, 500, 500, 1200]
```
Ключи словаря будут использоваться в качестве названия колонки, а передаваемый список в качестве значений.
Параметр опций (options) будем использовать как индекс DataFrame
```
# Set DataFrame data and index: first two resource categories as columns,
# FPGA family names as the row index
df_data = {options[0]: lcells, options[1]: brams}
df_index = fpgas

# Create DataFrame:
ft = pd.DataFrame(df_data, df_index)
ft
```
Если индекс по строкам не задан явно, то pandas задаёт целочисленный индекс от 0 до N-1, где N - количество строк в таблице
```
pd.DataFrame(df_data)
```
Доступ к именам столбцов и значениям по столбцам (для всей таблицы):
```
print(ft.columns, '\n')
print(ft.values, '\n')
```
Доступ к значениям колонки осуществляется путем передачи названия этой колонки (или нескольких). Каждая колонка в отдельности является Series:
```
ft['BRAMs']
ft['LCells']
type(ft['BRAMs']) # pandas Series
```
Добавление новой колонки в DataFrame:
```
ft[options[2]] = dsps # Add new column
ft
ft[options[3]] = trns # Add new column
ft
```
Добавление имени индекса строк:
```
ft.index.name = '7Series' # Add Index name
ft
```
### Индексация, выбор данных
| Operation | Syntax | Result |
| -- | -- | -- |
| Select column | df[col] | Series |
| Select row by label | df.loc[label] | Series |
| Select row by integer location | df.iloc[loc] | Series |
| Slice rows | df[5:10] | DataFrame |
| Select rows by boolean vector | df[bool_vec] | DataFrame |
Столбец по выбранному признаку
```
ft['DSPs'] # Return Series (column)
```
Строка по выбранному признаку. `.loc` - используется для доступа по строковой метке. Указывает явный индекс.
```
ft.loc['Artix-7'] # Return Series (row)
ft.loc['Artix-7'][['DSPs', 'BRAMs']] # Return DSPs & BRAMs columns
```
Строка (обращение по целочисленному индексу). `.iloc` - используется для доступа по числовому значению (начиная от 0), то есть - неявный индекс (в таблице не отображается его числовое значение).
```
ft.iloc[1] # Return Series (row by int location)
```
Срез по строкам
```
ft[0:2] # Slice of series
```
Выбор по булевой маске (показать строки при выполнении условия)
```
ft[ft.DSPs > 1000]
ft[ft.DSPs > 1000]['DSPs']
ft[ft.DSPs > 1000][['DSPs', 'BRAMs']]
# DSP / BRAM ratio:
ft['DSPs'] / ft['BRAMs']
# Only for Artix-7
(ft['DSPs'] / ft['BRAMs']).loc['Artix-7']
```
Сбросить явно указанный **индекс** можно с помощью соответствующего метода:
```
ft.reset_index()
```
| github_jupyter |
# Monetary Economics: Chapter 4
### Preliminaries
```
# This line configures matplotlib to show figures embedded in the notebook,
# instead of opening a new window for each figure. More about that later.
# If you are using an old version of IPython, try using '%pylab inline' instead.
%matplotlib inline
import matplotlib.pyplot as plt
from pysolve.model import Model
from pysolve.utils import is_close,round_solution
```
### Model PC
```
def create_pc_model():
    """Build model PC (government money with portfolio choice).

    Equation numbers in the trailing comments refer to Godley & Lavoie,
    'Monetary Economics', chapter 4. Returns an unsolved pysolve Model;
    parameter values (notably Rbar) are set by the caller before solving.
    """
    model = Model()
    model.set_var_default(0)
    # endogenous variables
    model.var('Bcb', desc='Government bills held by the Central Bank')
    model.var('Bh', desc='Government bills held by households')
    model.var('Bs', desc='Government bills supplied by the government')
    model.var('C', desc='Consumption goods')
    model.var('Hh', desc='Cash held by households')
    model.var('Hs', desc='Cash supplied by the central bank')
    model.var('R', desc='Interest rate on government bills')
    model.var('T', desc='Taxes')
    model.var('V', desc='Household wealth')
    model.var('Y', desc='Income = GDP')
    model.var('YD', desc='Disposable income of households')

    # behavioural and policy parameters
    model.param('alpha1', desc='Propensity to consume out of income', default=0.6)
    model.param('alpha2', desc='Propensity to consume out of wealth', default=0.4)
    model.param('lambda0', desc='Parameter in asset demand function', default=0.635)
    model.param('lambda1', desc='Parameter in asset demand function', default=5.0)
    model.param('lambda2', desc='Parameter in asset demand function', default=0.01)
    model.param('theta', desc='Tax rate', default=0.2)
    model.param('G', desc='Government goods', default=20.)
    model.param('Rbar', desc='Interest rate as policy instrument')

    # model equations; (-1) denotes a one-period lag
    model.add('Y = C + G')  # 4.1
    model.add('YD = Y - T + R(-1)*Bh(-1)')  # 4.2
    model.add('T = theta*(Y + R(-1)*Bh(-1))')  #4.3, theta < 1
    model.add('V = V(-1) + (YD - C)')  # 4.4
    model.add('C = alpha1*YD + alpha2*V(-1)')  # 4.5, 0<alpha2<alpha1<1
    model.add('Hh = V - Bh')  # 4.6
    model.add('Bh = V*lambda0 + V*lambda1*R - lambda2*YD')  # 4.7
    model.add('Bs - Bs(-1) = (G + R(-1)*Bs(-1)) - (T + R(-1)*Bcb(-1))')  # 4.8
    model.add('Hs - Hs(-1) = Bcb - Bcb(-1)')  # 4.9
    model.add('Bcb = Bs - Bh')  # 4.10
    model.add('R = Rbar')  # 4.11

    return model
# Solve model PC toward its steady state: iterate until two successive
# period solutions agree within tolerance (or 100 periods elapse)
steady = create_pc_model()
steady.set_values({'alpha1': 0.6,
                   'alpha2': 0.4,
                   'lambda0': 0.635,
                   'lambda1': 5.0,
                   'lambda2': 0.01,
                   'G': 20,
                   'Rbar': 0.025})

for _ in range(100):
    steady.solve(iterations=100, threshold=1e-5)
    if is_close(steady.solutions[-2], steady.solutions[-1], atol=1e-4):
        break
```
### Model PCEX
```
def create_pcex_model():
    """Build model PCEX: model PC extended with expectations.

    Consumption depends on expected disposable income (YDe) and bill demand
    on expected wealth (Ve); YDe is actual YD perturbed by the random shock
    parameter Ra (eq. 4.16). Equation numbers refer to Godley & Lavoie ch. 4.
    Returns an unsolved pysolve Model.
    """
    model = Model()
    model.set_var_default(0)
    # endogenous variables (including expectation variables Ve, YDe)
    model.var('Bcb', desc='Government bills held by the Central Bank')
    model.var('Bd', desc='Demand for government bills')
    model.var('Bh', desc='Government bills held by households')
    model.var('Bs', desc='Government bills supplied by the government')
    model.var('C', desc='Consumption goods')
    model.var('Hd', desc='Demand for cash')
    model.var('Hh', desc='Cash held by households')
    model.var('Hs', desc='Cash supplied by the central bank')
    model.var('R', desc='Interest rate on government bills')
    model.var('T', desc='Taxes')
    model.var('V', desc='Household wealth')
    model.var('Ve', desc='Expected household wealth')
    model.var('Y', desc='Income = GDP')
    model.var('YD', desc='Disposable income of households')
    model.var('YDe', desc='Expected disposable income of households')

    model.set_param_default(0)
    model.param('alpha1', desc='Propensity to consume out of income', default=0.6)
    model.param('alpha2', desc='Propensity to consume o of wealth', default=0.4)
    model.param('lambda0', desc='Parameter in asset demand function', default=0.635)
    model.param('lambda1', desc='Parameter in asset demand function', default=5.0)
    model.param('lambda2', desc='Parameter in asset demand function', default=0.01)
    model.param('theta', desc='Tax rate', default=0.2)
    model.param('G', desc='Government goods', default=20.)
    model.param('Ra', desc='Random shock to expectations', default=0.0)
    model.param('Rbar', desc='Interest rate as policy instrument', default=0.025)

    # model equations; (-1) denotes a one-period lag
    model.add('Y = C + G')  # 4.1
    model.add('YD = Y - T + R(-1)*Bh(-1)')  # 4.2
    model.add('T = theta*(Y + R(-1)*Bh(-1))')  #4.3, theta < 1
    model.add('V = V(-1) + (YD - C)')  # 4.4
    model.add('C = alpha1*YDe + alpha2*V(-1)')  # 4.5E
    model.add('Bd = Ve*lambda0 + Ve*lambda1*R - lambda2*YDe')  # 4.7E
    model.add('Hd = Ve - Bd')  # 4.13
    model.add('Ve = V(-1) + (YDe - C)')  # 4.14
    model.add('Hh = V - Bh')  # 4.6
    model.add('Bh = Bd')  # 4.15
    model.add('Bs - Bs(-1) = (G + R(-1)*Bs(-1)) - (T + R(-1)*Bcb(-1))')  # 4.8
    model.add('Hs - Hs(-1) = Bcb - Bcb(-1)')  # 4.9
    model.add('Bcb = Bs - Bh')  # 4.10
    model.add('R = Rbar')  # 4.11
    model.add('YDe = YD * (1 + Ra)')  # 4.16

    return model
```
### Steady state and shocks
```
# Solve model PCEX to a steady state from the book's initial stock values
# (string entries like 'Bh + Bcb' are expressions evaluated by pysolve)
pcex_steady = create_pcex_model()
pcex_steady.set_values([('alpha1', 0.6),
                        ('alpha2', 0.4),
                        ('lambda0', 0.635),
                        ('lambda1', 5.0),
                        ('lambda2', 0.01),
                        ('theta', 0.2),
                        ('G', 20),
                        ('Rbar', 0.025),
                        ('Ra', 0),
                        ('Bcb', 116.36),
                        ('Bh', 363.59),
                        ('Bs', 'Bh + Bcb'),
                        ('Hh', 116.35),
                        ('Hs', 'Hh'),
                        ('V', 'Bh + Hh'),
                        ('R', 'Rbar')])

for _ in range(100):
    pcex_steady.solve(iterations=100, threshold=1e-5)
    if is_close(pcex_steady.solutions[-2], pcex_steady.solutions[-1], atol=1e-4):
        break

import random
random.seed(6)  # fixed seed -> reproducible shock sequence

# start from the steady state and subject expectations to 50 periods of
# random shocks through the Ra parameter (roughly N(0, 0.1) draws)
shocks = create_pcex_model()
shocks.set_values(pcex_steady.solutions[-1], ignore_errors=True)
for _ in range(50):
    shocks.parameters['Ra'].value = random.gauss(0,1) / 10.
    shocks.solve(iterations=100, threshold=1e-3)
#### Figure 4.1
```
caption = '''
Figure 4.1 Money demand and held money balances, when the economy is subjected
to random shocks.'''

# plot only the shocked periods (solutions[25:])
hddata = [s['Hd'] for s in shocks.solutions[25:]]
hhdata = [s['Hh'] for s in shocks.solutions[25:]]

fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(min(hddata+hhdata)-2, max(hddata+hhdata)+2)
axes.plot(hhdata, 'b')
axes.plot(hddata, linestyle='--', color='r')

# add labels
plt.text(13, 35, 'Held money balances')
plt.text(13, 34, '(continuous line)')
plt.text(16, 12, 'Money demand')
plt.text(16, 11, '(dotted line)')
fig.text(0.1, -.05, caption);
```
###### Figure 4.2
```
caption = '''
Figure 4.2 Changes in money demand and in money balances held (first differences),
when the economy is subjected to random shocks. '''

hddata = [s['Hd'] for s in shocks.solutions[24:]]
hhdata = [s['Hh'] for s in shocks.solutions[24:]]

# convert levels to first differences in place; iterating backwards means
# each element is differenced against the not-yet-modified previous level
for i in range(len(hddata)-1, 0, -1):
    hddata[i] -= hddata[i-1]
    hhdata[i] -= hhdata[i-1]
# drop the first element, which has no difference defined
hddata = hddata[1:]
hhdata = hhdata[1:]

fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(min(hddata+hhdata)-2, max(hddata+hhdata)+2)
axes.plot(hhdata, 'b')
axes.plot(hddata, linestyle='--', color='r')

# add labels
plt.text(13, 20, 'Held money balances')
plt.text(13, 18, '(continuous line)')
plt.text(15, -18, 'Money demand')
plt.text(15, -20, '(dotted line)')
fig.text(0.1, -.05, caption);
```
### Scenario: Model PC, Steady state with increase in interest rate
```
# Model PC scenario: solve to steady state, then raise the policy rate by
# 100 basis points (0.025 -> 0.035) and solve 40 further periods
rate_shock = create_pc_model()
rate_shock.set_values({'Bcb': 21.576,
                       'Bh': 64.865,
                       'Bs': 86.441,
                       'Hh': 21.62,
                       'Hs': 21.62,
                       'V': 86.485,
                       'alpha1': 0.6,
                       'alpha2': 0.4,
                       'lambda0': 0.635,
                       'lambda1': 5.0,
                       'lambda2': 0.01,
                       'G': 20,
                       'Rbar': 0.025})

# solve until stable
for i in range(50):
    rate_shock.solve(iterations=100, threshold=1e-5)
    if is_close(rate_shock.solutions[-2], rate_shock.solutions[-1], atol=1e-4):
        break

# apply the interest-rate shock and continue solving
rate_shock.parameters['Rbar'].value = 0.035
for i in range(40):
    rate_shock.solve(iterations=100, threshold=1e-5)
```
###### Figure 4.3
```
caption = '''
Figure 4.3 Evolution of the shares of bills and money balances in the portfolio of
households, following an increase of 100 points in the rate of interest on bills.'''

# portfolio shares: money and bills as fractions of wealth
hhdata = [s['Hh']/s['V'] for s in rate_shock.solutions[15:]]
bhdata = [s['Bh']/s['V'] for s in rate_shock.solutions[15:]]

fig = plt.figure()
# left axis: money share
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False)
axes.spines['top'].set_visible(False)
axes.set_ylim(0.19, 0.26)
axes.plot(hhdata, 'b')

# right axis (twinned): bill share, on its own scale
axes2 = axes.twinx()
axes2.tick_params(top=False)
axes2.spines['top'].set_visible(False)
axes2.set_ylim(0.74, 0.81)
axes2.plot(bhdata, linestyle='--', color='r')

plt.text(1, 0.81, 'Share of')
plt.text(1, 0.807, 'money balances')
plt.text(45, 0.81, 'Share of')
plt.text(45, 0.807, 'bills')
plt.text(15, 0.795, 'Share of bills in')
plt.text(15, 0.792, 'household portfolios')
plt.text(15, 0.755, 'Share of money balances')
plt.text(15, 0.752, 'in household portfolios')
fig.text(0.1, -.05, caption);
```
###### Figure 4.4
```
caption = '''
Figure 4.4 Evolution of disposable income and household consumption following an
increase of 100 points in the rate of interest on bills. '''

yddata = [s['YD'] for s in rate_shock.solutions[20:]]
cdata = [s['C'] for s in rate_shock.solutions[20:]]

fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(86, 91)
axes.plot(yddata, 'b')
axes.plot(cdata, linestyle='--', color='r')

# add labels
plt.text(10, 90.2, 'Disposable')
plt.text(10, 90.0, 'Income')
plt.text(10, 88, 'Consumption')
fig.text(0.1, -0.05, caption);
```
### Model PCEX1
```
def create_pcex1_model():
    """Build model PCEX1: model PCEX with adaptive expectations.

    Identical to PCEX except that expected disposable income is simply last
    period's realized disposable income, YDe = YD(-1) (eq. 4.16A), and there
    is no random-shock parameter Ra. Equation numbers refer to Godley &
    Lavoie ch. 4. Returns an unsolved pysolve Model.
    """
    model = Model()
    model.set_var_default(0)
    # endogenous variables (including expectation variables Ve, YDe)
    model.var('Bcb', desc='Government bills held by the Central Bank')
    model.var('Bd', desc='Demand for government bills')
    model.var('Bh', desc='Government bills held by households')
    model.var('Bs', desc='Government bills supplied by the government')
    model.var('C', desc='Consumption goods')
    model.var('Hd', desc='Demand for cash')
    model.var('Hh', desc='Cash held by households')
    model.var('Hs', desc='Cash supplied by the central bank')
    model.var('R', 'Interest rate on government bills')
    model.var('T', desc='Taxes')
    model.var('V', desc='Household wealth')
    model.var('Ve', desc='Expected household wealth')
    model.var('Y', desc='Income = GDP')
    model.var('YD', desc='Disposable income of households')
    model.var('YDe', desc='Expected disposable income of households')

    model.set_param_default(0)
    model.param('alpha1', desc='Propensity to consume out of income', default=0.6)
    model.param('alpha2', desc='Propensity to consume o of wealth', default=0.4)
    model.param('lambda0', desc='Parameter in asset demand function', default=0.635)
    model.param('lambda1', desc='Parameter in asset demand function', default=5.0)
    model.param('lambda2', desc='Parameter in asset demand function', default=0.01)
    model.param('theta', desc='Tax rate', default=0.2)
    model.param('G', desc='Government goods', default=20.)
    model.param('Rbar', desc='Interest rate as policy instrument', default=0.025)

    # model equations; (-1) denotes a one-period lag
    model.add('Y = C + G')  # 4.1
    model.add('YD = Y - T + R(-1)*Bh(-1)')  # 4.2
    model.add('T = theta*(Y + R(-1)*Bh(-1))')  #4.3, theta < 1
    model.add('V = V(-1) + (YD - C)')  # 4.4
    model.add('C = alpha1*YDe + alpha2*V(-1)')  # 4.5E
    model.add('Bd = Ve*lambda0 + Ve*lambda1*R - lambda2*YDe')  # 4.7E
    model.add('Hd = Ve - Bd')  # 4.13
    model.add('Ve = V(-1) + (YDe - C)')  # 4.14
    model.add('Hh = V - Bh')  # 4.6
    model.add('Bh = Bd')  # 4.15
    model.add('Bs - Bs(-1) = (G + R(-1)*Bs(-1)) - (T + R(-1)*Bcb(-1))')  # 4.8
    model.add('Hs - Hs(-1) = Bcb - Bcb(-1)')  # 4.9
    model.add('Bcb = Bs - Bh')  # 4.10
    model.add('R = Rbar')  # 4.11
    model.add('YDe = YD(-1)')  # 4.16A

    return model
# Model PCEX1 scenario: run to a near steady state, then raise the
# propensity to consume out of expected income (alpha1: 0.6 -> 0.7)
pcex1 = create_pcex1_model()
pcex1.set_values({'Bcb': 21.576,
                  'Bh': 64.865,
                  'Bs': 86.441,
                  'Hh': 21.62,
                  'Hs': 21.62,
                  'V': 86.485,
                  'YD': 90,
                  'alpha1': 0.6,
                  'alpha2': 0.4,
                  'lambda0': 0.635,
                  'lambda1': 5.0,
                  'lambda2': 0.01,
                  'G': 20,
                  'Rbar': 0.025})

for i in range(10):
    pcex1.solve(iterations=100, threshold=1e-5)

# apply the consumption-propensity shock and continue solving
pcex1.parameters['alpha1'].value = 0.7
for i in range(40):
    pcex1.solve(iterations=100, threshold=1e-5)
```
###### Figure 4.5
```
caption = '''
Figure 4.5 Rise and fall of national income (GDP) following an increase in the
propensity to consume out of expected disposable income ($\\alpha_1$) '''

ydata = [s['Y'] for s in pcex1.solutions[8:]]

fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(104, 123)
axes.plot(ydata, 'b')

# add labels
plt.text(10, 116, 'National Income (GDP)')
fig.text(0.1, -0.05, caption);
```
###### Figure 4.6
```
caption = '''
Figure 4.6 Evolution of consumtion, expected disposable income and lagged wealth,
following an increase in the propensity to consume out of expected disposable
income ($\\alpha_1$).'''

vdata = [s['V'] for s in pcex1.solutions[8:]]
ydedata = [s['YDe'] for s in pcex1.solutions[8:]]
cdata = [s['C'] for s in pcex1.solutions[8:]]

fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(60, 106)
axes.plot(cdata, linestyle=':', color='r')
axes.plot(ydedata, linestyle='--', color='b')
axes.plot(vdata, color='k')

# add labels
plt.text(5, 102, 'Consumption')
plt.text(5, 90, 'Expected')
plt.text(5, 88, 'disposable')
plt.text(5, 86, 'income')
plt.text(10, 70, 'Lagged wealth')
fig.text(0.1, -.1, caption);
```
### Model PCEX2
```
def create_pcex2_model():
    """Build Model PCEX2.

    Model PC with adaptive expectations (YDe = YD(-1)) and an
    interest-sensitive propensity to consume: alpha1 falls when the
    lagged interest rate rises (alpha1 = alpha10 - iota*R(-1)).
    """
    model = Model()

    # Endogenous variables, registered as (name, description) pairs.
    model.set_var_default(0)
    for name, desc in [
        ('Bcb', 'Government bills held by the Central Bank'),
        ('Bd', 'Demand for government bills'),
        ('Bh', 'Government bills held by households'),
        ('Bs', 'Government bills supplied by the government'),
        ('C', 'Consumption goods'),
        ('Hd', 'Demand for cash'),
        ('Hh', 'Cash held by households'),
        ('Hs', 'Cash supplied by the central bank'),
        ('R', 'Interest rate on government bills'),
        ('T', 'Taxes'),
        ('V', 'Household wealth'),
        ('Ve', 'Expected household wealth'),
        ('Y', 'Income = GDP'),
        ('YD', 'Disposable income of households'),
        ('YDe', 'Expected disposable income of households'),
        ('alpha1', 'Propensity to consume out of income'),
    ]:
        model.var(name, desc=desc)

    # Parameters, registered as (name, description, default) triples;
    # a None default falls back to the model-wide parameter default.
    model.set_param_default(0)
    for name, desc, default in [
        ('alpha2', 'Propensity to consume out of wealth', 0.6),
        ('alpha10', 'Propensity to consume out of income - exogenous', None),
        ('iota', 'Impact of interest rate on the propensity to consume out of income', None),
        ('lambda0', 'Parameter in asset demand function', 0.635),
        ('lambda1', 'Parameter in asset demand function', 5.0),
        ('lambda2', 'Parameter in asset demand function', 0.01),
        ('theta', 'Tax rate', 0.2),
        ('G', 'Government goods', None),
        ('Rbar', 'Interest rate as policy instrument', None),
    ]:
        if default is None:
            model.param(name, desc=desc)
        else:
            model.param(name, desc=desc, default=default)

    # Behavioral and accounting equations (book equation numbers noted).
    for equation in [
        'Y = C + G',                                          # 4.1
        'YD = Y - T + R(-1)*Bh(-1)',                          # 4.2
        'T = theta*(Y + R(-1)*Bh(-1))',                       # 4.3, theta < 1
        'V = V(-1) + (YD - C)',                               # 4.4
        'C = alpha1*YDe + alpha2*V(-1)',                      # 4.5E
        'Bd = Ve*lambda0 + Ve*lambda1*R - lambda2*YDe',       # 4.7E
        'Hd = Ve - Bd',                                       # 4.13
        'Ve = V(-1) + (YDe - C)',                             # 4.14
        'Hh = V - Bh',                                        # 4.6
        'Bh = Bd',                                            # 4.15
        'Bs - Bs(-1) = (G + R(-1)*Bs(-1)) - (T + R(-1)*Bcb(-1))',  # 4.8
        'Hs - Hs(-1) = Bcb - Bcb(-1)',                        # 4.9
        'Bcb = Bs - Bh',                                      # 4.10
        'R = Rbar',                                           # 4.11
        'YDe = YD(-1)',                                       # 4.16A
        'alpha1 = alpha10 - iota*R(-1)',
    ]:
        model.add(equation)

    return model
# Instantiate Model PCEX2 at the Model PC steady state.
pcex2 = create_pcex2_model()
pcex2.set_values(
    {'Bcb': 21.576,
     'Bh': 64.865,
     'Bs': 86.441,   # Bs = Bh + Bcb
     'Hh': 21.62,
     'Hs': 21.62,    # Hs = Hh
     'R': 0.025,
     'V': 86.485,    # V = Bh + Hh
     'YD': 90,
     'alpha1': 0.6,
     'alpha2': 0.4,
     'alpha10': 0.7,
     'iota': 4,
     'lambda0': 0.635,
     'lambda1': 5,
     'lambda2': 0.01,
     'theta': 0.2,
     'G': 20,
     'Rbar': 0.025})

# Converge to the baseline before shocking the policy rate.
for _ in range(15):
    pcex2.solve(iterations=100, threshold=1e-5)

# Introduce the rate shock: +100 basis points on bills.
pcex2.parameters['Rbar'].value += 0.01
for _ in range(40):
    pcex2.solve(iterations=100, threshold=1e-5)
```
###### Figure 4.9
```
# Figure 4.9: GDP, disposable income, consumption and wealth after the
# interest-rate shock in PCEX2.
# Fix: corrected the "consumptiona" typo in the caption text.
caption = '''
Figure 4.9 Evolution of GDP, disposable income, consumption and wealth,
following an increase of 100 points in the rate of interest on bills, in Model PCEX2
where the propensity to consume reacts negatively to higher interest rates'''

# Series start at period 12 so the plot window brackets the shock.
vdata = [s['V'] for s in pcex2.solutions[12:]]    # household wealth
ydata = [s['Y'] for s in pcex2.solutions[12:]]    # national income
yddata = [s['YD'] for s in pcex2.solutions[12:]]  # disposable income
cdata = [s['C'] for s in pcex2.solutions[12:]]    # consumption

fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(80, 116)
axes.plot(ydata, linestyle=':', color='b')
axes.plot(vdata, linestyle='-', color='r')
axes.plot(yddata, linestyle='-.', color='k')
axes.plot(cdata, linestyle='--', color='g')
# add labels
plt.text(15, 112, 'National income (GDP)')
plt.text(15, 101, 'Household wealth')
plt.text(8, 89, 'Disposable')
plt.text(8, 87.5, 'income')
plt.text(12, 84, 'Consumption')
fig.text(0.1, -0.1, caption);
```
###### Figure 4.10
```
# Figure 4.10: tax revenues vs. government outlays (including net debt
# service) after the interest-rate shock in PCEX2.
caption = '''
Figure 4.10 Evolution of tax revenues and government expenditures including net
debt servicing, following an increase of 100 points in the rate of interest on bills,
in Model PCEX2 where the propensity to consume reacts negatively to higher
interest rates'''

# Walk consecutive solution pairs from period 12 onward: outlays in each
# period are G plus interest paid on last period's household bill holdings.
tdata = []
sumdata = []
for prev, curr in zip(pcex2.solutions[11:], pcex2.solutions[12:]):
    sumdata.append(curr['G'] + prev['R'] * prev['Bh'])
    tdata.append(curr['T'])

fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 1.1, 1.1])
axes.tick_params(top=False, right=False)
axes.spines['top'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.set_ylim(20.5, 23)
axes.plot(sumdata, linestyle='-', color='r')
axes.plot(tdata, linestyle='--', color='k')
# add labels
plt.text(6, 22.9, 'Government expenditures plus net debt service')
plt.text(15, 22, 'Tax revenues')
fig.text(0.1, -0.15, caption);
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.