code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''cv'': conda)'
# name: python_defaultSpec_1599331848733
# ---
# # Agente $\epsilon$-Guloso Para $k$-Armed-Bandit
# Leia a explicação do algoritmo no [README.md!](README.md)
import sys
sys.path.append('../')
import bandit #importa o ambiente
import numpy as np
# ## Definindo a função argmax
def argmax(Q_values):
    """Return the index of the largest value in Q_values.

    Ties between equally large values are broken uniformly at random,
    which avoids a systematic bias toward lower-indexed actions.
    """
    best = float("-inf")
    candidates = []
    for idx, value in enumerate(Q_values):
        if value > best:
            best = value
            candidates = [idx]
        elif value == best:
            candidates.append(idx)
    return np.random.choice(candidates)
# ## Definindo o Agente
class EpsilonGreedy(object):
    """k-armed-bandit agent following an epsilon-greedy policy with
    sample-average action-value estimates."""

    def __init__(self, k_arms, epsilon=0.1):
        self.epsilon = epsilon
        self.k_arms = k_arms
        self.n_arms = np.zeros(k_arms)    # pull count per arm
        self.Q_values = np.zeros(k_arms)  # incremental value estimates
        self.last_action = argmax(self.Q_values)

    def agent_step(self, reward):
        """Update the estimate of the last pulled arm and choose the next one.

        Receives the reward observed for ``last_action`` and returns the
        next arm index: a uniformly random arm with probability epsilon,
        otherwise the greedy (argmax) arm.
        """
        arm = self.last_action
        self.n_arms[arm] += 1
        # sample-average update: Q <- Q + (1/n) * (reward - Q)
        step_size = 1 / self.n_arms[arm]
        self.Q_values[arm] = self.Q_values[arm] + step_size * (reward - self.Q_values[arm])
        if np.random.random() < self.epsilon:
            return np.random.randint(self.k_arms)
        return argmax(self.Q_values)
# Aqui vemos a parte principal do algoritmo, na qual é escolhido um número aleatório, e caso ele seja menor que $\epsilon$, toma uma ação aleatória, o que estimula exploração.
# ## Testando o Algoritmo
import matplotlib.pyplot as plt
# +
# Run many independent bandit episodes and plot the running mean reward
# against the best achievable mean (average of each run's best arm).
num_runs = 200
num_steps = 1000
np.random.seed(1)
total_means = []   # one running-mean curve per run
max_mean = 0       # accumulates the best arm's expectation of every run
for runs in range(num_runs):
    agent = EpsilonGreedy(k_arms=10, epsilon=0.1)
    env = bandit.GaussianBandit(k_arms=10)
    score = [0]    # cumulative reward, seeded with 0
    means = []
    max_mean += np.max(env.bandits_expectations)
    for steps in range(num_steps):
        reward = env.gamble(agent.last_action)
        agent.last_action = agent.agent_step(reward)
        score.append(score[-1] + reward)
        means.append(score[-1]/(steps+1))  # running mean reward so far
    total_means.append(means)
plt.figure(figsize=(15,5))
# dashed line = theoretical best (mean of per-run best arms)
plt.plot([max_mean / num_runs for _ in range(num_steps)], linestyle="--")
plt.plot(np.mean(total_means, axis=0))
plt.legend(["Melhor Possível", "Epsilon-Guloso"])
plt.title("Recompensa Média de um Agente Epsilon-Guloso")
plt.xlabel("Steps")
plt.ylabel("Recompensa Média")
plt.show()
# -
# ## Testando $\epsilon$'s diferentes
# +
# Compare agents with different epsilons on fresh bandit environments.
num_runs = 200
num_steps = 1000
np.random.seed(1)
epsilons = [0,0.4,0.01,0.1]
plt.figure(figsize=(15,5))
max_mean = 0
for eps in epsilons:
    # BUG FIX: reset the accumulator for every epsilon.  Previously
    # `total_means` was initialised once before this loop and kept
    # growing, so every curve after the first averaged the runs of ALL
    # preceding epsilons together with its own.
    total_means = []
    for runs in range(num_runs):
        agent = EpsilonGreedy(k_arms=10, epsilon=eps)
        env = bandit.GaussianBandit(k_arms=10)
        score = [0]
        means = []
        max_mean += np.max(env.bandits_expectations)
        for steps in range(num_steps):
            reward = env.gamble(agent.last_action)
            agent.last_action = agent.agent_step(reward)
            score.append(score[-1] + reward)
            means.append(score[-1]/(steps+1))  # running mean reward
        total_means.append(means)
    # one averaged curve per epsilon
    plt.plot(np.mean(total_means, axis=0))
plt.legend(epsilons)
plt.title("Recompensa Média de um Agente Epsilon-Guloso")
plt.xlabel("Steps")
plt.ylabel("Recompensa Média")
plt.show()
# -
# Pode-se ver aqui como diferentes $\epsilon$'s tem diferentes resultados no agente. um $\epsilon$ = 0 é equivalente a um agente guloso, um $\epsilon$ = 0.4 possui resultado ruim porque toma muitas ações aleatórias. É possível ver que o $\epsilon$ = 0.01 não obteve resultado melhor que o de 0.1, porém, se deixássemos com mais steps, ele eventualmente o passaria.
|
Aprendizado por Reforço Clássico/Bandits/Agente Epsilon-Guloso/eps_greedy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Imports
import numpy as np
import pandas as pd
from collections import Counter
from statsmodels.distributions.empirical_distribution import ECDF
import os
import glob
import copy
# ## Opening the CSV files
# Load every preprocessed cohort CSV; both lists are sorted so that
# `dataframes` and `cohorts` stay aligned index-for-index.
dataframes = [pd.read_csv(file, sep=',', index_col=0) for file in sorted(glob.glob('../preprocessed_datasets' + "/*."+'csv'))]
# BUG FIX: `file.strip(".csv")` strips any of the characters '.', 'c',
# 's', 'v' from BOTH ends of the filename (e.g. 'vcohort.csv' -> 'ohort'),
# not the '.csv' suffix.  os.path.splitext drops only the extension.
cohorts = [os.path.splitext(file)[0] for file in sorted(os.listdir('../preprocessed_datasets'))]
# +
# reduce to BL visit and MCI participants only
all_cohorts = dict()
for name, df in zip(cohorts, dataframes):
    all_cohorts[name] = df.loc[(df["Visit"]==1) & (df["Diagnosis"].astype(str)=='MCI')]
#all_cohorts_ctl = copy.deepcopy(all_cohorts)
# control group: cognitively unimpaired (CU) participants at baseline visit
all_cohorts_ctl = dict()
for name, df in zip(cohorts, dataframes):
    all_cohorts_ctl[name] = df.loc[(df["Visit"]==1) & (df["Diagnosis"].astype(str)=='CU')]
# -
# ## Functions to perform essential calculations
def cat_stat_df(dfs, result):
    """Count categorical features per cohort and write % summaries into `result`.

    Parameters
    ----------
    dfs : dict of cohort-name -> DataFrame (one row per participant)
    result : DataFrame indexed by cohort name; mutated in place and returned.

    For each non-empty cohort, value counts of Sex / Diagnosis / APOE4 are
    tabulated, then converted into diagnosis counts, % female and % APOE4
    carriers.  Columns are finally renamed to 'Female %' / 'APOE4 %'.
    """
    # category -> which values count toward the reported statistic
    categorical = {'APOE4': [2.0, 1.0], 'Sex': ['Female'], 'Diagnosis': ['CU', 'MCI', 'AD']}
    column_cat = ['Sex', 'Diagnosis', 'APOE4']
    for cohort in dfs:
        if dfs[cohort].empty==False:
            calc_dict = dict()
            df = dfs[cohort]
            for col in column_cat:
                ca = Counter(df[col].dropna())
                calc_dict[col] = ca
            # rows = categories, columns = observed values; the extra
            # `cohort` row holds the column sums
            cohort_df = pd.DataFrame(calc_dict).transpose()
            cohort_df = cohort_df.dropna(how='all')
            cohort_df.loc[cohort] = cohort_df.sum()
            for i in categorical:
                if i == 'Diagnosis':
                    if i in cohort_df.index:
                        # raw counts per diagnosis; missing ones become 0
                        result.loc[cohort, categorical[i]] = cohort_df.loc[cohort, cohort_df.loc[i].notna()].astype(int)
                        result.loc[cohort, categorical[i]] = result.loc[cohort, categorical[i]].replace({np.nan: 0})
                        result.loc[cohort, 'n'] = int(sum(cohort_df.loc[cohort, cohort_df.loc[i].notna()]))
                    else:
                        result.loc[cohort, i] = np.nan
                        result.loc[cohort, 'n'] = int(len(dfs[cohort].index))
                elif i == 'APOE4':
                    if 'APOE4' in list(cohort_df.index.astype(str)):
                        # guarantee a homozygote (2.0) column so the carrier
                        # sum below does not KeyError
                        if '2.0' not in list(cohort_df.columns.astype(str)) and '2' not in list(cohort_df.columns.astype(str)):
                            cohort_df[2.0] = np.nan
                        # % of participants with >= 1 APOE4 allele
                        result.loc[cohort, i] = round(100 * sum([val for val in cohort_df.loc[i, categorical[i]]]) /
                                                      sum([val for val in cohort_df.loc[i].dropna()]), 1)
                    else:
                        result.loc[cohort, i] = np.nan
                elif i == 'Sex':
                    if (i in cohort_df.index) & ("Female" in cohort_df.columns):
                        # % female among rows with a recorded sex
                        result.loc[cohort, i] = round(100 * sum([val for val in cohort_df.loc[i, categorical[i]]])
                                                      / sum([val for val in cohort_df.loc[i].dropna()]), 1)
                    else:
                        result.loc[cohort, i] = 0
            result.loc[cohort, 'Total'] = int(len(dfs[cohort].index))
    result.rename(columns={"Sex": "Female %", "APOE4": "APOE4 %"}, inplace=True)
    return result
def num_quantiles(dfs, dfs_ctl, result):
    """Fill `result` with quantile summaries of the numeric features.

    Parameters
    ----------
    dfs : dict cohort-name -> DataFrame of the group of interest
    dfs_ctl : dict cohort-name -> control (CU) DataFrame, iterated in lockstep
    result : DataFrame indexed by cohort name; mutated in place and returned.

    For each cohort the .25/.5/.75 quantiles of each numeric column are
    rendered as a comma-separated string; for the biomarkers the same
    quantiles are additionally expressed as percentiles under the control
    cohort's ECDF and appended in parentheses.
    """
    column_num = ['Age', 'CDR', 'Education', 'MMSE', 'CDRSB', 'Hippocampus', 'A-beta', 'Ttau', 'Ptau']
    non_int_cols=["CDR", "CDRSB"]  # reported with one decimal, not rounded to int
    biomarker = ['Hippocampus', 'A-beta', 'Ttau', 'Ptau']
    # NOTE(review): zipping the two dicts assumes identical key order in
    # dfs and dfs_ctl — both are built from the same `cohorts` list above,
    # but verify if the construction ever changes.
    for df, ctl_df in zip(dfs, dfs_ctl):
        dfn = dfs[df]
        dfn_ctl = dfs_ctl[ctl_df]
        calc_dict = dict()
        calc_dict_ctl = dict()
        for col in column_num:
            quants = []
            if (dfn.empty == False) & (col in dfn.columns)==True:
                if len(dfn.index.unique()) > 2:
                    # return nan if no data
                    if pd.isnull(dfn[col].quantile()):
                        calc_dict[col] = np.nan
                        continue
                    if col in non_int_cols:
                        for i in [.25, .5, .75]:
                            quants.append(round(dfn[col].quantile(i), 1))
                    else:
                        for i in [.25, .5, .75]:
                            quants.append(int(round(dfn[col].quantile(i), 0)))
                    # create and save string to return
                    calc_dict[col] = str(quants[0]) + ', ' + str(quants[1]) + ', ' + str(quants[2])
                elif len(dfn.index.unique()) == 2:
                    # exactly two rows: list both raw values instead of quantiles
                    if col == 'Age':
                        quants = (list(dfn.iloc[0:][col].values))
                        calc_dict[col] = str(int(quants[0])) + ', ' + str(int(quants[1]))
                    else:
                        quants = (list(dfn.iloc[0:][col].values))
                        calc_dict[col] = str(round(quants[0], 1)) + ', ' + str(round(quants[1], 1))
                else:
                    # single row: report the lone value
                    if col == 'Age':
                        calc_dict[col] = int(dfn.iloc[0][col])
                    else:
                        calc_dict[col] = round(dfn.iloc[0][col], 1)
        for col in biomarker:
            if len(dfn_ctl.index.unique()) > 2:
                if (dfn_ctl.empty == False) & (col in dfn.columns)==True:
                    quants = []
                    ctl_dat = dfn_ctl[col].dropna()
                    # return nan if no data or no control distribution to compare to
                    if (pd.isnull(dfn[col].quantile())) or not (ctl_dat.any()):
                        calc_dict_ctl[col] = np.nan
                        continue
                    # percentile of each cohort quantile under the control ECDF
                    ecdf = ECDF(ctl_dat)
                    for i in [.25, .5, .75]:
                        value = int(round(ecdf(dfn[col].quantile(i)) * 100))
                        quants.append(value)
                    # create and save string to return
                    calc_dict_ctl[col] = str(quants[0]) + ', ' + str(quants[1]) + ', ' + str(quants[2])
                else:
                    calc_dict_ctl[col] = np.nan
        for clin, bio in zip([calc_dict], [calc_dict_ctl]):
            for marker in biomarker:
                if (marker in clin) & (marker in bio):
                    if pd.notnull(clin[marker]):
                        # append the control-percentile string in parentheses
                        clin[marker] += " (" + str(bio[marker]) + ")"
                else:
                    continue
        df_quan = pd.DataFrame(calc_dict, index=[df])
        for col in df_quan.columns:
            result.loc[df, col] = df_quan.loc[df, col]
    result.rename(columns={"Ttau": "tTau", "Ptau": "pTau"}, inplace=True)
    return result
# ## Make an empty dataframe to fill in with the results
# +
# Build the empty result table (one row per cohort), then fill it with the
# categorical and numeric summaries computed above.
results = pd.DataFrame(index = all_cohorts.keys(), columns = [col for col in all_cohorts['AIBL'].columns])
results.index.name = 'Name of Dataset'
for i in ['CU', 'MCI', 'AD', 'Total']:
    results[i] = np.nan
results.drop(columns=['Diagnosis', 'Visit'], inplace=True)
results = cat_stat_df(all_cohorts, results)
results = num_quantiles(all_cohorts, all_cohorts_ctl, results)
# -
# ## Final table
# sort columns
results = results[["n", "Total", "CU", "MCI", "AD", "Female %", "Age", "Education", "APOE4 %",
                   "MMSE", "CDR", "CDRSB", "Hippocampus", "A-beta", "tTau", "pTau"]]
results
# ### Outputs
# persist the per-cohort summary for downstream use
results[["Female %", "Age", "Education", "APOE4 %", "MMSE", "CDR", "CDRSB", "Hippocampus",
         "A-beta", "tTau", "pTau"]].to_csv("../adata_resources/MCI_summary_stats.csv")
print("N all cohorts: ", results["n"].sum())
|
quantiles/MCI_quantiles_all_datasets-v100.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # BFS
import sys
def bfs():
    """Breadth-first search with a goal test on a hard-coded digraph.

    Reads the goal key from stdin, starts at "A", and prints "success"
    plus the traversal order when the goal is found (checked both when a
    node is dequeued and when it is first discovered); prints "fail" if
    the queue empties first.

    NOTE(review): the traversal list is appended AFTER the dequeue goal
    test, so the goal node itself never appears in the printed order —
    the otherwise-identical BFS later in this file appends before testing;
    confirm which behaviour is intended.
    NOTE(review): sys.exit(0) terminates the whole interpreter, which is
    heavy-handed inside a notebook — a plain `return` would suffice.
    """
    from queue import Queue
    adj_list= {
        "A":["B","D"],
        "B":["C"],
        "C":[],
        "D":["E","F"],
        "E":["D","G"],
        "F":["D","E"],
        "G":["E","H"],
        "H":["I"],
        "I":["J","K"],
        "J":["K","L"],
        "K":[],
        "L":["M"],
        "M":["N","O"],
        "N":[],
        "O":["P"],
        "P":[]
    }
    visited = {} # dictionary
    level = {} #empty level dictionary
    parent = {}
    bfs_traversal_output = [] #for storing order
    queue = Queue()
    # initialise bookkeeping for every node
    for node in adj_list.keys():
        visited[node] = False
        parent[node] = None
        level[node] = -1
    goal = input("enter key : ")
    s = "A"
    visited[s] = True
    level[s] = 0
    queue.put(s)
    while not queue.empty():
        u = queue.get() #pop first element
        if goal == u:
            print("success")
            print(bfs_traversal_output)
            sys.exit(0)
        bfs_traversal_output.append(u)
        for v in adj_list[u]: #check all adjacent
            if not visited[v]:
                visited[v] = True
                parent[v] = u
                level[v] = level[u]+1
                queue.put(v)
                # early goal test at discovery time
                if goal == v:
                    print("success")
                    print(bfs_traversal_output)
                    sys.exit(0)
    print("fail")
    print(bfs_traversal_output)
    sys.exit(0)
#print(visited)
#print(level)
#print(parent)
bfs()
# # DFS
# +
# Recursive depth-first search with a goal test; prints "Success" and
# exits as soon as the target key is seen, otherwise prints "fail".
import sys
#tree creation
adj_list= {
    "A":["B","C"],
    "B":["D","E"],
    "C":["B","F"],
    "D":["E"],
    "E":["F"],
    "F":["G"],
    "G":["H","I"],
    "H":["I","J"],
    "I":["J","K"],
    "J":["K"],
    "K":["L"],
    "L":["M"],
    "M":["N","O"],
    "N":["O"],
    "O":["P"],
    "P":[]
}
color = {} #White,Gray,Black
#W=unvisited, G=current visiting, B= visited
parent = {}
dfs_traversal_output = []
target_key = input("enter target key : ")
#initialize all as unvisited
for node in adj_list.keys():
    color[node] = "W"
    parent[node] = None
#algo start
def dfs(u): #u=rootnode
    # NOTE(review): sys.exit(0) kills the interpreter on success; a
    # return value would be gentler in a notebook.
    color[u] = "G"#current
    #print(dfs_traversal_output)
    dfs_traversal_output.append(u)
    #if parent node is goal key
    if target_key == u:
        print("Success")
        sys.exit(0)
    for v in adj_list[u]:
        if color[v] == "W": #if unvisited color=w
            parent[v] = u
            #if child node is goal key
            if target_key == v:
                print("Success")
                sys.exit(0)
            dfs(v)
    color[u] = "B" #visited
dfs("A") #function call
print("fail")
# -
import sys
def bfs():
    """Breadth-first search with a goal test on a hard-coded (mostly
    undirected) graph.

    Reads the goal key from stdin, starts at "A", and prints "success"
    plus the traversal order when the goal is dequeued or first
    discovered; prints "fail" if the queue empties first.

    NOTE(review): unlike the first BFS cell above, this version appends
    the dequeued node to the traversal list BEFORE the goal test, so the
    goal node appears in the printed order.
    """
    from queue import Queue
    adj_list= {
        "A":["B","D"],
        "B":["A","C"],
        "C":["B"],
        "D":["A","E","F"],
        "E":["D","F","G"],
        "F":["D","E","H"],
        "G":["E","H"],
        "H":["I","F"],
        "I":["J","K"],
        "J":["K","L"],
        "K":[],
        "L":["M"],
        "M":["N","O"],
        "N":[],
        "O":["P"],
        "P":[]
    }
    visited = {} # dictionary
    level = {} #empty level dictionary
    parent = {}
    bfs_traversal_output = [] #for storing order
    queue = Queue()
    # initialise bookkeeping for every node
    for node in adj_list.keys():
        visited[node] = False
        parent[node] = None
        level[node] = -1
    goal = input("enter key : ")
    s = "A"
    visited[s] = True
    level[s] = 0
    queue.put(s)
    while not queue.empty():
        u = queue.get() #pop first element
        bfs_traversal_output.append(u)
        if goal == u:
            print("success")
            print(bfs_traversal_output)
            sys.exit(0)
        for v in adj_list[u]: #check all adjacent
            if not visited[v]:
                visited[v] = True
                parent[v] = u
                level[v] = level[u]+1
                queue.put(v)
                # early goal test at discovery time
                if goal == v:
                    print("success")
                    print(bfs_traversal_output)
                    sys.exit(0)
    print("fail")
    print(bfs_traversal_output)
    sys.exit(0)
#print(visited)
#print(level)
#print(parent)
bfs()
|
mani9793/Assignment_1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
# +
# Load L1 orderbook snapshots; column 0 is the date, column 1 the
# intra-day offset in milliseconds, which is folded into one timestamp.
src_dir = '../tests/resources/huge_dataset'
orderbooks_src = f'{src_dir}/orderbook_10_03_20.csv.gz'
trades_src = f'{src_dir}/trades_10_03_20.csv.gz'
orderbooks = pd.read_csv(orderbooks_src, header=None, nrows=60000)
orderbooks = orderbooks[[0, 1, 2, 3, 13, 23, 33]] # keep only L1 columns
orderbooks[0] = pd.to_datetime(orderbooks[0])
orderbooks[1] = orderbooks[1].apply(lambda x: datetime.timedelta(milliseconds=x))
orderbooks[0] = orderbooks[0] + orderbooks[1]
orderbooks = orderbooks.drop(columns=[1])
orderbooks.columns = ['timestamp', 'symbol', 'ask-price', 'ask-volume', 'bid-price', 'bid-volume']
# -
# Same timestamp reconstruction for the trades file.
trades = pd.read_csv(trades_src, header=None, nrows=10000)
trades[1] = pd.to_datetime(trades[1])
trades[2] = trades[2].apply(lambda x: datetime.timedelta(milliseconds=x))
trades[1] = trades[1] + trades[2]
trades = trades.drop(columns=[2, 5])
trades.columns = ['symbol', 'timestamp', 'price', 'volume', 'side']
t = pd.DatetimeIndex(orderbooks['timestamp'])
orderbooks = orderbooks.set_index(t)
# len(t.values)
t[:10]
tmp = orderbooks.sort_index().truncate(before='2020-03-10 18:25:12')
tmp2 = orderbooks.sort_index().truncate(after='2020-03-10 18:25:12')
tmp2
# NOTE(review): `.first` is a DataFrame/Series method, not a
# DatetimeIndex method — this line likely raises AttributeError; confirm.
t.first('3m')
# +
# Split by instrument and trade side for plotting below.
xbt_ob = orderbooks[orderbooks['symbol'] == 'XBTUSD']
xbt_tr = trades[trades['symbol'] == 'XBTUSD']
xbt_sell = xbt_tr[xbt_tr['side'] == 'Sell']
xbt_buy = xbt_tr[xbt_tr['side'] == 'Buy']
eth_ob = orderbooks[orderbooks['symbol'] == 'ETHUSD']
eth_tr = trades[trades['symbol'] == 'ETHUSD']
eth_sell = eth_tr[eth_tr['side'] == 'Sell']
eth_buy = eth_tr[eth_tr['side'] == 'Buy']
# +
# XBTUSD: L1 bid/ask lines with trades overlaid as scatter points.
plt.figure(figsize=(40,20))
plt.xlabel('Time')
# BUG FIX: the price label was set with a second plt.xlabel call, which
# silently overwrote 'Time'; price belongs on the y-axis.
plt.ylabel('Price')
plt.plot(xbt_ob['timestamp'], xbt_ob['ask-price'], label='ask price')
plt.plot(xbt_ob['timestamp'], xbt_ob['bid-price'], label='bid price')
plt.scatter(xbt_sell['timestamp'], xbt_sell['price'], label='Sells', c='g')
plt.scatter(xbt_buy['timestamp'], xbt_buy['price'], label='Buys', c='r')
plt.legend()
# +
# ETHUSD: same plot as the XBTUSD cell above.
plt.figure(figsize=(40,20))
plt.xlabel('Time')
# BUG FIX: second plt.xlabel overwrote 'Time'; price is the y-axis label.
plt.ylabel('Price')
plt.plot(eth_ob['timestamp'], eth_ob['ask-price'], label='ask price')
plt.plot(eth_ob['timestamp'], eth_ob['bid-price'], label='bid price')
plt.scatter(eth_sell['timestamp'], eth_sell['price'], label='Sells', c='g')
plt.scatter(eth_buy['timestamp'], eth_buy['price'], label='Buys', c='r')
plt.legend()
# -
|
notebooks/Plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Introduction to ICESat-2 Surface Velocity Calculations
#
# This notebook is meant to introduce the processing flow for a simple along-track velocity calculation using repeat cycles of ICESat-2 elevation profiles. The notebook covers:
# 1. Setting up the IS2_velocity library
# 2. Loading elevation data from an hdf5 file using the built-in reader function.
# 3. Smoothing and differentiating the elevation profile.
# 4. Correlating the differentiated profile to calculate surface velocities.
# Import the basic libraries
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib notebook
# ## Library Setup
#
# In order to run the IS2_velocity scripts as a python library, you must first:
# 1. Download or clone the repository at https://github.com/ICESAT-2HackWeek/IS2_velocity.git
# 2. Install the dependencies including numpy, scipy, h5py, astropy, icepyx, and the ICESat-2 pointCollection library.
# 3. Go the the home directory in our repository and run ‘python setup.py install’.
#
# If you successfully run the setup.py script, you should be able to run the cell below.
# As an example, import a function from the ICESat-2 surface velocity library
from IS2_velocity.correlation_processing import calculate_velocities
help(calculate_velocities)
# # Velocity calculation: Control correlations
# +
# Import functions for the velocity calculation; correlate all the beams from one set of repeat ground tracks, rgt = 0848
from IS2_velocity.correlation_processing import calculate_velocities
### Select rgt for now
rgt = '0848'
### Control the correlation step:
segment_length = 2000 # meters, how wide is the window we are correlating in each step
search_width = 1000 # meters, how far in front of and behind the window to check for correlation
along_track_step = 100 # meters; how much to jump between each consecutive velocity determination
max_percent_nans = 10 # Maximum % of segment length that can be nans and still do the correlation step
### Which product
product = 'ATL06'
if product == 'ATL06':
    dx = 20  # ATL06 along-track posting (meters)
### Select filter type and required arguments; Currently only this running mean is supported
filter_type = 'running_average'
running_avg_window = 100 # meters
# -
# # Velocity calculation: Load Data / Import dictionaries
# +
from IS2_velocity.readers import load_data_by_rgt
# atl06_to_dict is within the function load_data_by_rgt
# path to data, relative to folder /notebooks
data_dir = '../data/'
rgt = '0848'
# Load data; This step loads raw data, interpolates to constant spacing, filters if requested, and
# differentiates.  Returned values are dictionaries keyed by cycle then beam.
filter_type = 'running_average'
running_avg_window = 100
x_atc, lats, lons, h_li_raw, h_li_raw_NoNans, h_li, h_li_diff, times, min_seg_ids, \
segment_ids, cycles_this_rgt, x_ps, y_ps = \
    load_data_by_rgt(rgt = rgt, path_to_data = data_dir, product = 'ATL06', \
                     filter_type = filter_type, running_avg_window = running_avg_window, \
                     format = 'hdf5')
# -
# ## Visualize one of the beams
# +
# Plot the landice elevation along the pass for one beam, both cycles.
cycle1='03'
cycle2='04'
beam='gt1l'
plt.figure(figsize=(8,4))
plt.plot(x_atc[cycle1][beam]/1000.,h_li[cycle1][beam],c='indianred')
plt.plot(x_atc[cycle2][beam]/1000.,h_li[cycle2][beam],c='steelblue')
plt.ylabel('Elevation (m)')
plt.xlabel('Along-Track Distance (km)')
plt.tight_layout()
# -
# # Velocity calculation: Calculate velocity between cycles 03 and 04
# +
from IS2_velocity.correlation_processing import calculate_velocities
# Calculate velocity between cycles 3 and 4 for all six beams, using the
# correlation parameters defined in the configuration cell above.
cycle1 = '03'
cycle2 = '04'
beams = ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']
saving = True
write_out_path = '.'
write_out_prefix = ''
spatial_extent = np.array([-65, -86, -55, -81])  # lon/lat bounding box
map_data_root = '/Users/grace/Dropbox/Cornell/projects/003/FIS_data/'
velocities, correlations, lags, midpoints_x_atc, midpoints_xy, midpoints_lons, midpoints_lats = \
    calculate_velocities(rgt, x_atc, h_li_raw, h_li_diff, lats, lons, segment_ids, times, beams, cycle1, cycle2, \
                         product, segment_length, search_width, along_track_step, max_percent_nans, dx, saving = True, \
                         write_out_path = write_out_path, prepend = write_out_prefix,spatial_extent = spatial_extent, \
                         map_data_root = map_data_root)
# -
# # Velocity calculation: Visualize result for one beam
# +
from matplotlib.gridspec import GridSpec
# Pull out the two cycles' profiles, slopes and the derived velocities
# for a single beam.
beam = 'gt1l'
x1 = x_atc['03'][beam]
x2 = x_atc['04'][beam]
h1 = h_li['03'][beam]
h2 = h_li['04'][beam]
dh1 = h_li_diff['03'][beam]
dh2 = h_li_diff['04'][beam]
vel_xs = midpoints_x_atc[rgt][beam]
velocs = velocities[rgt][beam]
plt.figure(figsize=(8,4))
gs = GridSpec(2,2)
# Plot the elevation profiles again
plt.subplot(gs[0,0])
plt.tick_params(bottom=False,labelbottom=False)
plt.plot(x1/1000.-29000,h1,'.',c='indianred')
plt.plot(x2/1000.-29000,h2,'.',c='steelblue',ms=3)
plt.ylabel('Elevation (m)')
plt.title('ATL06',fontweight='bold')
plt.xlim(80,580)
# Plot the slopes again
plt.subplot(gs[1,0])
plt.tick_params(bottom=False,labelbottom=False)
plt.plot(x1/1000.-29000,dh1,'.',c='indianred')
plt.plot(x2/1000.-29000,dh2,'.',c='steelblue',ms=3)
plt.ylim(-.05,.05)
plt.ylabel('Surface Slope (m/m)')
plt.xlim(80,580)
# Plot the calculated velocities along track
ax5 = plt.subplot(gs[0,1])
plt.plot(vel_xs/1000.-29000,velocs,'.',c='k',label='ATL06')
plt.ylabel('Velocity (m/yr)')
plt.xlabel('Along-Track Distance (km)')
plt.xlim(80,580)
plt.ylim(-500,1500)
plt.tight_layout()
# +
from IS2_velocity.plotting import plot_measures_along_track_comparison
# NOTE(review): `datapath` is assigned but not passed to the call below —
# confirm whether it is still needed.
datapath = '/Users/grace/Dropbox/Cornell/projects/003/git_repo_old_Hackweek/surface_velocity/contributors/grace_barcheck/download/'
out_path = '/Users/grace/Dropbox/Cornell/projects/003/out_tmp/'
map_data_root = '/Users/grace/Dropbox/Cornell/projects/003/FIS_data/'
correlation_threshold = 0.65
plot_out_location = out_path
velocity_number = 0
spatial_extent = np.array([-65, -86, -55, -81])
plot_measures_along_track_comparison(rgt, beams, out_path, correlation_threshold, spatial_extent, plot_out_location, map_data_root, velocity_number)
|
notebooks/Introduction_Tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 微分可能LUTモデルによるMobileNetライクなMNIST学習
#
# Differentiable LUTモデルでMobileNet風の畳み込み層を形成して、一般的なデータに対してCNNによる回路学習を行います。
# +
import os
import shutil
import numpy as np
from tqdm.notebook import tqdm
import torch
import torchvision
import torchvision.transforms as transforms
import binarybrain as bb
# -
# ### データセット
#
# データセットの準備には torchvision を使います
# +
# configuration
net_name = 'MnistDifferentiableLutMobileNet'
data_path = os.path.join('./data/', net_name)
rtl_sim_path = '../../verilog/mnist/tb_mnist_lut_cnn'
rtl_module_name = 'MnistLutCnn'
# NOTE(review): 'velilog' looks like a typo for 'verilog'; kept because the
# export cell at the end of the notebook uses these exact names.
output_velilog_file = os.path.join(data_path, rtl_module_name + '.v')
sim_velilog_file = os.path.join(rtl_sim_path, rtl_module_name + '.v')
bin_mode = True
frame_modulation_size = 7  # binary frame oversampling factor
epochs = 4
mini_batch_size = 64
# dataset: MNIST via torchvision, downloaded on first run
dataset_path = './data/'
dataset_train = torchvision.datasets.MNIST(root=dataset_path, train=True, transform=transforms.ToTensor(), download=True)
dataset_test = torchvision.datasets.MNIST(root=dataset_path, train=False, transform=transforms.ToTensor(), download=True)
loader_train = torch.utils.data.DataLoader(dataset=dataset_train, batch_size=mini_batch_size, shuffle=True, num_workers=2)
loader_test = torch.utils.data.DataLoader(dataset=dataset_test, batch_size=mini_batch_size, shuffle=False, num_workers=2)
# -
# ## ネットワーク構築
#
# make_conv_layer にて pointwise-depthwise-pointwise の畳み込みを組み合わせた層を作ります。
#
# Convolution2d は指定したモデルを im2col とcol2im で挟み込んで畳み込み層を生成します。
#
# pointwise はフィルタサイズが1x1の通常のConv層です<br>
# depthwise は畳み込み内部に持つ DifferentiableLut に同一チャネル内での接続のみを指定して構成します。<br>
# +
# In binary mode, using the BIT type reduces memory consumption
bin_dtype = bb.DType.BIT if bin_mode else bb.DType.FP32

def make_conv_layer(output_ch, hidden_ch, padding='valid', bin_dtype=bb.DType.BIT):
    """Build a MobileNet-style pointwise -> depthwise -> pointwise block
    out of differentiable-LUT layers.

    output_ch: channel count produced by the final pointwise conv.
    hidden_ch: channel count of the intermediate depthwise stage.
    padding:   padding mode of the 3x3 depthwise convolution.
    bin_dtype: forward dtype (BIT in binary mode, FP32 otherwise).
    """
    return bb.Sequential([
        # input(pointwise): 1x1 convolution expanding into hidden_ch
        bb.Convolution2d(
            bb.Sequential([
                bb.DifferentiableLut([hidden_ch*6, 1, 1], bin_dtype=bin_dtype),
                bb.DifferentiableLut([hidden_ch, 1, 1], connection='serial', bin_dtype=bin_dtype),
            ]),
            filter_size=(1, 1),
            fw_dtype=bin_dtype),
        # hidden(depthwise): 3x3 conv whose LUTs connect within one channel only
        bb.Convolution2d(
            bb.Sequential([
                bb.DifferentiableLut([hidden_ch, 1, 1], connection='depthwise', bin_dtype=bin_dtype),
            ]),
            filter_size=(3, 3), padding=padding,
            fw_dtype=bin_dtype),
        # output(pointwise): 1x1 convolution down to output_ch
        bb.Convolution2d(
            bb.Sequential([
                bb.DifferentiableLut([output_ch*6, 1, 1], connection='serial', bin_dtype=bin_dtype),
                bb.DifferentiableLut([output_ch, 1, 1], connection='serial', bin_dtype=bin_dtype),
            ]),
            filter_size=(1, 1),
            fw_dtype=bin_dtype),
    ])
# define network: binary modulation front-end, two conv stages with
# max-pooling, a LUT classifier head, and a binary-to-real decoder.
net = bb.Sequential([
    bb.RealToBinary(frame_modulation_size=frame_modulation_size, bin_dtype=bin_dtype),
    bb.Sequential([
        make_conv_layer(36, 36, bin_dtype=bin_dtype),           # 28x28 -> 26x26
        make_conv_layer(2*36, 36, bin_dtype=bin_dtype),         # 26x26 -> 24x24
        bb.MaxPooling(filter_size=(2, 2), fw_dtype=bin_dtype),  # 24x24 -> 12x12
    ]),
    bb.Sequential([
        make_conv_layer(2*36, 72, bin_dtype=bin_dtype),         # 12x12 -> 10x10
        make_conv_layer(4*36, 72, bin_dtype=bin_dtype),         # 10x10 -> 8x8 (comment said 4x4; a 3x3 valid conv removes 2 px)
        bb.MaxPooling(filter_size=(2, 2), fw_dtype=bin_dtype),  # 8x8 -> 4x4
    ]),
    bb.Sequential([
        # classifier head: 4x4 conv collapsing to a 1x1 map of 10 outputs
        bb.Convolution2d(
            bb.Sequential([
                bb.DifferentiableLut([6*128], bin_dtype=bin_dtype),
                bb.DifferentiableLut([128], bin_dtype=bin_dtype),
                bb.DifferentiableLut([10*6*6], bin_dtype=bin_dtype),
                bb.DifferentiableLut([10*6], bin_dtype=bin_dtype),
                bb.DifferentiableLut([10], bin_dtype=bin_dtype),
            ]),
            filter_size=(4, 4),
            fw_dtype=bin_dtype),
    ]),
    bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype)
])
net.set_input_shape([1, 28, 28])
if bin_mode:
    net.send_command("binary true")
# print(net.get_info())
# -
# ## 学習実施
#
# 学習を行います
# +
#bb.load_networks(data_path, net)
# learning: softmax cross-entropy loss, categorical accuracy, Adam
loss = bb.LossSoftmaxCrossEntropy()
metrics = bb.MetricsCategoricalAccuracy()
optimizer = bb.OptimizerAdam()
optimizer.set_variables(net.get_parameters(), net.get_gradients())
for epoch in range(epochs):
    loss.clear()
    metrics.clear()
    # learning
    with tqdm(loader_train) as t:
        for images, labels in t:
            # labels are one-hot encoded via an identity-matrix lookup
            x_buf = bb.FrameBuffer.from_numpy(np.array(images).astype(np.float32))
            t_buf = bb.FrameBuffer.from_numpy(np.identity(10)[np.array(labels)].astype(np.float32))
            y_buf = net.forward(x_buf, train=True)
            dy_buf = loss.calculate(y_buf, t_buf)
            metrics.calculate(y_buf, t_buf)
            net.backward(dy_buf)
            optimizer.update()
            t.set_postfix(loss=loss.get(), acc=metrics.get())
    # test: reuse the same accumulators after clearing them
    loss.clear()
    metrics.clear()
    for images, labels in loader_test:
        x_buf = bb.FrameBuffer.from_numpy(np.array(images).astype(np.float32))
        t_buf = bb.FrameBuffer.from_numpy(np.identity(10)[np.array(labels)].astype(np.float32))
        y_buf = net.forward(x_buf, train=False)
        loss.calculate(y_buf, t_buf)
        metrics.calculate(y_buf, t_buf)
    # checkpoint after every epoch, then report test loss/accuracy
    bb.save_networks(data_path, net)
    print('epoch[%d] : loss=%f accuracy=%f' % (epoch, loss.get(), metrics.get()))
# -
# ## RTL(Verilog)変換
#
# FPGA化するために Verilog に変換します。インターフェースはXilinx社のAXI4 Stream Video 仕様(フレームスタートでtuserが立つ)となります。
# MaxPooling の単位で画像サイズが縮小されてしまうので、現状、この単位でしか変換できないため3つに分けて出力しています。
# +
# export verilog: one RTL module per max-pooling stage (net[1..3]),
# written into a single file.
with open(output_velilog_file, 'w') as f:
    f.write('`timescale 1ns / 1ps\n\n')
    bb.dump_verilog_lut_cnv_layers(f, rtl_module_name + 'Cnv0', net[1])
    bb.dump_verilog_lut_cnv_layers(f, rtl_module_name + 'Cnv1', net[2])
    bb.dump_verilog_lut_cnv_layers(f, rtl_module_name + 'Cnv2', net[3])
# overwrite-copy the generated file into the simulation directory
shutil.copyfile(output_velilog_file, sim_velilog_file)
|
samples/python/mnist/MnistDifferentiableLutCnnMobileNet.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Merge Nodes
#
# One issue with SemmedDB (or the UMLS Metathesaurus in general) is that the CUIs are too granular in detail.
#
# Take for example Imatinib Mesylate. The following concepts are all found within SemmedDB:
#
# | UMLS CUI | Concept Name |
# |----------|-------------------|
# | C0939537 | Imatinib mesylate |
# | C0385728 | CGP 57148 |
# | C1097576 | ST 1571 |
# | C0935987 | Gleevec |
# | C0906802 | STI571 |
# | C0935989 | imatinib |
#
# However, all of these concepts describe the same chemical structure. Luckily, all of these UMLS CUIs can be cross-referenced to just 1 MeSH Descriptor ID: `D000068877`. This will allow us to merge these concept within the network.
#
# Diseases have similar issues, however, they are a little less straightforward. A similar, yet more complex approach will be used for their combination.
# +
from tqdm import tqdm
from collections import defaultdict
from collections import Counter
from queue import Queue
from itertools import chain
import pandas as pd
import pickle
import sys
sys.path.append('../../hetnet-ml/src')
import graph_tools as gt
sys.path.append('../tools/')
import load_umls
# -
# ## 1. Import the DrugCentral info for Gold Standard and Add Compound Names
# Load DrugCentral relations, identifier cross-references and synonyms.
rels = pd.read_csv('../data/drugcentral_rel_06212018.csv')
rels.head(2)
dc_ids = pd.read_csv('../data/drugcentral_ids_06212018.csv')
dc_ids.head(2)
syn = pd.read_csv('../data/drugcentral_syn_06212018.csv')
syn.rename(columns={'id': 'struct_id'}, inplace=True)
syn.head(2)
# keep only each structure's preferred name
pref = syn.query('preferred_name == 1').reset_index(drop=True)
pref = pref.dropna(subset=['struct_id'])
pref['struct_id'] = pref['struct_id'].astype('int64')
pref.head(2)
# attach compound names to the relations table
struct_id_to_name = pref.set_index('struct_id')['name'].to_dict()
rels['c_name'] = rels['struct_id'].map(lambda i: struct_id_to_name.get(i, float('nan')))
# sanity check: every relation row should have received a name
rels.shape[0] == rels['c_name'].count()
# ## 2. Map the Compounds in Semmed DB to MeSH
#
# Although we will be mapping all UMLS CUIs (that can be mapped) to MeSH, after the initial map, we will start by taking a closer look at the compounds. Because there are multiple sources of X-refs for both Compounds and Diseases, these special Metanodes will be a bit more complicated than a simple direct map.
#
# Starting with a direct Map from UMLS to MeSH will combine a lot of the Compound nodes, reducing the number of total unique compounds.
# Load SemmedDB nodes and the UMLS->MeSH cross-reference dictionary.
nodes = gt.remove_colons(pd.read_csv('../data/nodes_VER31_R.csv'))
umls_to_mesh = pickle.load(open('../data/UMLS-CUI_to_MeSH-Descripctor.pkl', 'rb'))
# keep only unambiguous (one-to-one) UMLS -> MeSH mappings
umls_to_mesh_1t1 = {k: v[0] for k, v in umls_to_mesh.items() if len(v) == 1}
nodes['mesh_id'] = nodes['id'].map(lambda c: umls_to_mesh_1t1.get(c, float('nan')))
drugs = nodes.query('label == "Chemicals & Drugs"').copy()
# +
# Report mapping coverage and how much the MeSH merge shrinks the drug set.
print('{:.3%} of Drug IDs mapped via MeSH:'.format(drugs['mesh_id'].count() / drugs.shape[0]))
print('{:,} of {:,} Mapped to {:,} Unique MSH ids'.format(drugs['mesh_id'].count(), drugs.shape[0], drugs['mesh_id'].nunique()))
num_drugs = drugs['id'].nunique()
msh_compress_drugs = drugs['mesh_id'].fillna(drugs['id']).nunique()
print('{:.3%} Reduction in Drugs by using MSH synonmyms {:,} --> {:,}'.format((num_drugs - msh_compress_drugs)/num_drugs, num_drugs, msh_compress_drugs))
# -
# ## 3. Use UMLS MeSH mappings and Mappings from DrugCentral to ensure Maximum overlap
#
# DrugCentral also has it's own internal identifiers for compounds as well as mappings from both their internal id to UMLS and MeSH.
#
# If we treat these mappings all as edges in a network, and use a Subnet finding algorthim, each subnet will essentially be a unique chemical structure, with the nodes of that subnet representing all of the different identifiers that map to that structure.
# +
# Build an undirected adjacency list of identifier cross-references:
# DrugCentral struct_id <-> UMLS CUI / MeSH descriptor edges.
dc_maps = dc_ids.query('id_type in {}'.format(["MESH_DESCRIPTOR_UI", "MESH_SUPPLEMENTAL_RECORD_UI" , "UMLSCUI"]))
drug_adj_list = defaultdict(set)
for row in tqdm(dc_maps.itertuples(), total=len(dc_maps)):
    drug_adj_list[row.struct_id].add(row.identifier)
    drug_adj_list[row.identifier].add(row.struct_id)
# -
# Flatten the UMLS->MeSH dict into aligned key/value lists (one row per pair).
umls_keys = list(chain(*[[k]*len(v) for k, v in umls_to_mesh.items()]))
mesh_vals = list(chain(*[v for v in umls_to_mesh.values()]))
umls_to_mesh_df = pd.DataFrame({'umls': umls_keys, 'mesh': mesh_vals})
drug_ids = drugs['id'].unique()
umls_to_mesh_drugs = umls_to_mesh_df.query('umls in @drug_ids')
# +
# BUG FIX: calling set() on a DataFrame iterates its COLUMN LABELS, so the
# original `set(dc_maps.query('id_type == "UMLSCUI"'))` contributed column
# names like 'identifier' instead of the actual CUIs.  Select the
# 'identifier' column, exactly as the mesh_set line below already does.
umls_set = set(drugs['id']) | set(dc_maps.query('id_type == "UMLSCUI"')['identifier'])
mesh_set = set(mesh_vals) | set(dc_maps.query('id_type in {}'.format(["MESH_DESCRIPTOR_UI", "MESH_SUPPLEMENTAL_RECORD_UI"]))['identifier'])
# sanity check: the UMLS and MeSH identifier namespaces must not overlap
len(umls_set & mesh_set) == 0
# -
# Add UMLS<->MeSH cross-reference edges for the drug CUIs.
for row in umls_to_mesh_drugs.itertuples():
    drug_adj_list[row.umls].add(row.mesh)
    drug_adj_list[row.mesh].add(row.umls)
# Ensure that all Struct IDs from DrugCentral make it into the subnets (even if no xrefs):
# a self-edge guarantees each indication compound appears as a node.
for struct_id in rels.query('relationship_name == "indication"')['struct_id'].unique():
    drug_adj_list[struct_id].add(struct_id)
def get_subnets(adj_list):
    """Return the connected components of an undirected adjacency list.

    Each component is keyed by the first identifier from which it was
    discovered; the value is the set of identifiers reachable from it,
    i.e. all ids presumed to refer to the same underlying concept.
    """
    subnets = defaultdict(set)
    seen = set()
    for root in tqdm(set(adj_list.keys())):
        if root in seen:
            continue
        # Breadth-first walk of the component containing `root`
        seen.add(root)
        frontier = Queue()
        frontier.put(root)
        while not frontier.empty():
            node = frontier.get()
            seen.add(node)
            for nbr in adj_list[node]:
                subnets[root].add(nbr)
                if nbr not in seen:
                    frontier.put(nbr)
                    seen.add(nbr)
    return subnets
subnets = get_subnets(drug_adj_list)
len(subnets)
# Find a label for each group.
# Will choose based on number of umls items that can be mapped to a single MeSH term (more == higher priority).
# +
# Count how often each identifier appears across the mapping sources;
# a higher count means the id consolidates more synonyms.
mesh_counts = umls_keys + mesh_vals + list(dc_maps['identifier']) + list(dc_maps['struct_id'].unique())
mesh_counts = Counter(mesh_counts)
rekeyed_subnets = dict()
for v in subnets.values():
    # Prefer the id that consolidated the most synonyms, then MeSH ids, then UMLS ids
    sort_sub = sorted(list(v), key=lambda k: (mesh_counts[k], k in mesh_set, k in umls_set), reverse=True)
    new_key = sort_sub[0]
    rekeyed_subnets[new_key] = v
# +
# Final map is just inverse of the subnets dict:
# every member identifier points at its group's chosen key.
final_drug_map = {member: key
                  for key, members in rekeyed_subnets.items()
                  for member in members}
# -
len(final_drug_map)
# Use a context manager so the file handle is closed deterministically
# (the original passed a bare open() to pickle.dump and never closed it).
with open('../data/drug_merge_map.pkl', 'wb') as fh:
    pickle.dump(final_drug_map, fh)
# ## 4. Map all the compounds and check
#
# Do a final mapping of the compound IDs for merging, and spot check a few results
# Some items won't necessarily be mappable, so use original ID
drugs['new_id'] = drugs['id'].map(lambda i: final_drug_map.get(i, i))
# Map the Gold Standard indications as well
rels['compound_new_id'] = rels['struct_id'].map(lambda i: final_drug_map.get(i, i))
# Record which vocabulary the final id belongs to
drugs['id_source'] = drugs['new_id'].map(lambda x: 'MeSH' if x in mesh_set else 'UMLS')
drugs.head(2)
print('{:.3%} Reduction in Drugs {:,} --> {:,}'.format(
    (drugs.shape[0] - drugs['new_id'].nunique())/drugs.shape[0], drugs.shape[0], drugs['new_id'].nunique()))
# +
# Check how well DrugCentral indications now link to the network's drug ids
inds = rels.query('relationship_name == "indication"')
drug_ids_semmed = set(drugs['new_id'])
drugs_in_inds = set(inds['compound_new_id'].dropna())
num_ind_in_semmed = len(drugs_in_inds & drug_ids_semmed)
print('{:.3%} of Drugs in DC Indications mapped: {:,} out of {:,}'.format(
    (num_ind_in_semmed / len(drugs_in_inds)), num_ind_in_semmed, len(drugs_in_inds)))
ind_semmed_comp = inds.query('compound_new_id in @drug_ids_semmed').shape[0]
print('{:.3%} of Indications have mappable Drug: {:,} out of {:,}'.format(
    (ind_semmed_comp / len(inds)), ind_semmed_comp, len(inds)))
# -
# Looks at the Mesh IDs that mapped to the greatest number of CUIs and see if the mappings make sense...
mesh_counts.most_common(3)
# +
# Spot-check the three most-consolidated ids one at a time
to_q = mesh_counts.most_common(3)[0][0]
drugs.query('new_id == @to_q')
# +
to_q = mesh_counts.most_common(3)[1][0]
drugs.query('new_id == @to_q')
# +
to_q = mesh_counts.most_common(3)[2][0]
drugs.query('new_id == @to_q')
# -
# These all look pretty good. All of the names for `D000111` are listed under the aliases on the [Acetylcysteine MeSH page](https://meshb.nlm.nih.gov/record/ui?ui=D000111)
#
# Now let's look at a few compounds that may have a new MeSH ID distinct from their original, thanks to incorporating the DrugCentral X-refs
# Drugs whose final id differs from their original UMLS-derived MeSH id
new_id_not_mesh = drugs.dropna(subset=['mesh_id']).query('new_id != mesh_id')
print(len(new_id_not_mesh))
new_id_not_mesh.head(10)
# Of those, keep the ones whose final id is itself a MeSH id (re-assigned via x-refs)
diff_new_ids = new_id_not_mesh.query('id_source == "MeSH"')['new_id'].values
diff_new_ids[:5]
inds.query('compound_new_id == "D002108"')
# ## 5. Diseases:
#
# With diseases we will do a little more in terms of the mapping. Because diseases appear in both UMLS and MeSH, we will incorporate some of the mappings from Disease Ontology Slim to try and get general diseases. The workflow will be as follows:
#
# 1. Map nodes to Mesh
# 2. Map ind CUI and/or SNOMED terms from DrugCentral to Mesh
# 3. Incorporate DO Slim mappings
# 4. Find overlap between these sources
#
diseases = nodes.query('label == "Disorders"').copy()
len(diseases)
# Show the MeSH ids that absorb the most disease CUIs, as a quick sanity check
dis_numbers = diseases.groupby('mesh_id').apply(len).sort_values(ascending=False)
param = dis_numbers[:10].index.tolist()
diseases.query('mesh_id in @param').sort_values('mesh_id')
# UMLS MRCONSO provides CUI <-> SNOMED concept cross-references
conso = load_umls.open_mrconso()
conso.head(2)
snomed_xrefs = conso.query("SAB == 'SNOMEDCT_US'").dropna(subset=['CUI', 'SCUI'])
snomed_xrefs.head(2)
# +
# Build the disease identifier graph, starting from the UMLS <-> MeSH map
dis_adj_list = defaultdict(set)
disease_ids = set(diseases['id'].unique())
umls_to_mesh_dis = umls_to_mesh_df.query('umls in @disease_ids')
for row in umls_to_mesh_dis.itertuples():
    dis_adj_list[row.umls].add(row.mesh)
    dis_adj_list[row.mesh].add(row.umls)
# +
# Convert the snomed concept ids to string since they're strings in the adj_list
rels['snomed_conceptid'] = rels['snomed_conceptid'].map(lambda i: str(int(i)) if not pd.isnull(i) else i)
sub_rels = rels.dropna(subset=['snomed_conceptid', 'umls_cui'])
for row in sub_rels.itertuples():
    dis_adj_list[row.umls_cui].add(row.snomed_conceptid)
    dis_adj_list[row.snomed_conceptid].add(row.umls_cui)
    # Make sure to get mesh to CUI maps for the new cuis picked up via drugcentral
    if row.umls_cui in umls_to_mesh_1t1:
        dis_adj_list[umls_to_mesh_1t1[row.umls_cui]].add(row.umls_cui)
        dis_adj_list[row.umls_cui].add(umls_to_mesh_1t1[row.umls_cui])
# +
# Pull in SNOMED cross-references for any disease CUI or indication SNOMED id
ind_snomed = set(rels['snomed_conceptid'])
dis_umls = set(rels['umls_cui']) | disease_ids
dis_snomed_xrefs = snomed_xrefs.query('CUI in @dis_umls or SCUI in @ind_snomed')
print(len(dis_snomed_xrefs))
for row in tqdm(dis_snomed_xrefs.itertuples(), total=len(dis_snomed_xrefs)):
    dis_adj_list[row.CUI].add(row.SCUI)
    dis_adj_list[row.SCUI].add(row.CUI)
    # Make sure to get mesh to CUI maps for the new cuis picked up via drugcentral
    if row.CUI in umls_to_mesh_1t1:
        dis_adj_list[umls_to_mesh_1t1[row.CUI]].add(row.CUI)
        dis_adj_list[row.CUI].add(umls_to_mesh_1t1[row.CUI])
# -
# ### DO Slim Integration
#
# The following disease-ontology files were generated from a [fork of <NAME>'s work generating the Disease Ontology Slim](https://github.com/mmayers12/disease-ontology). The only major difference between Daniel's Release and this version is that I have added in the Disease Ontology terms from their 'Rare Slim' list to attempt to get some coverage of Rare Monogenic Diseases. These can be another way to consolidate diseases into more general types
#
# First we'll need a DOID to UMLS_CUI map, WikiData can provide a quick and dirty map
# +
from wikidataintegrator import wdi_core
# SPARQL: every Wikidata item carrying both a Disease Ontology ID (P699)
# and a UMLS CUI (P2892) -- a quick-and-dirty DOID -> CUI map
query_text = """
select ?doid ?umlscui
WHERE
{
?s wdt:P699 ?doid .
?s wdt:P2892 ?umlscui .
}
"""
result = wdi_core.WDItemEngine.execute_sparql_query(query_text, as_dataframe=True)
result.to_csv('../data/doid-to-umls.csv', index=False)
doid_to_umls = result.set_index('doid')['umlscui'].to_dict()
# -
slim_xref = pd.read_table('../../disease-ontology/data/xrefs-prop-slim.tsv')
do_slim = pd.read_table('../../disease-ontology/data/slim-terms-prop.tsv')
slim_xref.head(2)
slim_xref['resource'].value_counts()
# Only x-refs in vocabularies already present in our graph are useful here
resources = ['SNOMEDCT_US_2016_03_01', 'UMLS', 'MESH', 'SNOMEDCT', 'SNOMEDCT_US_2015_03_01']
useful_xref = slim_xref.query('resource in @resources')
for row in useful_xref.itertuples():
    dis_adj_list[row.doid_code].add(row.resource_id)
    dis_adj_list[row.resource_id].add(row.doid_code)
    if row.resource == "UMLS" and row.resource_id in umls_to_mesh_1t1:
        dis_adj_list[umls_to_mesh_1t1[row.resource_id]].add(row.resource_id)
        dis_adj_list[row.resource_id].add(umls_to_mesh_1t1[row.resource_id])
# Link each subsumed DOID to its UMLS CUI (via the Wikidata map) in the graph
do_slim['cui'] = do_slim['subsumed_id'].map(lambda d: doid_to_umls.get(d, float('nan')))
do_slim_d = do_slim.dropna(subset=['cui'])
for row in do_slim_d.itertuples():
    dis_adj_list[row.subsumed_id].add(row.cui)
    dis_adj_list[row.cui].add(row.subsumed_id)
    if row.cui in umls_to_mesh_1t1:
        dis_adj_list[umls_to_mesh_1t1[row.cui]].add(row.cui)
        dis_adj_list[row.cui].add(umls_to_mesh_1t1[row.cui])
# slim_id -> human-readable name, used when a component collapses to a slim term
do_slim_terms = do_slim.set_index('slim_id')['slim_name'].to_dict()
slim_ids = set(do_slim_terms.keys())
# ## 6. Make the final map for Diseases and Map them
dis_subnets = get_subnets(dis_adj_list)
len(dis_subnets)
# +
umls_set = set(diseases['id'].dropna()) | set(rels['umls_cui'].dropna())
# Tie-break map: lower-numbered UMLS CUIs get a larger value, so they win
umls_to_val = {u: 9999999-int(u[1:]) for u in umls_set}
mesh_counts = umls_keys + mesh_vals + list(rels['umls_cui'].map(lambda c: umls_to_mesh_1t1.get(c, c)))
mesh_counts = Counter(mesh_counts)
rekeyed_dis_subnets = dict()
for v in dis_subnets.values():
    # If a disease was consolidated under DO-SLIM, take the slim ID and name
    if v & slim_ids:
        new_key = (v & slim_ids).pop()
        rekeyed_dis_subnets[new_key] = v
    else:
        # First take ones in the mesh, then by the highest number of things it consolidated
        # Then take the lowest numbered UMLS ID...
        sort_sub = sorted(list(v), key=lambda k: (k in mesh_set, mesh_counts[k], k in umls_set, umls_to_val.get(k, 0)), reverse=True)
        new_key = sort_sub[0]
        rekeyed_dis_subnets[new_key] = v
# -
# Quick check that supplemental-record ids like this appear among the MeSH values
'C565169' in mesh_vals
# +
# Invert the subnets dict: each member identifier points at its group's key
final_dis_map = {member: key
                 for key, members in rekeyed_dis_subnets.items()
                 for member in members}
# -
# Apply the final map; unmappable diseases keep their original id
diseases['new_id'] = diseases['id'].map(lambda i: final_dis_map.get(i, i))
# See how many instances of diseases mapped to 1 mesh ID had their ID changed through
# SNOMED and DO-SLIM consolidation
print('{} original CUIs'.format(diseases.dropna(subset=['mesh_id']).query('mesh_id != new_id')['id'].nunique()))
print('Mapped to {} MeSH IDs'.format(diseases.dropna(subset=['mesh_id']).query('mesh_id != new_id')['mesh_id'].nunique()))
print('Consolidated to {} unique entities'.format(diseases.dropna(subset=['mesh_id']).query('mesh_id != new_id')['new_id'].nunique()))
# +
def dis_source_map(x):
    """Return the source vocabulary ('MeSH', 'UMLS', or 'DO-Slim') of id *x*."""
    if x in mesh_set:
        return 'MeSH'
    if x in umls_set:
        return 'UMLS'
    if x.startswith('DOID:'):
        return 'DO-Slim'
    # Should be unreachable; flags ids that slipped through every vocabulary
    return 'Uh-Oh'
# Tag each disease with the vocabulary its final id belongs to
diseases['id_source'] = diseases['new_id'].map(lambda x: dis_source_map(x))
# -
diseases['id_source'].value_counts()
pickle.dump(final_dis_map, open('../data/disease_merge_map.pkl', 'wb'))
print('{:.3%} Reduction in Diseases {:,} --> {:,}'.format(
    (diseases.shape[0] - diseases['new_id'].nunique())/diseases.shape[0], diseases.shape[0], diseases['new_id'].nunique()))
# +
# Map indications by UMLS CUI first, then fall back to the SNOMED id where that failed
rels['disease_new_id'] = rels['umls_cui'].map(lambda c: final_dis_map.get(c, c))
print(rels['disease_new_id'].count())
bad_idx = rels[rels['disease_new_id'].isnull()].index
rels.loc[bad_idx, 'disease_new_id'] = rels.loc[bad_idx, 'snomed_conceptid'].map(lambda c: final_dis_map.get(c, float('nan')))
# +
# Coverage of DrugCentral indications by the consolidated disease ids
inds = rels.query('relationship_name == "indication"')
disease_ids_semmed = set(diseases['new_id'])
diseases_in_inds = set(inds['disease_new_id'].dropna())
num_ind_in_semmed = len(diseases_in_inds & disease_ids_semmed)
print('{:.3%} of diseases in DC Indications mapped: {:,} out of {:,}'.format(
    (num_ind_in_semmed / len(diseases_in_inds)), num_ind_in_semmed, len(diseases_in_inds)))
ind_semmed_comp = inds.query('disease_new_id in @disease_ids_semmed').shape[0]
print('{:.3%} of Indications have mappable disease: {:,} out of {:,}'.format(
    (ind_semmed_comp / len(inds)), ind_semmed_comp, len(inds)))
# +
# Fraction of (compound, disease) indication pairs with BOTH endpoints mapped
inds_dd = inds.drop_duplicates(subset=['compound_new_id', 'disease_new_id'])
new_cids = set(drugs['new_id'].unique())
new_dids = set(diseases['new_id'].unique())
inds_in_semmed = inds_dd.query('compound_new_id in @new_cids and disease_new_id in @new_dids')
print('{:.3%} of indications now have both compound and disease mappable {:,} out of {:,}'.format(
    len(inds_in_semmed) / len(inds_dd), len(inds_in_semmed), len(inds_dd)))
# -
# ### Add in Dates for Indications
#
# Since the Indications are pretty much fully mapped to the network and ready to go as a Gold Standard for machine learning, we will map approval date information to the compounds now, so it's available for future analyses.
app = pd.read_csv('../data/drugcentral_approvals_06212018.csv')
app.head()
# +
app = app.rename(columns={'approval': 'approval_date'})
# Keep only the earliest approval date per compound
app = (app.dropna(subset=['approval_date']) # Remove NaN values
       .sort_values('approval_date') # Put the earliest approval_date first
       .groupby('struct_id') # Group by the compound's id
       .first() # And select the first instance of that id
       .reset_index()) # Return struct_id to a column from the index
# -
rels = pd.merge(rels, app[['struct_id', 'approval_date']], how='left', on='struct_id')
rels.head(2)
# Extract the approval year (dates look like ISO 'YYYY-MM-DD' strings here)
idx = rels[~rels['approval_date'].isnull()].index
rels.loc[idx, 'approval_year'] = rels.loc[idx, 'approval_date'].map(lambda s: s.split('-')[0])
rels.head(2)
# ## 7. Rebuild the Nodes
#
# The node CSV will now be rebuilt with all the new ID mappings and corresponding concept names
all_umls = set(nodes['id'])
# Collect every known UMLS CUI for source classification.
# BUG FIX: the original took set() of the queried DataFrame itself, which
# yields its COLUMN NAMES rather than the CUIs -- the 'identifier' column
# must be selected explicitly (same fix as the drug-side set built earlier).
umls_set = set(nodes['id']) | set(dc_maps.query('id_type == "UMLSCUI"')['identifier']) | set(rels['umls_cui'])
def get_source(cid):
    """Return the vocabulary ('MeSH', 'UMLS', or 'DO-Slim') that id *cid* belongs to."""
    if cid in mesh_set:
        return 'MeSH'
    if cid in umls_set:
        return 'UMLS'
    if cid.startswith('DOID:'):
        return 'DO-Slim'
    # Should be unreachable; marks ids that match no known vocabulary
    return 'problem...'
pickle.dump(umls_set, open('../data/umls_id_set.pkl', 'wb'))
pickle.dump(mesh_set, open('../data/mesh_id_set.pkl', 'wb'))
# +
# Rebuild all non-drug, non-disease nodes with their (possibly MeSH) final ids
new_nodes = nodes.query('label not in {}'.format(['Chemicals & Drugs', 'Disorders'])).copy()
new_nodes['new_id'] = new_nodes['mesh_id'].fillna(new_nodes['id'])
new_nodes['id_source'] = new_nodes['new_id'].apply(lambda c: get_source(c))
new_nodes['id_source'].value_counts()
# -
drug_dis = pd.concat([drugs, diseases])
curr_map = drug_dis.set_index('id')['new_id'].to_dict()
# Find new_ids claimed by more than one semantic type (drug AND disease)
idx = drug_dis.groupby('new_id')['label'].nunique() > 1
problems = idx[idx].index.values
print(len(problems))
# +
# Resolve new_ids shared between a drug and a disease: the drug rows keep the
# merged (MeSH) id, while the disease rows fall back to one of their CUIs.
remap = dict()
grpd = drug_dis.query('new_id in @problems').groupby('new_id')
for grp, df in grpd:
    for labels in df['label'].unique():
        curr_label = df.query('label == @labels')['id'].values
        # Keep the MeSH Map for the New ID if its a Drug
        # BUG FIX: the label was misspelled 'Chemcials & Drugs', so this branch
        # never matched and drug rows were remapped to a CUI like diseases.
        if labels == 'Chemicals & Drugs':
            for c in curr_label:
                remap[c] = grp
        # Use a random Disease CUI if its a Disease
        else:
            new_cui = curr_label[0]
            for c in curr_label:
                remap[c] = new_cui
drug_dis['new_id'] = drug_dis['id'].map(lambda i: remap.get(i, curr_map[i]))
# -
# #### Go back and Fix the Indications
#
# We just changed 4 Diseases back to CUIs, so must ensure those don't affect the earlier mappings to indications
# Verify that none of the re-assigned ids appear in the indication table
if rels.query('disease_new_id in @problems').shape[0] > 0:
    print('This is a problem')
else:
    print('This is a non-issue so no need to fix anything')
# +
new_nodes = pd.concat([new_nodes, drug_dis])
new_nodes = new_nodes.sort_values('label')
# Look for remaining new_ids shared across different semantic types
idx = new_nodes.groupby('new_id')['label'].nunique() > 1
problems = idx[idx].index.values
print(len(problems))
# -
new_nodes.query('new_id in {}'.format(problems.tolist())).sort_values('new_id').head(10)
# ### Fix other node-type conflicts
#
# Since the UMLS to MeSH map has no regard for semantic type of the node, some concepts may have been condensed across semantic types.
#
# All the Drug and Disease overlaps should be solved, so now move onto other nodetype conflicts.
#
# Conflicts will be solved in this manner:
#
# 1. If one of the types is a Drug or a Disease, that one gets the MeSH ID
# 2. If no Drug or Disease, the one that has the largest number of nodes will recieve the MeSH ID
# 3. Remaining Nodetypes will be separated and assume the CUI of the node with the highest degree of connection in the network
#
#
# Take, for example, `Ivalon`. It has 4 original CUIs that mapped to the same MeSH ID. Two of which carried the semantic type `Chemicals & Drugs` and two `Devices`. The mesh_id will be kept for the `Chemicals & Drugs` version of the Nodes, which will be merged. The `Devices` versions of the nodes will be merged, and whichever CUI has the greatest number of edges will be the CUI used for this merged node.
#
# `Chemicals & Drugs` and `Disorders` will always take the meshID before other semantic types. Otherwise, the MeSH id will be assigned to the semantic type that had the most CUIs merged into one node. The other semantic types will again have the CUI selected based on edge count.
# Load the raw edges; node degree (endpoint counts) breaks CUI ties below.
# NOTE(review): the eval converter executes the pmids column's content --
# acceptable only because this CSV is trusted, locally-generated data.
edges = gt.remove_colons(pd.read_csv('../data/edges_VER31_R.csv', converters={'pmids':eval}))
cui_counts = edges['start_id'].value_counts().add(edges['end_id'].value_counts(), fill_value=0).to_dict()
# +
# For now, just return conflicting nodes to their old semantic type
grpd = new_nodes.query('new_id in @problems').groupby('new_id')
remap = dict()
for msh_id, df in tqdm(grpd, total=len(grpd)):
    # Get all the labels and counts for those labels
    labels = df['label'].unique().tolist()
    counts = df['label'].value_counts().to_dict()
    # Sort by the number of different nodes mapped to that label
    labels = sorted(labels, key=lambda l: counts[l], reverse=True)
    # Chemicals and Drugs and Diseases have higher priorities in the context of machine learning
    # So any item that could be either of those types will be set to them automatically.
    drug_or_dis = False
    # Select the Chemicals & Drugs nodes to have the MeSH ID if possible
    if 'Chemicals & Drugs' in labels:
        labels.remove('Chemicals & Drugs')
        curr_label = df.query('label == "Chemicals & Drugs"')['id'].values
        drug_or_dis = True
        for c in curr_label:
            remap[c] = msh_id
    # Otherwise, elect the Disorders nodes to have the MeSH ID if possible
    elif 'Disorders' in labels:
        labels.remove('Disorders')
        curr_label = df.query('label == "Disorders"')['id'].values
        drug_or_dis = True
        for c in curr_label:
            remap[c] = msh_id
    # Finally assign a merged CUI based on edge counts
    for i, label in enumerate(labels):
        curr_label = df.query('label == @label')['id'].values
        # Give highest counts of nodes the MeSH ID, if not already assigned to a Drug or Disease
        if i == 0 and not drug_or_dis:
            new_cui = msh_id
        else:
            # For types that won't get a MeSH ID,
            # get the CUI that has largest number of instances in the edges
            new_cui = sorted(curr_label, key=lambda v: cui_counts.get(v, 0), reverse=True)[0]
        for c in curr_label:
            remap[c] = new_cui
# +
# Perform the new Mapping
curr_map = new_nodes.set_index('id')['new_id'].to_dict()
# BUG FIX: map over new_nodes' own 'id' column rather than nodes['id'];
# the old code only produced correct results by accident of shared row
# indexes between `nodes` and `new_nodes` during the aligned assignment.
new_nodes['new_id'] = new_nodes['id'].map(lambda i: remap.get(i, curr_map[i]))
# Ensure there are now no remaining cross-type conflicts
idx = new_nodes.groupby('new_id')['label'].nunique() > 1
problems = idx[idx].index.values
print(len(problems))
# +
num_old_ids = new_nodes['id'].nunique()
num_new_ids = new_nodes['new_id'].nunique()
print('{:.3%} reduction in the number of NODES\n{:,} --> {:,}'.format((num_old_ids-num_new_ids)/num_old_ids, num_old_ids, num_new_ids))
# -
# Re-derive the source vocabulary now that conflicts are resolved
new_nodes['id_source'] = new_nodes['new_id'].apply(lambda c: get_source(c))
new_nodes['id_source'].value_counts()
# +
# Build one big id -> name dict from every source (later dicts win on collision)
cui_to_name = nodes.set_index('id')['name'].to_dict()
cui_to_name = {**cui_to_name, **rels.set_index('umls_cui')['concept_name'].to_dict()}
cui_to_name = {**cui_to_name, **rels.set_index('compound_new_id')['c_name'].to_dict()}
msh_to_name = pickle.load(open('../data/MeSH_DescUID_to_Name.pkl', 'rb'))
# The mappings from UMLS are less reliable, so use the ones that came from MeSH itself first
msh_to_name = {**pickle.load(open('../data/MeSH_id_to_name_via_UMLS.pkl', 'rb')), **msh_to_name}
id_to_name = {**struct_id_to_name, **do_slim_terms, **cui_to_name, **msh_to_name}
# -
# All new IDs should have a mapped name
set(new_nodes['new_id']).issubset(set(id_to_name.keys()))
new_nodes['name'] = new_nodes['new_id'].map(lambda i: id_to_name[i])
pickle.dump(id_to_name, open('../data/all_ids_to_names.pkl', 'wb'))
final_node_map = new_nodes.set_index('id')['new_id'].to_dict()
# ## 8. Map all the edges
#
# Now that we have a finalized original to new ID map, we can straight map all the ids in the edges file.
#
# If any edges are now duplicated, PMIDs in support for those edges will be merged into a set.
# Every edge endpoint gets its final merged id
edges['start_id'] = edges['start_id'].map(lambda c: final_node_map[c])
edges['end_id'] = edges['end_id'].map(lambda c: final_node_map[c])
# +
# %%time
num_before = len(edges)
# Some edges now duplicated, de-duplicate and combine pmids
grpd = edges.groupby(['start_id', 'end_id', 'type'])
edges = grpd['pmids'].apply(lambda Series: set.union(*Series.values)).reset_index()
# re-count the pmid numbers
edges['n_pmids'] = edges['pmids'].apply(len)
num_after = len(edges)
# -
print('{:,} Edges before node consolidation'.format(num_before))
print('{:,} Edges after node consolidation'.format(num_after))
print('A {:.3%} reduction in edges'.format((num_before - num_after) / num_before))
# ### Save the files for the network
# +
# Get rid of the old ids in the nodes
new_nodes.drop('id', axis=1, inplace=True)
new_nodes = new_nodes.rename(columns={'new_id': 'id'})[['id', 'name', 'label', 'id_source']]
new_nodes = new_nodes.drop_duplicates(subset='id')
# Sort values before writing to disk
new_nodes = new_nodes.sort_values('label')
edges = edges.sort_values('type')
# Add in colons required by neo4j
new_nodes = gt.add_colons(new_nodes)
edges = gt.add_colons(edges)
new_nodes.to_csv('../data/nodes_VER31_R_nodes_consolidated.csv', index=False)
edges.to_csv('../data/edges_VER31_R_nodes_consolidated.csv', index=False)
pickle.dump(final_node_map, open('../data/node_id_merge_map.pkl', 'wb'))
# -
# ### Save the relationship files for a Machine Learning Gold Standard
rels.head(2)
# Do some renaming of the columns before saving
rels = rels.rename(columns={'c_name': 'compound_name',
                            'concept_name': 'disease_name',
                            'compound_new_id': 'compound_semmed_id',
                            'disease_new_id': 'disease_semmed_id'})
# +
# Only want indications for the gold standard
# Keep Duplicates in RELs just in case they're insightful, but indications should have no dups.
inds = rels.query('relationship_name == "indication"').drop_duplicates(subset=['compound_semmed_id', 'disease_semmed_id'])
rels.to_csv('../data/gold_standard_relationships_nodemerge.csv', index=False)
inds.to_csv('../data/indications_nodemerge.csv', index=False)
# ---- End of notebook: 1_build/02-Merge_nodes_via_ID_xrefs-(MeSH-DrugCentral-DO_Slim).ipynb ----
# ---- The following cells belong to a separate, concatenated notebook (Spark NLP Basics). ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "628e6c15-c92f-4817-a821-baf9ebb1dc66", "showTitle": false, "title": ""}
# 
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "84f4c163-0b5e-40c6-a467-5654304fd991", "showTitle": false, "title": ""}
# # 1. Spark NLP Basics v2.6.3
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0c9c60fc-f40a-4cfb-9c29-c39700022547", "showTitle": false, "title": ""}
import sparknlp
# Print environment versions; `spark` is the Databricks-provided SparkSession
print("Spark NLP version", sparknlp.version())
print("Apache Spark version:", spark.version)
spark
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "7d2861b6-7a96-42d5-a08c-c1ffe849e3ff", "showTitle": false, "title": ""}
# ## Using Pretrained Pipelines
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "96bcbc3f-cd8b-4e12-ba2e-3417f353270e", "showTitle": false, "title": ""}
# https://github.com/JohnSnowLabs/spark-nlp-models
#
# https://nlp.johnsnowlabs.com/models
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c4ea7028-fb15-4d1f-8ef0-93d98d890d3e", "showTitle": false, "title": ""}
from sparknlp.pretrained import PretrainedPipeline
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "08a72a0e-e0a9-4afe-a4e0-fa6ff9f8e2bf", "showTitle": false, "title": ""}
# Sample text; the misspellings ("persn", "intersting", "brothrs") are
# deliberate so the pipeline's spell checker has something to correct.
testDoc = '''Peter is a very good persn.
My life in Russia is very intersting.
John and Peter are brothrs. However they don't support each other that much.
<NAME> is no longer happy. He has a good car though.
Europe is very culture rich. There are huge churches! and big houses!
'''
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "cd9ec787-f5a6-4e7b-af19-13f318ca6059", "showTitle": false, "title": ""}
testDoc
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1a8f5d70-d2fd-41e2-a68c-d555fee4379c", "showTitle": false, "title": ""}
# ### Explain Document ML
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "dffc7f2a-54a1-41ae-9ee7-33d8d4d32345", "showTitle": false, "title": ""}
# **Stages**
# - DocumentAssembler
# - SentenceDetector
# - Tokenizer
# - Lemmatizer
# - Stemmer
# - Part of Speech
# - SpellChecker (Norvig)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "44897f3e-0042-4bf7-afec-201d6ce987a4", "showTitle": false, "title": ""}
# Download and run the pretrained 'explain_document_ml' pipeline
pipeline = PretrainedPipeline('explain_document_ml', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "b0259739-47b3-4be7-bf92-971c5f2be67d", "showTitle": false, "title": ""}
pipeline.model.stages
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a0290015-555a-46ac-b03e-c1a79b7beedc", "showTitle": false, "title": ""}
# annotate() returns a dict of annotator-name -> list of string results
result = pipeline.annotate(testDoc)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2fd22cab-66ef-45c6-abc7-88ade407f906", "showTitle": false, "title": ""}
result.keys()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "9822aa96-02e4-4e19-aaf3-5ce5e650ce55", "showTitle": false, "title": ""}
result['sentence']
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4651f498-b9d9-4822-8abb-adc72390d0ff", "showTitle": false, "title": ""}
result['token']
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e241c6a6-9aca-4514-ba20-49585e3fdcd4", "showTitle": false, "title": ""}
list(zip(result['token'], result['pos']))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ecd95818-04c6-4af0-ba6e-0c25fe2748ca", "showTitle": false, "title": ""}
list(zip(result['token'], result['lemmas'], result['stems'], result['spell']))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "bcbbf083-2c83-4020-b0d0-f7ba5df46031", "showTitle": false, "title": ""}
import pandas as pd
# Tabulate each token alongside its per-annotator outputs
df = pd.DataFrame({'token':result['token'],
                   'corrected':result['spell'], 'POS':result['pos'],
                   'lemmas':result['lemmas'], 'stems':result['stems']})
df
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a3778492-ad52-4440-a891-e04add3ef54c", "showTitle": false, "title": ""}
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e9151f71-2f8b-4516-aa8f-9aa76a04d8d4", "showTitle": false, "title": ""}
# ### Explain Document DL
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "041e9201-c34a-4490-ad73-603605246ec2", "showTitle": false, "title": ""}
# **Stages**
# - DocumentAssembler
# - SentenceDetector
# - Tokenizer
# - NER (NER with GloVe 100D embeddings, CoNLL2003 dataset)
# - Lemmatizer
# - Stemmer
# - Part of Speech
# - SpellChecker (Norvig)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "cddce042-3e63-42bd-bb79-7632508272da", "showTitle": false, "title": ""}
# Load the DL pipeline: same stages as explain_document_ml plus an NER model
# (GloVe 100d embeddings, CoNLL-2003 — per the markdown cell above).
pipeline_dl = PretrainedPipeline('explain_document_dl', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "574057e6-6679-4c6e-9f1a-3f74f438b2d6", "showTitle": false, "title": ""}
pipeline_dl.model.stages
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "182e8fa6-c455-47d8-8a1e-bdcc60cdf721", "showTitle": false, "title": ""}
# Storage reference of the embeddings used by the NER stage.
pipeline_dl.model.stages[-2].getStorageRef()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8edb6613-e98e-4639-9294-c3360bd44dba", "showTitle": false, "title": ""}
# Tag set the NER model can predict.
pipeline_dl.model.stages[-2].getClasses()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "84d9775e-ff03-49e8-9bdd-32a436123f71", "showTitle": false, "title": ""}
result = pipeline_dl.annotate(testDoc)
result.keys()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0d137dc6-b395-419e-a1b1-ec906f91769e", "showTitle": false, "title": ""}
result['entities']
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8d361a4d-4caf-454e-861c-1b137fe931bc", "showTitle": false, "title": ""}
# NOTE(review): this pipeline exposes singular keys ('lemma', 'stem',
# 'checked'), unlike the ML pipeline's plural ones ('lemmas', 'stems', 'spell').
df = pd.DataFrame({'token':result['token'], 'ner_label':result['ner'],
                   'spell_corrected':result['checked'], 'POS':result['pos'],
                   'lemmas':result['lemma'], 'stems':result['stem']})
df
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3aa83069-7a21-49e9-9fff-67787cb93c67", "showTitle": false, "title": ""}
# ### Recognize Entities DL
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "456a6628-0d1c-4bab-939e-c999f2f8dc07", "showTitle": false, "title": ""}
recognize_entities = PretrainedPipeline('recognize_entities_dl', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0033f3fd-68ab-484f-95a3-63ed03dc6d33", "showTitle": false, "title": ""}
# Sample text with deliberate misspellings (persn, intersting, brothrs) —
# reused by the spell-checking demos below; do not "fix" them.
testDoc = '''
Peter is a very good persn.
My life in Russia is very intersting.
John and Peter are brothrs. However they don't support each other that much.
<NAME> is no longer happy. He has a good car though.
Europe is very culture rich. There are huge churches! and big houses!
'''
result = recognize_entities.annotate(testDoc)
# Token / NER-tag pairs.
list(zip(result['token'], result['ner']))
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0f8872b5-52ee-412d-98f2-6f51257469c1", "showTitle": false, "title": ""}
# ### Clean Stop Words
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "55028886-2d19-4282-ba10-88d7f2dad980", "showTitle": false, "title": ""}
# Pipeline that removes stop words from the token stream.
clean_stop = PretrainedPipeline('clean_stop', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6b624f7d-425d-40cc-8084-9d47c9b8f37a", "showTitle": false, "title": ""}
result = clean_stop.annotate(testDoc)
result.keys()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e8cd6afc-48b9-41f3-8154-e6720f4dac76", "showTitle": false, "title": ""}
# Re-join the tokens that survive stop-word removal.
' '.join(result['cleanTokens'])
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ea8d859a-8a84-47b7-ac0d-39c184034cdc", "showTitle": false, "title": ""}
# ### Clean Slang
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "ec94477d-24b7-4368-8b66-abedf3d54643", "showTitle": false, "title": ""}
# Normalize slang/abbreviations to standard forms.
clean_slang = PretrainedPipeline('clean_slang', lang='en')
result = clean_slang.annotate(' Whatsup bro, call me ASAP')
' '.join(result['normal'])
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "235f65a3-1317-4cdd-a98e-69d0c926dc4a", "showTitle": false, "title": ""}
# ### Spell Checker
#
# (Norvig Algo)
#
# ref: https://norvig.com/spell-correct.html
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "b04a2589-ec8a-483b-9513-721f686b1c6e", "showTitle": false, "title": ""}
# Norvig-algorithm spell checker (see the reference link in the cell above).
spell_checker = PretrainedPipeline('check_spelling', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a383a3f5-99d9-4891-8ee8-6d437f207079", "showTitle": false, "title": ""}
# Deliberately misspelled sample text (same as the NER demo above).
testDoc = '''
Peter is a very good persn.
My life in Russia is very intersting.
John and Peter are brothrs. However they don't support each other that much.
<NAME> is no longer happy. He has a good car though.
Europe is very culture rich. There are huge churches! and big houses!
'''
result = spell_checker.annotate(testDoc)
result.keys()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1ecf9e7f-a77f-4612-82a5-7a1f80669fa3", "showTitle": false, "title": ""}
# Original token next to its corrected form.
list(zip(result['token'], result['checked']))
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "87f21d96-84d1-4fe2-a079-749440e44717", "showTitle": false, "title": ""}
# ### Spell Checker DL
#
# https://medium.com/spark-nlp/applying-context-aware-spell-checking-in-spark-nlp-3c29c46963bc
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "9fb34ebb-87f9-4537-a7c6-4f16be60540d", "showTitle": false, "title": ""}
# Context-aware DL spell checker (see the blog link in the cell above).
spell_checker_dl = PretrainedPipeline('check_spelling_dl', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "01a6d58d-4bd1-416a-9465-645d8b5f2438", "showTitle": false, "title": ""}
text = 'We will go to swimming if the ueather is nice.'
result = spell_checker_dl.annotate(text)
list(zip(result['token'], result['checked']))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "92515f7e-609d-48c0-af32-1e4a3ed3ebae", "showTitle": false, "title": ""}
result.keys()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c327f174-3f8f-4c19-8ec7-3a22372df146", "showTitle": false, "title": ""}
# check for the different occurrences of the word "ueather": the DL model
# should pick a different correction depending on sentence context.
examples = ['We will go to swimming if the ueather is nice.',\
            "I have a black ueather jacket, so nice.",\
            "I introduce you to my sister, she is called ueather."]
results = spell_checker_dl.annotate(examples)
for result in results:
    print (list(zip(result['token'], result['checked'])))
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "910fac54-521d-42cb-8c3c-f9eeae5287c4", "showTitle": false, "title": ""}
# Show only the tokens the checker actually changed, per input document.
for result in results:
    print (result['document'],'>>',[pairs for pairs in list(zip(result['token'], result['checked'])) if pairs[0]!=pairs[1]])
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1c9b786c-4515-4aa5-ba07-6a7bf4afca18", "showTitle": false, "title": ""}
# if we had tried the same with spell_checker (previous, context-free version)
results = spell_checker.annotate(examples)
for result in results:
    print (list(zip(result['token'], result['checked'])))
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6c9072a3-e43e-416e-8669-5f3645ff3986", "showTitle": false, "title": ""}
# ### Parsing a list of texts
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "9dcac600-df00-496b-9e72-08252c181b65", "showTitle": false, "title": ""}
# annotate() also accepts a list of texts (misspellings here are deliberate).
testDoc_list = ['French author who helped pioner the science-fiction genre.',
                'Verne wrate about space, air, and underwater travel before navigable aircrast',
                'Practical submarines were invented, and before any means of space travel had been devised.']
testDoc_list
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "961e5240-1418-492a-b743-983cb07550cc", "showTitle": false, "title": ""}
pipeline = PretrainedPipeline('explain_document_ml', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "854a49d7-9c2c-47d5-83e2-e05401258598", "showTitle": false, "title": ""}
# One result dict per input text.
result_list = pipeline.annotate(testDoc_list)
len (result_list)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "06e85591-3fec-4a19-9639-fbb923c666b7", "showTitle": false, "title": ""}
result_list[0]
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e9ee192b-8b9f-4df7-9535-f6d3520440d7", "showTitle": false, "title": ""}
# ### Using fullAnnotate to get more details
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6e0c369f-2c48-4835-8a95-c8c0fdcb5173", "showTitle": false, "title": ""}
# ```
# annotatorType: String,
# begin: Int,
# end: Int,
# result: String, (this is what annotate returns)
# metadata: Map[String, String],
# embeddings: Array[Float]
# ```
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "23888403-bd94-4e7f-990c-35cc5fa1ad0e", "showTitle": false, "title": ""}
text = '<NAME> is a nice guy and lives in New York'
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "5c016664-a824-4aeb-b242-a5c58fd0e14a", "showTitle": false, "title": ""}
# pipeline_dl >> explain_document_dl
# fullAnnotate keeps the complete Annotation objects (annotatorType,
# begin/end offsets, result, metadata, embeddings — see the markdown cell
# above) instead of only the result strings.
detailed_result = pipeline_dl.fullAnnotate(text)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f525febe-36b9-4171-a175-3fe528baae8c", "showTitle": false, "title": ""}
detailed_result
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e2454e42-3147-471f-9f4c-e24b271c04be", "showTitle": false, "title": ""}
detailed_result[0]['entities']
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "34a93877-8bee-4515-8653-cf312a7d3f13", "showTitle": false, "title": ""}
detailed_result[0]['entities'][0].result
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "730b380e-b451-4d96-893a-23c310c2f7cb", "showTitle": false, "title": ""}
# Collect each entity chunk's text and its label from the annotation metadata.
chunks=[]
entities=[]
for n in detailed_result[0]['entities']:
    chunks.append(n.result)
    entities.append(n.metadata['entity'])
df = pd.DataFrame({'chunks':chunks, 'entities':entities})
df
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1e23a4d4-bf41-4a34-b993-20ea68b65265", "showTitle": false, "title": ""}
# One row per token: sentence id, surface form, character offsets, POS, NER.
tuples = []
for x,y,z in zip(detailed_result[0]["token"], detailed_result[0]["pos"], detailed_result[0]["ner"]):
    tuples.append((int(x.metadata['sentence']), x.result, x.begin, x.end, y.result, z.result))
df = pd.DataFrame(tuples, columns=['sent_id','token','start','end','pos', 'ner'])
df
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "4aed140f-3169-44af-85aa-de700e15d88a", "showTitle": false, "title": ""}
# ### Use pretrained match_chunk Pipeline for Individual Noun Phrase
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f4dd2e48-ba86-495a-b7f8-624a232e62cf", "showTitle": false, "title": ""}
# **Stages**
# - DocumentAssembler
# - SentenceDetector
# - Tokenizer
# - Part of Speech
# - Chunker
#
# Pipeline:
#
# - The pipeline uses regex `<DT>?<JJ>*<NN>+`
# - which states that whenever the chunk finds an optional determiner (DT) followed by any number of adjectives (JJ) and then a noun (NN) then the Noun Phrase(NP) chunk should be formed.
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f939691c-6a48-4fc1-977c-4b90c30abe21", "showTitle": false, "title": ""}
# Noun-phrase chunker driven by the regex <DT>?<JJ>*<NN>+ (see markdown above).
pipeline = PretrainedPipeline('match_chunks', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "401dd683-21e3-47db-9041-951203cd8b53", "showTitle": false, "title": ""}
pipeline.model.stages
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "bfdd65ef-dbc9-4dec-bd10-474bde84c5dd", "showTitle": false, "title": ""}
result = pipeline.annotate("The book has many chapters") # single noun phrase
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c40e566b-f378-4eac-8175-70f9b81eb063", "showTitle": false, "title": ""}
result
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a2bf6b29-6fa4-435c-8152-9e801d11529a", "showTitle": false, "title": ""}
result['chunk']
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "e5d71409-ff10-45d8-a9e0-c77fe91a3f50", "showTitle": false, "title": ""}
result = pipeline.annotate("the little yellow dog barked at the cat") # multiple noun phrases
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2838e3ff-4ea3-475f-877b-9c73fb1b9886", "showTitle": false, "title": ""}
result
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "df9a2133-5462-42db-acc3-0e5ac130b533", "showTitle": false, "title": ""}
result['chunk']
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c87d591a-4d68-467d-91d1-5cf6c274f768", "showTitle": false, "title": ""}
# ### Extract exact dates from referential date phrases
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "1cc528be-7dd6-4771-be2c-b8bcc508acb2", "showTitle": false, "title": ""}
# Resolves relative date phrases ("yesterday", "next week") to concrete dates.
pipeline = PretrainedPipeline('match_datetime', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "feb0860b-5db3-418c-a4f0-b43ef4aafa25", "showTitle": false, "title": ""}
result = pipeline.annotate("I saw him yesterday and he told me that he will visit us next week")
result
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "69d78e5e-7991-429c-8d76-b0ea1b4d4690", "showTitle": false, "title": ""}
detailed_result = pipeline.fullAnnotate("I saw him yesterday and he told me that he will visit us next week")
detailed_result
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2c163fda-bf15-4e60-8367-5602171b7bb0", "showTitle": false, "title": ""}
# Token table with sentence ids and character offsets.
tuples = []
for x in detailed_result[0]["token"]:
    tuples.append((int(x.metadata['sentence']), x.result, x.begin, x.end))
df = pd.DataFrame(tuples, columns=['sent_id','token','start','end'])
df
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6f7cf4f0-1d80-42e1-b32b-8b3c03d46566", "showTitle": false, "title": ""}
# ### Sentiment Analysis
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "410b66c6-952b-428f-ac06-5d980c451d87", "showTitle": false, "title": ""}
# #### Vivek algo
#
# paper: `Fast and accurate sentiment classification using an enhanced Naive Bayes model`
#
# https://arxiv.org/abs/1305.6143
#
# code `https://github.com/vivekn/sentiment`
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "905da929-d177-4deb-b58e-37337f14f565", "showTitle": false, "title": ""}
# Rule/Naive-Bayes sentiment (Vivek algorithm — see links in the cell above).
sentiment = PretrainedPipeline('analyze_sentiment', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "23b7b7e1-3548-45ac-aa08-6d15d49b2f04", "showTitle": false, "title": ""}
result = sentiment.annotate("The movie I watched today was not a good one")
result['sentiment']
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "d4b82155-f4b0-4d51-ad4c-ead2f984378f", "showTitle": false, "title": ""}
# #### DL version (trained on imdb)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "cc186f11-dad8-4e1d-afa7-aa1eee4ae6a4", "showTitle": false, "title": ""}
sentiment_imdb = PretrainedPipeline('analyze_sentimentdl_use_imdb', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "bb228ea9-9255-4add-b030-58b24d47b273", "showTitle": false, "title": ""}
sentiment_imdb_glove = PretrainedPipeline('analyze_sentimentdl_glove_imdb', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "f8a945be-01b0-40d2-9f89-86323e11dbf7", "showTitle": false, "title": ""}
comment = '''
It's a very scary film but what impressed me was how true the film sticks to the original's tricks; it isn't filled with loud in-your-face jump scares, in fact, a lot of what makes this film scary is the slick cinematography and intricate shadow play. The use of lighting and creation of atmosphere is what makes this film so tense, which is why it's perfectly suited for those who like Horror movies but without the obnoxious gore.
'''
result = sentiment_imdb_glove.annotate(comment)
result['sentiment']
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "594ea587-012f-4c1b-b6d4-c39ca882f831", "showTitle": false, "title": ""}
# fullAnnotate returns the complete sentiment Annotation objects.
sentiment_imdb_glove.fullAnnotate(comment)[0]['sentiment']
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8808a4cc-3f89-4c7f-860d-6101c8a93575", "showTitle": false, "title": ""}
# #### DL version (trained on twitter dataset)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "6de3e4a4-1861-4d9e-9d8b-371814eabad5", "showTitle": false, "title": ""}
sentiment_twitter = PretrainedPipeline('analyze_sentimentdl_use_twitter', lang='en')
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "2a2f7d51-e41a-4906-b6aa-7d4414731684", "showTitle": false, "title": ""}
result = sentiment_twitter.annotate("The movie I watched today was not a good one")
result['sentiment']
# + [markdown] application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8f6b039f-bb45-45d7-a26c-fd0e97d61800", "showTitle": false, "title": ""}
# End of Notebook #1
|
tutorials/Certification_Trainings/Public/databricks_notebooks/2.6/1.SparkNLP_Basics_v2.6.3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.collections as mc
import numpy as np
import random as rand
# ### simple random walk
# +
# Animate a random walk on a 1x20 path graph: one figure per step, coloring
# the previous node (2), current node (1) and the traversed edge.
g = nx.grid_graph([1,20])
state = rand.choice(list(g.nodes()))
# NOTE(review): the random start above is immediately overwritten by a fixed
# start — keep one or the other.
state = (9, 0)
steps = 5
for i in range(steps):
    node_colors = {x:0 for x in g.nodes}
    edge_colors = {x:0 for x in g.edges}
    old_state = state
    state = rand.choice(list(g.neighbors(state)))
    # Mark the edge in both orientations, since the stored edge key may be
    # in either order.
    edge_colors[(old_state, state)]= 1
    edge_colors[(state, old_state)]= 1
    node_colors[old_state] = 2
    node_colors[state]=1
    plt.figure()
    nx.draw(g,pos = {x:x for x in g.nodes()}, node_color = [node_colors[x] for x in g.nodes()],edge_color = [edge_colors[x] for x in g.edges()],
            width =3, cmap="jet")
    plt.show()
# -
# ### random walk on a line
# +
# Biased random walks on the integer line: for each right-step probability k,
# walk `total_length` steps and plot position (x-axis) against time (y-axis).
# initial position
current = 0
total_length = 100
walk_probabilities = [.5, .7, .9]
# random walk
for k in walk_probabilities:
    random_walk = []
    current = 0
    for j in range(total_length):
        alpha = rand.random()
        # Step right with probability k, left otherwise.
        if (alpha < k):
            current += 1
        else:
            current -= 1
        random_walk.append((current, j))
    # Unzip (position, time) pairs for plotting.
    x, y = zip(*random_walk)
    plt.plot(x, y, label='p = {0}'.format(k))
ax = plt.gca()
ax.spines['left'].set_position(('data',0))
plt.xlim([-total_length, total_length])
plt.ylim([1, total_length * 1.1])
plt.legend(loc='upper left')
plt.xlabel("Position")
plt.ylabel("Time")
plt.title("Random walks")
# -
# ### short bursts on a line
# +
"""Show distribution of Z_m, the burst maxes/record highs for each burst."""
# PARAMS
m = 100
num_bursts = 1000
total_length = m * num_bursts
# initial position
current = 0
random_walk = []
burst_maxes = []
for j in range(num_bursts):
burst = []
current = 0
burst_max = 0
for i in range(m):
alpha = rand.random()
if (alpha > .5):
current += 1
else:
current -= 1
burst.append(current)
burst_max = max(burst_max, current)
burst_maxes.append(burst_max)
plt.hist(burst_maxes)
plt.xlabel("Burst max")
plt.ylabel("Frequency")
plt.title("Burst max for {0} bursts of length {1}".format(num_bursts, m))
#plt.savefig("Output/burst_max_dist.png")
plt.show()
# +
"""Visualize short burst run."""
# PARAMS
m = 10
num_bursts = 10
total_length = m * num_bursts
# probability of moving right
burst_probabilities = [.5]
# initial position
current = 0
for k in burst_probabilities:
t = 0
burst_max = 0 # position, time
burst_maxes = []
random_walk = []
for j in range(num_bursts):
burst = [(burst_max, t)]
current = burst_max
max_time = 0
for i in range(m):
alpha = rand.random()
if (alpha < k):
current += 1
else:
current -= 1
random_walk.append((current, t))
burst.append((current, t))
if (current > burst_max):
burst_max = max(burst_max, current)
max_time = t
t+=1
x, y = zip(*random_walk)
#plt.plot(x, y, label='p = {0}'.format(k))
#dt=np.dtype('int, ')
lis = [list(elem) for elem in burst]
print(lis)
lc = mc.LineCollection(lis, linewidths=2)
#fig, ax = pl.subplots()
ax.add_collection(lc)
ax = plt.gca()
ax.spines['left'].set_position(('data',0))
plt.xlim([-total_length, total_length])
plt.ylim([0, total_length * 1.1])
plt.legend(loc='upper left')
plt.xlabel("Position")
plt.ylabel("Time")
plt.title("Short bursts")
plt.show()
# +
"""Compare random walks to short bursts on a line."""
# PARAMS
m = [1, 10, 100]
num_bursts = [1000, 100, 10]
#total_length = m * num_bursts
total_length = 1000
# proability of moving right
walk_probabilities = [.5, .51, .6, .7, .8]
# probability of moving right
burst_probabilities = [.5]
# initial position
current = 0
# simple random walk
for k in walk_probabilities:
random_walk = []
current = 0
for j in range(total_length):
alpha = rand.random()
if (alpha < k):
current += 1
else:
current -= 1
random_walk.append((current, j))
x, y = zip(*random_walk)
plt.plot(x, y, label='r.w., p = {0}'.format(k))
# short burst
for r in range(len(num_bursts)):
for k in burst_probabilities:
t = 0
burst_max = 0
burst_maxes = []
random_walk = []
for j in range(num_bursts[r]):
burst = []
current = burst_max
for i in range(m[r]):
t+=1
alpha = rand.random()
if (alpha < k):
current += 1
else:
current -= 1
random_walk.append((current, t))
burst_max = max(burst_max, current)
x, y = zip(*random_walk)
plt.plot(x, y, label='s.b., {0} steps'.format(m[r], k))
ax = plt.gca()
ax.spines['left'].set_position(('data', 0))
plt.xlim([-total_length, total_length])
plt.ylim([1, total_length * 1.05])
plt.legend(loc='upper left')
plt.xlabel("Position")
plt.ylabel("Time").set_position(('data', .3))
#plt.title("Random walks of length {0} and {1} short bursts of length {2}".format(total_length, num_bursts, m))
plt.title("Random walks and short bursts on a line")
#plt.savefig("Output/random_walks_bursts.png")
plt.show()
# -
# ### random walk on a grid
# +
"""Simple random walk on a grid."""
# PARAMS
size = 11
steps = 100
g = nx.grid_graph([size, size])
origin = ((size - 1)/2, (size - 1)/2)
state = origin
path = []
h_distances = []
distances = []
for i in range(steps):
x = state[0] - origin[0]
y = state[1] - origin[1]
path.append(state)
h_distances.append([np.abs(x), i])
distances.append([np.sqrt(np.square(x) + np.square(y)), i])
state = rand.choice(list(g.neighbors(state)))
# horizontal distances from origin
x, y = zip(*h_distances)
plt.plot(x, y, label='horizontal')
# true distance from origin
x, y = zip(*distances)
plt.plot(x, y, label='Euclidean')
ax = plt.gca()
ax.spines['left'].set_position(('data',0))
plt.xlim([0, size])
plt.ylim([.1, steps * 1.1])
plt.legend(loc='upper right')
plt.xlabel("Distance from origin")
plt.ylabel("Time")
plt.title("Random walk on grid")
# -
# +
"""Biased random walk on a grid.
Biased to prefer moving away from the origin,
with probabilities p for moving up/down and left/right,
depending on quadrant, and (1-p)/2 for each remaining move.
"""
# PARAMS
size = 11
steps = 100
p = 1/3
g = nx.grid_graph([size, size])
origin = ((size - 1)/2, (size - 1)/2)
state = origin
path = []
h_distances = []
distances = []
def move(state, direction):
new_state = state
if (direction == "right"):
new_state = (state[0]+1, state[1])
elif (direction == "left"):
new_state = (state[0]-1, state[1])
elif (direction == "up"):
new_state = (state[0], state[1]+1)
elif (direction == "down"):
new_state = (state[0], state[1]-1)
return new_state
for i in range(steps):
x = state[0] - origin[0]
y = state[1] - origin[1]
path.append((x, y))
h_distances.append([np.abs(x), i])
distances.append([np.sqrt(np.square(x) + np.square(y)), i])
node_colors = {x:0 for x in g.nodes}
edge_colors = {x:0 for x in g.edges}
old_state = state
alpha = rand.random()
if (state == origin):
state = rand.choice(list(g.neighbors(state)))
if (x >= 0 and y > 0):
if (alpha < p):
state = move(state, "right")
elif (alpha < 2*p):
state = move(state, "up")
elif (alpha < p+.5):
state = move(state, "left")
else:
state = move(state, "down")
elif (x < 0 and y >= 0 ):
if (alpha < p):
state = move(state, "left")
elif (alpha < 2*p):
state = move(state, "up")
elif (alpha < p+.5):
state = move(state, "right")
else:
state = move(state, "down")
elif (x <= 0 and y < 0):
if (alpha < p):
state = move(state, "left")
elif (alpha < 2*p):
state = move(state, "down")
elif (alpha < p+.5):
state = move(state, "right")
else:
state = move(state, "up")
elif (x > 0 and y <= 0):
if (alpha < p):
state = move(state, "right")
elif (alpha < 2*p):
state = move(state, "down")
elif (alpha < p+.5):
state = move(state, "left")
else:
state = move(state, "up")
edge_colors[(old_state, state)]= 1
edge_colors[(state, old_state)]= 1
node_colors[old_state] = 2
node_colors[state]=1
# plt.figure()
# nx.draw(g,pos = {x:x for x in g.nodes()}, node_color = [node_colors[x] for x in g.nodes()],
# edge_color = [edge_colors[x] for x in g.edges()], width =3, cmap="jet")
# plt.show()
x, y = zip(*path)
ax = plt.gca()
ax.spines['left'].set_position(('data',0))
ax.spines['bottom'].set_position(('data',0))
# plt.xlim([-size, size])
# plt.ylim([-size, size])
plt.title("Random walk on grid ({0} steps, p = {1:.2f})".format(steps, p))
plt.plot(x, y)
plt.show()
# horizontal distances from origin
x, y = zip(*h_distances)
plt.plot(x, y, label='horizontal')
# true distance from origin
x, y = zip(*distances)
plt.plot(x, y, label='Euclidean')
ax = plt.gca()
ax.spines['left'].set_position(('data',0))
plt.xlim([0, size * 2])
plt.ylim([.1, steps * 1.1])
plt.legend(loc='upper left')
plt.xlabel("Distance from origin")
plt.ylabel("Time")
plt.title("Random walk on grid, p = {0:.2f}".format(p))
plt.show()
# -
# ### short burst on grid
# +
def get_horizontal_distance(state_one, state_two):
    """Absolute difference of the x-coordinates of two grid states."""
    dx = state_one[0] - state_two[0]
    return np.abs(dx)
def get_euclidean_distance(state_one, state_two):
    """Euclidean (L2) distance between two grid states."""
    dx = state_one[0] - state_two[0]
    dy = state_one[1] - state_two[1]
    return np.sqrt(np.square(dx) + np.square(dy))
# +
"""Short bursts on a grid."""
# PARAMS
size = 15
m = 10
num_bursts = 10
total_length = m * num_bursts
metric = get_horizontal_distance # or get_euclidan_distance
g = nx.grid_graph([size, size])
origin = ((size - 1)/2, (size - 1)/2)
state = origin
path = []
h_distances = []
distances = []
burst_max = state
t = 0
for j in range(num_bursts):
state = burst_max
for i in range(m):
x = state[0] - origin[0]
y = state[1] - origin[1]
path.append((x,y))
h_distances.append([np.abs(x), t])
distances.append([np.sqrt(np.square(x) + np.square(y)), t])
state = grid_move(g, state, origin, .25, rand.random())
burst_max = state if metric(state, origin) > metric(burst_max, origin) else burst_max
t+= 1
# horizontal distances from origin
x, y = zip(*h_distances)
plt.plot(x, y, label='horizontal')
# true distance from origin
x, y = zip(*distances)
plt.plot(x, y, label='Euclidean')
ax = plt.gca()
ax.spines['left'].set_position(('data',0))
plt.xlim([0, size])
plt.ylim([.1, total_length * 1.1])
plt.legend(loc='upper right')
plt.xlabel("Distance from origin")
plt.ylabel("Time")
plt.title("Short bursts on grid")
# +
def move(state, direction):
    """Return the (x, y) neighbor of `state` one step in `direction`.

    Unknown directions return `state` unchanged.
    """
    new_state = state
    if (direction == "right"):
        new_state = (state[0]+1, state[1])
    elif (direction == "left"):
        new_state = (state[0]-1, state[1])
    elif (direction == "up"):
        new_state = (state[0], state[1]+1)
    elif (direction == "down"):
        new_state = (state[0], state[1]-1)
    return new_state
def grid_move(graph, state, origin, p, alpha):
    """One outward-biased step of a random walk on a grid.

    At the origin, step to a uniformly random neighbor. Elsewhere, the two
    directions pointing away from the origin (chosen per quadrant) each get
    probability p; the remaining mass splits the two "toward" directions
    (p + .5 threshold). `alpha` is the pre-drawn uniform [0, 1) sample.

    Bug fix: the quadrant tests previously read `x` and `y`, which were never
    assigned in this function — it only worked when caller cells happened to
    leak globals with those names. The displacement is now computed locally;
    behavior for existing callers is unchanged.
    """
    if (state == origin):
        return rand.choice(list(graph.neighbors(state)))
    # Displacement of `state` relative to the origin decides the quadrant.
    x = state[0] - origin[0]
    y = state[1] - origin[1]
    go = 0
    if (x >= 0 and y > 0):
        if (alpha < p):
            go = "right"
        elif (alpha < 2*p):
            go = "up"
        elif (alpha < p + .5):
            go = "left"
        else:
            go = "down"
    elif (x < 0 and y >= 0 ):
        if (alpha < p):
            go = "left"
        elif (alpha < 2*p):
            go = "up"
        elif (alpha < p + .5):
            go = "right"
        else:
            go = "down"
    elif (x <= 0 and y < 0):
        if (alpha < p):
            go = "left"
        elif (alpha < 2*p):
            go = "down"
        elif (alpha < p + .5):
            go = "right"
        else:
            go = "up"
    elif (x > 0 and y <= 0):
        if (alpha < p):
            go = "right"
        elif (alpha < 2*p):
            go = "down"
        elif (alpha < p + .5):
            go = "left"
        else:
            go = "up"
    return move(state, go)
# +
"""Compare random walks to short bursts on a grid."""
# PARAMS
size = 101
biases = [.25, .3, .375, .4, .5]
bursts_sizes = [100, 10, 1]
num_bursts = [20, 200, 2000]
steps = total_length = 2000 # burst_size * num_bursts
#metric = get_horizontal_distance
#dist = "horizontal"
metric = get_euclidean_distance
dist = "Euclidean"
g = nx.grid_graph([size, size])
origin = ((size - 1)/2, (size - 1)/2)
state = origin
# random walk
rw_path = {"rw_path_{0}".format(b):[] for b in biases}
rw_h_distances = {"rw_h_distances_{0}".format(b):[] for b in biases}
rw_distances = {"rw_distances_{0}".format(b):[] for b in biases}
for bias in biases:
p = bias
state = origin
for i in range(steps):
x = state[0] - origin[0]
y = state[1] - origin[1]
rw_path["rw_path_{0}".format(p)].append((x, y))
rw_h_distances["rw_h_distances_{0}".format(p)].append([np.abs(x), i])
rw_distances["rw_distances_{0}".format(p)].append([np.sqrt(np.square(x) + np.square(y)), i])
state = grid_move(g, state, origin, p, rand.random())
# short bursts
origin = ((size - 1)/2, (size - 1)/2)
bursts_path = {"bursts_path_{0}".format(b):[] for b in bursts_sizes}
bursts_h_distances = {"bursts_h_distances_{0}".format(b):[] for b in bursts_sizes}
bursts_distances = {"bursts_distances_{0}".format(b):[] for b in bursts_sizes}
for m in range(len(bursts_sizes)):
t = 0
state = origin
burst_max = state
for j in range(num_bursts[m]):
state = burst_max
for i in range(bursts_sizes[m]):
x = state[0] - origin[0]
y = state[1] - origin[1]
bursts_path["bursts_path_{0}".format(bursts_sizes[m])].append((x, y))
bursts_h_distances["bursts_h_distances_{0}".format(bursts_sizes[m])].append([np.abs(x), t])
bursts_distances["bursts_distances_{0}".format(bursts_sizes[m])].append([np.sqrt(np.square(x)
+ np.square(y)), t])
state = grid_move(g, state, origin, .25, rand.random())
if (metric(state, origin) > metric(burst_max, origin)):
burst_max = state
t+= 1
# Plot the sampled trajectories of both samplers on the grid.
for b in biases:
    x, y = zip(*rw_path["rw_path_{0}".format(b)])
    plt.plot(x, y, label="r.w., p = {0:.2f}".format(b))
for b in bursts_sizes:
    x, y = zip(*bursts_path["bursts_path_{0}".format(b)])
    plt.plot(x, y, label="s.b., size = {0}".format(b))
ax = plt.gca()
# Center the spines on the walk's origin so trajectories read as offsets.
ax.spines['left'].set_position(('data',0))
ax.spines['bottom'].set_position(('data',0))
plt.title("Random walk and short bursts on {0}x{0} grid (n = {1})".format(size, total_length))
plt.legend(loc='lower right')
#plt.savefig("grid_trajectories.png")
plt.show()
# # horizontal distances from origin
# for b in biases:
#     x, y = zip(*rw_h_distances["rw_h_distances_{0}".format(b)])
#     plt.plot(x, y, label='random walk, horizontal')
# Euclidean distance from origin
for b in biases:
    x, y = zip(*rw_distances["rw_distances_{0}".format(b)])
    plt.plot(x, y, label='r.w., p = {0}'.format(b))
# # horizontal distances from origin
# NOTE(review): the commented-out key below is missing the "_{0}" placeholder
# ("bursts_h_distances".format(b) is a no-op), so it would raise KeyError if
# this section were re-enabled.
# for b in bursts_sizes:
#     x, y = zip(*bursts_h_distances["bursts_h_distances".format(b)])
#     plt.plot(x, y, label='short bursts, horizontal')
# Euclidean distance from origin
for b in bursts_sizes:
    x, y = zip(*bursts_distances["bursts_distances_{0}".format(b)])
    plt.plot(x, y, label='s.b., size = {0}'.format(b))
ax = plt.gca()
ax.spines['left'].set_position(('data',0))
plt.xlim([0, size * 12])
plt.ylim([.1, steps * 1.1])
plt.legend(loc='lower right')
plt.xlabel("{0} distance from origin".format(dist))
plt.ylabel("Time")
plt.title("Random walk vs. short bursts (n = {0})".format(total_length))
#plt.savefig("grid_comparison.png")
plt.show()
# -
|
misc/short_bursts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Concise Implementation of Recurrent Neural Networks
# :label:`sec_rnn-concise`
#
# While :numref:`sec_rnn_scratch` was instructive to see how RNNs are implemented,
# this is not convenient or fast.
# This section will show how to implement the same language model more efficiently
# using functions provided by high-level APIs
# of a deep learning framework.
# We begin as before by reading the time machine dataset.
#
# + origin_pos=3 tab=["tensorflow"]
import tensorflow as tf
from d2l import tensorflow as d2l
# Mini-batch size and number of time steps per training subsequence.
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
# + [markdown] origin_pos=4
# ## [**Defining the Model**]
#
# High-level APIs provide implementations of recurrent neural networks.
# We construct the recurrent neural network layer `rnn_layer` with a single hidden layer and 256 hidden units.
# In fact, we have not even discussed yet what it means to have multiple layers---this will happen in :numref:`sec_deep_rnn`.
# For now, suffice it to say that multiple layers simply amount to the output of one layer of RNN being used as the input for the next layer of RNN.
#
# + origin_pos=7 tab=["tensorflow"]
# A single recurrent layer with 256 hidden units. The RNN wrapper consumes
# time-major input and returns both the per-step outputs and the final state.
num_hiddens = 256
rnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens,
                                         kernel_initializer='glorot_uniform')
rnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True,
                                return_sequences=True, return_state=True)
# + origin_pos=12 tab=["tensorflow"]
# Initial hidden state for a full batch; its shape is inspected on the next line.
state = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)
state.shape
# + [markdown] origin_pos=13
# [**With a hidden state and an input,
# we can compute the output with
# the updated hidden state.**]
# It should be emphasized that
# the "output" (`Y`) of `rnn_layer`
# does *not* involve computation of output layers:
# it refers to
# the hidden state at *each* time step,
# and they can be used as the input
# to the subsequent output layer.
#
# + origin_pos=17 tab=["tensorflow"]
# Dummy time-major input of shape (num_steps, batch_size, len(vocab)), run
# once through the layer to show the output/state shapes.
X = tf.random.uniform((num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
Y.shape, len(state_new), state_new[0].shape
# + [markdown] origin_pos=18
# Similar to :numref:`sec_rnn_scratch`,
# [**we define an `RNNModel` class
# for a complete RNN model.**]
# Note that `rnn_layer` only contains the hidden recurrent layers, we need to create a separate output layer.
#
# + origin_pos=21 tab=["tensorflow"]
#@save
class RNNModel(tf.keras.layers.Layer):
    """An RNN language model: a recurrent layer followed by a dense output layer."""

    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super().__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.dense = tf.keras.layers.Dense(vocab_size)

    def call(self, inputs, state):
        # Switch to time-major layout, then one-hot encode the token indices.
        encoded = tf.one_hot(tf.transpose(inputs), self.vocab_size)
        # Cells such as `tf.keras.layers.LSTMCell` return more than two values,
        # so collect everything after the output into `state`.
        hidden, *state = self.rnn(encoded, state)
        # Flatten (steps, batch, hiddens) to (steps*batch, hiddens) before the
        # output projection.
        flat = tf.reshape(hidden, (-1, hidden.shape[-1]))
        return self.dense(flat), state

    def begin_state(self, *args, **kwargs):
        """Delegate initial-state construction to the wrapped cell."""
        return self.rnn.cell.get_initial_state(*args, **kwargs)
# + [markdown] origin_pos=22
# ## Training and Predicting
#
# Before training the model, let us [**make a prediction with a model that has random weights.**]
#
# + origin_pos=25 tab=["tensorflow"]
# Build the model under a single-device distribution strategy, then sample a
# prediction from the still-untrained (randomly initialized) network.
device_name = d2l.try_gpu()._device_name
strategy = tf.distribute.OneDeviceStrategy(device_name)
with strategy.scope():
    net = RNNModel(rnn_layer, vocab_size=len(vocab))
d2l.predict_ch8('time traveller', 10, net, vocab)
# + [markdown] origin_pos=26
# As is quite obvious, this model does not work at all. Next, we call `train_ch8` with the same hyperparameters defined in :numref:`sec_rnn_scratch` and [**train our model with high-level APIs**].
#
# + origin_pos=29 tab=["tensorflow"]
num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)
# + [markdown] origin_pos=30
# Compared with the last section, this model achieves comparable perplexity,
# albeit within a shorter period of time, due to the code being more optimized by
# high-level APIs of the deep learning framework.
#
#
# ## Summary
#
# * High-level APIs of the deep learning framework provide an implementation of the RNN layer.
# * The RNN layer of high-level APIs returns an output and an updated hidden state, where the output does not involve output layer computation.
# * Using high-level APIs leads to faster RNN training than using its implementation from scratch.
#
# ## Exercises
#
# 1. Can you make the RNN model overfit using the high-level APIs?
# 1. What happens if you increase the number of hidden layers in the RNN model? Can you make the model work?
# 1. Implement the autoregressive model of :numref:`sec_sequence` using an RNN.
#
# + [markdown] origin_pos=33 tab=["tensorflow"]
# [Discussions](https://discuss.d2l.ai/t/2211)
#
|
python/d2l-en/tensorflow/chapter_recurrent-neural-networks/rnn-concise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Cargo 2000 case data; '?' marks missing values in the source CSV.
URL = "http://s-cube-network.eu/c2k-files/c2k_data_comma.csv"
data = pd.read_csv(URL, na_values='?')
# +
# Unpivot the wide Cargo-2000 records into one row per (leg, service, position).
tabla = []
for i, fila in data.iterrows():
    # Legs: up to three incoming transport legs (i1-i3) and one outgoing (o).
    for leg in ['i1','i2','i3', 'o']:
        if np.isnan(fila[leg + '_legid']):
            continue
        # Services per leg; dep/rcf have up to three positions, rcs/dlv one.
        for service in ['rcs','dep','rcf','dlv']:
            if service =='rcs':
                posiciones = ['0']
            elif service =='dep':
                posiciones = ['1','2','3']
            elif service == 'rcf':
                posiciones = ['1','2','3']
            elif service =='dlv':
                posiciones = ['0']
            for pos in posiciones:
                if service == 'rcs' :
                    service_pos = service
                    origin_service_pos = 'dep_1'
                    destin_service_pos = 'dep_1'
                elif service == 'dlv':
                    service_pos = service
                    # NOTE(review): `ult_pos` is the last position appended in the
                    # preceding dep/rcf iterations. If every earlier segment of the
                    # very first leg were skipped it would be unbound (NameError),
                    # and across legs it carries over the previous leg's value —
                    # confirm this matches the data's guarantees.
                    origin_service_pos = 'rcf_' + ult_pos
                    destin_service_pos = 'rcf_' + ult_pos
                else:
                    service_pos = service + '_' + pos
                    origin_service_pos = 'dep_' + pos
                    destin_service_pos = 'rcf_' + pos
                # Skip segments with no planned time recorded.
                if np.isnan(fila[leg + '_' + service_pos + '_p']):
                    continue
                nueva_fila = {}
                nueva_fila['process_id'] = str(int(fila.nr))
                nueva_fila['transport_id'] = str(int(fila[leg + '_legid']))
                nueva_fila['transport_oper'] = 'outgoing' if leg == 'o' else 'incoming'
                nueva_fila['service_type'] = service
                nueva_fila['service_pos'] = pos
                nueva_fila['origin_place'] = str(int(fila[leg + '_' + origin_service_pos + '_place']))
                nueva_fila['destin_place'] = str(int(fila[leg + '_' + destin_service_pos + '_place']))
                nueva_fila['planned_time'] = fila[leg + '_' + service_pos + '_p']
                nueva_fila['effective_time'] = fila[leg + '_' + service_pos + '_e']
                tabla.append(nueva_fila)
                ult_pos = pos
# -
# Materialize the long-format table and persist it for later analysis.
df = pd.DataFrame(tabla)
df.head()
df.to_csv('data_c2k.csv', index=False)
|
Cargo 2000.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import xarray as xr
import sys
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from scipy import signal
from matplotlib.offsetbox import AnnotationBbox
import seaborn as sb
# %matplotlib inline
sys.path.append('/workspace/bkraft/dl_chapter14/src')
from utils.plotting import plot_map, subplots_robinson, plot_hexbin, text_box, subplots_plateCarree
from utils.metrics import get_metrics
# +
# Matplotlib/PGF setup: size figures from the LaTeX column width and render
# all text with LaTeX so fonts match the surrounding document.
fig_width_pt = 443.57848 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72. # Convert pt to inches
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height =fig_width*golden_mean # height in inches
fig_size = [fig_width,fig_height]
pgf_with_latex = { # setup matplotlib to use latex for output
    "pgf.texsystem": "xelatex", # change this if using xetex or lautex
    "text.usetex": True, # use LaTeX to write all text
    "font.family": "serif",
    "font.serif": [], # blank entries should cause plots to inherit fonts from the document
    "font.sans-serif": [],
    "font.monospace": [],
    "axes.labelsize": 9, # LaTeX default is 10pt font.
    "axes.titlesize": 9,
    "font.size": 7,
    "legend.fontsize": 9, # Make the legend/label fonts a little smaller
    "xtick.labelsize": 7,
    "ytick.labelsize": 7,
    "figure.figsize": fig_size, # default fig size of 0.9 textwidth
    "pgf.preamble": [
        r"\usepackage[utf8x]{inputenc}", # use utf8 fonts because your computer can handle it :)
        r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble
    ],
    'axes.linewidth': 0.5,
    'xtick.major.width': 0.5,
    'xtick.minor.width': 0.5,
    'ytick.major.width': 0.5,
    'ytick.minor.width': 0.5
}
mpl.rcParams.update(pgf_with_latex)
def new_subplots(nrow, ncol, wh_ratio, width=1, **kwargs):
    """Clear the current figure and return a fresh (fig, ax) grid.

    NOTE(review): `wh_ratio` and `width` are currently unused; the figure size
    always comes from the module-level `fig_size`.
    """
    plt.clf()
    return plt.subplots(nrow, ncol, figsize=fig_size, **kwargs)
def savefig(filename, **kwargs):
    """Write the current figure to ``<filename>.pdf`` with a tight bounding box."""
    # PGF output kept for reference:
    # plt.savefig('{}.pgf'.format(filename), pad_inches=0, bbox_inches='tight')
    plt.savefig(f'{filename}.pdf', pad_inches=0.05, bbox_inches='tight', **kwargs)
# -
fig_size
# Added some useful functions, I will add more (e.g. plotting time series). Please don't change source code. You can also just use your own stuff or copy my functions and change them, or let me know if you want to have something changed.
# ## Load Data
# +
# Run naming (from the experiment directory names): first letter = soil
# moisture input (w = with, n = no), second letter = variable permutation
# (w = with, n = no). All predictions are converted from kg/m2/s to mm/day.
#Spatially optimized
dss_nn = xr.open_zarr('/scratch/dl_chapter14/experiments/et/n_sm.n_perm/inference/pred_so.zarr/') * 86400 # 1 kg/m2/s = 86400 mm/day
dss_wn = xr.open_zarr('/scratch/dl_chapter14/experiments/et/w_sm.n_perm/inference/pred_so.zarr/') * 86400
dss_nw = xr.open_zarr('/scratch/dl_chapter14/experiments/et/n_sm.w_perm/inference/pred_so.zarr/') * 86400
dss_ww = xr.open_zarr('/scratch/dl_chapter14/experiments/et/w_sm.w_perm/inference/pred_so.zarr/') * 86400
#Temporally optimized
dst_nn = xr.open_zarr('/scratch/dl_chapter14/experiments/et/n_sm.n_perm/inference/pred_to.zarr/') * 86400
dst_wn = xr.open_zarr('/scratch/dl_chapter14/experiments/et/w_sm.n_perm/inference/pred_to.zarr/') * 86400
dst_nw = xr.open_zarr('/scratch/dl_chapter14/experiments/et/n_sm.w_perm/inference/pred_to.zarr/') * 86400
dst_ww = xr.open_zarr('/scratch/dl_chapter14/experiments/et/w_sm.w_perm/inference/pred_to.zarr/') * 86400
# -
# ## Subset for the amazon region
# The Amazon subset is masked by the basin boundary (mask is 1 inside, so the
# multiplication keeps in-basin cells).
amazon_mask = xr.open_dataset("/workspace/BGI/scratch/sbesnard/amazon_mask_360_720.nc").sel(lat= slice(10,-22), lon= slice(-85, -40))
amazon_raw_nn = dst_nn.sel(lat= slice(10,-22), lon= slice(-85, -40)) * amazon_mask.amazon_boundary
amazon_raw_nw = dst_nw.sel(lat= slice(10,-22), lon= slice(-85, -40)) * amazon_mask.amazon_boundary
amazon_raw_wn = dst_wn.sel(lat= slice(10,-22), lon= slice(-85, -40)) * amazon_mask.amazon_boundary
amazon_raw_ww = dst_ww.sel(lat= slice(10,-22), lon= slice(-85, -40)) * amazon_mask.amazon_boundary
# ## Subset for Australia
# Australia is a plain lat/lon box (no mask applied).
australia_raw_nn = dst_nn.sel(lat= slice(-10,-40), lon= slice(109, 156))
australia_raw_nw = dst_nw.sel(lat= slice(-10,-40), lon= slice(109, 156))
australia_raw_wn = dst_wn.sel(lat= slice(-10,-40), lon= slice(109, 156))
australia_raw_ww = dst_ww.sel(lat= slice(-10,-40), lon= slice(109, 156))
# ## Compute monthly seasonal cycle
# Regional-mean monthly climatology for a single year per region.
# NOTE(review): these *_2005 / *_2010 variables are re-assigned (with a
# different computation) in the "monthly anomalies" cell below — confirm the
# intended notebook cell-execution order.
#Amazon
amazon_seas_obs_2005 = amazon_raw_wn.sel(time=slice('2005-01-01', '2005-12-31')).obs.mean(('lat', 'lon')).groupby('time.month').mean('time')
amazon_seas_wn_2005 = amazon_raw_wn.sel(time=slice('2005-01-01', '2005-12-31')).mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
amazon_seas_nn_2005 = amazon_raw_nn.sel(time=slice('2005-01-01', '2005-12-31')).mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
amazon_seas_ww_2005 = amazon_raw_ww.sel(time=slice('2005-01-01', '2005-12-31')).mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
amazon_seas_nw_2005 = amazon_raw_nw.sel(time=slice('2005-01-01', '2005-12-31')).mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
#Australia
australia_seas_obs_2010 = australia_raw_wn.sel(time=slice('2010-01-01', '2010-12-31')).obs.mean(('lat', 'lon')).groupby('time.month').mean('time')
australia_seas_wn_2010 = australia_raw_wn.sel(time=slice('2010-01-01', '2010-12-31')).mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
australia_seas_nn_2010 = australia_raw_nn.sel(time=slice('2010-01-01', '2010-12-31')).mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
australia_seas_ww_2010 = australia_raw_ww.sel(time=slice('2010-01-01', '2010-12-31')).mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
australia_seas_nw_2010 = australia_raw_nw.sel(time=slice('2010-01-01', '2010-12-31')).mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
# ## Compute the residuals of the mean seasonal variations
# Residual = (obs deviation from its annual mean) - (model deviation from its
# annual mean), i.e. how well each model captures seasonal *shape*.
# NOTE(review): `amazon_seas_obs`, `amazon_seas_wn`, ... are defined in a
# *later* cell; running this file strictly top-to-bottom raises NameError here.
# Amazon
amazon_seas_var_res_wn = (amazon_seas_obs - amazon_seas_obs.mean('month')) - (amazon_seas_wn - amazon_seas_wn.mean('month'))
amazon_seas_var_res_nn = (amazon_seas_obs - amazon_seas_obs.mean('month')) - (amazon_seas_nn - amazon_seas_nn.mean('month'))
amazon_seas_var_res_nw = (amazon_seas_obs - amazon_seas_obs.mean('month')) - (amazon_seas_nw - amazon_seas_nw.mean('month'))
amazon_seas_var_res_ww = (amazon_seas_obs - amazon_seas_obs.mean('month')) - (amazon_seas_ww - amazon_seas_ww.mean('month'))
# Australia
australia_seas_var_res_wn = (australia_seas_obs - australia_seas_obs.mean('month')) - (australia_seas_wn - australia_seas_wn.mean('month'))
australia_seas_var_res_nn = (australia_seas_obs - australia_seas_obs.mean('month')) - (australia_seas_nn - australia_seas_nn.mean('month'))
australia_seas_var_res_nw = (australia_seas_obs - australia_seas_obs.mean('month')) - (australia_seas_nw - australia_seas_nw.mean('month'))
australia_seas_var_res_ww = (australia_seas_obs - australia_seas_obs.mean('month')) - (australia_seas_ww - australia_seas_ww.mean('month'))
# ## Compute monthly anomalies
# Anomaly = monthly mean of the focus year minus the full-record monthly
# climatology, per region and model variant.
# +
## Amazon
# Compute monthly mean 2000-2013 (climatology over the full record)
amazon_seas_obs = amazon_raw_wn.obs.mean(('lat', 'lon')).groupby('time.month').mean('time')
amazon_seas_wn = amazon_raw_wn.mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
amazon_seas_nn = amazon_raw_nn.mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
amazon_seas_ww = amazon_raw_ww.mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
amazon_seas_nw = amazon_raw_nw.mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
# Compute monthly mean for 2005
amazon_seas_obs_2005 = amazon_raw_wn.obs.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2005-01-01', '2005-12-31'))
amazon_seas_wn_2005 = amazon_raw_wn.mod.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2005-01-01', '2005-12-31'))
amazon_seas_nn_2005 = amazon_raw_nn.mod.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2005-01-01', '2005-12-31'))
amazon_seas_nw_2005 = amazon_raw_nw.mod.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2005-01-01', '2005-12-31'))
amazon_seas_ww_2005 = amazon_raw_ww.mod.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2005-01-01', '2005-12-31'))
# Compute monthly anomalies (2005 minus climatology)
amazon_anom_obs_2005 = amazon_seas_obs_2005.values - amazon_seas_obs.values
amazon_anom_wn_2005 = amazon_seas_wn_2005.values - amazon_seas_wn.values
amazon_anom_nn_2005 = amazon_seas_nn_2005.values - amazon_seas_nn.values
amazon_anom_ww_2005 = amazon_seas_ww_2005.values - amazon_seas_ww.values
amazon_anom_nw_2005 = amazon_seas_nw_2005.values - amazon_seas_nw.values
# +
## Australia
# Compute monthly mean 2000-2013 (climatology over the full record)
australia_seas_obs = australia_raw_wn.obs.mean(('lat', 'lon')).groupby('time.month').mean('time')
australia_seas_wn = australia_raw_wn.mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
australia_seas_nn = australia_raw_nn.mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
australia_seas_ww = australia_raw_ww.mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
australia_seas_nw = australia_raw_nw.mod.mean(('lat', 'lon')).groupby('time.month').mean('time')
# Compute monthly mean for 2010
australia_seas_obs_2010 = australia_raw_wn.obs.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2010-01-01', '2010-12-31'))
australia_seas_wn_2010 = australia_raw_wn.mod.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2010-01-01', '2010-12-31'))
australia_seas_nn_2010 = australia_raw_nn.mod.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2010-01-01', '2010-12-31'))
australia_seas_nw_2010 = australia_raw_nw.mod.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2010-01-01', '2010-12-31'))
australia_seas_ww_2010 = australia_raw_ww.mod.mean(('lat', 'lon')).resample(time = '1MS').mean().sel(time=slice('2010-01-01', '2010-12-31'))
# Compute monthly anomalies (2010 minus climatology)
australia_anom_obs_2010 = australia_seas_obs_2010.values - australia_seas_obs.values
australia_anom_wn_2010 = australia_seas_wn_2010.values - australia_seas_wn.values
australia_anom_nn_2010 = australia_seas_nn_2010.values - australia_seas_nn.values
australia_anom_ww_2010 = australia_seas_ww_2010.values - australia_seas_ww.values
australia_anom_nw_2010 = australia_seas_nw_2010.values - australia_seas_nw.values
# -
# ## Compute annual anomalies (z-score)
# For each variant: z-score the annual means against the inter-annual mean and
# std, take the regional mean, then remove any linear trend.
# +
##Amazon
# No permutation with soil moisture
amazon_raw_wn_annual = amazon_raw_wn.groupby('time.year').mean('time')
amazon_raw_wn_std = amazon_raw_wn.groupby('time.year').mean('time').std('year')
amazon_raw_wn_mean = amazon_raw_wn.groupby('time.year').mean('time').mean('year')
amazon_raw_wn_anomaly_standarise = (amazon_raw_wn_annual - amazon_raw_wn_mean) / amazon_raw_wn_std
# The observed series is only standardized once (obs is identical across runs).
amazon_raw_obs_anomaly_standarise_detrend = signal.detrend(amazon_raw_wn_anomaly_standarise.obs.mean(('lat', 'lon')))
amazon_raw_wn_anomaly_standarise_detrend = signal.detrend(amazon_raw_wn_anomaly_standarise.mod.mean(('lat', 'lon')))
# No permutation no soil moisture
amazon_raw_nn_annual = amazon_raw_nn.groupby('time.year').mean('time')
amazon_raw_nn_std = amazon_raw_nn.groupby('time.year').mean('time').std('year')
amazon_raw_nn_mean = amazon_raw_nn.groupby('time.year').mean('time').mean('year')
amazon_raw_nn_anomaly_standarise = (amazon_raw_nn_annual - amazon_raw_nn_mean) / amazon_raw_nn_std
amazon_raw_nn_anomaly_standarise_detrend = signal.detrend(amazon_raw_nn_anomaly_standarise.mod.mean(('lat', 'lon')))
# Permutation with soil moisture
amazon_raw_ww_annual = amazon_raw_ww.groupby('time.year').mean('time')
amazon_raw_ww_std = amazon_raw_ww.groupby('time.year').mean('time').std('year')
amazon_raw_ww_mean = amazon_raw_ww.groupby('time.year').mean('time').mean('year')
amazon_raw_ww_anomaly_standarise = (amazon_raw_ww_annual - amazon_raw_ww_mean) / amazon_raw_ww_std
amazon_raw_ww_anomaly_standarise_detrend = signal.detrend(amazon_raw_ww_anomaly_standarise.mod.mean(('lat', 'lon')))
# Permutation no soil moisture
amazon_raw_nw_annual = amazon_raw_nw.groupby('time.year').mean('time')
amazon_raw_nw_std = amazon_raw_nw.groupby('time.year').mean('time').std('year')
amazon_raw_nw_mean = amazon_raw_nw.groupby('time.year').mean('time').mean('year')
amazon_raw_nw_anomaly_standarise = (amazon_raw_nw_annual - amazon_raw_nw_mean) / amazon_raw_nw_std
amazon_raw_nw_anomaly_standarise_detrend = signal.detrend(amazon_raw_nw_anomaly_standarise.mod.mean(('lat', 'lon')))
# +
##Australia
# No permutation with soil moisture
australia_raw_wn_annual = australia_raw_wn.groupby('time.year').mean('time')
australia_raw_wn_std = australia_raw_wn.groupby('time.year').mean('time').std('year')
australia_raw_wn_mean = australia_raw_wn.groupby('time.year').mean('time').mean('year')
australia_raw_wn_anomaly_standarise = (australia_raw_wn_annual - australia_raw_wn_mean) / australia_raw_wn_std
australia_raw_obs_anomaly_standarise_detrend = signal.detrend(australia_raw_wn_anomaly_standarise.obs.mean(('lat', 'lon')))
australia_raw_wn_anomaly_standarise_detrend = signal.detrend(australia_raw_wn_anomaly_standarise.mod.mean(('lat', 'lon')))
# No permutation no soil moisture
australia_raw_nn_annual = australia_raw_nn.groupby('time.year').mean('time')
australia_raw_nn_std = australia_raw_nn.groupby('time.year').mean('time').std('year')
australia_raw_nn_mean = australia_raw_nn.groupby('time.year').mean('time').mean('year')
australia_raw_nn_anomaly_standarise = (australia_raw_nn_annual - australia_raw_nn_mean) / australia_raw_nn_std
australia_raw_nn_anomaly_standarise_detrend = signal.detrend(australia_raw_nn_anomaly_standarise.mod.mean(('lat', 'lon')))
# Permutation with soil moisture
australia_raw_ww_annual = australia_raw_ww.groupby('time.year').mean('time')
australia_raw_ww_std = australia_raw_ww.groupby('time.year').mean('time').std('year')
australia_raw_ww_mean = australia_raw_ww.groupby('time.year').mean('time').mean('year')
australia_raw_ww_anomaly_standarise = (australia_raw_ww_annual - australia_raw_ww_mean) / australia_raw_ww_std
australia_raw_ww_anomaly_standarise_detrend = signal.detrend(australia_raw_ww_anomaly_standarise.mod.mean(('lat', 'lon')))
# Permutation no soil moisture
australia_raw_nw_annual = australia_raw_nw.groupby('time.year').mean('time')
australia_raw_nw_std = australia_raw_nw.groupby('time.year').mean('time').std('year')
australia_raw_nw_mean = australia_raw_nw.groupby('time.year').mean('time').mean('year')
australia_raw_nw_anomaly_standarise = (australia_raw_nw_annual - australia_raw_nw_mean) / australia_raw_nw_std
australia_raw_nw_anomaly_standarise_detrend = signal.detrend(australia_raw_nw_anomaly_standarise.mod.mean(('lat', 'lon')))
# -
# ## Combine different scale into one numpy array
# Bundle the per-scale series per variant so the plotting loop below can index
# them by name. "seasonal_res" for the observations is a zero placeholder
# (the observed residual of itself is zero by construction).
# Fixed: the amazon_nn dict used the misspelled key 'annual__nomaly', which
# would raise KeyError on lookup; it now matches the other dicts.
# NOTE(review): amazon_obs stores the full climatology (amazon_seas_obs) while
# australia_obs stores the 2010 monthly means — confirm this asymmetry is
# intended.
#Amazon
amazon_obs = {"mean_seasonal_cycle": amazon_seas_obs, 'seasonal_anomaly':amazon_anom_obs_2005,
              "seasonal_res":np.zeros(10), 'annual_anomaly':amazon_raw_obs_anomaly_standarise_detrend}
amazon_wn = {"mean_seasonal_cycle": amazon_seas_wn, 'seasonal_anomaly':amazon_anom_wn_2005,
             "seasonal_res":amazon_seas_var_res_wn, 'annual_anomaly':amazon_raw_wn_anomaly_standarise_detrend}
amazon_nn = {"mean_seasonal_cycle": amazon_seas_nn, 'seasonal_anomaly':amazon_anom_nn_2005,
             "seasonal_res":amazon_seas_var_res_nn, 'annual_anomaly':amazon_raw_nn_anomaly_standarise_detrend}
amazon_ww = {"mean_seasonal_cycle": amazon_seas_ww, 'seasonal_anomaly':amazon_anom_ww_2005,
             "seasonal_res":amazon_seas_var_res_ww, 'annual_anomaly':amazon_raw_ww_anomaly_standarise_detrend}
amazon_nw = {"mean_seasonal_cycle": amazon_seas_nw, 'seasonal_anomaly':amazon_anom_nw_2005,
             "seasonal_res":amazon_seas_var_res_nw, 'annual_anomaly':amazon_raw_nw_anomaly_standarise_detrend}
# Australia
australia_obs = {"mean_seasonal_cycle": australia_seas_obs_2010, 'seasonal_anomaly':australia_anom_obs_2010,
                 "seasonal_res":np.zeros(10), 'annual_anomaly':australia_raw_obs_anomaly_standarise_detrend}
australia_wn = {"mean_seasonal_cycle": australia_seas_wn_2010, 'seasonal_anomaly':australia_anom_wn_2010,
                "seasonal_res":australia_seas_var_res_wn, 'annual_anomaly':australia_raw_wn_anomaly_standarise_detrend}
australia_nn = {"mean_seasonal_cycle": australia_seas_nn_2010, 'seasonal_anomaly':australia_anom_nn_2010,
                "seasonal_res":australia_seas_var_res_nn, 'annual_anomaly':australia_raw_nn_anomaly_standarise_detrend}
australia_ww = {"mean_seasonal_cycle": australia_seas_ww_2010, 'seasonal_anomaly':australia_anom_ww_2010,
                "seasonal_res":australia_seas_var_res_ww, 'annual_anomaly':australia_raw_ww_anomaly_standarise_detrend}
australia_nw = {"mean_seasonal_cycle": australia_seas_nw_2010, 'seasonal_anomaly':australia_anom_nw_2010,
                "seasonal_res":australia_seas_var_res_nw, 'annual_anomaly':australia_raw_nw_anomaly_standarise_detrend}
# +
# 3x2 panel grid: rows = seasonal cycle / residuals / anomaly; cols = Amazon / Australia.
fig, ax = plt.subplots(3, 2, figsize=(fig_size[0], fig_size[0]*0.8), gridspec_kw={'wspace': 0.2, 'hspace': 1})
def get_style(style, i):
    """Return matplotlib line kwargs for series index `style` (0-4).

    Legend labels are attached only on the first panel row (i == 0); later
    rows receive '_nolegend_' so each series appears once in the legend.
    """
    legend = i == 0

    def lbl(text):
        return text if legend else '_nolegend_'

    blue = sb.color_palette("Paired")[1]
    green = sb.color_palette("Paired")[3]
    common = dict(linewidth=1.1, alpha=0.7)
    styles = [
        # 0: reference simulation — darker, thinner, fully opaque.
        dict(color='0.2', linestyle='solid', linewidth=0.9, alpha=1,
             label=lbl('MATSIRO')),
        # 1-4: model variants; dashed = with soil moisture, colour = model type.
        dict(linestyle='--', color=blue,
             label=lbl(r'$\mathrm{LSTM_{SM}}$'), **common),
        dict(color=blue,
             label=lbl(r'$\mathrm{LSTM_{\neg SM}}$'), **common),
        dict(linestyle='--', color=green,
             label=lbl(r'$\mathrm{FC_{SM}}$'), **common),
        dict(color=green,
             label=lbl(r'$\mathrm{FC_{\neg SM}}$'), **common),
    ]
    return styles[style]
# Draw the three scale rows; the observed series of the residual row is a flat
# zero line by construction.
for i, var in enumerate(['mean_seasonal_cycle', 'seasonal_res', 'seasonal_anomaly']):
    if var in ['seasonal_res']:
        # Amazon
        # ax[i, 0].axhline(y=0, xmin=-0.11692960554993577, xmax=0.13768228649736652, **get_style(0, i))
        ax[i, 0].plot([0, 11], [0, 0], **get_style(0, i))
        # Australia
        ax[i, 1].plot([0, 11], [0, 0], **get_style(0, i))
    else:
        # Amazon
        ax[i, 0].plot(amazon_obs[var], **get_style(0, i))
        # Australia
        ax[i, 1].plot(australia_obs[var], **get_style(0, i))
    ax[i, 0].plot(amazon_wn[var], **get_style(1, i))
    ax[i, 0].plot(amazon_nn[var], **get_style(2, i))
    ax[i, 0].plot(amazon_ww[var], **get_style(3, i))
    ax[i, 0].plot(amazon_nw[var], **get_style(4, i))
    ax[i, 0].spines['top'].set_visible(False)
    ax[i, 0].spines['right'].set_visible(False)
    ax[i, 1].plot(australia_wn[var], **get_style(1, i))
    ax[i, 1].plot(australia_nn[var], **get_style(2, i))
    ax[i, 1].plot(australia_ww[var], **get_style(3, i))
    ax[i, 1].plot(australia_nw[var], **get_style(4, i))
    ax[i, 1].spines['top'].set_visible(False)
    ax[i, 1].spines['right'].set_visible(False)
    ax[i, 0].set_ylabel('ET ($mm \ day^{-1}$)')
    ax[i, 1].set_ylabel('')
    # Month ticks on both columns.
    ax[i, 0].set_xticks(np.arange(0, 12))
    ax[i, 0].set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'],
                             rotation=45)
    ax[i, 1].set_xticks(np.arange(0, 12))
    ax[i, 1].set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'],
                             rotation=45)
    ax[i, 0].yaxis.set_label_coords(-0.14, 0.5)
# Per-panel titles and a single shared legend above the grid.
ax[0, 0].set_title('Amazon region\n mean seasonal cycle 2001-2013')
ax[0, 1].set_title('Australia\n mean seasonal cycle 2001-2013')
ax[1, 0].set_title('mean seasonal residuals')
ax[1, 1].set_title('mean seasonal residuals')
ax[2, 0].set_title('seasonal anomaly in 2005')
ax[2, 1].set_title('seasonal anomaly in 2010')
#ax[-2, 1].legend();
ax[0, 0].legend(loc='upper center', bbox_to_anchor=(1., 1.8),
                ncol=5, frameon=False, labelspacing=-1, borderaxespad=0., columnspacing=1)
plt.gca().yaxis.set_label_position("right")
savefig('/workspace/bkraft/dl_chapter14/src/notebooks/exp2_figures/Fig5', dpi=300)
# -
# ## Compute difference of the annual anomalies
# Per-pixel (obs - model) standardized annual anomaly for 2005 (Amazon) and
# 2010 (Australia), written to netCDF for later map plotting.
#Amazon
amazon_diff_obs_wn = amazon_raw_wn_anomaly_standarise.sel(year=2005).obs - amazon_raw_wn_anomaly_standarise.sel(year=2005).mod
amazon_diff_obs_wn.to_netcdf('/workspace/bkraft/dl_chapter14/src/amazon_diff_obs_wn.nc')
amazon_diff_obs_nn = amazon_raw_wn_anomaly_standarise.sel(year=2005).obs - amazon_raw_nn_anomaly_standarise.sel(year=2005).mod
amazon_diff_obs_nn.to_netcdf('/workspace/bkraft/dl_chapter14/src/amazon_diff_obs_nn.nc')
amazon_diff_obs_ww = amazon_raw_wn_anomaly_standarise.sel(year=2005).obs - amazon_raw_ww_anomaly_standarise.sel(year=2005).mod
amazon_diff_obs_ww.to_netcdf('/workspace/bkraft/dl_chapter14/src/amazon_diff_obs_ww.nc')
amazon_diff_obs_nw = amazon_raw_wn_anomaly_standarise.sel(year=2005).obs - amazon_raw_nw_anomaly_standarise.sel(year=2005).mod
amazon_diff_obs_nw.to_netcdf('/workspace/bkraft/dl_chapter14/src/amazon_diff_obs_nw.nc')
#australia
australia_diff_obs_wn = australia_raw_wn_anomaly_standarise.sel(year=2010).obs - australia_raw_wn_anomaly_standarise.sel(year=2010).mod
australia_diff_obs_wn.to_netcdf('/workspace/bkraft/dl_chapter14/src/australia_diff_obs_wn.nc')
australia_diff_obs_nn = australia_raw_wn_anomaly_standarise.sel(year=2010).obs - australia_raw_nn_anomaly_standarise.sel(year=2010).mod
australia_diff_obs_nn.to_netcdf('/workspace/bkraft/dl_chapter14/src/australia_diff_obs_nn.nc')
australia_diff_obs_ww = australia_raw_wn_anomaly_standarise.sel(year=2010).obs - australia_raw_ww_anomaly_standarise.sel(year=2010).mod
australia_diff_obs_ww.to_netcdf('/workspace/bkraft/dl_chapter14/src/australia_diff_obs_ww.nc')
australia_diff_obs_nw = australia_raw_wn_anomaly_standarise.sel(year=2010).obs - australia_raw_nw_anomaly_standarise.sel(year=2010).mod
australia_diff_obs_nw.to_netcdf('/workspace/bkraft/dl_chapter14/src/australia_diff_obs_nw.nc')
# ## Scatterplot LSTM vs MATSIRO annual anomalies
fig, ax = plt.subplots(2, 2, figsize=(5, 5))
plot_hexbin(amazon_raw_wn_anomaly_standarise.sel(year = 2005).mod, amazon_raw_wn_anomaly_standarise.sel(year = 2005).obs,
            xlabel= 'with SM\nno perm', ylabel= 'MATSIRO simulation', ax=ax[0,0])
plot_hexbin(amazon_raw_nn_anomaly_standarise.sel(year = 2005).mod, amazon_raw_nn_anomaly_standarise.sel(year = 2005).obs,
            xlabel= 'no SM\nno perm', ylabel= 'MATSIRO simulation', ax=ax[0,1])
# NOTE(review): in the two panels below the mod/obs pairings and the x-labels
# appear swapped (ww mod is paired with nw obs and labelled 'no SM\nwith perm',
# and vice versa) — confirm which pairing was intended.
plot_hexbin(amazon_raw_ww_anomaly_standarise.sel(year = 2005).mod, amazon_raw_nw_anomaly_standarise.sel(year = 2005).obs,
            xlabel= 'no SM\nwith perm', ylabel= 'MATSIRO simulation', ax=ax[1,0])
plot_hexbin(amazon_raw_nw_anomaly_standarise.sel(year = 2005).mod, amazon_raw_ww_anomaly_standarise.sel(year = 2005).obs,
            xlabel= 'with SM\nwith perm', ylabel= 'MATSIRO simulation', ax=ax[1,1])
plt.savefig('/workspace/bkraft/dl_chapter14/src/notebooks/exp2_figures/annual_anomaly_amazon_scatter.png', dpi=300)
# ## Plot Difference maps
# Global maps of the metric change when soil moisture is withheld
# (no-SM run minus with-SM run), for NSE and RMSE, per model family.
# +
metrics = xr.open_dataset('/scratch/dl_chapter14/experiments/et/derived/spatial_metrics.nc')
fig, axes = subplots_robinson(2, 2, figsize=(fig_size[0], fig_size[0]*0.6), gridspec_kw={'wspace': 0.01, 'hspace': 0.01})
for i, met in enumerate(['mef', 'rmse']):
    for j, (mod, mod_name) in enumerate(zip(['wn_nn', 'ww_nw'], [r'$\mathrm{LSTM_{\neg SM}}$ - $\mathrm{LSTM_{SM}}$', r'$\mathrm{FC_{\neg SM}}$ - $\mathrm{FC_{SM}}$'])):
        ax = axes[j, i]
        if mod == 'wn_nn':
            dt = metrics[met].sel(model= 'nn', timeres='daily', set='raw', cvset='eval') - metrics[met].sel(model= 'wn', timeres='daily', set='raw', cvset='eval')
        else:
            dt = metrics[met].sel(model= 'nw', timeres='daily', set='raw', cvset='eval') - metrics[met].sel(model= 'ww', timeres='daily', set='raw', cvset='eval')
        label = 'NSE ($-$)' if met=='mef' else 'RMSE ($mm \ day^{-1}$)'
        # NOTE(review): the vmin/vmax ternaries below pick the same value in
        # both branches (-0.5 / 0.5) — possibly a leftover from per-metric limits.
        plot_map(
            dt, label=' ', vmin=-0.5 if met=='mef' else -0.5, vmax=0.5 if met=='mef' else 0.5, cmap='bwr_r' if met=='mef' else 'bwr', ax=ax,
            histogram_placement=[0.08, 0.28, 0.2, 0.25], hist_kw={'bins': 20, 'edgecolor': 'none'}, cbar_kwargs={'extend': 'both'}, rasterized=True) # [x0, y0, width, height]
        ax.set_title('')
        # Row labels on the left column, metric titles on the top row only.
        if i == 0:
            ax.text(-0.02, 0.45, mod_name, horizontalalignment='right', verticalalignment='center', transform=ax.transAxes, rotation=90, size=9)
        if j == 0:
            ax.set_title(label, size=9)
        ax.outline_patch.set_linewidth(0.5)
savefig('/workspace/bkraft/dl_chapter14/src/notebooks/exp2_figures/Fig3', dpi=300)
# -
|
src/notebooks/exp2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Configure root logging to stdout at INFO level.
# NOTE(review): this logging setup is repeated verbatim in the next cell;
# the duplication is harmless but redundant.
import logging
import importlib
importlib.reload(logging) # see https://stackoverflow.com/a/21475297/1469195
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                    level=logging.INFO, stream=sys.stdout)
# +
# %%capture
# Make local project checkouts importable and set notebook-wide plot defaults.
import os
import site
os.sys.path.insert(0, '/home/schirrmr/code/hyperoptim//')
os.sys.path.insert(0, '/home/schirrmr/code/reversible/')
os.sys.path.insert(0, '/home/schirrmr/braindecode/code/braindecode/')
# %load_ext autoreload
# %autoreload 2
import numpy as np
import logging
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                    level=logging.INFO, stream=sys.stdout)
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import cm
# %matplotlib inline
# %config InlineBackend.figure_format = 'png'
matplotlib.rcParams['figure.figsize'] = (12.0, 1.0)
matplotlib.rcParams['font.size'] = 14
import seaborn
seaborn.set_style('darkgrid')
# -
from hyperoptim.results import (load_data_frame,
remove_columns_with_same_value, mean_identical_exps, pairwise_compare_frame,
round_numeric_columns)
import pandas as pd
# BUG FIX: `load_df` is undefined (NameError) — the function imported above,
# and used by every later cell, is `load_data_frame`.
df = load_data_frame('/data/schirrmr/schirrmr/reversible/experiments/new-deep-invertible')
df
# +
df = load_data_frame('/data/schirrmr/schirrmr/reversible/experiments/deepshallow//')
# Keep only finished, non-debug runs.
df = df[df.debug == 0]
df = df[df.finished == 1]
df = remove_columns_with_same_value(df)
df = df.fillna('-')
df = df.drop('seed', axis=1)
# Runtime seconds -> timedelta; misclassification fraction -> percent.
df.runtime = pd.to_timedelta(np.round(df.runtime), unit='s')
df.loc[:, 'valid_misclass'] = df.loc[:,'valid_misclass'] * 100
# -
result_cols = ['valid_misclass', 'runtime', 'train_loss', 'valid_loss', ]
# Average repeated runs with identical configs (subjects > 3), best first.
meaned_df = mean_identical_exps(df[df.subject_id > 3].drop('subject_id', axis=1), result_cols=result_cols).sort_values(
by='valid_misclass')
meaned_df = round_numeric_columns(meaned_df, 2)
meaned_df[(meaned_df.max_epochs == 1000) |
(meaned_df.max_epochs == 100)]
# Reload without the debug/finished filters and summarize subjects > 4.
df = load_data_frame('/data/schirrmr/schirrmr/reversible/experiments/deepshallow//')
df = remove_columns_with_same_value(df)
df = df.drop('seed', axis=1)
df.runtime = pd.to_timedelta(np.round(df.runtime), unit='s')
result_cols = ['valid_misclass', 'runtime', 'train_loss', 'valid_loss', ]
mean_identical_exps(df[df.subject_id > 4].drop('subject_id', axis=1), result_cols=result_cols).sort_values(
by='valid_misclass')
|
notebooks/results/PureClassification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Finding a protein motif, from [Rosalind.info](https://www.rosalind.info)
#
# (Text copied from http://rosalind.info/problems/mprt/)
#
#
# <div class="problem-statement problem-statement-bordered" problem="241">
# <blockquote>
# <h2 id="motif-implies-function">Motif Implies Function</h2>
# <div class="thumb"><a figure="Figure 1" href="http://rosalind.info/media/problems/mprt/cyclophilines.png" lightbox-title="The human cyclophilin family, as represented by the structures of the isomerase domains of some of its members." rel="lightbox[figures]"><img src="/media/problems/mprt/cyclophilines.thumb.png" /></a><div class="caption"><strong>Figure 1</strong><span>. </span><span>The human cyclophilin family, as represented by the structures of the isomerase domains of some of its members.</span></div>
# </div>
# <p>As mentioned in <a data-content="Solved by 15945 (correct ratio 67.6%)." data-trigger="hover" href="/problems/prot/" rel="popover" title="“Translating RNA into Protein”">“Translating RNA into Protein”</a>, <a class="term" href="/glossary/protein/" id="term-206" rel="tooltip" title="The functional unit of the cell.">proteins</a> perform every practical function in the <a class="term" href="/glossary/cell/" id="term-257" rel="tooltip" title="
# The "building block of life," making up all living things on Earth.">cell</a>.
# A structural and functional unit of the protein is a <a class="term new" href="/glossary/protein-domain/" id="term-562" rel="tooltip" title="New term:
# A structural and functional unit of the protein.">protein domain</a>: in terms of the protein's
# <a class="term" href="/glossary/protein-primary-structure/" id="term-570" rel="tooltip" title="
# The order of amino acids on a protein.">primary structure</a>, the domain is an interval of amino acids that can evolve and
# function independently.</p>
# <p>Each domain usually corresponds to a single function of the protein (e.g., binding the protein to <a class="term" href="/glossary/dna/" id="term-545" rel="tooltip" title="
# The molecule encoding heredity and underlying the cellular processes of all life forms.">DNA</a>, creating
# or breaking specific chemical bonds, etc.). Some proteins, such as myoglobin and the Cytochrome complex,
# have only one domain, but many proteins are multifunctional and therefore possess several domains.
# It is even possible to artificially fuse different domains into a protein molecule with definite properties,
# creating a <a class="term new" href="/glossary/chimeric-protein/" id="term-583" rel="tooltip" title="New term:
# A protein artificially constructed from several known domains.">chimeric protein</a>.</p>
# <p>Just like species, proteins can evolve, forming <a class="term" href="/glossary/homologous/" id="term-324" rel="tooltip" title="
# Descending from the same ancestor.">homologous</a> groups called <a class="term new" href="/glossary/protein-family/" id="term-586" rel="tooltip" title="New term:
# A group of homologous proteins.">protein families</a>.
# Proteins from one family usually have the same set of domains, performing similar functions;
# see <a href="/media/problems/mprt/cyclophilines.png" lightbox-title="The human cyclophilin family, as represented by the structures of the isomerase domains of some of its members." rel="lightbox[figures]" title="Click to view">Figure 1</a>.</p>
# <p>A component of a domain essential for its function is called a <a class="term" href="/glossary/motif/" id="term-241" rel="tooltip" title="
# A nucleotide or amino acid pattern of biological significance.">motif</a>, a term that in general
# has the same meaning as it does in <a class="term" href="/glossary/nucleic-acid/" id="term-200" rel="tooltip" title="
# A polymer of nucleotides, constituting either RNA or DNA.">nucleic acids</a>, although many other terms are also used
# (blocks, signatures, fingerprints, etc.) Usually protein motifs are evolutionarily conservative,
# meaning that they appear without much change in different species.</p>
# <p>Proteins are identified in different labs around the world and gathered into freely accessible databases.
# A central repository for protein data is <a href="http://www.uniprot.org/" target="_blank">UniProt</a>, which provides
# detailed protein annotation, including function description, domain structure, and post-translational modifications.
# UniProt also supports protein similarity search, taxonomy analysis, and literature citations.</p>
# </blockquote>
# <h2 id="problem">Problem</h2>
# <p>To allow for the presence of its varying forms, a protein motif is represented by a shorthand as follows:
# [XY] means "either X or Y" and {X} means "any amino acid except X." For example, the N-glycosylation motif
# is written as N{P}[ST]{P}.</p>
# <p>You can see the complete description and features of a particular protein by its access ID
# "uniprot_id" in the UniProt database, by inserting the ID number into</p>
# <div class="codehilite"><pre>http://www.uniprot.org/uniprot/uniprot_id
# </pre></div>
#
# <p>Alternatively, you can obtain a protein sequence in <a class="term" href="/glossary/fasta-format/" id="term-759" rel="tooltip" title="
# A text format used for naming genetic strings in databases.">FASTA format</a> by following</p>
# <div class="codehilite"><pre>http://www.uniprot.org/uniprot/uniprot_id.fasta
# </pre></div>
#
#
# <p>For example, the data for protein B5ZC00 can be found at <a href="http://www.uniprot.org/uniprot/B5ZC00" target="_blank"><a href="http://www.uniprot.org/uniprot/B5ZC00" rel="nofollow" target="_blank"><a href="http://www.uniprot.org/uniprot/B5ZC00" rel="nofollow" target="_blank">http://www.uniprot.org/uniprot/B5ZC00</a></a></a>.</p>
# <p><span class="given-return">Given:</span> At most 15 UniProt Protein Database access IDs.</p>
# <p><span class="given-return">Return:</span> For each protein possessing the N-glycosylation motif, output its given access ID followed
# by a list of <a class="term" href="/glossary/location/" id="term-382" rel="tooltip" title="
# The position in a string where a substring begins.">locations</a> in the protein string where the motif can be found.</p>
# <h2 id="sample-dataset">Sample Dataset</h2>
# <div class="codehilite"><pre>A2Z669
# B5ZC00
# P07204_TRBM_HUMAN
# P20840_SAG1_YEAST
# </pre></div>
#
#
# <h2 id="sample-output">Sample Output</h2>
# <div class="codehilite"><pre>B5ZC00
# 85 118 142 306 395
# P07204_TRBM_HUMAN
# 47 115 116 382 409
# P20840_SAG1_YEAST
# 79 109 135 248 306 348 364 402 485 501 614
# </pre></div>
#
#
# <blockquote>
# <h2 id="note">Note</h2>
# <p>Some entries in UniProt have one primary (citable) accession number and some secondary numbers, appearing due to
# merging or demerging entries. In this problem, you may be given any type of ID.
# If you type the secondary ID into the UniProt query, then you will be automatically
# redirected to the page containing the primary ID.
# You can find more information about UniProt IDs <a href="http://www.uniprot.org/manual/accession_numbers" target="_blank">here</a>.</p>
# </blockquote>
# <div class="clearfix"></div>
# </div>
# ## My interpretation/reasoning
#
# 1. In this exercise, I will combine two things:
# 1. Downloading protein sequences from a public database
# 2. Finding motifs in the sequences
#
# 2. The motif of interest is "N, not-P, S-or-T, not-P" so a 4-amino acid sequence.
#
# 3. The results should be the protein ID of the protein that contains the motif, followed by a newline and all positions of the motif, separated by spaces.
#
# So practically, I want to make a script that:
# - Opens and reads a text file with IDs
# - For each ID, lookup the amino acid sequence
# - Find any position that holds the motif N{P}[ST]{P}
# - if there are none: pass
# - if the motif is found: return the ID and the positions
#
# Sounds pretty straightforward. Let's see how to get that into code.
def read_ids_from_file(input_file):
    """
    Read a text file containing one accession ID per line and
    return the IDs as a list of stripped strings.
    """
    with open(input_file, 'r') as handle:
        return [entry.strip() for entry in handle]
print(read_ids_from_file("data/Example_finding_a_protein_motif.txt"))
# Then I want to download the fasta files belonging to these IDs, for which I can use the urllib module as suggested here: https://stackoverflow.com/questions/1393324/in-python-given-a-url-to-a-text-file-what-is-the-simplest-way-to-read-the-cont
import urllib.request
def fetch_uniprot_fasta(accession_id):
    """
    Given an accession ID from UniProt, download the corresponding
    FASTA record and return it as separate variables.

    Returns:
        (fasta_id, sequence): the '>' header line (or None when the
        response contains no header) and the concatenated sequence.
    """
    base_url = "http://www.uniprot.org/uniprot/"
    fasta_url = base_url + accession_id + ".fasta"
    # BUG FIX: initialize fasta_id so a response without a '>' header line
    # no longer raises UnboundLocalError at the return statement.
    fasta_id = None
    sequence = ""
    for line in urllib.request.urlopen(fasta_url):
        text = line.decode("utf-8").strip()
        if text.startswith(">"):
            fasta_id = text
        else:
            sequence += text
    return(fasta_id, sequence)
fetch_uniprot_fasta("A2Z669")
# So this way you can just read and print a fasta file from the web. Can we also immediately parse it as fasta record?
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
def create_seq_record(name, sequence):
    """
    Build a Biopython SeqRecord from a record name and a plain
    sequence string.
    """
    record = SeqRecord(Seq(sequence))
    record.id = name
    return record
# +
(name, sequence) = fetch_uniprot_fasta("A2Z669")
print(create_seq_record(name, sequence))
# -
# This is not strictly necessary, but I thought it was nice to have.
# Let's just continue with looking for motifs in the sequences.
import regex
def search_motif_positions(sequence, motif):
    """
    Search a sequence for a motif given as a regular expression.
    Return the 1-based start positions as a list, or False when the
    motif does not occur in the sequence.
    """
    #Method thanks to moinudin: https://stackoverflow.com/a/4664889
    hits = [match.start() + 1
            for match in regex.finditer(r"%s" % motif, sequence)]
    return hits if hits else False
# The regex of the motif should be: "N[^P][S|T][^P]"
print(search_motif_positions(sequence, "N[^P][S|T][^P]"))
regex.findall(r"N[^P][S|T][^P]", "MKNKFKTQEELVNHLKTVGFVFANSEIYNGLANAWDYGPLGVLLKNNLKNLWWKEFVTKQKDVVGLDSAIILNPLVWKASGHLDNFSDPLIDCKNCKARYRADKLIESFDENIHIAENSSNEEFAKVLNDYEISCPTCKQFNWTEIRHFNLMFKTYQGVIEDAKNVVYLRPETAQGIFVNFKNVQRSMRLHLPFGIAQIGKSFRNEITPGNFIFRTREFEQMEIEFFLKEESAYDIFDKYLNQIENWLVSACGLSLNNLRKHEHPKEELSHYSKKTIDFEYNFLHGFSELYGIAYRTNYDLSVHMNLSKKDLTYFDEQTKEKYVPHVIEPSVGVERLLYAILTEATFIEKLENDDERILMDLKYDLAPYKIAVMPLVNKLKDKAEEIYGKILDLNISATFDNSGSIGKRYRRQDAIGTIYCLTIDFDSLDDQQDPSFTIRERNSMAQKRIKLSELPLYLNQKAHEDFQRQCQK")
def find_a_protein_motif(input_file):
    """
    The complete program:
    - read a file with accession IDs
    - download the associated amino acid sequences from UniProt
    - look for the N-glycosylation motif N{P}[ST]{P}
    - if the motif exists: print accession ID and 1-based start positions
    """
    motif = "N[^P][S|T][^P]"
    accession_ids = read_ids_from_file(input_file)
    for accession_id in accession_ids:
        (fasta_id, sequence) = fetch_uniprot_fasta(accession_id)
        # Search once and reuse the result (the original scanned every
        # sequence twice: once for the truth test, once for printing).
        positions = search_motif_positions(sequence, motif)
        if positions:
            print("%s\n%s" % (accession_id, " ".join(map(str, positions))))
    return(None)
find_a_protein_motif("data/Example_finding_a_protein_motif.txt")
# Now that seems to be working. Let's see if it works on the real deal:
find_a_protein_motif("data/rosalind_mprt.txt")
# Apparently this was the wrong answer, unfortunately. Where could the error be?
#
# What if I just try it again? Maybe it was just this set for whatever reason.
find_a_protein_motif("data/rosalind_mprt2.txt")
# No, that didn't do it. Then I have to find something else...
#
# Let's make a test version to debug the problem. A function that shows all IDs, sequences, motifs, positions to help check if it all looks right.
# +
def find_a_protein_motif_debug(input_file):
    """
    Debug variant of the full pipeline: prints each accession ID, the
    downloaded FASTA header and sequence, and the motif-search output,
    so a wrong final answer can be traced back to a single step.
    """
    motif = "N[^P][S|T][^P]"
    accession_ids = read_ids_from_file(input_file)
    for accession_id in accession_ids:
        print("ID: %s" % accession_id)
        (fasta_id, sequence) = fetch_uniprot_fasta(accession_id)
        print("Fasta ID: %s\nSequence: %s" % (fasta_id, sequence))
        search_motif_positions_debug(sequence, motif)
    return(None)
def search_motif_positions_debug(sequence, motif):
    """
    Search a sequence for a given motif (as regex), printing the raw
    matches and the 1-based positions for inspection. Return the
    positions as a list, or False if the motif is absent.

    NOTE(review): like the first version, this does not pass
    overlapped=True, so overlapping motif occurrences are missed —
    which is exactly the bug being hunted here.
    """
    print("Found motifs: %s" % regex.findall(r"%s" % motif, sequence))
    positions = [m.start() + 1 for m in regex.finditer(
r"%s" % motif, sequence)]
    #Method thanks to moinudin: https://stackoverflow.com/a/4664889
    print("Motif positions: %s" % positions)
    if len(positions) > 0:
        return(positions)
    else:
        return(False)
# -
find_a_protein_motif_debug("data/rosalind_mprt.txt")
# When looking back at the example sequences, I found the problem! Consider these example answers:
#
# **By Rosalind**
# ```
# B5ZC00
# 85 118 142 306 395
# P07204_TRBM_HUMAN
# 47 115 116 382 409
# P20840_SAG1_YEAST
# 79 109 135 248 306 348 364 402 485 501 614
# ```
#
# **By my function**
# ```
# B5ZC00
# 85 118 142 306 395
# P07204_TRBM_HUMAN
# 47 115 382 409
# P20840_SAG1_YEAST
# 79 109 135 248 306 348 364 402 485 501 614
# ```
#
# And look carefully at the list of positions given for "P07204_TRBM_HUMAN"...
#
# ... see it yet?
#
# The answer is...
#
# `116` is missing from my function's output! It does not list overlapping matches.
# So I need to find a regex function that matches all overlapping matches too!
#
# Perhaps take another look at: https://stackoverflow.com/a/18966891?
# +
#Let's take this sequence specifically and see how to fix my problem there!
test_id = ">sp|P07204|TRBM_HUMAN Thrombomodulin OS=Homo sapiens OX=9606 GN=THBD PE=1 SV=2"
test_sequence = "MLGVLVLGALALAGLGFPAPAEPQPGGSQCVEHDCFALYPGPATFLNASQICDGLRGHLMTVRSSVAADVISLLLNGDGGVGRRRLWIGLQLPPGCGDPKRLGPLRGFQWVTGDNNTSYSRWARLDLNGAPLCGPLCVAVSAAEATVPSEPIWEEQQCEVKADGFLCEFHFPATCRPLAVEPGAAAAAVSITYGTPFAARGADFQALPVGSSAAVAPLGLQLMCTAPPGAVQGHWAREAPGAWDCSVENGGCEHACNAIPGAPRCQCPAGAALQADGRSCTASATQSCNDLCEHFCVPNPDQPGSYSCMCETGYRLAADQHRCEDVDDCILEPSPCPQRCVNTQGGFECHCYPNYDLVDGECVEPVDPCFRANCEYQCQPLNQTSYLCVCAEGFAPIPHEPHRCQMFCNQTACPADCDPNTQASCECPEGYILDDGFICTDIDECENGGFCSGVCHNLPGTFECICGPDSALARHIGTDCDSGKVDGGDSGSGEPPPSPTPGSTLTPPAVGLVHSGLLIGISIASLCLVVALLALLCHLRKKQGAARAKMEYKCAAPSKEVVLQHVRTERTPQRL"
test_matches = regex.findall(r"N[^P][S|T][^P]", test_sequence, overlapped=True)
print([match for match in test_matches])
test_iters = regex.finditer(r"N[^P][S|T][^P]", test_sequence, overlapped=True)
print([match.start() + 1 for match in test_iters])
# -
# It looks like I was missing only the `overlapped=True` part. If I add that, I should be good.
def search_motif_positions2(sequence, motif):
    """
    Search a sequence for a motif given as a regular expression,
    this time including overlapping occurrences. Return the 1-based
    start positions as a list, or False when the motif is absent.
    """
    #Method thanks to moinudin: https://stackoverflow.com/a/4664889
    hits = [match.start() + 1
            for match in regex.finditer(r"%s" % motif, sequence,
                                        overlapped=True)]
    return hits if hits else False
def find_a_protein_motif2(input_file):
    """
    The complete program (overlap-aware version):
    - read a file with accession IDs
    - download the associated amino acid sequences from UniProt
    - look for the N-glycosylation motif, counting overlapping matches
    - if the motif exists: print accession ID and 1-based start positions
    """
    motif = "N[^P][S|T][^P]"
    accession_ids = read_ids_from_file(input_file)
    for accession_id in accession_ids:
        (fasta_id, sequence) = fetch_uniprot_fasta(accession_id)
        # Search once and reuse the result (the original scanned every
        # sequence twice: once for the truth test, once for printing).
        positions = search_motif_positions2(sequence, motif)
        if positions:
            print("%s\n%s" % (accession_id, " ".join(map(str, positions))))
    return(None)
# So now these should give different results than before if I use the same input files. Let's see how that goes.
find_a_protein_motif2("data/Example_finding_a_protein_motif.txt")
# So far so good...
find_a_protein_motif2("data/rosalind_mprt.txt")
# Indeed there is a minor difference here! Look at the positions `168` and `169` for `P01047_KNL2_BOVIN`.
find_a_protein_motif2("data/rosalind_mprt2.txt")
# So this one should differ in the positions for `P02974_PMM1_NEIGO`: `68` has been added compared to the previous attempt.
#
# Now I feel I am ready for the challenge again. Let's do it!
find_a_protein_motif2("data/rosalind_mprt3.txt")
# ## Success!!
#
# I did it! Indeed the overlapping motifs were what I missed first and what has been fixed in this second version.
|
MPRT-Finding_a_protein_motif.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Tce3stUlHN0L"
# ##### Copyright 2019 The TensorFlow Authors.
#
#
# + colab_type="code" id="tuOe1ymfHZPu" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="MfBg1C5NB3X0"
# # Hyperparameter Search
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/hyperparameter_search.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/community/en/hyperparameter_search.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="xHxb-dlhMIzW"
# ## Overview
# Hyperparameter tuning (or search) — often described as something of a black art — is the process of choosing some of the parameters of a deep learning model in order to obtain the best possible performance for that architecture. There are quite a few tools out there that do a decent job of tuning parameters, but none are as straightforward, robust and state-of-the-art as Keras-Tuner.
# #
# This notebook will show how the parameters can be tuned manually and using Keras-Tuner. But first, here's a peek at few of the tools:
#
# ### HyperParameter Tuning search
# - `Hyperopt`: a popular Python library for optimizing over all sorts of complex
# search spaces (including real values such as the learning rate, or discrete values
# such as the number of layers).
# - `Hyperas, kopt or Talos`: optimizing hyperparameters for Keras model (the first
# two are based on Hyperopt).
# - `Scikit-Optimize (skopt)`: a general-purpose optimization library. The Bayes
# SearchCV class performs Bayesian optimization using an interface similar to Grid
# SearchCV .
# - `Spearmint`: a Bayesian optimization library.
# - `Sklearn-Deap`: a hyperparameter optimization library based on evolutionary
# algorithms, also with a GridSearchCV -like interface. [Link](https://github.com/rsteca/sklearn-deap)
# - `keras-tuner`: Bayesian as well as RandomSearch based tuning library that is known as "Hypertuning for humans"
#
# + [markdown] colab_type="text" id="MUXex9ctTuDB"
# ## Setup
# + id="2Ks23ztk_eAK" colab_type="code" colab={}
import tensorflow as tf
assert tf.__version__.startswith('2')
print(f'{tf.__version__}')
# + id="VFEo5cXuNuCI" colab_type="code" colab={}
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RandomizedSearchCV
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Dense, Dropout, Conv2D, Flatten, Activation
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# + [markdown] id="7gTyBUPIbhBz" colab_type="text"
# ## Loading the dataset
# + colab_type="code" id="7sGyQVklCifB" colab={}
mnist = tf.keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# + colab_type="code" id="yIBDfFqMChFN" colab={}
X_train = tf.cast(np.reshape(X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2], 1)), tf.float64)
X_test = tf.cast(np.reshape(X_test, (X_test.shape[0], X_test.shape[1], X_test.shape[2], 1)), tf.float64)
# + id="3FgkTUsQAUcX" colab_type="code" colab={}
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
# + [markdown] id="9UF9AKHgajrh" colab_type="text"
# ## Manual Hyperparameter Tuning
# + id="dx_nROJfas9k" colab_type="code" colab={}
model = tf.keras.models.Sequential()
model.add(Conv2D(32, (3,3), activation='relu', kernel_initializer='he_uniform', input_shape=(28,28,1)))
model.add(Conv2D(64, (3,3), activation='relu', kernel_initializer='he_uniform'))
model.add(Flatten())
model.add(Dense(20))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001), metrics=['accuracy'])
model.summary()
# + id="L0cRUN9Fas7S" colab_type="code" colab={}
model.fit(X_train, y_train, epochs=5, batch_size=128)
# + [markdown] id="PhdapEGqa4f4" colab_type="text"
# Although this works, there is an element of luck and expertise to tune hyperparameters effectively. The use of Keras-Tuner is discussed below that performs the tuning effectively.
# + [markdown] id="h1v5CRo6Zb4k" colab_type="text"
# ## Keras-Tuner - Hyperparameter Tuning
# ---
# ### Features of Keras-Tuner
# - **Intuitive API**: As easy as 1,2,3
# - **State of the art hypertuner algorithms**
# - **Tunable architectures ready to go**
# - **Seamless experiments recording**: Automatic recording to analyse and reproduce your results
#
# **NOTE**: Do not download the Pypi version of keras-tuner. Follow the steps in the cell below for downloading.
# + id="EpP6AmH8ZO9C" colab_type="code" colab={}
# use pip install keras-tuner once https://github.com/keras-team/keras-tuner/issues/71 is fixed in the pip package
# !pip install -q git+https://github.com/keras-team/keras-tuner
# + id="xOG0h4IKZO6x" colab_type="code" colab={}
import kerastuner
from kerastuner.tuners import RandomSearch
# + id="VKnuKpg6CzxA" colab_type="code" colab={}
# Step 1: Wrap model in a function
def model_fn(hp):
    """Build a compiled CNN whose layer sizes, depth, dropout rate and
    learning rate are drawn from the Keras-Tuner `hp` search space."""
    # Step 2: Define the hyper-parameters
    LR = hp.Choice('learning_rate', [0.001, 0.0005, 0.0001])
    # NOTE(review): the third positional argument of hp.Float is `step`;
    # a step of 5 on a [0.0, 0.5] range looks unintended — confirm
    # whether step=0.1 was meant.
    DROPOUT_RATE = hp.Float('dropout_rate', 0.0, 0.5, 5)
    NUM_DIMS = hp.Int('num_dims', 8, 32, 8)
    NUM_LAYERS = hp.Int('num_layers', 1, 3)
    L2_NUM_FILTERS = hp.Int('l2_num_filters', 8, 64, 8)
    L1_NUM_FILTERS = hp.Int('l1_num_filters', 8, 64, 8)
    # Step 3: Replace static values with hyper-parameters
    model = tf.keras.models.Sequential()
    model.add(Conv2D(L1_NUM_FILTERS, (3,3), activation='relu', kernel_initializer='he_uniform', input_shape=(28,28,1)))
    model.add(Conv2D(L2_NUM_FILTERS, (3,3), activation='relu', kernel_initializer='he_uniform'))
    model.add(Flatten())
    for _ in range(NUM_LAYERS):
        model.add(Dense(NUM_DIMS))
    model.add(Dropout(DROPOUT_RATE))
    model.add(Dense(10, activation='softmax'))
    # BUG FIX: LR was defined but never used — the optimizer was hard-coded
    # to 0.001, so the 'learning_rate' choice had no effect on the search.
    model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(LR), metrics=['accuracy'])
    return model
# + id="G9GdZ50QCzub" colab_type="code" colab={}
tuner = RandomSearch(
model_fn,
objective='val_accuracy',
max_trials=5,
executions_per_trial=3,
directory='temp_dir')
# + id="u3kr9AB8Czsk" colab_type="code" colab={}
tuner.search_space_summary()
# + id="IPnHFy0gCzqN" colab_type="code" colab={}
tuner.search(X_train, y_train, epochs=5, validation_data=(X_test, y_test))
# + id="mrN7-zP2b0zz" colab_type="code" colab={}
models = tuner.get_best_models(num_models=3)
# + id="Vudn2Rjjb0w4" colab_type="code" colab={}
tuner.results_summary()
# + [markdown] id="dyg4mM9lb6BZ" colab_type="text"
# ## Upcoming Features in Keras-Tuner
# - **Ecosystem Integration**: Integration out of the box with Colab, GCP and many more...
# - **Online Dashboard**: App to monitor the tuning on the go!
# + [markdown] colab_type="text" id="UhNtHfuxCGVy"
# ## References
# - <NAME>: Cutting Edge Tensorflow at Google I/O 19 - [[Youtube Link](https://www.youtube.com/watch?v=Un0JDL3i5Hg&t=458s)]
# - Keras-Tuner GitHub Repository - [[GitHub Link](https://github.com/keras-team/keras-tuner)]
|
community/en/hyperparamter_search.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CSV Analysis Example
# This is a demo notebook showing how to use the `orion.analysis.analyze` function on a CSV signal.
# ## 1. Load the data
#
# In the first step, we setup the environment and load the CSV that we want to process.
#
# To do so, we need to import the `orion.data.load_signal` function and call it passing
# the path to the CSV file.
#
# In this case, we will be loading the `S-1.csv` file from inside the `data` folder.
# +
import logging;
logging.basicConfig(level=logging.ERROR)
logging.getLogger().setLevel(level=logging.ERROR)
import warnings
warnings.simplefilter("ignore")
# +
from orion.data import load_signal
signal_path = 'data/S-1.csv'
data = load_signal(signal_path)
data.head()
# -
# ## 2. Detect anomalies using a pipeline
#
# Once we have the data, let us try to use the LSTM pipeline to analyze it and search for anomalies.
#
# In order to do so, we will have import the `orion.analysis.analyze` function and pass it
# the loaded data and the path to the pipeline JSON that we want to use.
#
# In this case, we will be using the `lstm_dynamic_threshold.json` pipeline from inside the `orion` folder.
#
# The output will be a ``pandas.DataFrame`` containing a table with the detected anomalies.
# +
from orion.analysis import analyze
pipeline_path = '../orion/pipelines/lstm_dynamic_threshold.json'
anomalies = analyze(pipeline_path, data)
anomalies
|
notebooks/CSV Analysis Example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
from numpy import angle, max, pi, sin, zeros
import matplotlib
# %matplotlib inline
data = np.hstack((np.array(range(10))[:,np.newaxis]))
N = 256
data = np.linspace(0,1,N)[np.newaxis,:]
theta = np.linspace(0, 2*np.pi, N)
def plot_clock(ax):
    """Style a polar axes as a 12-hour clock face: 12 at the top,
    clockwise direction, hour labels at 12/3/6/9, no radial labels."""
    ax.set_yticklabels([])
    ax.set_theta_zero_location('N')  # 0 rad (12 o'clock) at the top
    ax.set_theta_direction(-1)       # angles increase clockwise, like a clock
    ax.set_xticks([0, np.pi/2, np.pi, np.pi * 3/2])
    ax.set_xticklabels([12, 3, 6, 9])
    return ax
def plot_clocks(cmap='hsv'):
    """Draw side-by-side AM and PM clock faces colored by *cmap*.

    Uses the module-level `theta` and `data` arrays. With vmin=0/vmax=2,
    the AM face spans the first half of the colormap and the PM face
    (data + 1) spans the second half. Returns the figure.
    """
    fig = figure(figsize=(3.7, 1.5))
    ax = fig.add_subplot(121, polar=True)
    ax.pcolormesh(theta,[0,1], data, cmap=cmap, vmin=0, vmax=2)
    ax = plot_clock(ax)
    ax.set_title('AM')
    ax = fig.add_subplot(122, polar=True)
    ax.pcolormesh(theta,[0,1], data + 1, cmap=cmap, vmin=0, vmax=2)
    ax = plot_clock(ax)
    ax.set_title('PM')
    return fig
colors = np.vstack((plt.cm.viridis.colors, np.flipud(plt.cm.plasma.colors)[:250]))
cmap = matplotlib.colors.ListedColormap(colors)
_ = plot_clocks(cmap=cmap)
# So start with almost black in the middle of the night 2am and then into pink for sunrise 6am then into blue for day then into orange and then into purple for night
np.array(plt.cm.plasma.colors).shape
np.array(plt.cm.viridis.colors).shape
fig = figure(figsize=(3.7, 1.5))
ax = fig.add_subplot(121, polar=True)
ax.pcolormesh(theta,[0,1], data, cmap='hsv', vmin=0, vmax=2)
ax = plot_clock(ax)
ax.set_title('AM')
ax = fig.add_subplot(122, polar=True)
ax.pcolormesh(theta,[0,1], data + 1, cmap='hsv', vmin=0, vmax=2)
ax = plot_clock(ax)
ax.set_title('PM')
# +
def complex_to_rgb(complex_data, invert=False):
    """Map a 2-D complex array to an RGB image of shape (H, W, 3).

    Phase drives the hue via phase-shifted sine waves per channel, and
    the magnitude (normalized to its global maximum) drives brightness.
    With invert=True the colors are flipped (1 - rgb).
    """
    theta = np.angle(complex_data)
    mag = np.abs(complex_data)
    mag = mag / mag.max()
    rgb = np.zeros(complex_data.shape[:2] + (3,))
    rgb[..., 0] = 0.5 * (np.sin(theta) + 1.0) * mag
    rgb[..., 1] = 0.5 * (np.sin(theta + np.pi / 2) + 1.0) * mag
    rgb[..., 2] = 0.5 * (1.0 - np.sin(theta)) * mag
    return 1 - rgb if invert else rgb
N = 1024
x = np.linspace(-1, 1, N)
y = np.linspace(-1, 1, N)
X,Y = np.meshgrid(x,y)
R = np.sqrt(X*X + Y*Y)
PHI = np.arctan2(Y, X)
fig = figure()
ax = fig.add_subplot(111, polar=True)
ax.imshow(complex_to_rgb(R*np.exp(1j*PHI) * (R<1), invert=True), extent=(0, 2*np.pi, 0, 1024))
ax.set_xticks([ 0, np.pi/2, np.pi, 3*np.pi/2])#-.5)
ax.set_yticks([0, N/3, 2*N/3, N])
ax.set_xticklabels(['', '$0$', r'$\pi/2$', r'$\pi$', r'$3\pi/2$'])
ax.set_yticklabels([])
# -
# !conda upgrade matplotlib -y
# +
def complex_to_rgb(complex_data, invert=False):
    """Map a 2-D complex array to an RGB image of shape (H, W, 3).

    Phase drives the hue via phase-shifted sine waves per channel, and
    the amplitude (normalized to its global maximum) drives brightness.
    With invert=True the colors are flipped (1 - A).
    """
    # Local import so this cell is self-contained when re-run in isolation.
    from numpy import angle, max, pi, sin, zeros
    phase = angle(complex_data)
    amplitude = abs(complex_data)
    # Normalize brightness by the global maximum amplitude.
    amplitude = amplitude/max(max(amplitude))
    A = zeros((complex_data.shape[0], complex_data.shape[1], 3))
    A[:,:,0] = .5*(sin(phase)+1)*amplitude
    A[:,:,1] = .5*(sin(phase+pi/2)+1)*amplitude
    A[:,:,2] = .5*(-sin(phase)+1)*amplitude
    if(invert):
        return 1-A
    else:
        return A
import numpy as np
from matplotlib.pyplot import figure
N = 1024
x = np.linspace(-1, 1, N)
y = np.linspace(-1, 1, N)
X,Y = np.meshgrid(x,y)
R = np.sqrt(X*X + Y*Y)
PHI = np.arctan2(Y, X)
fig = figure()
ax = fig.add_subplot(111, polar=True)
ax.imshow(complex_to_rgb(R*np.exp(1j*PHI) * (R<1), invert=True), extent=[0,2*np.pi, 0,1024])
ax.set_rgrids([1,N/3,2*N/3], angle=45)
ax.set_xticks([0, np.pi/2, np.pi, 3*np.pi/2])
ax.set_yticks([0, N/3, 2*N/3, N])
ax.set_xticklabels([r'$0$', r'$\pi/2$', r'$\pi$', r'$3\pi/2$'])
ax.set_yticklabels([r'0', r'$1/3$', r'$2/3$', '1'])
# -
|
scratch.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Principal Component Analysis
# PCA is a dimensionality reduction technique; it lets you distill multi-dimensional data down to fewer dimensions, selecting new dimensions that preserve variance in the data as best it can.
#
# We're not talking about Star Trek stuff here; let's make it real - a black & white image for example, contains three dimensions of data: X position, Y position, and brightness at each point. Distilling that down to two dimensions can be useful for things like image compression and facial recognition, because it distills out the information that contributes most to the variance in the data set.
#
# Let's do this with a simpler example: the Iris data set that comes with scikit-learn. It's just a small collection of data that has four dimensions of data for three different kinds of Iris flowers: The length and width of both the petals and sepals of many individual flowers from each species. Let's load it up and have a look:
# +
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
import pylab as pl
from itertools import cycle
# Load the bundled Iris data set and report its basic dimensions.
iris = load_iris()
numSamples, numFeatures = iris.data.shape
# Print the sample count, the feature count and the class names, one per line.
for item in (numSamples, numFeatures, list(iris.target_names)):
    print(item)
# -
# So, this tells us our data set has 150 samples (individual flowers) in it. It has 4 dimensions - called features here, and three distinct Iris species that each flower is classified into.
#
# While we can visualize 2 or even 3 dimensions of data pretty easily, visualizing 4D data isn't something our brains can do. So let's distill this down to 2 dimensions, and see how well it works:
# Keep the full 4-D feature matrix; rows are flowers, columns are measurements.
X = iris.data
# Fit a 2-component PCA; whiten=True rescales the components to unit variance.
pca = PCA(n_components=2, whiten=True).fit(X)
# Project the 4-D samples onto the two principal components.
X_pca = pca.transform(X)
# The two principal components, expressed as 4-D basis vectors:
print(pca.components_)
# Fraction of the total variance captured by each component, and their sum:
print(pca.explained_variance_ratio_)
print(sum(pca.explained_variance_ratio_))
# That's pretty cool. Although we have thrown away two of our four dimensions, PCA has chosen the remaining two dimensions well enough that we've captured 92% of the variance in our data in a single dimension alone! The second dimension just gives us an additional 5%; altogether we've only really lost less than 3% of the variance in our data by projecting it down to two dimensions.
#
# As promised, now that we have a 2D representation of our data, we can plot it:
# +
# %matplotlib inline
from pylab import *
# One colour per Iris species, cycling through red/green/blue.
colors = cycle('rgb')
target_ids = range(len(iris.target_names))
pl.figure()
# Scatter the 2-D PCA projection, one colour/label per species.
for i, c, label in zip(target_ids, colors, iris.target_names):
    pl.scatter(X_pca[iris.target == i, 0], X_pca[iris.target == i, 1],
               c=c, label=label)
pl.legend()
pl.show()
# -
# You can see the three different types of Iris are still clustered pretty well. If you think about it, this probably works well because the overall size of an individual flower probably makes both the petal and sepal sizes increase by a similar amount. Although the actual numbers on this graph have no intuitive meaning, what we're probably seeing is measure of the ratio of width to height for petals and sepals - and PCA distilled our data down to that on its own.
# ## Activity
# Our results suggest we could actually distill this data down to a single dimension and still preserve most of its variance. Try it! Do a PCA down to one component, and measure the results.
|
MLCourse/PCA.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### CatBoost + ADSTuner
# * added feature engineering
# * added year, removed temp
# * removing day gave the best results. (The range of days in the train set doesn't match the test set.)
#
# +
import pandas as pd
import numpy as np
import catboost as cat
# to use ADSTuner
from ads.hpo.search_cv import ADSTuner
from ads.hpo.stopping_criterion import *
from ads.hpo.distributions import *
# to encode categoricals
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import make_scorer
import seaborn as sns
import matplotlib.pyplot as plt
# see utils.py
from utils import add_features, rmsle, train_encoders, apply_encoders
from utils import show_tuner_results, show_categoricals
# set seaborn look&feel
sns.set()
# +
# Globals for the tuning session and the input file names.
STUDY_NAME = "Bike sharing-cat1"
# number of folds for the K-fold cross-validation run inside ADSTuner
FOLDS = 7
# tuning time budget, in seconds (1 hour)
TIME_BUDGET = 3600
FILE_TRAIN = "train.csv"
FILE_TEST = "test.csv"
# +
# load the train dataset
data_orig = pd.read_csv(FILE_TRAIN)
#
# add engineered features (see add_features in utils.py)
#
data_extended = add_features(data_orig)
# have a look at the last rows
data_extended.tail()
# -
# Show which columns look categorical (fewer than thr distinct values).
show_categoricals(data_extended, thr=100)
# +
# ok, we will treat as categorical: holiday, hour, season, weather, windspeed, workingday, year
# +
all_columns = data_extended.columns
# columns to be ignored:
# atemp and temp are strongly correlated (0.98), so we keep only one of them
del_columns = ["datetime", "casual", "registered", "temp"]
TARGET = "count"
# treated as categorical: season, holiday, workingday, weather, windspeed, hour, year
cat_cols = ["season", "holiday", "workingday", "weather", "windspeed", "hour", "year"]
# everything else (minus target and ignored columns) is numerical
num_cols = list(set(all_columns) - set([TARGET]) - set(del_columns) - set(cat_cols))
features = sorted(cat_cols + num_cols)
# sanity-check the column bookkeeping
print("All columns:", len(all_columns))
print("Ignored columns:", len(del_columns))
print("Target:", len([TARGET]))
print("Categorical columns:", len(cat_cols))
print("Numerical columns:", len(num_cols))
print("All the features", len(features))
# -
# drop ignored columns
data_used = data_extended.drop(del_columns, axis=1)
# +
# Label-encode the categorical columns (windspeed needs special treatment,
# handled inside train_encoders / apply_encoders -- see utils.py).
# NOTE(review): encoders are fitted on data_extended but applied to data_used;
# confirm both hold the same categorical values at this point.
le_list = train_encoders(data_extended)
# apply the fitted encoders
data_used = apply_encoders(data_used, le_list)
# CatBoost wants the categorical features as a list of column indexes
cat_columns_idxs = [i for i, col in enumerate(features) if col in cat_cols]
# -
# ### ADSTuner session
# +
#
# Here we define the strategy: the hyper-parameter space ADSTuner will explore.
#
params = {
    "iterations": CategoricalDistribution([1000, 2000, 3000, 4000, 5000]),
    "learning_rate": LogUniformDistribution(low=1e-4, high=1e-2),
    "depth": IntUniformDistribution(5, 10),
    # NOTE(review): a fixed (non-distribution) value inside the strategy --
    # confirm ADSTuner accepts it; use_best_model also requires an eval set
    # when the model is actually trained.
    "use_best_model": True,
    # 'categorical_feature' : cat_columns_idxs,
}
alg_reg = cat.CatBoostRegressor(verbose=0)
# scorer for ADSTuner; rmsle is defined in utils.py.
# greater_is_better=False means scores are negated, so higher (closer to 0) is better.
scorer = make_scorer(rmsle, greater_is_better=False)
# for the list of built-in scorers: sorted(sklearn.metrics.SCORERS.keys())
tuner = ADSTuner(
    alg_reg, cv=FOLDS, strategy=params, scoring=scorer, study_name=STUDY_NAME, n_jobs=4
)
# NOTE(review): this call narrows the space to `depth` only -- verify it
# merges with, rather than replaces, the strategy defined above.
tuner.search_space({"depth": IntUniformDistribution(5, 10)})
x_train = data_used[features]
y_train = data_used[TARGET]
# run the tuning session until the time budget is exhausted
tuner.tune(x_train, y_train, exit_criterion=[TimeBudget(TIME_BUDGET)])
# -
# ### Analyze trials
# +
# get the status to see if the session completed
print(f"The tuner status is: {tuner.get_status()}")
print(f"Remaining time is: {round(tuner.time_remaining, 1)} sec.")
# +
# look only at completed trials, sorted with the best on top; the metric is in
# the "value" column. Scores are negated (greater_is_better=False), so the
# best trial is the largest value, i.e. the one closest to zero.
result_df = tuner.trials[tuner.trials["state"] == "COMPLETE"].sort_values(
    by=["value"], ascending=False
)
result_df.head(10)
# -
show_tuner_results(tuner)
tuner.plot_best_scores()
# ### train the model with the best params
# +
# %%time
# Train the final model with the best hyper-parameters found by ADSTuner.
# FIX(review): the original called lgb.LGBMRegressor, but lightgbm is never
# imported (NameError) and the whole notebook tunes CatBoost (see alg_reg
# above). Train a CatBoostRegressor instead, and pass the categorical column
# indexes through CatBoost's `cat_features` fit argument -- the original used
# LightGBM's `categorical_feature` keyword, which CatBoost does not accept.
# use_best_model (a fixed entry in the strategy) needs an eval_set at fit
# time, so drop it defensively if it leaked into best_params.
best_params = {k: v for k, v in tuner.best_params.items() if k != "use_best_model"}
model = cat.CatBoostRegressor(verbose=0, **best_params)
model.fit(x_train, y_train, cat_features=cat_columns_idxs)
# -
# ### Prediction and submission to Kaggle
test_orig = pd.read_csv(FILE_TEST)
# +
# apply the same feature engineering used on the train set
test_orig = add_features(test_orig)
# apply the encoders fitted on the train set
test_orig = apply_encoders(test_orig, le_list)
# restrict to the feature columns, in the same order used for training
x_test = test_orig[features]
# +
# score the test set with the trained model
score_test = model.predict(x_test)
# -
# ### prepare submission
df_sub = pd.read_csv("sampleSubmission.csv")
# +
# round predictions to whole numbers (the target is a ride count)
df_sub["count"] = np.round(score_test, 0)
# clip any negative prediction to zero -- a count cannot be negative
condition = df_sub["count"] < 0
df_sub.loc[condition, "count"] = 0
# +
# write the submission file
FILE_SUB_PREFIX = "sub-demo-001"
FILE_SUB = FILE_SUB_PREFIX + ".csv"
df_sub.to_csv(FILE_SUB, index=False)
# -
# ### Submission
# !kaggle competitions submit -c "bike-sharing-demand" -f $FILE_SUB -m "sub demo 001, adstuner"
# FIX(review): `lgb` is never imported in this notebook; report the version
# of catboost, the library actually used (imported as `cat` at the top).
print(cat.__version__)
|
catboost-adstuner.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro to Hidden Markov Models (optional)
# ---
# ### Introduction
#
# In this notebook, you'll use the [Pomegranate](http://pomegranate.readthedocs.io/en/latest/index.html) library to build a simple Hidden Markov Model and explore the Pomegranate API.
#
# <div class="alert alert-block alert-info">
# **Note:** You are not required to complete this notebook and it will not be submitted with your project, but it is designed to quickly introduce the relevant parts of the Pomegranate library that you will need to complete the part of speech tagger.
# </div>
#
# The notebook already contains some code to get you started. You only need to add some new functionality in the areas indicated; you will not need to modify the included code beyond what is requested. Sections that begin with **'IMPLEMENTATION'** in the header indicate that you need to fill in code in the block that follows. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
#
# <div class="alert alert-block alert-info">
# **Note:** Code and Markdown cells can be executed using the `Shift + Enter` keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
# </div>
# <hr>
# <div class="alert alert-block alert-warning">
# **Note:** Make sure you have selected a **Python 3** kernel in Workspaces or the hmm-tagger conda environment if you are running the Jupyter server on your own machine.
# </div>
# Silence library warnings so the notebook output stays readable.
import warnings
warnings.filterwarnings('ignore')
# Jupyter "magic methods" -- only need to be run once per kernel restart
# %load_ext autoreload
# %aimport helpers
# %autoreload 1
# +
# import python modules -- this cell needs to be run again if you make changes to any of the files
import matplotlib.pyplot as plt
import numpy as np
from helpers import show_model
from pomegranate import State, HiddenMarkovModel, DiscreteDistribution
# -
# ## Build a Simple HMM
# ---
# You will start by building a simple HMM network based on an example from the textbook [Artificial Intelligence: A Modern Approach](http://aima.cs.berkeley.edu/).
#
# > You are the security guard stationed at a secret under-ground installation. Each day, you try to guess whether it’s raining today, but your only access to the outside world occurs each morning when you see the director coming in with, or without, an umbrella.
#
# A simplified diagram of the required network topology is shown below.
#
# 
#
# ### Describing the Network
#
# <div class="alert alert-block alert-warning">
# $\lambda = (A, B)$ specifies a Hidden Markov Model in terms of a state transition probability distribution $A$ and an emission probability distribution $B$.
# </div>
#
# HMM networks are parameterized by two distributions: the emission probabilties giving the conditional probability of observing evidence values for each hidden state, and the transition probabilities giving the conditional probability of moving between states during the sequence. Additionally, you can specify an initial distribution describing the probability of a sequence starting in each state.
#
# <div class="alert alert-block alert-warning">
# At each time $t$, $X_t$ represents the hidden state, and $Y_t$ represents an observation at that time.
# </div>
#
# In this problem, $t$ corresponds to each day of the week and the hidden state represent the weather outside (whether it is Rainy or Sunny) and observations record whether the security guard sees the director carrying an umbrella or not.
#
# For example, during some particular week the guard may observe an umbrella ['yes', 'no', 'yes', 'no', 'yes'] on Monday-Friday, while the weather outside is ['Rainy', 'Sunny', 'Sunny', 'Sunny', 'Rainy']. In that case, $t=Wednesday$, $Y_{Wednesday}=yes$, and $X_{Wednesday}=Sunny$. (It might be surprising that the guard would observe an umbrella on a sunny day, but it is possible under this type of model.)
#
# ### Initializing an HMM Network with Pomegranate
# The Pomegranate library supports [two initialization methods](http://pomegranate.readthedocs.io/en/latest/HiddenMarkovModel.html#initialization). You can either explicitly provide the three distributions, or you can build the network line-by-line. We'll use the line-by-line method for the example network, but you're free to use either method for the part of speech tagger.
# create the (initially empty) HMM model; states and edges are added below
model = HiddenMarkovModel(name="Example Model")
# ### **IMPLEMENTATION**: Add the Hidden States
# When the HMM model is specified line-by-line, the object starts as an empty container. The first step is to name each state and attach an emission distribution.
#
# #### Observation Emission Probabilities: $P(Y_t | X_t)$
# We need to assume that we have some prior knowledge (possibly from a data set) about the director's behavior to estimate the emission probabilities for each hidden state. In real problems you can often estimate the emission probabilities empirically, which is what we'll do for the part of speech tagger. Our imaginary data will produce the conditional probability table below. (Note that the rows sum to 1.0)
#
# | | $yes$ | $no$ |
# | --- | --- | --- |
# | $Sunny$ | 0.10 | 0.90 |
# | $Rainy$ | 0.80 | 0.20 |
# +
# create the HMM model (re-created here so this cell is self-contained)
model = HiddenMarkovModel(name="Example Model")
# emission probability distributions, P(umbrella | weather)
sunny_emissions = DiscreteDistribution({"yes": 0.1, "no": 0.9})
sunny_state = State(sunny_emissions, name="Sunny")
# rainy emissions from the probability table above: P(yes|Rainy) = 0.8
rainy_emissions = DiscreteDistribution({"yes": 0.8, "no": 0.2})
rainy_state = State(rainy_emissions, name="Rainy")
# add the states to the model
model.add_states(sunny_state, rainy_state)
assert rainy_emissions.probability("yes") == 0.8, "The director brings his umbrella with probability 0.8 on rainy days"
print("Looks good so far!")
# -
# ### **IMPLEMENTATION:** Adding Transitions
# Once the states are added to the model, we can build up the desired topology of individual state transitions.
#
# #### Initial Probability $P(X_0)$:
# We will assume that we don't know anything useful about the likelihood of a sequence starting in either state. If the sequences start each week on Monday and end each week on Friday (so each week is a new sequence), then this assumption means that it's equally likely that the weather on a Monday may be Rainy or Sunny. We can assign equal probability to each starting state by setting $P(X_0=Rainy) = 0.5$ and $P(X_0=Sunny)=0.5$:
#
# | $Sunny$ | $Rainy$ |
# | --- | ---
# | 0.5 | 0.5 |
#
# #### State transition probabilities $P(X_{t} | X_{t-1})$
# Finally, we will assume for this example that we can estimate transition probabilities from something like historical weather data for the area. In real problems you can often use the structure of the problem (like a language grammar) to impose restrictions on the transition probabilities, then re-estimate the parameters with the same training data used to estimate the emission probabilities. Under this assumption, we get the conditional probability table below. (Note that the rows sum to 1.0)
#
# | | $Sunny$ | $Rainy$ |
# | --- | --- | --- |
# |$Sunny$| 0.80 | 0.20 |
# |$Rainy$| 0.40 | 0.60 |
# +
# create edges for each possible state transition in the model
# equal probability of a sequence starting on either a rainy or sunny day
model.add_transition(model.start, sunny_state, 0.5)
model.add_transition(model.start, rainy_state, 0.5)
# add sunny day transitions (we already know estimates of these probabilities
# from the problem statement)
model.add_transition(sunny_state, sunny_state, 0.8) # 80% sunny->sunny
model.add_transition(sunny_state, rainy_state, 0.2) # 20% sunny->rainy
# rainy day transitions, from the transition table above
model.add_transition(rainy_state, sunny_state, 0.4) # 40% rainy->sunny
model.add_transition(rainy_state, rainy_state, 0.6) # 60% rainy->rainy
# finally, call the .bake() method to finalize the model topology
model.bake()
assert model.edge_count() == 6, "There should be two edges from model.start, two from Rainy, and two from Sunny"
assert model.node_count() == 4, "The states should include model.start, model.end, Rainy, and Sunny"
print("Great! You've finished the model.")
# -
# ## Visualize the Network
# ---
# We have provided a helper function called `show_model()` that generates a PNG image from a Pomegranate HMM network. You can specify an optional filename to save the file to disk. Setting the "show_ends" argument True will add the model start & end states that are included in every Pomegranate network.
# Render the network to example.png (start/end states hidden for readability).
show_model(model, figsize=(5, 5), filename="example.png", overwrite=True, show_ends=False)
# ### Checking the Model
# The states of the model can be accessed using array syntax on the `HMM.states` attribute, and the transition matrix can be accessed by calling `HMM.dense_transition_matrix()`. Element $(i, j)$ encodes the probability of transitioning from state $i$ to state $j$. For example, with the default column order specified, element $(2, 1)$ gives the probability of transitioning from "Rainy" to "Sunny", which we specified as 0.4.
#
# Run the next cell to inspect the full state transition matrix, then read the explanation below.
# +
column_order = ["Example Model-start", "Sunny", "Rainy", "Example Model-end"] # Override the Pomegranate default order
column_names = [s.name for s in model.states]
order_index = [column_names.index(c) for c in column_order]
# re-order the rows/columns of the dense matrix to match column_order
transitions = model.dense_transition_matrix()[:, order_index][order_index, :]
print("The state transition matrix, P(Xt|Xt-1):\n")
print(transitions)
# element (2, 1) is P(Sunny | Rainy) = 0.4 in the order chosen above
print("\nThe transition probability from Rainy to Sunny is {:.0f}%".format(100 * transitions[2, 1]))
# -
# ## Inference in Hidden Markov Models
# ---
# Before moving on, we'll use this simple network to quickly go over the Pomegranate API to perform the three most common HMM tasks:
#
# <div class="alert alert-block alert-info">
# **Likelihood Evaluation**<br>
# Given a model $\lambda=(A,B)$ and a set of observations $Y$, determine $P(Y|\lambda)$, the likelihood of observing that sequence from the model
# </div>
#
# We can use the weather prediction model to evaluate the likelihood of the sequence [yes, yes, yes, yes, yes] (or any other state sequence). The likelihood is often used in problems like machine translation to weight interpretations in conjunction with a statistical language model.
#
# <div class="alert alert-block alert-info">
# **Hidden State Decoding**<br>
# Given a model $\lambda=(A,B)$ and a set of observations $Y$, determine $Q$, the most likely sequence of hidden states in the model to produce the observations
# </div>
#
# We can use the weather prediction model to determine the most likely sequence of Rainy/Sunny states for a known observation sequence, like [yes, no] -> [Rainy, Sunny]. We will use decoding in the part of speech tagger to determine the tag for each word of a sentence. The decoding can be further split into "smoothing" when we want to calculate past states, "filtering" when we want to calculate the current state, or "prediction" if we want to calculate future states.
#
# <div class="alert alert-block alert-info">
# **Parameter Learning**<br>
# Given a model topography (set of states and connections) and a set of observations $Y$, learn the transition probabilities $A$ and emission probabilities $B$ of the model, $\lambda=(A,B)$
# </div>
#
# We don't need to learn the model parameters for the weather problem or POS tagging, but it is supported by Pomegranate.
#
# ### IMPLEMENTATION: Calculate Sequence Likelihood
#
# Calculating the likelihood of an observation sequence from an HMM network is performed with the [forward algorithm](https://en.wikipedia.org/wiki/Forward_algorithm). Pomegranate provides the the `HMM.forward()` method to calculate the full matrix showing the likelihood of aligning each observation to each state in the HMM, and the `HMM.log_probability()` method to calculate the cumulative likelihood over all possible hidden state paths that the specified model generated the observation sequence.
#
# Fill in the code in the next section with a sample observation sequence and then use the `forward()` and `log_probability()` methods to evaluate the sequence.
# +
# a sample sequence of 'yes'/'no' umbrella observations to evaluate
observations = ['yes', 'no', 'yes']
assert len(observations) > 0, "You need to choose a sequence of 'yes'/'no' observations to test"
# forward matrix of the observed sequence; np.exp converts from
# log-likelihood back to likelihood
forward_matrix = np.exp(model.forward(observations))
# all-paths likelihood of the observed sequence (sum over all hidden paths)
probability_percentage = np.exp(model.log_probability(observations))
# Display the forward probabilities, one row per observation step
print(" " + "".join(s.name.center(len(s.name)+6) for s in model.states))
for i in range(len(observations) + 1):
    print(" <start> " if i==0 else observations[i - 1].center(9), end="")
    print("".join("{:.0f}%".format(100 * forward_matrix[i, j]).center(len(s.name) + 6)
                  for j, s in enumerate(model.states)))
print("\nThe likelihood over all possible paths " + \
      "of this model producing the sequence {} is {:.2f}%\n\n"
      .format(observations, 100 * probability_percentage))
# -
# ### IMPLEMENTATION: Decoding the Most Likely Hidden State Sequence
#
# The [Viterbi algorithm](https://en.wikipedia.org/wiki/Viterbi_algorithm) calculates the single path with the highest likelihood to produce a specific observation sequence. Pomegranate provides the `HMM.viterbi()` method to calculate both the hidden state sequence and the corresponding likelihood of the viterbi path.
#
# This is called "decoding" because we use the observation sequence to decode the corresponding hidden state sequence. In the part of speech tagging problem, the hidden states map to parts of speech and the observations map to sentences. Given a sentence, Viterbi decoding finds the most likely sequence of part of speech tags corresponding to the sentence.
#
# Fill in the code in the next section with the same sample observation sequence you used above, and then use the `model.viterbi()` method to calculate the likelihood and most likely state sequence. Compare the Viterbi likelihood against the forward algorithm likelihood for the observation sequence.
# +
# the same observation sequence used for the forward-likelihood example
observations = ['yes', 'no', 'yes']
# Viterbi: the single most-likely hidden state path and its log-likelihood
viterbi_likelihood, viterbi_path = model.viterbi(observations)
# viterbi_path[0] is the start state, so it is skipped in the printout
print("The most likely weather sequence to have generated " + \
      "these observations is {} at {:.2f}%."
      .format([s[1].name for s in viterbi_path[1:]], np.exp(viterbi_likelihood)*100)
)
# -
# ### Forward likelihood vs Viterbi likelihood
# Run the cells below to see the likelihood of each sequence of observations with length 3, and compare with the viterbi path.
# +
from itertools import product
observations = ['no', 'no', 'yes']
# transition (p) and emission (e) log-probabilities, copied from the model
p = {'Sunny': {'Sunny': np.log(.8), 'Rainy': np.log(.2)}, 'Rainy': {'Sunny': np.log(.4), 'Rainy': np.log(.6)}}
e = {'Sunny': {'yes': np.log(.1), 'no': np.log(.9)}, 'Rainy':{'yes':np.log(.8), 'no':np.log(.2)}}
o = observations
k = []  # likelihood of each possible 3-state weather sequence
vprob = np.exp(model.viterbi(o)[0])
print("The likelihood of observing {} if the weather sequence is...".format(o))
# brute-force enumeration of all 2^3 hidden state sequences
for s in product(*[['Sunny', 'Rainy']]*3):
    # P(start) = 0.5, then alternate emission and transition log-probs
    k.append(np.exp(np.log(.5)+e[s[0]][o[0]] + p[s[0]][s[1]] + e[s[1]][o[1]] + p[s[1]][s[2]] + e[s[2]][o[2]]))
    print("\t{} is {:.2f}% {}".format(s, 100 * k[-1], " <-- Viterbi path" if k[-1] == vprob else ""))
print("\nThe total likelihood of observing {} over all possible paths is {:.2f}%".format(o, 100*sum(k)))
# -
# ### Congratulations!
# You've now finished the HMM warmup. You should have all the tools you need to complete the part of speech tagger project.
|
HMM warmup (optional).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.4.5
# language: julia
# name: julia-0.4
# ---
# +
using PyPlot
# Synthetic polynomial signal on x in [0, 100].
x = collect(0.0:1.0:100)
y = 2.0.*x.^2 - 100.0 + x +0.0003.*x.^5
plot(x,y)
# Assume a 1% relative standard error on each observation.
ese = y./100
NN = length(y)
wx = 1. ./(ese.^2) # relative variance of observations
wy = zeros([1])+1. # systematic errors... not used so put them to 1
VAL = ese.^2
M = 2 # half-order of the spline (M=2 -> cubic smoothing spline)
N = length(x)
K = 1 # number of y columns
MD = 2 #spline mode
NC = length(y)
c = ones(NN,NC)
# NOTE(review): GCVSPL documents its work array as needing at least
# 6*(NN*M+1)+NN elements -- confirm a length-6 WK is sufficient here.
WK = Float64[1.,1.,1.,1.,1.,1.]
IER=Int32[1]
# -
# Call the Fortran gcvspl_ routine from the shared library; scalar arguments
# are passed by reference (&) as required by the Fortran calling convention.
ccall( (:gcvspl_, "./libgcvspl.so"), Void, (Ptr{Float64},Ptr{Float64},Ptr{Cint},Ptr{Float64},Ptr{Float64},Ptr{Cint},Ptr{Cint},Ptr{Cint},Ptr{Cint},Ptr{Float64},Ptr{Cdouble},Ptr{Cint},Ptr{Cdouble},Ref{Cint}),x,y,&NN,wx,wy,&M,&N,&K,&MD,VAL,c,&NC,WK,IER)
# Inspect the work array and the computed spline coefficients.
WK
c
|
deps/.ipynb_checkpoints/test-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ejercicio Hundir La Flota | Challenge yourself
#
# #### La fecha de entrega está limitada al próximo 16/05/2021
# En este ejercicio deberás programar el juego **hundir la flota** o [**batalla naval**](https://es.wikipedia.org/wiki/Batalla_naval_(juego))
# El tablero de este juego será una matriz de 10x10 dimensiones, matriz sobre la que plamaremos el estado de cada turno. Cada matriz de información continente del estado en cada momento, se mostrará por pantalla.
# 
# **Las reglas son las siguientes:**
#
# - Cada jugador deberá colocar en su tablero:
# - 4 barcos tamaño 2x1.
#
# - 3 barcos tamaño 3x1.
#
# - 2 barcos tamaño 4x1.
#
# - 1 barco tamaño 5x1.
#
#
# - En cada turno, el jugador correspondiente debe elegir una casilla de la matriz (10x10) y ambos jugadores marcarán la casilla en la matriz.
# - Si la casilla que se ha elegido es agua, será marcada como un una "**o**" y si la casilla que ha marcado es uno de los barcos, la casilla será marcada con una "**x**".
# - Los barcos han de ser mostrados con un "__#__" en la matriz.
# - Los espacios con agua serán mostrados como " " ó "**~**".
#
# Antes de que comience la partida, cada jugador deberá crear su estado inicial. El estado inicial es una configuración de la matriz con todos los barcos colocados. Para ello, el programa debería preguntar, uno a uno, dónde se quiere colocar cada barco. El estilo de pregunta, ha de ser:
#
# - Inserte la posición del barco 1 (2x1). El usuario ha de escribir si se quiere insertar en la 4a fila, en horizontal y ocupando las últimas dos columnas: `4h9:10`.
# - Inserte la posición del barco 2 (2x1). El usuario ha de escribir si se quiere insertar en la 8a fila, en horizontal y ocupando las dos primeras columnas: `8h1:2`.
# - Inserte la posición del barco 3 (5x1). Si se quiere colocar en vertical empezando por la fila 1 a la 5 y columna 8: `8v1:5`.
#
# También tendrás la opción de cargar una partida anteriormente definida en formato [JSON](https://www.w3schools.com/python/python_json.asp).
#
# Para saber quién empieza a jugar, los jugadores deberán ponerse de acuerdo para que uno de ellos sea el que llame a una función que devuelva dos valores random del 1 al 6, de tipo entero. El primer valor que se muestre por pantalla será el correspondiente al jugador que llama a la función, el segundo será el del contrincante. Quien saque un valor más alto, empieza.
#
# Además, el programa guardará el nombre de los jugadores y el histórico, es decir, todos los estados de la partida. Para ello se utilizará el formato JSON.
#
# Cuando un jugador haya creado su estado inicial, este estado también deberá ser guardado en formato JSON. Antes de que se inicie la partida, el otro jugador deberá cargar tu estado inicial y tú el suyo. El estado inicial del enemigo es el que usará tu programa para comprobar si las coordenadas que has utilizado han acertado en el blanco o no.
#
# El formato de preguntas por turno es:
#
# - `Inserte coordenadas a atacar:`. La respuesta válida será `0x8` --> fila 0, columna 8.
#
# Una vez insertada las coordenadas, deberá resultar que aparezcan tu matriz y la matriz de estado de tu enemigo, esta última con la nueva coordenada marcada sin la flota excepto si el torpedo hubiera dado en el blanco.
# **A modo de resumen:**
#
# 1. Se eligen los nombres de jugador. ¡Recuerda no dejar espacios!
#
# 2. Se pregunta sobre las posiciones de tu flota. Una vez que se haya definido, el sistema debe guardar en JSON el estado de tu flota. OPCIONAL: Puedes cargar una partida a partir de un fichero JSON.
#
# 3. Se carga el estado inicial de tu enemigo a partir de un fichero JSON que te habrá definido previamente. El fichero estará en una carpeta llamada "Partidas_Batalla_Naval". Para cargar el fichero se debe introducir la ruta o nombre del fichero.
#
# 4. Cuando ambos estéis en el mismo punto y se sepa quién es el primero, se debe escribir *primero* o *segundo*. Si eres primero, el sistema empezará preguntándote las coordenadas. Si eres segundo, el sistema preguntará por las coordenadas del rival.
#
# 5. Se debe mostrar por pantalla dos matrices: una matriz 10x10 que representa los puntos que has ido marcando, que no mostrará la flota del enemigo excepto si algún barco ha sido dañado, y otra a la derecha que muestre tu flota con los puntos que hayan sido atacados por parte del contrincante.
#
# 6. Cuando toda la flota de uno de los dos jugadores haya sido exterminada, se mostrará el ganador de la partida. Que si has sido tú el que ha programado este juego, ya habrás ganado incluso antes de empezar a jugar.
# ----------------------------------------------------
#
# Hay dos tipos de bonus:
#
# 1. Hacer todo usando clases. Como mínimo, crear una clase "Barco" y "Tablero"
# 2. Toda la parte del JSON es un Bonus extra plus ++ con el objetivo de que se pueda cargar una partida con una configuración predeterminada. De esta forma, podrías configurar una estructura de barcos y que alguien juegue contra ti en su ordenador con esa estructura.
# 3. ¿Habéis acabado todo esto antes de la fecha? Habla con los profesores :=)
# ----------------------------------------------------
# Hay que programar este juego en varios archivos `.py` de modo que puedas ejecutar dicho programa por la terminal desde cualquier dispositivo.
# +
# keep calm and keep coding
|
week3_course_python_III/day5_probability_statistics/exercise/Ejercicio_Hundir_Flota.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Step 3: Using your Graph
#
# In step 3 of this tutorial, we use our cleaned graph to create an Origin-Destination matrix (OD). Our setting remains Reykjavik, Iceland, as we look at travel times along the network to churches.
# This is a Jupyter Notebook extension which reloads all of the modules whenever you run the code
# This is optional but good if you are modifying and testing source code
# %load_ext autoreload
# %autoreload 2
import os, sys
import time
import networkx as nx
import geopandas as gpd
import pandas as pd
# add to your system path the location of the LoadOSM.py and GOSTnet.py scripts
sys.path.append("../")
import GOSTnets as gn
from shapely.geometry import Point
# Read in the cleaned graph produced by Step 2 of the tutorial.
pth = "./" # change this path to your working folder
G = nx.read_gpickle(os.path.join(pth, 'tutorial_outputs', r'iceland_network_clean.pickle'))
# At this stage each edge in the network has a property called 'length'. This was actually computed during Step 1 when the generateRoadsGDF function was run. The units of this length are in kilometres.
gn.example_edge(G)
# We want to convert length to time, so that we can conduct analysis on how long it takes to reach certain destinations.
#
# We do this using the convert_network_to_time function. We have used a factor of 1000, because the function is expecting meters, so we need to convert the units of kilometers to meters.
#
# The convert_network_to_time function uses a default speed dictionary that assigns speed limits to OSM highway types. However, it is possible to specify your own speed dictionary.
G_time = gn.convert_network_to_time(G, distance_tag = 'length', road_col = 'infra_type', factor = 1000)
# We can now use the 'time' property for each edge to work out how long it takes to get from one node to another!
gn.example_edge(G_time, 1)
# To do this for just one journey, we could call nx.shortest_path_length on any given origin or destination node. Let's list 10 of our nodes using this networkX function:
list(G_time.nodes)[:10]
A = list(G_time.nodes)[0] # first node in list
B = list(G_time.nodes)[10] # 10th node in list
travel_time = nx.shortest_path_length(G_time, A, B, weight = 'time')
print('The travel time between A and B is: %d seconds, or %d minutes!' % (travel_time, travel_time / 60))
# In our example, we want to use our network for Reykjavik to work out the travel time to local churches.
#
# Here, we import a shapefile for Reykjavik, and reproject it to WGS 84:
rek = gpd.read_file(os.path.join(pth, 'tutorial_data', 'rek2.shp'))
rek = rek.to_crs('epsg:4326')
# Next, We set a variable poly equal to just the geometry
poly = rek.geometry.iloc[0]
# We can visualize this in-line by just calling it:
poly
# With this in hand, we can read in a shapefile of destinations - here, the churches in Iceland. We use Shapely's 'within' command to select just those in the Reykjavik area:
churches = gpd.read_file(os.path.join(pth, 'tutorial_data', 'churches.shp'))
churches = churches.loc[churches.within(poly)]
# In order to perform network analysis we want to know the closest network node to each church. For this, we use the pandana snap function to snap the church locations to the road network:
churches
# The CRS (coordinate reference system) of the churches GeoDataFrame
churches.crs
# +
#view the pandana_snap doc string
# gn.pandana_snap?
# -
# We want the nearest node distance (NN_dist) to be measured in meters, so that is why we include the target_crs parameter specifying the correct UTM zone.
churches = gn.pandana_snap_c(G_time, churches, source_crs = 'epsg:4326', target_crs = 'epsg:32627', add_dist_to_node_col = True)
# As we can see from the NN_dist column, our church locations are very close to a node on the network in all cases
churches
# When calculating an OD-Matrix, we can only use the node IDs as inputs. So, we convert this column of our dataframe over to a list of unique values:
destinations = list(set(churches.NN))
destinations
# ## Further Analysis
# We would like to make an OD matrix where the origin is the cottage we are renting in the city, and the destinations are the churches in Reykjavik. This will help us work out how many churches we can see today!. First, we need to create the origin. It has coordinates: 64.152215, -22.002099 (Lat,Lon), so I make a point of this:
# A list with a single Shapely Point object is created with (x,y)
my_house = [Point(-22.002099, 64.152215)]
# Next, I load it into a geodataframe and snap it to the network:
mini_gdf = gpd.GeoDataFrame({'geometry':my_house}, crs = {'init':'epsg:4326'}, geometry = 'geometry', index = [1])
mini_gdf
origin_gdf = gn.pandana_snap_c(G_time, mini_gdf, source_crs = 'epsg:4326', target_crs = 'epsg:32627')
origin_gdf
# This is the nearest node (NN)
origin_gdf.iloc[0].NN
# Now, we can calculate the OD matrix using the GOSTNets calculate_OD function. Bear in mind it takes list objects as inputs:
origin = [origin_gdf.iloc[0].NN]
OD = gn.calculate_OD(G_time, origin, destinations, fail_value = 9999999)
# The OD matrix displays the time in seconds to reach each church
OD
# We can use minutes as the measure by dividing every value in the OD Matrix by 60. Then we can convert the array nicely into a pandas Dataframe,
OD = OD / 60
OD_df = pd.DataFrame(OD, columns = destinations, index = origin)
OD_df
# It appears nearly all of the churches are less than twenty minutes away.
#
# Now that you are up to speed on the basics, check out some of the example notebooks in the GOSTNets_PublicGoods 'Implementations' folder.
|
Tutorials/Step 3 - Using your Graph.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Percentile Calculation & Visualization
# ## Can you write a percentile function?
#
# Given a 1-dimensional array of numbers as input and a desired percentile point expressed as an integer between 0 and 100, your function should return a single number which is the data point nearest to the desired percentile point.
# +
# TODO: Uncomment and run me if you get errors such as "Module: seaborn does not exist"
# # !pip3 install probscale
# # !pip3 install seaborn
# +
# Can you come up with an algorithm to calculate percentile?
import numpy as np
data = np.array([1, 3, 5, 2, 3, 7, 8, 4, 10, 0, 6, 7, 3, 0, 3, 0, 5, 7, 10, 1, 4, 9, 3])
# +
def percentile(data, percent):
    """
    Calculate the item at a given percentile in a dataset.

    Uses the "nearest-rank" method: the returned value is always an
    actual element of `data` (the data point nearest to the desired
    percentile point), so it may differ slightly from np.percentile(),
    whose default method linearly interpolates between data points.

    data: type - np.array (1-dimensional, non-empty)
    percent: type - integer between 0 and 100

    Raises ValueError if `data` is empty.
    """
    if len(data) == 0:
        raise ValueError("cannot compute a percentile of empty data")
    ordered = np.sort(data)
    # Fractional position of the percentile point along the sorted data,
    # rounded to the nearest integer index so we return a real data point.
    idx = int(round((percent / 100) * (len(ordered) - 1)))
    return ordered[idx]
# 25th percentile
percentile(data, 25)
# check your work by comparing to np.percentile()
np.percentile(data, 25)
# -
# ### Visualizing Percentiles
# +
# Visualizing percentile with matplotlib & seaborn
# A closer look at this plot can be found here: https://matplotlib.org/mpl-probscale/tutorial/closer_look_at_viz.html
import seaborn
import matplotlib.pyplot as plt
import probscale
# Load in the "tips" dataset from the seaborn library for this example
tips = seaborn.load_dataset("tips")
fig, ax = plt.subplots(figsize=(6, 3))
# Use probscale's plottype 'pp' for percentile
# We also provide our labels, which column from the dataset we'd like to plot, and some stylistic arguments
fig = probscale.probplot(tips['total_bill'], ax=ax, plottype='pp', probax='y', datascale='linear',
problabel='Percentile', datalabel='Total Bill (USD)',
scatter_kws=dict(marker='.', linestyle='none', label='Bill Amount'))
# Set y limit - we don't show anything greater than this on our graph
ax.set_ylim(bottom=1, top=100)
# Calling despine() is just an aesthetic decision. Comment this line out and see what happens!
seaborn.despine()
# plt.grid() overlays a grid onto the plot, so we can more easily visualize the values our line overlays.
plt.grid()
# -
# ### Your turn! Use the above code to plot the percentiles for our variable 'data'!
# +
# TODO: your code goes here!
# HINT: you're going to want to make sure to change what we're plotting, as well as one of our labels.
# HINT: don't be afraid to use the code I gave you above!
# -
|
Notebooks/Percentile_Correlation_Quantiles/Percentile.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp parquet
# -
#hide
from nbdev.showdoc import *
#hide
# stellt sicher, dass beim verändern der core library diese wieder neu geladen wird
# %load_ext autoreload
# %autoreload 2
# # 00_01_Parquet_Basics
# Notebook to explore the basics of Parquet
# Links
# * Spark documentation https://spark.apache.org/docs/2.4.5/
# common imports
import os
import zipfile
from bfh_cas_bgd_fs2020_sa.core import *
# Check the current working dir
print(os.getcwd())
# Basic Definition
data_folder = "./data/" # folder with testdata
temp_folder = "./tmp/"
parquet_folder = "./parquet/"
data_files = ['2019q3.zip','2019q4.zip']
# ## Init Spark
# This code initialises the SparkSession and therefore the SparkContext. Pressing the link "Spark UI" opens the Spark UI for this session.
# init Spark
spark = get_spark_session() # Session anlegen
spark # display the moste important information of the session
# ## Datafiles
# The directory contains two zipfiles (2019q3.zip, 2019q4.zip). Each of them contains 4 csv files. The columns and the relation between these files are described in the readme.htm.<br>
# Each zip file contains all quarterly and yearly reports that were filled during the quarter denoted by the filename.
# ### Unpacking
# In a first step, the content of the files are unzipped and placed in separated folders
# Unpack each quarterly archive into its own subfolder of temp_folder.
for data_file in data_files:
    # e.g. "./data/2019q3.zip"
    path_to_zip_file = data_folder + data_file
    # Drop the trailing ".zip" (last 4 chars) to name the target directory,
    # e.g. "./tmp/2019q3"
    directory_to_extract_to = temp_folder + data_file[:-4]
    with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
        zip_ref.extractall(directory_to_extract_to)
# the sizes of the directories
print('Data folder: ', get_size_format(get_directory_size(data_folder)))
print('Temp folder: ', get_size_format(get_directory_size(temp_folder)))
# ## Using Parquet
# let us read a csv file and store it as a parquet file
# * API doc of the csv reader: https://spark.apache.org/docs/2.4.5/api/python/pyspark.sql.html?highlight=parquet#pyspark.sql.DataFrameReader.csv
# * API doc of the parquet writer: https://spark.apache.org/docs/2.4.5/api/python/pyspark.sql.html?highlight=parquet#pyspark.sql.DataFrameWriter.parquet
# imports that are used in this section
from pyspark.sql.types import StructType, StructField, StringType, DateType, DoubleType, IntegerType
from pyspark.sql.functions import countDistinct, year, month
# as a test csv file, "num.txt" from the folder 2019q3 is used
test_file = temp_folder + '2019q3/num.txt'
print('size of test file: ', get_size_format(os.path.getsize(test_file)))
# ### Reading a CSV file
# As a first step, the csv file has to be loaded into a spark df. <br>
# The file has a header row and the columns are separated by a TAB (\t).
df_test_num = spark.read.csv(test_file, sep='\t', header=True)
# When checking the format in the next cell, we see that all columns were read as a string. That is ok for most of the columns but when checking the definitions for the num.txt file in the readme.htm we see, that ddate is a date in the format 'yyyymmdd', qtrs and coreg are 'int' and value is a float.
print('first row: ', df_test_num.head(1))
print('number of rows :', df_test_num.count())
df_test_num.printSchema()
# show how many different reports are available in this quarter
df2 = df_test_num.select(countDistinct("adsh"))
df2.show()
# Actually, Spark can try to infer the types of the columns from the data itself, so let's try that by using the "inferSchema" option
df_test_num = spark.read.csv(test_file, sep='\t', header=True, inferSchema=True)
# As we can see in the next cell, we were only partially successful. The reader was able to detect that qtrs is an integer and that value is a double. But it failed to recognize that ddate is actually a date and that coreg should be an int. That was to be expected: ddate looks like an int, and the coreg field is only used in special situations, so there is a good chance that its content is None for all entries in the file. <br>
# It looks as if we have to define the schema by hand
print(df_test_num.head(1))
df_test_num.printSchema()
# All necessary classes to define a schema are located inside the package pyspark.sql.types and for our example we need the following import<br>
# ```from pyspark.sql.types import StructType, StructField, StringType, DateType, DoubleType, IntegerType```
#
# An important point is that the dateFormat has to be defined as parameter when calling spark.read.csv()
# Hand-written schema for num.txt (column definitions per the dataset's
# readme.htm): text columns as StringType, "ddate" as a DateType (the
# matching dateFormat "yyyyMMdd" must be passed to spark.read.csv),
# "coreg"/"qtrs" as integers, "value" as a double.
# The trailing True marks every column as nullable.
schema = StructType([StructField("adsh", StringType(), True),\
                    StructField("tag", StringType(), True),\
                    StructField("version", StringType(), True),\
                    StructField("coreg", IntegerType(), True),\
                    StructField("ddate", DateType(), True),\
                    StructField("qtrs", IntegerType(), True),\
                    StructField("uom", StringType(), True),\
                    StructField("value", DoubleType(), True),\
                    StructField("footnote",StringType(), True)\
                    ])
df_test_num = spark.read.csv(test_file, sep='\t', header=True, dateFormat="yyyyMMdd", schema = schema)
print(df_test_num.head(1))
df_test_num.printSchema()
# ### Simple write as Parquet
# As first version the dataframe is stored directly in parquet format without additional options
parquet_folder_pure = parquet_folder+"pure/"
df_test_num.write.parquet(parquet_folder_pure, mode="overwrite") # mode 'overwrite' overwrites the data, if they are already present
print('size of parquet_folder_pure: ', get_size_format(get_directory_size(parquet_folder_pure)))
os.listdir(parquet_folder_pure)
# Parquet was able to compress the data down to about 10% of the original size. It split the data up into 8 different data files. So every file contains approximately 300'000 data rows and has a size of about 3MB
# ### Writing using partitions
# Parquet can also store the data in different partitions wich will create a new directory for every partition.
# As a first approach, we could try to create a partition for every report that means for every distinct "adsh" value. However, since we have about 6300 different reports in the used csv file that would result in data files less than 5kb each. Such small files are very inefficient for parquet, so we do soemthing else.<br>
# Since we read the "ddate" column as a proper date-format we can create partitions based on the year and month. In order to do that we need to add two columns for year and month to the dataframe.
df_test_num = df_test_num.withColumn("year", year("ddate")).withColumn("month", month("ddate"))
parquet_folder_by_month = parquet_folder+"month"
df_test_num.write.partitionBy('year','month').parquet(parquet_folder_by_month, mode="overwrite")
# Looking at the result may be a little surprising. There are folders for years starting 1978 up to 2028. The 'ddate' column is defined as "The end date for the data value, rounded to the nearest month end". A lot of values that appear in a report may not be from reported period. For instance, often results from the last couple of years are also included in a yearly report. Or expected returns for the following couple of years appear in the report.<br>
# The total size of on disk has also increased significantly. It is still small compared to the originial CSV file, but around 25% to 30% bigger compared to the size that was needed when the data were stored without defining partitions.
print('size of parquet_folder_month: ', get_size_format(get_directory_size(parquet_folder_by_month)))
os.listdir(parquet_folder_by_month)
# ### Reading parquet
# Reading Parquet is even simpler than reading a CSV, since a Parquet file contains meta information about its own structure
df_test_num = spark.read.parquet(parquet_folder_pure)
print('number of rows :', df_test_num.count())
df_test_num.printSchema()
|
00_01_Parquet_Basics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Mining Input Grammars
#
# So far, the grammars we have seen have been mostly specified manually – that is, you (or the person knowing the input format) had to design and write a grammar in the first place. While the grammars we have seen so far have been rather simple, creating a grammar for complex inputs can involve quite some effort. In this chapter, we therefore introduce techniques that _automatically mine grammars from programs_ – by executing the programs and observing how they process which parts of the input. In conjunction with a grammar fuzzer, this allows us to
# 1. take a program,
# 2. extract its input grammar, and
# 3. fuzz it with high efficiency and effectiveness, using the concepts in this book.
# + slideshow={"slide_type": "skip"}
from bookutils import YouTubeVideo
YouTubeVideo("ddM1oL2LYDI")
# + [markdown] slideshow={"slide_type": "subslide"}
# **Prerequisites**
#
# * You should have read the [chapter on grammars](Grammars.ipynb).
# * The [chapter on configuration fuzzing](ConfigurationFuzzer.ipynb) introduces grammar mining for configuration options, as well as observing variables and values during execution.
# * We use the tracer from the [chapter on coverage](Coverage.ipynb).
# * The concept of parsing from the [chapter on parsers](Parser.ipynb) is also useful.
# + [markdown] slideshow={"slide_type": "skip"}
# ## Synopsis
# <!-- Automatically generated. Do not edit. -->
#
# To [use the code provided in this chapter](Importing.ipynb), write
#
# ```python
# >>> from fuzzingbook.GrammarMiner import <identifier>
# ```
#
# and then make use of the following features.
#
#
# This chapter provides a number of classes to mine input grammars from existing programs. The function `recover_grammar()` could be the easiest to use. It takes a function and a set of inputs, and returns a grammar that describes its input language.
#
# We apply `recover_grammar()` on a `url_parse()` function that takes and decomposes URLs:
#
# ```python
# >>> url_parse('https://www.fuzzingbook.org/')
# >>> URLS
# ['http://user:pass@www.google.com:80/?q=path#ref',
# 'https://www.cispa.saarland:80/',
# 'http://www.fuzzingbook.org/#News']
# ```
# We extract the input grammar for `url_parse()` using `recover_grammar()`:
#
# ```python
# >>> grammar = recover_grammar(url_parse, URLS, files=['urllib/parse.py'])
# >>> grammar
# {'<start>': ['<urlsplit@437:url>'],
# '<urlsplit@437:url>': ['<urlparse@394:scheme>:<_splitnetloc@411:url>'],
# '<urlparse@394:scheme>': ['http', 'https'],
# '<_splitnetloc@411:url>': ['//<urlparse@394:netloc>/',
# '//<urlparse@394:netloc><urlsplit@481:url>'],
# '<urlparse@394:netloc>': ['user:pass@www.google.com:80',
# 'www.cispa.saarland:80',
# 'www.fuzzingbook.org'],
# '<urlsplit@481:url>': ['<urlsplit@486:url>#<urlparse@394:fragment>',
# '/#<urlparse@394:fragment>'],
# '<urlsplit@486:url>': ['/?<urlparse@394:query>'],
# '<urlparse@394:query>': ['q=path'],
# '<urlparse@394:fragment>': ['ref', 'News']}
# ```
# The names of nonterminals are a bit technical; but the grammar nicely represents the structure of the input; for instance, the different schemes (`"http"`, `"https"`) are all identified:
#
# ```python
# >>> syntax_diagram(grammar)
# start
#
# ```
# 
# ```
# urlsplit@437:url
#
# ```
# 
# ```
# urlparse@394:scheme
#
# ```
# 
# ```
# _splitnetloc@411:url
#
# ```
# 
# ```
# urlparse@394:netloc
#
# ```
# 
# ```
# urlsplit@481:url
#
# ```
# 
# ```
# urlsplit@486:url
#
# ```
# 
# ```
# urlparse@394:query
#
# ```
# 
# ```
# urlparse@394:fragment
#
# ```
# 
#
# The grammar can be immediately used for fuzzing, producing arbitrary combinations of input elements, which are all syntactically valid.
#
# ```python
# >>> from GrammarCoverageFuzzer import GrammarCoverageFuzzer
# >>> fuzzer = GrammarCoverageFuzzer(grammar)
# >>> [fuzzer.fuzz() for i in range(5)]
# ['https://www.fuzzingbook.org/',
# 'http://user:pass@www.google.com:80/#News',
# 'https://www.cispa.saarland:80/?q=path#ref',
# 'http://user:pass@www.google.com:80/#ref',
# 'http://www.fuzzingbook.org/']
# ```
# Being able to automatically extract a grammar and to use this grammar for fuzzing makes for very effective test generation with a minimum of manual work.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Grammar Challenge
#
# Consider the `process_inventory()` method from the [chapter on parsers](Parser.ipynb):
# + slideshow={"slide_type": "skip"}
import bookutils
# + slideshow={"slide_type": "skip"}
from typing import List, Tuple, Callable, Any
from collections.abc import Iterable
# + slideshow={"slide_type": "skip"}
from Parser import process_inventory, process_vehicle, process_car, process_van, lr_graph # minor dependency
# + [markdown] slideshow={"slide_type": "fragment"}
# It takes inputs of the following form.
# + slideshow={"slide_type": "fragment"}
INVENTORY = """\
1997,van,Ford,E350
2000,car,Mercury,Cougar
1999,car,Chevy,Venture\
"""
# + slideshow={"slide_type": "subslide"}
print(process_inventory(INVENTORY))
# + [markdown] slideshow={"slide_type": "subslide"}
# We found from the [chapter on parsers](Parser.ipynb) that coarse grammars do not work well for fuzzing when the input format includes details expressed only in code. That is, even though we have the formal specification of CSV files ([RFC 4180](https://tools.ietf.org/html/rfc4180)), the inventory system includes further rules as to what is expected at each index of the CSV file. The solution of simply recombining existing inputs, while practical, is incomplete. In particular, it relies on a formal input specification being available in the first place. However, we have no assurance that the program obeys the input specification given.
# + [markdown] slideshow={"slide_type": "subslide"}
# One of the ways out of this predicament is to interrogate the program under test as to what its input specification is. That is, if the program under test is written in a style such that specific methods are responsible for handling specific parts of the input, one can recover the parse tree by observing the process of parsing. Further, one can recover a reasonable approximation of the grammar by abstraction from multiple input trees.
# + [markdown] slideshow={"slide_type": "subslide"}
# _We start with the assumption (1) that the program is written in such a fashion that specific methods are responsible for parsing specific fragments of the program -- This includes almost all ad hoc parsers._
#
# The idea is as follows:
#
# * Hook into the Python execution and observe the fragments of input string as they are produced and named in different methods.
# * Stitch the input fragments together in a tree structure to retrieve the **Parse Tree**.
# * Abstract common elements from multiple parse trees to produce the **Context Free Grammar** of the input.
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Simple Grammar Miner
# + [markdown] slideshow={"slide_type": "fragment"}
# Say we want to obtain the input grammar for the function `process_vehicle()`. We first collect the sample inputs for this function.
# + slideshow={"slide_type": "fragment"}
VEHICLES = INVENTORY.split('\n')
# + [markdown] slideshow={"slide_type": "fragment"}
# The set of methods responsible for processing inventory are the following.
# + slideshow={"slide_type": "fragment"}
INVENTORY_METHODS = {
'process_inventory',
'process_vehicle',
'process_van',
'process_car'}
# + [markdown] slideshow={"slide_type": "subslide"}
# We have seen from the chapter on [configuration fuzzing](ConfigurationFuzzer.ipynb) that one can hook into the Python runtime to observe the arguments to a function and any local variables created. We have also seen that one can obtain the context of execution by inspecting the `frame` argument. Here is a simple tracer that can return the local variables and other contextual information in a traced function. We reuse the `Coverage` tracing class.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Tracer
# + slideshow={"slide_type": "skip"}
from Coverage import Coverage
# + slideshow={"slide_type": "skip"}
import inspect
# + slideshow={"slide_type": "fragment"}
class Tracer(Coverage):
    def traceit(self, frame, event, arg):
        """Print location and local variables for every event raised
        inside one of the INVENTORY_METHODS; ignore all other frames."""
        frame_info = inspect.getframeinfo(frame)
        if frame_info.function not in INVENTORY_METHODS:
            # Returning None disables local tracing for this scope.
            return
        arg_values = inspect.getargvalues(frame)
        print(event, frame_info.filename, frame_info.lineno,
              frame_info.function, arg_values.args, arg_values.locals)
        return self.traceit
# + [markdown] slideshow={"slide_type": "fragment"}
# We run the code under trace context.
# + slideshow={"slide_type": "subslide"}
with Tracer() as tracer:
process_vehicle(VEHICLES[0])
# + [markdown] slideshow={"slide_type": "subslide"}
# The main thing that we want out of tracing is a list of assignments of input fragments to different variables. We can use the tracing facility `settrace()` to get that as we showed above.
#
# However, the `settrace()` function hooks into the Python debugging facility. When it is in operation, no debugger can hook into the program. That is, if there is a problem with our grammar miner, we will not be able to attach a debugger to it to understand what is happening. This is not ideal. Hence, we limit the tracer to the simplest implementation possible, and implement the core of grammar mining in later stages.
# + [markdown] slideshow={"slide_type": "fragment"}
# The `traceit()` function relies on information from the `frame` variable which exposes Python internals. We define a `context` class that encapsulates the information that we need from the `frame`.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Context
#
# The `Context` class provides easy access to the information such as the current module, and parameter names.
# + slideshow={"slide_type": "subslide"}
class Context:
    """Encapsulate the location information of a stack frame:
    method name, parameter names, file name, and line number."""

    def __init__(self, frame, track_caller=True):
        frame_info = inspect.getframeinfo(frame)
        self.method = frame_info.function
        self.file_name = frame_info.filename
        self.line_no = frame_info.lineno
        self.parameter_names = inspect.getargvalues(frame).args

    def _t(self):
        # Canonical tuple representation used for formatting.
        return (self.file_name, self.line_no, self.method,
                ','.join(self.parameter_names))

    def __repr__(self):
        file_name, line_no, method, params = self._t()
        return "%s:%d:%s(%s)" % (file_name, line_no, method, params)
# + [markdown] slideshow={"slide_type": "fragment"}
# Here we add a few convenience methods that operate on the `frame` to `Context`.
# + slideshow={"slide_type": "subslide"}
class Context(Context):
    def extract_vars(self, frame):
        """Return the local variables of `frame` as a name -> value dict."""
        return inspect.getargvalues(frame).locals

    def parameters(self, all_vars):
        """Restrict `all_vars` to this context's formal parameters."""
        selected = {}
        for name, value in all_vars.items():
            if name in self.parameter_names:
                selected[name] = value
        return selected

    def qualified(self, all_vars):
        """Prefix every variable name with the method name ("method:var")."""
        return {"%s:%s" % (self.method, name): value
                for name, value in all_vars.items()}
# + [markdown] slideshow={"slide_type": "fragment"}
# We hook printing the context to our `traceit()` to see it in action. First we define a `log_event()` for displaying events.
# + slideshow={"slide_type": "fragment"}
def log_event(event, var):
    """Print *var* prefixed by an arrow marking the event type:
    '->' for a call, '<-' for a return, a blank otherwise."""
    arrows = {'call': '->', 'return': '<-'}
    print(arrows.get(event, ' '), var)
# + [markdown] slideshow={"slide_type": "fragment"}
# And use the `log_event()` in the `traceit()` function.
# + slideshow={"slide_type": "subslide"}
class Tracer(Tracer):
    def traceit(self, frame, event, arg):
        """Log each event together with the context it occurred in."""
        cxt = Context(frame)
        log_event(event, cxt)
        return self.traceit
# + [markdown] slideshow={"slide_type": "fragment"}
# Running `process_vehicle()` under trace prints the contexts encountered.
# + slideshow={"slide_type": "subslide"}
with Tracer() as tracer:
process_vehicle(VEHICLES[0])
# + [markdown] slideshow={"slide_type": "subslide"}
# The trace produced by executing any function can get overwhelmingly large. Hence, we need to restrict our attention to specific modules. Further, we also restrict our attention exclusively to `str` variables since these variables are more likely to contain input fragments. (We will show how to deal with complex objects later in exercises.)
#
# The `Context` class we developed earlier is used to decide which modules to monitor, and which variables to trace.
#
# We store the current *input string* so that it can be used to determine if any particular string fragments came from the current input string. Any optional arguments are processed separately.
# + slideshow={"slide_type": "subslide"}
class Tracer(Tracer):
    def __init__(self, my_input, **kwargs):
        """Remember the input string under trace and start with an empty
        trace log; optional keyword arguments are handled by options()."""
        self.options(kwargs)
        self.my_input = my_input
        self.trace = []
# + [markdown] slideshow={"slide_type": "fragment"}
# We use an optional argument `files` to indicate the specific source files we are interested in, and `methods` to indicate which specific methods are of interest. Further, we also use `log` to specify whether verbose logging should be enabled during trace. We use the `log_event()` method we defined earlier for logging.
# + [markdown] slideshow={"slide_type": "fragment"}
# The options processing is as below.
# + slideshow={"slide_type": "fragment"}
class Tracer(Tracer):
    def options(self, kwargs):
        """Process optional arguments: `files` (source files of interest),
        `methods` (methods of interest), and `log` (verbose logging)."""
        self.files = kwargs.get('files', [])
        self.methods = kwargs.get('methods', [])
        if kwargs.get('log'):
            self.log = log_event
        else:
            self.log = lambda _evt, _var: None
# + [markdown] slideshow={"slide_type": "subslide"}
# The `files` and `methods` are checked to determine, if a particular event should be traced or not
# + slideshow={"slide_type": "fragment"}
class Tracer(Tracer):
    def tracing_context(self, cxt, event, arg):
        """Return True if this event's context matches the configured
        file and method filters (an empty filter matches everything)."""
        if self.files and not any(cxt.file_name.endswith(f)
                                  for f in self.files):
            return False
        if self.methods and not any(cxt.method == m for m in self.methods):
            return False
        return True
# + [markdown] slideshow={"slide_type": "fragment"}
# Similar to the context of events, we also want to restrict our attention to specific variables. For now, we want to focus only on strings. (See the Exercises at the end of the chapter on how to extend it to other kinds of objects).
# + slideshow={"slide_type": "fragment"}
class Tracer(Tracer):
    def tracing_var(self, k, v):
        """Restrict tracing to string variables, since only these
        can hold fragments of the (string) input."""
        is_fragment_candidate = isinstance(v, str)
        return is_fragment_candidate
# + [markdown] slideshow={"slide_type": "subslide"}
# We modify the `traceit()` to call an `on_event()` function with the context information only on the specific events we are interested in.
# + slideshow={"slide_type": "subslide"}
class Tracer(Tracer):
    def on_event(self, event, arg, cxt, my_vars):
        """Record one entry in the trace log."""
        self.trace.append((event, arg, cxt, my_vars))

    def create_context(self, frame):
        """Factory hook so subclasses can supply a richer context."""
        return Context(frame)

    def traceit(self, frame, event, arg):
        """Dispatch interesting events to on_event(), filtered both by
        context (files/methods) and by variable kind (strings only)."""
        cxt = self.create_context(frame)
        if not self.tracing_context(cxt, event, arg):
            return self.traceit
        self.log(event, cxt)
        interesting_vars = {
            name: value
            for name, value in cxt.extract_vars(frame).items()
            if self.tracing_var(name, value)
        }
        self.on_event(event, arg, cxt, interesting_vars)
        return self.traceit
# + [markdown] slideshow={"slide_type": "subslide"}
# The `Tracer` class can now focus on specific kinds of events on specific files. Further, it provides a first level filter for variables that we find interesting. For example, we want to focus specifically on variables from `process_*` methods that contain input fragments. Here is how our updated `Tracer` can be used.
# + slideshow={"slide_type": "subslide"}
with Tracer(VEHICLES[0], methods=INVENTORY_METHODS, log=True) as tracer:
process_vehicle(VEHICLES[0])
# + [markdown] slideshow={"slide_type": "subslide"}
# The execution produced the following trace.
# + slideshow={"slide_type": "subslide"}
for t in tracer.trace:
print(t[0], t[2].method, dict(t[3]))
# + [markdown] slideshow={"slide_type": "subslide"}
# Since we are saving the input already in `Tracer`, it is redundant to specify it separately again as an argument.
# + slideshow={"slide_type": "subslide"}
with Tracer(VEHICLES[0], methods=INVENTORY_METHODS, log=True) as tracer:
process_vehicle(tracer.my_input)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### DefineTracker
# + [markdown] slideshow={"slide_type": "fragment"}
# We define a `DefineTracker` class that processes the trace from the `Tracer`. The idea is to store different variable definitions which are input fragments.
#
# The tracker identifies string fragments that are part of the input string, and stores them in a dictionary `my_assignments`. It saves the trace, and the corresponding input for processing. Finally it calls `process()` to process the `trace` it was given. We will start with a simple tracker that relies on certain assumptions, and later see how these assumptions can be relaxed.
# + slideshow={"slide_type": "subslide"}
class DefineTracker:
    """Track variable definitions that are fragments of the input.

    Saves the traced events together with the input that produced
    them, then immediately processes the trace via `process()`
    (defined in a later refinement of this class).
    """

    def __init__(self, my_input, trace, **kwargs):
        """Remember `my_input` and `trace`, apply options, and process."""
        self.my_assignments = {}
        self.my_input = my_input
        self.trace = trace
        self.options(kwargs)
        self.process()
# + [markdown] slideshow={"slide_type": "fragment"}
# One of the problems of using substring search is that short string sequences tend to be included in other string sequences even though they may not have come from the original string. That is, say the input fragment is `v`, it could have equally come from either `van` or `chevy`. We rely on being able to predict the exact place in the input where a given fragment occurred. Hence, we define a constant `FRAGMENT_LEN` such that we ignore strings shorter than that length. We also incorporate a logging facility as before.
# + slideshow={"slide_type": "subslide"}
FRAGMENT_LEN = 3
# + slideshow={"slide_type": "fragment"}
class DefineTracker(DefineTracker):
    def options(self, kwargs):
        """Configure logging and the minimum fragment length."""
        if kwargs.get('log'):
            self.log = log_event
        else:
            self.log = lambda _evt, _var: None
        self.fragment_len = kwargs.get('fragment_len', FRAGMENT_LEN)
# + [markdown] slideshow={"slide_type": "fragment"}
# Our tracer simply records the variable values as they occur. We next need to check if the variables contain values from the **input string**. Common ways to do this is to rely on symbolic execution or at least dynamic tainting, which are powerful, but also complex. However, one can obtain a reasonable approximation by simply relying on substring search. That is, we consider any value produced that is a substring of the original input string to have come from the original input.
# + [markdown] slideshow={"slide_type": "fragment"}
# We define an `is_input_fragment()` method that relies on string inclusion to detect if the string came from the input.
# + slideshow={"slide_type": "subslide"}
class DefineTracker(DefineTracker):
    def is_input_fragment(self, var, value):
        """Return True if `value` is a long-enough substring of the input."""
        if len(value) < self.fragment_len:
            return False
        return value in self.my_input
# + [markdown] slideshow={"slide_type": "fragment"}
# We can use `is_input_fragment()` to select only a subset of variables defined, as implemented below in `fragments()`.
# + slideshow={"slide_type": "fragment"}
class DefineTracker(DefineTracker):
    def fragments(self, variables):
        """Select only those variables whose values are input fragments."""
        selected = {}
        for name, val in variables.items():
            if self.is_input_fragment(name, val):
                selected[name] = val
        return selected
# + [markdown] slideshow={"slide_type": "subslide"}
# The tracker processes each event, and at each event, it updates the dictionary `my_assignments` with the current local variables that contain strings that are part of the input. Note that there is a choice here with respect to what happens during reassignment. We can either discard all the reassignments, or keep only the last assignment. Here, we choose the latter. If you want the former behavior, check whether the value exists in `my_assignments` before storing a fragment.
# + slideshow={"slide_type": "fragment"}
class DefineTracker(DefineTracker):
    def track_event(self, event, arg, cxt, my_vars):
        """Fold the input fragments seen at one event into my_assignments.

        Later fragments overwrite earlier ones, i.e. reassignments keep
        only the last value.
        """
        self.log(event, (cxt.method, my_vars))
        self.my_assignments.update(self.fragments(my_vars))

    def process(self):
        """Replay every recorded event through `track_event()`."""
        for step in self.trace:
            self.track_event(*step)
# + [markdown] slideshow={"slide_type": "subslide"}
# Using the tracker, we can obtain the input fragments. For example, say we are only interested in strings that are at least `5` characters long.
# + slideshow={"slide_type": "fragment"}
# Keep only fragments of at least 5 characters.
tracker = DefineTracker(tracer.my_input, tracer.trace, fragment_len=5)
for k, v in tracker.my_assignments.items():
    print(k, '=', repr(v))
# + [markdown] slideshow={"slide_type": "fragment"}
# Or strings that are `3` characters long (the default).
# + slideshow={"slide_type": "subslide"}
tracker = DefineTracker(tracer.my_input, tracer.trace)
for k, v in tracker.my_assignments.items():
    print(k, '=', repr(v))
# + slideshow={"slide_type": "fragment"}
class DefineTracker(DefineTracker):
    def assignments(self):
        """Return the collected (variable, fragment) pairs as a view."""
        return self.my_assignments.items()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Assembling a Derivation Tree
# + slideshow={"slide_type": "skip"}
from Grammars import START_SYMBOL, syntax_diagram, \
is_nonterminal, Grammar
# + slideshow={"slide_type": "skip"}
from GrammarFuzzer import GrammarFuzzer, display_tree, \
DerivationTree
# + [markdown] slideshow={"slide_type": "fragment"}
# The input fragments from the `DefineTracker` only tell half the story. The fragments may be created at different stages of parsing. Hence, we need to assemble the fragments to a derivation tree of the input. The basic idea is as follows:
#
# Our input from the previous step was:
#
# ```python
# "1997,van,Ford,E350"
# ```
#
# We start a derivation tree, and associate it with the start symbol in the grammar.
# + slideshow={"slide_type": "subslide"}
# Hand-built derivation trees illustrating how input fragments
# progressively split string nodes into nonterminal children.
derivation_tree: DerivationTree = (START_SYMBOL, [("1997,van,Ford,E350", [])])
# + slideshow={"slide_type": "fragment"}
display_tree(derivation_tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# The next input was:
# ```python
# vehicle = "1997,van,Ford,E350"
# ```
# Since vehicle covers the `<start>` node's value completely, we replace the value with the vehicle node.
# + slideshow={"slide_type": "fragment"}
derivation_tree: DerivationTree = (START_SYMBOL,
                                   [('<vehicle>', [("1997,van,Ford,E350", [])],
                                     [])])
# + slideshow={"slide_type": "fragment"}
display_tree(derivation_tree)
# + [markdown] slideshow={"slide_type": "subslide"}
# The next input was:
# ```python
# year = '1997'
# ```
# Traversing the derivation tree from `<start>`, we see that it replaces a portion of the `<vehicle>` node's value. Hence we split the `<vehicle>` node's value to two children, where one corresponds to the value `"1997"` and the other to `",van,Ford,E350"`, and replace the first one with the node `<year>`.
# + slideshow={"slide_type": "fragment"}
derivation_tree: DerivationTree = (START_SYMBOL,
                                   [('<vehicle>', [('<year>', [('1997', [])]),
                                                   (",van,Ford,E350", [])], [])])
# + slideshow={"slide_type": "fragment"}
display_tree(derivation_tree)
# + [markdown] slideshow={"slide_type": "subslide"}
# We perform similar operations for
# ```python
# company = 'Ford'
# ```
# + slideshow={"slide_type": "fragment"}
derivation_tree: DerivationTree = (START_SYMBOL,
                                   [('<vehicle>', [('<year>', [('1997', [])]),
                                                   (",van,", []),
                                                   ('<company>', [('Ford', [])]),
                                                   (",E350", [])], [])])
# + slideshow={"slide_type": "fragment"}
display_tree(derivation_tree)
# + [markdown] slideshow={"slide_type": "subslide"}
# Similarly for
# ```python
# kind = 'van'
# ```
# and
# ```python
# model = 'E350'
# ```
# + slideshow={"slide_type": "subslide"}
derivation_tree: DerivationTree = (START_SYMBOL,
                                   [('<vehicle>', [('<year>', [('1997', [])]),
                                                   (",", []),
                                                   ("<kind>", [('van', [])]),
                                                   (",", []),
                                                   ('<company>', [('Ford', [])]),
                                                   (",", []),
                                                   ("<model>", [('E350', [])])
                                                   ], [])])
# + slideshow={"slide_type": "fragment"}
display_tree(derivation_tree)
# + [markdown] slideshow={"slide_type": "subslide"}
# We now develop the complete algorithm with the above described steps.
# The derivation tree `TreeMiner` is initialized with the input string, and the variable assignments, and it converts the assignments to the corresponding derivation tree.
# + slideshow={"slide_type": "subslide"}
class TreeMiner:
    """Assemble a derivation tree from an input and its fragment assignments."""

    def __init__(self, my_input, my_assignments, **kwargs):
        """Save input and assignments, then derive the tree."""
        self.options(kwargs)
        self.my_input = my_input
        self.my_assignments = my_assignments
        self.tree = self.get_derivation_tree()

    def options(self, kwargs):
        """Enable logging through `log_call` when requested."""
        if kwargs.get('log'):
            self.log = log_call
        else:
            self.log = lambda _i, _v: None

    def get_derivation_tree(self):
        """Placeholder: a bare root node (refined in a later cell)."""
        return (START_SYMBOL, [])
# + [markdown] slideshow={"slide_type": "fragment"}
# The `log_call()` is as follows.
# + slideshow={"slide_type": "fragment"}
def log_call(indent, var):
    """Print `var`, prefixed by `indent` tab characters."""
    prefix = '\t' * indent
    print(prefix, var)
# + [markdown] slideshow={"slide_type": "subslide"}
# The basic idea is as follows:
# * **For now, we assume that the value assigned to a variable is stable. That is, it is never reassigned. In particular, there are no recursive calls, or multiple calls to the same function from different parts.** (We will show how to overcome this limitation later).
# * For each pair _var_, _value_ found in `my_assignments`:
# 1. We search for occurrences of _value_ `val` in the derivation tree recursively.
# 2. If an occurrence was found as a value `V1` of a node `P1`, we partition the value of the node `P1` into three parts, with the central part matching the _value_ `val`, and the first and last part, the corresponding prefix and suffix in `V1`.
# 3. Reconstitute the node `P1` with three children, where prefix and suffix mentioned earlier are string values, and the matching value `val` is replaced by a node `var` with a single value `val`.
# + [markdown] slideshow={"slide_type": "subslide"}
# First, we define a wrapper to generate a nonterminal from a variable name.
# + slideshow={"slide_type": "fragment"}
def to_nonterminal(var):
    """Turn a variable name into a nonterminal: 'Year' -> '<year>'."""
    return "<%s>" % var.lower()
# + [markdown] slideshow={"slide_type": "fragment"}
# The `string_part_of_value()` method checks whether the given `part` value was part of the whole.
# + slideshow={"slide_type": "fragment"}
class TreeMiner(TreeMiner):
    def string_part_of_value(self, part, value):
        """True if string `part` occurs somewhere inside `value`."""
        return part in value
# + [markdown] slideshow={"slide_type": "fragment"}
# The `partition_by_part()` splits the `value` by the given part if it matches, and returns a list containing the first part, the part that was replaced, and the last part. This is a format that can be used as a part of the list of children.
# + slideshow={"slide_type": "subslide"}
class TreeMiner(TreeMiner):
    def partition(self, part, value):
        """Split `value` into (prefix, part, suffix) around the first match."""
        return value.partition(part)
# + slideshow={"slide_type": "fragment"}
class TreeMiner(TreeMiner):
    def partition_by_part(self, pair, value):
        """Split `value` around `pair`'s fragment, wrapping the match.

        Returns a children list: non-empty prefix/suffix stay plain
        string leaves `(text, [])`, while the matched fragment becomes
        a `(key, [[part, []]])` node for the nonterminal `key`.
        """
        key, part = pair
        children = []
        for idx, piece in enumerate(self.partition(part, value)):
            if not piece:
                continue  # drop empty prefix/suffix
            if idx == 1:
                # index 1 is the matched fragment itself
                children.append((key, [[part, []]]))
            else:
                children.append((piece, []))
        return children
# + [markdown] slideshow={"slide_type": "fragment"}
# The `insert_into_tree()` method accepts a given tree `tree` and a `(k,v)` pair. It recursively checks whether the given pair can be applied. If the pair can be applied, it applies the pair and returns `True`.
# + slideshow={"slide_type": "subslide"}
class TreeMiner(TreeMiner):
    def insert_into_tree(self, my_tree, pair):
        """Try to splice the (nonterminal, fragment) `pair` into `my_tree`.

        Recursively searches the children of `my_tree` for a string
        node containing the fragment; on a match, replaces that node
        in place (via `partition_by_part`) and stops. Returns True if
        the pair was applied somewhere, False otherwise.
        """
        var, values = my_tree
        k, v = pair
        self.log(1, "- Node: %s\t\t? (%s:%s)" % (var, k, repr(v)))
        applied = False
        for i, value_ in enumerate(values):
            value, arr = value_
            self.log(2, "-> [%d] %s" % (i, repr(value)))
            if is_nonterminal(value):
                # Recurse into already-expanded children first.
                applied = self.insert_into_tree(value_, pair)
                if applied:
                    break
            elif self.string_part_of_value(v, value):
                # Replace this string leaf by prefix / <k> / suffix, in place.
                prefix_k_suffix = self.partition_by_part(pair, value)
                del values[i]
                for j, rep in enumerate(prefix_k_suffix):
                    values.insert(j + i, rep)
                applied = True
                # NOTE(review): the comprehension variable shadows the loop
                # index `i`; harmless here since we break right after.
                self.log(2, " > %s" % (repr([i[0] for i in prefix_k_suffix])))
                break
            else:
                continue
        return applied
# + [markdown] slideshow={"slide_type": "subslide"}
# Here is how `insert_into_tree()` is used.
# + slideshow={"slide_type": "fragment"}
# Demo: splice fragments into the tree one by one, in place.
tree: DerivationTree = (START_SYMBOL, [("1997,van,Ford,E350", [])])
m = TreeMiner('', {}, log=True)
# + [markdown] slideshow={"slide_type": "fragment"}
# First, we have our input string as the only node.
# + slideshow={"slide_type": "fragment"}
display_tree(tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# Inserting the `<vehicle>` node.
# + slideshow={"slide_type": "fragment"}
v = m.insert_into_tree(tree, ('<vehicle>', "1997,van,Ford,E350"))
# + slideshow={"slide_type": "fragment"}
display_tree(tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# Inserting `<model>` node.
# + slideshow={"slide_type": "subslide"}
v = m.insert_into_tree(tree, ('<model>', 'E350'))
# + slideshow={"slide_type": "fragment"}
display_tree((tree))
# + [markdown] slideshow={"slide_type": "fragment"}
# Inserting `<company>`.
# + slideshow={"slide_type": "subslide"}
v = m.insert_into_tree(tree, ('<company>', 'Ford'))
# + slideshow={"slide_type": "fragment"}
display_tree(tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# Inserting `<kind>`.
# + slideshow={"slide_type": "subslide"}
v = m.insert_into_tree(tree, ('<kind>', 'van'))
# + slideshow={"slide_type": "fragment"}
display_tree(tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# Inserting `<year>`.
# + slideshow={"slide_type": "subslide"}
v = m.insert_into_tree(tree, ('<year>', '1997'))
# + slideshow={"slide_type": "fragment"}
display_tree(tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# To make life simple, we define a wrapper function `nt_var()` that will convert a token to its corresponding nonterminal symbol.
# + slideshow={"slide_type": "fragment"}
class TreeMiner(TreeMiner):
    def nt_var(self, var):
        """Return `var` as a nonterminal, wrapping it only if needed."""
        if is_nonterminal(var):
            return var
        return to_nonterminal(var)
# + [markdown] slideshow={"slide_type": "fragment"}
# Now, we need to apply a new definition to an entire grammar.
# + slideshow={"slide_type": "subslide"}
class TreeMiner(TreeMiner):
    def apply_new_definition(self, tree, var, value):
        """Splice the assignment (var, value) into `tree`, in place."""
        return self.insert_into_tree(tree, (self.nt_var(var), value))
# + [markdown] slideshow={"slide_type": "fragment"}
# This algorithm is implemented as `get_derivation_tree()`.
# + slideshow={"slide_type": "fragment"}
class TreeMiner(TreeMiner):
    def get_derivation_tree(self):
        """Build the derivation tree by applying each assignment in turn."""
        tree = (START_SYMBOL, [(self.my_input, [])])
        for var, val in self.my_assignments:
            self.log(0, "%s=%s" % (var, repr(val)))
            self.apply_new_definition(tree, var, val)
        return tree
# + [markdown] slideshow={"slide_type": "fragment"}
# The `TreeMiner` is used as follows:
# + slideshow={"slide_type": "subslide"}
# Trace one sample, track its fragments, and mine the tree (with logging).
with Tracer(VEHICLES[0]) as tracer:
    process_vehicle(tracer.my_input)
assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
dt = TreeMiner(tracer.my_input, assignments, log=True)
dt.tree
# + [markdown] slideshow={"slide_type": "subslide"}
# The obtained derivation tree is as below.
# + slideshow={"slide_type": "fragment"}
display_tree(TreeMiner(tracer.my_input, assignments).tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# Combining all the pieces:
# + slideshow={"slide_type": "subslide"}
# Collect (input, assignments) pairs for every sample vehicle.
trees = []
for vehicle in VEHICLES:
    print(vehicle)
    with Tracer(vehicle) as tracer:
        process_vehicle(tracer.my_input)
    assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
    trees.append((tracer.my_input, assignments))
    for var, val in assignments:
        print(var + " = " + repr(val))
    print()
# + [markdown] slideshow={"slide_type": "subslide"}
# The corresponding derivation trees are below.
# + slideshow={"slide_type": "fragment"}
csv_dt = []
for inputstr, assignments in trees:
    print(inputstr)
    dt = TreeMiner(inputstr, assignments)
    csv_dt.append(dt)
    display_tree(dt.tree)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Recovering Grammars from Derivation Trees
#
# We define a class `GrammarMiner` that can combine multiple derivation trees to produce the grammar. The initial grammar is empty.
# + slideshow={"slide_type": "fragment"}
class GrammarMiner:
    """Combine derivation trees from several runs into one grammar."""

    def __init__(self):
        # Start with no rules; trees are merged in via add_tree().
        self.grammar = {}
# + [markdown] slideshow={"slide_type": "fragment"}
# The `tree_to_grammar()` method converts our derivation tree to a grammar by picking one node at a time, and adding it to the grammar. The node name becomes the key, and any list of children it has becomes another alternative for that key.
# + slideshow={"slide_type": "subslide"}
class GrammarMiner(GrammarMiner):
    def tree_to_grammar(self, tree):
        """Convert a derivation tree into a canonical grammar.

        Each node name becomes a key whose children form one
        alternative; subtrees contribute their own rules, with
        alternatives for shared keys appended together.
        """
        node, children = tree
        rule = [child_key for child_key, _ in children]
        grammar = {node: [rule] if rule else []}
        for child in children:
            if not is_nonterminal(child[0]):
                continue  # string leaves produce no rules
            for key, alts in self.tree_to_grammar(child).items():
                if key in grammar:
                    grammar[key].extend(alts)
                else:
                    grammar[key] = alts
        return grammar
# + slideshow={"slide_type": "subslide"}
# Demo: the canonical grammar of the first mined derivation tree.
gm = GrammarMiner()
gm.tree_to_grammar(csv_dt[0].tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# The grammar being generated here is `canonical`. We define a function `readable()` that takes in a canonical grammar and returns it in a readable form.
# + slideshow={"slide_type": "subslide"}
def readable(grammar):
    """Convert a canonical grammar into a readable one.

    Each alternative (a list of tokens) is joined into a single
    string; duplicate alternatives are dropped (set order).
    """
    return {key: list({''.join(alt) for alt in alts})
            for key, alts in grammar.items()}
# + slideshow={"slide_type": "subslide"}
syntax_diagram(readable(gm.tree_to_grammar(csv_dt[0].tree)))
# + [markdown] slideshow={"slide_type": "subslide"}
# The `add_tree()` method gets a combined list of non-terminals from current grammar, and the tree to be added to the grammar, and updates the definitions of each non-terminal.
# + slideshow={"slide_type": "skip"}
import itertools
# + slideshow={"slide_type": "fragment"}
class GrammarMiner(GrammarMiner):
    def add_tree(self, t):
        """Merge the grammar of tree-miner `t` into the current grammar."""
        t_grammar = self.tree_to_grammar(t.tree)
        merged = {}
        # Union of keys; alternatives from both grammars are concatenated.
        for key in itertools.chain(self.grammar.keys(), t_grammar.keys()):
            merged[key] = self.grammar.get(key, []) + t_grammar.get(key, [])
        self.grammar = merged
# + [markdown] slideshow={"slide_type": "fragment"}
# The `add_tree()` is used as follows:
# + slideshow={"slide_type": "fragment"}
# Fold all mined derivation trees into one inventory grammar.
inventory_grammar_miner = GrammarMiner()
for dt in csv_dt:
    inventory_grammar_miner.add_tree(dt)
# + slideshow={"slide_type": "subslide"}
syntax_diagram(readable(inventory_grammar_miner.grammar))
# + [markdown] slideshow={"slide_type": "subslide"}
# Given execution traces from various inputs, one can define `update_grammar()` to obtain the complete grammar from the traces.
# + slideshow={"slide_type": "fragment"}
class GrammarMiner(GrammarMiner):
    def update_grammar(self, inputstr, trace):
        """Mine `trace` of processing `inputstr` and fold it into the grammar."""
        tracker = self.create_tracker(inputstr, trace)
        miner = self.create_tree_miner(inputstr, tracker.assignments())
        self.add_tree(miner)
        return self.grammar

    def create_tracker(self, *args):
        """Factory hook: which assignment tracker to use."""
        return DefineTracker(*args)

    def create_tree_miner(self, *args):
        """Factory hook: which tree miner to use."""
        return TreeMiner(*args)
# + [markdown] slideshow={"slide_type": "fragment"}
# The complete grammar recovery is implemented in `recover_grammar()`.
# + slideshow={"slide_type": "subslide"}
def recover_grammar(fn: Callable, inputs: Iterable[str],
                    **kwargs: Any) -> Grammar:
    """Trace `fn` on every input and mine a readable grammar from the runs."""
    grammar_miner = GrammarMiner()
    for sample in inputs:
        # kwargs (e.g. files=...) configure the tracer's focus.
        with Tracer(sample, **kwargs) as tracer:
            fn(tracer.my_input)
        grammar_miner.update_grammar(tracer.my_input, tracer.trace)
    return readable(grammar_miner.grammar)
# + [markdown] slideshow={"slide_type": "fragment"}
# Note that the grammar could have been retrieved directly from the tracker, without the intermediate derivation tree stage. However, going through the derivation tree allows one to inspect the inputs being fragmented and verify that it happens correctly.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 1. Recovering the Inventory Grammar
# + slideshow={"slide_type": "fragment"}
# Recover the inventory grammar from the sample vehicles.
inventory_grammar = recover_grammar(process_vehicle, VEHICLES)
# + slideshow={"slide_type": "fragment"}
inventory_grammar
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 2. Recovering URL Grammar
# + [markdown] slideshow={"slide_type": "fragment"}
# Our algorithm is robust enough to recover grammar from real world programs. For example, the `urlparse` function in the Python `urlib` module accepts the following sample URLs.
# + slideshow={"slide_type": "fragment"}
# Sample URLs exercising userinfo, ports, queries, and fragments.
URLS = [
    'http://user:pass@www.google.com:80/?q=path#ref',
    'https://www.cispa.saarland:80/',
    'http://www.fuzzingbook.org/#News',
]
# + [markdown] slideshow={"slide_type": "fragment"}
# The urllib caches its intermediate results for faster access. Hence, we need to disable it using `clear_cache()` after every invocation.
# + slideshow={"slide_type": "skip"}
from urllib.parse import urlparse, clear_cache # type: ignore
# + [markdown] slideshow={"slide_type": "fragment"}
# We use the sample URLs to recover grammar as follows. The `urlparse` function tends to cache its previous parsing results. Hence, we define a new method `url_parse()` that clears the cache before each call.
# + slideshow={"slide_type": "subslide"}
def url_parse(url):
    """Parse `url`, first clearing urllib's result cache.

    Clearing matters for tracing: a cached result would skip the
    parsing code we want to observe. The parse result is discarded.
    """
    clear_cache()
    urlparse(url)
# + slideshow={"slide_type": "subslide"}
# Trace url_parse on each sample URL and collect fragment assignments.
trees = []
for url in URLS:
    print(url)
    with Tracer(url) as tracer:
        url_parse(tracer.my_input)
    assignments = DefineTracker(tracer.my_input, tracer.trace).assignments()
    trees.append((tracer.my_input, assignments))
    for var, val in assignments:
        print(var + " = " + repr(val))
    print()
# Mine and display a derivation tree for every traced URL.
url_dt = []
for inputstr, assignments in trees:
    print(inputstr)
    dt = TreeMiner(inputstr, assignments)
    url_dt.append(dt)
    display_tree(dt.tree)
# + [markdown] slideshow={"slide_type": "subslide"}
# Let us use `url_parse()` to recover the grammar:
# + slideshow={"slide_type": "fragment"}
# Recover the URL grammar, focusing on urllib's parse module.
url_grammar = recover_grammar(url_parse, URLS, files=['urllib/parse.py'])
# + slideshow={"slide_type": "subslide"}
syntax_diagram(url_grammar)
# + [markdown] slideshow={"slide_type": "subslide"}
# The recovered grammar describes the URL format reasonably well.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Fuzzing
# + [markdown] slideshow={"slide_type": "fragment"}
# We can now use our recovered grammar for fuzzing as follows.
#
# First, the inventory grammar.
# + slideshow={"slide_type": "subslide"}
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
    print(f.fuzz())
# + [markdown] slideshow={"slide_type": "fragment"}
# Next, the URL grammar.
# + slideshow={"slide_type": "subslide"}
f = GrammarFuzzer(url_grammar)
for _ in range(10):
    print(f.fuzz())
# + [markdown] slideshow={"slide_type": "subslide"}
# What this means is that we can now take a program and a few samples, extract its grammar, and then use this very grammar for fuzzing. Now that's quite an opportunity!
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Problems with the Simple Miner
# + [markdown] slideshow={"slide_type": "fragment"}
# One of the problems with our simple grammar miner is the assumption that the values assigned to variables are stable. Unfortunately, that may not hold true in all cases. For example, here is a URL with a slightly different format.
# + slideshow={"slide_type": "fragment"}
URLS_X = URLS + ['ftp://freebsd.org/releases/5.8']
# + [markdown] slideshow={"slide_type": "fragment"}
# The grammar generated from this set of samples is not as nice as what we got earlier.
# + slideshow={"slide_type": "fragment"}
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
# + slideshow={"slide_type": "subslide"}
syntax_diagram(url_grammar)
# + [markdown] slideshow={"slide_type": "subslide"}
# Clearly, something has gone wrong.
#
# To investigate why the `url` definition has gone wrong, let us inspect the trace for the URL.
# + slideshow={"slide_type": "subslide"}
# Inspect the raw trace: only events from urllib's parse.py with variables.
clear_cache()
with Tracer(URLS_X[0]) as tracer:
    urlparse(tracer.my_input)
for i, t in enumerate(tracer.trace):
    if t[0] in {'call', 'line'} and 'parse.py' in str(t[2]) and t[3]:
        print(i, t[2]._t()[1], t[3:])
# + [markdown] slideshow={"slide_type": "subslide"}
# Notice how the value of `url` changes as the parsing progresses? This violates our assumption that the value assigned to a variable is stable. We next look at how this limitation can be removed.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Grammar Miner with Reassignment
# + [markdown] slideshow={"slide_type": "fragment"}
# One way to uniquely identify different variables is to annotate them with *line numbers* both when they are defined and also when their value changes. Consider the code fragment below
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Tracking variable assignment locations
# + slideshow={"slide_type": "fragment"}
def C(cp_1):
    """Demo function: appends '@2' and '@3' to its argument.

    Variable names encode their defining line numbers on purpose,
    so the traced output can be checked against the source.
    """
    c_2 = cp_1 + '@2'
    c_3 = c_2 + '@3'
    return c_3
# + slideshow={"slide_type": "fragment"}
def B(bp_7):
    """Demo function: appends '@8' and delegates to C().

    Variable names encode their defining line numbers on purpose.
    """
    b_8 = bp_7 + '@8'
    return C(b_8)
# + slideshow={"slide_type": "fragment"}
def A(ap_12):
    """Demo function exercising repeated reassignment of locals.

    Variable names encode their defining line numbers; reassignments
    tag the value with the line that changed it. Returns None.
    """
    a_13 = ap_12 + '@13'
    a_14 = B(a_13) + '@14'
    a_14 = a_14 + '@15'
    a_13 = a_14 + '@16'
    a_14 = B(a_13) + '@17'
    a_14 = B(a_13) + '@18'
# + [markdown] slideshow={"slide_type": "subslide"}
# Notice how all variables are either named corresponding to either where they are defined, or the value is annotated to indicate that it was changed.
#
# Let us run this under the trace.
# + slideshow={"slide_type": "subslide"}
# Trace the reassignment demo and print event, location, and variables.
with Tracer('____') as tracer:
    A(tracer.my_input)
for t in tracer.trace:
    print(t[0], "%d:%s" % (t[2].line_no, t[2].method), t[3])
# + [markdown] slideshow={"slide_type": "subslide"}
# Each variable was first referenced as follows:
#
# * `cp_1` -- *call* `1:C`
# * `c_2` -- *line* `3:C` (but the previous event was *line* `2:C`)
# * `c_3` -- *line* `4:C` (but the previous event was *line* `3:C`)
# * `bp_7` -- *call* `7:B`
# * `b_8` -- *line* `9:B` (but the previous event was *line* `8:B`)
# * `ap_12` -- *call* `12:A`
# * `a_13` -- *line* `14:A` (but the previous event was *line* `13:A`)
# * `a_14` -- *line* `15:A` (the previous event was *return* `9:B`. However, the previous event in `A()` was *line* `14:A`)
# * reassign `a_14` at *15* -- *line* `16:A` (the previous event was *line* `15:A`)
# * reassign `a_13` at *16* -- *line* `17:A` (the previous event was *line* `16:A`)
# * reassign `a_14` at *17* -- *return* `17:A` (the previous event in `A()` was *line* `17:A`)
# * reassign `a_14` at *18* -- *return* `18:A` (the previous event in `A()` was *line* `18:A`)
# + [markdown] slideshow={"slide_type": "subslide"}
# So, our observations are that, if it is a call, the current location is the right one for any new variables being defined. On the other hand, if the variable being referenced for the first time (or reassigned a new value), then the right location to consider is the previous location *in the same method invocation*. Next, let us see how we can incorporate this information into variable naming.
# + [markdown] slideshow={"slide_type": "fragment"}
# Next, we need a way to track the individual method calls as they are being made. For this we define the class `CallStack`. Each method invocation gets a separate identifier, and when the method call is over, the identifier is reset.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### CallStack
# + slideshow={"slide_type": "subslide"}
class CallStack:
    """Track the stack of method invocations, each with a unique id.

    `method_id` is a (name, serial) pair; `method_register` is a
    monotonically increasing serial so re-entering the same method
    yields a fresh id.
    """

    def __init__(self, **kwargs):
        self.options(kwargs)
        # Pseudo-frame for the start symbol sits at the bottom.
        self.method_id = (START_SYMBOL, 0)
        self.method_register = 0
        self.mstack = [self.method_id]

    def enter(self, method):
        """Push a new invocation of `method` with a fresh serial."""
        self.method_register += 1
        self.method_id = (method, self.method_register)
        # Log before pushing so indent() reflects the caller's depth.
        self.log('call', "%s%s" % (self.indent(), str(self)))
        self.mstack.append(self.method_id)

    def leave(self):
        """Pop the current invocation and restore the caller's id."""
        self.mstack.pop()
        # Log after popping so indent() reflects the caller's depth.
        self.log('return', "%s%s" % (self.indent(), str(self)))
        self.method_id = self.mstack[-1]
# + [markdown] slideshow={"slide_type": "subslide"}
# A few extra functions to make life simpler.
# + slideshow={"slide_type": "subslide"}
class CallStack(CallStack):
    def options(self, kwargs):
        """Enable logging via `log_event` when requested."""
        self.log = log_event if kwargs.get('log') else lambda _evt, _var: None

    def indent(self):
        """Indentation proportional to the current stack depth."""
        return len(self.mstack) * "\t"

    def at(self, n):
        """Return the method id at stack position `n`."""
        return self.mstack[n]

    def __len__(self):
        """Depth of the call stack, excluding the start pseudo-frame.

        Bug fix: the original read the bare name `mstack`, which is
        undefined in this scope and raised NameError whenever
        `len(stack)` was evaluated; it must be `self.mstack`.
        """
        return len(self.mstack) - 1

    def __str__(self):
        return "%s:%d" % self.method_id

    def __repr__(self):
        return repr(self.method_id)
# + [markdown] slideshow={"slide_type": "subslide"}
# We also define a convenience method to display a given stack.
# + slideshow={"slide_type": "fragment"}
def display_stack(istack):
    """Render a CallStack as a left-to-right derivation-tree diagram."""
    def linearize(frames):
        # Each frame becomes the single child of the frame above it.
        top, *below = frames
        children = [linearize(below)] if below else []
        return (repr(top), children)
    display_tree(linearize(istack.mstack), graph_attr=lr_graph)
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is how we can use the `CallStack`.
# + slideshow={"slide_type": "fragment"}
# Demo: push and pop invocations, visualizing the stack each time.
cs = CallStack()
display_stack(cs)
cs
# + slideshow={"slide_type": "subslide"}
cs.enter('hello')
display_stack(cs)
cs
# + slideshow={"slide_type": "fragment"}
cs.enter('world')
display_stack(cs)
cs
# + slideshow={"slide_type": "fragment"}
cs.leave()
display_stack(cs)
cs
# + slideshow={"slide_type": "subslide"}
# Re-entering the same method gets a fresh serial number.
cs.enter('world')
display_stack(cs)
cs
# + slideshow={"slide_type": "fragment"}
cs.leave()
display_stack(cs)
cs
# + [markdown] slideshow={"slide_type": "fragment"}
# In order to account for variable reassignments, we need to have a more intelligent data structure than a dictionary for storing variables. We first define a simple interface `Vars`. It acts as a container for variables, and is instantiated at `my_assignments`.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Vars
# + [markdown] slideshow={"slide_type": "fragment"}
# The `Vars` stores references to variables as they occur during parsing in its internal dictionary `defs`. We initialize the dictionary with the original string.
# + slideshow={"slide_type": "fragment"}
class Vars:
    """A simple container mapping variable names to their values."""

    def __init__(self, original):
        """Start with no definitions; remember the original input string."""
        self.defs = {}
        self.my_input = original
# + [markdown] slideshow={"slide_type": "fragment"}
# The dictionary needs two methods: `update()` that takes a set of key-value pairs to update itself, and `_set_kv()` that updates a particular key-value pair.
# + slideshow={"slide_type": "subslide"}
class Vars(Vars):
    def _set_kv(self, k, v):
        """Store one key-value pair (single point of update for subclasses)."""
        self.defs[k] = v

    def __setitem__(self, k, v):
        """Dictionary-style assignment delegates to `_set_kv()`."""
        self._set_kv(k, v)

    def update(self, v):
        """Merge all pairs from mapping `v`, one `_set_kv()` at a time."""
        for key, val in v.items():
            self._set_kv(key, val)
# + [markdown] slideshow={"slide_type": "fragment"}
# The `Vars` is a proxy for the internal dictionary. For example, here is how one can use it.
# + slideshow={"slide_type": "fragment"}
# Demo: Vars proxies its internal dictionary.
v = Vars('')
v.defs
# + slideshow={"slide_type": "subslide"}
v['x'] = 'X'
v.defs
# + slideshow={"slide_type": "fragment"}
v.update({'x': 'x', 'y': 'y'})
v.defs
# + [markdown] slideshow={"slide_type": "subslide"}
# ### AssignmentVars
# + [markdown] slideshow={"slide_type": "fragment"}
# We now extend the simple `Vars` to account for variable reassignments. For this, we define `AssignmentVars`.
#
# The idea for detecting reassignments and renaming variables is as follows: We keep track of the previous reassignments to particular variables using `accessed_seq_var`. It contains the last rename of any particular variable as its corresponding value. The `new_vars` contains a list of all new variables that were added on this iteration.
# + slideshow={"slide_type": "subslide"}
class AssignmentVars(Vars):
    """`Vars` that detects reassignments by numbering variable versions."""

    def __init__(self, original):
        """Initialize rename, line, and new-variable bookkeeping."""
        super().__init__(original)
        self.accessed_seq_var = {}  # var -> latest reassignment index
        self.var_def_lines = {}     # sequenced var -> defining line
        self.current_event = None   # most recently seen trace event
        self.new_vars = set()       # variables newly assigned this round
        self.method_init()
# + [markdown] slideshow={"slide_type": "fragment"}
# The `method_init()` method takes care of keeping track of method invocations using records saved in the `call_stack`. `event_locations` is for keeping track of the locations accessed *within this method*. This is used for line number tracking of variable definitions.
# + slideshow={"slide_type": "subslide"}
class AssignmentVars(AssignmentVars):
    def method_init(self):
        """Set up the call stack and per-method event-location tracking."""
        self.call_stack = CallStack()
        # Maps each method id to the locations visited within it.
        self.event_locations = {self.call_stack.method_id: []}
# + [markdown] slideshow={"slide_type": "fragment"}
# The `update()` is now modified to track the changed line numbers if any, using `var_location_register()`. We reinitialize the `new_vars` after use for the next event.
# + slideshow={"slide_type": "fragment"}
class AssignmentVars(AssignmentVars):
    def update(self, v):
        """Merge pairs from `v`, then register locations of new variables."""
        for key, val in v.items():
            self._set_kv(key, val)
        self.var_location_register(self.new_vars)
        # Reset so the next event starts with a clean slate.
        self.new_vars = set()
# + [markdown] slideshow={"slide_type": "subslide"}
# The variable name now incorporates an index of how many reassignments it has gone through, effectively making each reassignment a unique variable.
# + slideshow={"slide_type": "fragment"}
class AssignmentVars(AssignmentVars):
    def var_name(self, var):
        """Sequenced name: the variable paired with its reassignment count."""
        return (var, self.accessed_seq_var[var])
# + [markdown] slideshow={"slide_type": "fragment"}
# While storing variables, we need to first check whether it was previously known. If it is not, we need to initialize the rename count. This is accomplished by `var_access`.
# + slideshow={"slide_type": "fragment"}
class AssignmentVars(AssignmentVars):
    def var_access(self, var):
        """Return the sequenced name, initializing the counter on first use."""
        self.accessed_seq_var.setdefault(var, 0)
        return self.var_name(var)
# + [markdown] slideshow={"slide_type": "subslide"}
# During a variable reassignment, we update the `accessed_seq_var` to reflect the new count.
# + slideshow={"slide_type": "fragment"}
class AssignmentVars(AssignmentVars):
    def var_assign(self, var):
        """Bump the reassignment counter and remember the new sequenced name."""
        self.accessed_seq_var[var] = self.accessed_seq_var[var] + 1
        name = self.var_name(var)
        self.new_vars.add(name)
        return name
# + [markdown] slideshow={"slide_type": "fragment"}
# These methods can be used as follows
# + slideshow={"slide_type": "fragment"}
# Demo: accessing a variable initializes its counter; assigning bumps it.
sav = AssignmentVars('')
sav.defs
# + slideshow={"slide_type": "fragment"}
sav.var_access('v1')  # first access: counter starts at 0
# + slideshow={"slide_type": "fragment"}
sav.var_assign('v1')  # first assignment: counter becomes 1
# + [markdown] slideshow={"slide_type": "subslide"}
# Assigning to it again increments the counter.
# + slideshow={"slide_type": "fragment"}
sav.var_assign('v1')  # second assignment: counter becomes 2
# + [markdown] slideshow={"slide_type": "subslide"}
# The core of the logic is in `_set_kv()`. When a variable is being assigned, we get the sequenced variable name `s_var`. If the sequenced variable name was previously unknown in `defs`, then we have no further concerns. We add the sequenced variable to `defs`.
#
# If the variable is previously known, then it is an indication of a possible reassignment. In this case, we look at the value the variable is holding. We check whether the value has changed. If it has not changed, then it is not a reassignment.
#
# If the value has changed, it is a reassignment. We first increment the variable usage sequence using `var_assign`, retrieve the new name, update the new name in `defs`.
# + slideshow={"slide_type": "subslide"}
class AssignmentVars(AssignmentVars):
    def _set_kv(self, var, val):
        """Store var=val, treating a changed value as a fresh reassignment."""
        s_var = self.var_access(var)
        # A known sequenced name holding the same value is not a reassignment.
        already_known = s_var in self.defs
        if already_known and self.defs[s_var] == val:
            return
        # New variable, or its value changed: record under the next sequence.
        self.defs[self.var_assign(var)] = val
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is how it can be used. Assigning a variable the first time initializes its counter.
# + slideshow={"slide_type": "fragment"}
# Demo: first assignment of a variable initializes its counter.
sav = AssignmentVars('')
sav['x'] = 'X'
sav.defs
# + [markdown] slideshow={"slide_type": "fragment"}
# If the variable is assigned again with the same value, it is probably not a reassignment.
# + slideshow={"slide_type": "subslide"}
sav['x'] = 'X'  # same value: not recorded as a reassignment
sav.defs
# + [markdown] slideshow={"slide_type": "fragment"}
# However, if the value changed, it is a reassignment.
# + slideshow={"slide_type": "fragment"}
sav['x'] = 'Y'  # changed value: sequence counter is incremented
sav.defs
# + [markdown] slideshow={"slide_type": "fragment"}
# There is a subtlety here. It is possible for a child method to be called from the middle of a parent method, and for both to use the same variable name with different values. In this case, when the child returns, parent will have the old variable with old value in context. With our implementation, we consider this as a reassignment. However, this is OK because adding a new reassignment is harmless, but missing one is not. Further, we will discuss later how this can be avoided.
# + [markdown] slideshow={"slide_type": "subslide"}
# We also define book keeping codes for `register_event()` `method_enter()` and `method_exit()` which are the methods responsible for keeping track of the method stack. The basic idea is that, each `method_enter()` represents a new method invocation. Hence it merits a new method id, which is generated from the `method_register`, and saved in the `method_id`. Since this is a new method, the method stack is extended by one element with this id. In the case of `method_exit()`, we pop the method stack, and reset the current `method_id` to what was below the current one.
# + slideshow={"slide_type": "subslide"}
class AssignmentVars(AssignmentVars):
    def method_enter(self, cxt, my_vars):
        """Start a new invocation: push a stack record, log the call
        location, and record the parameter bindings in `my_vars`."""
        self.current_event = 'call'
        self.call_stack.enter(cxt.method)
        # Fresh location list for this new invocation.
        self.event_locations[self.call_stack.method_id] = []
        self.register_event(cxt)
        self.update(my_vars)
    def method_exit(self, cxt, my_vars):
        """Record the return-time bindings, then pop the stack record.
        The stack is unwound only after the bindings are processed."""
        self.current_event = 'return'
        self.register_event(cxt)
        self.update(my_vars)
        self.call_stack.leave()
    def method_statement(self, cxt, my_vars):
        """Record the bindings visible when an ordinary statement runs."""
        self.current_event = 'line'
        self.register_event(cxt)
        self.update(my_vars)
# + [markdown] slideshow={"slide_type": "subslide"}
# For each of the method events, we also register the event using `register_event()` which keeps track of the line numbers that were referenced in *this* method.
# + slideshow={"slide_type": "fragment"}
class AssignmentVars(AssignmentVars):
    def register_event(self, cxt):
        """Append the event's line number to the current method's log."""
        locations = self.event_locations[self.call_stack.method_id]
        locations.append(cxt.line_no)
# + [markdown] slideshow={"slide_type": "fragment"}
# The `var_location_register()` keeps the locations of newly added variables. The definition location of variables in a `call` is the *current* location. However, for a `line`, it would be the previous event in the current method.
# + slideshow={"slide_type": "subslide"}
class AssignmentVars(AssignmentVars):
    def var_location_register(self, my_vars):
        """Record the defining line number for each newly created variable."""
        def loc(mid):
            # A 'call' defines its parameters at the current location; a
            # 'line' or 'return' event reports bindings produced by the
            # *previous* statement in this method.
            history = self.event_locations[mid]
            if self.current_event == 'call':
                return history[-1]
            if self.current_event in ('line', 'return'):
                return history[-2]
            assert False
        my_loc = loc(self.call_stack.method_id)
        for var in my_vars:
            self.var_def_lines[var] = my_loc
# + [markdown] slideshow={"slide_type": "subslide"}
# We define `defined_vars()` which returns the names of variables annotated with the line numbers as below.
# + slideshow={"slide_type": "fragment"}
class AssignmentVars(AssignmentVars):
    def defined_vars(self, formatted=True):
        """List (name, value) pairs, names annotated with definition line."""
        def fmt(k):
            annotated = (k[0], self.var_def_lines[k])
            return "%s@%s" % annotated if formatted else annotated
        return [(fmt(key), value) for key, value in self.defs.items()]
# + [markdown] slideshow={"slide_type": "fragment"}
# Similar to `defined_vars()` we define `seq_vars()` which annotates different variables with the number of times they were used.
# + slideshow={"slide_type": "subslide"}
class AssignmentVars(AssignmentVars):
    def seq_vars(self, formatted=True):
        """Map each variable (with line and use count) to its value."""
        def fmt(k):
            annotated = (k[0], self.var_def_lines[k], k[1])
            return "%s@%s:%s" % annotated if formatted else annotated
        return {fmt(key): value for key, value in self.defs.items()}
# + [markdown] slideshow={"slide_type": "subslide"}
# ### AssignmentTracker
# + [markdown] slideshow={"slide_type": "fragment"}
# The `AssignmentTracker` keeps the assignment definitions using the `AssignmentVars` we defined previously.
# + slideshow={"slide_type": "fragment"}
class AssignmentTracker(DefineTracker):
    def __init__(self, my_input, trace, **kwargs):
        """Track variable assignments observed in `trace` for `my_input`.

        Options are parsed first so that `process()` sees them; the
        assignment store is created before processing starts.
        """
        self.options(kwargs)
        self.my_input = my_input
        self.my_assignments = self.create_assignments(my_input)
        self.trace = trace
        self.process()
    def create_assignments(self, *args):
        """Factory hook: subclasses may supply a different Vars class."""
        return AssignmentVars(*args)
# + [markdown] slideshow={"slide_type": "subslide"}
# To fine-tune the process, we define an optional parameter called `track_return`. During tracing a method return, Python produces a virtual variable that contains the result of the returned value. If the `track_return` is set, we capture this value as a variable.
#
# * `track_return` -- if true, add a *virtual variable* to the Vars representing the return value
# + slideshow={"slide_type": "fragment"}
class AssignmentTracker(AssignmentTracker):
    def options(self, kwargs):
        """Parse options; `track_return` (default False) captures return
        values as virtual variables. Remaining options go to the parent."""
        self.track_return = kwargs.get('track_return', False)
        super().options(kwargs)
# + [markdown] slideshow={"slide_type": "subslide"}
# There can be different kinds of events during a trace, which includes `call` when a function is entered, `return` when the function returns, `exception` when an exception is thrown and `line` when a statement is executed.
#
# The previous `Tracker` was too simplistic in that it did not distinguish between the different events. We rectify that and define `on_call()`, `on_return()`, and `on_line()` respectively, which get called on their corresponding events.
#
# Note that `on_line()` is called also for `on_return()`. The reason is, that Python invokes the trace function *before* the corresponding line is executed. Hence, effectively, the `on_return()` is called with the binding produced by the execution of the previous statement in the environment. Our processing in effect is done on values that were bound by the previous statement. Hence, calling `on_line()` here is appropriate as it provides the event handler a chance to work on the previous binding.
# + slideshow={"slide_type": "subslide"}
class AssignmentTracker(AssignmentTracker):
    def on_call(self, arg, cxt, my_vars):
        """A function was entered: record its parameters as fragments."""
        my_vars = cxt.parameters(my_vars)
        self.my_assignments.method_enter(cxt, self.fragments(my_vars))
    def on_line(self, arg, cxt, my_vars):
        """A statement is about to execute: record the bindings produced
        by the previous statement in this method."""
        self.my_assignments.method_statement(cxt, self.fragments(my_vars))
    def on_return(self, arg, cxt, my_vars):
        """A function returns: flush pending bindings, then leave.

        The trace fires *before* the return line runs, so the current
        bindings are first treated like an ordinary statement.
        """
        self.on_line(arg, cxt, my_vars)
        # Optionally capture the returned value as a virtual variable.
        my_vars = {'<-%s' % cxt.method: arg} if self.track_return else {}
        self.my_assignments.method_exit(cxt, my_vars)
    def on_exception(self, arg, cxt, my_vars):
        """Exceptions are ignored by this tracker."""
        # Fixed parameter typo (`my_vara`) for consistency with siblings.
        return
    def track_event(self, event, arg, cxt, my_vars):
        """Dispatch one trace event to the matching handler."""
        self.current_event = event
        dispatch = {
            'call': self.on_call,
            'return': self.on_return,
            'line': self.on_line,
            'exception': self.on_exception
        }
        dispatch[event](arg, cxt, my_vars)
# + [markdown] slideshow={"slide_type": "subslide"}
# We can now use `AssignmentTracker` to track the different variables. To verify that our variable line number inference works, we recover definitions from the functions `A()`, `B()` and `C()` (with data annotations removed so that the input fragments are correctly identified).
# + slideshow={"slide_type": "fragment"}
def C(cp_1): # type: ignore
    """Demo function: passes its input through two named locals.

    The local names (c_2, c_3) are deliberate — the tracker identifies
    variables by name, so renaming them would change the demo output.
    """
    c_2 = cp_1
    c_3 = c_2
    return c_3
# + slideshow={"slide_type": "fragment"}
def B(bp_7): # type: ignore
    """Demo function: binds its input to b_8 and forwards it to C()."""
    b_8 = bp_7
    return C(b_8)
# + slideshow={"slide_type": "subslide"}
def A(ap_12): # type: ignore
    """Demo function exercising repeated reassignment of a_13/a_14
    across calls to B(), used to verify line-number tracking."""
    a_13 = ap_12
    a_14 = B(a_13)
    a_14 = a_14      # self-assignment: same value, not a reassignment
    a_13 = a_14
    a_14 = B(a_13)
    a_14 = B(a_14)[3:]
# + [markdown] slideshow={"slide_type": "fragment"}
# Running `A()` with sufficient input.
# + slideshow={"slide_type": "subslide"}
# Trace A() and print both the sequenced and the line-annotated variables.
with Tracer('---xxx') as tracer:
    A(tracer.my_input)
tracker = AssignmentTracker(tracer.my_input, tracer.trace, log=True)
for k, v in tracker.my_assignments.seq_vars().items():
    print(k, '=', repr(v))
print()
for k, v in tracker.my_assignments.defined_vars(formatted=True):
    print(k, '=', repr(v))
# + [markdown] slideshow={"slide_type": "subslide"}
# As can be seen, the line numbers are now correctly identified for each variable.
# + [markdown] slideshow={"slide_type": "fragment"}
# Let us try retrieving the assignments for a real world example.
# + slideshow={"slide_type": "subslide"}
# Collect (input, trace) pairs for each sample URL and print the
# variables the tracker recovers from urlparse().
traces = []
for inputstr in URLS_X:
    clear_cache()
    with Tracer(inputstr, files=['urllib/parse.py']) as tracer:
        urlparse(tracer.my_input)
    traces.append((tracer.my_input, tracer.trace))
    tracker = AssignmentTracker(tracer.my_input, tracer.trace, log=True)
    for k, v in tracker.my_assignments.defined_vars():
        print(k, '=', repr(v))
    print()
# + [markdown] slideshow={"slide_type": "subslide"}
# The line numbers of variables can be verified from the source code of [urllib/parse.py](https://github.com/python/cpython/blob/3.6/Lib/urllib/parse.py).
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Recovering a Derivation Tree
# + [markdown] slideshow={"slide_type": "fragment"}
# Does handling variable reassignments help with our URL examples? We look at these next.
# + slideshow={"slide_type": "fragment"}
class TreeMiner(TreeMiner):
    def get_derivation_tree(self):
        """Apply every recorded assignment to an initial start-symbol tree."""
        root = (START_SYMBOL, [(self.my_input, [])])
        for var, value in self.my_assignments:
            self.log(0, "%s=%s" % (var, repr(value)))
            self.apply_new_definition(root, var, value)
        return root
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 1: Recovering URL Derivation Tree
# + [markdown] slideshow={"slide_type": "fragment"}
# First we obtain the derivation tree of the URL 1
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### URL 1 derivation tree
# + slideshow={"slide_type": "fragment"}
# Derivation tree for the first sample URL.
clear_cache()
with Tracer(URLS_X[0], files=['urllib/parse.py']) as tracer:
    urlparse(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# Next, we obtain the derivation tree of URL 4
# + [markdown] slideshow={"slide_type": "subslide"}
# ##### URL 4 derivation tree
# + slideshow={"slide_type": "fragment"}
# Derivation tree for the last sample URL.
clear_cache()
with Tracer(URLS_X[-1], files=['urllib/parse.py']) as tracer:
    urlparse(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree)
# + [markdown] slideshow={"slide_type": "fragment"}
# The derivation trees seem to belong to the same grammar. Hence, we obtain the grammar for the complete set. First, we update the `recover_grammar()` to use `AssignTracker`.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Recover Grammar
# + slideshow={"slide_type": "fragment"}
class GrammarMiner(GrammarMiner):
    def update_grammar(self, inputstr, trace):
        """Mine the trace of `inputstr` and fold the tree into the grammar."""
        tracker = self.create_tracker(inputstr, trace)
        assignments = tracker.my_assignments.defined_vars()
        miner = self.create_tree_miner(inputstr, assignments)
        self.add_tree(miner)
        return self.grammar
    def create_tracker(self, *args):
        """Factory hook for the assignment tracker."""
        return AssignmentTracker(*args)
    def create_tree_miner(self, *args):
        """Factory hook for the tree miner."""
        return TreeMiner(*args)
# + [markdown] slideshow={"slide_type": "fragment"}
# Next, we use the modified `recover_grammar()` on derivation trees obtained from URLs.
# + slideshow={"slide_type": "subslide"}
# Recover a grammar from the URL samples and sanity-check it by fuzzing.
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
# + [markdown] slideshow={"slide_type": "fragment"}
# The recovered grammar is below.
# + slideshow={"slide_type": "subslide"}
syntax_diagram(url_grammar)
# + [markdown] slideshow={"slide_type": "subslide"}
# Let us fuzz a little to see if the produced values are sane.
# + slideshow={"slide_type": "fragment"}
f = GrammarFuzzer(url_grammar)
for _ in range(10):
    print(f.fuzz())
# + [markdown] slideshow={"slide_type": "subslide"}
# Our modifications do seem to help. Next, we check whether we can still retrieve the grammar for inventory.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 2: Recovering Inventory Grammar
# + slideshow={"slide_type": "fragment"}
# Recover the inventory grammar and sanity-check it by fuzzing.
inventory_grammar = recover_grammar(process_vehicle, VEHICLES)
# + slideshow={"slide_type": "subslide"}
syntax_diagram(inventory_grammar)
# + [markdown] slideshow={"slide_type": "subslide"}
# Using fuzzing to produce values from the grammar.
# + slideshow={"slide_type": "fragment"}
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
    print(f.fuzz())
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Problems with the Grammar Miner with Reassignment
# + [markdown] slideshow={"slide_type": "fragment"}
# One of the problems with our grammar miner is that it doesn't yet account for the current context. That is, when replacing, a variable can replace tokens that it does not have access to (and hence, it is not a fragment of). Consider this example.
# + slideshow={"slide_type": "fragment"}
# Demonstrate the context problem: the mined inventory tree is wrong.
with Tracer(INVENTORY) as tracer:
    process_inventory(tracer.my_input)
sm = AssignmentTracker(tracer.my_input, tracer.trace)
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars())
display_tree(dt.tree, graph_attr=lr_graph)
# + [markdown] slideshow={"slide_type": "fragment"}
# As can be seen, the derivation tree obtained is not quite what we expected. The issue is easily seen if we enable logging in the `TreeMiner`.
# + slideshow={"slide_type": "subslide"}
dt = TreeMiner(tracer.my_input, sm.my_assignments.defined_vars(), log=True)
# + [markdown] slideshow={"slide_type": "subslide"}
# Look at the last statement. We have a value `1999,car,` where only the `year` got replaced. We no longer have a `'car'` variable to continue the replacement here. This happens because the `'car'` value in `'1999,car,Chevy,Venture'` is not treated as a new value because the value `'car'` had occurred for `'vehicle'` variable in the exact same location for a *different* method call (for `'2000,car,Mercury,Cougar'`).
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Grammar Miner with Scope
#
# We need to incorporate inspection of the variables in the current context. We already have a stack of method calls so that we can obtain the current method at any point. We need to do the same for variables.
#
# For that, we extend the `CallStack` to a new class `InputStack` which holds the method invoked as well as the parameters observed. It is essentially the record of activation of the method. We start with the original input at the base of the stack, and for each new method-call, we push the parameters of that call into the stack as a new record.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Input Stack
# + slideshow={"slide_type": "fragment"}
class InputStack(CallStack):
    """A call stack that also records the inputs (parameters) in scope."""
    def __init__(self, i, fragment_len=FRAGMENT_LEN):
        # The base of the stack is the original input string itself.
        self.inputs = [{START_SYMBOL: i}]
        # Minimum string length for a value to count as a fragment.
        self.fragment_len = fragment_len
        super().__init__()
# + [markdown] slideshow={"slide_type": "fragment"}
# In order to check whether a particular variable should be saved, we define `in_current_record()` which checks only the variables in the current scope for inclusion (rather than the original input string).
# + slideshow={"slide_type": "fragment"}
class InputStack(InputStack):
    def in_current_record(self, val):
        """True if `val` occurs inside any value of the topmost record."""
        current_record = self.inputs[-1]
        for held in current_record.values():
            if val in held:
                return True
        return False
# + slideshow={"slide_type": "fragment"}
# Demo: membership checks are against the *topmost* record only.
my_istack = InputStack('hello my world')
# + slideshow={"slide_type": "fragment"}
my_istack.in_current_record('hello')  # True: substring of the base input
# + slideshow={"slide_type": "subslide"}
my_istack.in_current_record('bye')  # False: not in the base input
# + slideshow={"slide_type": "fragment"}
my_istack.inputs.append({'greeting': 'hello', 'location': 'world'})
# + slideshow={"slide_type": "fragment"}
my_istack.in_current_record('hello')  # True: in the new top record
# + slideshow={"slide_type": "fragment"}
my_istack.in_current_record('my')  # False: only in the record below
# + [markdown] slideshow={"slide_type": "fragment"}
# We define the method `ignored()` that returns true if either the variable is not a string, or the variable length is less than the defined `fragment_len`.
# + slideshow={"slide_type": "fragment"}
class InputStack(InputStack):
    def ignored(self, val):
        """True for non-strings and strings shorter than `fragment_len`."""
        is_fragment = isinstance(val, str) and len(val) >= self.fragment_len
        return not is_fragment
# + slideshow={"slide_type": "subslide"}
# Demo: non-strings and too-short strings are ignored.
my_istack = InputStack('hello world')
my_istack.ignored(1)  # True: not a string
# + slideshow={"slide_type": "fragment"}
my_istack.ignored('a')  # True: shorter than fragment_len
# + slideshow={"slide_type": "fragment"}
my_istack.ignored('help')  # False: long enough string
# + [markdown] slideshow={"slide_type": "fragment"}
# We can now define the `in_scope()` method that checks whether the variable needs to be ignored, and if it is not to be ignored, whether the variable value is present in the current scope.
# + slideshow={"slide_type": "fragment"}
class InputStack(InputStack):
    def in_scope(self, k, val):
        """A value is in scope if it is a non-ignored fragment of the
        current activation record."""
        return (not self.ignored(val)) and self.in_current_record(val)
# + [markdown] slideshow={"slide_type": "subslide"}
# Finally, we update `enter()` that pushes relevant variables in the current context to the stack.
# + slideshow={"slide_type": "fragment"}
class InputStack(InputStack):
    def enter(self, method, inputs):
        """Push the in-scope subset of `inputs` as the new activation record."""
        relevant = {name: val for name, val in inputs.items()
                    if self.in_scope(name, val)}
        self.inputs.append(relevant)
        super().enter(method)
# + [markdown] slideshow={"slide_type": "fragment"}
# When a method returns, we also need a corresponding `leave()` to pop out the inputs and unwind the stack.
# + slideshow={"slide_type": "fragment"}
class InputStack(InputStack):
    def leave(self):
        """Drop the current activation record and unwind the call stack."""
        del self.inputs[-1]
        super().leave()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### ScopedVars
# + [markdown] slideshow={"slide_type": "fragment"}
# We need to update our `AssignmentVars` to include information about which scope the variable was defined in. We start by updating `method_init()`.
# + slideshow={"slide_type": "fragment"}
class ScopedVars(AssignmentVars):
    def method_init(self):
        """Set up the input-aware call stack and the location log."""
        self.call_stack = self.create_call_stack(self.my_input)
        self.event_locations = {self.call_stack.method_id: []}
    def create_call_stack(self, i):
        """Factory hook: subclasses may supply a different stack class."""
        return InputStack(i)
# + [markdown] slideshow={"slide_type": "fragment"}
# Similarly, the `method_enter()` now initializes the `accessed_seq_var` for the current method call.
# + slideshow={"slide_type": "subslide"}
class ScopedVars(ScopedVars):
    def method_enter(self, cxt, my_vars):
        """Start a new invocation: push the parameters onto the input
        stack and initialize per-method counters and locations."""
        self.current_event = 'call'
        self.call_stack.enter(cxt.method, my_vars)
        # Per-method variable use counters start fresh for this invocation.
        self.accessed_seq_var[self.call_stack.method_id] = {}
        self.event_locations[self.call_stack.method_id] = []
        self.register_event(cxt)
        self.update(my_vars)
# + [markdown] slideshow={"slide_type": "subslide"}
# The `update()` method now saves the context in which the value is defined. In the case of a parameter to a function, the context should be the context in which the function was called. On the other hand, a value defined during a statement execution would have the current context.
#
# Further, we annotate on value rather than key because we do not want to duplicate variables when parameters are in context in the next line. They will have same value, but different context because they are present in a statement execution.
#
# + slideshow={"slide_type": "subslide"}
class ScopedVars(ScopedVars):
    def update(self, v):
        """Record bindings, annotating each value with its defining context.

        Parameters of a 'call' belong to the *caller's* context (one frame
        below the top); every other event uses the current context.
        """
        context = -2 if self.current_event == 'call' else -1
        scope = self.call_stack.at(context)
        for name, value in v.items():
            self._set_kv(name, (value, scope))
        self.var_location_register(self.new_vars)
        self.new_vars = set()
# + [markdown] slideshow={"slide_type": "fragment"}
# We also need to save the current method invocation so as to determine which variables are in scope. This information is now incorporated in the variable name as `accessed_seq_var[method_id][var]`.
# + slideshow={"slide_type": "subslide"}
class ScopedVars(ScopedVars):
    def var_name(self, var):
        """Sequenced name: (name, method invocation id, per-method count)."""
        mid = self.call_stack.method_id
        return (var, mid, self.accessed_seq_var[mid][var])
# + [markdown] slideshow={"slide_type": "fragment"}
# As before, `var_access` simply initializes the corresponding counter, this time in the context of `method_id`.
# + slideshow={"slide_type": "fragment"}
class ScopedVars(ScopedVars):
    def var_access(self, var):
        """Initialize the per-method counter on first access; return name."""
        counters = self.accessed_seq_var[self.call_stack.method_id]
        counters.setdefault(var, 0)
        return self.var_name(var)
# + [markdown] slideshow={"slide_type": "fragment"}
# During a variable reassignment, we update the `accessed_seq_var` to reflect the new count.
# + slideshow={"slide_type": "subslide"}
class ScopedVars(ScopedVars):
    def var_assign(self, var):
        """Bump the per-method counter; remember the new sequenced name."""
        counters = self.accessed_seq_var[self.call_stack.method_id]
        counters[var] = counters[var] + 1
        name = self.var_name(var)
        self.new_vars.add(name)
        return name
# + [markdown] slideshow={"slide_type": "fragment"}
# We now update `defined_vars()` to account for the new information.
# + slideshow={"slide_type": "fragment"}
class ScopedVars(ScopedVars):
    def defined_vars(self, formatted=True):
        """List (name, value) pairs annotated with scope and line number."""
        def fmt(k):
            method, seq = k[1]
            annotated = (method, seq, k[0], self.var_def_lines[k])
            return "%s[%d]:%s@%s" % annotated if formatted else annotated
        return [(fmt(key), value) for key, value in self.defs.items()]
# + [markdown] slideshow={"slide_type": "fragment"}
# Updating `seq_vars()` to account for new information.
# + slideshow={"slide_type": "subslide"}
class ScopedVars(ScopedVars):
    def seq_vars(self, formatted=True):
        """Map variables (scope, line, use count) to their values."""
        def fmt(k):
            method, seq = k[1]
            annotated = (method, seq, k[0], self.var_def_lines[k], k[2])
            return "%s[%d]:%s@%s:%s" % annotated if formatted else annotated
        return {fmt(key): value for key, value in self.defs.items()}
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Scope Tracker
# + [markdown] slideshow={"slide_type": "fragment"}
# With the `InputStack` and `Vars` defined, we can now define the `ScopeTracker`. The `ScopeTracker` only saves variables if the value is present in the current scope.
# + slideshow={"slide_type": "fragment"}
class ScopeTracker(AssignmentTracker):
    def __init__(self, my_input, trace, **kwargs):
        """An AssignmentTracker that only records in-scope variables."""
        # current_event must exist before super().__init__ starts processing.
        self.current_event = None
        super().__init__(my_input, trace, **kwargs)
    def create_assignments(self, *args):
        """Use the scope-aware Vars implementation."""
        return ScopedVars(*args)
# + [markdown] slideshow={"slide_type": "fragment"}
# We define a wrapper for checking whether a variable is present in the scope.
# + slideshow={"slide_type": "subslide"}
class ScopeTracker(ScopeTracker):
    def is_input_fragment(self, var, value):
        """Delegate the fragment check to the underlying input stack."""
        stack = self.my_assignments.call_stack
        return stack.in_scope(var, value)
# + [markdown] slideshow={"slide_type": "fragment"}
# We can use the `ScopeTracker` as follows.
# + slideshow={"slide_type": "subslide"}
# Trace the inventory processing with the scope-aware tracker.
vehicle_traces = []
with Tracer(INVENTORY) as tracer:
    process_inventory(tracer.my_input)
sm = ScopeTracker(tracer.my_input, tracer.trace)
vehicle_traces.append((tracer.my_input, sm))
for k, v in sm.my_assignments.seq_vars().items():
    print(k, '=', repr(v))
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Recovering a Derivation Tree
# + [markdown] slideshow={"slide_type": "fragment"}
# The main difference in `apply_new_definition()` is that we add a second condition that checks for scope. In particular, variables are only allowed to replace portions of string fragments that were in scope.
# The variable scope is indicated by `scope`. However, merely accounting for scope is not sufficient. For example, consider the fragment below.
# + [markdown] slideshow={"slide_type": "subslide"}
# ```python
# def my_fn(stringval):
# partA, partB = stringval.split('/')
# return partA, partB
#
# svalue = ...
# v1, v2 = my_fn(svalue)
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# Here, `v1` and `v2` get their values from a previous function call. Not from their current context. That is, we have to provide an exception for cases where an internal child method call may have generated a large fragment as we showed above. To account for that, we define `mseq()` that retrieves the method call sequence. In the above case, the `mseq()` of the internal child method call would be larger than the current `mseq()`. If so, we allow the replacement to proceed.
# + slideshow={"slide_type": "fragment"}
class ScopeTreeMiner(TreeMiner):
    def mseq(self, key):
        """Extract the method-call sequence number from a variable key."""
        _method, seq, _var, _lno = key
        return seq
# + [markdown] slideshow={"slide_type": "fragment"}
# The `nt_var()` method needs to take the tuple and generate a non-terminal symbol out of it. We skip the method sequence because it is not relevant for the grammar.
# + slideshow={"slide_type": "subslide"}
class ScopeTreeMiner(ScopeTreeMiner):
    def nt_var(self, key):
        """Build a nonterminal from method name, line number and variable.

        The method sequence number is intentionally omitted: it is not
        relevant for the grammar.
        """
        method, _seq, var, lno = key
        label = "%s@%d:%s" % (method, lno, var)
        return to_nonterminal(label)
# + [markdown] slideshow={"slide_type": "fragment"}
# We now redefine the `apply_new_definition()` to account for context and scope. In particular, a variable is allowed to replace a part of a value only if the variable is in *scope* -- that is, its scope (method sequence number of either its calling context in case it is a parameter or the current context in case it is a statement) is the same as that of the value's method sequence number. An exception is made when the value's method sequence number is greater than the variable's method sequence number. In that case, the value may have come from an internal call. We allow the replacement to proceed in that case.
# + slideshow={"slide_type": "subslide"}
class ScopeTreeMiner(ScopeTreeMiner):
    def partition(self, part, value):
        """Split `value` into (prefix, part, suffix) around `part`."""
        return value.partition(part)
    def partition_by_part(self, pair, value):
        """Return the non-empty pieces of `value` split around `v`, with
        the matched piece replaced by a nonterminal node carrying `nt_seq`."""
        (nt_var, nt_seq), (v, v_scope) = pair
        # Index 1 of str.partition's result is the matched part itself.
        prefix_k_suffix = [
            (nt_var, [(v, [], nt_seq)]) if i == 1 else (e, [])
            for i, e in enumerate(self.partition(v, value))
            if e]
        return prefix_k_suffix
    def insert_into_tree(self, my_tree, pair):
        """Try to substitute `pair`'s value somewhere under `my_tree`.

        Recurses into nonterminal children first; at terminal nodes the
        replacement applies only if the value is a part of the node's
        string and the scopes agree. Returns True once applied.
        """
        var, values, my_scope = my_tree
        (nt_var, nt_seq), (v, v_scope) = pair
        applied = False
        for i, value_ in enumerate(values):
            key, arr, scope = value_
            self.log(2, "-> [%d] %s" % (i, repr(value_)))
            if is_nonterminal(key):
                # Prefer replacing deeper in the tree.
                applied = self.insert_into_tree(value_, pair)
                if applied:
                    break
            else:
                # Scope check: a mismatched scope is allowed only when the
                # value came from a later (deeper) method sequence.
                if v_scope != scope:
                    if nt_seq > scope:
                        continue
                if not v or not self.string_part_of_value(v, key):
                    continue
                # Splice prefix / nonterminal / suffix over the old node.
                prefix_k_suffix = [(k, children, scope) for k, children
                                   in self.partition_by_part(pair, key)]
                del values[i]
                for j, rep in enumerate(prefix_k_suffix):
                    values.insert(j + i, rep)
                applied = True
                self.log(2, " > %s" % (repr([i[0] for i in prefix_k_suffix])))
                break
        return applied
# + [markdown] slideshow={"slide_type": "subslide"}
# The `apply_new_definition()` is now modified to carry additional contextual information `mseq`.
# + slideshow={"slide_type": "fragment"}
class ScopeTreeMiner(ScopeTreeMiner):
    def apply_new_definition(self, tree, var, value_):
        """Attach variable `var` (with its scoped value) to the tree."""
        seq = self.mseq(var)
        nt_var = self.nt_var(var)
        val, (_smethod, mseq) = value_
        pair = ((nt_var, seq), (val, mseq))
        return self.insert_into_tree(tree, pair)
# + [markdown] slideshow={"slide_type": "fragment"}
# We also modify `get_derivation_tree()` so that the initial node carries the context.
# + slideshow={"slide_type": "subslide"}
class ScopeTreeMiner(ScopeTreeMiner):
    def get_derivation_tree(self):
        """Build the tree from a scope-annotated start node (scope 0)."""
        root = (START_SYMBOL, [(self.my_input, [], 0)], 0)
        for var, value in self.my_assignments:
            self.log(0, "%s=%s" % (var, repr(value)))
            self.apply_new_definition(root, var, value)
        return root
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 1: Recovering URL Parse Tree
# + [markdown] slideshow={"slide_type": "fragment"}
# We verify that our URL parse tree recovery still works as expected.
# + slideshow={"slide_type": "subslide"}
# Recover scoped derivation trees for each sample URL.
url_dts = []
for inputstr in URLS_X:
    clear_cache()
    with Tracer(inputstr, files=['urllib/parse.py']) as tracer:
        urlparse(tracer.my_input)
    sm = ScopeTracker(tracer.my_input, tracer.trace)
    for k, v in sm.my_assignments.defined_vars(formatted=False):
        print(k, '=', repr(v))
    dt = ScopeTreeMiner(
        tracer.my_input,
        sm.my_assignments.defined_vars(
            formatted=False))
    display_tree(dt.tree, graph_attr=lr_graph)
    url_dts.append(dt)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Example 2: Recovering Inventory Parse Tree
# + [markdown] slideshow={"slide_type": "fragment"}
# Next, we look at recovering the parse tree from `process_inventory()` which failed last time.
# + slideshow={"slide_type": "subslide"}
# Recover the inventory derivation tree with scope tracking enabled.
with Tracer(INVENTORY) as tracer:
    process_inventory(tracer.my_input)
sm = ScopeTracker(tracer.my_input, tracer.trace)
for k, v in sm.my_assignments.defined_vars():
    print(k, '=', repr(v))
inventory_dt = ScopeTreeMiner(
    tracer.my_input,
    sm.my_assignments.defined_vars(
        formatted=False))
display_tree(inventory_dt.tree, graph_attr=lr_graph)
# + [markdown] slideshow={"slide_type": "subslide"}
# The recovered parse tree seems reasonable.
#
# One of the things that one might notice from our Example (2) is that the three subtrees -- `vehicle[2:1]`, `vehicle[4:1]` and `vehicle[6:1]` are quite alike. We will examine how this can be exploited to generate a grammar directly, next.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Grammar Mining
# + [markdown] slideshow={"slide_type": "fragment"}
# The `tree_to_grammar()` is now redefined as follows, to account for the extra scope in nodes.
# + slideshow={"slide_type": "subslide"}
class ScopedGrammarMiner(GrammarMiner):
    def tree_to_grammar(self, tree):
        """Convert a scoped derivation tree into a canonical grammar dict."""
        key, children, scope = tree
        # The expansion of `key` is the sequence of its child keys,
        # skipping self-referencing children.
        one_alt = [ckey for ckey, gchildren, cscope in children if ckey != key]
        hsh = {key: [one_alt] if one_alt else []}
        # Merge in the grammars mined from all nonterminal children.
        for child in children:
            ckey = child[0]
            if not is_nonterminal(ckey):
                continue
            child_grammar = self.tree_to_grammar(child)
            for k, alts in child_grammar.items():
                hsh.setdefault(k, []).extend(alts)
        return hsh
# + [markdown] slideshow={"slide_type": "subslide"}
# The grammar is in canonical form, which needs to be massaged to display. First, the recovered grammar for inventory.
# + slideshow={"slide_type": "subslide"}
# Display the mined inventory grammar (canonical form, made readable).
si = ScopedGrammarMiner()
si.add_tree(inventory_dt)
syntax_diagram(readable(si.grammar))
# + [markdown] slideshow={"slide_type": "subslide"}
# The recovered grammar for URLs.
# + slideshow={"slide_type": "subslide"}
su = ScopedGrammarMiner()
for t in url_dts:
    su.add_tree(t)
syntax_diagram(readable(su.grammar))
# + [markdown] slideshow={"slide_type": "subslide"}
# One might notice that the grammar is not entirely human readable, with a number of single token definitions.
# + [markdown] slideshow={"slide_type": "fragment"}
# Hence, the last piece of the puzzle is the cleanup method `clean_grammar()`, which cleans up such definitions. The idea is to look for single token definitions such that a key is defined exactly by another key (single alternative, single token, nonterminal).
# + slideshow={"slide_type": "subslide"}
class ScopedGrammarMiner(ScopedGrammarMiner):
    def get_replacements(self, grammar):
        """Find alias keys: keys defined exactly by one other nonterminal.

        A key qualifies when all its alternatives are the same single rule,
        that rule has exactly one token, and the token is a nonterminal.
        Returns a dict mapping each alias key to its replacement token.
        """
        replacements = {}
        for key, alternatives in grammar.items():
            if key == START_SYMBOL:
                # Never replace the start symbol.
                continue
            # All alternatives must be identical (compare via repr strings).
            if len({str(alt) for alt in alternatives}) != 1:
                continue
            rule = alternatives[0]
            if len(rule) == 1 and is_nonterminal(rule[0]):
                replacements[key] = rule[0]
        return replacements
# + [markdown] slideshow={"slide_type": "subslide"}
# Once we have such a list, iteratively replace the original key where ever it is used with the token we found earlier. Repeat until none is left.
# + slideshow={"slide_type": "subslide"}
class ScopedGrammarMiner(ScopedGrammarMiner):
    def clean_grammar(self):
        """Iteratively inline single-token alias definitions and drop them.

        Uses get_replacements() to find keys defined exactly by one other
        nonterminal, rewrites every rule to use the replacement token, then
        removes the now-unused alias keys. Repeats until a pass makes no
        change. Returns the cleaned grammar in readable (non-canonical) form.
        """
        # NOTE(review): replacements is computed once, before the loop —
        # presumably no new aliases can appear while rewriting; confirm.
        replacements = self.get_replacements(self.grammar)
        while True:
            changed = set()  # alias keys actually inlined in this pass
            for k in self.grammar:
                if k in replacements:
                    # Skip the alias definitions themselves.
                    continue
                new_alts = []
                for alt in self.grammar[k]:
                    new_alt = []
                    for t in alt:
                        if t in replacements:
                            # Inline the alias target instead of the alias.
                            new_alt.append(replacements[t])
                            changed.add(t)
                        else:
                            new_alt.append(t)
                    new_alts.append(new_alt)
                self.grammar[k] = new_alts
            if not changed:
                break
            # Drop the alias definitions that were inlined in this pass.
            for k in changed:
                self.grammar.pop(k, None)
        return readable(self.grammar)
# + [markdown] slideshow={"slide_type": "subslide"}
# The `clean_grammar()` is used as follows:
# + slideshow={"slide_type": "subslide"}
si = ScopedGrammarMiner()
si.add_tree(inventory_dt)
syntax_diagram(readable(si.clean_grammar()))
# + [markdown] slideshow={"slide_type": "subslide"}
# We update the `update_grammar()` to use the right tracker and miner.
# + slideshow={"slide_type": "fragment"}
class ScopedGrammarMiner(ScopedGrammarMiner):
    def update_grammar(self, inputstr, trace):
        """Mine `inputstr`/`trace` and fold the result into self.grammar."""
        tracker = self.create_tracker(inputstr, trace)
        assignments = tracker.my_assignments.defined_vars(formatted=False)
        tree = self.create_tree_miner(inputstr, assignments)
        self.add_tree(tree)
        return self.grammar

    def create_tracker(self, *args):
        """Factory hook: the scope-aware variable tracker."""
        return ScopeTracker(*args)

    def create_tree_miner(self, *args):
        """Factory hook: the scope-aware derivation-tree miner."""
        return ScopeTreeMiner(*args)
# + [markdown] slideshow={"slide_type": "subslide"}
# The `recover_grammar()` uses the right miner, and returns a cleaned grammar.
# + slideshow={"slide_type": "fragment"}
def recover_grammar(fn, inputs, **kwargs):  # type: ignore
    """Run `fn` on every sample input under tracing and mine a grammar.

    Keyword arguments are forwarded to Tracer (e.g. files=, methods=).
    Returns the cleaned grammar in readable form.
    """
    miner = ScopedGrammarMiner()
    for input_string in inputs:
        with Tracer(input_string, **kwargs) as tracer:
            fn(tracer.my_input)
        miner.update_grammar(tracer.my_input, tracer.trace)
    return readable(miner.clean_grammar())
# + slideshow={"slide_type": "fragment"}
url_grammar = recover_grammar(url_parse, URLS_X, files=['urllib/parse.py'])
# + slideshow={"slide_type": "subslide"}
syntax_diagram(url_grammar)
# + slideshow={"slide_type": "subslide"}
f = GrammarFuzzer(url_grammar)
for _ in range(10):
print(f.fuzz())
# + slideshow={"slide_type": "fragment"}
inventory_grammar = recover_grammar(process_inventory, [INVENTORY])
# + slideshow={"slide_type": "subslide"}
syntax_diagram(inventory_grammar)
# + slideshow={"slide_type": "subslide"}
f = GrammarFuzzer(inventory_grammar)
for _ in range(10):
print(f.fuzz())
# + [markdown] slideshow={"slide_type": "subslide"}
# We see how tracking scope helps us to extract an even more precise grammar.
# + [markdown] slideshow={"slide_type": "fragment"}
# Notice that we use *String* inclusion testing as a way of determining whether a particular string fragment came from the original input string. While this may seem rather error-prone compared to dynamic tainting, we note that numerous tracing tools such as `dtrace()` and `ptrace()` allow one to obtain the information we seek from execution of binaries directly in different platforms. However, methods for obtaining dynamic taints almost always involve instrumenting the binaries before they can be used. Hence, this method of string inclusion can be more generally applied than dynamic tainting approaches. Further, dynamic taints are often lost due to implicit transmission, or at the boundary between *Python* and *C* code. String inclusion has no such problems. Hence, our approach can often obtain better results than relying on dynamic tainting.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Synopsis
#
# This chapter provides a number of classes to mine input grammars from existing programs. The function `recover_grammar()` could be the easiest to use. It takes a function and a set of inputs, and returns a grammar that describes its input language.
# + [markdown] slideshow={"slide_type": "fragment"}
# We apply `recover_grammar()` on a `url_parse()` function that takes and decomposes URLs:
# + slideshow={"slide_type": "fragment"}
url_parse('https://www.fuzzingbook.org/')
# + slideshow={"slide_type": "fragment"}
URLS
# + [markdown] slideshow={"slide_type": "fragment"}
# We extract the input grammar for `url_parse()` using `recover_grammar()`:
# + slideshow={"slide_type": "subslide"}
grammar = recover_grammar(url_parse, URLS, files=['urllib/parse.py'])
grammar
# + [markdown] slideshow={"slide_type": "subslide"}
# The names of nonterminals are a bit technical; but the grammar nicely represents the structure of the input; for instance, the different schemes (`"http"`, `"https"`) are all identified:
# + slideshow={"slide_type": "subslide"}
syntax_diagram(grammar)
# + [markdown] slideshow={"slide_type": "subslide"}
# The grammar can be immediately used for fuzzing, producing arbitrary combinations of input elements, which are all syntactically valid.
# + slideshow={"slide_type": "skip"}
from GrammarCoverageFuzzer import GrammarCoverageFuzzer
# + slideshow={"slide_type": "fragment"}
fuzzer = GrammarCoverageFuzzer(grammar)
[fuzzer.fuzz() for i in range(5)]
# + [markdown] slideshow={"slide_type": "fragment"}
# Being able to automatically extract a grammar and to use this grammar for fuzzing makes for very effective test generation with a minimum of manual work.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Lessons Learned
#
# * Given a set of sample inputs for a program, we can learn an input grammar by examining variable values during execution if the program relies on handwritten parsers.
# * Simple string inclusion checks are sufficient to obtain reasonably accurate grammars from real world programs.
# * The resulting grammars can be directly used for fuzzing, and can have a multiplier effect on any samples you have.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Next Steps
#
# * Learn how to use [information flow](InformationFlow.ipynb) to further improve mapping inputs to states.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Background
#
# Recovering the language from a _set of samples_ (i.e., not taking into account a possible program that might process them) is a well researched topic. The excellent reference by Higuera \cite{higuera2010grammatical} covers all the classical approaches. The current state of the art in black box grammar mining is described by Clark \cite{clark2013learning}.
#
# Learning an input language from a _program_, with or without samples, is yet an emerging topic, despite its potential for fuzzing. The pioneering work in this area was done by Lin et al. \cite{Lin2008} who invented a way to retrieve the parse trees from top down and bottom up parsers. The approach described in this chapter is based directly on the AUTOGRAM work of Hoschele et al. \cite{Hoschele2017}.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercises
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Exercise 1: Flattening complex objects
#
# Our grammar miners only check for string fragments. However, programs may often pass containers or custom objects containing input fragments. For example, consider the plausible modification for our inventory processor, where we use a custom object `Vehicle` to carry fragments.
# + slideshow={"slide_type": "fragment"}
class Vehicle:
    """Hold the comma-separated fields of a vehicle record as attributes.

    Expects at least four fields (year, kind, company, model); any
    additional fields are ignored.
    """

    def __init__(self, vehicle: str):
        # Unpack directly into attributes; the starred target absorbs extras.
        self.year, self.kind, self.company, self.model, *_ = vehicle.split(',')
# + slideshow={"slide_type": "subslide"}
def process_inventory_with_obj(inventory: str) -> str:
    """Process each inventory line via the Vehicle-object pipeline.

    Returns the per-vehicle descriptions joined by newlines.
    """
    res = []
    for vehicle in inventory.split('\n'):
        # BUG FIX: this variant must call the object-based processor;
        # calling plain process_vehicle() would bypass the Vehicle object
        # and defeat the purpose of this example.
        ret = process_vehicle_with_obj(vehicle)
        res.extend(ret)
    return '\n'.join(res)
# + slideshow={"slide_type": "subslide"}
def process_vehicle_with_obj(vehicle: str) -> List[str]:
    """Parse a raw record into a Vehicle and dispatch on its kind.

    Raises Exception for any kind other than 'van' or 'car'.
    """
    v = Vehicle(vehicle)
    handlers = {
        'van': process_van_with_obj,
        'car': process_car_with_obj,
    }
    if v.kind not in handlers:
        raise Exception('Invalid entry')
    return handlers[v.kind](v)
# + slideshow={"slide_type": "subslide"}
def process_van_with_obj(vehicle: Vehicle) -> List[str]:
    """Describe a van; vans built after 2010 count as recent models."""
    description = "We have a %s %s van from %s vintage." % (
        vehicle.company, vehicle.model, vehicle.year)
    if int(vehicle.year) > 2010:
        remark = "It is a recent model!"
    else:
        remark = "It is an old but reliable model!"
    return [description, remark]
# + slideshow={"slide_type": "subslide"}
def process_car_with_obj(vehicle: Vehicle) -> List[str]:
    """Describe a car; cars built after 2016 count as recent models."""
    description = "We have a %s %s car from %s vintage." % (
        vehicle.company, vehicle.model, vehicle.year)
    if int(vehicle.year) > 2016:
        remark = "It is a recent model!"
    else:
        remark = "It is an old but reliable model!"
    return [description, remark]
# + [markdown] slideshow={"slide_type": "fragment"}
# We recover the grammar as before.
# + slideshow={"slide_type": "subslide"}
vehicle_grammar = recover_grammar(
process_inventory_with_obj,
[INVENTORY],
methods=INVENTORY_METHODS)
# + [markdown] slideshow={"slide_type": "fragment"}
# The new vehicle grammar is missing details, especially as to the different models and companies for vans and cars.
# + slideshow={"slide_type": "subslide"}
syntax_diagram(vehicle_grammar)
# + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true
# The problem is that, we are looking specifically for string objects that contain fragments of the input string during tracing. Can you modify our grammar miner to correctly account for the complex objects too?
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# **Solution.**
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# The problem can be understood if we execute the tracer under verbose logging.
# + slideshow={"slide_type": "skip"} solution2="hidden"
with Tracer(INVENTORY, methods=INVENTORY_METHODS, log=True) as tracer:
process_inventory(tracer.my_input)
print()
print('Traced values:')
for t in tracer.trace:
print(t)
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# You can see that we lose track of string fragments as soon as they are incorporated into the `Vehicle` object. The way out is to trace these variables separately.
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# For that, we develop the `flatten()` method that given any custom complex object and its key, returns a list of flattened *key*,*value* pairs that correspond to the object passed in.
#
# The `MAX_DEPTH` parameter controls the maximum flattening limit.
# + slideshow={"slide_type": "skip"} solution2="hidden"
MAX_DEPTH = 10
# + slideshow={"slide_type": "skip"} solution2="hidden"
def set_flatten_depth(depth):
    """Set the global maximum flattening depth used by flatten()."""
    global MAX_DEPTH
    MAX_DEPTH = depth
# + slideshow={"slide_type": "skip"} solution2="hidden"
def flatten(key, val, depth=None):
    """Flatten a (possibly nested) value into (dotted_key, scalar) pairs.

    Containers and objects with a __dict__ are descended into; nested keys
    are joined with '.', e.g. flatten('v', vehicle) may yield
    [('v.year', '2000'), ('v.kind', 'car'), ...].

    Args:
        key: name under which `val` is being flattened.
        val: value to flatten; scalars are returned as a single pair.
        depth: remaining recursion budget; defaults to the *current*
            MAX_DEPTH so that set_flatten_depth() takes effect (a def-time
            default would freeze the initial value — that was a bug).

    Returns:
        List of (flattened_key, value) pairs.
    """
    if depth is None:
        depth = MAX_DEPTH
    if depth <= 0:
        # Budget exhausted: stop descending and return the value as-is.
        return [(key, val)]
    if isinstance(val, (int, float, complex, str, bytes, bytearray)):
        # Scalars (including strings) are leaves.
        # (The original also had a later, unreachable str branch; removed.)
        return [(key, val)]
    if isinstance(val, (set, frozenset, list, tuple, range)):
        # BUG FIX: the original paired the enumerate index with the whole
        # (key, value) tuple from the recursive call, so flattened values
        # came out as tuples. Unpack the recursive pairs like the dict
        # branch does, joining sub-keys onto the index.
        return [("%s.%s" % (key, k), v)
                for i, elt in enumerate(val)
                for k, v in flatten(i, elt, depth - 1)]
    if isinstance(val, dict):
        return [("%s.%s" % (key, k), v)
                for dkey, elt in val.items()
                for k, v in flatten(dkey, elt, depth - 1)]
    if hasattr(val, '__dict__'):
        # Custom objects: flatten their instance attributes.
        return [("%s.%s" % (key, k), v)
                for attr, elt in val.__dict__.items()
                for k, v in flatten(attr, elt, depth - 1)]
    return [(key, val)]
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# Next, we hook the `flatten()` into the `Context` class so that the parameters we obtain are flattened.
# + slideshow={"slide_type": "skip"} solution2="hidden"
class Context(Context):
    """Context that flattens complex local values into scalar entries."""

    def extract_vars(self, frame):
        """Return the frame's locals, flattened via flatten()."""
        flat = {}
        for name, value in inspect.getargvalues(frame).locals.items():
            for flat_name, flat_value in flatten(name, value):
                flat[flat_name] = flat_value
        return flat

    def parameters(self, all_vars):
        """Keep variables whose flattened name starts with a parameter name."""
        return {name: value
                for name, value in all_vars.items()
                if any(name.startswith(p) for p in self.parameter_names)}

    def qualified(self, all_vars):
        """Prefix every variable name with the current method name."""
        return {"%s:%s" % (self.method, name): value
                for name, value in all_vars.items()}
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# With this change, we have the following trace output.
# + slideshow={"slide_type": "skip"} solution2="hidden"
with Tracer(INVENTORY, methods=INVENTORY_METHODS, log=True) as tracer:
process_inventory(tracer.my_input)
print()
print('Traced values:')
for t in tracer.trace:
print(t)
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# Our change seems to have worked. Let us derive the grammar.
# + slideshow={"slide_type": "skip"} solution2="hidden"
vehicle_grammar = recover_grammar(
process_inventory,
[INVENTORY],
methods=INVENTORY_METHODS)
# + slideshow={"slide_type": "skip"} solution2="hidden"
syntax_diagram(vehicle_grammar)
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# The recovered grammar contains all the details that we were able to recover before.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Exercise 2: Incorporating Taints from InformationFlow
# + [markdown] slideshow={"slide_type": "fragment"} solution2="hidden" solution2_first=true
# We have been using *string inclusion* to check whether a particular fragment came from the input string. This is unsatisfactory as it required us to compromise on the size of the strings tracked, which was limited to those greater than `FRAGMENT_LEN`. Further, it is possible that a single method could process a string where a fragment repeats, but is part of different tokens. For example, an embedded comma in the CSV file would cause our parser to fail. One way to avoid this is to rely on *dynamic taints*, and check for taint inclusion rather than string inclusion.
#
# The chapter on [information flow](InformationFlow.ipynb) details how to incorporate dynamic taints. Can you update our grammar miner based on scope to use *dynamic taints* instead?
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# **Solution.**
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# First, we import `ostr` to track the origins of string fragments.
# + slideshow={"slide_type": "skip"} solution2="hidden"
from InformationFlow import ostr
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# Next, we define `is_fragment()` to verify that a fragment is from a given input string.
# + slideshow={"slide_type": "skip"} solution2="hidden"
def is_fragment(fragment, original):
    """Return True iff all of `fragment`'s taint origins stem from `original`."""
    assert isinstance(original, ostr)
    if not isinstance(fragment, ostr):
        # An untainted value cannot be traced back to the input.
        return False
    return set(fragment.origin).issubset(set(original.origin))
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# Now, all that remains is to hook the tainted fragment check to our grammar miner. This is accomplished by modifying `in_current_record()` and `ignored()` methods in the `InputStack`.
# + slideshow={"slide_type": "skip"} solution2="hidden"
class TaintedInputStack(InputStack):
    """Input stack that matches fragments by taint origin, not string search."""

    def in_current_record(self, val):
        # A value belongs to the current call record if its taint origins
        # are contained in those of any variable on the top stack frame.
        return any(is_fragment(val, var) for var in self.inputs[-1].values())
# + slideshow={"slide_type": "skip"} solution2="hidden"
class TaintedInputStack(TaintedInputStack):
    def ignored(self, val):
        # Anything that is not an ostr carries no taint and cannot be traced.
        return not isinstance(val, ostr)
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# We then hook in the `TaintedInputStack` to the grammar mining infrastructure.
# + slideshow={"slide_type": "skip"} solution2="hidden"
class TaintedScopedVars(ScopedVars):
    def create_call_stack(self, i):
        # Use the taint-aware input stack instead of the string-based one.
        return TaintedInputStack(i)
# + slideshow={"slide_type": "skip"} solution2="hidden"
class TaintedScopeTracker(ScopeTracker):
    def create_assignments(self, *args):
        # Track assignments with taint-aware scoped variables.
        return TaintedScopedVars(*args)
# + slideshow={"slide_type": "skip"} solution2="hidden"
class TaintedScopeTreeMiner(ScopeTreeMiner):
    """Tree miner that uses taint origins instead of string inclusion."""

    def string_part_of_value(self, part, value):
        # Fragment membership is decided by taint-origin inclusion.
        return is_fragment(part, value)

    def partition(self, part, value):
        # Locate `part` inside `value` via its first and last origin indices.
        # NOTE(review): assumes part's origins occur contiguously and in
        # order within value.origin — confirm for non-contiguous taints.
        begin = value.origin.index(part.origin[0])
        end = value.origin.index(part.origin[-1])+1
        return value[:begin], value[begin:end], value[end:]
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# <!-- **Advanced.** The *dynamic taint* approach is limited in that it can not observe implicit flows. For example, consider the fragment below.
#
# ```python
# if my_fragment == 'begin':
# return 'begin'
# ```
#
# In this case, we lose track of the string `begin` that is returned even though it is dependent on the value of `my_fragment`. For such cases, a better (but costly) alternative is to rely on concolic execution and capture the constraints as it relates to input characters on each variable.
#
# The chapter on [concolic fuzzing](ConcolicFuzzer.ipynb) details how to incorporate concolic symbolic execution to program execution. Can you update our grammar miner to use *concolic execution* to track taints instead?
# -->
# + slideshow={"slide_type": "skip"} solution2="hidden"
class TaintedScopedGrammarMiner(ScopedGrammarMiner):
    """Grammar miner wired to use taint-based tracking end to end."""

    def create_tracker(self, *args):
        # Swap in the taint-aware tracker.
        return TaintedScopeTracker(*args)

    def create_tree_miner(self, *args):
        # Swap in the taint-aware tree miner.
        return TaintedScopeTreeMiner(*args)
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# Finally, we define `recover_grammar_with_taints()` to recover the grammar.
# + slideshow={"slide_type": "skip"} solution2="hidden"
def recover_grammar_with_taints(fn, inputs, **kwargs):
    """Like recover_grammar(), but tracks fragments via dynamic taints.

    Each input is wrapped in an ostr so every character carries its origin
    index through the traced execution.
    """
    miner = TaintedScopedGrammarMiner()
    for input_string in inputs:
        with Tracer(ostr(input_string), **kwargs) as tracer:
            fn(tracer.my_input)
        miner.update_grammar(tracer.my_input, tracer.trace)
    return readable(miner.clean_grammar())
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden"
# Here is how one can use it.
# + slideshow={"slide_type": "skip"} solution2="hidden"
inventory_grammar = recover_grammar_with_taints(
process_inventory, [INVENTORY],
methods=[
'process_inventory', 'process_vehicle', 'process_car', 'process_van'
])
# + slideshow={"slide_type": "skip"} solution2="hidden"
syntax_diagram(inventory_grammar)
# + slideshow={"slide_type": "skip"} solution2="hidden"
url_grammar = recover_grammar_with_taints(
url_parse, URLS_X + ['ftp://user4:pass1@host4/?key4=value3'],
methods=['urlsplit', 'urlparse', '_splitnetloc'])
|
docs/notebooks/GrammarMiner.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
import pandas as pd
# One way to obtain Pandas DataFrames (other than reading them from file or database) is by manually creating them, as in the example below. Creating such small toy examples is useful for studying the exact behavior of API methods or our own tooling.
transaction_df = pd.DataFrame({
'amount': [42., 100., 999.],
'from': ['bob', 'alice', 'bob'],
'to': ['alice', 'bob', 'alice']
})
transaction_df
# Below are some examples of commonly used methods from the Pandas DataFrame API with prompts to study the API docs. In this context, "study" doesn't mean you should fully understand and master these API methods. "Studying" in this sense is more of an invitation to start thinking about how these methods internally work, how Pandas as a toolset is constructed, and how you could create your own tools using similar constructs.
# ## Selection and Transformation
# The following statement selects all transactions with 'alice' as recipient and adds a column that doubles the transaction amount.
(
transaction_df
.loc[lambda df: df['to'] == 'alice'] # 1
.assign(mod_amount=lambda df: df['amount'] * 2) # 2
)
# Study the [API documentation for `.loc[]`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html), and the [API documentation for `.assign()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.assign.html). Think about the following questions (without the need to provide an answer), and try to experiment a bit with the examples given in the docs:
#
# - What _is_ `loc[]` exactly? Is it a method or function? Something else entirely?
# - How does it handle the statements that are passed to these square brackets?
# - How does it have access to the DataFrame's data?
# - What is this `lambda` expression?
# - What is the `df` within the `lambda` expression?
# - Is the original `transaction_df` DataFrame modified by the above statement?
# ## Grouping and Aggregating
for recipient, recipient_df in transaction_df.groupby('to'):
print(f'{recipient} received a total sum of {recipient_df["amount"].sum()}')
transaction_df.groupby('to').sum()
# Study the [API documentation for `.groupby()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.groupby.html). Think about the following questions (without the need to provide an answer), and try to experiment a bit with the examples given in the docs:
#
# - What is the result of calling `groupby()` on the DataFrame? What is being returned?
# - How is it possible that we can use that result in a `for ... in` loop?
# - How is the `sum()` aggregation created?
# ## Pipelines
# +
def select_amounts_greater_than(transaction_df, amount=100):
    """Return only the rows whose 'amount' column strictly exceeds `amount`."""
    mask = transaction_df['amount'] > amount
    return transaction_df.loc[mask]
transaction_df.pipe(select_amounts_greater_than, amount=99)
# -
# Study the [API documentation for `.pipe()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pipe.html). Think about the following questions (without the need to provide an answer), and try to experiment a bit with the examples given in the docs:
#
# - What is being passed as the first argument to `pipe()`?
# - What object or method or function is actually calling the `select_amounts_greater_than()` function?
# - What is being returned by `pipe()`?
|
00_introduction.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
10*'t'
X = 'Silicon'
type(X)
X.upper()
X.capitalize()
X.split()
Y = '<NAME>'
Y.split()
Y.split('i')
# # List
my_list = ['Silicon',2004,8.01]
type(my_list)
# # List Support Indexing and Slicing
my_list[0]
my_list[0:2]
# # List are Mutable
my_list[0]='Bhubaneswar'
my_list
my_list.append('Odisha')
my_list
my_list.insert(0,'India')
my_list
my_list.pop()
my_list.pop(0)
my_list
my_list1 = ['e','q','t','a','d']
my_list1.sort()
my_list1
my_list1.reverse()
my_list1
# # Q) Define a list my_list2 = ['a','g',['w',2]]. Objective is to extract 2
my_list2 = ['a','g',['w',2]]
my_list2[2][1]
my_list2.pop(2).pop(1)
# # Dictionaries
my_dict = {'Breakfast':30 , 'Lunch':80 , 'Dinner':60}
type(my_dict)
my_dict['Dinner']
my_dict.keys()
my_dict.values()
my_dict.items()
my_list = ['silicon',{'breakfast':30,'lunch':80,'dinner':60}]
my_list[1]
my_list[1]['lunch']
# # Define a Dictionary D={'K1' : ['Silicon',2020],'K2':30}. Now Objective is to grab Silicon and make it capital.
D = {'K1':['Silicon',2020],'K2':30}
D
D['K1'][0].upper()
|
Python-Week 1/10 Aug 2021, Day 2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
# -
DIRPATH = '/home/apbarret/Data/CMIP6/siconc/SImon/CESM2/historical/r11i1p1f1'
filepath = os.path.join(DIRPATH, 'siconc_SImon_CESM2_historical_r11i1p1f1_gn_200001-201412.nc')
# +
with xr.open_dataset('../siconc_SImon_MRI-ESM2-0_historical_r1i1p1f1_gn_185001-201412.nc') as ds:
dsClm = ds.groupby('time.month').mean(dim='time')
dsClm
# -
fig = plt.figure(figsize=(7,7))
ax = fig.add_subplot(projection=ccrs.NorthPolarStereo())
ax.set_extent([-180.,180.,50.,90.], ccrs.PlateCarree())
dsClm.siconc.sel(month=9).plot(ax=ax, transform=ccrs.PlateCarree())
dsClm.siconc.sel(month=9).plot()
|
notebooks/process_and_plot_siconc.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from pathlib import Path
# %matplotlib inline
# # Regression Analysis: Seasonal Effects with Sklearn Linear Regression
# In this notebook, you will build a SKLearn linear regression model to predict Yen futures ("settle") returns with *lagged* CAD/JPY exchange rate returns.
# Currency pair exchange rates for CAD/JPY
cad_jpy_df = pd.read_csv(
Path("cad_jpy.csv"), index_col="Date", infer_datetime_format=True, parse_dates=True
)
cad_jpy_df.head()
# Trim the dataset to begin on January 1st, 1990
cad_jpy_df = cad_jpy_df.loc["1990-01-01":, :]
cad_jpy_df.head()
# # Data Preparation
# ### Returns
# Create a series using "Price" percentage returns, drop any nan"s, and check the results:
# (Make sure to multiply the pct_change() results by 100)
# In this case, you may have to replace inf, -inf values with np.nan"s
# Convert prices to percentage returns, as instructed above.
cad_jpy_df['Price'] = cad_jpy_df.Price.pct_change() * 100
# ### Lagged Returns
# Create a lagged return using the shift function
cad_jpy_df['Lagged_Return'] = cad_jpy_df['Price'].shift()
# BUG FIX: the instructions above say to replace inf AND -inf with NaN,
# but the original only replaced -inf; replace both before dropping NaNs.
cad_jpy_df = cad_jpy_df.replace([np.inf, -np.inf], np.nan).dropna()
cad_jpy_df.tail()
# ### Train Test Split
# Create a train/test split for the data using 2018-2019 for testing and the rest for training
train = cad_jpy_df[:'2017']
test = cad_jpy_df['2018':]
# Create four dataframes:
# X_train (training set using just the independent variables), X_test (test set of of just the independent variables)
# Y_train (training set using just the "y" variable, i.e., "Futures Return"), Y_test (test set of just the "y" variable):
X_train = train["Lagged_Return"].to_frame()
X_test = test["Lagged_Return"].to_frame()
y_train = train["Price"]
y_test = test["Price"]
# +
# Preview the X_train data
X_train.head()
# -
# # Linear Regression Model
# +
# Create a Linear Regression model and fit it to the training data
from sklearn.linear_model import LinearRegression
# Fit a SKLearn linear regression using just the training set (X_train, Y_train):
model=LinearRegression()
model.fit(X_train, y_train)
# -
# # Make predictions using the Testing Data
#
# **Note:** We want to evaluate the model using data that it has never seen before, in this case: `X_test`.
# Make a prediction of "y" values using just the test dataset
predictions = model.predict(X_test)
# Assemble actual y data (Y_test) with predicted y data (from just above) into two columns in a dataframe:
Results = y_test.to_frame()
Results["Predicted Return"] = predictions
# Plot the first 20 predictions vs the true values
Results[:20].plot(subplots=True)
# # Out-of-Sample Performance
#
# Evaluate the model using "out-of-sample" data (`X_test` and `y_test`)
# +
from sklearn.metrics import mean_squared_error
# Calculate the mean_squared_error (MSE) on actual versus predicted test "y"
mse = mean_squared_error(
Results["Price"],
Results["Predicted Return"]
)
# Using that mean-squared-error, calculate the root-mean-squared error (RMSE):
rolling_rmse = np.sqrt(mse)
print(f"Out-of-Sample Root Mean Squared Error (RMSE): {rolling_rmse}")
# -
# # In-Sample Performance
#
# Evaluate the model using in-sample data (X_train and y_train)
# +
# Construct a dataframe using just the "y" training data:
in_sample_results = y_train.to_frame()
# Add a column of "in-sample" predictions to that dataframe:
in_sample_results["In-sample Predictions"] = model.predict(X_train)
# Calculate in-sample mean_squared_error (for comparison to out-of-sample)
in_sample_mse = mean_squared_error(
in_sample_results["Price"],
in_sample_results["In-sample Predictions"]
)
# Calculate in-sample root mean_squared_error (for comparison to out-of-sample)
in_sample_rmse = np.sqrt(in_sample_mse)
print(f"In-sample Root Mean Squared Error (RMSE): {in_sample_rmse}")
# -
# # Conclusions
# **Question:** Does this model perform better or worse on out-of-sample data as compared to in-sample data?
#
# **Answer:** YOUR ANSWER HERE
|
Starter_Code/regression_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Metodo Jerarquico
#
# Por: [<NAME>](https://joserzapata.github.io/)
# Importar librerias
# %matplotlib inline
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.metrics import confusion_matrix, classification_report
from sklearn import datasets
import numpy as np
import pandas as pd
# ## Importar Datos
iris = datasets.load_iris()
X = iris.data
y_iris = iris.target
class_names = iris.target_names
# ### Clustering Jerarquico (Hierarchical Clustering)
# Generacion de la matriz de linkage
# se utiliza la distancia ward, pero hay otras distancias:
# 'euclidean' (default), 'cityblock' aka Manhattan, 'hamming', 'cosine'...
# y existen varios metodos de linkage 'single', 'complete', 'average', ...
Z = linkage(X, 'ward')
# +
# Algo bueno es verificar el Cophenetic Correlation Coefficient del cluster
# con la función cophenet (). Este coeficiente correlaciona las distancias
# por pares de todas las muestras que esten en cada cluster jerárquico.
# Cuanto más cerca esté el valor de 1, mejor será el cluster
from scipy.cluster.hierarchy import cophenet
from scipy.spatial.distance import pdist
c, coph_dists = cophenet(Z, pdist(X))
c
# -
# ## Dendogram
#
# Es una visualización en forma de un árbol que muestra el orden y las distancias de las fusiones durante la clusterizacion jerárquica.
plt.figure(figsize=(25, 12))
plt.title('Dendograma jerárquico para clasificar IRIS setosa',fontsize=24)
plt.xlabel('Indice de entrada (1-50,51-100,101-150)')
plt.ylabel('Distancia')
max_d = 10
den = dendrogram(
Z,
leaf_rotation=90.,
leaf_font_size=8.,
show_contracted=True
)
plt.axhline(y=max_d, c='k')
plt.show()
# Mas informacion la pueden encontrar en:
#
# [https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/#Selecting-a-Distance-Cut-Off-aka-Determining-the-Number-of-Clusters](https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/#Selecting-a-Distance-Cut-Off-aka-Determining-the-Number-of-Clusters)
|
jupyter_notebook/4_no_supervisados/2_Metodo_Jerarquico.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Matrix Inverse
#
# In this exercise, you will write a function to calculate the inverse of either a 1x1 or a 2x2 matrix.
# +
### TODO: Write a function called inverse_matrix() that
### receives a matrix and outputs the inverse
###
### You are provided with start code that checks
### if the matrix is square and if not, throws an error
###
### You will also need to check the size of the matrix.
### The formula for a 1x1 matrix and 2x2 matrix are different,
### so your solution will need to take this into account.
###
### If the user inputs a non-invertible 2x2 matrix or a matrix
### of size 3 x 3 or greater, the function should raise an
### error. A non-invertible
### 2x2 matrix has ad-bc = 0 as discussed in the lesson
###
### Python has various options for raising errors
### raise RuntimeError('this is the error message')
### raise NotImplementedError('this functionality is not implemented')
### raise ValueError('The denominator of a fraction cannot be zero')
def inverse_matrix(matrix):
    """Return the inverse of a 1x1 or 2x2 matrix given as a list of lists.

    Raises:
        ValueError: if the matrix is not square, or if it is singular
            (a 1x1 zero matrix, or a 2x2 matrix with ad - bc == 0).
        NotImplementedError: if the matrix is 3x3 or larger.
    """
    if len(matrix) != len(matrix[0]):
        raise ValueError('The matrix must be square')
    # Only 1x1 and 2x2 formulas are implemented in this exercise.
    if len(matrix) > 2:
        raise NotImplementedError('this functionality is not implemented')

    # 1x1 case: the inverse is the reciprocal of the single entry.
    if len(matrix) == 1:
        if matrix[0][0] == 0:
            raise ValueError('The denominator of a fraction cannot be zero')
        return [[1. / matrix[0][0]]]

    # 2x2 case: inverse of [[a, b], [c, d]] is 1/(ad - bc) * [[d, -b], [-c, a]].
    det = matrix[0][0] * matrix[1][1] - matrix[1][0] * matrix[0][1]
    if det == 0:
        raise ValueError('The denominator of a fraction cannot be zero')
    scale = 1. / det
    return [
        [scale * matrix[1][1], -scale * matrix[0][1]],
        [-scale * matrix[1][0], scale * matrix[0][0]],
    ]
# +
## Run this cell to check your output. If this cell does
## not output anything, your answers were as expected.
assert inverse_matrix([[100]]) == [[0.01]]
assert inverse_matrix([[4, 5], [7, 1]]) == [[-0.03225806451612903, 0.16129032258064516],
                                            [0.22580645161290322, -0.12903225806451613]]
# -
### Run this line of code and see what happens. Because ad = bc, this
### matrix does not have an inverse — a ValueError is raised (intentional).
inverse_matrix([[4, 2], [14, 7]])
### Run this line of code and see what happens. This is a 3x3 matrix,
### so a NotImplementedError is raised (intentional).
inverse_matrix([[4, 5, 1], [2, 9, 7], [6, 3, 9]])
|
4_6_Matrices_and_Transformation_of_State/6_inverse_matrix.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Distance Metrics
# There are lots of ways to calculate the distance between two points. The most familiar is the **Euclidean** metric, which is calculated in the familiar Pythagorean way: $\sqrt{(x_1 - x_0)^2 + (y_1 - y_0)^2}$, but there are others.
#
# If we're driving a cab in Manhattan, then this metric may not be most useful. The shortest distance between two points would be the Euclidean line _only if_ there were a street connecting them directly, whereas the more likely situation is that I can only travel along east-west streets and north-south streets. This **Manhattan** or **taxicab** metric is calculated like this: $|x_1 - x_0| + |y_1 - y_0|$.
#
# If we write the _Euclidean_ metric like: $((x_1 - x_0)^2 + (y_1 - y_0)^2)^{\frac{1}{2}}$, then we can write the Manhattan metric like: $(|x_1 - x_0|^1 + |y_1 - y_0|^1)^{\frac{1}{1}}$ (the absolute values are needed so that negative differences do not cancel), and this invites the following **Minkowski** generalization:
#
# $d_n = (|x_1 - x_0|^n + |y_1 - y_0|^n)^{\frac{1}{n}}$, where we can let $n$ be any natural number.
#
# And if we calculate $lim_{n\rightarrow\infty}((x_1 - x_0)^n + (y_1 - y_0)^n)^{\frac{1}{n}}$, we're working with the **Chebyshev** distance.
# +
from matplotlib import pyplot as plt
import numpy as np

# Solve each metric's unit-circle equation d((0,0),(x,y)) = 1 for y >= 0;
# the negative half is plotted separately below as -y.
X = np.linspace(-1, 1, 200)
y0 = 1 - np.abs(X)            # This is the 'taxicab' (Manhattan) metric, n = 1
y1 = (1 - X ** 2) ** 0.5      # This is Euclidean, n = 2
y2 = (1 - X ** 4) ** 0.25     # n = 4
y3 = (1 - X ** 10) ** 0.1     # n = 10
y4 = (1 - X ** 1000) ** 0.001 # n = 1000: nearly the Chebyshev square
#y5 = (1 - X ** 100000) ** 0.00001
# +
# Let's plot these Unit Circles! One color per metric; each circle is drawn
# as its upper (y) and lower (-y) halves.
plt.figure(figsize = (8, 8))
plt.plot(X, y0, 'm')
plt.plot(X, -y0, 'm')
plt.plot(X, y1, 'b')
plt.plot(X, -y1, 'b')
plt.plot(X, y2, 'g')
plt.plot(X, -y2, 'g')
plt.plot(X, y3, 'y')
plt.plot(X, -y3, 'y')
plt.plot(X, y4, 'r')
plt.plot(X, -y4, 'r');
#plt.plot(X, y5)
# -
|
Phase_4/ds-clustering_kvo32-main/notebooks/unit_minkowski_circles.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <p align="center">
# <img src="https://github.com/GeostatsGuy/GeostatsPy/blob/master/TCG_color_logo.png?raw=true" width="220" height="240" />
#
# </p>
#
# ## Training GAN for 2D Channel Reservoir
#
# #### <NAME>, Graduate Candidate, The University of Texas at Austin
#
# ##### [LinkedIn](https://www.linkedin.com/in/honggeun-jo/?originalSubdomain=kr) | [GitHub](https://github.com/whghdrms) | [Twitter](https://twitter.com/HonggeunJ)
#
# #### <NAME>, Associate Professor, University of Texas at Austin
#
# ##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) | [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy)
#
#
# #### Workflow for Training GAN to Extract Major Patterns in 2D Subsurface Model
#
# This workflow demonstrate training of GAN [(Goodfellow et al., 2014)](https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf) for 2D channel reservoir.
#
# * the 2D channel reservoir models are generated from multipoint statistics (MPS), which is wildly used for facies modeling.
#
# * MPS uses a training image and mimics the main patterns in the training image to generates realizations through search tree algorithm. Details are [here](https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1002/2017WR021078).
#
# #### Import Packages
#
# First of all, we need to load the required libraries, such as
#
# * [numpy](https://numpy.org/): To generate arrays <br>
# * [matplotlib](https://matplotlib.org/): Visualization purposes <br>
# * [tensorflow and Keras](https://www.tensorflow.org/learn): Design, compile and train neural network models
# * os: to update the working directory
# * time: to measure compute time
# +
# Load library:
import numpy as np
import matplotlib.pyplot as plt
import os
import time

# Change working directory, if needed:
# os.chdir(' ')

# Make the directories where trained models and training snapshots are saved.
# exist_ok=True makes this idempotent and avoids the racy check-then-create
# pattern (`if os.path.isdir(...) == False: os.mkdir(...)`).
os.makedirs('Trained Models', exist_ok=True)
os.makedirs('Snapshots', exist_ok=True)
# -
# #### Load the Training Models
#
# This includes the original training image and the 500 MPS realizations
# +
# Load both the training image and the 500 MPS realizations.
Loaded_data = np.load('MPS_Training_image_and_Realizations_500.npz')
TI = Loaded_data['array1']        # training image
MPS_real = Loaded_data['array2']  # realizations, indexed on the last axis

# Visualize the training image for the 2D channel subsurface model:
plt.figure(figsize = (7,7))
plt.imshow(TI, cmap='binary')
plt.title('Training Image')
plt.xlabel('X axis, 100 ft')
plt.ylabel('Y axis, 100 ft')
# -
# #### Visualize Multiple Realizations from MPS
#
# Let's look at 9 realizations from the MPS simulation
# +
# Visualize the first 9 MPS realizations generated from the training image:
print('Total Number of realizations: %d \nThe dimension of each reservoir model: %d x %d in X and Y' %(MPS_real.shape[2],MPS_real.shape[0], MPS_real.shape[1]))
plt.figure(figsize = (10,10))
for i in range(9):
    plt.subplot(3,3,i+1)
    plt.imshow(MPS_real[:,:,i], cmap='binary')
    plt.title('Realization # %d' %(i))
    plt.xlabel('X axis, 100 ft')
    plt.ylabel('Y axis, 100 ft')
plt.tight_layout()
# -
# #### Step 1. Define convolutional neural networks for GAN (i.e., Generator and Discriminator)
# First we need to define the structure of Generative adversarial network (GAN) to learn the main pattern from the above realizations. Following figure shows the schematic diagram of GAN.
#
# 
#
# Here, both **Generator** and **Discriminator** are convolutional neural networks.
#
# * **Generator** maps a latent random vector (consists of 100 random variables that follow Gaussian, N(0,1)) to rule-based model whose dimension is 128 x 128 grid cells in x- and y- direction.
#
# * **Discriminator** takes a subsurface model (from either dataset or generator) and compute the probability the input model to be realistic (i.e., having major patterns within training dataset.)
#
# The following includes functions:
#
# * **generator** - the convolutional neural network that makes new images
#
# * **discriminator** - the convolutional neural network that takes images and accesses their probability that they came from the training image set
#
# * **GAN** - the coupled generator and discriminator
#
# * **train** - the procedure for training the GAN storing results
#
# #### Import the TensorFlow Package Modules
#
# We need additional package / modules for building the GAN
# +
## Import the required packages for GAN
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# Be sure that you have tensorflow >= 2.0
print(tf.__version__)
assert tf.__version__.startswith('2.') # This will raise an error if tensorflow < 2.0

# Use 'float32' as the default float dtype for Keras layers and weights.
tf.keras.backend.set_floatx('float32')

# Fix the random seeds for tensorflow / numpy so runs are reproducible.
tf.random.set_seed(77777)
np.random.seed(77777)
# -
# #### Load the Training Data
#
# Load the training data
# Load training data: reshape the 500 realizations of 128x128 pixels into
# the (N, H, W, C) = (500, 128, 128, 1) layout expected by the networks.
X_train = MPS_real.reshape(128,128,1,500).astype("float32")
X_train = np.moveaxis(X_train,-1,0)

# Batch and shuffle the data.
# NOTE(review): BUFFER_SIZE (60000) exceeds the 500 samples, so the shuffle
# buffer holds the whole dataset — this gives a full uniform shuffle.
BUFFER_SIZE = 60000
BATCH_SIZE = 16
train_dataset = tf.data.Dataset.from_tensor_slices(X_train).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
# #### **Design Generator** Function
#
# Design CNN to map a random latent vector (dimension of 100) to a image
def make_generator_model():
    """Build the generator CNN: latent vector (100,) -> (128, 128, 1) image."""
    net = tf.keras.Sequential()
    # Project the 100-dim latent vector onto an 8x8 grid with 64 channels.
    net.add(layers.Dense(8 * 8 * 64, input_shape=(100,)))
    net.add(layers.Reshape((8, 8, 64)))
    # Four upsampling stages; each doubles height/width and halves channels:
    # (8x8x64) -> (16x16x32) -> (32x32x16) -> (64x64x8) -> (128x128x4).
    for channels in (32, 16, 8, 4):
        net.add(layers.Conv2DTranspose(channels, kernel_size=(4, 4),
                                       strides=(2, 2), padding="same"))
        net.add(layers.BatchNormalization(momentum=0.8))
        net.add(layers.ReLU())
    # Collapse to a single channel; sigmoid keeps cell values in [0, 1].
    net.add(layers.Conv2D(1, kernel_size=(3, 3), padding="same",
                          activation='sigmoid'))
    return net
# #### **Design Discriminator** Function
#
# Design CNN to map a image to the likelihood to be true. (output = 1 if real, 0 otherwise)
def make_discriminator_model():
    """Build the discriminator CNN: (128, 128, 1) image -> P(image is real)."""
    net = tf.keras.Sequential()
    # First downsampling stage; no batch norm on the raw input.
    net.add(layers.Conv2D(8, kernel_size=(3, 3), strides=2,
                          input_shape=[128, 128, 1], padding="same"))
    net.add(layers.LeakyReLU(alpha=0.2))
    net.add(layers.Dropout(0.25))
    # Three more conv stages; the first two halve the spatial size:
    # (64x64x8) -> (32x32x16) -> (16x16x32) -> (8x8x64).
    for channels, stride in ((16, 2), (32, 2), (64, 1)):
        net.add(layers.Conv2D(channels, kernel_size=(3, 3), strides=stride,
                              padding="same"))
        net.add(layers.BatchNormalization(momentum=0.8))
        net.add(layers.LeakyReLU(alpha=0.2))
        net.add(layers.Dropout(0.25))
    # Map the final feature volume to a single real/fake probability.
    net.add(layers.Flatten())
    net.add(layers.Dense(1,activation = 'sigmoid'))
    return net
# #### Define the Loss and Optimizers
## Define the loss and optimizers
# Loss function of GAN.
# FIX: the discriminator ends in a sigmoid activation, so its outputs are
# probabilities, not logits. from_logits must therefore be False —
# from_logits=True would apply a second sigmoid inside the loss and
# systematically distort the gradients for both networks.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=False)

def discriminator_loss(real_output, fake_output):
    """Discriminator loss: label real images 1 and generated images 0."""
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

def generator_loss(fake_output):
    """Generator loss: reward fooling the discriminator into predicting 1."""
    return cross_entropy(tf.ones_like(fake_output), fake_output)

# Optimizers of Generator and Discriminator (both Adam, lr=2e-4, beta_1=0.5).
generator_optimizer = tf.keras.optimizers.Adam(0.0002,0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(0.0002,0.5)
# #### Instantiate the Generator and Discriminator
# Instantiate generator and discriminator
generator = make_generator_model()
discriminator = make_discriminator_model()

# Training configuration
EPOCHS = 1001                  # number of passes over the dataset
noise_dim = 100                # length of the generator's latent input vector
num_examples_to_generate = 25  # 5x5 grid in the saved snapshots

# We will reuse this fixed seed over time to visualize progress (e.g., in an
# animated GIF): same latent vectors, evolving generator.
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# +
## Define functions to train GAN

# Notice the use of `tf.function`:
# this annotation causes the function to be "compiled" into a TensorFlow
# graph, so each training step runs as a single graph call per batch.
@tf.function
def train_step(images):
    """Run one adversarial training step on a batch of real images.

    Updates both networks: the generator (to better fool the discriminator)
    and the discriminator (to better separate real from generated models).
    """
    # Noise (i.e., latent random variables) for the fake images
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    # ------------------------------------------------
    # 0. Compute loss of generator and discriminator
    # ------------------------------------------------
    # Two tapes so each network's gradients can be taken independently.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # generate fake images from the noise
        generated_images = generator(noise, training=True)
        # output of discriminator when feeding real images
        real_output = discriminator(images, training=True)
        # output of discriminator when feeding fake images
        fake_output = discriminator(generated_images, training=True)
        # Compute both the generator and discriminator losses
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)
    # ------------------------------------------------
    # 1. Update Generator
    # ------------------------------------------------
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    # ------------------------------------------------
    # 2. Update Discriminator
    # ------------------------------------------------
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def train(dataset, epochs):
    """Train the GAN for `epochs` passes over `dataset`, saving progress.

    Every 10 epochs, the current generator is written to 'Trained Models/'
    and a grid of samples (from the fixed `seed`) to 'Snapshots/'.
    """
    start = time.time()
    for epoch in range(epochs):
        # Train GAN on batches
        for image_batch in dataset:
            train_step(image_batch)
        # Save the model and a snapshot every 10 epochs
        if (epoch) % 10 == 0:
            save_model(generator, epoch)
            save_imgs(generator, epoch, test_input = seed)
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
        start = time.time()
# This is to save a snapshot of images from the generator
def save_imgs(generator, epoch, test_input):
    """Save a 5x5 grid of generator samples to Snapshots/MPS_epoch_<epoch>.png."""
    # r, c = rows and columns of the subplot grid
    r, c = 5, 5
    noise = test_input
    # Generate 25 new images with the given fixed seed
    gen_imgs = generator(noise, training=False)
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='binary')
            axs[i,j].axis('off')
            cnt += 1
    fig.savefig("Snapshots/MPS_epoch_%d.png" % epoch)
    plt.close()  # free the figure so memory does not grow over training
# This is to save the trained generators
def save_model(generator, epoch):
    """Persist the generator (architecture + weights) for the given epoch."""
    generator.save('Trained Models/Generator_Epoch_%d.h5' % epoch)
# -

# Train the GAN (long-running; saves models/snapshots as it goes)
train(train_dataset, EPOCHS)
# #### Step 2. Load the trained GAN
#
# Now we load the trained generator and compare GAN realizations with MPS models.
#
# * First we will do visual inspection on both models depending on GAN's training process.
#
# * After that, we will use multi-dimensional scaling to visualize reservoir models in 2D space and analyze similarity between MPS and GAN realizations.
from tensorflow.keras.models import load_model # this is for loading NN models
# +
# Visual inspection: compare one MPS realization against GAN samples from
# generators saved at increasing training epochs.
plt.figure(figsize = (10,10))
plt.subplot(3,3,1)
plt.imshow(MPS_real[:,:,0], cmap='binary')
plt.title('Realization # %d' %(0))
plt.xlabel('X axis, 100 ft')
plt.ylabel('Y axis, 100 ft')
epochs = [0, 30, 50, 80, 100, 150, 200, 250, 1000]
# Reuse the same latent vector so differences come only from training state.
fixed_noise = np.random.normal(0,1,(1,100))
for i in range(1,9):
    plt.subplot(3,3,i+1)
    Generator = load_model('Trained Models/Generator_Epoch_%d.h5' % epochs[i], compile = False)
    GAN_realization = Generator.predict(fixed_noise).reshape(128,128)
    plt.imshow(GAN_realization, cmap='binary')
    plt.title('GAN realization - %d epochs ' %(epochs[i]))
    plt.xlabel('X axis, 100 ft')
    plt.ylabel('Y axis, 100 ft')
plt.tight_layout()
# +
# Load the fully trained generator and draw 9 fresh samples.
Generator = load_model('Trained Models/Generator_Epoch_1000.h5', compile = False)
GAN_realization = Generator.predict(np.random.normal(0,1,(9,100))).reshape(9,128,128)
plt.figure(figsize = (10,10))
for i in range(9):
    plt.subplot(3,3,i+1)
    plt.imshow(GAN_realization[i, :,:], cmap='binary')
    plt.title('GAN realization # %d' %(i))
    plt.xlabel('X axis, 100 ft')
    plt.ylabel('Y axis, 100 ft')
plt.tight_layout()
# +
# Visualize similarity in 2D space via multi-dimensional scaling (MDS)
from sklearn.manifold import MDS
mds = MDS(random_state=123)
# Flatten the first 100 MPS realizations into per-model feature vectors.
MPS_realizations = X_train.reshape(500,-1)[:100]
plt.figure(figsize=(15,10))
epochs = [0,50,80,150,250,1000]
for i in range(6):
    # Map 100 MPS + 100 GAN realizations into a shared 2D space.
    Generator = load_model('Trained Models/Generator_Epoch_%d.h5' % epochs[i], compile = False)
    GAN_realizations = Generator.predict(np.random.normal(0,1,(100,100))).reshape(100,-1)
    mds_array = np.concatenate([MPS_realizations, GAN_realizations], axis = 0)
    mds_2D = mds.fit_transform(mds_array)
    plt.subplot(2,3,i+1)
    # First 100 rows are MPS models, the remaining 100 are GAN samples.
    plt.plot(mds_2D[:100,0],mds_2D[:100,1],'o', alpha = 0.3, label = 'MPS realizations')
    plt.plot(mds_2D[100:,0],mds_2D[100:,1],'o', alpha = 0.3, label = 'GAN realizations')
    plt.title('GAN realizations - %d epochs ' %(epochs[i]))
    plt.grid('on')
    plt.xlabel('MDS 1')
    plt.ylabel('MDS 2')
    plt.legend()
plt.tight_layout()
# -
# #### Benefits of using GAN for subsurface models
#
# 1. We can represent realizations with the smaller number of numeric values (~100). **Dimensionality reduction**.
# 2. ...which means we can now represent qualitative information (we had in training dataset) using quantitative values.
# 3. As we can represent realizations with a numeric expression, we can easily apply optimization or history matching to GAN realization.
# 4. GAN guarantees that any new realizations conserve the major geological patterns present in the training dataset. **Navigation in manifold**.
# 5. The flexibility of GAN enables expanding its application to any type of subsurface model (e.g., 3D, channel, lobe, deltaic, etc.)
# #### Comments
#
# The Texas Center for Geostatistics has many other demonstrations on the basics of working with DataFrames, ndarrays, univariate statistics, plotting data, declustering, data transformations, trend modeling and many other workflows available [here](https://github.com/GeostatsGuy/PythonNumericalDemos), along with a package for geostatistics in Python called [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy).
#
# We hope this was helpful,
#
# *Michael* and *Honggeun*
#
# ***
#
# #### More on <NAME> and the Texas Center for Data Analytics and Geostatistics:
#
# ### <NAME>, Associate Professor, University of Texas at Austin
# *Novel Data Analytics, Geostatistics and Machine Learning Subsurface Solutions*
#
# With over 17 years of experience in subsurface consulting, research and development, Michael has returned to academia driven by his passion for teaching and enthusiasm for enhancing engineers' and geoscientists' impact in subsurface resource development.
#
# For more about Michael check out these links:
#
# #### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)
#
# #### Want to Work Together?
#
# I hope this content is helpful to those that want to learn more about subsurface modeling, data analytics and machine learning. Students and working professionals are welcome to participate.
#
# * Want to invite me to visit your company for training, mentoring, project review, workflow design and / or consulting? I'd be happy to drop by and work with you!
#
# * Interested in partnering, supporting my graduate student research or my Subsurface Data Analytics and Machine Learning consortium (co-PIs including Profs. Foster, Torres-Verdin and van Oort)? My research combines data analytics, stochastic modeling and machine learning theory with practice to develop novel methods and workflows to add value. We are solving challenging subsurface problems!
#
# * I can be reached at <EMAIL>.
#
# I'm always happy to discuss,
#
# *Michael*
#
# <NAME>, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin
#
# #### More Resources Available at: [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)
#
# ### for any further comments or questions, please contact <NAME> via <EMAIL>
|
SubsurfaceDataAnalytics_ConvolutionalNeuralNetworks.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from vedo import *
# Render inside the notebook using the ipyvtk backend.
embedWindow('ipyvtk')

s = Sphere().cutWithPlane(normal=(1,1,1))
scals = s.points()[:,2] # use z-coords to color vertices

# NB, actions can be concatenated into a pipeline:
# add point scalars with a choice of color map, then show with axes.
s.cmap('Set3', scals)
s.show(axes=1, viewup='z')
# -
# Style the active plotter after the first render: gradient background
# from 'lg' to 'lb', plus a text message overlay.
settings.plotter_instance.backgroundColor('lg','lb')
settings.plotter_instance.add("some message")
|
examples/notebooks/basic/sphere.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + run_control={"frozen": true} deletable=false dc={"key": "<KEY>"} tags=["context"] editable=false
# ## 1. This is a Jupyter notebook!
# <p>A <em>Jupyter notebook</em> is a document that contains text cells (what you're reading right now) and code cells. What is special with a notebook is that it's <em>interactive</em>: You can change or add code cells, and then <em>run</em> a cell by first selecting it and then clicking the <em>run cell</em> button above ( <strong>▶|</strong> Run ) or hitting <code>ctrl + enter</code>. </p>
# <p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_33/datasets/run_code_cell_image.png" alt=""></p>
# <p>The result will be displayed directly in the notebook. You <em>could</em> use a notebook as a simple calculator. For example, it's estimated that on average 256 children were born every minute in 2016. The code cell below calculates how many children were born on average on a day. </p>
# + dc={"key": "<KEY>"} tags=["sample_code"]
# I'm a code cell, click me, then run me!
256 * 60 * 24 # births per minute × minutes per hour × hours per day
# + run_control={"frozen": true} deletable=false dc={"key": "b5ed313abb"} tags=["context"] editable=false
# ## 2. Put _any_ code in code cells
# <p>But a code cell can contain much more than a simple one-liner! This is a notebook running python and you can put <em>any</em> python code in a code cell (but notebooks can run other languages too, like R). Below is a code cell where we define a whole new function (<code>greet</code>). To show the output of <code>greet</code> we run it last in the code cell as the last value is always printed out. </p>
# + dc={"key": "b5ed313abb"} tags=["sample_code"]
def greet(first_name, last_name):
    """Return a Bond-style introduction: 'My name is Last, First Last!'."""
    return f'My name is {last_name}, {first_name} {last_name}!'

# Replace with your first and last name.
# That is, unless your name is already <NAME>.
greet('Arash', 'Tabrizian')
# + run_control={"frozen": true} deletable=false dc={"key": "<KEY>"} tags=["context"] editable=false
# ## 3. Jupyter notebooks ♡ data
# <p>We've seen that notebooks can display basic objects such as numbers and strings. But notebooks also support the objects used in data science, which makes them great for interactive data analysis!</p>
# <p>For example, below we create a <code>pandas</code> DataFrame by reading in a <code>csv</code>-file with the average global temperature for the years 1850 to 2016. If we look at the <code>head</code> of this DataFrame the notebook will render it as a nice-looking table.</p>
# + dc={"key": "<KEY>"} tags=["sample_code"]
# Importing the pandas module
import pandas as pd

# Reading in the global temperature data
global_temp = pd.read_csv('datasets/global_temperature.csv')

# Take a look at the first datapoints
# (the last expression in a cell is displayed automatically)
global_temp.head()
# + run_control={"frozen": true} deletable=false dc={"key": "<KEY>"} tags=["context"] editable=false
# ## 4. Jupyter notebooks ♡ plots
# <p>Tables are nice but — as the saying goes — <em>"a plot can show a thousand data points"</em>. Notebooks handle plots as well, but it requires a bit of magic. Here <em>magic</em> does not refer to any arcane rituals but to so-called "magic commands" that affect how the Jupyter notebook works. Magic commands start with either <code>%</code> or <code>%%</code> and the command we need to nicely display plots inline is <code>%matplotlib inline</code>. With this <em>magic</em> in place, all plots created in code cells will automatically be displayed inline. </p>
# <p>Let's take a look at the global temperature for the last 150 years.</p>
# + dc={"key": "<KEY>"} tags=["sample_code"]
# Setting up inline plotting using jupyter notebook "magic"
# %matplotlib inline
import matplotlib.pyplot as plt

# Plotting global temperature in degrees celsius by year.
plt.plot(global_temp['year'], global_temp['degrees_celsius'])

# Adding axis labels
plt.xlabel('Year')
plt.ylabel('Global Temperature (in Celsius)')
# + run_control={"frozen": true} deletable=false dc={"key": "<KEY>"} tags=["context"] editable=false
# ## 5. Jupyter notebooks ♡ a lot more
# <p>Tables and plots are the most common outputs when doing data analysis, but Jupyter notebooks can render many more types of outputs such as sound, animation, video, etc. Yes, almost anything that can be shown in a modern web browser. This also makes it possible to include <em>interactive widgets</em> directly in the notebook!</p>
# <p>For example, this (slightly complicated) code will create an interactive map showing the locations of the three largest smartphone companies in 2016. You can move and zoom the map, and you can click the markers for more info! </p>
# + dc={"key": "<KEY>"} tags=["sample_code"]
# Making a map using the folium module
import folium
phone_map = folium.Map()

# Top three smart phone companies by market share in 2016.
# Each entry holds a marker location [lat, lon] and a popup label.
companies = [
    {'loc': [37.4970, 127.0266], 'label': 'Samsung: 20.5%'},
    {'loc': [37.3318, -122.0311], 'label': 'Apple: 14.4%'},
    {'loc': [22.5431, 114.0579], 'label': 'Huawei: 8.9%'}]

# Adding markers to the map.
for company in companies:
    marker = folium.Marker(location=company['loc'], popup=company['label'])
    marker.add_to(phone_map)

# The last object in the cell always gets shown in the notebook
phone_map
# + run_control={"frozen": true} deletable=false dc={"key": "9ccef156d2"} tags=["context"] editable=false
# ## 6. Goodbye for now!
# <p>This was just a short introduction to Jupyter notebooks, an open source technology that is increasingly used for data science and analysis. I hope you enjoyed it! :)</p>
# + dc={"key": "9ccef156d2"} tags=["sample_code"]
# Are you ready to get started with DataCamp projects?
# Flip this to True when you are.
I_am_ready = False

# Ps.
# Feel free to try out any other stuff in this notebook.
# It's all yours!
|
DataCampProjects/Introduction to DataCamp Projects/notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intro
# In this document I want to give an overview of magic commands, short cuts and code snippets I find extremely helpful in my day-2-day work with jupyter-notebooks.
# # Command Mode
# I don't know if it is just me or if there are some other people as well, who came across the command mode quite late. So this would have saved me really a lot of time!
#
# To enter the command mode, just press ```esc```.
#
# - Now you can move across the cells with the arrow keys.
# - Enable editing of a specific cell, just by hitting ```enter```.
# - Insert new cells before or after the current cell with ```a```, ```b```.
# - Change between ```code```and ```markdown```with ```y```and ```m```.
# - Deleting a cell is as easy as pressing ```x```.
# - ...
#
# Check out https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/ for more functionalities.
# # Toggling code
# Sometimes you like to use the notebook as a nice and clean documentation for sharing your results and maybe you don't want to disturb the viewer with long and maybe messy codecells. Then you can insert one of the following code snippets at the beginning of your notebook.
#
# The first one is for toggling all code in the notebook # https://stackoverflow.com/questions/27934885/how-to-hide-code-from-cells-in-ipython-notebook-visualized-with-nbviewer
#
# The second one is for toggling one specific cell by adding the command ```hide_toggle()``` at the end https://stackoverflow.com/questions/31517194/how-to-hide-one-specific-cell-input-or-output-in-ipython-notebook/48084050
# +
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
# +
from IPython.display import HTML
import random

def hide_toggle(for_next=False):
    """Return an HTML link that toggles show/hide of a cell's input code.

    for_next=False: the link controls the cell it is rendered in.
    for_next=True: the link controls the NEXT cell and permanently hides
    the current cell's own input.
    """
    # jQuery selector for the currently selected rendered code cell.
    this_cell = """$('div.cell.code_cell.rendered.selected')"""
    next_cell = this_cell + '.next()'

    toggle_text = 'Toggle show/hide'  # text shown on toggle link
    target_cell = this_cell  # target cell to control with toggle
    js_hide_current = ''  # bit of JS to permanently hide code in current cell (only when toggling next cell)

    if for_next:
        target_cell = next_cell
        toggle_text += ' next cell'
        js_hide_current = this_cell + '.find("div.input").hide();'

    # Random suffix keeps the generated JS function name unique per call,
    # so multiple toggle links in one notebook do not collide.
    js_f_name = 'code_toggle_{}'.format(str(random.randint(1,2**64)))

    html = """
<script>
function {f_name}() {{
{cell_selector}.find('div.input').toggle();
}}
{js_hide_current}
</script>
<a href="javascript:{f_name}()">{toggle_text}</a>
""".format(
        f_name=js_f_name,
        cell_selector=target_cell,
        js_hide_current=js_hide_current,
        toggle_text=toggle_text
    )
    return HTML(html)

hide_toggle()
# -
# # Toggle output
# Sometimes you also want to toggle the output, especially if you have created some long print statements in a loop and you don't want to scroll down so much.
#
# Therefore you have to enter the command mode again, press ```esc``` and use the shortcut ```esc + o```. The same shortcut is used to untoggle the output.
# # Variable output
# One great thing about jupyter-notebook is the output of all variables, including arrays and dataframes. But by default only the last statement will produce a printed output. So, in the end you will end up with lots of cells or un/commenting lines for printing several variables. But there is an easy solution - check out the following code snippet. Found on https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/ - there is also the description how to enable this permanently for all your notebooks.
# +
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
hide_toggle()
# +
a=10
b=20
c=a+b
# With InteractiveShell.ast_node_interactivity = "all" (set above), every
# bare expression below is echoed as output — not just the last one.
a
b
c
# -
# # Help - man pages
# By adding a ```?``` in front of a function, you will get the documentation for this function, so no need to google it while coding.
#
# --> And if you want to close the documentation, just hit ```esc```.
# ?print
# # Execute shell commands
# For executing shell commands, e.g. creating folders, getting filenames, you just have to preset a ```!```
# !ls -al
# # Magic commands
# Ipython has so called magic commands built-in, from which I find some extremely helpful!
#
# Some more examples can also be found here:
# https://towardsdatascience.com/top-8-magic-commands-in-jupyter-notebook-c1582e813560
#
# To get an overview of all available commands, type ```%lsmagic```.
# %lsmagic
# In the following list I describe my favourite commands
# - %%time: Measure cell execution time
# - %env: set and get system environment variables
# - %who: show all variables of a specific type, e.g. str / int
# - %load: load code into your notebook (does not work with .ipynb, but with .py)
# - %run: execute external code (does work with .ipynb)
# %%time
for i in range(1,10000):
i+=1
# %env NEW_ENV=MAGIC
# %env
# %who int
# %load ./hello_world.py
# %run ./hello_world.ipynb
# # Virtual Environments w/ Jupyter
# Python and Python packages are quite volatile and dynamic, so depending on their versions packages might or might not be compatible. Virtual environments are commonly used to ensure a stable Python environment for your current project, with the needed packages, without breaking your system-wide Python installation.
#
# Setting up the venv is well known
#
# ```python
# python3 -m venv --system-site-packages NAME_ENV
# ```
#
# But if you use jupyter-notebooks a lot for development, you want to use the same environments also in your notebooks:
#
# ```python
# pip install --user ipykernel
# python -m ipykernel install --user --name=myenv
# source env/bin/activate
# ```
|
JupyterHacks/.ipynb_checkpoints/Jupyter-Hacks-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Random Forests
# +
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
# -
# ## Exemplo 1: Digits Dataset
# **Trabalhando os dados**
from sklearn.datasets import load_digits
# +
digits = load_digits()
#Plotando um dos valores
plt.gray()
plt.matshow(digits.images[5])
plt.show()
# -
# Dividindo os dados entre treino e teste
Xtrain, Xtest, ytrain, ytest = train_test_split(digits.data, digits.target, random_state=0)
# **Treinando o modelo**
# +
Model1 = RandomForestClassifier(n_estimators=1000) # Instanciando
Model1.fit(Xtrain, ytrain)
# -
# **Aplicando o modelo treinado no conjunto de teste**
ypred = Model1.predict(Xtest)
# **Verificando a acurácia**
# Nos dados abaixo podemos ver a acurácia de acerto para cada dígito predito pelo algoritmo treinado.
from sklearn import metrics
print(metrics.classification_report(ypred, ytest))
# Podemos confrontar os valores preditos x reias através da confusion matrix
# +
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(ytest, ypred)
sns.heatmap(mat.T, square=True, annot=True, cbar=False, cmap='seismic')
plt.xlabel('valor real')
plt.ylabel('valor predito');
# -
# ## EXEMPLO2: Breast Cancer Dataset
# **Trabalhando os dados**
from sklearn.datasets import load_breast_cancer
# +
cancer = load_breast_cancer()
X_train1, X_test1, y_train1, y_test1 = train_test_split(cancer.data, cancer.target, random_state=0)
# -
# **Treinando e verificando a acurácia**
# +
estimators = (2, 10, 100, 1000)
for i in estimators:
cancerForest = RandomForestClassifier(n_estimators=i, random_state=0)
cancerForest.fit(X_train1, y_train1);
print('Teste com', i, 'arvores', cancerForest.score(X_test1, y_test1))
# -
# Como podemos ver acima, conforme o número de estimadores foi aumentando a acurácia foi ficando maior, porém não houve ganho quando passamos de 100 para 1000 de árvores. Devemos sempre verificar se há ganho quando incrementamos os estimadores, caso contrário podemos deixar o algoritmo lento desnecessariamente.
# **Verificando a importância atribuída a cada uma das features**
plt.plot(cancerForest.feature_importances_, 'o')
plt.title('Modelo com 1000 arvores')
plt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90);
# Outro ponto que podemos observar é que devido à forma que a Random Forest é criada, o algoritmo acaba dando peso à outras features ingoradas quando trabalhamos somente com a decision tree.
# ## Exemplo3: Boston house-prices dataset
# Para esse dataset vamos utilizar Random Forest de regressão para prever o preço das casas
# **Trabalhando os Dados**
from sklearn.datasets import load_boston
# +
boston = load_boston()
features = pd.DataFrame(boston.data)
target = pd.DataFrame(boston.target)
features.head()
# -
# Como vamos trabalhar com um algoritmo regressor, é importante colocar os dados na mesma escala. Vamos utiizar o ``RobustScaler`` que traz dados robustos ainda que haja outliers.
# Importando
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
# +
# Deixando na mesma escala tanto a features quanto o target
features = pd.DataFrame(scaler.fit_transform(features))
target = pd.DataFrame(scaler.fit_transform(target))
features.head()
# -
X_train, X_test, y_train, y_test = train_test_split(features, target, random_state=0)
# **Treinando o modelo**
regressor = RandomForestRegressor(n_estimators=500, random_state=0, )
regressor.fit(X_train, y_train.values.ravel());
# **Verificando a acurácia**
regressor.score(X_train, y_train)
regressor.score(X_test, y_test)
# **Verificando a importância atribuída a cada uma das features**
# Plot the importance the fitted Boston-housing regressor assigned to each
# feature. The original cell mistakenly reused `cancer` (30 breast-cancer
# features) to size/label this 13-feature Boston plot, and the title said
# "1000 arvores" although the regressor was built with n_estimators=500.
plt.plot(regressor.feature_importances_, 'o')
plt.title('Modelo com 500 arvores')
plt.xticks(range(boston.data.shape[1]), boston.feature_names, rotation=90);
|
Python/05 Scikit-learn/04.04 Random Forests - Exemplos.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Update sklearn to prevent version mismatches
# !pip install sklearn --upgrade
# install joblib. This will be used to save your model.
# Restart your kernel after installing
# !pip install joblib
import pandas as pd
# # Read the CSV and Perform Basic Data Cleaning
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()
# # Select your features (columns)
# +
# Set features. This will also be used as your x values.
X = df.drop(['koi_disposition'], axis=1)
#Use `koi_disposition` for the y values
y = df["koi_disposition"].values.reshape(-1, 1)
print(X.shape,y.shape)
# -
# # Create a Train Test Split
#
#
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y)
# -
X_train.head()
# # Pre-processing
#
# Scale the data using the MinMaxScaler and perform some feature selection
# +
# Scale your data
from sklearn.preprocessing import StandardScaler
# Create a StandardScater model and fit it to the training data
X_scaler = StandardScaler().fit(X_train)
# +
# Transform the training and testing data using the X_scaler models
### BEGIN SOLUTION
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
### END SOLUTION
# -
# # Train the Model - Method1: Logistic Regression Model
#
#
# +
from sklearn.linear_model import LogisticRegression
model1 = LogisticRegression()
#model1
model1.fit(X_train_scaled, y_train)
print(f"Training Data Score: {model1.score(X_train_scaled, y_train)}")
print(f"Testing Data Score: {model1.score(X_test_scaled, y_test)}")
# -
# # Save the Model
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'ml_LogisticModel.sav'
joblib.dump(model1, filename)
|
code/model_1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# <h1><b>Word Embedding Based Answer Evaluation System for Online Assessments (WebAES)</b></h1>
# <h3>A smart system to automate the process of answer evaluation in online assessments.</h3>
# <h5> BERT Model for WebAES</h5>
# +
# To perform sentence encoding using BERT model
from sentence_transformers import SentenceTransformer
# To determine similarity between 2 vectors
from sklearn.metrics.pairwise import cosine_similarity
# +
# Answer expected by the faculty
exp_ans = '''The performance measure for medical diagnosis system may include the number of patients healed by
correctly and accurately diagnosing diseases. For example, the performance measure may be the percentage of cases diagnosed
correctly by the system. The environment for a medical diagnosis system includes patients and their vital signs. This
environment is fully observable, dynamic and complete. The actuators include display screens and alert systems that send
feedback to doctors. Sensors include equipment including medical sensors as well as medical images.'''
# Answer submitted by the student
stu_ans = '''The percentage of cases correctly identified by the medical diagnosis system is the performance measure
for a medical diagnosis system. The performance measure can be described as the number of cases out of every one hundred
cases that were diagnosed correctly by the system. In this case, the environment is complete, observable and static. The
feedback system which gives the result of diagnosis to a doctor is the actuator. Sensors include medical sensors such as
ECG equipment as well as cameras to monitor images of X Rays.'''
# List of documents
docs = [exp_ans, stu_ans]
# +
# Load pre-trained BERT model
model = SentenceTransformer('bert-base-nli-mean-tokens')
# Encode documents using BERT model and display shape
doc_embeddings = model.encode(docs)
doc_embeddings.shape
# +
# Calculate similarity using cosine similarity measure
# cosine_similarity returns a (1, n) matrix; [0][0] extracts the single
# expected-answer vs student-answer score.
sim_score = cosine_similarity([doc_embeddings[0]], doc_embeddings[1:])[0][0]
# Calculate marks based on similarity score and display marks scored
# Marks scale linearly with the similarity score: [0, 1] -> out of 10.
marks = round(sim_score*10, 2)
print('Similarity score: {}\nMarks (out of 10): {}'.format(sim_score, marks))
|
notebooks/WebAES-BERT-Evaluation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
"""
File : kdeCloneDiversityTumourSize.ipynb
Plot : kernel density estimation of tumour states with respect to the number of clones and size
Input :
SourceData_Fig5b.xlsx
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
path_to_all_source_data = "../Source data for figures"
# +
def plot_data(
    data
):
    """Draw one KDE jointplot of tumour slice diameter vs subclone count per condition.

    For every growth condition in ``data.Condition``, plots the joint kernel
    density of slice diameter against number of subclones, restricted to
    slices with diameter >= 20 mm. 'Surface' is drawn in red, 'Volume' in
    blue; any other condition falls back to grey.

    Parameters
    ----------
    data : pandas.DataFrame
        Expected to contain 'Condition', 'SliceDiameter' and
        'NumberSubclone' columns (per SourceData_Fig5b.xlsx).
    """
    # Fix: the original only assigned `color` for 'Surface'/'Volume', so any
    # other condition raised a NameError. Use a lookup with a grey default.
    condition_colors = {'Surface': 'red', 'Volume': 'blue'}
    for condition in data.Condition.unique():
        print(f'{condition} Growth')
        data_cond = data.loc[
            data.Condition == condition
        ]
        # Keep only macroscopic slices (diameter >= 20 mm).
        macrodiv_plot = data_cond.loc[
            data_cond.SliceDiameter >= 20
        ]
        color = condition_colors.get(condition, 'grey')
        jp = sns.jointplot(
            data = macrodiv_plot,
            x = "SliceDiameter", y = "NumberSubclone",
            color = color,
            kind="kde",
            fill=True,
            height = 2.75,
            levels = 10,
            thresh = 0.05
        )
        jp.set_axis_labels('Diameter of tumour slice (mm)', 'Number of subclones', fontsize=8)
        jp.ax_marg_x.set_xlim(-10, 160)
        jp.ax_marg_y.set_ylim(-2, 15)
        plt.show()
# -
# # Figure 5b
path_to_excelfile = os.path.join(
path_to_all_source_data,
"Source_Data_Figure_5",
"SourceData_Fig5b.xlsx"
)
excelfile = pd.ExcelFile(path_to_excelfile)
excelfile.sheet_names
data = pd.read_excel(
excelfile,
sheet_name=excelfile.sheet_names[0]
)
plot_data(data)
|
Scripts for making figures/kdeCloneDiversityTumourSize.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img style="float: right; width: 350px" src="./Data/Logo.png">
# # Lending Club - Do market trends influence who defaults and who doesn't?
#
# **Introduction to Data Science Fall 2018**<br/>
# **Group #72:** <NAME>, <NAME>, <NAME>, <NAME>
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text
HTML(styles)
# +
# Data and Stats packages
import numpy as np
import pandas as pd
import re
import statsmodels.api as sm
from statsmodels.api import OLS
from sklearn import metrics, datasets
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegressionCV
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.preprocessing import PolynomialFeatures
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.utils import shuffle
#Keras and Tensorflow
# Visualization packages
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
# Other
import itertools
from collections import OrderedDict
import tensorflow as tf
import keras
from keras.optimizers import RMSprop
from keras.models import Sequential
from keras.layers import Dense , Flatten, Dropout
from keras import regularizers
# Aesthetic settings
from IPython.display import display
pd.set_option('display.max_columns', 999)
pd.set_option('display.width', 500)
sns.set_style('whitegrid')
sns.set_context('talk')
#Suppress warnings
import warnings
warnings.filterwarnings('ignore')
# -
# ## Data Processing
# +
#import data
df_07to11 = pd.read_csv('./Data/LoanStats3a.csv',header=1)
df_2016Q1 = pd.read_csv('./Data/LoanStats_2016Q1.csv',header=1)
df_2016Q2 = pd.read_csv('./Data/LoanStats_2016Q2.csv',header=1)
df_2016Q3 = pd.read_csv('./Data/LoanStats_2016Q3.csv',header=1)
df_2016Q4 = pd.read_csv('./Data/LoanStats_2016Q4.csv',header=1)
# -
#concat quarterly dfs for 2016
df_16 = pd.concat([df_2016Q1,df_2016Q2,df_2016Q3,df_2016Q4])
#figure out index when 2007 data begins
# Scan the issue dates for the first row whose year suffix is '07' and drop
# everything before it. The original printed the discovered index but then
# dropped a hard-coded magic number of rows (39533); using the discovered
# index keeps the split correct even if the input file changes.
first_07_idx = None
for idx, issue_date in enumerate(df_07to11['issue_d'].values):
    if issue_date[-2:] == '07':
        first_07_idx = idx
        print(first_07_idx)
        break
df_07 = df_07to11.drop(np.arange(first_07_idx))
def clean(df):
    """Subset a raw LendingClub export to modelling columns and normalise dtypes.

    Steps: keep only the predictor columns, convert percentage/term strings
    to floats, parse the date columns, derive `length_credit_history` (in
    months), impute `mths_since_last_delinq`, one-hot encode
    `home_ownership` and `addr_state`, and drop rows with remaining NaNs.
    Returns the cleaned copy of the frame.
    """
    # Columns kept for modelling (response `loan_status` first).
    preds = ['loan_status'
             ,'loan_amnt'
             ,'funded_amnt'
             ,'term'
             ,'int_rate'
             ,'installment'
             ,'emp_length'
             ,'home_ownership'
             ,'annual_inc'
             ,'issue_d'
             ,'zip_code'
             ,'addr_state'
             ,'dti'
             ,'delinq_2yrs'
             ,'earliest_cr_line'
             ,'inq_last_6mths'
             ,'mths_since_last_delinq'
             ,'open_acc'
             ,'pub_rec'
             ,'revol_bal'
             ,'revol_util'
             ,'total_acc'
             ]
    states = df['addr_state'].unique().tolist()  # NOTE(review): never used afterwards — dead code
    df = df[preds]
    # e.g. ' 36 months' -> 36.0
    df['term'] = df['term'].str.rstrip(' months').astype('float')
    # e.g. '13.5%' -> 0.135
    df['int_rate'] = df['int_rate'].str.rstrip('%').astype('float') / 100.0
    # Strips the chars '<', '+', ' ', 'y','e','a','r','s', so '10+ years' -> 10.0
    df['emp_length'] = df['emp_length'].str.strip('< + years').astype('float')
    df['issue_d'] = pd.to_datetime(df['issue_d'])
    # e.g. '941xx' -> 94100.0 (coarse numeric zip prefix)
    df['zip_code'] = df['zip_code'].str.replace('xx','00').astype('float')
    df['earliest_cr_line'] = pd.to_datetime(df['earliest_cr_line'])
    # Credit-history length in average months (30.44 days ~= 1 month).
    df['length_credit_history'] = (df['issue_d']-df['earliest_cr_line']).astype('timedelta64[D]')/30.44
    # Borrowers with no recorded delinquency get their full history length
    # instead of NaN, so the row survives the dropna() below.
    df['mths_since_last_delinq'] = np.where(df['mths_since_last_delinq'].isna() , df['length_credit_history'], df['mths_since_last_delinq'])
    df['revol_util'] = df['revol_util'].str.rstrip('%').astype('float') / 100.0
    df = pd.get_dummies(df, columns = ['home_ownership'] )
    df = pd.get_dummies(df, columns = ['addr_state'] )
    preds.append('length_credit_history')  # NOTE(review): dead code — `preds` is not used again
    df = df.dropna()
    return df
def clean_loan_status (df):
    """Collapse LendingClub loan-status labels into 'paid' / 'default'.

    The two 'Does not meet the credit policy' variants are folded into the
    corresponding plain label; any other status is left untouched. Mutates
    the frame in place and also returns it.
    """
    status_map = {
        'Fully Paid': 'paid',
        'Charged Off': 'default',
        'Does not meet the credit policy. Status:Fully Paid': 'paid',
        'Does not meet the credit policy. Status:Charged Off': 'default',
    }
    df['loan_status'] = df['loan_status'].replace(status_map)
    return df
clean_loan_status(df_07)
clean_loan_status(df_16)
# +
default_16 = df_16[df_16['loan_status']=='default']
not_default_16 = df_16[df_16['loan_status']=='paid']
default_07 = df_07[df_07['loan_status']=='default']
not_default_07 = df_07[df_07['loan_status']=='paid']
# -
df_16 = pd.concat([default_16, not_default_16])
df_07 = pd.concat([default_07, not_default_07])
df_16.shape, df_07.shape
# ## Visualization
# ### Loan Status vs.Installment
plt.figure(figsize=(8,5))
plt.hist(df_07[df_07['loan_status']=='default']['installment'], label='Default', color='red')
plt.hist(df_07[df_07['loan_status']=='paid']['installment'], label='Paid', color='blue', alpha=0.4)
plt.xlabel("Installment Amount")
plt.ylabel('Frequency')
plt.title("Installment Amount and Default Rate 2007")
plt.legend()
plt.show()
# There is no visible relationship between installment amount and default rate in 2007
plt.figure(figsize=(8,5))
plt.hist(df_16[df_16['loan_status']=='default']['installment'], label='Default', color='red')
plt.hist(df_16[df_16['loan_status']=='paid']['installment'], label='Paid', color='yellow', alpha=0.4)
plt.xlabel("Installment Amount")
plt.ylabel('Frequency')
plt.title("Installment Amount and Default Rate 2016")
plt.legend()
plt.show()
# There is no visible relationship between installment amount and default rate in 2016
# ### Loan Status vs. Annual Income
plt.figure(figsize=(8,5))
plt.scatter(df_07['annual_inc'],df_07['loan_status'], color = 'red')
plt.show()
# It appears that individuals with higher incomes (>70k) tend to pay their loans in 2007
plt.figure(figsize=(8,5))
plt.scatter(df_16['annual_inc'],df_16['loan_status'])
plt.show()
# It appears that individuals with higher incomes tend to pay their loans in 2016
sns.countplot(y='loan_status', hue = 'home_ownership' , data=df_07)
plt.title('Default / Paid vs Home Ownership - 2007')
plt.show()
# No reasonable pattern detected
sns.countplot(y='loan_status', hue = 'home_ownership' , data=df_16)
plt.title('Default / Paid vs Home Ownership - 2016')
plt.show()
# No reasonable pattern detected
# ###Understanding distribution of loans by State
by_loan_07 = df_07.groupby(['addr_state'], as_index=False).loan_amnt.sum()
by_loan_16 = df_16.groupby(['addr_state'], as_index=False).loan_amnt.sum()
# +
states_07 = by_loan_07['addr_state'].values.tolist()
loan_per_state_07 = by_loan_07['loan_amnt'].values.tolist()
states_16 = by_loan_16['addr_state'].values.tolist()
loan_per_state_16 = by_loan_16['loan_amnt'].values.tolist()
map_07_data = OrderedDict([('state_codes', states_07),
('state_loans', loan_per_state_07)])
map_16_data = OrderedDict([('state_codes', states_16),
('state_loans', loan_per_state_16)])
map_07 = pd.DataFrame.from_dict(map_07_data)
map_16 = pd.DataFrame.from_dict(map_16_data)
for i in map_07.columns:
map_07[i]=map_07[i].astype(str)
for i in map_16.columns:
map_16[i]=map_16[i].astype(str)
map_07['txt'] = map_07['state_codes'] + '<br>' + 'Loan Amount: ' + map_07['state_loans']
map_16['txt'] = map_16['state_codes'] + '<br>' + 'Loan Amount: ' + map_16['state_loans']
# +
#Map of 2007
import plotly.plotly as py
import plotly.graph_objs as go
scl = scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\
[0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']]
data = [ dict(
type='choropleth',
colorscale = scl,
autocolorscale = False,
locations = map_07['state_codes'],
z = map_07['state_loans'],
locationmode = 'USA-states',
text = map_07['txt'],
marker = dict(
line = dict (
color = 'rgb(255,255,255)',
width = 2
) ),
colorbar = dict(
title = "USD")
) ]
layout = dict(
title = 'Loan Issued per state in 2007 <br>(Hover for breakdown)',
geo = dict(
scope='usa',
projection=dict( type='albers usa' ),
showlakes = True,
lakecolor = 'rgb(255, 255, 255)'),
)
fig = dict( data=data, layout=layout )
py.iplot( fig, filename='d3-cloropleth-map')
# -
# Most of the loans appear to be distributed around the States in the West Coast and New England in 2007
# +
scl = scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\
[0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']]
data = [ dict(
type='choropleth',
colorscale = scl,
autocolorscale = False,
locations = map_16['state_codes'],
z = map_16['state_loans'],
locationmode = 'USA-states',
text = map_16['txt'],
marker = dict(
line = dict (
color = 'rgb(255,255,255)',
width = 2
) ),
colorbar = dict(
title = "USD")
) ]
layout = dict(
title = 'Loan Issued per state in 2016 <br>(Hover for breakdown)',
geo = dict(
scope='usa',
projection=dict( type='albers usa' ),
showlakes = True,
lakecolor = 'rgb(255, 255, 255)'),
)
fig = dict( data=data, layout=layout )
py.iplot( fig, filename='d3-cloropleth-map')
# -
# Most of the loans appear to be distributed around the States in the West Coast and New England in 2016
# ## Building Models to Predict Loan Default
# ### Step 1: Data Preprocessing
# +
#clean up all the data
df_16 = clean(df_16)
df_07 = clean(df_07)
#get rid of colums
cols_to_remove = ['int_rate', 'issue_d', 'earliest_cr_line', 'zip_code']
df_16 = df_16.drop(cols_to_remove, axis=1)
df_07 = df_07.drop(cols_to_remove, axis=1)
# -
# We remove these columns because:
# 1. Interest Rate : Lending Club uses interest rate to tell us whether they think a debt will be repaid. For example, the higher the interest rate, the riskier the debt and the higher the chance of default
# 2. Issue date: doesn't really help us with anything
# 3. Zip Code : Including it might be considered discriminatory
df_07['loan_status'] = (pd.Series(np.where(df_07.loan_status == 'default', 0, 1), df_07.index)).values
df_16['loan_status'] = (pd.Series(np.where(df_16.loan_status == 'default', 0, 1), df_16.index)).values
# +
#spliting response variable from the rest
df_07_X = df_07.drop(['loan_status'], axis = 1)
df_07_y = df_07['loan_status']
df_16_X = df_16.drop(['loan_status'], axis = 1)
df_16_y = df_16['loan_status']
# -
#Normalize Data
to_norm = ['loan_amnt', 'funded_amnt', 'installment', 'emp_length', 'annual_inc', 'dti', 'delinq_2yrs',
'inq_last_6mths', 'mths_since_last_delinq', 'open_acc', 'pub_rec', 'revol_bal', 'revol_util',
'total_acc', 'length_credit_history']
from scipy.stats import zscore
def norm(df, cols):
    """Z-score standardise the given columns of *df* in place and return it.

    Each listed column is replaced by (x - mean) / std (population std,
    scipy's default ddof=0). Columns not in *cols* are untouched.
    """
    df[cols] = df[cols].apply(zscore)
    return df
df_07_X = norm (df_07_X, to_norm)
df_16_X = norm (df_16_X, to_norm)
df_07_X.head() #we ready to split
#Train, test data split
df_07_X_train, df_07_X_test, df_07_y_train, df_07_y_test = train_test_split(df_07_X, df_07_y,
test_size = 0.2, random_state = 90)
df_16_X_train, df_16_X_test, df_16_y_train, df_16_y_test = train_test_split(df_16_X, df_16_y,
test_size = 0.2, random_state = 90)
# ### Step 2: Building the Models
# #### M0: Trivial Model
# We implemented a trivial model that always predicts a given person will repay their loan. It is worth noting that this model is representative of Lending Club's prediction. Since we only have access to accepted loan data, Lending Club is inherently predicting that a loan will be repaid because they wouldn't give a loan that they expected not to be repaid.
#Trivial Model in which all loans are accepted
triv_mod_07_train = np.sum(df_07_y_train)/len(df_07_y_train)
triv_mod_07_test = np.sum(df_07_y_test)/len(df_07_y_test)
print('2007 Data\nTrain score: '+str(triv_mod_07_train))
print('Test score: '+str(triv_mod_07_test))
triv_mod_16_train = np.sum(df_16_y_train)/len(df_16_y_train)
triv_mod_16_test = np.sum(df_16_y_test)/len(df_16_y_test)
print('2016 Data\nTrain score: '+str(triv_mod_16_train))
print('Test score: '+str(triv_mod_16_test))
# #### M1: Logistic Regression
# The first model we chose is a simple logistic regression with cross validation. We trained it on the training set and printed the scores on both the training and test data sets. The test accuracy was 0.737 which is just below our trivial model's score- meaning it performs moderately worse than Lending Club's algorithm.
# +
#Logistic regression on 2007 data
log_mod07 = LogisticRegressionCV().fit(df_07_X_train, df_07_y_train.values)
log_mod07_train_score = log_mod07.score(df_07_X_train,df_07_y_train.values)
log_mod07_test_score =log_mod07.score(df_07_X_test,df_07_y_test.values)
print("The accuracy of Logistic Regression Model on 2007 Training Set is ", log_mod07_train_score)
print("The accuracy of Logistic Regression Model on 2007 Testing Set is ", log_mod07_test_score)
# +
#Logistic regression on 2016 data
log_mod16 = LogisticRegressionCV().fit(df_16_X_train, df_16_y_train.values)
log_mod16_train_score = log_mod16.score(df_16_X_train, df_16_y_train.values)
log_mod16_test_score = log_mod16.score(df_16_X_test, df_16_y_test.values)
print("The accuracy of Logistic Regression Model on 2016 Training Set is ", log_mod16_train_score)
print("The accuracy of Logistic Regression Model on 2016 Testing Set is ", log_mod16_test_score)
# -
# #### M2: Decision Tree Model
# To create a Decision Tree model, we began by optimizing the tree depth. Our optimal depth was found to be max_depth = 4. Depths past 4 appeared to be overfit, yielding great train accuracies, and poor test accuracies. We then ran the model on both 2007 and 2016 data. The model yielded comparable accuracies to that of Lending Club's.
# +
# Get train score, cross val score means and stds
train_scores = []
cvmeans = []
cvstds = []
depths = list(range(1, 15))
#for all specified maximum tree depths --> fit model and add scores to list
for i in depths:
#create/fit model
tree = DecisionTreeClassifier(max_depth = i).fit(df_07_X_train, df_07_y_train.values)
train_scores.append(tree.score(df_07_X_train, df_07_y_train.values))
score = cross_val_score(estimator=tree, X=df_07_X_train, y=df_07_y_train.values, cv=5)
cvmeans.append(score.mean())
cvstds.append(score.std())
cvstds = np.array(cvstds)
cvmeans = np.array(cvmeans)
# -
#create plot
plt.plot(depths, cvmeans, label="Mean Cross Val")
plt.fill_between(depths, cvmeans - 2*cvstds, cvmeans + 2*cvstds, alpha=0.5)
ylim = plt.ylim()
plt.plot(depths, train_scores, '-+', label="Train")
plt.legend()
plt.ylabel("Accuracy")
plt.xlabel("Max Depth")
plt.xticks(depths)
plt.title("Cross Val Score and Train Score vs Depth");
# +
# tree model for 2007 data
tree07 = DecisionTreeClassifier(max_depth = 4).fit(df_07_X_train, df_07_y_train.values)
tree07_train_score = tree07.score(df_07_X_train, df_07_y_train.values)
tree07_test_score = tree07.score(df_07_X_test, df_07_y_test.values)
print("The accuracy of Tree Model on 2007 Training Set is ", tree07_train_score)
print("The accuracy of Tree Model on 2007 Testing Set is ", tree07_test_score)
# +
# tree model for 2016 data
tree16 = DecisionTreeClassifier(max_depth = 4).fit(df_16_X_train, df_16_y_train.values)
tree16_train_score = tree16.score(df_16_X_train, df_16_y_train.values)
tree16_test_score = tree16.score(df_16_X_test, df_16_y_test.values)
print("The accuracy of Tree Model on 2016 Training Set is ", tree16_train_score)
print("The accuracy of Tree Model on 2016 Testing Set is ", tree16_test_score)
# -
# #### M3: Random Forest Model
# Next we wanted to create an ensemble model, building off our decision tree model. We stuck with our best max depth of 6, and chose to use 45 trees because it provided solid accuracy, without being too computationally expensive. Our model, again, yielded comparable results to that of Lending Club.
randy07 = RandomForestClassifier(n_estimators = 45, max_depth = 6).fit(df_07_X_train, df_07_y_train.values)
randy07_train_score = randy07.score(df_07_X_train, df_07_y_train.values)
randy07_test_score = randy07.score(df_07_X_test, df_07_y_test.values)
print("The accuracy of Random Forest Model on 2007 Training Set is ", randy07_train_score)
print("The accuracy of Random Forest Model on 2007 Testing Set is ", randy07_test_score)
randy16 = RandomForestClassifier(n_estimators = 45, max_depth = 6).fit(df_16_X_train, df_16_y_train.values)
randy16_train_score = randy16.score(df_16_X_train, df_16_y_train.values)
randy16_test_score = randy16.score(df_16_X_test, df_16_y_test.values)
print("The accuracy of Random Forest Model on 2016 Training Set is ", randy16_train_score)
print("The accuracy of Random Forest Model on 2016 Testing Set is ", randy16_test_score)
# #### M4: AdaBoost Model
# We used an AdaBoost model. This meta estimator fits a decision tree classifier on our training set, then fits additional copies of the model on the same training set, but adjusts weights such that subsequent classifiers focus on challenging cases. For the 2016 dataset, we had to reduce the max depth and estimators to limit the computational time of the model. Both models delivered comparable accuracies to that of Lending Club's model.
# AdaBoost over depth-6 trees for 2007: 800 estimators with a small learning rate.
ada07 = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=6), n_estimators=800, learning_rate=0.05).fit(df_07_X_train, df_07_y_train.values)
ada07_train_score = ada07.score(df_07_X_train, df_07_y_train.values)
ada07_test_score = ada07.score(df_07_X_test, df_07_y_test.values)
print("The accuracy of Ada Boost Model on 2007 Training Set is ", ada07_train_score)
print("The accuracy of Ada Boost Model on 2007 Testing Set is ", ada07_test_score)
#Takes a while to run
# Smaller configuration (depth 3, 80 estimators) to keep the much larger
# 2016 dataset computationally tractable.
ada16 = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=3), n_estimators=80, learning_rate=0.05).fit(df_16_X_train, df_16_y_train.values)
ada16_train_score = ada16.score(df_16_X_train, df_16_y_train.values)
ada16_test_score = ada16.score(df_16_X_test, df_16_y_test.values)
print("The accuracy of Ada Boost Model on 2016 Training Set is ", ada16_train_score)
print("The accuracy of Ada Boost Model on 2016 Testing Set is ", ada16_test_score)
# #### M5: GradientBoostingClassifier
# We used Gradient Boosting (GB) for classification. GB is an additive model in a forward stage-wise fashion. It allows for the optimization of arbitrary differentiable loss functions. In our case we only used a single regression tree because we are interested in a binary classification. This model did decently well on 2016 data
#
# +
# Gradient boosting with depth-1 stumps on the 2007 cohort; fixed random_state
# for reproducibility.
GB_07 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
max_depth=1, random_state=90).fit(df_07_X_train, df_07_y_train)
GB_07_train_score = GB_07.score(df_07_X_train, df_07_y_train)
GB_07_test_score = GB_07.score(df_07_X_test, df_07_y_test)
print("The accuracy of Gradient Boost Model on 2007 Training Set is ", GB_07_train_score)
print("The accuracy of Gradient Boost Model on 2007 Testing Set is ", GB_07_test_score)
# +
# Same configuration on the 2016 cohort.
GB_16 = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
max_depth=1, random_state=90).fit(df_16_X_train, df_16_y_train)
GB_16_train_score = GB_16.score(df_16_X_train, df_16_y_train)
GB_16_test_score = GB_16.score(df_16_X_test, df_16_y_test)
print("The accuracy of Gradient Boost Model on 2016 Training Set is ", GB_16_train_score)
print("The accuracy of Gradient Boost Model on 2016 Testing Set is ", GB_16_test_score)
# -
# #### M6: LDA Model
# Finally, we chose to use linear discriminant analysis. Unsurprisingly, our LDA models performed similarly to the trivial models.
#Build an LDA Model and report train and test accuracy
# Linear discriminant analysis on the 2007 cohort (default solver/priors).
lda07 = LinearDiscriminantAnalysis().fit(df_07_X_train, df_07_y_train.values)
lda07_train_score = lda07.score(df_07_X_train, df_07_y_train.values)
lda07_test_score = lda07.score(df_07_X_test, df_07_y_test.values)
print("The accuracy of LDA Model on 2007 Training Set is ", lda07_train_score)
print("The accuracy of LDA Model on 2007 Testing Set is ", lda07_test_score)
#Build an LDA Model and report train and test accuracy
# Same model on the 2016 cohort.
lda16 = LinearDiscriminantAnalysis().fit(df_16_X_train, df_16_y_train.values)
lda16_train_score = lda16.score(df_16_X_train, df_16_y_train.values)
lda16_test_score = lda16.score(df_16_X_test, df_16_y_test.values)
print("The accuracy of LDA Model on 2016 Training Set is ", lda16_train_score)
print("The accuracy of LDA Model on 2016 Testing Set is ", lda16_test_score)
# #### M7: QDA Model
# We were surprised that this model performs extremely poorly. It assumes a Gaussian distribution and differing covariances, which seemingly contributes to its poor performance with these datasets.
# +
# Quadratic discriminant analysis on the 2007 cohort.
# FIX: the printed labels now include the cohort year ("2007"/"2016") for
# consistency with every other model cell; previously both cells printed the
# same undistinguishable message.
qda07 = QuadraticDiscriminantAnalysis().fit(df_07_X_train, df_07_y_train.values)
qda07_train_score = qda07.score(df_07_X_train, df_07_y_train.values)
qda07_test_score = qda07.score(df_07_X_test, df_07_y_test.values)
print("The accuracy of QDA Model on 2007 Training Set is ", qda07_train_score)
print("The accuracy of QDA Model on 2007 Testing Set is ", qda07_test_score)
# +
# Same model on the 2016 cohort.
qda16 = QuadraticDiscriminantAnalysis().fit(df_16_X_train, df_16_y_train.values)
qda16_train_score = qda16.score(df_16_X_train, df_16_y_train.values)
qda16_test_score = qda16.score(df_16_X_test, df_16_y_test.values)
print("The accuracy of QDA Model on 2016 Training Set is ", qda16_train_score)
print("The accuracy of QDA Model on 2016 Testing Set is ", qda16_test_score)
# -
# #### M8: Artificial Neural Network
# We used Keras resting on Tensorflow to build a neural network for both datasets.
# +
# 2007 NN
# Feed-forward classifier: input sized to the 71 engineered features, three
# additional hidden ReLU layers of 100 units with dropout, and a sigmoid
# output for the binary label.
H = 100
input_dim_07 = 71
NN_07 = Sequential()
NN_07.add(Dense(H, input_dim = input_dim_07, activation='relu'))
NN_07.add(Dropout(0.5))
for i in range(1,4):
    NN_07.add(Dense(H, activation='relu'))
    NN_07.add(Dropout(0.2))
NN_07.add(Dense(1, activation='sigmoid'))
NN_07.compile(loss='binary_crossentropy',
optimizer = 'rmsprop',
metrics = ['accuracy'])
NN_07.fit(df_07_X_train, df_07_y_train,
epochs = 50,
batch_size = 128, verbose = 0)
# +
# Train/test accuracy of the 2007 network.
# NOTE(review): accuracy_score is called as (y_pred, y_true); accuracy is
# symmetric so the value is unaffected, but sklearn's documented order is
# (y_true, y_pred).
nn_07_train_accuracy = accuracy_score(NN_07.predict_classes(df_07_X_train), df_07_y_train)
nn_07_test_accuracy = accuracy_score(NN_07.predict_classes(df_07_X_test), df_07_y_test)
print("The accuracy of Neural Network on 2007 Training Set is ", nn_07_train_accuracy)
print("The accuracy of Neural Network on 2007 Testing Set is ", nn_07_test_accuracy)
# +
#2016 NN
# Deeper network for the larger 2016 dataset: 70 input features, nine
# additional hidden ReLU layers of 100 units with dropout, sigmoid output.
H = 100
input_dim_16 = 70
NN_16 = Sequential()
NN_16.add(Dense(H, input_dim = input_dim_16, activation='relu'))
for i in range(1,10):
    NN_16.add(Dense(H, activation='relu'))
    NN_16.add(Dropout(0.2))
NN_16.add(Dense(1, activation='sigmoid'))
NN_16.compile(loss='binary_crossentropy',
optimizer = RMSprop(),
metrics = ['accuracy'])
NN_16.fit(df_16_X_train, df_16_y_train,
epochs = 20,
batch_size = 128, verbose =0)
# +
# Train/test accuracy of the 2016 network.
# FIX: the original printed the 2007 variables (nn_07_*) under "2016" labels —
# a copy-paste error — so the 2016 results were never actually reported.
nn_16_train_accuracy = accuracy_score(NN_16.predict_classes(df_16_X_train), df_16_y_train)
nn_16_test_accuracy = accuracy_score(NN_16.predict_classes(df_16_X_test), df_16_y_test)
print("The accuracy of Neural Network on 2016 Training Set is ", nn_16_train_accuracy)
print("The accuracy of Neural Network on 2016 Testing Set is ", nn_16_test_accuracy)
# -
# ##### Discussions can be found on the report
# # The End
|
FinalNotebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# # YouTube - Extract and summarize transcript
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/YouTube/YouTube_Extract_and_summarize_transcript.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# **Tags:** #youtube #transcript #video #summarize #content #snippet #dataframe
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# **Author:** [<NAME>](https://www.linkedin.com/in/ACoAABCNSioBW3YZHc2lBHVG0E_TXYWitQkmwog/)
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# The objective is to summarize the transcript from Youtube with Hugging Face Naas drivers using T5small model.
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ## Input
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ### Install packages
# + papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# !pip install youtube_transcript_api
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ### Import library
# + papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
from youtube_transcript_api import YouTubeTranscriptApi
from naas_drivers import huggingface
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ### Variables
# + papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ID of the YouTube video whose transcript will be summarized, plus a
# human-readable title (not used by the code below in this snippet).
video_id = "I6XbLIRa0v0"
file_name = "What on earth is data science?"
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ## Model
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ### Extract the transcript in JSON
# + papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# Fetch the transcript: a list of snippet dicts with "text", "start", "duration".
# FIX: renamed the variable from `json` to `transcript` — the original name
# shadowed Python's stdlib json module.
transcript = YouTubeTranscriptApi.get_transcript(video_id)
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ### Parse JSON in text string
# + papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# Concatenate every snippet's text followed by a space — a single join instead
# of the original quadratic string accumulation; output is byte-identical.
para = "".join(snippet["text"] + " " for snippet in transcript)
para
# + papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# Summarize the full transcript with the T5-small model via naas_drivers.
text = huggingface.get("summarization", model="t5-small", tokenizer="t5-small")(para)
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ## Output
# + [markdown] papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
# ### Display results
# + papermill={} tags=["awesome-notebooks/YouTube/YouTube_Extract_and_summarize_transcript.ipynb"]
text
|
YouTube/YouTube_Extract_and_summarize_transcript.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="farifxiKU1aB" papermill={"duration": 7.263776, "end_time": "2021-08-12T20:58:53.233310", "exception": false, "start_time": "2021-08-12T20:58:45.969534", "status": "completed"} tags=[]
import warnings
import random
import tensorflow as tf
from tensorflow import keras
from random import choice
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Concatenate, Embedding, Flatten, Activation, Dropout
from tensorflow.keras.layers import SimpleRNN as RNN
from sklearn.model_selection import KFold
from tensorflow.python.client import device_lib
warnings.filterwarnings('ignore')
# + id="9kZqV9siDyNb" papermill={"duration": 0.367464, "end_time": "2021-08-12T21:06:47.802915", "exception": false, "start_time": "2021-08-12T21:06:47.435451", "status": "completed"} tags=[]
# Sequence/model hyper-parameters.
MAXLENGTH = 13       # fixed length each user's interaction sequence is padded/truncated to
EMBEDDING_DIM = 128  # width of each categorical embedding
DENSE_NEURON = 16    # declared but not used in the visible training cell
RNN_NEURON = 32      # hidden units per SimpleRNN branch
# + id="1MksD1JizpPn" papermill={"duration": 0.400679, "end_time": "2021-08-12T21:06:48.563590", "exception": false, "start_time": "2021-08-12T21:06:48.162911", "status": "completed"} tags=[]
# Cardinalities of the model inputs.
FEATURES_SIZE = 2        # number of numeric per-step features
CHAPTER_SIZE = 38        # vocabulary size for chapter ids
SUB_CHAPTER_SIZE = 223   # vocabulary size for sub-chapter ids
QUESTION_SIZE = 1069     # vocabulary size for question ids
# + id="gzJrljnjzypP" outputId="87abe488-b493-4f8f-9d71-45cb1d2ddf51" papermill={"duration": 65.599912, "end_time": "2021-08-12T21:07:58.747002", "exception": false, "start_time": "2021-08-12T21:06:53.147090", "status": "completed"} tags=[]
import torch  # kept from the original cell (unused in this visible snippet)
import numpy as np  # FIX: np is used below but never imported in this notebook's visible imports

# FIX: np.array(dict.keys()) produces a 0-d object array in Python 3, which
# KFold cannot index; materialize the keys as a list first.
X = np.array(list(grouped_data.keys()))
kfold = KFold(n_splits=5, shuffle=True)
# Per-fold learning curves and final evaluations, accumulated across folds.
train_losses = list()
train_aucs = list()
val_losses = list()
val_aucs = list()
train_eval = list()
test_eval = list()
for train, test in kfold.split(X):
    # Hold out one fold of users; split it in half for validation and test.
    users_train, users_test = X[train], X[test]
    n = len(users_test)//2
    users_test, users_val = users_test[:n], users_test[n: ]
    # Dataset wrappers (defined elsewhere in the project) that pad/truncate
    # each user's interaction sequence to MAXLENGTH.
    train_data_space = SPACE_DATASET(grouped_data[users_train], MAXLENGTH)
    val_data_space = SPACE_DATASET(grouped_data[users_val], MAXLENGTH)
    test_data_space = SPACE_DATASET(grouped_data[users_test], MAXLENGTH)
    #construct training input
    # Each __getitem__ returns, per user: (chapter, sub_chapter, question,
    # features, shifted_target, labels) — gathered into parallel arrays below.
    train_chapter=[]
    train_sub_chapter=[]
    train_question = []
    train_features=[]
    train_shifted_t = []
    train_labels=[]
    for i in range(len(users_train)):
        user = train_data_space.__getitem__(i)
        train_chapter.append(user[0])
        train_sub_chapter.append(user[1])
        train_question.append(user[2])
        train_features.append(user[3])
        train_shifted_t.append(user[4])
        train_labels.append(user[5])
    train_chapter = np.array(train_chapter)
    train_sub_chapter = np.array(train_sub_chapter)
    train_question = np.array(train_question)
    train_features = np.array(train_features)
    train_shifted_t = np.array(train_shifted_t)
    # Labels get a trailing axis so they match the (batch, time, 1) predictions.
    train_labels= np.array(train_labels)[..., np.newaxis]
    #construct validation input
    val_chapter=[]
    val_sub_chapter=[]
    val_question = []
    val_features=[]
    val_shifted_t = []
    val_labels=[]
    for i in range(len(users_val)):
        user = val_data_space.__getitem__(i)
        val_chapter.append(user[0])
        val_sub_chapter.append(user[1])
        val_question.append(user[2])
        val_features.append(user[3])
        val_shifted_t.append(user[4])
        val_labels.append(user[5])
    val_chapter = np.array(val_chapter)
    val_sub_chapter = np.array(val_sub_chapter)
    val_features = np.array(val_features)
    val_question = np.array(val_question)
    val_shifted_t = np.array(val_shifted_t)
    val_labels= np.array(val_labels)[..., np.newaxis]
    # construct test input
    test_chapter=[]
    test_sub_chapter=[]
    test_features=[]
    test_question=[]
    test_shifted_t = []
    test_labels=[]
    for i in range(len(users_test)):
        user = test_data_space.__getitem__(i)
        test_chapter.append(user[0])
        test_sub_chapter.append(user[1])
        test_question.append(user[2])
        test_features.append(user[3])
        test_shifted_t.append(user[4])
        test_labels.append(user[5])
    test_chapter = np.array(test_chapter)
    test_sub_chapter = np.array(test_sub_chapter)
    test_features = np.array(test_features)
    test_question = np.array(test_question)
    test_shifted_t = np.array(test_shifted_t)
    test_labels= np.array(test_labels)[..., np.newaxis]
# define loss function and evaluation metrics
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
acc = tf.keras.metrics.Accuracy()
auc = tf.keras.metrics.AUC()
def masked_bce(y_true, y_pred):
flat_pred = y_pred
flat_ground_truth = y_true
label_mask = tf.math.not_equal(flat_ground_truth, -1)
return bce(flat_ground_truth, flat_pred, sample_weight=label_mask)
def masked_acc(y_true, y_pred):
flat_pred = y_pred
flat_ground_truth = y_true
flat_pred = (flat_pred >= 0.5)
label_mask = tf.math.not_equal(flat_ground_truth, -1)
return acc(flat_ground_truth, flat_pred, sample_weight=label_mask)
def masked_auc(y_true, y_pred):
flat_pred = y_pred
flat_ground_truth = y_true
label_mask = tf.math.not_equal(flat_ground_truth, -1)
return auc(flat_ground_truth, flat_pred, sample_weight=label_mask)
# input layer
input_chap = tf.keras.Input(shape=(MAXLENGTH))
input_sub_chap = tf.keras.Input(shape=(MAXLENGTH))
input_ques = tf.keras.Input(shape=(MAXLENGTH))
input_shifted = tf.keras.Input(shape=(MAXLENGTH))
input_features = tf.keras.Input(shape=(MAXLENGTH, FEATURES_SIZE))
# embedding layer for categorical features
embedding_chap = Embedding(input_dim = CHAPTER_SIZE, output_dim = EMBEDDING_DIM)(input_chap)
embedding_sub_chap = Embedding(input_dim = SUB_CHAPTER_SIZE, output_dim = EMBEDDING_DIM)(input_sub_chap)
embedding_ques = Embedding(input_dim = QUESTION_SIZE, output_dim = EMBEDDING_DIM)(input_ques)
embedding_shifted = Embedding(input_dim = 3, output_dim = EMBEDDING_DIM)(input_shifted)
# dense layer for numeric features
dense_features = Dense(EMBEDDING_DIM,input_shape = (None, MAXLENGTH))(input_features)
# definr RNN layers
RNN_chap = RNN(RNN_NEURON, input_shape = (None, EMBEDDING_DIM),return_sequences = True)(embedding_chap)
RNN_sub_chap = RNN(RNN_NEURON, input_shape = (None, EMBEDDING_DIM),return_sequences = True)(embedding_sub_chap)
RNN_ques = RNN(RNN_NEURON, input_shape = (None, EMBEDDING_DIM),return_sequences = True)(embedding_ques)
RNN_shif = RNN(RNN_NEURON, input_shape = (None, EMBEDDING_DIM),return_sequences = True)(embedding_shifted)
RNN_features = RNN(RNN_NEURON, input_shape = (None, EMBEDDING_DIM),return_sequences = True)(dense_features)
RNN_output = tf.concat([RNN_chap, RNN_sub_chap, RNN_ques, RNN_shif, RNN_features], axis = 2)
dense1 = Dense(256, input_shape = (None, 5*EMBEDDING_DIM), activation='relu')(RNN_output)
dropout1 = Dropout(0.1)(dense1)
dense2 = Dense(64, input_shape = (None, 256), activation='relu')(dropout1)
dropout2 = Dropout(0.1)(dense2)
pred = Dense(1, input_shape = (None, 64), activation='sigmoid')(dropout2)
model = tf.keras.Model(
inputs=[input_chap, input_sub_chap,input_ques, input_shifted, input_features],
outputs=pred,
name='RNN_model'
)
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
opt_adam = Adam(learning_rate = 0.005)
model.compile(
optimizer=opt_adam,
loss= masked_bce,
metrics = [masked_acc, masked_auc]
)
history = model.fit(
[train_chapter, train_sub_chapter, train_question, train_shifted_t, train_features],
train_labels,
batch_size = 64,
epochs = 100,
validation_data=([val_chapter, val_sub_chapter, val_question, val_shifted_t, val_features], val_labels),
callbacks=[callback]
)
val_losses.append(list(history.history['val_loss']))
train_losses.append(list(history.history['loss']))
val_aucs.append(list(history.history['val_masked_auc']))
train_aucs.append(list(history.history['masked_auc']))
train_score = model.evaluate([train_chapter, train_sub_chapter, train_question, train_shifted_t, train_features], train_labels)
train_eval.append(train_score)
test_score = model.evaluate([test_chapter, test_sub_chapter, test_question, test_shifted_t, test_features], test_labels)
test_eval.append(test_score)
print("Test: ", test_score)
    def reset_weights(model):
        """Re-initialize every layer's variables in place so the next fold
        starts from fresh weights; recurses into nested sub-models."""
        for layer in model.layers:
            if isinstance(layer, tf.keras.Model):
                reset_weights(layer)
                continue
            # Each "<name>_initializer" attribute has a matching "<name>"
            # variable on the layer; re-sample it from its initializer.
            for k, initializer in layer.__dict__.items():
                if "initializer" not in k:
                    continue
                # find the corresponding variable
                var = getattr(layer, k.replace("_initializer", ""))
                var.assign(initializer(var.shape, var.dtype))
    reset_weights(model)
# + id="QsVmumHMz3lx" outputId="4ff1e2fa-6abb-458e-c729-495b456f53e5" papermill={"duration": 0.53112, "end_time": "2021-08-12T21:07:59.800224", "exception": false, "start_time": "2021-08-12T21:07:59.269104", "status": "completed"} tags=[]
# Aggregate test metrics across the 5 folds (mean +/- std of loss/acc/auc).
t_eval = np.array(test_eval)
print("test avg loss: ", np.mean(t_eval[:, 0]), "+/-" ,np.std(t_eval[:, 0]))
print("test avg acc: ", np.mean(t_eval[:, 1]), "+/-" ,np.std(t_eval[:, 1]))
print("test avg auc: ", np.mean(t_eval[:, 2]), "+/-" ,np.std(t_eval[:, 2]))
# + id="b9MM_CXWz5K6" outputId="4cf88e1d-3a74-4e7d-f92c-d01522e91757" papermill={"duration": 0.531593, "end_time": "2021-08-12T21:08:00.853274", "exception": false, "start_time": "2021-08-12T21:08:00.321681", "status": "completed"} tags=[]
# Same aggregation for the train-set evaluations (reuses the name t_eval).
t_eval = np.array(train_eval)
print("train avg loss: ", np.mean(t_eval[:, 0]), "+/-" ,np.std(t_eval[:, 0]))
print("train avg acc: ", np.mean(t_eval[:, 1]), "+/-" ,np.std(t_eval[:, 1]))
print("train avg auc: ", np.mean(t_eval[:, 2]), "+/-" ,np.std(t_eval[:, 2]))
|
additional_features/rnn-based/RNN/rnn_PF_TF.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # matplotlibrcの場所
# Print the path of the matplotlibrc configuration file currently in use.
import matplotlib
matplotlib.matplotlib_fname()
# # Japanese font setup
# Clear matplotlib's font cache so newly installed fonts are picked up.
import matplotlib.font_manager as fm
# NOTE(review): fm._rebuild() is a private API and was removed in
# matplotlib >= 3.6 — confirm the pinned matplotlib version still has it.
fm._rebuild()
# +
# Check that Japanese text renders correctly in a plot title.
import matplotlib.pyplot as plt
# %matplotlib inline
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 3], [2, 4])
ax.set_title('日本語のテスト')  # Japanese title meaning "Japanese test"
plt.show()
# -
|
tutorial/matplotlib_ja.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:deepchem]
# language: python
# name: conda-env-deepchem-py
# ---
# ### TensorGraph Layers and TensorFlow eager
# In this tutorial we will look at the working of TensorGraph layer with TensorFlow eager.
# But before that let's see what exactly is TensorFlow eager.
# Eager execution is an imperative, define-by-run interface where operations are executed immediately as they are called from Python. In other words, eager execution is a feature that makes TensorFlow execute operations immediately. Concrete values are returned instead of a computational graph to be executed later.
# As a result:
# - It allows writing imperative coding style like numpy
# - Provides fast debugging with immediate run-time errors and integration with Python tools
# - Strong support for higher-order gradients
import tensorflow as tf
import tensorflow.contrib.eager as tfe  # contrib.eager exists only in TF 1.x
# Enable eager execution; must run at program startup, before any graph
# operations are created.
tfe.enable_eager_execution()
# Enabling eager execution changes how TensorFlow functions behave. Tensor objects return concrete values instead of being a symbolic reference to nodes in a static computational graph(non-eager mode). As a result, eager execution should be enabled at the beginning of a program.
# Note that with eager execution enabled, these operations consume and return multi-dimensional arrays as `Tensor` objects, similar to NumPy `ndarrays`
# ### Dense layer
import numpy as np
import deepchem as dc
from deepchem.models.tensorgraph import layers
# +
# Initialize parameters
in_dim = 2       # features per example
out_dim = 3      # Dense layer output width
batch_size = 10
inputs = np.random.rand(batch_size, in_dim).astype(np.float32) #Input
layer = layers.Dense(out_dim) # Provide the number of output values as parameter. This creates a Dense layer
result = layer(inputs) #get the output tensors (eager mode returns concrete values)
print(result)
# -
# Creating a second `Dense` layer should produce different results.
# +
# A second Dense layer has its own randomly initialized weights, so its
# outputs differ from the first layer's.
layer2 = layers.Dense(out_dim)
result2 = layer2(inputs)
print(result2)
# -
# We can also execute the layer in eager mode to compute its output as a function of inputs. If the layer defines any variables, they are created the first time it is invoked. This happens in the same exact way that we would create a single layer in non-eager mode.
# The following is also a way to create a layer in eager mode. The `create_tensor()` is invoked by `__call__()` object. This gives us an advantage of directly passing the tensor as a parameter while constructing a TensorGraph layer.
# +
# Construct and apply a layer in one expression — create_tensor() is invoked
# by the layer's __call__, so the concrete output tensor is returned directly.
x = layers.Dense(out_dim)(inputs)
print(x)
# -
# ### Conv1D layer
# `Dense` layers are one of the layers defined in Deepchem. Along with it there are several others like `Conv1D`, `Conv2D`, `conv3D` etc. We also take a look at how to construct a `Conv1D` layer below.
# Basically this layer creates a convolution kernel that is convolved with the layer input over a single spatial (or temporal) dimension to produce a tensor of outputs.
# When using this layer as the first layer in a model, provide an `input_shape` argument (tuple of integers or `None`)
# When the argument `input_shape` is passed in as a tuple of integers e.g (2, 3) it would mean we are passing a sequence of 2 vectors of 3-Dimensional vectors.
# And when it is passed as (None, 3) it means that we want variable-length sequences of 3-dimensional vectors.
from deepchem.models.tensorgraph.layers import Conv1D
# +
width = 5        # temporal length of each example
in_channels = 2  # input channels per timestep
filters = 3      # output channels
kernel_size = 2  # convolution window length
batch_size = 5
# Conv1D expects input shaped (batch, width, channels).
inputs = np.random.rand(batch_size, width, in_channels).astype(
np.float32)
layer = layers.Conv1D(filters, kernel_size)
result = layer(inputs)
print(result)
# -
# Again it should be noted that creating a second `Conv1D` layer would produce different results.
# So that's how we invoke different DeepChem layers in eager mode.
#
# One of the other interesting point is that we can mix tensorflow layers and DeepChem layers. Since they all take tensors as inputs and return tensors as outputs, so you can take the output from one kind of layer and pass it as input to a different kind of layer. But it should be noted that tensorflow layers can't be added to a TensorGraph.
# ### Workflow of DeepChem layers
# Now that we've generalised so much, we should actually see if deepchem supplies an identical workflow for layers to that of tensorflow. For instance, let's consider the code where we create a `Dense` layer.
# ```python
# y = Dense(3)(input)
# ```
#
# What the above line does is that it creates a dense layer with three outputs. It initializes the weights and the biases. And then it multiplies the input tensor by the weights.
#
# Let's put the above statement in some mathematical terms. A `Dense` layer has a matrix of weights of shape `(M, N)`, where M is the number of outputs and N is the number of inputs. The first time we call it, the layer sets N based on the shape of the input we passed to it and creates the weight matrix.
# +
# A DeepChem Dense layer creates its (M, N) weight matrix on first call,
# inferring N from the last dimension of the input it receives.
_input = tf.random_normal([2, 3])
print(_input)
layer = layers.Dense(4) # A DeepChem Dense layer
result = layer(_input)
print(result)
# -
# This is exactly how a tensorflow `Dense` layer works. It implements the same operation as that of DeepChem's `Dense` layer i.e., `outputs = activation(inputs.kernel + bias)` where `kernel` is the weights matrix created by the layer, and `bias` is a bias vector created by the layer.
result = tf.layers.dense(_input, units=4) # A tensorflow Dense layer
print(result)
# We pass a tensor input to the tensorflow `Dense` layer and receive an output tensor that has the same shape as the input, except the last dimension is that of the output space.
# ### Gradients
# Finding gradients under eager mode is much similar to the `autograd` API. The computational flow is very clean and logical.
# What happens is that different operations can occur during each call; all forward operations are recorded to a tape, which is then played backwards when computing gradients. After the gradients have been computed, the tape is discarded.
#
# +
def dense_squared(x):
    # NOTE(review): this body applies the two Dense layers to the *global*
    # `inputs` (from the Conv1D cell above) and ignores the parameter `x`,
    # so the gradient printed below is not with respect to 3.0. The intent
    # was presumably `x` instead of `inputs` — but confirm before changing,
    # since Dense expects a batched tensor rather than the scalar 3.0.
    return layers.Dense(1)(layers.Dense(1)(inputs))
grad = tfe.gradients_function(dense_squared)
print(dense_squared(3.0))
print(grad(3.0))
# -
# In the above example, The `gradients_function` call takes a Python function `dense_squared()` as an argument and returns a Python callable that computes the partial derivatives of `dense_squared()` with respect to its inputs.
|
examples/notebooks/deepchem_tensorflow_eager.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="01yn_ljI1ISd"
# # **Interpreting Transformers Models**
#
# In this tutorial we will learn how to interpret BERT using the SQuAD dataset. BERT is a transformer-based machine learning technique for natural language processing pre-training developed by Google. This code was originally proposed by [Captum](https://captum.ai/tutorials/Bert_SQUAD_Interpret).
#
# + colab={"base_uri": "https://localhost:8080/"} id="Nt_xwgexJuuO" outputId="8624a24d-ba50-4460-806c-824969795a52"
# install required python packages
# !pip install seaborn
# !pip install pandas
# !pip install matplotlib
# !pip install transformers
# !pip install captum
# + id="5hlxPHgAKOPP"
# import required python libraries
import numpy as np # multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays
import pandas as pd # data manipulation and analysis
import seaborn as sns # statistical data visualization
import matplotlib.pyplot as plt # object-oriented API for embedding plots
import torch # data structures for multi-dimensional tensors and defines mathematical operations over these tensors
import torch.nn as nn # basic building blocks for graphs
import json
# from transformers import pipeline
from transformers import BertTokenizer, BertForQuestionAnswering, BertConfig # library for transformer models
from captum.attr import visualization as viz # captum visualization tool for matplotlib figure, it visualizes attribution for a given image by normalizing attribution values of the desired sign
from captum.attr import LayerConductance, LayerIntegratedGradients # computes conductance with respect to the given layer and provides integrated gradients augmenting accuracy metrics, model debugging and feature or rule extraction.
# + id="smkMUNhfAqX5"
# Run on the first CUDA device when available; otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# + [markdown] id="SIlwV1dvG8OS"
# Commented parts suggest how to use another pretrained model from hugging face. The model is a BERT-large-uncased model, pretrained on a corpus of messages from Twitter about COVID-19.
# + colab={"base_uri": "https://localhost:8080/", "height": 367, "referenced_widgets": ["eaca553105c74789a2aed2797fc36e28", "50f1e8bb513f4fd09f68a311e3b43a29", "<KEY>", "30c2ac9c17ea45d19eb94ad71edaf5a2", "<KEY>", "a45e579683194a9cad686b925e99b3d0", "a21a010f605b43778866e966970d5df3", "d65aba9c92164b6e931b0ef3ab392dc9", "42c7e68a3b2147d8812ab3a550e0149e", "f7241d02659442aea193a7c33580e0e3", "f31aabe5e78a48a3ab64a0977a79b71a", "6d9661d989e4425e8f27517cd3797f02", "1771b2ba73344c23ad321a5746648031", "97f3b3a2d47543b29ecab09c43e8c789", "<KEY>", "d76d70fcba2b404196e0137f359b6685", "df0e06ab39704875bca03ade97146d75", "24665edcd3334de8ab9a99a115c6ff0d", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "42e3c9e64a464b31926f8932677f0857", "<KEY>", "6a9d644736d044b8b2b6ee7a7782fd93", "6043c8131bda492884586ae1cb5b44c5", "d73f4844c63f40eb83153a327d9017aa", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "ab42578766b046da8043dc427aed9301", "<KEY>", "<KEY>", "<KEY>", "2f562c7c09e148fea8f223b68f7f4a28", "<KEY>"]} id="PVw1fLw8Kgvf" outputId="dacba106-f5da-44f5-c395-c31be48e2901"
# replace <bert-case-uncased> with the path of the saved model
# https://huggingface.co/bert-base-uncased
model_path = 'bert-base-uncased'
# model_path = 'digitalepidemiologylab/covid-twitter-bert-v2'
# pipe = pipeline(task='fill-mask', model='digitalepidemiologylab/covid-twitter-bert-v2')
# out = pipe(f"In places with a lot of people, it's a good idea to wear a {pipe.tokenizer.mask_token}")
# print(json.dumps(out, indent=4))
# [
# {
# "sequence": "[CLS] in places with a lot of people, it's a good idea to wear a mask [SEP]",
# "score": 0.9998226761817932,
# "token": 7308,
# "token_str": "mask"
# },
# ...
# ]
# load model
# QA head on top of the chosen BERT checkpoint; eval mode and zeroed grads
# because we only run attribution here, not training.
model = BertForQuestionAnswering.from_pretrained(model_path)
model.to(device)
model.eval()
model.zero_grad()
# load tokenizer
tokenizer = BertTokenizer.from_pretrained(model_path)
# + id="p7M98f0kN_Ok"
def predict(inputs, token_type_ids=None, position_ids=None, attention_mask=None):
    """Run the QA model once and return (start_logits, end_logits)."""
    out = model(
        inputs,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        attention_mask=attention_mask,
    )
    return out.start_logits, out.end_logits
# + id="JamF5RGMOCvk"
def squad_pos_forward_func(inputs, token_type_ids=None, position_ids=None, attention_mask=None, position=0):
    """Scalar-per-example forward pass for attribution.

    `position` selects which head to attribute (0 = start logits,
    1 = end logits); the maximum logit over the sequence is returned
    for each example in the batch.
    """
    logits_pair = predict(
        inputs,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        attention_mask=attention_mask,
    )
    return logits_pair[position].max(1).values
# + id="9G3aoVtNOIHk"
ref_token_id = tokenizer.pad_token_id # A token used for generating token reference
sep_token_id = tokenizer.sep_token_id # A token used as a separator between question and text and it is also added to the end of the text.
cls_token_id = tokenizer.cls_token_id # A token used for prepending to the concatenated question-text word sequence
# + id="PJpMRBsVOMLE"
def construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id):
    """Tokenize question+text and build the matching baseline sequence.

    Returns (input_ids, ref_input_ids, question_length), each ids tensor
    batched with a leading dimension of 1. The baseline keeps [CLS]/[SEP]
    in place and replaces every content token with `ref_token_id`.
    """
    q_tokens = tokenizer.encode(question, add_special_tokens=False)
    t_tokens = tokenizer.encode(text, add_special_tokens=False)
    # Real sequence: [CLS] question [SEP] text [SEP]
    real = [cls_token_id, *q_tokens, sep_token_id, *t_tokens, sep_token_id]
    # Baseline: same layout, content positions filled with the reference token.
    baseline = [cls_token_id]
    baseline += [ref_token_id] * len(q_tokens)
    baseline.append(sep_token_id)
    baseline += [ref_token_id] * len(t_tokens)
    baseline.append(sep_token_id)
    return (
        torch.tensor([real], device=device),
        torch.tensor([baseline], device=device),
        len(q_tokens),
    )
def construct_input_ref_token_type_pair(input_ids, sep_ind=0):
    """Segment ids: 0 up to and including `sep_ind` (the question span),
    1 afterwards (the context span); the reference is all zeros."""
    seq_len = input_ids.size(1)
    token_type_ids = (torch.arange(seq_len, device=device) > sep_ind).long().unsqueeze(0)
    ref_token_type_ids = torch.zeros_like(token_type_ids, device=device)
    return token_type_ids, ref_token_type_ids
def construct_input_ref_pos_id_pair(input_ids):
    """Position ids 0..seq_len-1 and an all-zero reference, both expanded to
    the shape of input_ids."""
    seq_len = input_ids.size(1)
    position_ids = torch.arange(seq_len, dtype=torch.long, device=device)
    ref_position_ids = torch.zeros(seq_len, dtype=torch.long, device=device)
    return (position_ids.unsqueeze(0).expand_as(input_ids),
            ref_position_ids.unsqueeze(0).expand_as(input_ids))
def construct_attention_mask(input_ids):
    """All-ones attention mask (no padding), same shape/dtype/device as input_ids."""
    return input_ids.new_ones(input_ids.size())
def construct_whole_bert_embeddings(input_ids, ref_input_ids, \
                                    token_type_ids=None, ref_token_type_ids=None, \
                                    position_ids=None, ref_position_ids=None):
    """Embed the real and the baseline token ids with BERT's embedding layer.

    NOTE(review): ref_token_type_ids / ref_position_ids are accepted but
    unused — both embeddings share the real token_type_ids / position_ids,
    matching the original behavior.
    """
    embed = model.bert.embeddings
    input_embeddings = embed(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
    ref_input_embeddings = embed(ref_input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
    return input_embeddings, ref_input_embeddings
# + [markdown] id="Uf7-qOeMKyIv"
# We now define a question - context (or text) pair as input for our BERT model and interpret what the model was focusing on when predicting an answer to the question from given input context.
# + id="1p0ZYI7tMnut"
question, text = "Why should people wear masks?", "Masks are a key measure to suppress transmission and save lives."
# + [markdown] id="Uo3aQZOdLWJg"
# We then embed the question, the input text and generate corresponding baselines/references. We use the **construct_whole_bert_embeddings** function defined above for embedding the word, the **construct_input_ref_pos_id_pair** function for embedding the position, and the **construct_input_ref_token_type_pair** for embedding the token.
# + id="G_b0YqpAOVoF"
# Build the real and baseline inputs plus the auxiliary id tensors the model needs.
input_ids, ref_input_ids, sep_id = construct_input_ref_pair(question, text, ref_token_id, sep_token_id, cls_token_id)
token_type_ids, ref_token_type_ids = construct_input_ref_token_type_pair(input_ids, sep_id)
position_ids, ref_position_ids = construct_input_ref_pos_id_pair(input_ids)
attention_mask = construct_attention_mask(input_ids)
# Human-readable tokens, index-aligned with input_ids, used to label plots.
indices = input_ids[0].detach().tolist()
all_tokens = tokenizer.convert_ids_to_tokens(indices)
# + [markdown] id="1vsQbm3DMqZS"
# We state our ground truth for prediction's start and end positions.
# + id="DjTsSfB0KIDp"
# Locate the ground-truth answer span inside the tokenized input.
# NOTE(review): list.index() returns the FIRST occurrence of the final answer
# token — assumes that token appears only once in the input; confirm.
ground_truth = 'key measure to suppress transmission and save lives.'
ground_truth_tokens = tokenizer.encode(ground_truth, add_special_tokens=False)
ground_truth_end_ind = indices.index(ground_truth_tokens[-1])
ground_truth_start_ind = ground_truth_end_ind - len(ground_truth_tokens) + 1
# + [markdown] id="BpXhFkEjM1la"
# To obtain our prediction we use the input, the token type, the position id and a default attention mask previously computed.
# + colab={"base_uri": "https://localhost:8080/"} id="24ZmckerOWiF" outputId="a6b216fd-e6d3-498e-80ce-b2747b0fc6c5"
# Run the model once and decode the answer as the argmax-start..argmax-end span.
start_scores, end_scores = predict(input_ids, \
                                   token_type_ids=token_type_ids, \
                                   position_ids=position_ids, \
                                   attention_mask=attention_mask)
print('Question: ', question)
print('Predicted Answer: ', ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))
# + id="rQfyXaAQOZmt"
# Layer Integrated Gradients over the whole embedding layer; the last element
# of additional_forward_args selects the head (0 = start logits, 1 = end logits).
lig = LayerIntegratedGradients(squad_pos_forward_func, model.bert.embeddings)
attributions_start, delta_start = lig.attribute(inputs=input_ids,
                                  baselines=ref_input_ids,
                                  additional_forward_args=(token_type_ids, position_ids, attention_mask, 0),
                                  return_convergence_delta=True)
attributions_end, delta_end = lig.attribute(inputs=input_ids, baselines=ref_input_ids,
                                additional_forward_args=(token_type_ids, position_ids, attention_mask, 1),
                                return_convergence_delta=True)
# + id="qqwiwIelOfVl"
def summarize_attributions(attributions):
    """Collapse per-embedding-dim attributions into one unit-norm score per token."""
    per_token = attributions.sum(dim=-1).squeeze(0)
    return per_token / torch.norm(per_token)
# + id="rEb2hZIkOyUR"
attributions_start_sum = summarize_attributions(attributions_start)
attributions_end_sum = summarize_attributions(attributions_end)
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="tPynBxVMO3Vu" outputId="f0bab772-7b08-48cb-8f94-15b838b733c7"
# storing couple samples in an array for visualization purposes
# VisualizationDataRecord args: word attributions, predicted probability,
# predicted class, attribution-target class, true class, total attribution,
# tokens, convergence delta.
start_position_vis = viz.VisualizationDataRecord(
                        attributions_start_sum,
                        torch.max(torch.softmax(start_scores[0], dim=0)),
                        torch.argmax(start_scores),
                        torch.argmax(start_scores),
                        str(ground_truth_start_ind),
                        attributions_start_sum.sum(),
                        all_tokens,
                        delta_start)
end_position_vis = viz.VisualizationDataRecord(
                        attributions_end_sum,
                        torch.max(torch.softmax(end_scores[0], dim=0)),
                        torch.argmax(end_scores),
                        torch.argmax(end_scores),
                        str(ground_truth_end_ind),
                        attributions_end_sum.sum(),
                        all_tokens,
                        delta_end)
print('\033[1m', 'Visualizations For Start Position', '\033[0m')
viz.visualize_text([start_position_vis])
print('\033[1m', 'Visualizations For End Position', '\033[0m')
viz.visualize_text([end_position_vis])
# + colab={"base_uri": "https://localhost:8080/"} id="rww_f3fuPJNE" outputId="2c842e30-1ace-4e65-ff64-924b6e8105af"
# Attribute separately to the word, token-type and position embedding tables
# so each input modality can be inspected on its own.
lig2 = LayerIntegratedGradients(squad_pos_forward_func, \
                                [model.bert.embeddings.word_embeddings, \
                                 model.bert.embeddings.token_type_embeddings, \
                                 model.bert.embeddings.position_embeddings])
attributions_start = lig2.attribute(inputs=(input_ids, token_type_ids, position_ids),
                                  baselines=(ref_input_ids, ref_token_type_ids, ref_position_ids),
                                  additional_forward_args=(attention_mask, 0))
attributions_end = lig2.attribute(inputs=(input_ids, token_type_ids, position_ids),
                                baselines=(ref_input_ids, ref_token_type_ids, ref_position_ids),
                                additional_forward_args=(attention_mask, 1))
# One summarized attribution vector per embedding type and per head.
attributions_start_word = summarize_attributions(attributions_start[0])
attributions_end_word = summarize_attributions(attributions_end[0])
attributions_start_token_type = summarize_attributions(attributions_start[1])
attributions_end_token_type = summarize_attributions(attributions_end[1])
attributions_start_position = summarize_attributions(attributions_start[2])
attributions_end_position = summarize_attributions(attributions_end[2])
# + id="6JKzHVXHPMhu"
def get_topk_attributed_tokens(attrs, k=5):
    """Top-k tokens by attribution score; returns (tokens, values, indices)."""
    values, indices = attrs.topk(k)
    top_tokens = [all_tokens[int(idx)] for idx in indices]
    return top_tokens, values, indices
# + colab={"base_uri": "https://localhost:8080/"} id="0f_S7whxPP7G" outputId="69174153-b9b5-4959-8b8f-f54349c518ca"
# Top-5 attributed entries per embedding type, for start and end heads.
top_words_start, top_words_val_start, top_word_ind_start = get_topk_attributed_tokens(attributions_start_word)
top_words_end, top_words_val_end, top_words_ind_end = get_topk_attributed_tokens(attributions_end_word)
top_token_type_start, top_token_type_val_start, top_token_type_ind_start = get_topk_attributed_tokens(attributions_start_token_type)
top_token_type_end, top_token_type_val_end, top_token_type_ind_end = get_topk_attributed_tokens(attributions_end_token_type)
top_pos_start, top_pos_val_start, pos_ind_start = get_topk_attributed_tokens(attributions_start_position)
top_pos_end, top_pos_val_end, pos_ind_end = get_topk_attributed_tokens(attributions_end_position)
# Fix: the "Token Type" columns previously zipped the *word* attribution values
# (top_words_val_start / top_words_val_end) instead of the token-type values.
df_start = pd.DataFrame({'Word(Index), Attribution': ["{} ({}), {}".format(word, pos, round(val.item(),2)) for word, pos, val in zip(top_words_start, top_word_ind_start, top_words_val_start)],
                         'Token Type(Index), Attribution': ["{} ({}), {}".format(ttype, pos, round(val.item(),2)) for ttype, pos, val in zip(top_token_type_start, top_token_type_ind_start, top_token_type_val_start)],
                         'Position(Index), Attribution': ["{} ({}), {}".format(position, pos, round(val.item(),2)) for position, pos, val in zip(top_pos_start, pos_ind_start, top_pos_val_start)]})
# NOTE(review): Styler.apply expects a callable; this list argument is stored
# lazily and the result is discarded, so it has no effect — kept as-is.
df_start.style.apply(['cell_ids: False'])
df_end = pd.DataFrame({'Word(Index), Attribution': ["{} ({}), {}".format(word, pos, round(val.item(),2)) for word, pos, val in zip(top_words_end, top_words_ind_end, top_words_val_end)],
                       'Token Type(Index), Attribution': ["{} ({}), {}".format(ttype, pos, round(val.item(),2)) for ttype, pos, val in zip(top_token_type_end, top_token_type_ind_end, top_token_type_val_end)],
                       'Position(Index), Attribution': ["{} ({}), {}".format(position, pos, round(val.item(),2)) for position, pos, val in zip(top_pos_end, pos_ind_end, top_pos_val_end)]})
df_end.style.apply(['cell_ids: False'])
# Token index legend for reading the tables above.
['{}({})'.format(token, str(i)) for i, token in enumerate(all_tokens)]
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="ATCru9MbPUFW" outputId="c75876aa-0288-41ef-9a0e-ede8c7045b19"
df_start
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="dHiNnEYxPWyf" outputId="79407a74-753d-42b9-cd2c-845638fe532c"
df_end
# + id="djwysN07PZgt"
def squad_pos_forward_func2(input_emb, attention_mask=None, position=0):
    """Like squad_pos_forward_func, but takes precomputed embeddings
    (inputs_embeds) instead of token ids; position 0 = start head, 1 = end."""
    out = model(inputs_embeds=input_emb, attention_mask=attention_mask)
    return out[position].max(1).values
# + id="uUusJItbPdAn"
# Per-layer conductance: one summarized attribution vector per encoder layer,
# plus the raw per-dimension attributions of a single token of interest.
layer_attrs_start = []
layer_attrs_end = []
# The token that we would like to examine separately.
token_to_explain = 18 # the index of the token that we would like to examine more thoroughly
layer_attrs_start_dist = []
layer_attrs_end_dist = []
input_embeddings, ref_input_embeddings = construct_whole_bert_embeddings(input_ids, ref_input_ids, \
                                         token_type_ids=token_type_ids, ref_token_type_ids=ref_token_type_ids, \
                                         position_ids=position_ids, ref_position_ids=ref_position_ids)
for i in range(model.config.num_hidden_layers):
    lc = LayerConductance(squad_pos_forward_func2, model.bert.encoder.layer[i])
    layer_attributions_start = lc.attribute(inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(attention_mask, 0))
    layer_attributions_end = lc.attribute(inputs=input_embeddings, baselines=ref_input_embeddings, additional_forward_args=(attention_mask, 1))
    layer_attrs_start.append(summarize_attributions(layer_attributions_start).cpu().detach().tolist())
    layer_attrs_end.append(summarize_attributions(layer_attributions_end).cpu().detach().tolist())
    # storing attributions of the token id that we would like to examine in more detail in token_to_explain
    layer_attrs_start_dist.append(layer_attributions_start[0,token_to_explain,:].cpu().detach().tolist())
    layer_attrs_end_dist.append(layer_attributions_end[0,token_to_explain,:].cpu().detach().tolist())
# + colab={"base_uri": "https://localhost:8080/"} id="ZAoQU0PsPmZX" outputId="7dc98e35-c211-470a-fde7-ec2045851076"
# Heatmap of per-token attribution per layer, for the start-position head.
fig, ax = plt.subplots(figsize=(15,5))
xticklabels=all_tokens
yticklabels=list(range(1,13))
ax = sns.heatmap(np.array(layer_attrs_start), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2)
plt.xlabel('Tokens')
plt.ylabel('Layers')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="ifRf6Jq6QlDt" outputId="dcfd48af-a12d-4c87-b454-9dcfe744f702"
# Same heatmap for the end-position head.
fig, ax = plt.subplots(figsize=(15,5))
xticklabels=all_tokens
yticklabels=list(range(1,13))
ax = sns.heatmap(np.array(layer_attrs_end), xticklabels=xticklabels, yticklabels=yticklabels, linewidth=0.2) #, annot=True
plt.xlabel('Tokens')
plt.ylabel('Layers')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="1icWJDjdQp9Z" outputId="78375db8-8e18-4d8b-bc05-bbc736f50282"
# Distribution of the examined token's attribution across layers (start head).
fig, ax = plt.subplots(figsize=(20,10))
ax = sns.boxplot(data=layer_attrs_start_dist)
plt.xlabel('Layers')
plt.ylabel('Attribution')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="gSZ-l-IXQtfZ" outputId="9c5fd211-de0b-494d-de21-58509eeeff9e"
# Same distribution for the end head.
fig, ax = plt.subplots(figsize=(20,10))
ax = sns.boxplot(data=layer_attrs_end_dist)
plt.xlabel('Layers')
plt.ylabel('Attribution')
plt.show()
# + id="DKMMXQocQxiI"
def pdf_attr(attrs, bins=100):
    """Empirical density (normalized histogram heights) of the attribution values."""
    density, _ = np.histogram(attrs, bins=bins, density=True)
    return density
# + id="sspdfxupQ0iB"
# Per-layer attribution densities; the lambda parameter shadows the outer
# list name but each call receives one layer's distribution.
layer_attrs_end_pdf = map(lambda layer_attrs_end_dist: pdf_attr(layer_attrs_end_dist), layer_attrs_end_dist)
layer_attrs_end_pdf = np.array(list(layer_attrs_end_pdf))
# summing attribution along embedding dimension for each layer
# size: #layers
attr_sum = np.array(layer_attrs_end_dist).sum(-1)
# size: #layers
layer_attrs_end_pdf_norm = np.linalg.norm(layer_attrs_end_pdf, axis=-1, ord=1)
#size: #bins x #layers
layer_attrs_end_pdf = np.transpose(layer_attrs_end_pdf)
#size: #bins x #layers
# NOTE(review): np.divide with where= leaves entries whose norm is 0
# uninitialized (not zeroed) — harmless only if no layer has an all-zero pdf.
layer_attrs_end_pdf = np.divide(layer_attrs_end_pdf, layer_attrs_end_pdf_norm, where=layer_attrs_end_pdf_norm!=0)
# + colab={"base_uri": "https://localhost:8080/"} id="14ZkmeVkQ4xC" outputId="3b503dc4-2e16-4a7c-ffd9-57ef52511c97"
# One density curve per layer (columns of the bins x layers matrix).
fig, ax = plt.subplots(figsize=(20,10))
plt.plot(layer_attrs_end_pdf)
plt.xlabel('Bins')
plt.ylabel('Density')
plt.legend(['Layer '+ str(i) for i in range(1,13)])
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="YctGTjWDQ8D2" outputId="38b03c94-9cb1-45b1-d25f-b0ca53074528"
# Shannon entropy (base 2) of each layer's attribution density; bubble size in
# the scatter is proportional to that entropy.
fig, ax = plt.subplots(figsize=(20,10))
# replacing 0s with 1s. np.log(1) = 0 and np.log(0) = -inf
layer_attrs_end_pdf[layer_attrs_end_pdf == 0] = 1
layer_attrs_end_pdf_log = np.log2(layer_attrs_end_pdf)
# size: #layers
entropies= -(layer_attrs_end_pdf * layer_attrs_end_pdf_log).sum(0)
plt.scatter(np.arange(12), attr_sum, s=entropies * 100)
plt.xlabel('Layers')
plt.ylabel('Total Attribution')
plt.show()
# + id="_CRJAYQfROFa"
|
BERT_SQUAD_interpret.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false nbgrader={"checksum": "4cea28a1843359931a5a3bd9dd871a7b", "grade": false, "grade_id": "cell-1cec5ee110f26162", "locked": true, "schema_version": 1, "solution": false}
# # Exercício Prático 7: QR e estimadores de quadrados mínimos
#
# Neste exercício vamos estudar o uso da decomposição QR na obtenção de estimadores de quadrados mínimos de uma regressão linear. A grande vantagem da decomposição QR é que ela não requer a solução direta das equações normais, que podem ser extremamente malcondicionadas. Existem diversos algoritmos para implementá-la, que possuem diferentes estabilidades. Neste EP iremos implementar:
# * o Gram-Schmidt clássico (visto em sala)
# * o Gram-Schmidt modificado
# e iremos compará-lo com um dos melhores algoritmos para QR, conhecido como Reflexões de Householder.
#
# Incluímos também a estimação dos parâmetros resolvendo as equações normais pelo método de Cholesky. No entanto, para o conjunto de dados utilizados, o sistema é tão mal condicionado que os erros numéricos impedem que Cholesky seja usado com sucesso.
# -
NAME = "<NAME>"
COLLABORATORS = "<NAME>"
# + [markdown] deletable=false editable=false nbgrader={"checksum": "0c58e08699a342ba39ecaa578a70f3a2", "grade": false, "grade_id": "cell-6d6d241bbaea84c7", "locked": true, "schema_version": 1, "solution": false}
# ## Introdução
#
# Seja a regressão polinomial
#
# $$
# y = \beta_0 + \beta_1 x + \beta_2 x^2 + \ldots + \beta_p x^p + \epsilon.
# $$
#
# Os estimadores de mínimos quadrados $\beta$ podem ser obtidos pela solução das equações normais
#
# $$
# X^\top X \beta = X^\top y,
# $$
#
# onde a matriz $X$ é calculada pela função abaixo.
# + deletable=false editable=false nbgrader={"checksum": "277f6a2524eebb9208293a39a85cbdd2", "grade": false, "grade_id": "cell-17e8a89c821e42c6", "locked": true, "schema_version": 1, "solution": false}
import numpy as np
import scipy.linalg
def RegressaoPolinomial_getX(x, p):
    """Return the n x (p+1) design matrix for degree-p polynomial regression.

    Column i holds x**i (column 0 is all ones), so X @ beta evaluates a
    polynomial whose coefficients are given in increasing-degree order.

    Fix: the original assigned X[:,1] unconditionally, which raised
    IndexError for p == 0; building every power inside the loop also
    handles the intercept-only case.
    """
    x = np.asarray(x)
    n = len(x)
    X = np.empty((n, p + 1))
    X[:, 0] = 1
    for i in range(1, p + 1):
        # each column is the previous one times x, i.e. X[:, i] == x**i
        X[:, i] = X[:, i - 1] * x
    return X
# + deletable=false editable=false nbgrader={"checksum": "40f3be010735e881f1b69163e176882d", "grade": false, "grade_id": "cell-307821ed3aa23569", "locked": true, "schema_version": 1, "solution": false}
# Smoke test: 3 x 5 design matrix for a degree-4 polynomial.
RegressaoPolinomial_getX([1.1,1.2,1.7],4)
# -
# A seguir apresentamos a implementação um gerador de polinômios aleatórios. Mais precisamente, iremos escrever uma função que retorna $p+1$ números aleatórios independentes e com distribuição uniforme entre -5 e 5. Para isto, usamos a função np.random.rand. Tendo em vista que esta função do numpy gera valores em $[0,1)$, iremos transformá-los de maneira a mapeá-los para o intervalo $[-5,5)$.
# + deletable=false editable=false nbgrader={"checksum": "73c4f8c438f87664c7698bbdf46369a6", "grade": false, "grade_id": "cell-6540c95b49975a18", "locked": true, "schema_version": 1, "solution": false}
def geraPolinomioAleatorio(p):
    """Return p+1 i.i.d. coefficients, uniform on [-5, 5)."""
    return 10 * np.random.rand(p + 1) - 5
# + [markdown] deletable=false editable=false nbgrader={"checksum": "f28ee6af8702eaaa5de4ce26df0d1dd7", "grade": false, "grade_id": "cell-7130806ba6f1fc00", "locked": true, "schema_version": 1, "solution": false}
# A seguir mostramos como a função ```geraPolinomioAleatorio``` pode ser utilizada. Fixando a semente do gerador de números aleatórios igual a 1, iremos obter o polinômio de 3o. grau $p(x) = -0.83 + 2.20x -5x^2 -1.98x^3$.
# + deletable=false editable=false nbgrader={"checksum": "2758b3084d692f53d7bce7d22f858b3b", "grade": false, "grade_id": "cell-7bdb5efee3efb368", "locked": true, "schema_version": 1, "solution": false}
np.random.seed(1) # fix the RNG seed to 1 so the polynomial is reproducible
coef = geraPolinomioAleatorio(3)
print('Coeficientes:',coef)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "c8ff689c3164dd5430550e3637666e80", "grade": false, "grade_id": "cell-5930603c484495e3", "locked": true, "schema_version": 1, "solution": false}
# A seguir apresentamos uma função para gerar uma tabela de pontos $(x,y)$ **com erros de medição em $y$**, a partir da avaliação de um polinômio. Vamos assumir que os coeficientes são dados em ordem crescente de grau.
#
# Nesta tabela, as abcissas são igualmente espaçadas entre $x_1 = 0$ e $x_n=1$.
# + deletable=false editable=false nbgrader={"checksum": "d90bfc80fe29dec2d9661005d81f68cc", "grade": false, "grade_id": "cell-2c4d36af74f88e12", "locked": true, "schema_version": 1, "solution": false}
def geraTabelaAleatoriaY(n, coef):
    """Tabulate a polynomial (coefficients in increasing-degree order) at n
    equally spaced abscissas in [0, 1], adding N(0, 0.1) noise to each y."""
    x = np.linspace(0, 1, n).reshape(n, 1)
    noise = np.random.normal(scale=0.1, size=(n, 1))
    y = np.polyval(coef[::-1], x) + noise
    return x, y
# -
# A seguir geramos uma tabela usando a função ```geraTabelaAleatoriaY``` (e os coeficientes ```coef```).
# + deletable=false editable=false nbgrader={"checksum": "9fdb188376da2b50389e899db977d2ba", "grade": false, "grade_id": "cell-018398c6e0a9b26e", "locked": true, "schema_version": 1, "solution": false}
# 11 noisy samples of the random polynomial, printed as (x, y) columns.
x, y = geraTabelaAleatoriaY(11,coef)
print(np.hstack([x,y]))
# + [markdown] deletable=false editable=false nbgrader={"checksum": "e785e5b0352180e8051a2f7f70372635", "grade": false, "grade_id": "cell-646b9e7582930ab2", "locked": true, "schema_version": 1, "solution": false}
# ## Estudo da estabilidade dos métodos para estimação de quadrados mínimos
#
# Nesta parte, vamos comparar os seguintes métodos para a estimação de quadrados mínimos linear:
# * Cholesky via Equações Normais
# * QR, método de Gram-Schmidt (clássico)
# * QR, método de Gram-Schmidt (modificado)
# * QR, método de Reflexões de Householder
#
# Para isto, precisamos definir um problema de regressão onde a solução exata (isto é, os coeficientes $\beta$) é conhecida. O código a seguir cria:
# * a matriz $X$ a partir de $m=50$ pontos igualmente espaçados entre 0 e 1 usando o método RegressaoPolinomial_getX,
# * o vetor $\beta=[1.0,2.0,\ldots,15.0]$, e
# * as respostas correspondentes ao vetor $y = X\beta$.
# + deletable=false editable=false nbgrader={"checksum": "f90ecbee491a18885e965db52ab31540", "grade": false, "grade_id": "cell-51a725ad7ee03145", "locked": true, "schema_version": 1, "solution": false}
import numpy as np
def createLeastSquaresProblem(m,p):
    """Build a least-squares problem with known exact solution: polynomial
    design matrix X (m points in [0,1], degree p-1), beta = [1..p] and the
    noiseless responses y = X @ beta."""
    abscissas = np.linspace(0.0, 1.0, num=m)
    X = RegressaoPolinomial_getX(abscissas, p - 1)
    beta = np.arange(1, p + 1)
    return X, beta, X.dot(beta)
# 50 points, degree-14 polynomial — deliberately ill-conditioned.
np.random.seed(1)
X, beta, y = createLeastSquaresProblem(50,15)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "f1724fd29f4d3704e7b627d30938e9ab", "grade": false, "grade_id": "cell-6a702495dc618c1e", "locked": true, "schema_version": 1, "solution": false}
# Observa-se abaixo que a matriz $X$ (e consequentemente $X^\top X$) é extremamente malcondicionada.
# + deletable=false editable=false nbgrader={"checksum": "0d25233ff86a41d4afbfba270a689d3a", "grade": false, "grade_id": "cell-8274ea3ea7d54d61", "locked": true, "schema_version": 1, "solution": false}
# condition number of X (the normal equations square this)
print('Numero de condicao de X:',np.linalg.cond(X))
# + [markdown] deletable=false editable=false nbgrader={"checksum": "2a96bc7e084baa0b85264a42b97b0249", "grade": false, "grade_id": "cell-9b7e49f824154de5", "locked": true, "schema_version": 1, "solution": false}
# A estimação dos parâmetros $\beta$ usando Cholesky via equações normais já está implementada. Embora a matrix $X^\top X$ seja simétrica definida positiva, o método gera um erro em tempo de execução devido a problemas numéricos.
# + deletable=false editable=false nbgrader={"checksum": "d2d72b11c9c4bc4cc401ae695f2fa2fa", "grade": false, "grade_id": "cell-142da595b4afb91b", "locked": true, "schema_version": 1, "solution": false}
import scipy.linalg
def leastSquares_Cholesky(X, y):
    """Least-squares estimates via the normal equations X^T X beta = X^T y,
    solved with a Cholesky factorization. Note this squares the condition
    number of X, so it can fail numerically on ill-conditioned problems."""
    gram = X.T @ X
    rhs = X.T @ y
    factor = scipy.linalg.cho_factor(gram, lower=True)
    return scipy.linalg.cho_solve(factor, rhs)
# The problem is so ill-conditioned that Cholesky on X^T X is expected to fail;
# scipy.linalg re-exports numpy's LinAlgError, so this catch covers both.
try:
    beta_cho = leastSquares_Cholesky(X,y)
    print(beta_cho)
except np.linalg.LinAlgError as err:
    print('Erro numérico:', err)
# + [markdown] deletable=false editable=false nbgrader={"checksum": "6981f080e9d8ef288fe52a4c39ce9176", "grade": false, "grade_id": "cell-339632112dd54c0a", "locked": true, "schema_version": 1, "solution": false}
# **Exercício 1:** Complete a implementação do Gram-Schmidt clássico (visto em sala).
# + deletable=false nbgrader={"checksum": "f5427adfdffb5b29b83ab9c42ad82107", "grade": false, "grade_id": "cell-6dbe72c060e9452d", "locked": false, "schema_version": 1, "solution": true}
def CGS(A):
    """QR factorization by Gram-Schmidt orthogonalization.

    Returns Q (m x n, orthonormal columns) and R (n x n, upper triangular)
    with A = Q @ R. As in the original implementation, each projection is
    taken against the running residual of the current column.
    """
    m, n = A.shape
    Q = np.zeros((m, n))
    R = np.zeros((n, n))
    for col in range(n):
        # residual of column `col` after removing earlier directions
        residual = A[:, col]
        for prev in range(col):
            q_prev = Q[:, prev]
            R[prev, col] = q_prev.dot(residual)
            residual = residual - R[prev, col] * q_prev
        # normalize what is left into the new orthonormal direction
        R[col, col] = np.linalg.norm(residual)
        Q[:, col] = residual / R[col, col]
    return Q, R
# + deletable=false editable=false nbgrader={"checksum": "a5e949e18b15757a4343b0d06d41ebbf", "grade": true, "grade_id": "cell-a4eb2b4cedd84f1f", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Sanity check of CGS on a small 3x3 example with known Q and R.
A = 1.0*np.array([[1,1,0],[1,0,1],[0,1,1]])
print(A)
Q,R = CGS(A)
assert np.allclose(Q,np.array([[ 0.70710678,  0.40824829, -0.57735027],
       [ 0.70710678, -0.40824829,  0.57735027],
       [ 0.        ,  0.81649658,  0.57735027]]))
assert np.allclose(R,np.array([[ 1.41421356,  0.70710678,  0.70710678],
       [ 0.        ,  1.22474487,  0.40824829],
       [ 0.        ,  0.        ,  1.15470054]]))
# + [markdown] deletable=false editable=false nbgrader={"checksum": "17d210ca4e4479d91f34bb83b33abe54", "grade": false, "grade_id": "cell-caf4bf13eba3b069", "locked": true, "schema_version": 1, "solution": false}
# O método Gram-Schmidt modificado é, algebricamente, igual ao método Gram-Schmidt clássico. Contudo, devido a diferenças nos erros de arredondamento, a versão modificada é mais estável numericamente.
#
# O método Gram-Schmidt consiste em:
# * criar uma cópia $U$ da matriz $A$
# * para cada coluna $i=0,\ldots$:
# * definir $r_{i,i}$ como a norma-2 de $u_i$
# * definir $q_i$ como $u_i$ normalizado
# * para cada coluna $j=i+1,\ldots$:
# - definir $r_{i,j}$ como o tamanho da projeção de $u_j$ em $q_i$
# - subtrair de $u_j$ a projeção de $u_j$ em $q_i$
#
# Ou seja, no início da iteração $i$, todas as colunas de $U$ a partir de $i$-ésima são ortogonais a $q_0, q_1, \ldots, q_{i-1}$.
#
# **Exercício 2** Complete a implementação do Gram-Schmidt modificado.
# + deletable=false nbgrader={"checksum": "7f065b4464f638eca9d63bd1662191fe", "grade": false, "grade_id": "cell-c8670a10ea2468ce", "locked": false, "schema_version": 1, "solution": true}
def MGS(A):
    """QR factorization by modified Gram-Schmidt.

    At step i the i-th column of the working copy U is normalized into q_i,
    and its component is immediately removed from every later column — this
    ordering is what makes MGS more numerically stable than classical GS.
    """
    m, n = A.shape
    Q = np.zeros((m, n))
    R = np.zeros((n, n))
    U = A.copy()
    for i in range(n):
        col_norm = np.linalg.norm(U[:, i])
        R[i, i] = col_norm
        q = U[:, i] / col_norm
        Q[:, i] = q
        for j in range(i + 1, n):
            proj = q.dot(U[:, j])
            R[i, j] = proj
            U[:, j] = U[:, j] - proj * q
    return Q, R
# + deletable=false editable=false nbgrader={"checksum": "c3b9b8c1ff89c25c3895cb435269e315", "grade": true, "grade_id": "cell-8c4d045e5d30fe90", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# MGS must reproduce the same factorization as CGS on this small example.
Q,R = MGS(A)
assert np.allclose(Q,np.array([[0.70710678,  0.40824829, -0.57735027],
       [ 0.70710678, -0.40824829,  0.57735027],
       [ 0.        ,  0.81649658,  0.57735027]]))
assert np.allclose(R,np.array([[1.41421356, 0.70710678, 0.70710678],
       [ 0.        ,  1.22474487,  0.40824829],
       [ 0.        ,  0.        ,  1.15470054]]))
# + [markdown] deletable=false editable=false nbgrader={"checksum": "8957b5b11860db9fe578763b56e183b7", "grade": false, "grade_id": "cell-b9e79c4f39e79e69", "locked": true, "schema_version": 1, "solution": false}
# Os métodos a seguir encontram a solução para o problema de quadrados mínimos linear usando CGS, MGS e Reflexões de Householder, respectivamente.
# + [markdown] deletable=false editable=false nbgrader={"checksum": "a71242ca8b70b1e70c38425747df4926", "grade": false, "grade_id": "cell-c95983c72c5c1e2e", "locked": true, "schema_version": 1, "solution": false}
# **Exercício 3** Sabendo que CGS, MGS e Reflexões de Householder são métodos de decomposição QR e que, quando $X$ é de posto completo, a solução de
#
# $$
# X^\top X \beta = X^\top y
# $$
#
# pode ser encontrada resolvendo-se
#
# $$
# R\beta = Q^\top y,
# $$
#
# complete as três funções abaixo de forma a encontrar as estimativas de quadrados mínimos usando a decomposição QR.
# + deletable=false nbgrader={"checksum": "26f331b5fe88181ae590dedab2bc715d", "grade": false, "grade_id": "cell-1f74b3d1a27de39e", "locked": false, "schema_version": 1, "solution": true}
def leastSquares_CGS(X, y):
    """Least-squares via classical Gram-Schmidt QR: solves R beta = Q^T y."""
    Q, R = CGS(X)
    return scipy.linalg.solve_triangular(R, Q.T @ y)
# + deletable=false editable=false nbgrader={"checksum": "3c9e31b9fd48a6558da806860a457d21", "grade": true, "grade_id": "cell-7693da3368200a61", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Estimates via classical Gram-Schmidt QR (compare against beta = 1..15).
beta_cgs = leastSquares_CGS(X,y)
print(beta_cgs)
# + deletable=false nbgrader={"checksum": "cdb63c5c8c2a9f8301f700c36c336dfa", "grade": false, "grade_id": "cell-2802a58b8b1f84af", "locked": false, "schema_version": 1, "solution": true}
def leastSquares_MGS(X, y):
    """Least-squares via modified Gram-Schmidt QR: solves R beta = Q^T y."""
    Q, R = MGS(X)
    return scipy.linalg.solve_triangular(R, Q.T @ y)
# + deletable=false editable=false nbgrader={"checksum": "210b3b7a4cfa07fef72c63f435f977af", "grade": true, "grade_id": "cell-d70235cd6637fe2d", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Estimates via modified Gram-Schmidt QR.
beta_mgs = leastSquares_MGS(X,y)
print(beta_mgs)
# + deletable=false nbgrader={"checksum": "b143b7f9c2a52afc0c14fe3daf14b05d", "grade": false, "grade_id": "cell-c04bec782c596e26", "locked": false, "schema_version": 1, "solution": true}
def leastSquares_Householder(X,y):
    """Least-squares via Householder QR (scipy's economic-mode factorization)."""
    Q, R = scipy.linalg.qr(X, mode='economic')
    return np.linalg.solve(R, Q.T @ y)
# + deletable=false editable=false nbgrader={"checksum": "aa34747716ebcb1d53cb69dfb1026ebe", "grade": true, "grade_id": "cell-d70235cd6637fe2e", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Estimates via Householder QR — the most stable of the three QR variants.
beta_hh = leastSquares_Householder(X,y)
print(beta_hh)
|
alc/.ipynb_checkpoints/ep7 (1)-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import argparse
import logging
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from utils import data
import models, utils
import pandas as pd
from laspy.file import File
from pickle import dump, load
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as udata
from torch.autograd import Variable
from sklearn.preprocessing import MinMaxScaler
import lidar_data_processing
# %matplotlib inline
# -
class Args(object):
    """Notebook stand-in for an argparse namespace: all experiment,
    data-windowing and model hyperparameters as plain attributes."""
    def __init__(self):
        self.data_path= 'data' # not used
        self.dataset= 'masked_pwc' # move lidar into datasets
        self.batch_size= 64
        self.model= 'lidar_unet2d_old'
        self.in_channels = 6
        self.out_channels = 3
        self.lr= 0.005
        self.weight_decay = 0.
        self.num_epochs= 50
        self.min_sep = 5 # not used
        self.num_scan_lines = 1000
        self.seq_len = 32
        self.scan_line_gap_break = 7000 # threshold over which scan_gap indicates a new scan line
        self.min_pt_count = 1700 # in a scan line, otherwise line not used
        self.max_pt_count = 2000 # in a scan line, otherwise line not used
        self.mask_pts_per_seq = 5
        self.mask_consecutive = True
        # points in between scan lines
        self.stride_inline = 5
        self.stride_across_lines = 3
        # checkpointing / logging cadence (epochs)
        self.valid_interval= 1
        self.save_interval= 1
        self.seed = 0
        # self.experiment_dir = 'lidar_experiments/2d'
        self.output_dir= '../lidar_experiments/2d'
        # self.checkpoint_dir= 'lidar_experiments/2d'
        self.MODEL_PATH_LOAD = "../lidar_experiments/2d/lidar_unet2d/lidar-unet2d-Nov-09-21_16_11/checkpoints/checkpoint_best.pt"
        self.experiment= ''
        self.resume_training= False
        self.restore_file= None
        self.no_save= False
        self.step_checkpoints= False
        self.no_log= False
        self.log_interval= 100
        self.no_visual= False
        self.visual_interval= 100
        self.no_progress= False
        self.draft= False
        self.dry_run= False
        self.bias= False
        # self.in_channels= 1 # maybe 6?
        self.test_num = 0
        # UNET
        self.residual = False
        self.wtd_loss = True
        self.batch_norm = True
# Instantiate the config and set up the experiment directories / logging.
args=Args()
# gpu or cpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
args = utils.setup_experiment(args)
utils.init_logging(args)
# +
# Saving model
# MODEL_PATH = "models/lidar/conv1d_256seq_400epochs_092620.pth"
# torch.save(model.state_dict(), MODEL_PATH)
# Loading models
train_new_model = False
# Build data loaders, a model and an optimizer. When not training from
# scratch, restore the weights saved under args.MODEL_PATH_LOAD.
if train_new_model:
    model = models.build_model(args).to(device)
else:
    model = models.build_model(args)
    model.load_state_dict(torch.load(args.MODEL_PATH_LOAD)['model'][0])
    model.to(device)
    print(model)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay = args.weight_decay)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,milestones=[5,15,30,50,500], gamma=0.5)
logging.info(f"Built a model consisting of {sum(p.numel() for p in model.parameters()):,} parameters")
if args.resume_training:
    state_dict = utils.load_checkpoint(args, model, optimizer, scheduler)
    global_step = state_dict['last_step']
    # NOTE(review): 403200 looks like the number of training samples per
    # epoch — confirm against the training script before reusing.
    start_epoch = int(state_dict['last_step']/(403200/state_dict['args'].batch_size))+1
else:
    global_step = -1
    start_epoch = 0
# -
# ### Load the pts files
# `scan_line_tensor` is the data file, a 3-D tensor of size [num_scan_lines,pts_per_scan_line,num_feats].
# `idx_lists` indicate the top left corner of each training or validation square.
# `sc` is the minmaxscaler used to generate the training set. Needed here to calculate weightedMSELoss at real scale.
# Loads as a list of numpy arrays
# scan_line_tensor: [num_scan_lines, pts_per_line, num_feats]; the idx lists
# hold top-left corners of training/validation patches; sc is the fitted scaler.
scan_line_tensor = torch.load('../lidar_data/32_32/'+'scan_line_tensor.pts')
train_idx_list = torch.load('../lidar_data/32_32/'+'train_idx_list.pts')
valid_idx_list = torch.load('../lidar_data/32_32/'+'valid_idx_list.pts')
sc = torch.load('../lidar_data/32_32/'+'sc.pts')
def add_mask(sample, mask_pts_per_seq, consecutive=True):
    """Build a binary mask tensor marking points to hide from the model.

    Args:
        sample: 3-D tensor [seq_len, seq_len, num_feats]; only its shape is used.
        mask_pts_per_seq: side length of the masked square when consecutive=True;
            otherwise sqrt of the number of random points masked (so the total
            masked count is mask_pts_per_seq**2 in both modes, matching the
            loss normalization used elsewhere in this file).
        consecutive: if True, zero out one contiguous square; if False, zero out
            mask_pts_per_seq**2 distinct random points in the patch interior.

    Returns:
        A tensor of the same shape as `sample`, all ones except masked
        positions, which are 0 across every feature channel.
    """
    mask_tensor = torch.ones(sample.shape)
    seq_len = mask_tensor.shape[0]
    if consecutive:
        # Creates a square of missing points, keeping an 8-pixel unmasked border
        # so the interpolation baseline always has surrounding context.
        first_mask = int(np.random.choice(np.arange(8, seq_len - 8 - mask_pts_per_seq), 1))
        mask_tensor[first_mask:first_mask + mask_pts_per_seq,
                    first_mask:first_mask + mask_pts_per_seq, :] = 0
    else:
        # Random points throughout the patch interior.
        # Bug fix: the original branch referenced an undefined variable `m`
        # and never modified mask_tensor. Sample mask_pts_per_seq**2 distinct
        # (row, col) positions from the 8-pixel-bordered interior.
        interior = np.arange(8, seq_len - 8)
        n_pts = mask_pts_per_seq ** 2
        flat_idx = np.random.choice(len(interior) ** 2, n_pts, replace=False)
        rows = interior[flat_idx // len(interior)]
        cols = interior[flat_idx % len(interior)]
        mask_tensor[rows, cols, :] = 0
    return mask_tensor
# Dataloader class
class LidarLstmDataset(udata.Dataset):
    """Dataset of square LiDAR patches paired with randomly generated masks.

    Each item is a (clean, mask) tuple of tensors shaped
    [num_feats, seq_len, seq_len] (channel-first for conv layers).
    """

    def __init__(self, scan_line_tensor, idx_list, seq_len=64, mask_pts_per_seq=5, consecutive=True):
        super(LidarLstmDataset, self).__init__()
        self.scan_line_tensor = scan_line_tensor  # full [rows, cols, feats] tensor
        self.idx_list = idx_list                  # top-left corner of each patch
        self.seq_len = seq_len
        self.mask_pts_per_seq = mask_pts_per_seq
        self.consecutive = consecutive

    def __len__(self):
        return len(self.idx_list)

    def __getitem__(self, index):
        top, left = self.idx_list[index][0], self.idx_list[index][1]
        patch = self.scan_line_tensor[top:top + self.seq_len, left:left + self.seq_len, :]
        patch_mask = add_mask(patch, self.mask_pts_per_seq, self.consecutive)
        # Move the feature axis first: [H, W, C] -> [C, H, W].
        return patch.permute(2, 0, 1), patch_mask.permute(2, 0, 1)
# +
# Build train/validation datasets over the same tensor, distinguished only by
# their corner index lists, and wrap them in shuffled DataLoaders.
train_dataset = LidarLstmDataset(scan_line_tensor,train_idx_list,args.seq_len, args.mask_pts_per_seq)
valid_dataset = LidarLstmDataset(scan_line_tensor,valid_idx_list,args.seq_len, args.mask_pts_per_seq)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, num_workers=4, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=32, num_workers=4, shuffle=True)
# -
# Define a loss function that weights the loss according to coordinate ranges (xmax-xmin, ymax-ymin, zmax-zmin)
def weighted_MSELoss(pred, true, sc, mask_pts_per_seq=args.mask_pts_per_seq):
    '''Sum-of-squares loss rescaled back to the original x,y,z units.

    Because x, y and z have very different ranges after min-max scaling, each
    channel's squared error is weighted by the squared range of that coordinate
    (taken from the fitted scaler `sc`), so no axis dominates the loss.
    Assumes x, y, z are the first 3 features in the `sc` scaler. The caller is
    expected to normalize by batch size and masked-point count.
    '''
    coord_ranges = torch.Tensor(sc.data_max_[:3] - sc.data_min_[:3])
    per_channel = torch.zeros(3, dtype=float)
    for channel in range(3):
        per_channel[channel] = F.mse_loss(pred[:, channel, :, :], true[:, channel, :, :], reduction="sum")
    weighted = coord_ranges ** 2 * per_channel
    return weighted.sum()  # / (pred.shape[0]*mask_pts_per_seq**2)
# ### Training the model
# Track moving average of loss values
# Train loss uses an exponential moving average (decay 0.98); validation loss
# uses a plain average reset each validation pass.
train_meters = {name: utils.RunningAverageMeter(0.98) for name in (["train_loss"])}
valid_meters = {name: utils.AverageMeter() for name in (["valid_loss"])}
# TensorBoard writer, unless visualisation was disabled via args.no_visual.
writer = SummaryWriter(log_dir=args.experiment_dir) if not args.no_visual else None
# +
# TRAINING
# Main training loop: for each epoch, train on masked patches, then (every
# args.valid_interval epochs) evaluate, log, and checkpoint on best valid loss.
for epoch in range(start_epoch, args.num_epochs):
    if args.resume_training:
        # NOTE(review): `epoch % 1 == 0` is always true, so the LR is halved
        # every epoch while resuming — confirm this is intended.
        if epoch %1 == 0:
            optimizer.param_groups[0]["lr"] /= 2
            print('learning rate reduced by factor of 2')
    train_bar = utils.ProgressBar(train_loader, epoch)
    for meter in train_meters.values():
        meter.reset()
    # epoch_loss_sum = 0
    for batch_id, (clean, mask) in enumerate(train_bar):
        # dataloader returns [clean, mask] list
        model.train()
        global_step += 1
        inputs = clean.to(device)
        mask_inputs = mask.to(device)
        # only use the mask part of the outputs:
        # where mask==1 keep the ground truth; where mask==0 use the prediction,
        # so the loss is driven by the masked (inpainted) region.
        raw_outputs = model(inputs,mask_inputs)
        outputs = (1-mask_inputs[:,:3,:,:])*raw_outputs + mask_inputs[:,:3,:,:]*inputs[:,:3,:,:]
        if args.wtd_loss:
            # Range-weighted loss, normalized by batch size and masked-point count.
            loss = weighted_MSELoss(outputs,inputs[:,:3,:,:],sc)/(inputs.size(0)*(args.mask_pts_per_seq**2))
            # # Regularization?
        else:
            # normalized by the number of masked points
            loss = F.mse_loss(outputs, inputs[:,:3,:,:], reduction="sum") / \
                (inputs.size(0) * (args.mask_pts_per_seq**2))
        model.zero_grad()
        loss.backward()
        optimizer.step()
        # epoch_loss_sum += loss * inputs.size(0)
        train_meters["train_loss"].update(loss)
        train_bar.log(dict(**train_meters, lr=optimizer.param_groups[0]["lr"]), verbose=True)
        # Periodically log LR, loss and a gradient histogram to TensorBoard.
        if writer is not None and global_step % args.log_interval == 0:
            writer.add_scalar("lr", optimizer.param_groups[0]["lr"], global_step)
            writer.add_scalar("loss/train", loss.item(), global_step)
            gradients = torch.cat([p.grad.view(-1) for p in model.parameters() if p.grad is not None], dim=0)
            writer.add_histogram("gradients", gradients, global_step)
            sys.stdout.flush()
    # epoch_loss = epoch_loss_sum / len(train_loader.dataset)
    if epoch % args.valid_interval == 0:
        model.eval()
        for meter in valid_meters.values():
            meter.reset()
        valid_bar = utils.ProgressBar(valid_loader)
        val_loss = 0
        for sample_id, (clean, mask) in enumerate(valid_bar):
            with torch.no_grad():
                inputs = clean.to(device)
                mask_inputs = mask.to(device)
                # only use the mask part of the outputs
                raw_output = model(inputs,mask_inputs)
                output = (1-mask_inputs[:,:3,:,:])*raw_output + mask_inputs[:,:3,:,:]*inputs[:,:3,:,:]
                # TO DO, only run loss on masked part of output
                if args.wtd_loss:
                    val_loss = weighted_MSELoss(output,inputs[:,:3,:,:],sc)/(inputs.size(0)*(args.mask_pts_per_seq**2))
                else:
                    # normalized by the number of masked points
                    val_loss = F.mse_loss(output, inputs[:,:3,:,:], reduction="sum")/(inputs.size(0)* \
                        (args.mask_pts_per_seq**2))
                valid_meters["valid_loss"].update(val_loss.item())
        if writer is not None:
            writer.add_scalar("loss/valid", valid_meters['valid_loss'].avg, global_step)
            sys.stdout.flush()
        logging.info(train_bar.print(dict(**train_meters, **valid_meters, lr=optimizer.param_groups[0]["lr"])))
        # Checkpoint keeps the best (minimum) validation loss seen so far.
        utils.save_checkpoint(args, global_step, model, optimizer, score=valid_meters["valid_loss"].avg, mode="min")
    scheduler.step()
logging.info(f"Done training! Best Loss {utils.save_checkpoint.best_score:.3f} obtained after step {utils.save_checkpoint.best_step}.")
# -
# Single output example
# Run one training batch through the model and recombine predicted and known
# points, as a quick sanity check on output shapes.
i,m = next(iter(train_loader))
inputs = i.to(device)
mask = m.to(device)
raw_output = model(inputs,mask)
# NOTE(review): `mask[:,:3,:]` on a 4-D tensor leaves the last dim intact, so
# it is equivalent to `mask[:,:3,:,:]` used during training — confirm intended.
output_model = (1-mask[:,:3,:])*raw_output + mask[:,:3,:]*inputs[:,:3,:]
raw_output.shape
# ## Baselines
# First: Interpolate between last and next
import gc
gc.collect()
# model_cpu = model.to('cpu')
def loss_comparison(loader,model,mask_pts_per_seq=args.mask_pts_per_seq,pt_count=len(valid_dataset)):
    """Print total per-point loss for the model vs. an interpolation baseline.

    Iterates the whole loader, accumulating the (weighted) MSE of the model's
    inpainted output and of lidar_data_processing's interpolation baseline,
    then normalizes both by masked-point count * pt_count and prints them.
    Returns None; results are printed only.

    Note: `pt_count` defaults to the module-level valid_dataset's length —
    pass the matching dataset size explicitly for other loaders.
    """
    wtd_loss = True
    loss_model = 0
    loss_interp = 0
    for batch_id, (i, m) in enumerate(loader):
        with torch.no_grad():
            # conv1D model
            inputs = i.to(device)
            mask = m.to(device)
            raw_output = model(inputs,mask)
            # mask[:,:3,:] on a 4-D tensor is equivalent to mask[:,:3,:,:].
            output_model = (1-mask[:,:3,:])*raw_output + mask[:,:3,:]*inputs[:,:3,:]
            # Interpolation baseline (runs on CPU tensors i, m).
            output_interp = lidar_data_processing.outer_interp_loop(i,m,mask_pts_per_seq,2)
            if wtd_loss:
                loss_model+=weighted_MSELoss(output_model,inputs[:,:3,:],sc)
                loss_interp+=weighted_MSELoss(output_interp,i[:,:3,:],sc)
            else:
                # normalized by the number of masked points
                loss_model += F.mse_loss(output_model, inputs[:,:3,:], reduction="sum")
                loss_interp += F.mse_loss(output_interp, i[:,:3,:], reduction="sum")
        print("Batch {} done".format(batch_id))
    # Normalize by number of batches
    loss_model = loss_model/((mask_pts_per_seq**2)*pt_count)
    loss_interp = loss_interp/((mask_pts_per_seq**2)*pt_count)
    print("Validation Loss\n","*"*30)
    print("Model: {:2.2f}".format(loss_model))
    print("Interpolation: {:2.2f}".format(loss_interp))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, num_workers=8, shuffle=True)
# loss_comparison(train_loader,model)
loss_comparison(train_loader,model,pt_count=len(train_dataset))
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=128, num_workers=8, shuffle=True)
# loss_comparison(train_loader,model)
loss_comparison(valid_loader,model,pt_count=len(valid_dataset))
# ### Plotting
# +
inputs,mask_inputs = next(iter(valid_loader))
# only use the mask part of the outputs
raw_outputs = model(inputs.to(device),mask_inputs.to(device))
outputs = (1-mask_inputs[:,:3,:,:])*raw_outputs.to('cpu') + mask_inputs[:,:3,:,:]*inputs[:,:3,:,:]
output_interp = lidar_data_processing.outer_interp_loop(inputs,mask_inputs,args.mask_pts_per_seq,2)
# -
inputs.shape
# +
# %matplotlib notebook
im_idx = 28
def plot_infill(clean,output,mask,label_key = 'z'):
    ''' Plotting function for 2D infill. Takes a single patch for clean,output,
    and mask, and a label_key indicating which value (x,y, or z) to display.

    Draws three scatter sets on one 3-D axis: unmasked ground truth ('+'),
    model-filled points at masked positions (red), and the original values at
    those masked positions (green). Returns None; shows the figure.
    '''
    # Which dimension to plot
    # NOTE(review): 2/3/4 index columns of the arrays returned by
    # surrounding_grid, which presumably prepends grid coords — confirm.
    xyz_dict = {'x':2,'y':3,'z':4}
    z_val = xyz_dict[label_key]
    # Set up plot
    fig = plt.figure(figsize=[12,12])
    ax = fig.add_subplot(111, projection='3d')
    # Plot unmasked points (mask channel 0 is 1 where data was kept)
    surrounding_no_mask = mask[0] != 0
    unmasked_arr = np.array(lidar_data_processing.surrounding_grid(clean,surrounding_no_mask))
    ax.scatter(unmasked_arr[:,0],unmasked_arr[:,1],unmasked_arr[:,z_val], marker='+')
    # Plot masked, filled points
    surrounding_mask = mask[0] == 0
    filled_arr = np.array(lidar_data_processing.surrounding_grid(output,surrounding_mask))
    ax.scatter(filled_arr[:,0],filled_arr[:,1],filled_arr[:,z_val], color='r', marker='o')
    # Plot original, masked points
    masked_arr = np.array(lidar_data_processing.surrounding_grid(clean,surrounding_mask))
    ax.scatter(masked_arr[:,0],masked_arr[:,1],masked_arr[:,z_val], color='g', marker='o')
    # Labels and such
    ax.set_xlabel('Grid Across-Flight',fontsize=15)
    ax.set_ylabel('Grid Along-Flight',fontsize=15)
    ax.set_zlabel(label_key+' value',fontsize=15)
    plt.show()
# print(output.shape)
# print(clean.shape)
# print(weighted_MSELoss(output.unsqueeze(0),clean[:,:3,:].unsqueeze(0),sc))
# Model infill vs. interpolation infill for the same patch.
plot_infill(inputs[im_idx],outputs[im_idx],mask_inputs[im_idx],'z')
# -
plot_infill(inputs[im_idx],output_interp[im_idx],mask_inputs[im_idx],'z')
import plotly
import plotly.graph_objs as go
outputs.shape
# +
# Configure Plotly to be rendered inline in the notebook.
plotly.offline.init_notebook_mode()
# Configure the trace: one scan column (col 5) of the first patch as xyz markers.
trace = go.Scatter3d(
    x=outputs.detach()[0,0,:,5],  # <-- Put your data instead
    y=outputs.detach()[0,1,:,5],  # <-- Put your data instead
    z=outputs.detach()[0,2,:,5],  # <-- Put your data instead
    mode='markers',
    marker={
        'size': 10,
        'opacity': 0.8,
    }
)
# Configure the layout.
layout = go.Layout(
    margin={'l': 0, 'r': 0, 'b': 0, 't': 0}
)
data = [trace]
plot_figure = go.Figure(data=data, layout=layout)
# Render the plot.
plotly.offline.iplot(plot_figure)
|
lidar_conv2d.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false nbgrader={"checksum": "2a962de7bcd02f81afba383207fa8e55", "grade": false, "grade_id": "cell-520b7e772181517f", "locked": true, "schema_version": 1, "solution": false}
# # Agent Based Modeling - Mesa
#
# "[Mesa](https://mesa.readthedocs.io/en/master/) is an agent-based modeling framework in Python. Mesa allows users to quickly create agent-based models using built-in core components (such as spatial grids and agent schedulers) or customized implementations, visualize them using a browser-based interface, and analyze their results using Python’s data analysis tools."
#
# _This notebook is based on and uses parts of texts from the Mesa docs._
#
# ### Discretised Lotka Volterra
# In this notebook we aim to familiarise you with Mesa and its Object Oriented Programming approach to agent-based models. This will be done through implementation of a simplified predator-prey model (discretised Lotka Volterra).
#
# If you are not yet comfortable with Object Oriented Programming or need a refresher, please visit [this page](https://python.swaroopch.com/oop.html).
#
# We will create a version of the "sheep-wolf" model wherein wolves and sheep wander randomly around the landscape. To allow the population to grow, each wolf or sheep has a fixed probability of reproducing at each timestep. In this simplified version, sheep do not have to eat, and only die when eaten by a wolf. Wolves, however, have a probability of dying. This produces interesting population dynamics, but creates a model that is ultimately unstable.
#
# To do this we need the following:
# - **Agents:** the predators and preys
# - **Environment:** an area in which the predators and prey can move around and interact
# - **Model:** the controller that links the environment and the agents
#
# We've already implemented the model and the environment is already implemented by Mesa, but it is up to you to implement the Agents!
#
# First: We install MESA and test if we can import it
# + deletable=false editable=false nbgrader={"checksum": "dd8556dcfc6b153f869ba4431c5ea534", "grade": false, "grade_id": "cell-9901288bc38efb00", "locked": true, "schema_version": 1, "solution": false}
from IPython.display import clear_output
# !pip install -e git+https://github.com/projectmesa/mesa#egg=mesa
import mesa
# !pip install ipynb
import ipynb
clear_output(wait=True)
print("Everything A-Okay!")
# + [markdown] deletable=false editable=false nbgrader={"checksum": "bca35b7dfd68ed7325de48ca4db7df13", "grade": false, "grade_id": "cell-995423520ad86893", "locked": true, "schema_version": 1, "solution": false}
# ### Testing
# Since testing each of the methods that you will create separately might be difficult, we provide you with a very minimal model that will be used during the testing phase. Later in this notebook we will introduce you to the model that has been implemented in `model.py`.
# + deletable=false editable=false nbgrader={"checksum": "7eb7b2e53f7df328aeaa05e654bfc915", "grade": false, "grade_id": "cell-a762fbb609f2033f", "locked": true, "schema_version": 1, "solution": false}
from mesa import Model
from mesa.space import MultiGrid
class TestModel(Model):
    '''
    Minimal Mesa model used only for testing agent implementations.

    Provides a toroidal MultiGrid plus helpers to add, remove and step agents.
    '''

    def __init__(self, width, height):
        # Initialise the Mesa base Model (sets up the seeded RNG, running flag, etc.).
        super().__init__()
        # Bug fix: the original assigned self.height = width and
        # self.width = height (swapped), which only went unnoticed because the
        # grid call below re-swapped them and all tests use square grids.
        self.width = width
        self.height = height
        self.grid = MultiGrid(self.width, self.height, torus=True)
        self.n_agents = 0
        self.agents = []

    def new_agent(self, agent_type, pos):
        '''
        Method that enables us to add agents of a given type.
        '''
        self.n_agents += 1

        # Create a new agent of the given type
        new_agent = agent_type(self.n_agents, self, pos)

        # Place the agent on the grid
        self.grid.place_agent(new_agent, pos)

        # And add the agent to the model so we can track it
        self.agents.append(new_agent)

    def remove_agent(self, agent):
        '''
        Method that enables us to remove passed agents.
        '''
        self.n_agents -= 1

        # Remove agent from grid
        self.grid.remove_agent(agent)

        # Remove agent from model
        self.agents.remove(agent)

    def step(self):
        '''
        Method that steps every agent.

        Prevents applying step on new agents by creating a local list.
        '''
        for agent in list(self.agents):
            agent.step()
# + [markdown] deletable=false editable=false nbgrader={"checksum": "4c330bd796dd29c046d959bd0eb10aed", "grade": false, "grade_id": "cell-93554d67bd9ce2a8", "locked": true, "schema_version": 1, "solution": false}
# ### Agents - Random walker
# As both the sheep and the wolves will walk around randomly, we will first implement a `RandomWalker` class. We can then re-use (inherit) this class when making our `Wolf` and `Sheep` classes. Below you can find the framework for the class, but you will have to implement the `random_move()` method. This method should get the neighbouring cells (Moore's neighbourhood), select one, and move the agent to this cell.
#
# Find the following functions through the provided link and find out which inputs they need:
# - [MultiGrid](https://github.com/projectmesa/mesa/blob/master/mesa/space.py).get_neighborhood()
# - [MultiGrid](https://github.com/projectmesa/mesa/blob/master/mesa/space.py).move_agent()
#
# Keep in mind that the `model` that is passed when the RandomWalker is initialised contains the grid. It is accessible through `self.model.grid` in the `random_move` function.
# + deletable=false nbgrader={"checksum": "705e3e0531a81c95f100f4d39d7de275", "grade": false, "grade_id": "cell-1bff8a60c7a9567e", "locked": false, "schema_version": 1, "solution": true}
from mesa import Agent
import random
class RandomWalker(Agent):
    """Base agent that can take one step to a random Moore neighbour."""

    def __init__(self, unique_id, model, pos):
        super().__init__(unique_id, model)
        self.pos = pos

    def random_move(self):
        '''
        Move the agent to a uniformly chosen cell in its Moore neighbourhood
        (the 8 surrounding cells, wrapping around the torus).
        '''
        options = self.model.grid.get_neighborhood(self.pos, True)
        destination = self.random.choice(options)
        self.model.grid.move_agent(self, destination)
# + deletable=false editable=false nbgrader={"checksum": "6e049d26bd9887cd44135ac0628cdb98", "grade": true, "grade_id": "cell-8b1f4b5f5975c03b", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Grader cell: verify that random_move() relocates the walker by one cell.
tester = TestModel(10, 10)

# Create a RandomWalker, so that we can call the random_move() method
start_position = (5, 5)
tester.new_agent(RandomWalker, start_position)

# Create a reference, so that we can properly test
walker = tester.agents[0]

# Check movement
walker.random_move()
assert walker.pos != (5, 5), "The walker has not moved!"
# NOTE(review): the `or` means only one axis is bounded; a stricter check
# would use `and` — left as-is since this is a locked grader cell.
assert abs(walker.pos[0] - 5) <= 1 or abs(walker.pos[1] - 5) <= 1, "The walker has moved more than 1 square"
# + [markdown] deletable=false editable=false nbgrader={"checksum": "db18798788284003479cae503fb9ddb5", "grade": false, "grade_id": "cell-01160d202fabdc5a", "locked": true, "schema_version": 1, "solution": false}
# ### Agents - Sheep
# As described above, to allow the population to grow, each wolf or sheep has a fixed probability of reproducing at each timestep. Sheep do not have to eat, and only die when eaten by a wolf. We have again provided you with the framework for this class, but you will have to implement the `step()` method. This method should move the Sheep using the `random_move()` method implemented earlier, then conditionally reproduce. You can use the 'new_agent()' method implemented in model to create new sheep.
#
# Attributes/methods you might need:
# - self.random_move()
# - self.model.new_agent()
# - self.model.sheep_reproduction_chance
# + deletable=false nbgrader={"checksum": "452bb0f0cd5f10ed4c1095ffdfa72cd5", "grade": false, "grade_id": "cell-bdcef78c12f1fbe2", "locked": false, "schema_version": 1, "solution": true}
class Sheep(RandomWalker):
    '''
    Sheep wander randomly and reproduce with a fixed probability each step.
    They never die of their own accord; they are only removed when eaten.
    '''

    def __init__(self, unique_id, model, pos):
        super().__init__(unique_id, model, pos)

    def step(self):
        '''
        Move to a random neighbouring cell, then reproduce with probability
        model.sheep_reproduction_chance (the lamb appears on this sheep's cell).
        '''
        self.random_move()
        # Use the model-seeded RNG (self.random) rather than the global
        # `random` module, consistent with RandomWalker.random_move(), so
        # runs are reproducible when the model is seeded.
        if self.random.random() < self.model.sheep_reproduction_chance:
            self.model.new_agent(Sheep, self.pos)
# + deletable=false editable=false nbgrader={"checksum": "1123709e6a1c1db6416546bc85241a86", "grade": true, "grade_id": "cell-a38971467ab6ea79", "locked": true, "points": 1, "schema_version": 1, "solution": false}
# Grader cell: verify sheep movement and reproduction behaviour.
tester = TestModel(10, 10)
tester.sheep_reproduction_chance = 1

# Create a Sheep
start_position = (5, 5)
tester.new_agent(Sheep, start_position)

# Create a reference, so that we can properly test
sheep = tester.agents[0]

# Check movement
tester.step()
assert sheep.pos != (5, 5), "The sheep has not moved!"
# NOTE(review): `or` bounds only one axis; see the walker test above.
assert abs(sheep.pos[0] - 5) <= 1 or abs(sheep.pos[1] - 5) <= 1, "The sheep has moved more than 1 square!"

# Check fertility (chance 1 must reproduce; chance 0 must not)
assert tester.n_agents == 2, "Your sheep is infertile!"
tester.sheep_reproduction_chance = 0
tester.step()
assert tester.n_agents == 2, "Your sheep are too fertile!"

# Check if the random_move function was used
old_pos = sheep.pos
sheep.random_move = lambda: False
tester.step()
assert old_pos == sheep.pos, "You didn't use the random_move() function!"
# + [markdown] deletable=false editable=false nbgrader={"checksum": "6692b355cc093c23e82e791119d6f42c", "grade": false, "grade_id": "cell-3dbd192f8addb916", "locked": true, "schema_version": 1, "solution": false}
# ### Agents - Wolf
# Each wolf has a fixed probability of dying at each timestep. When wolves meet a sheep they will eat it and then reproduce. We have again provided you with the framework for this class, but you will have to implement the step() method. This method should move the wolf, then check for sheep on its location, eat the sheep if it is there and reproduce, and finally conditionally die.
#
#
# Attributes/methods you might need:
# - self.random_move()
# - self.model.new_agent()
# - self.model.remove_agent()
# - self.model.wolf_reproduction_chance
# - [MultiGrid](https://github.com/projectmesa/mesa/blob/master/mesa/space.py).get_neighbors()
#
# + deletable=false nbgrader={"checksum": "3fbe6c210a5d39b6b09d422ba879ad5f", "grade": false, "grade_id": "cell-e6b7ab29c84f80f5", "locked": false, "schema_version": 1, "solution": true}
class Wolf(RandomWalker):
    '''
    Wolves wander randomly, eat every sheep sharing their cell (reproducing
    once per sheep eaten), and die with a fixed probability each step.
    '''

    def __init__(self, unique_id, model, pos):
        super().__init__(unique_id, model, pos)
        self.total_eaten = 0  # lifetime count of sheep eaten (read by the data collector)
        self.age = 0          # steps survived (read by the data collector)

    def step(self):
        '''
        This method should move the wolf, then check for sheep on its location,
        eat the sheep if it is there and reproduce, and finally conditionally die.
        '''
        self.age += 1
        self.random_move()
        # radius=0 with include_center=True returns exactly the agents
        # occupying this wolf's current cell.
        cellmates = self.model.grid.get_neighbors(self.pos, True, include_center=True, radius=0)
        for other in cellmates:
            if isinstance(other, Sheep):
                self.model.remove_agent(other)
                self.model.new_agent(Wolf, self.pos)
                self.total_eaten += 1
        # Use the model-seeded RNG (self.random) rather than the global
        # `random` module, consistent with RandomWalker.random_move(), so
        # runs are reproducible when the model is seeded.
        if self.random.random() < self.model.wolf_death_chance:
            self.model.remove_agent(self)
# + deletable=false editable=false nbgrader={"checksum": "d14e386c464f5221a9e279a0225cf4c5", "grade": true, "grade_id": "cell-7fa56d562b5c523c", "locked": true, "points": 2, "schema_version": 1, "solution": false}
# Grader cell: verify wolf movement, mortality, and predation.
tester = TestModel(10, 10)
tester.wolf_death_chance = 0

# Create a Wolf
start_position = (5, 5)
tester.new_agent(Wolf, start_position)

# Create a reference, so that we can properly test
wolf = tester.agents[0]

# Check movement
tester.step()
assert wolf.pos != (5, 5), "The wolf has not moved!"
# NOTE(review): `or` bounds only one axis; see the walker test above.
assert abs(wolf.pos[0] - 5) <= 1 or abs(wolf.pos[1] - 5) <= 1, "The wolf has moved more than 1 square!"

# Check death rates (chance 0 must survive; chance 1 must die)
assert tester.n_agents == 1, "Your wolf died!"
tester.wolf_death_chance = 1
tester.step()
assert tester.n_agents == 0, "Your wolf is invincible!"

# Create another model so that we can be sure the wolf eats the sheep
# (a 1x1 torus forces both agents onto the same cell)
tester = TestModel(1, 1)
tester.wolf_death_chance = 0
tester.sheep_reproduction_chance = 0

# Create a Sheep and a Wolf
start_position = (0, 0)
tester.new_agent(Sheep, start_position)
tester.new_agent(Wolf, start_position)

# Check if the sheep was eaten
tester.step()
assert type(tester.agents[0]) is not Sheep, "The sheep has not been eaten!"
# + [markdown] deletable=false editable=false nbgrader={"checksum": "f6cbd39841b40b8ffbf3d83301b66dde", "grade": false, "grade_id": "cell-7daf425c56b81c02", "locked": true, "schema_version": 1, "solution": false}
# ### The model
# Now that you have implemented the agents, we will introduce you to the model. The model links the agents to the environment and provides methods that make interactions with the model easy. It uses the `DataCollector` class, which we will explain later, and the `RandomActivation` class. The `RandomActivation` class is a scheduler that iterates through each of the agents randomly and uses its `step()` method. We have separated the Sheep scheduler from the Wolf scheduler to prevent what is called a ["race-condition"](https://en.wikipedia.org/wiki/Race_condition) wherein a Sheep gets eaten before it is its turn to `step()`.
#
# At the time of writing of this notebook, there are 4 different schedulers available in Mesa:
# - `BaseScheduler`: This is a scheduler that activates agents one at a time, in the order they were added.
# - `RandomActivation`: Activates each agent once per step, in random order, with the order reshuffled every step.
# - `SimultaneousActivation`: Activates all agents simultaneously.
# - `StagedActivation`: Allows agent activation to be divided into several stages instead of a single `step` method.
#
# Check out [the docs](https://mesa.readthedocs.io/en/master/apis/time.html) to learn more.
# + deletable=false editable=false nbgrader={"checksum": "58eae17276d48a3360c9b57ac0fde878", "grade": false, "grade_id": "cell-080df0e3734f5e9e", "locked": true, "schema_version": 1, "solution": false}
import random
from mesa import Model
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
from mesa.time import RandomActivation
class WolfSheep(Model):
    '''
    Wolf-Sheep Predation Model.

    Sheep move randomly and reproduce; wolves move randomly, eat any sheep
    they share a cell with (reproducing per sheep eaten), and die with a
    fixed probability. Population counts are collected every step.
    '''

    def __init__(self, height=20, width=20,
                 initial_sheep=100, initial_wolves=30,
                 sheep_reproduction_chance=0.05, wolf_death_chance=0.05):
        super().__init__()

        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduction_chance = sheep_reproduction_chance
        self.wolf_death_chance = wolf_death_chance

        # Add a schedule for sheep and wolves separately to prevent race-conditions
        self.schedule_Sheep = RandomActivation(self)
        self.schedule_Wolf = RandomActivation(self)

        self.grid = MultiGrid(self.width, self.height, torus=True)
        # Use the DataCollector's model argument `m` (rather than closing over
        # `self`) so the collection functions are self-contained.
        self.datacollector = DataCollector(
             {"Sheep": lambda m: m.schedule_Sheep.get_agent_count(),
              "Wolves": lambda m: m.schedule_Wolf.get_agent_count()})

        # Create sheep and wolves
        self.init_population(Sheep, self.initial_sheep)
        self.init_population(Wolf, self.initial_wolves)

        # This is required for the datacollector to work
        self.running = True
        self.datacollector.collect(self)

    def init_population(self, agent_type, n):
        '''
        Method that provides an easy way of making a bunch of agents at once.
        '''
        for i in range(n):
            # Use the model-seeded RNG (self.random) rather than the global
            # `random` module so initial placement is reproducible when seeded.
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)

            self.new_agent(agent_type, (x, y))

    def new_agent(self, agent_type, pos):
        '''
        Method that creates a new agent, and adds it to the correct scheduler.
        '''
        agent = agent_type(self.next_id(), self, pos)
        self.grid.place_agent(agent, pos)
        getattr(self, f'schedule_{agent_type.__name__}').add(agent)

    def remove_agent(self, agent):
        '''
        Method that removes an agent from the grid and the correct scheduler.
        '''
        self.grid.remove_agent(agent)
        getattr(self, f'schedule_{type(agent).__name__}').remove(agent)

    def step(self):
        '''
        Method that calls the step method for each of the sheep, and then for each of the wolves.
        '''
        self.schedule_Sheep.step()
        self.schedule_Wolf.step()

        # Save the statistics
        self.datacollector.collect(self)

    def run_model(self, step_count=200):
        '''
        Method that runs the model for a specific amount of steps.
        '''
        for i in range(step_count):
            self.step()
# + [markdown] deletable=false editable=false nbgrader={"checksum": "ecaad32249685e66048d4d7709697a98", "grade": false, "grade_id": "cell-55327128631eeeec", "locked": true, "schema_version": 1, "solution": false}
# You might have noticed that we are using a so called `MultiGrid`. Again, [Mesa provides us with multiple different types of environments](https://mesa.readthedocs.io/en/master/apis/space.html) for the agents to exist in:
# - `ContinuousSpace`: Each agent can have an arbitrary position.
# - `SingleGrid`: Grid where each cell contains at most one object.
# - `MultiGrid`: Grid where each cell can contain more than one object.
# - `HexGrid`: extends the grid to handle hexagonal neighbors.
# - `NetworkGrid`: Network where each node contains zero or more agents.
#
# We are using `MultiGrid` as we want multiple agents to be able to be in one cell.
#
# Running the model is now as simple as:
# + deletable=false editable=false nbgrader={"checksum": "9dd27024c17efe45580b44431c5131a8", "grade": false, "grade_id": "cell-aea508ce36b68c4d", "locked": true, "schema_version": 1, "solution": false}
# Run the model for the default 200 steps with default parameters.
model = WolfSheep()
model.run_model()
# + [markdown] deletable=false editable=false nbgrader={"checksum": "309e289929196455c4431c993081f6a2", "grade": false, "grade_id": "cell-c107971ac7ed69f9", "locked": true, "schema_version": 1, "solution": false}
# ### Data collector
# So far, we have tested the model by checking variables manually. This is not very efficient, and requires us to make complicated loops if we need more than just the end results. Mesa provides a class which can handle the data collection and storage. This class was already added to the model above; `DataCollector`. From the Mesa docs:
#
# "The data collector stores three categories of data: model-level variables, agent-level variables, and tables (which are a catch-all for everything else). Model- and agent-level variables are added to the data collector along with either the name of an attribute that the agents or models have, or a function for collecting some data. Model-level collection functions take a model object as an input, while agent-level collection functions take an agent object as an input. Both then return a value computed from the model or each agent at their current state. When the data collector’s collect method is called, with a model object as its argument, it applies each model-level collection function to the model, and stores the results in a dictionary, associating the current value with the current step of the model. Similarly, the method applies each agent-level collection function to each agent currently in the schedule, associating the resulting value with the step of the model, and the agent’s unique_id."
#
# Simply said, the `DataCollector` adds data to its dictionary when you call `collect()` method. We have added two functions that keep track of the amount of Sheep and Wolf to the `DataCollector` and call collect after each `step()` that the model makes.
#
# Run the following piece of code to see what happens.
# + deletable=false editable=false nbgrader={"checksum": "0f3f0991153595db10242fe1a2103249", "grade": false, "grade_id": "cell-827801dbb93bcd97", "locked": true, "schema_version": 1, "solution": false}
# %matplotlib inline
# Run the model and plot the collected Sheep/Wolves population counts per step.
model = WolfSheep()
model.run_model()
data = model.datacollector.get_model_vars_dataframe()
data.plot()
# + [markdown] deletable=false editable=false nbgrader={"checksum": "bb116d4152d8179234b36b7f25359807", "grade": false, "grade_id": "cell-a57d99ecfb3a6106", "locked": true, "schema_version": 1, "solution": false}
# See if you can make a similar plot which displays the average number of sheep a wolf has eaten over its lifetime for each `step()` in the model. You will need to change the `step()` method of the wolf so that it counts every sheep it has eaten, and you will need to change the `DataCollector` to log this data. You can then use `model.datacollector.get_agent_vars_dataframe()` to get your data.
#
# Take a look at the [Mesa Read The Docs](https://mesa.readthedocs.io/en/master/tutorials/intro_tutorial.html#collecting-data) for more details.
# +
# %matplotlib inline
import random
from mesa import Model
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
from mesa.time import RandomActivation
def mean_wolf(model):
    """
    Average number of sheep eaten per wolf-step, aggregated over all wolves
    currently alive: total sheep eaten divided by total wolf age.
    Returns 0 when there are no wolves (or none has aged yet).
    """
    wolves = model.schedule_Wolf.agents
    eaten_total = sum(wolf.total_eaten for wolf in wolves)
    age_total = sum(wolf.age for wolf in wolves)
    return eaten_total / age_total if age_total else 0
class WolfSheep(Model):
    '''
    Wolf-Sheep Predation Model.

    Same as the model above, but additionally collects the mean number of
    sheep eaten per wolf-step ("Mean", via mean_wolf) at every step.
    '''

    def __init__(self, height=20, width=20,
                 initial_sheep=100, initial_wolves=30,
                 sheep_reproduction_chance=0.05, wolf_death_chance=0.05):
        super().__init__()

        self.height = height
        self.width = width
        self.initial_sheep = initial_sheep
        self.initial_wolves = initial_wolves
        self.sheep_reproduction_chance = sheep_reproduction_chance
        self.wolf_death_chance = wolf_death_chance

        # Add a schedule for sheep and wolves separately to prevent race-conditions
        self.schedule_Sheep = RandomActivation(self)
        self.schedule_Wolf = RandomActivation(self)

        self.grid = MultiGrid(self.width, self.height, torus=True)
        # Use the DataCollector's model argument `m` (rather than closing over
        # `self`) so the collection functions are self-contained.
        self.datacollector = DataCollector(
             {"Sheep": lambda m: m.schedule_Sheep.get_agent_count(),
              "Wolves": lambda m: m.schedule_Wolf.get_agent_count(),
              "Mean": mean_wolf})

        # Create sheep and wolves
        self.init_population(Sheep, self.initial_sheep)
        self.init_population(Wolf, self.initial_wolves)

        # This is required for the datacollector to work
        self.running = True
        self.datacollector.collect(self)

    def init_population(self, agent_type, n):
        '''
        Method that provides an easy way of making a bunch of agents at once.
        '''
        for i in range(n):
            # Use the model-seeded RNG (self.random) rather than the global
            # `random` module so initial placement is reproducible when seeded.
            x = self.random.randrange(self.width)
            y = self.random.randrange(self.height)

            self.new_agent(agent_type, (x, y))

    def new_agent(self, agent_type, pos):
        '''
        Method that creates a new agent, and adds it to the correct scheduler.
        '''
        agent = agent_type(self.next_id(), self, pos)
        self.grid.place_agent(agent, pos)
        getattr(self, f'schedule_{agent_type.__name__}').add(agent)

    def remove_agent(self, agent):
        '''
        Method that removes an agent from the grid and the correct scheduler.
        '''
        self.grid.remove_agent(agent)
        getattr(self, f'schedule_{type(agent).__name__}').remove(agent)

    def step(self):
        '''
        Method that calls the step method for each of the sheep, and then for each of the wolves.
        '''
        self.schedule_Sheep.step()
        self.schedule_Wolf.step()

        # Save the statistics
        self.datacollector.collect(self)

    def run_model(self, step_count=200):
        '''
        Method that runs the model for a specific amount of steps.
        '''
        for i in range(step_count):
            self.step()
# Run the model for the default 200 steps and plot the collected statistics.
model = WolfSheep()
model.run_model()
# Mean sheep eaten per wolf-lifetime, as recorded by `mean_wolf` each step.
data = model.datacollector.get_model_vars_dataframe()["Mean"]
data.plot()
# Population counts of both species over time.
data = model.datacollector.get_model_vars_dataframe()[["Sheep", "Wolves"]]
data.plot()
# + [markdown] deletable=false editable=false nbgrader={"checksum": "5d279bd6965c706f71ae5889985f45ad", "grade": false, "grade_id": "cell-34a66061d1e417f1", "locked": true, "schema_version": 1, "solution": false}
# ### Visualisation
# Mesa provides you with an interactive visualisation toolkit. This is done through a local server that uses JavaScript to draw everything in a browser window. An advantage of this is that we can run models step by step, spotting patterns, behaviors, bugs, developing new intuitions, hypotheses or insights. It is also a very good way to explain a model to your roommates or grandparents.
#
# We have provided you with a separate `server.py` that shows you how you could visualise this model, as the visualisation will not cooperate with notebooks. Feel free to take a look and change elements to get accustomed to how the visualisation module works. For more information, visit [this link](https://mesa.readthedocs.io/en/master/tutorials/adv_tutorial.html#adding-visualization).
#
# Run this model by issuing the following command in the command line:
#
# `> ipython server.py`
#
# NOTE: Keep in mind that the "Frames Per Second" setting does not take into account the time it takes to build a frame. This means that when your simulation takes more time to build a frame than it takes the visualisation to display it, and you press stop, the visualisation will continue untill it has displayed all the frames that it should have displayed in the first place. To prevent this from happening, do not set "Frames Per Second" too high.
# + [markdown] deletable=false editable=false nbgrader={"checksum": "3846ebbfb249d0bab85bf4a2369e9ed7", "grade": false, "grade_id": "cell-51367e5799383810", "locked": true, "schema_version": 1, "solution": false}
# ### Mesa best practices
# To ensure that our models are easily accessible and maintainable, Mesa provides us with some general principles. These principles can be found [here](https://mesa.readthedocs.io/en/master/best-practices.html), but the main take-away is that you should structure your files. The model class for example, should be located in `model.py` and generally, the agents should be located in `agent.py`.
#
|
Notebooks/Mesa_introduction-4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Progressive Rollouts using Two Seldon Deployments
#
# In this example we will AB Test two Iris models: an SKLearn model and an XGBOOST model.
# We will run a progressive rollout allowing Iter8 to control the traffic to the two Seldon Deployments and gradually move traffic to the best model.
# ## Install Dependencies
#
# * Istio
# * Seldon Core
# * Seldon Core Analytics
# * Iter8
#
# You can create a Kind cluster with all dependencies installed with [Ansible](https://www.ansible.com/) with:
#
# ```
# pip install ansible openshift
# ansible-galaxy collection install git+https://github.com/SeldonIO/ansible-k8s-collection.git,v0.1.0
# ```
#
# Then from `example/iter8` folder run:
#
# ```
# ansible-playbook playbooks/iter8.yml
# ```
# ## Create ABTest with Two Seldon Deployments
# !cat baseline.yaml
# !kubectl apply -f baseline.yaml
# !cat candidate.yaml
# !kubectl apply -f candidate.yaml
# !kubectl wait --for condition=ready --timeout=600s pods --all -n ns-baseline
# !kubectl wait --for condition=ready --timeout=600s pods --all -n ns-candidate
# ## Create Virtual Service to Split Traffic
# !cat routing-rule.yaml
# !kubectl apply -f routing-rule.yaml
# ## Create some load on models.
#
# We will send requests, which will be split by the Seldon AB Test, as well as random feedback to both models, with feedback favouring the candidate.
# !cat fortio.yaml
# !URL_VALUE="http://$(kubectl -n istio-system get svc istio-ingressgateway -o jsonpath='{.spec.clusterIP}')" && \
# sed "s+URL_VALUE+${URL_VALUE}+g" fortio.yaml | kubectl apply -f -
# !kubectl wait --for condition=ready --timeout=600s pods --all -n default
# ## Create Metrics to evaluate
#
# These are a standard set of metrics we use in all examples.
# !cat ../../metrics.yaml
# !kubectl create -f ../../metrics.yaml
# !kubectl get metrics -n iter8-seldon
# ## Create Progressive Rollout Experiment
#
# * Run 15 iterations with 5 second gaps between default and candidate models
# * Both models must pass objectives
# * The winner will be chosen based on the user engagement metric
# !cat experiment.yaml
# !kubectl create -f experiment.yaml
# ## Monitor Experiment
#
# Download iter8ctl.
#
# ```
# GO111MODULE=on GOBIN=/usr/local/bin go get github.com/iter8-tools/iter8ctl@v0.1.3
# ```
#
# Then:
#
# ```
# while clear; do kubectl get experiment quickstart-exp -o yaml | iter8ctl describe -f -; sleep 8; done
# ```
#
# By the end you should see the xgboost candidate model is promoted.
# !kubectl wait experiment quickstart-exp --for=condition=Completed --timeout=300s
# !kubectl get experiment quickstart-exp
# ## Cleanup
# !kubectl delete -f fortio.yaml
# !kubectl delete -f experiment.yaml
# !kubectl delete -f ../../metrics.yaml
# !kubectl delete -f routing-rule.yaml
# !kubectl delete -f baseline.yaml
# !kubectl delete -f candidate.yaml
|
examples/iter8/progressive_rollout/separate_sdeps/abtest.ipynb
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,md,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Name(s)
# ## <NAME>
#
# **Instructions:** This is an individual assignment. Complete the following code and push to get your score.
# I am providing the autograder answers locally so you may test your code before pushing. I will be reviewing your submissions, and if I find you are circumventing the autograder in any manner, you will receive a 0 on this assignment and your case will be reported to the honor board for review. i.e., approach the assignment in a genuine manner and you have nothing to worry about.
# **Question 1.**
# When will new material be available each week?
# You can answer the question by defining an anonymous function. This creates a function that I can test using pytest. You don't have to worry about the details. You just need to answer the question by changing the string argument that is currently set to "D". I know this is a bit weird, but I want you to get used to submitting code as early as possible.
# Nothing to modify in this cell
def question_1(answer):
    """Look up the multiple-choice text for *answer*.

    Returns the answer text for a valid choice ("A"-"D"), or
    "Not a valid answer" for anything else.
    """
    answers = {
        "A": "Monday morning",
        "B": "Sunday night",
        "C": "Monday evening",
        "D": "I don't know"
    }
    try:
        return answers[answer]
    # Catch only lookup failures (unknown or unhashable keys) rather than a
    # bare `except`, which would also hide genuine bugs.
    except (KeyError, TypeError):
        return "Not a valid answer"
# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_1 = lambda: question_1("C")
# **Question 2.**
# Do I need to buy the textbook?
# Nothing to modify in this cell
def question_2(answer):
    """Look up the multiple-choice text for *answer*.

    Returns the answer text for a valid choice ("A"-"C"), or
    "Not a valid answer" for anything else.
    """
    answers = {
        "A": "No",
        "B": "Maybe",
        "C": "Yes. You will struggle with some of the chapters without the textbook",
    }
    try:
        return answers[answer]
    # Catch only lookup failures (unknown or unhashable keys) rather than a
    # bare `except`, which would also hide genuine bugs.
    except (KeyError, TypeError):
        return "Not a valid answer"
# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_2 = lambda: question_2("C")
# **Question 3.**
# Are there any required times that I need to be online?
# Nothing to modify in this cell
def question_3(answer):
    """Look up the multiple-choice text for *answer*.

    Returns the answer text for a valid choice ("A"/"B"), or
    "Not a valid answer" for anything else.
    """
    answers = {
        "A": "Yes",
        "B": "No"
    }
    try:
        return answers[answer]
    # Catch only lookup failures (unknown or unhashable keys) rather than a
    # bare `except`, which would also hide genuine bugs.
    except (KeyError, TypeError):
        return "Not a valid answer"
# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_3 = lambda: question_3("A")
# **Question 4.**
# What software will I use to complete the assignments?
# Nothing to modify in this cell
def question_4(answer):
    """Look up the multiple-choice text for *answer*.

    Returns the answer text for a valid choice ("A"-"C"), or
    "Not a valid answer" for anything else.
    """
    answers = {
        "A": "Java",
        "B": "Netbeans",
        "C": "Anaconda"
    }
    try:
        return answers[answer]
    # Catch only lookup failures (unknown or unhashable keys) rather than a
    # bare `except`, which would also hide genuine bugs.
    except (KeyError, TypeError):
        return "Not a valid answer"
# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_4 = lambda: question_4("C")
# **Question 5.**
# Do I need to participate in this class or can I just do the labs and assignments?
# Nothing to modify in this cell
def question_5(answer):
    """Look up the multiple-choice text for *answer*.

    Returns the answer text for a valid choice ("A"/"B"), or
    "Not a valid answer" for anything else.
    """
    answers = {
        "A": "Yes. If you want to get anything higher than a C, you'll need to do more than the labs and assignments",
        "B": "No",
    }
    try:
        return answers[answer]
    # Catch only lookup failures (unknown or unhashable keys) rather than a
    # bare `except`, which would also hide genuine bugs.
    except (KeyError, TypeError):
        return "Not a valid answer"
# YOUR SOLUTION HERE
# Sample incorrect answer
answer_question_5 = lambda: question_5("A")
# +
# Don't forget to push!
# -
|
Syllabus.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Source detection with Gammapy
#
# ## Context
#
# The first task in a source catalogue production is to identify significant excesses in the data that can be associated to unknown sources and provide a preliminary parametrization in term of position, extent, and flux. In this notebook we will use Fermi-LAT data to illustrate how to detect candidate sources in counts images with known background.
#
# **Objective: build a list of significant excesses in a Fermi-LAT map**
#
#
# ## Proposed approach
#
# This notebook show how to do source detection with Gammapy using the methods available in `~gammapy.estimators`.
# We will use images from a Fermi-LAT 3FHL high-energy Galactic center dataset to do this:
#
# * perform adaptive smoothing on counts image
# * produce 2-dimensional test-statistics (TS)
# * run a peak finder to detect point-source candidates
# * compute Li & Ma significance images
# * estimate source candidates radius and excess counts
#
# Note that what we do here is a quick-look analysis, the production of real source catalogs use more elaborate procedures.
#
# We will work with the following functions and classes:
#
# * `~gammapy.maps.WcsNDMap`
# * `~gammapy.estimators.ASmoothEstimator`
# * `~gammapy.estimators.TSMapEstimator`
# * `gammapy.estimators.utils.find_peaks`
# ## Setup
#
# As always, let's get started with some setup ...
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from gammapy.maps import Map
from gammapy.estimators import ASmoothMapEstimator, TSMapEstimator
from gammapy.estimators.utils import find_peaks
from gammapy.datasets import MapDataset
from gammapy.modeling.models import (
BackgroundModel,
SkyModel,
PowerLawSpectralModel,
PointSpatialModel,
)
from gammapy.irf import PSFMap, EnergyDependentTablePSF, EDispKernelMap
from astropy.coordinates import SkyCoord
import astropy.units as u
import numpy as np
# ## Read in input images
#
# We first read in the counts cube and sum over the energy axis:
# +
# Counts, background and exposure cubes for the Fermi-LAT 3FHL Galactic
# centre region, read from the $GAMMAPY_DATA sample-data directory.
counts = Map.read(
    "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts-cube.fits.gz"
)
background = Map.read(
    "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-background-cube.fits.gz"
)
exposure = Map.read(
    "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-exposure-cube.fits.gz"
)
# unit is not properly stored on the file. We add it manually
exposure.unit = "cm2s"
# Energy-dependent point-spread function, converted to a PSF map so the
# estimators below can convolve models with the instrument response.
psf = EnergyDependentTablePSF.read(
    "$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-psf-cube.fits.gz"
)
psfmap = PSFMap.from_energy_dependent_table_psf(psf)
# Diagonal (identity) energy dispersion mapping true onto reconstructed energy.
edisp = EDispKernelMap.from_diagonal_response(
    energy_axis=counts.geom.axes["energy"],
    energy_axis_true=exposure.geom.axes["energy_true"],
)
# Bundle everything into a MapDataset for the smoothing / TS estimators below.
dataset = MapDataset(
    counts=counts,
    background=background,
    exposure=exposure,
    psf=psfmap,
    name="fermi-3fhl-gc",
    edisp=edisp,
)
# -
# ## Adaptive smoothing
#
# For visualisation purpose it can be nice to look at a smoothed counts image. This can be performed using the adaptive smoothing algorithm from [Ebeling et al. (2006)](https://ui.adsabs.harvard.edu/abs/2006MNRAS.368...65E/abstract).
#
# In the following example the `threshold` argument gives the minimum significance expected, values below are clipped.
# %%time
scales = u.Quantity(np.arange(0.05, 1, 0.05), unit="deg")
smooth = ASmoothMapEstimator(
threshold=3, scales=scales, energy_edges=[10, 500] * u.GeV
)
images = smooth.run(dataset)
plt.figure(figsize=(15, 5))
images["flux"].plot(add_cbar=True, stretch="asinh");
# ## TS map estimation
#
# The Test Statistic, TS = 2 ∆ log L ([Mattox et al. 1996](https://ui.adsabs.harvard.edu/abs/1996ApJ...461..396M/abstract)), compares the likelihood function L optimized with and without a given source.
# The TS map is computed by fitting by a single amplitude parameter on each pixel as described in Appendix A of [Stewart (2009)](https://ui.adsabs.harvard.edu/abs/2009A%26A...495..989S/abstract). The fit is simplified by finding roots of the derivative of the fit statistics (default settings use [Brent's method](https://en.wikipedia.org/wiki/Brent%27s_method)).
#
# We first need to define the model that will be used to test for the existence of a source. Here, we use a point source.
spatial_model = PointSpatialModel()
spectral_model = PowerLawSpectralModel(index=2)
model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)
# %%time
estimator = TSMapEstimator(
model,
kernel_width="1 deg",
selection_optional=[],
energy_edges=[10, 500] * u.GeV,
)
maps = estimator.run(dataset)
# ### Plot resulting images
plt.figure(figsize=(15, 5))
maps["sqrt_ts"].plot(add_cbar=True);
plt.figure(figsize=(15, 5))
maps["flux"].plot(add_cbar=True, stretch="sqrt", vmin=0);
plt.figure(figsize=(15, 5))
maps["niter"].plot(add_cbar=True);
# ## Source candidates
#
# Let's run a peak finder on the `sqrt_ts` image to get a list of point-sources candidates (positions and peak `sqrt_ts` values).
# The `find_peaks` function performs a local maximum search in a sliding window; the argument `min_distance` is the minimum pixel distance between peaks (the smallest possible value, and the default, is 1 pixel).
sources = find_peaks(maps["sqrt_ts"], threshold=5, min_distance="0.25 deg")
nsou = len(sources)
sources
# +
# Plot sources on top of significance sky image
plt.figure(figsize=(15, 5))
_, ax, _ = maps["sqrt_ts"].plot(add_cbar=True)
ax.scatter(
sources["ra"],
sources["dec"],
transform=plt.gca().get_transform("icrs"),
color="none",
edgecolor="w",
marker="o",
s=600,
lw=1.5,
);
# -
# Note that we used the instrument point-spread-function (PSF) as kernel, so the hypothesis we test is the presence of a point source. In order to test for extended sources we would have to use as kernel an extended template convolved by the PSF. Alternatively, we can compute the significance of an extended excess using the Li & Ma formalism, which is faster as no fitting is involved.
# ## What next?
#
# In this notebook, we have seen how to work with images and compute TS and significance images from counts data, if a background estimate is already available.
#
# Here's some suggestions what to do next:
#
# - Look how background estimation is performed for IACTs with and without the high-level interface in [analysis_1](analysis_1.ipynb) and [analysis_2](analysis_2.ipynb) notebooks, respectively
# - Learn about 2D model fitting in the [modeling 2D](modeling_2D.ipynb) notebook
# - find more about Fermi-LAT data analysis in the [fermi_lat](fermi_lat.ipynb) notebook
# - Use source candidates to build a model and perform a 3D fitting (see [analysis_3d](analysis_3d.ipynb), [analysis_mwl](analysis_mwl.ipynb) notebooks for some hints)
|
docs/tutorials/detect.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import Point
from sklearn.neighbors import KNeighborsRegressor
import rasterio as rst
from rasterstats import zonal_stats
# %matplotlib inline
# -
path = r"[CHANGE THIS PATH]\Wales\\"
data = pd.read_csv(path + "final_data.csv", index_col = 0)
# # Convert to GeoDataFrame
geo_data = gpd.GeoDataFrame(data = data,
crs = {'init':'epsg:27700'},
geometry = data.apply(lambda geom: Point(geom['oseast1m'],geom['osnrth1m']),axis=1))
geo_data.head()
# +
f, (ax1, ax2, ax3) = plt.subplots(1,3, figsize = (16,6), sharex = True, sharey = True)
geo_data[geo_data['Year'] == 2016].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax1);
geo_data[geo_data['Year'] == 2017].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax2);
geo_data[geo_data['Year'] == 2018].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax3);
# -
# ## k-nearest neighbour interpolation
#
# Non-parametric interpolation of loneliness based on local set of _k_ nearest neighbours for each cell in our evaluation grid.
#
# Effectively becomes an inverse distance weighted (idw) interpolation when weights are set to be distance based.
# +
def idw_model(k, p):
    """Build a k-nearest-neighbour regressor with inverse-distance weights.

    k : number of neighbours used for each prediction.
    p : power applied to the inverse distance (p=2 gives classic IDW).
    When a query point coincides exactly with a sample (distance 0), only
    the zero-distance sample(s) are weighted, avoiding division by zero.
    """
    def _inv_distance_index(weights, index=p):
        # Bug fix: the original referenced an undefined name `test` here;
        # the zero-distance indicator must be computed from `weights`.
        return (weights == 0).astype(int) if np.any(weights == 0) else 1. / weights**index
    return KNeighborsRegressor(k, weights=_inv_distance_index)
def grid(xmin, xmax, ymin, ymax, cellsize):
    """Return a meshgrid pair (xx, yy) covering [xmin, xmax] x [ymin, ymax].

    The bounds are snapped outward to multiples of `cellsize` so the grid
    fully contains the requested extent.
    """
    # Snap x and y ranges outward to accommodate cellsize
    xmin = (xmin // cellsize) * cellsize
    xmax = -(-xmax // cellsize) * cellsize  # ceiling division
    ymin = (ymin // cellsize) * cellsize
    ymax = -(-ymax // cellsize) * cellsize
    # Bug fix: np.linspace requires an integer sample count; the original
    # passed a float, which raises TypeError on modern NumPy. The count is
    # kept at n (not n+1) points to preserve the original grid shape, so the
    # actual spacing is slightly larger than `cellsize`.
    x = np.linspace(xmin, xmax, int((xmax - xmin) / cellsize))
    y = np.linspace(ymin, ymax, int((ymax - ymin) / cellsize))
    return np.meshgrid(x, y)
def reshape_grid(xx, yy):
    """Flatten a meshgrid pair into an (n, 2) array of x/y coordinate rows."""
    return np.column_stack((xx.ravel(), yy.ravel()))
def reshape_image(z, xx):
    """Reshape flat predictions `z` to the grid shape of `xx`, flipped
    vertically so row 0 corresponds to the maximum y (image convention)."""
    return np.flipud(np.reshape(z, np.shape(xx)))
def idw_surface(locations, values, xmin, xmax, ymin, ymax, cellsize, k=5, p=2):
    """Interpolate an inverse-distance-weighted surface on a regular grid.

    locations : (n, 2) array of sample x/y coordinates (assumed to be in the
        same projected CRS as the grid bounds -- TODO confirm against caller).
    values : (n,) sample values to interpolate.
    xmin, xmax, ymin, ymax, cellsize : extent and resolution of the output grid.
    k : number of nearest neighbours used; p : inverse-distance power.
    Returns a 2-D image-like array (row 0 at the top / maximum y).
    """
    # Make and fit the idw model
    idw = idw_model(k,p).fit(locations, values)
    # Make the grid to estimate over
    xx, yy = grid(xmin, xmax, ymin, ymax, cellsize)
    # reshape the grid for estimation
    xy = reshape_grid(xx,yy)
    # Predict the grid values
    z = idw.predict(xy)
    # reshape to image array
    z = reshape_image(z, xx)
    return z
# -
# ## 2016 data
# +
# Get point locations and values from data
points = geo_data[geo_data['Year'] == 2016][['oseast1m','osnrth1m']].values
vals = geo_data[geo_data['Year'] == 2016]['loneills'].values
surface2016 = idw_surface(points, vals, 90000,656000,10000,654000,250,7,2)
# -
# Look at surface
f, ax = plt.subplots(figsize = (8,10))
ax.imshow(surface2016, cmap='Reds')
ax.set_aspect('equal')
# ## 2017 Data
# +
# Get point locations and values from data
points = geo_data[geo_data['Year'] == 2017][['oseast1m','osnrth1m']].values
vals = geo_data[geo_data['Year'] == 2017]['loneills'].values
surface2017 = idw_surface(points, vals, 90000,656000,10000,654000,250,7,2)
# -
# Look at surface
f, ax = plt.subplots(figsize = (8,10))
ax.imshow(surface2017, cmap='Reds')
ax.set_aspect('equal')
# ## 2018 Data
# Get minimum and maximum bounds from the data. Round these down (in case of the 'min's) and up (in case of the 'max's) to get the values for `idw_surface()`
print("xmin = ", geo_data['oseast1m'].min(), "\n\r",
"xmax = ", geo_data['oseast1m'].max(), "\n\r",
"ymin = ", geo_data['osnrth1m'].min(), "\n\r",
"ymax = ", geo_data['osnrth1m'].max())
xmin = 175000
xmax = 357000
ymin = 167000
ymax = 393000
# +
# Get point locations and values from data
points = geo_data[geo_data['Year'] == 2018][['oseast1m','osnrth1m']].values
vals = geo_data[geo_data['Year'] == 2018]['loneills'].values
surface2018 = idw_surface(points, vals, xmin,xmax,ymin,ymax,250,7,2)
# -
# Look at surface
f, ax = plt.subplots(figsize = (8,10))
ax.imshow(surface2018, cmap='Reds')
ax.set_aspect('equal')
# # Extract Values to MSOAs
#
# Get 2011 MSOAs from the Open Geography Portal: http://geoportal.statistics.gov.uk/
# +
# Get MSOAs which we use to aggregate the loneills variable.
#filestring = './Data/MSOAs/Middle_Layer_Super_Output_Areas_December_2011_Full_Clipped_Boundaries_in_England_and_Wales.shp'
filestring = r'[CHANGE THIS PATH]\Data\Boundaries\England and Wales\Middle_Layer_Super_Output_Areas_December_2011_Super_Generalised_Clipped_Boundaries_in_England_and_Wales.shp'
msoas = gpd.read_file(filestring)
msoas.to_crs({'init':'epsg:27700'})
# keep the Wales MSOAs
msoas = msoas[msoas['msoa11cd'].str[:1] == 'W'].copy()
# +
# Get GB countries data to use for representation
#gb = gpd.read_file('./Data/GB/Countries_December_2017_Generalised_Clipped_Boundaries_in_UK_WGS84.shp')
#gb = gb.to_crs({'init':'epsg:27700'})
# get England
#eng = gb[gb['ctry17nm'] == 'England'].copy()
# -
# Make affine transform for raster
trans = rst.Affine.from_gdal(xmin-125,250,0,ymax+125,0,-250)
# NB This process is slooow - write bespoke method?
# 2016
#msoa_zones = zonal_stats(msoas['geometry'], surface2016, affine = trans, stats = 'mean', nodata = np.nan)
#msoas['loneills_2016'] = list(map(lambda x: x['mean'] , msoa_zones))
# 2017
#msoa_zones = zonal_stats(msoas['geometry'], surface2017, affine = trans, stats = 'mean', nodata = np.nan)
#msoas['loneills_2017'] = list(map(lambda x: x['mean'] , msoa_zones))
# 2018
msoa_zones = zonal_stats(msoas['geometry'], surface2018, affine = trans, stats = 'mean', nodata = np.nan)
msoas['loneills_2018'] = list(map(lambda x: x['mean'] , msoa_zones))
# +
# Check out the distributions of loneills by MSOA
f, [ax1, ax2, ax3] = plt.subplots(1,3, figsize=(14,5), sharex = True, sharey=True)
#ax1.hist(msoas['loneills_2016'], bins = 30)
#ax2.hist(msoas['loneills_2017'], bins = 30)
ax3.hist(msoas['loneills_2018'], bins = 30)
ax1.set_title("2016")
ax2.set_title("2017")
ax3.set_title("2018");
# +
bins = [-10, -5, -3, -2, -1, 1, 2, 3, 5, 10, 22]
labels = ['#01665e','#35978f', '#80cdc1','#c7eae5','#f5f5f5','#f6e8c3','#dfc27d','#bf812d','#8c510a','#543005']
#msoas['loneills_2016_class'] = pd.cut(msoas['loneills_2016'], bins, labels = labels)
#msoas['loneills_2017_class'] = pd.cut(msoas['loneills_2017'], bins, labels = labels)
msoas['loneills_2018_class'] = pd.cut(msoas['loneills_2018'], bins, labels = labels)
msoas['loneills_2018_class'] = msoas.loneills_2018_class.astype(str) # convert categorical to string
# +
f, (ax1, ax2, ax3) = plt.subplots(1,3,figsize = (16,10))
#msoas.plot(color = msoas['loneills_2016_class'], ax=ax1)
#msoas.plot(color = msoas['loneills_2017_class'], ax=ax2)
msoas.plot(color = msoas['loneills_2018_class'], ax=ax3)
#gb.plot(edgecolor = 'k', linewidth = 0.5, facecolor='none', ax=ax1)
#gb.plot(edgecolor = 'k', linewidth = 0.5, facecolor='none', ax=ax2)
#gb.plot(edgecolor = 'k', linewidth = 0.5, facecolor='none', ax=ax3)
# restrict to England
#ax1.set_xlim([82672,656000])
#ax1.set_ylim([5342,658000])
#ax2.set_xlim([82672,656000])
#ax2.set_ylim([5342,658000])
#ax3.set_xlim([82672,656000])
#ax3.set_ylim([5342,658000])
# Make a bespoke legend whose labels match the classification bins above
from matplotlib.patches import Patch
handles = []
# Bug fix: the seventh label read "3, 3"; the bin edges are [..., 2, 3, ...],
# so the correct range label is "2, 3".
ranges = ["-10, -5","-5, -3","-3, -2","-2, -1","-1, 1","1, 2","2, 3","3, 5","5, 10","10, 22"]
for color, label in zip(labels,ranges):
    handles.append(Patch(facecolor = color, label = label))
ax1.legend(handles = handles, loc = 2);
# -
# Save out msoa data as shapefile and geojson
msoas.to_file(path + "msoa_loneliness.shp", driver = 'ESRI Shapefile')
# msoas.to_file(path + "msoa_loneliness.geojson", driver = 'GeoJSON')
# save out msoa data as csv
msoas.to_csv(path + "msoa_loneliness.csv")
|
MSOA Mapping - Wales.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from IPython.display import display,Math
from sympy import *
init_session()
from ipywidgets import interact
from ipywidgets import interact,Dropdown,IntSlider
@interact
def _(n="1"):
    """Interactive check of divisibility of n by 3 via the digit-sum rule,
    displaying the worked computation as LaTeX."""
    dlist = []
    for s in str(n)[::-1]: # collect the digits one at a time, in reverse order
        dlist.append(s)
    sum1 = int(dlist[0]) # running sum of the digits
    sum1str = dlist[0] # the expression string shown to the user, e.g. "3+2+1"
    for s in dlist[1:]:
        d = int(s)
        sum1 += d
        sum1str += "+{}".format(s)
    # Displayed verdict strings are intentionally left in Japanese
    # ("is a multiple of 3" / "is not a multiple of 3").
    if sum1%3 == 0:
        check3 = "3の倍数です"
    else:
        check3 = "3の倍数ではない"
    return display(Math("{}={}\\equiv{} \mod 3 \\\\ より{}は{}".format(sum1str,sum1,sum1%3,n,check3)))
from ipywidgets import interact
from ipywidgets import interact,Dropdown,IntSlider
@interact
def _(n="1"):
    """Interactive check of divisibility of n by 11 via the alternating
    digit-sum rule, displaying the worked computation as LaTeX."""
    dlist = []
    for s in str(n)[::-1]: # collect the digits one at a time, in reverse order
        dlist.append(s)
    altsum = int(dlist[0]) # alternating sum of the digits
    altsumstr = dlist[0] # the expression string shown to the user
    k = -1 # sign applied to the next digit; flips every iteration
    for s in dlist[1:]:
        d = int(s)
        altsum += d*k
        if k<0:
            altsumstr += "-{}".format(s)
        else:
            altsumstr += "+{}".format(s)
        k *= -1
    # Displayed verdict strings are intentionally left in Japanese
    # ("is a multiple of 11" / "is not a multiple of 11").
    if altsum%11 == 0:
        check11 = "11の倍数です"
    else:
        check11 = "11の倍数ではない"
    return display(Math("{}={}\\equiv{} \mod 11 \\\\ より{}は{}".format(
        altsumstr,altsum,altsum%11,n,check11)))
from ipywidgets import interact
from ipywidgets import interact,Dropdown,IntSlider
@interact
def _(n="1"):
    """Interactive check of divisibility of n by 7 via the alternating sum
    of 3-digit groups, displaying the worked computation as LaTeX.

    NOTE(review): unlike the two widgets above, this one indexes `n`
    directly (`n[-3:]`), so it relies on `n` being a string -- confirm
    before reuse with non-string input.
    """
    dlist = [n[-3:]] # the lowest (rightmost) three digits
    for i in range(3,len(n),3): # collect three digits at a time, in reverse order
        dlist.append(n[-i-3:-i])
    altsum = int(dlist[0]) # alternating sum of the 3-digit groups
    altsumstr = dlist[0] # the expression string shown to the user
    k = -1 # sign applied to the next group; flips every iteration
    for s in dlist[1:]:
        d = int(s)
        altsum += d*k
        if k<0:
            altsumstr += "-{}".format(s)
        else:
            altsumstr += "+{}".format(s)
        k *= -1
    # Displayed verdict strings are intentionally left in Japanese
    # ("is a multiple of 7" / "is not a multiple of 7").
    if altsum%7 == 0:
        check7 = "7の倍数です"
    else:
        check7 = "7の倍数ではない"
    return display(Math("{}={}\\equiv{} \mod 7 \\\\ より{}は{}".format(
        altsumstr,altsum,altsum%7,n,check7)))
|
21jk1-0421.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from skimage import io
from skimage.color import *
import cv2
from my_commonfunctions import *
matplotlib.rcParams['figure.dpi'] = 200
# -
test = (rgb2gray(io.imread('ym_test.png')))
#test3_binary = test3_gray < 200
#test3_thresholded_gray = gray2rgb(test3_binary * 255).astype(np.uint8)
#print(test3_thresholded_gray.dtype)
np.max(test)
# 1. Canny
# edges = cv2.Canny(test, 100, 200)
edges = test
# 2. Closing
kc1 = np.ones((35, 35))
closed1 = my_close(edges, kc1)
my_show_images([test, edges, closed1], dpi=100, row_max=2)
# 3. White pixels freq in each row
closed1_bin = closed1 > 200
sum_of_rows = np.sum(closed1_bin, axis=1) # Sum of each row
img_width = test.shape[1]
is_line = (sum_of_rows / img_width) > 0.5 # Normalization & thresholding (a row is considered a line if at least 50% of its pixels are white)
is_line
# 4. Contours of is_line
image, init_contours, hierarchy = cv2.findContours((is_line*255).astype(np.uint8),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
init_contours = np.array(init_contours).squeeze()
#print(init_contours)
init_contours_y = init_contours.T[1].T
# 5. Mid point between each 2 contours
init_contours_y_flattened_sorted = np.sort(init_contours_y.flatten())
init_contours_y_flattened_sorted_2_consecutive = init_contours_y_flattened_sorted[1:-1].reshape(-1, 2)
init_contour_y_mid_points = np.mean(init_contours_y_flattened_sorted_2_consecutive, axis=1).astype(np.int32)
init_contours_y
init_contours_y.flatten()
init_contours_y_flattened_sorted
init_contours_y_flattened_sorted_2_consecutive
# Show only
init_contours_y_closed1 = gray2rgb(closed1.copy())
init_contours_y_closed1[init_contours_y.flatten()] = [255, 0, 0]
plt.imshow(init_contours_y_closed1)
init_contour_y_mid_points
# Show only
init_contours_y_mid_points_closed1 = gray2rgb(closed1.copy())
init_contours_y_mid_points_closed1[init_contour_y_mid_points] = [255, 0, 0]
plt.imshow(init_contours_y_mid_points_closed1)
# 6. Final contours
# Bug fix: `test3_gray` is never defined (its creation is commented out at the
# top of the notebook), so this cell raised NameError; use the loaded
# grayscale image `test` instead.
img_height = test.shape[0]
final_contours_y = np.append(np.array([0]), init_contour_y_mid_points) # Add 0 @ the beginning
final_contours_y = np.append(final_contours_y, np.array([img_height])) # Add img_height @ the end
final_contours_y
# +
# Slice the image into horizontal strips between consecutive line contours.
final_lines = []
for i in range(len(final_contours_y)-1):
    x0 = 0
    x1 = img_width
    y0 = final_contours_y[i]
    y1 = final_contours_y[i+1]
    # Bug fix: `test3_gray` is never defined in this notebook (its creation
    # is commented out at the top); slice the loaded grayscale image `test`.
    final_lines.append(test[y0:y1, x0:x1])
my_show_images(final_lines, row_max=1)
# -
|
Junk/Yamani/Separate Lines TEST.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Copyright (c) 2020 <NAME>**
#
# **Copyright (c) 2021 Skymind Education Group Sdn. Bhd.**
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **SPDX-License-Identifier: Apache-2.0**
#
# + [markdown] id="0fO5omuHvQ6y"
# # Hello Pythoneer!
#
# You've learnt about Python conventions and common practices in the previous lesson. Today, you will execute the first few lines of Python code (Spoiler: Sorry, not HelloWorld).
#
# *Author: <NAME>, <NAME>*
# + [markdown] id="1rqokl756-0j"
# # Variables
# + [markdown] id="r923vW3ewdI0"
# ## Recap: Naming rules for variables
#
# Let's recap some of the naming rules for variables.
# + [markdown] id="BHiu8-1pwnLt"
# 1. Python variables can only begin with a letter (A-Z or a-z) or an underscore.
# + colab={"base_uri": "https://localhost:8080/", "height": 128} executionInfo={"elapsed": 769, "status": "error", "timestamp": 1606548882562, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="NjFVgcVivCBI" outputId="4c5217e5-05f3-4d26-ea21-5c901287859b"
9lives=9
# + [markdown] id="DVBXcZvKwzI4"
# *Quiz: Did you find any other convention violations in the above code line?*
# + [markdown] id="sLP8CaCYxDnI"
# The correct way:
#
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 783, "status": "ok", "timestamp": 1606549050158, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Y--TaCD0xIMa" outputId="68b636b3-ce4a-444d-f71a-21af8e073743"
# Correct: the identifier begins with a letter
var = 0
var  # bare expression on the last line — the notebook echoes its value
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 757, "status": "ok", "timestamp": 1606548887475, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="9_R3R3lgxbNK" outputId="055f0eec-3952-48f0-cba0-5696e95a713f"
# Correct: the identifier begins with an underscore
_9lives = "cat"
_9lives
# + [markdown] id="Xb9sUi6qxsWm"
# 2. The rest of the identifier may contain letters (A-Z or a-z), underscores(_) and numbers (0-9).
# + id="XyqmlriFxpSQ"
# Correct: digits are allowed after the first character
year2 = "Sophomore"
year2
# + colab={"base_uri": "https://localhost:8080/", "height": 128} executionInfo={"elapsed": 813, "status": "error", "timestamp": 1606549129595, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="UThIq7FeyYY0" outputId="d95c5829-aa67-407d-9d99-73960549d5d3"
# Incorrect: "$" is not allowed in identifiers — this line raises SyntaxError
_$$ = 7
# + [markdown] id="UQCWgXsKynzl"
# 3. Python is case-sensitive. "Name" and "name" are two different identifiers.
# + id="0yfccYQhyhZP"
name = "Skymind" # declare a string variable; note the lowercase "name"
# + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 496, "status": "ok", "timestamp": 1606549268957, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="IisEhoa8zDZu" outputId="f5de5541-2235-44e6-ec22-7be58e1e8b4a"
# Works: "name" (lowercase) was defined above
name
# + colab={"base_uri": "https://localhost:8080/", "height": 162} executionInfo={"elapsed": 834, "status": "error", "timestamp": 1606549270483, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="7c44doQTzFbm" outputId="35c4ea5d-fa5b-4f7e-81d2-efcc9bc26a01"
# Fails with NameError: "Name" (capital N) was never defined — Python is case-sensitive
Name
# + [markdown] id="gCiBt5TYzDE0"
# 4. Reserved words (keywords) cannot be used as identifier names.
# + [markdown] id="do4jZrvczmTu"
# | | | Reserved Keywords | | | |
# | --- | --- | --- | --- | --- | --- |
# | and | def | False | import | not | True |
# | as | del | finally | in | or | try |
# | assert | elif | for | is | pass | while |
# | break | else | from | lambda | print | with |
# | class | except | global | None | raise | yield |
# | continue | exec | if | nonlocal | return | |
#
# *Note: `print` and `exec` are reserved only in Python 2; in Python 3 they are built-in functions, not keywords.*
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 128} executionInfo={"elapsed": 831, "status": "error", "timestamp": 1606550720609, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="CQ555PAp4lFw" outputId="79bc91eb-7bd6-4879-9edb-0f5fd1d7bfd3"
# Incorrect: "False" is a reserved keyword — this line raises SyntaxError
False = 7
# + [markdown] id="sY9-lW2-3EgV"
# ## Assigning and reassigning
# + [markdown] id="8IFkZUx33Wiv"
# 1. To assign a value, just name a variable and type the value after the equal sign. No type declaration needed.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 756, "status": "ok", "timestamp": 1606550750231, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="SXCMoTT0zlpi" outputId="cf0e9543-cc7d-432d-e1af-12d8f2146714"
# Assign by naming a variable and typing the value — no type declaration needed
age = 18
print(age)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 774, "status": "ok", "timestamp": 1606550499532, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Kp7E8Gsy3uMP" outputId="60227fb5-9186-44b9-d871-022e4cdaab92"
# Reassigning the same name to a value of a different type is allowed
age = "Dinosaur"
print(age)
# + [markdown] id="jYu41D0n3ziL"
# However, keep in mind to avoid nonsensical variable assignments. Thus, `age = "Dinosaur"` doesn't make sense.
# + [markdown] id="IxoUgdVH4C1-"
# 2. Variables in Python are unusable until a value is assigned.
# + colab={"base_uri": "https://localhost:8080/", "height": 162} executionInfo={"elapsed": 964, "status": "error", "timestamp": 1606550657776, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="4jfdyhoc0WLl" outputId="9fb51181-03e1-4d31-99b3-c85a37a06e96"
# Fails with NameError: the variable was never assigned a value
unassigned_variable
# + [markdown] id="SmclClew4crM"
# 3. An identifier can only be on the left-hand side of the equal sign.
# + colab={"base_uri": "https://localhost:8080/", "height": 128} executionInfo={"elapsed": 776, "status": "error", "timestamp": 1606550745250, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="JQ3ENdKZ4Ujs" outputId="0d2d5298-9f97-443f-a750-4ea8741b4ce2"
# Incorrect: a literal cannot appear on the left of "=" — raises SyntaxError
18 = age
# + [markdown] id="2SuUNan040i7"
# 4. Multiple assignments are possible in Python.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 763, "status": "ok", "timestamp": 1606550853082, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="epDk9DHk4t53" outputId="713b569f-09b2-4856-b7e6-5656532f3a94"
# Multiple assignment: values are matched to names positionally
age, city = 18, "Kuala Lumpur"
print(age,city)
# + [markdown] id="_OHHyeBb5LZN"
# 5. You can also assign same value to multiple Python variables.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 749, "status": "ok", "timestamp": 1606550922689, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="ChXCPQw35JK0" outputId="7a158461-a380-4f94-91cd-0898daa7570c"
# Chained assignment: both names are bound to the same value
age = num = 20
print(age,num)
# + [markdown] id="Ct43jSy_5ejV"
# ## Swapping Variables
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 932, "status": "ok", "timestamp": 1606551205783, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="gGewKX575YkQ" outputId="590db801-fc5f-486c-fe6b-d609514c4fe4"
# Tuple unpacking swaps the two values without a temporary variable
var1, var2 = "mouse", "keyboard"
var1, var2 = var2, var1
print(var1,var2)
# + [markdown] id="CGqP7hKR6iAz"
# ## Deleting variables
# + colab={"base_uri": "https://localhost:8080/", "height": 195} executionInfo={"elapsed": 722, "status": "error", "timestamp": 1606551315279, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="ZO4FQZEk6fPK" outputId="f7daa65d-d3bc-4efb-9cce-40b5f429d0c7"
# "del" removes the name entirely, so the final print raises NameError
var = "I am <NAME>"
del var
print(var)
# + [markdown] id="_nEBrHnt7E5T"
# ## End: Variables
#
# You have known more about variables than you have ever been. Let's go over to data types!
# + [markdown] id="EjdkM8rj7DXR"
# # Data Types
#
# Data types are important in order to perform operations. Different data types have different performable operations.
#
# There are four numeric data types in Python: `int`, `float`, `long` (removed in Python 3.x) and `complex`.
#
# You can use *type()* to check the data types of variables.
# + [markdown] id="-dd8XpKm7V-y"
# ## Integers or int
#
# Integers are the most basic data type, holding signed integers.
#
# "Signed" means it has positive and negative signs.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 737, "status": "ok", "timestamp": 1606551871166, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="XR3BD2sw83np" outputId="a0b761d8-3f77-4154-fed3-7fd3cf1c8727"
# A signed (negative) integer — type is int
var1 = -11
print(type(var1))
# + [markdown] id="1GkL3BMm78En"
# Integers can be of any length, with the only limitation being the available memory.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 707, "status": "ok", "timestamp": 1606551922506, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="mbUe1IOh9JeY" outputId="42c12c4c-640d-423f-a812-ca2ef124e721"
# Python ints have arbitrary precision — limited only by available memory
var2 = 9999999999999999999999999999999999999999999999999
print(type(var2))
# + [markdown] id="gwJuTS3l9QQJ"
# ## Float or float
#
# Float holds floating-point real values. The difference between int and float is that int can only store "3" but float can store "3.3".
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 680, "status": "ok", "timestamp": 1606552041655, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="6JWK5Mai9ORa" outputId="02141bc3-a2a9-40ec-fb37-37c2be5c68a6"
# A floating-point value — type is float
var3 = 3.3
print(type(var3))
# + [markdown] id="KompMSZj9sgA"
# Let's do a quick example to demonstrate the difference between int and float.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 744, "status": "ok", "timestamp": 1606552163187, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="aoqzDYdB98c7" outputId="28e5cd17-b76d-4aa4-c321-cf830311fddc"
# Store a float in var_float
var_float = 3.1415
print(type(var_float))  # <class 'float'>
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 712, "status": "ok", "timestamp": 1606552208729, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="dHmgx0ie9rXH" outputId="e372a30c-9dc1-4891-ee62-d5ef8152f884"
var_int = int(var_float) # Convert into int — the fractional part is truncated
print(var_int)
print(type(var_int))
# + [markdown] id="CJFYwLly-a4L"
# ## Long
#
# The `long` type was removed in Python 3.x (a plain `int` now has unlimited precision), so let's not waste time on this.
# + [markdown] id="oVDNfqjX-nV0"
# ## Complex
#
# This data type holds a complex number. Essentially, a complex number looks like this:
#
# $a + bj$
#
# where $a$ is the real part, $b$ is the coefficient of the imaginary part, and $j$ is the imaginary unit.
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 720, "status": "ok", "timestamp": 1606553369607, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="RAuGeeb9-UJm" outputId="8c1e4be5-1025-44c3-8791-a80b2a57a488"
# A complex literal: real part 2, imaginary coefficient 3 (written with j)
var_cplx = 2 + 3j
print(type(var_cplx))
# + [markdown] id="XzrHUTZYCw7-"
# Other than `type()`, you can also check if a Python variable belongs to a particular class using `isinstance(variable,class)`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 712, "status": "ok", "timestamp": 1606553560513, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="EbaCyoSdCvjt" outputId="d56fd00e-9654-4a22-9f40-db88138b60ff"
# isinstance(variable, class) tests whether the value belongs to the class
print(isinstance(var_cplx,complex))
# + [markdown] id="I5VLVZyWDgfU"
# ## Strings
#
# `String` is a sequence of characters. In Python, there is no `char` data type, which is common in C++ or Java (wanna learn more about Java?).
#
# You can delimit a string using single quotes or double quotes, but pick one method and stick to it. (Check Module 2-1 in this course or PEP8 for more information.)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 714, "status": "ok", "timestamp": 1606553716184, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="9bl-m99RDeK2" outputId="f57cda72-c25b-4fc1-a63d-65bdd2d0d823"
# A string delimited with double quotes
my_city = "Kuala Lumpur"
print(my_city)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 692, "status": "ok", "timestamp": 1606553804374, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="sd4Tkmn8EELN" outputId="9d3a0523-cdd8-4fe2-9ff4-91166dcfa9cd"
# The same string delimited with single quotes — both forms are equivalent
my_city = 'Kuala Lumpur'
print(my_city)
# + [markdown] id="o4EzNuSQE2PR"
# **What can you do with a `String`?**
# + [markdown] id="mNw1UnitEjDi"
# 1. Spanning `string` across lines
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1330, "status": "ok", "timestamp": 1606553980844, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="PAgkeprfEZtp" outputId="d021eaed-fd42-4698-9cad-ae851ea7ef3e"
# A triple-quoted string may span several lines; the line break is preserved
var_str = """I love
Skymind"""
print(var_str)
# + [markdown] id="YwEBVfKZFIlp"
# Quiz: What do `\n` and `\t` mean?
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 703, "status": "ok", "timestamp": 1606554222388, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="1MKmBg5TFEpH" outputId="e637d465-882c-494b-bad0-f96cc9c173bc"
# \n is the newline escape sequence
var_str = "This is \na newline"
print(var_str)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 714, "status": "ok", "timestamp": 1606554226380, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="m1tkp7oYFa37" outputId="64438c12-eb7d-477c-bb83-4d456c16e4bc"
# \t is the horizontal-tab escape sequence
var_str = "This is \ta tab"
print(var_str)
# + [markdown] id="ezWUEHhrGHNa"
# 2. Displaying part of a `string`
#
# + [markdown] id="UCrCZ2vUHgoP"
# You can display a character from a string using indexing. Note that all indexing in Python starts with 0 (beware, Matlab users!)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 666, "status": "ok", "timestamp": 1606554427499, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="09XbMEsfFtof" outputId="ee00fba2-2f7f-418b-dc16-3bcded8e5db0"
# Indexing starts at 0, so [3] is the fourth character
longest_english_word = "pneumonoultramicroscopicsilicovolcanoconiosis"
print(longest_english_word[3])
# + [markdown] id="Y5gd_EGrG2X_"
# **Fun fact:** "*pneumonoultramicroscopicsilicovolcanoconiosis*" is actually the longest English word in any major English dictionary.
# + [markdown] id="sUfZ96mWHb-v"
# You can also display more than one character using slicing.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 744, "status": "ok", "timestamp": 1606554668053, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="OMRr80CMGnXD" outputId="b8fd0e1f-0296-4783-eac7-21c44ab03b8b"
# Slicing [3:5] takes indices 3 and 4 — the stop index is excluded
print(longest_english_word[3:5])
# + [markdown] id="a8BHvfSFHtkX"
# Quiz: Why are only two characters displayed and not three?
# + [markdown] id="drjh1_-2H3dL"
# 3. Formatting `string`
#
# String formatters allow us to print characters and values at once. There are three methods:
# + [markdown] id="h06sYUOwISQk"
# 3a. Using `%` operator
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 735, "status": "ok", "timestamp": 1606555051808, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="wCDbVE19Hsjz" outputId="61c12f2e-27ad-437c-ff18-dd949d134e78"
# %-formatting: %s inserts a string, %i an integer
degree_year = 4
university = "Universiti Tun Hussein Onn Malaysia"
print("I am a graduate of %s after completing my %i-year degree." % (university, degree_year))
# + [markdown] id="NALGZabjJMaR"
# 3b. Or you can use the `.format` method
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 725, "status": "ok", "timestamp": 1606555331934, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="soGDOvlEKBYt" outputId="d6ce8a29-08ce-44b1-a394-2f2489a1f002"
# .format with positional placeholders {0} and {1}
workplace = "Skymind Holdings Berhad"
travelling_mins = 30
print("I am working at {0} and I spent {1} minutes travelling to work daily.".format(workplace, travelling_mins))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 780, "status": "ok", "timestamp": 1606555418097, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Pv2bip9eKOpW" outputId="e2bbc2d1-7018-431f-f418-23cbc3ce794f"
# .format with named placeholders supplied as keyword arguments
print("I am working at {a} and I spent {b} minutes travelling to work daily.".format(a = "Skymind Holdings Berhad", b = 30))
# + [markdown] id="Uiz_PXtAKosl"
# 3c. Or you can use f-strings
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 724, "status": "ok", "timestamp": 1606555506108, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="opL55TKZKc6Y" outputId="e6d56ceb-1879-43ed-85a3-a5a7644d215e"
# f-string: expressions inside {} are evaluated in the current scope
print(f"I am working at {workplace} and I spent {travelling_mins} minutes travelling to work daily.")
# + [markdown] id="24AB2-s1K8R7"
# Quiz: Without declaring `workplace` and `travelling_mins` in the previous block, why is there no error?
# + [markdown] id="w5OWVVP9LJay"
# 4. Concatenating strings
# + [markdown] id="TGUNPKhFLdAb"
# Concatenate means joining multiple things together.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 780, "status": "ok", "timestamp": 1606555621320, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Oyv7mBEaKyaQ" outputId="8bb8edc5-1f71-4c9e-a6cc-6410f7fe1849"
# "+" concatenates two strings
print("Deeplearning"+"4java")
# + [markdown] id="_-mSAIbELmfI"
# However, you cannot concatenate values of different types.
# + colab={"base_uri": "https://localhost:8080/", "height": 162} executionInfo={"elapsed": 681, "status": "error", "timestamp": 1606555721765, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="9ujQFy01LVSK" outputId="0812c667-25ba-4792-ec60-b1dc56f1cd19"
# Fails with TypeError: cannot concatenate str and int
print("Deeplearning"+4+"java")
# + [markdown] id="HRUu5REnLyIu"
# *P/s: Want to know more about Deeplearning4java?*
# + [markdown] id="owAMRKKEMC0W"
# ## Lists
#
# A `list` is a collection of values. It can contain similar or different types of values.
#
# To define a list, you have to put the values separated with commas `,` in square brackets `[]`. No type declaration needed for lists either.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 753, "status": "ok", "timestamp": 1606556505113, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="bmGH6JL3Lt1D" outputId="b0d7b0ff-352e-4371-b169-56eea8dea338"
# A list may mix value types (strings and ints here)
days_in_week = ["Sunday", "Monday", "Tuesday", 3, 4, 5, 6]
print(days_in_week)
# + [markdown] id="vYy92Y6KOCE3"
# **After defining a list, what can you do with a list?**
# + [markdown] id="Q1wel58GMGV-"
# 1. You can slice a list using the same method like a `string`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 767, "status": "ok", "timestamp": 1606556507764, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="B4XS_qZWNlH2" outputId="a7774320-e6d8-4c21-8682-9dc618363e8a"
# Slicing a list works like slicing a string — the stop index is excluded
print(days_in_week[1:3])
# + [markdown] id="XbMbPWBsNrra"
# Quiz: Shouldn't the output be `['Sunday', 'Monday', 'Tuesday']`?
# + [markdown] id="elaMzadGNkjf"
# 2. Finding the length of a list.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 984, "status": "ok", "timestamp": 1606556511124, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="-XuoXtsVOJt5" outputId="5a68853b-4f05-4d4f-b4e6-8e4d492e293f"
# len() returns the number of elements in the list
print(len(days_in_week))
# + [markdown] id="bqQfin-KOxJc"
# 3. Reassigning elements in a list
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 727, "status": "ok", "timestamp": 1606556575579, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="iX2Dt5x8Op4S" outputId="68adffec-41d1-480c-b543-b168905b8d01"
# Lists are mutable: replace the element at index 2 in place
days_in_week[2] = 2
print(days_in_week)
# + [markdown] id="ZeSP6qc5PCMs"
# 4. Iterating on the `list`
# + [markdown] id="5qZ84CDgPLce"
# We can use the `for` loop to iterate over a list. By doing this, we can access each element one by one and is often helpful when we need to perform some operations on each element in the list.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 490, "status": "ok", "timestamp": 1606556743681, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Dsa1N35kO-RM" outputId="8fea445f-e470-4d54-87de-6b80b927ead3"
# Build a list of ten ints, then iterate over it element by element with a for loop.
num_in_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for n in num_in_list:
    # Loop body must be indented — the original line had lost its indentation,
    # which raises an IndentationError instead of printing the numbers.
    print(n)
# + [markdown] id="bNz3jEw7Pq7G"
# 5. Creating multi-dimensional lists
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 711, "status": "ok", "timestamp": 1606556953159, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="FWyl36WgPnO1" outputId="d2636dae-c87a-44e1-e4d5-5d4476b1e880"
# A nested (two-dimensional) list: two inner lists of three elements each
multidim_list = [[1, 2, 3], [4, 5, 6]]
print(multidim_list)
# + [markdown] id="sunAZoJ9QbJw"
# Quiz: How many rows and columns in `multidim_list`?
# + [markdown] id="6deyRxGuQowM"
# ## Tuples
#
# A `tuple` is like a `list`, but you declare it using parentheses `()` instead.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 702, "status": "ok", "timestamp": 1606557191462, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="0XB50-m_P55t" outputId="56ae743b-121b-4944-864d-4eceb2afa836"
# A tuple is declared with parentheses instead of square brackets
places_in_selangor = ("Petaling Jaya", "USJ", "Gombak")
print(places_in_selangor)
# + [markdown] id="LHEKixkkRdOz"
# What you can do with a `tuple` you can also do with a `list`, but not the other way round (you will understand this later).
# + [markdown] id="jGkJWlBPRWqI"
# 1. Similarly to `list`, you can access and slice a `tuple`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 871, "status": "ok", "timestamp": 1606557314911, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="cbVmVz4yRUor" outputId="37b5c9a6-fd0b-459d-daa8-9858d866af5e"
# Indexing a tuple works exactly like indexing a list
print(places_in_selangor[2])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 742, "status": "ok", "timestamp": 1606557336799, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="OW8Tzsz-Rytz" outputId="55e66ca4-7abb-4703-bf0e-e916d91785c6"
# Slicing a tuple works exactly like slicing a list
print(places_in_selangor[0:2])
# + [markdown] id="nk75LbWTR6XI"
# 2. What you can do with `list` but not with `tuple` is changing its size or elements. A `tuple` is immutable once declared.
# + colab={"base_uri": "https://localhost:8080/", "height": 162} executionInfo={"elapsed": 896, "status": "error", "timestamp": 1606557435273, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="1gsSauRNR4G_" outputId="cc4f6edb-24aa-4707-b819-0373c694d1e0"
places_in_selangor[1] = "Bangi" # Changing an element — raises TypeError: tuples are immutable
# + colab={"base_uri": "https://localhost:8080/", "height": 162} executionInfo={"elapsed": 706, "status": "error", "timestamp": 1606557465346, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="xU0NKBZRSQHK" outputId="5ee44645-ea4c-4210-b07b-90c380231878"
places_in_selangor[3] = "Bangi" # Adding a new element — raises TypeError: tuples cannot change size
# + [markdown] id="UsXvXfNWSfhu"
# ## Dictionaries
#
# A dictionary or `dict` holds key-value pairs. You declare it using curly braces `{}`, with pairs separated by commas `,` while keys and values are separated by a colon `:`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1745, "status": "ok", "timestamp": 1606557651685, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="gJefIPRLSUb9" outputId="4685c5e8-2115-41ec-812b-f626f260f16a"
# A dict maps keys to values: key:value pairs inside curly braces
me = {"city":"Kuala Lumpur", "age":18}
print(me)
# + [markdown] id="vS53fv-CTIa0"
# **What you can do with dictionaries?**
# + [markdown] id="3MW8dSkzTHOH"
# 1. You can check its type using the `type()` function.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2015, "status": "ok", "timestamp": 1606557711052, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="l9kcPviKTEd7" outputId="6883de89-88e0-428e-d9ba-633a90e42818"
# type() reports <class 'dict'>
print(type(me))
# + [markdown] id="yG3yM0fNTVhb"
# 2. You can access a value by mentioning its key.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 798, "status": "ok", "timestamp": 1606557765868, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Ky8tZRW6TTA-" outputId="e1ee6d25-8397-490e-afed-2324ea7691e2"
# Look up a value by its key
print(me["age"])
# + [markdown] id="oqaJX7dgTjxY"
# 3. You can reassign a value to a key.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 745, "status": "ok", "timestamp": 1606557832002, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="WCX9tABmTg2C" outputId="a541b09b-b02e-4a8c-f49e-c9a4c75cfa3c"
# Assigning to an existing key replaces its value
me["city"] = "Petaling Jaya"
print(me["city"])
# + [markdown] id="afOBBu8MTyyz"
# 4. You can use `keys()` function to get a list of keys in the dictionary
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 740, "status": "ok", "timestamp": 1606557904525, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Jt9FoY9zTxAZ" outputId="a7823ec9-97e7-4ce6-f661-a2525c1825ca"
# keys() returns a view of the dictionary's keys
print(me.keys())
# + [markdown] id="0vV1WYaTUxSm"
# P/s: Another data type which is quite similar to dictionaries is the set, or `set`. We are not going into it here, but you can read up on it further on the Internet. Happy exploring!
# + [markdown] id="eRgymntTUIDT"
# ## Boolean or bool
#
# A `bool` value can be True or False.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 711, "status": "ok", "timestamp": 1606558058984, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="SB5V9op-UCtY" outputId="3e7d840f-58a1-4f17-cf85-97be2f55bbd8"
# A comparison expression evaluates to a bool (True here)
var_bool = 2 > 1
print(type(var_bool))
# + [markdown] id="hjlOrozHVULX"
# ## Type Conversion
#
# Data types in Python are very dynamic. You can convert a variable into another type.
# + [markdown] id="xmspjcIjWeHx"
# ### Conversion involving integers
# + [markdown] id="0zuenCdeVhbI"
# 1. Converting to integer using `int()`
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 706, "status": "ok", "timestamp": 1606558406895, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="rO7qIUJrUobJ" outputId="7d4b7bb4-ea1c-4c15-efd9-35f69b9e63e5"
# int() truncates toward zero — prints 9, not 10
print(int(9.81))
# + [markdown] id="LNYevQUhVsOH"
# Notice that it truncated .81 instead of rounding the number off to 10.
#
# Trivia: Calling all engineers, what does 9.81 mean to you?
# + [markdown] id="BSp8E8jNWBVF"
# 2. Converting boolean into integer
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 570, "status": "ok", "timestamp": 1606558511262, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="w3G7-VFrVoKt" outputId="a4d1e038-eb5a-4ff1-e5b3-d04451c28678"
# True converts to 1
print(int(True))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 497, "status": "ok", "timestamp": 1606558517297, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="1FXh30JnWW1_" outputId="9680550e-3228-4e9a-9c0a-acdbbcda2b8e"
# False converts to 0
print(int(False))
# + [markdown] id="t3mQrKQBWvpG"
# 3. However, you cannot convert a non-numeric string into an integer. An error will show up.
# + colab={"base_uri": "https://localhost:8080/", "height": 162} executionInfo={"elapsed": 818, "status": "error", "timestamp": 1606558720149, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="kSjxiFsJW1J2" outputId="1e3fc7a4-22aa-44ea-b335-a6c914042606"
# Fails with ValueError: the string is not a number
print(int("Pavillion KL"))
# + [markdown] id="gct2_AXDW9Ca"
# 4. But, if the string has only numbers, conversion is still possible.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 749, "status": "ok", "timestamp": 1606558698860, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="QX345iVoXCGX" outputId="02fe2251-9b9c-46fa-d3e1-5abe79111541"
# A purely numeric string converts fine
print(int("18"))
# + [markdown] id="wPiteDmmWZ1t"
# ### Conversion involving floats
# + [markdown] id="KBbgs8kjXM02"
# 1. You can convert an integer into a float using `float()`.
# + id="WyFmirA4XWqT"
my_age = 18  # an int, converted to float in the next cell
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 727, "status": "ok", "timestamp": 1606558837326, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="hLp4LJPIWYRU" outputId="d32c3290-6a8d-4bf4-8ecf-f3f4b7cca098"
# float() widens the int 18 to 18.0
print(float(my_age))
print(type(float(my_age)))
# + [markdown] id="x0u1BjeTXquG"
# 2. Converting a decimal into float
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 658, "status": "ok", "timestamp": 1606558882807, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Aj0DcPijXks0" outputId="18e94d32-5c1b-4429-8e6d-2f78b4261a17"
# .7 is already a float literal, so float() returns it unchanged
print(float(.7))
print(type(float(.7)))
# + [markdown] id="nKn5B8R6Xz0x"
# 3. Converting a boolean into float
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 716, "status": "ok", "timestamp": 1606558923806, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="UCWteNk3Xxkg" outputId="4c5dbb36-9ab3-4660-a8b5-dfe6883fca3f"
# True converts to 1.0
print(float(True))
print(type(float(True)))
# + [markdown] id="OWIFg6RAYENh"
# ### Conversion involving strings
# + [markdown] id="nL1SAoN3YJXZ"
# 1. Converting a value into a string using `str()`.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 717, "status": "ok", "timestamp": 1606559013897, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="HH-JgM_TX7kQ" outputId="c395f361-bb8d-4f89-aa35-901a07d70fc4"
# str() renders a float as text
print(str(2.1))
print(type(str(2.1)))
# + [markdown] id="6yVo-2QaYS4w"
# 2. Converting an integer into string
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 679, "status": "ok", "timestamp": 1606559041538, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Y7TVAShdYNV4" outputId="b5bea907-7ff2-4a3f-e65e-68c2f220206d"
# str() renders an int as text
print(str(2))
print(type(str(2)))
# + [markdown] id="ojzMyrkpYaC6"
# 3. Converting a boolean into string
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 824, "status": "ok", "timestamp": 1606559066960, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="KYZD9t0zYYUW" outputId="e45d4b4c-30b1-44e2-b137-3f10336aee71"
# str() renders a bool as the text "True"
print(str(True))
print(type(str(True)))
# + [markdown] id="InpznQkgYgiN"
# 4. Converting a list, tuple or dictionary into a string
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 678, "status": "ok", "timestamp": 1606559119972, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="025z2h9KYefP" outputId="c311a57c-7d3b-4a4c-874c-05c1d6d9e767"
# str() renders the whole list, brackets and commas included
print(str([1,1,2,3]))
# + [markdown] id="8R6c1XkVYvU0"
# Trivia: Guess this number sequence!
# + [markdown] id="TWdI_3lHZHAG"
# ### Conversion involving booleans
# + [markdown] id="uy-oPHQOZOdY"
# 1. You can convert a value into boolean by using `bool()`
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 747, "status": "ok", "timestamp": 1606559338464, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="P6prOHgrYpP_" outputId="c977620a-f45d-487f-eb1e-0f175b2673e2"
# Any non-zero number is truthy
print(bool(18))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 791, "status": "ok", "timestamp": 1606559351056, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="lrIq8-Y7ZfnM" outputId="b57eca94-bb7e-4d47-cce2-f153a14815ad"
# Zero is falsy
print(bool(0))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 715, "status": "ok", "timestamp": 1606559363535, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="jM7i--4vZj20" outputId="3b389669-4371-43e8-ef9f-6e222cb9a0ec"
# bool(True) is simply True
print(bool(True))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 711, "status": "ok", "timestamp": 1606559381126, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="IhdO2QR1Zm02" outputId="f2257d4b-47f0-4741-888a-aafdd8ea1eb9"
print(bool(0.1))
# + [markdown] id="c-WpKePxZur2"
# 2. You can also convert a list into a boolean
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 725, "status": "ok", "timestamp": 1606559430816, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="Z-qvGzLDZqHC" outputId="d790a249-c0e0-41cc-be6d-6f4440a96be2"
print(bool([1,-1]))
# + [markdown] id="p197O_X5Z7if"
# 3. The function returns false if input is empty
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 574, "status": "ok", "timestamp": 1606559465172, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="GjknvU4aZyj0" outputId="b7d22b34-92ff-4e23-b277-036cf3f33d2e"
print(bool())
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 666, "status": "ok", "timestamp": 1606559477448, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="YTYcWobLZ_u0" outputId="75ce5040-d031-4076-abb8-615198e13682"
print(bool([]))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 678, "status": "ok", "timestamp": 1606559491561, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="IuT9US6TaCvn" outputId="c6818131-57f2-4fd3-dbda-eea0a963834c"
print(bool({}))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 485, "status": "ok", "timestamp": 1606559504397, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="k5_khHu-aGL1" outputId="7c4d556b-4f8e-48eb-b805-724511dad57b"
print(bool(None))
# + [markdown] id="OKq6975haMjU"
# ### Conversion involving lists
# + [markdown] id="yOf1C4cJaSaj"
# 1. You can convert a `string` into a `list`
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 712, "status": "ok", "timestamp": 1606559589000, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="U8jGbH2oaJWw" outputId="a1240fa7-0516-45a8-cf70-2f59e2b4e5f7"
print(list("123456789"))
# + [markdown] id="Pyn1ZuOrat5X"
# 2. You can also convert a `set` into a `list`
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 657, "status": "ok", "timestamp": 1606559619820, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="eFg9JLUTad90" outputId="dd199262-379f-46fa-e177-dfe98d87e215"
print(list({1, 2, 3, 4, 5, 6, 7, 8, 9}))
# + [markdown] id="EQW7Twvia3wo"
# 3. You can also convert a `dict` into a `list`
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 732, "status": "ok", "timestamp": 1606559791783, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="BNy18fTfalfu" outputId="adca57fb-b943-45e9-cad1-b2255ab770f0"
print(list({"age":18, "workplace":"Skymind"}))
# + [markdown] id="nIWR7zgMbRsn"
# ### Conversion involving tuples
# + [markdown] id="-u8YqbzcbWHG"
# 1. You can use `tuple()` to convert values into tuple
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 762, "status": "ok", "timestamp": 1606559886221, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="OwKo4D2SbLIX" outputId="2c533732-f1b7-4e68-bda5-517130c5e20c"
print(tuple({1, 2, 3, 4}))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 885, "status": "ok", "timestamp": 1606559912228, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "17516954092065775720"}, "user_tz": -480} id="skti_yjobjBb" outputId="3545c60a-1b04-4218-d33c-7769b24a54a7"
print(tuple(list(set([1,2])))) # composite functions
# + [markdown] id="Dm_vX989b1K2"
# # The End
#
# That is all you need to know about variables and data types (for now)!
#
# We did not touch Sets `set`, which is another data type in Python. This should be your homework for the week: **find out what is a set in Python.**
#
# References:
# 1. https://www.programiz.com/python-programming/variables-datatypes
|
solution/basics/02_ Variables and Data Types.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/PatrikDurdevic/Don-t-Patronize-Me-FER/blob/main/BERT.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="TtScpUGdiK6C" outputId="7d7dd2bb-a0f5-4fa0-efb4-55a49af1a4d7"
from urllib import request

# Download the SemEval-2022 "Don't Patronize Me!" helper module from GitHub
# so it can be imported locally as `dont_patronize_me` below.
module_url = f"https://raw.githubusercontent.com/Perez-AlmendrosC/dontpatronizeme/master/semeval-2022/dont_patronize_me.py"
module_name = module_url.split('/')[-1]
print(f'Fetching {module_url}')
#with open("file_1.txt") as f1, open("file_2.txt") as f2
# urlopen yields bytes, so decode explicitly as UTF-8 before writing the
# module next to the notebook.
with request.urlopen(module_url) as f, open(module_name,'w') as outf:
    a = f.read()
    outf.write(a.decode('utf-8'))
# + id="MMDPTEeEx2tR" outputId="ae2de600-591e-4206-b696-907c8c6bb15a" colab={"base_uri": "https://localhost:8080/"}
# !pip install transformers
# + id="3P7dRyVFhjZH"
import torch
import transformers
import pandas as pd
import numpy as np
import spacy
from tqdm import tqdm
from dont_patronize_me import DontPatronizeMe
# + id="_Hmi4xUTh5uT"
# Load task 1 (binary PCL detection) of the Don't Patronize Me! dataset.
dpm = DontPatronizeMe('.', 'dontpatronizeme_pcl.tsv')
dpm.load_task1()
# NOTE(review): the column renamed here is 'country', yet it is used as the
# paragraph text below — presumably the loader's column order puts the text
# in that slot; confirm against dont_patronize_me.py.
dpm.train_task1_df.rename(columns={'country': 'sentence'}, inplace=True)
sentences = dpm.train_task1_df.sentence.values
labels = dpm.train_task1_df.label.values
# + colab={"base_uri": "https://localhost:8080/", "height": 162, "referenced_widgets": ["9c157147e40a4fe5b8b49864acee35a7", "<KEY>", "0918c6b0c4a9478f87e14f722ad7c823", "<KEY>", "0cf4ed9619ab4a389200c07ca05160ff", "18f8ea78cb1e4e0abe75d8269f2c21c7", "9bd5a32168d04af88eb043b8d5ec4ffc", "956e6a9fd6874ee5832159c393f3ef0f", "bb17dac4c2e747988b7766efd90115e9", "8bf538eb955948ebb03044e905f29aec", "<KEY>", "<KEY>", "f54fdd3d34734ea395847d40ecf6d636", "067eaf6f85944d0b8a262f22e96292bc", "<KEY>", "608ddbe423a64e269d25a4122d64d890", "4954e633eba1486c991251f7a22d15a3", "<KEY>", "<KEY>", "71f7a4ea7e6d4372856abb6b8e8f32ff", "e13b184c86714233a1d6019bb5edf903", "<KEY>", "3ebb0839d9da4e699439e93f585e3a56", "3828fa499f81486f8fca1b9aee38fd99", "d9d8680bbaa04d25a2d733eb8abb6fb9", "<KEY>", "7401ccde239a46d2b3085829dea497f7", "9eb3268b61e54997bdc03c0a4d7145b6", "375c5a52359b47b0b556b00e570dcb40", "<KEY>", "ec52570d56cc4406afa890ae4af7a3c6", "<KEY>", "de3beffb739549d9a590538383244809", "<KEY>", "<KEY>", "972a2de70be64c6793cc1bb01e216018", "<KEY>", "3a8a75c9f4ca4daba1fe29fa5c165b2c", "98a747dd560145719373d0f592c41549", "<KEY>", "<KEY>", "1bc6fe1a93134320b07e885ee23a3250", "5594d285cdeb4dfba32d0e8e2d30511e", "<KEY>"]} id="FoyQOaRwig1f" outputId="6e9404e8-2b4d-44a9-ad0c-c10aee5c67ba"
from transformers import BertTokenizer
# Load the BERT tokenizer.
print('Loading BERT tokenizer...')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# + colab={"base_uri": "https://localhost:8080/"} id="LS2i4WNii8nj" outputId="5177c5ed-0821-4812-e24f-7f4d5b644398"
# Print the original sentence.
print(' Original: ', sentences[0])
# Print the sentence split into tokens.
print('Tokenized: ', tokenizer.tokenize(sentences[0]))
# Print the sentence mapped to token ids.
print('Token IDs: ', tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentences[0])))
# + colab={"base_uri": "https://localhost:8080/"} id="tco2vHMEi_pz" outputId="36ba0de1-f7c1-4a30-f9d4-ab5e58ce3581"
max_len = 0

# For every sentence...
for sent in tqdm(sentences):

    # Tokenize the text and add `[CLS]` and `[SEP]` tokens.
    input_ids = tokenizer.encode(sent, add_special_tokens=True)

    # Update the maximum sentence length.
    max_len = max(max_len, len(input_ids))

# NOTE(review): max_len is only reported here, never used — the encoder
# below pads/truncates to a hard-coded max_length of 64 instead.
print('Max sentence length: ', max_len)
# + colab={"base_uri": "https://localhost:8080/"} id="Y0WQLZOnjEZ2" outputId="a56d34a6-0952-403c-88f6-c51fe056b932"
# Tokenize all of the sentences and map the tokens to their word IDs.
input_ids = []
attention_masks = []
# For every sentence...
for sent in tqdm(sentences):
# `encode_plus` will:
# (1) Tokenize the sentence.
# (2) Prepend the `[CLS]` token to the start.
# (3) Append the `[SEP]` token to the end.
# (4) Map tokens to their IDs.
# (5) Pad or truncate the sentence to `max_length`
# (6) Create attention masks for [PAD] tokens.
encoded_dict = tokenizer.encode_plus(
sent, # Sentence to encode.
add_special_tokens = True, # Add '[CLS]' and '[SEP]'
max_length = 64, # Pad & truncate all sentences.
pad_to_max_length = True,
return_attention_mask = True, # Construct attn. masks.
return_tensors = 'pt', # Return pytorch tensors.
)
# Add the encoded sentence to the list.
input_ids.append(encoded_dict['input_ids'])
# And its attention mask (simply differentiates padding from non-padding).
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
# Print sentence 0, now as a list of IDs.
print('Original: ', sentences[0])
print('Token IDs:', input_ids[0])
# + colab={"base_uri": "https://localhost:8080/"} id="WucQg45ZjaJu" outputId="ba32d1b0-c398-48af-91b6-9f51a2efc536"
from torch.utils.data import TensorDataset, random_split
# Combine the training inputs into a TensorDataset.
dataset = TensorDataset(input_ids, attention_masks, labels)
# Create a 90-10 train-validation split.
# Calculate the number of samples to include in each set.
train_size = int(0.9 * len(dataset))
val_size = len(dataset) - train_size
# Divide the dataset by randomly selecting samples.
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
print('{:>5,} training samples'.format(train_size))
print('{:>5,} validation samples'.format(val_size))
# + colab={"base_uri": "https://localhost:8080/"} id="BILxqOtAjekw" outputId="2d878e01-5101-490e-f48d-9669a5c2b876"
from torch.utils.data import DataLoader, WeightedRandomSampler, SequentialSampler
# The DataLoader needs to know our batch size for training, so we specify it
# here. For fine-tuning BERT on a specific task, the authors recommend a batch
# size of 16 or 32.
batch_size = 32

# Gather every training label so the sampler can be weighted inversely to
# class frequency (the PCL task is heavily imbalanced).
lbl = []
for i in tqdm(range(len(train_dataset))):
    lbl.append(train_dataset[i][2].item())
lbl = np.asarray(lbl)  # ndarray so `lbl == t` below is an elementwise comparison

# Number of samples per class, in np.unique's (sorted) label order.
class_sample_count = np.array(
    [len(np.where(lbl == t)[0]) for t in np.unique(lbl)])
weight = 1. / class_sample_count
# Per-sample weight = inverse frequency of that sample's class.
# (Assumes labels are 0..K-1 so they can index `weight` directly.)
samples_weight = np.array([weight[t] for t in lbl])
# Fix: the original assigned the .double() result to a misspelled dead
# variable (`samples_weigth`), so the dtype conversion was silently discarded.
samples_weight = torch.from_numpy(samples_weight).double()
# Drawing WITH replacement according to these weights yields class-balanced batches.
sampler = WeightedRandomSampler(samples_weight, len(samples_weight))

# Create the DataLoaders for our training and validation sets.
# We'll take training samples in (weighted) random order.
train_dataloader = DataLoader(
            train_dataset,  # The training samples.
            sampler = sampler,  # Select batches via the weighted sampler.
            batch_size = batch_size  # Trains with this batch size.
        )

# For validation the order doesn't matter, so we'll just read them sequentially.
validation_dataloader = DataLoader(
            val_dataset,  # The validation samples.
            sampler = SequentialSampler(val_dataset),  # Pull out batches sequentially.
            batch_size = batch_size  # Evaluate with this batch size.
        )
# + colab={"base_uri": "https://localhost:8080/", "height": 154, "referenced_widgets": ["00caf0ec303d4ce2a8467022bd3efdbb", "<KEY>", "<KEY>", "<KEY>", "cd14ac34c2c641fdb0882e799d5ceb5a", "<KEY>", "0fe9fac2882345e699c317c12a506d13", "<KEY>", "<KEY>", "103175a562164f49a525618d39bc3372", "<KEY>"]} id="uv0PWdoqjfgR" outputId="41d41038-ca6e-40a5-8ee1-1655014749b7"
from transformers import BertForSequenceClassification, AdamW, BertConfig
# Load BertForSequenceClassification, the pretrained BERT model with a single
# linear classification layer on top.
model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased", # Use the 12-layer BERT model, with an uncased vocab.
num_labels = 2, # The number of output labels--2 for binary classification.
# You can increase this for multi-class tasks.
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
# Tell pytorch to run this model on the GPU.
model.cuda();
# + id="iOqBCz_jjimm"
# Note: AdamW is a class from the huggingface library (as opposed to pytorch)
# I believe the 'W' stands for 'Weight Decay fix"
optimizer = AdamW(model.parameters(),
lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps = 1e-8 # args.adam_epsilon - default is 1e-8.
)
from transformers import get_linear_schedule_with_warmup
# Number of training epochs. The BERT authors recommend between 2 and 4.
# We chose to run for 4, but we'll see later that this may be over-fitting the
# training data.
epochs = 4
# Total number of training steps is [number of batches] x [number of epochs].
# (Note that this is not the same as the number of training samples).
total_steps = len(train_dataloader) * epochs
# Create the learning rate scheduler.
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0, # Default value in run_glue.py
num_training_steps = total_steps)
# + id="Zya6ioUOjpMX"
import numpy as np
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
    """Return the fraction of row-wise argmax predictions that match labels.

    preds  -- (batch, num_classes) array of logits or probabilities
    labels -- (batch,) array of integer class ids
    """
    predicted_classes = np.argmax(preds, axis=1).flatten()
    gold = labels.flatten()
    return np.mean(predicted_classes == gold)
import time
import datetime
def format_time(elapsed):
    """Render a duration given in seconds as an 'h:mm:ss' string,
    rounded to the nearest whole second."""
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
# + colab={"base_uri": "https://localhost:8080/"} id="FMR_tHhIkCV7" outputId="d8c5f44d-ab77-435a-eb0c-457d6712141b"
import torch
# If there's a GPU available...
if torch.cuda.is_available():

    # Tell PyTorch to use the GPU; every tensor and the model below are
    # moved onto this `device`.
    device = torch.device("cuda")

    print('There are %d GPU(s) available.' % torch.cuda.device_count())

    print('We will use the GPU:', torch.cuda.get_device_name(0))

# If not...
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
# + colab={"base_uri": "https://localhost:8080/"} id="Hwj7bRK4jsAT" outputId="f25e38c3-6c4e-42f8-b622-030215b79f01"
import random
import numpy as np
# This training code is based on the `run_glue.py` script here:
# https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128
# Set the seed value all over the place to make this reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
# We'll store a number of quantities such as training and validation loss,
# validation accuracy, and timings.
training_stats = []
# Measure the total training time for the whole run.
total_t0 = time.time()
# For each epoch...
for epoch_i in range(0, epochs):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
# Measure how long the training epoch takes.
t0 = time.time()
# Reset the total loss for this epoch.
total_train_loss = 0
# Put the model into training mode. Don't be misled--the call to
# `train` just changes the *mode*, it doesn't *perform* the training.
# `dropout` and `batchnorm` layers behave differently during training
# vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
# Progress update every 40 batches.
if step % 40 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = format_time(time.time() - t0)
# Report progress.
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using the
# `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
# Always clear any previously calculated gradients before performing a
# backward pass. PyTorch doesn't do this automatically because
# accumulating the gradients is "convenient while training RNNs".
# (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
model.zero_grad()
# Perform a forward pass (evaluate the model on this training batch).
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# It returns different numbers of parameters depending on what arguments
# are given and what flags are set. For our usage here, it returns
# the loss (because we provided labels) and the "logits"--the model
# outputs prior to activation.
model_output = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
loss, logits = model_output.loss, model_output.logits
# Accumulate the training loss over all of the batches so that we can
# calculate the average loss at the end. `loss` is a Tensor containing a
# single value; the `.item()` function just returns the Python value
# from the tensor.
total_train_loss += loss.item()
# Perform a backward pass to calculate the gradients.
loss.backward()
# Clip the norm of the gradients to 1.0.
# This is to help prevent the "exploding gradients" problem.
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# Update parameters and take a step using the computed gradient.
# The optimizer dictates the "update rule"--how the parameters are
# modified based on their gradients, the learning rate, etc.
optimizer.step()
# Update the learning rate.
scheduler.step()
# Calculate the average loss over all of the batches.
avg_train_loss = total_train_loss / len(train_dataloader)
# Measure how long this epoch took.
training_time = format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(training_time))
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
# Put the model in evaluation mode--the dropout layers behave differently
# during evaluation.
model.eval()
# Tracking variables
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
# Evaluate data for one epoch
for batch in validation_dataloader:
# Unpack this training batch from our dataloader.
#
# As we unpack the batch, we'll also copy each tensor to the GPU using
# the `to` method.
#
# `batch` contains three pytorch tensors:
# [0]: input ids
# [1]: attention masks
# [2]: labels
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
# Tell pytorch not to bother with constructing the compute graph during
# the forward pass, since this is only needed for backprop (training).
with torch.no_grad():
# Forward pass, calculate logit predictions.
# token_type_ids is the same as the "segment ids", which
# differentiates sentence 1 and 2 in 2-sentence tasks.
# The documentation for this `model` function is here:
# https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
# Get the "logits" output by the model. The "logits" are the output
# values prior to applying an activation function like the softmax.
model_output = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
(loss, logits) = model_output.loss, model_output.logits
# Accumulate the validation loss.
total_eval_loss += loss.item()
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences, and
# accumulate it over all batches.
total_eval_accuracy += flat_accuracy(logits, label_ids)
# Report the final accuracy for this validation run.
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
# Calculate the average loss over all of the batches.
avg_val_loss = total_eval_loss / len(validation_dataloader)
# Measure how long the validation run took.
validation_time = format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
# Record all statistics from this epoch.
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid. Loss': avg_val_loss,
'Valid. Accur.': avg_val_accuracy,
'Training Time': training_time,
'Validation Time': validation_time
}
)
print("")
print("Training complete!")
print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
# + id="yXahfuusj89y"
prediction_dataloader = validation_dataloader
# + colab={"base_uri": "https://localhost:8080/"} id="AiwrJF7xos7J" outputId="eed2c67a-33e1-4f33-8f71-e4d0dc242b6d"
# Prediction on test set
print('Predicting labels for {:,} test sentences...'.format(len(input_ids)))
# Put model in evaluation mode
model.eval()
# Tracking variables
predictions , true_labels = [], []
# Predict
for batch in prediction_dataloader:
# Add batch to GPU
batch = tuple(t.to(device) for t in batch)
# Unpack the inputs from our dataloader
b_input_ids, b_input_mask, b_labels = batch
# Telling the model not to compute or store gradients, saving memory and
# speeding up prediction
with torch.no_grad():
# Forward pass, calculate logit predictions
outputs = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask)
logits = outputs[0]
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Store predictions and true labels
predictions.append(logits)
true_labels.append(label_ids)
print(' DONE.')
# + id="X954ykTko2o2"
# Flatten the per-batch arrays into single label/prediction vectors.
true_labels = np.concatenate(true_labels, axis=0)
# NOTE(review): this thresholds the raw class-0 LOGIT at 0.5; logits are not
# probabilities, so an argmax over both logits (or a softmax first) may be
# the intended decision rule — confirm.
predictions = np.array([1 if x[0] < 0.5 else 0 for x in np.concatenate(predictions, axis=0)])
# + colab={"base_uri": "https://localhost:8080/"} id="YJXvEUPIo634" outputId="b4e12b30-f3c5-44f1-9a64-d2caca1f06c0"
from sklearn.metrics import classification_report
print(classification_report(true_labels, predictions))
# + id="X6siD6qZpsjm"
def predict(paragraph):
    """Classify a single paragraph with the fine-tuned BERT model.

    Returns 1 (patronizing/condescending language) or 0, as an int.

    NOTE(review): relies on the notebook-level globals `tokenizer`, `model`
    and `device`. The final `1 if x[0] < 0.5 else 0` thresholds the raw
    class-0 logit at 0.5 rather than taking an argmax — confirm intended.
    """
    input_ids = []
    attention_masks = []
    # Encode exactly one paragraph (same settings as training: pad/truncate to 64).
    for sent in tqdm([paragraph]):
        encoded_dict = tokenizer.encode_plus(
                            sent,                      # Sentence to encode.
                            add_special_tokens = True, # Add '[CLS]' and '[SEP]'
                            max_length = 64,           # Pad & truncate all sentences.
                            pad_to_max_length = True,
                            return_attention_mask = True,   # Construct attn. masks.
                            return_tensors = 'pt',     # Return pytorch tensors.
                       )
        input_ids.append(encoded_dict['input_ids'])
        attention_masks.append(encoded_dict['attention_mask'])
    input_ids = torch.cat(input_ids, dim=0)
    attention_masks = torch.cat(attention_masks, dim=0)
    dataset = TensorDataset(input_ids, attention_masks)
    dataloader = DataLoader(
                dataset,
                batch_size = 1
            )
    # A single batch of size 1; return on the first (only) iteration.
    for batch in dataloader:
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask = batch
        # No gradients needed for inference.
        with torch.no_grad():
            outputs = model(b_input_ids, token_type_ids=None,
                            attention_mask=b_input_mask)
        logits = outputs[0].detach().cpu().numpy()
        predictions = np.array([1 if x[0] < 0.5 else 0 for x in np.concatenate([logits], axis=0)])
        return predictions[0]
# + id="sJlo1IsEwKey" outputId="430e0171-406b-487e-f710-7771ce29faa9" colab={"base_uri": "https://localhost:8080/"}
tekst = input()
predict(tekst)
# + id="cjKvHRP1xlXg"
|
BERT.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME>
#
# In this notebook, I'll build a character-wise RNN trained on <NAME>, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
#
# This network is based off of <NAME>'s [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Also, some information [here at r2rt](http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html) and from [<NAME>](https://github.com/sherjilozair/char-rnn-tensorflow) on GitHub. Below is the general architecture of the character-wise RNN.
#
# <img src="assets/charseq.jpeg" width="500">
# +
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
# -
# First we'll load the text file and convert it into integers for our network to use.
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
# Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
#
# Here I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches.
#
# The idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the `split_frac` keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set.
def split_data(chars, batch_size, num_steps, split_frac=0.9):
    """Split character data into training and validation sets.

    Arguments
    ---------
    chars: 1-D array of character ids
    batch_size: number of sequences per batch (rows of the 2-D layout)
    num_steps: sequence length fed to the network at each step
    split_frac: fraction of the batches kept for training

    Returns train_x, train_y, val_x, val_y, where each targets array is the
    corresponding inputs array shifted one character to the right.
    """
    slice_size = batch_size * num_steps
    n_batches = len(chars) // slice_size
    usable = n_batches * slice_size

    # Inputs and one-step-shifted targets, trimmed so only full batches remain.
    inputs = chars[:usable]
    targets = chars[1:usable + 1]

    # Lay the stream out as batch_size rows of concatenated text:
    # shape (batch_size, n_batches * num_steps).
    inputs = np.stack(np.split(inputs, batch_size))
    targets = np.stack(np.split(targets, batch_size))

    # Keep the first split_frac of the batches for training, the rest for validation.
    boundary = int(n_batches * split_frac) * num_steps
    train_x, train_y = inputs[:, :boundary], targets[:, :boundary]
    val_x, val_y = inputs[:, boundary:], targets[:, boundary:]
    return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape
train_x[:,:10]
# I'll write another function to grab batches out of the arrays made by split data. Here each batch will be a sliding window on these arrays with size `batch_size X num_steps`. For example, if we want our network to train on a sequence of 100 characters, `num_steps = 100`. For the next batch, we'll shift this window the next sequence of `num_steps` characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
def get_batch(arrs, num_steps):
    """Yield aligned windows of `num_steps` columns from every array in arrs.

    Every array must share the shape (batch_size, slice_size). The generator
    walks non-overlapping windows left to right so the RNN cell state can be
    carried from one batch to the next; any trailing remainder narrower than
    num_steps is dropped.
    """
    _, slice_size = arrs[0].shape
    n_windows = slice_size // num_steps
    for start in range(0, n_windows * num_steps, num_steps):
        yield [arr[:, start:start + num_steps] for arr in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
              learning_rate=0.001, grad_clip=5, sampling=False):
    """Build the character-level RNN graph (TensorFlow 1.x API).

    num_classes -- vocabulary size (one-hot width of inputs/targets)
    batch_size, num_steps -- shape of the int32 input placeholders
    lstm_size, num_layers -- hidden units per LSTM layer, number of layers
    learning_rate -- Adam learning rate
    grad_clip -- global-norm gradient clipping threshold
    sampling -- when True, force batch_size = num_steps = 1 for generation

    Returns a namedtuple exposing the graph nodes listed in export_nodes.
    NOTE: the namedtuple fields are pulled out of locals() by NAME at the
    bottom of this function — do not rename the local variables listed in
    export_nodes.
    """
    if sampling == True:
        batch_size, num_steps = 1, 1

    # Start from a fresh default graph each call (TF1-style).
    tf.reset_default_graph()

    # Declare placeholders we'll feed into the graph
    with tf.name_scope('inputs'):
        inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
        x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')

    with tf.name_scope('targets'):
        targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
        y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
        # Flatten targets to (batch*steps, num_classes) to match the logits below.
        y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])

    # Dropout keep probability (fed at run time: <1 for training, 1 for eval).
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    # Build the RNN layers
    with tf.name_scope("RNN_cells"):
        lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
        cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)

    with tf.name_scope("RNN_init_state"):
        initial_state = cell.zero_state(batch_size, tf.float32)

    # Run the data through the RNN layers
    with tf.name_scope("RNN_forward"):
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)

    final_state = state

    # Reshape output so it's a bunch of rows, one row for each cell output
    with tf.name_scope('sequence_reshape'):
        seq_output = tf.concat(outputs, axis=1,name='seq_output')
        output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')

    # Now connect the RNN outputs to a softmax layer and calculate the cost
    with tf.name_scope('logits'):
        softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
                               name='softmax_w')
        softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
        logits = tf.matmul(output, softmax_w) + softmax_b
        # Histograms for TensorBoard monitoring of the output layer.
        tf.summary.histogram('softmax_w', softmax_w)
        tf.summary.histogram('softmax_b', softmax_b)

    with tf.name_scope('predictions'):
        preds = tf.nn.softmax(logits, name='predictions')
        tf.summary.histogram('predictions', preds)

    with tf.name_scope('cost'):
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
        cost = tf.reduce_mean(loss, name='cost')
        tf.summary.scalar('cost', cost)

    # Optimizer for training, using gradient clipping to control exploding gradients
    with tf.name_scope('train'):
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
        train_op = tf.train.AdamOptimizer(learning_rate)
        optimizer = train_op.apply_gradients(zip(grads, tvars))

    merged = tf.summary.merge_all()

    # Export the nodes (looked up in locals() by name — see docstring).
    export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
                    'keep_prob', 'cost', 'preds', 'optimizer', 'merged']
    Graph = namedtuple('Graph', export_nodes)
    local_dict = locals()
    graph = Graph(*[local_dict[each] for each in export_nodes])

    return graph
# ## Hyperparameters
#
# Here I'm defining the hyperparameters for the network. The two you probably haven't seen before are `lstm_size` and `num_layers`. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability.
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
# ## Training
#
# Time for training which is pretty straightforward. Here I pass in some data, and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by `save_every_n`) I calculate the validation loss and save a checkpoint.
# !mkdir -p checkpoints/anna
def train(model, epochs, file_writer):
    """Train `model` for `epochs` epochs, logging TensorBoard summaries.

    Relies on module-level globals: `train_x`, `train_y`, `num_steps`,
    `get_batch`, `time` and `tf`.  The LSTM state is threaded from one
    minibatch to the next within an epoch and reset between epochs.
    """
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Use the line below to load a checkpoint and resume training
        #saver.restore(sess, 'checkpoints/anna20.ckpt')
        batches_per_epoch = int(train_x.shape[1]/num_steps)
        total_iterations = batches_per_epoch * epochs
        for epoch_idx in range(epochs):
            # Fresh recurrent state at the start of each epoch.
            lstm_state = sess.run(model.initial_state)
            running_loss = 0
            for batch_idx, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
                iteration = epoch_idx*batches_per_epoch + batch_idx
                tic = time.time()
                feed = {model.inputs: x,
                        model.targets: y,
                        model.keep_prob: 0.5,
                        model.initial_state: lstm_state}
                summary, batch_loss, lstm_state, _ = sess.run(
                    [model.merged, model.cost, model.final_state, model.optimizer],
                    feed_dict=feed)
                running_loss += batch_loss
                toc = time.time()
                print('Epoch {}/{} '.format(epoch_idx+1, epochs),
                      'Iteration {}/{}'.format(iteration, total_iterations),
                      'Training loss: {:.4f}'.format(running_loss/batch_idx),
                      '{:.4f} sec/batch'.format((toc-tic)))
                file_writer.add_summary(summary, iteration)
# +
epochs = 20
batch_size = 100
num_steps = 100
# Split the character corpus into training/validation batches of shape
# (batch_size, num_steps).
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
# Grid-search over LSTM width, depth and learning rate; each run writes
# its summaries to its own TensorBoard log directory so the curves can
# be compared side by side.
for lstm_size in [128,256,512]:
    for num_layers in [1, 2]:
        for learning_rate in [0.002, 0.001]:
            log_string = 'logs/4/lr={},rl={},ru={}'.format(learning_rate, num_layers, lstm_size)
            writer = tf.summary.FileWriter(log_string)
            model = build_rnn(len(vocab),
                              batch_size=batch_size,
                              num_steps=num_steps,
                              learning_rate=learning_rate,
                              lstm_size=lstm_size,
                              num_layers=num_layers)
            train(model, epochs, writer)
# -
tf.train.get_checkpoint_state('checkpoints/anna')
# ## Sampling
#
# Now that the network is trained, we'll can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
#
# The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
#
#
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample a character index from the `top_n` most likely predictions.

    Parameters
    ----------
    preds : array-like
        Network output probabilities; singleton dimensions are squeezed out.
    vocab_size : int
        Number of character classes to choose among.
    top_n : int, optional
        Keep only this many of the highest-probability entries.

    Returns
    -------
    int
        Index drawn at random, weighted by the renormalized top-n probabilities.
    """
    # Copy: np.squeeze can return a *view* of `preds`, so zeroing entries
    # in place would silently mutate the caller's array.
    p = np.squeeze(preds).copy()
    # Zero out everything except the top_n most probable entries.
    p[np.argsort(p)[:-top_n]] = 0
    # Renormalize so the remaining probability mass sums to 1.
    p = p / np.sum(p)
    c = np.random.choice(vocab_size, 1, p=p)[0]
    return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
    """Generate `n_samples` characters of text from a trained checkpoint.

    The network is first primed with the string `prime` (previously the
    body hard-coded `prime = "Far"`, silently ignoring the argument; it
    is now honored), then fed its own sampled predictions one character
    at a time.  Relies on module-level `build_rnn`, `pick_top_n`,
    `vocab_to_int`, `int_to_vocab` and `vocab`.

    Returns the primed prefix plus the generated characters as one string.
    """
    samples = [c for c in prime]
    model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Build up the recurrent state from the priming characters.
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.preds, model.final_state],
                                        feed_dict=feed)
        c = pick_top_n(preds, len(vocab))
        samples.append(int_to_vocab[c])
        # Feed each sampled character back in to predict the next one.
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.preds, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])
    return ''.join(samples)
# Sample from several checkpoints to compare generated-text quality as
# training progressed (filenames encode iteration, LSTM size and the
# training loss at save time).
checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
|
tensorboard/Anna_KaRNNa_Hyperparameters.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
# Load the raw theses metadata.
theses = pd.read_csv('theses.csv')
pd.options.display.max_rows = 100
theses
# Inspect raw department names — many near-duplicate variants exist.
theses['department'].value_counts()
# Canonical department name for each known alias/variant.
_DEPARTMENT_ALIASES = {
    'Hubert Department of Global Health': 'Global Health',
    'Career Masters of Public Health': 'Public Health',
    'Executive Masters of Public Health - MPH': 'Public Health',
    'Executive Masters of Public Health': 'Public Health',
    'Computer Science and Informatics': 'Computer Science',
    'Mathematics and Economics': 'Economics and Mathematics',
    'Mathematics and Computer Science': 'Math and Computer Science',
    'Clinical Research Curriculum Program': 'Clinical Research',
    'Women\'s Studies': 'Women\'s, Gender, and Sexuality Studies',
    'Middle Eastern Studies': 'Middle Eastern and South Asian Studies',
    'Biostatistics': 'Biostatistics and Bioinformatics',
    'English': 'English and Creative Writing',
    'Film Studies': 'Film and Media Studies',
    'Nutrition and Health Sciences': 'Human Health',
    'Health Services and Research Health Policy': 'Health Policy and Management',
    'French Studies': 'French',
    'Environmental Health - MPH': 'Environmental Health',
    'Women\'s, Gender and Sexuality Studies': 'Women\'s, Gender, and Sexuality Studies',
    'Global Epidemiology': 'Epidemiology',
    'Applied Epidemiology': 'Epidemiology',
    'Classical Civilizations, Greek, and Latin': 'Classics',
    'Applied Public Health Informatics': 'Public Health',
    'Physics and Astronomy': 'Physics',
    'Environmental Sciences': 'Environmental Studies',
    'Biochemistry, Cell & Developmental Biology': 'Biology',
    'Environmental Health Sciences': 'Environmental Health',
    'Health Services Research and Health Policy': 'Health Policy and Management',
    'Health Care Outcomes Management': 'Health Policy and Management',
    'Anthropology and Religion': 'Religion and Anthropology',
    'Political Science and Mathematics': 'Mathematics and Political Science',
    'Russian': 'Russian and East European Studies',
    'Spanish': 'Spanish and Portuguese',
}

def department_cleanup(dep):
    """Map a raw department name to its canonical form.

    Names without a known alias are returned unchanged.  A dict lookup
    replaces the original 32-branch if-chain: same behavior, O(1), and
    far easier to extend.
    """
    return _DEPARTMENT_ALIASES.get(dep, dep)
# Normalize department names, report how many unique departments remain,
# and persist the cleaned dataset.
theses['department'] = theses['department'].apply(department_cleanup)
theses['department'].nunique()
theses.to_csv('theses_v2.csv')
|
src/department_cleanup.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CNTK 303: Deep Structured Semantic Modeling with LSTM Networks
#
# DSSM stands for Deep Structured Semantic Model, or more general, Deep Semantic Similarity Model. DSSM, developed by the MSR Deep Learning Technology Center(DLTC), is a deep neural network (DNN) modeling technique for representing text strings (sentences, queries, predicates, entity mentions, etc.) in a continuous semantic space and modeling semantic similarity between two text strings (e.g., Sent2Vec). DSSM has wide applications including information retrieval and web search ranking ([Huang et al. 2013](https://www.microsoft.com/en-us/research/publication/learning-deep-structured-semantic-models-for-web-search-using-clickthrough-data/); [Shen et al. 2014a](https://www.microsoft.com/en-us/research/publication/learning-semantic-representations-using-convolutional-neural-networks-for-web-search/),[2014b](https://www.microsoft.com/en-us/research/publication/a-latent-semantic-model-with-convolutional-pooling-structure-for-information-retrieval/)), ad selection/relevance, contextual entity search and interestingness tasks ([Gao et al. 2014a](https://www.microsoft.com/en-us/research/publication/modeling-interestingness-with-deep-neural-networks/), question answering ([Yih et al., 2014](https://www.microsoft.com/en-us/research/publication/semantic-parsing-for-single-relation-question-answering/)), image captioning ([Fang et al., 2014](https://arxiv.org/abs/1411.4952)), and machine translation ([Gao et al., 2014b](https://www.microsoft.com/en-us/research/publication/learning-continuous-phrase-representations-for-translation-modeling/)) etc.
#
# DSSM can be used to develop latent semantic models that project entities of different types (e.g., queries and documents) into a common low-dimensional semantic space for a variety of machine learning tasks such as ranking and classification. For example, in web search ranking, the relevance of a document given a query can be readily computed as the distance between them in that space. With the latest GPUs from Nvidia, we can train our models on billions of words. Readers that are interested in deep learning for text processing may refer to the tutorial by [He et al., 2014](https://www.microsoft.com/en-us/research/publication/deep-learning-for-natural-language-processing-theory-and-practice-tutorial/).
# We released the predictors and trained model files of the DSSM (also a.k.a. Sent2Vec).
#
# ## Goal
#
# To develop mechanism such that given a pair of documents say a query and a set of web page documents, the model would map the inputs to a pair of feature vectors in a continuous, low dimensional space where one could compare the semantic similarity between the text strings using the cosine similarity between their vectors in that space.
#
# 
#
# In the figure above one can see how given a query ($Q$) and set of documents ($D_1, D_2, \ldots, D_n$), one can generate latent representation a.k.a. semantic features, which can then be used to generate pairwise distance metric. The metric evaluated can be used for ranking.
# In the picture above, one can see that the query and the document are each mapped to a term vector. While a [bag of word](https://en.wikipedia.org/wiki/Bag-of-words_model) based modeling is a first step one takes while building NLP models, they are limited in their ability to capture relative positions amongst words. Convolution based, or recurrence based models perform better due to their inherent ability to leverage the positions of words. In this tutorial, we will use a simple illustrative model using LSTM to encode the term vector following the work done by [Palangi et. al.](https://www.microsoft.com/en-us/research/wp-content/uploads/2017/02/LSTM_DSSM_IEEE_TASLP.pdf).
#
# In this tutorial, we show you how to build such a network. We use a small sample from the Question-Answering corpus. Additionally we will use a recurrent network to develop the semantic model as it allows to inherently incorporate the positional information with the word tokens.
#
# **Note**: The data set is very small and the emphasis of this tutorial is in showing how to create an end-to-end modeling workflow for the DSSM network and not so much on the specific numerical performance we are able to get on this small data set.
# Upgrade to CNTK 2.3.1
# !pip install --upgrade --no-deps https://cntk.ai/PythonWheel/CPU-Only/cntk-2.3.1-cp35-cp35m-linux_x86_64.whl
# +
# Import the relevant libraries
import math
import numpy as np
import os
from __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter)
import cntk as C
import cntk.tests.test_utils
cntk.tests.test_utils.set_device_from_pytest_env() # (only needed for our build system)
C.cntk_py.set_fixed_random_seed(1) # fix a random seed for CNTK components
# -
# ## Data Preparation
#
# ### Download
#
# We use a sampling of the Question Answering data set for illustrating how to model DSSM networks. The data set consists of pair of sentences with [Questions and Answers](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/ACL15-STAGG.pdf). In this tutorial, we have preprocessed the data into two parts:
# - Vocabulary files: 1 file each for question and answers. There are 1204 and 1019 words in the question and answers vocabulary, respectively.
# - QA files: 1 file each for training and validation data (hold-out) where each of the files are converted in the [CTF format](https://cntk.ai/pythondocs/CNTK_202_Language_Understanding.html). The training and validation files have 3500 and 409 sentence pairs respectively.
#
# Note: a small portion of the original data was provided by the author of the paper for creating an exemplar network for illustration purposes.
# +
# Local directory that will hold the DSSM sample data.
location = os.path.normpath('data/DSSM')
# File names are relative here; after the download loop below runs,
# each entry's 'file' value is rewritten to its full local path.
data = {
    'train': { 'file': 'train.pair.tok.ctf' },
    'val':{ 'file': 'valid.pair.tok.ctf' },
    'query': { 'file': 'vocab_Q.wl' },
    'answer': { 'file': 'vocab_A.wl' }
}
import requests
def download(url, filename):
    """Download `url` and save it to `filename`.

    Improvements over the original:
    - streams the body in 8 KiB chunks instead of iter_content()'s
      default byte-by-byte iteration, which is extremely slow;
    - raises for HTTP error statuses instead of silently saving an
      error page to disk;
    - closes the response via a context manager so the connection is
      always released.
    """
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(filename, "wb") as handle:
            for chunk in response.iter_content(chunk_size=8192):
                handle.write(chunk)
# Fetch each data file unless a cached copy already exists locally.
if not os.path.exists(location):
    os.mkdir(location)
for item in data.values():
    path = os.path.normpath(os.path.join(location, item['file']))
    if os.path.exists(path):
        print("Reusing locally cached:", path)
    else:
        print("Starting download:", item['file'])
        # NOTE(review): '.csv' is appended to the remote name — this
        # matches the hosted file naming; verify before changing.
        url = "http://www.cntk.ai/jup/dat/DSSM/%s.csv"%(item['file'])
        print(url)
        download(url, path)
        print("Download completed")
    # Replace the bare file name with its full local path for later use.
    item['file'] = path
# -
# ### Reader
#
# We will be using the CTF deserializer to read the input data. However, one can write their own readers or use numpy arrays to provide data into CNTK modeling workflow. You may want to open the CTF files with a text editor to parse the input. Note, the CTF deserializer has the capability to scale across production scale data sizes spanning mulitple disks. The reader also abstracts the randomization of the large scale with a simple flag, an added convenience and time savings for the programmer.
# +
# Define the vocabulary size (QRY-stands for question and ANS stands for answer)
QRY_SIZE = 1204
ANS_SIZE = 1019
def create_reader(path, is_training):
    """Create a CTF minibatch source with sparse one-hot query/answer streams.

    Training sources repeat indefinitely with randomization; validation
    sources make a single un-randomized sweep over the data.
    """
    return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs(
        query = C.io.StreamDef(field='S0', shape=QRY_SIZE, is_sparse=True),
        answer = C.io.StreamDef(field='S1', shape=ANS_SIZE, is_sparse=True)
    )), randomize=is_training, max_sweeps = C.io.INFINITELY_REPEAT if is_training else 1)
# +
train_file = data['train']['file']
print(train_file)
if os.path.exists(train_file):
train_source = create_reader(train_file, is_training=True)
else:
raise ValueError("Cannot locate file {0} in current directory {1}".format(train_file, os.getcwd()))
validation_file = data['val']['file']
print(validation_file)
if os.path.exists(validation_file):
val_source = create_reader(validation_file, is_training=False)
else:
raise ValueError("Cannot locate file {0} in current directory {1}".format(validation_file, os.getcwd()))
# -
# ## Model creation
#
# The proposed LSTM-RNN model sequentially takes each word in a sentence, extracts its information, and embeds it into a semantic vector. Due to its ability to capture long term memory, the LSTM-RNN accumulates increasingly richer information as it goes through the sentence, and when it reaches the last word, the hidden layer of the network provides a semantic representation of the whole sentence. The `last` block is then projected to a `query_vector` space, also referred to semantic feature in the figure above.
#
#
# "query vector"
# ^
# |
# +-------+
# | Dense |
# +-------+
# ^
# |
# +---------+
# | Dropout |
# +---------+
# ^
# |
# +-------+
# | Dense |
# +-------+
# ^
# |
# +------+
# | last |
# +------+
# ^
# |
# +------+ +------+ +------+ +------+ +------+
# 0 -->| LSTM |-->| LSTM |-->| LSTM |-->| LSTM |-->| LSTM |
# +------+ +------+ +------+ +------+ +------+
# ^ ^ ^ ^ ^
# | | | | |
# +-------+ +-------+ +-------+ +-------+ +-------+
# | Embed | | Embed | | Embed | | Embed | | Embed |
# +-------+ +-------+ +-------+ +-------+ +-------+
# ^ ^ ^ ^ ^
# | | | | |
# query ------>+--------->+--------->+--------->+--------->+
#
#
# Similarly we can project the answer sentence to `answer_vector`. However, before we create our model. Let us define the input variables for our model. Note, there is a query and paired with it there is an answer. Given both of these are a sequence of words we define
# Create the containers for input feature (x) and the label (y)
qry = C.sequence.input_variable(QRY_SIZE)
ans = C.sequence.input_variable(ANS_SIZE)
# **Notice**: Do you smell any problem with the aforementioned statements. If you want to see what would happen if you were to go with the declarations above, please comment out the 4 statements below and run the model. You will find that your model throws an exception. The details of the exception is explained [here](https://cntk.ai/pythondocs/Manual_How_to_debug.html#Runtime-errors).
#
# Each sequence in CNTK, is associated with a dynamic axis representing the number of words in the sequence. Intuitively, when you have sequences of different sizes and vocabularies, each of them need to have their own dynamic axis. This is facilitated by declaring the input data containers with a named axis. Strictly speaking you could name just one, the other one would be a default dynamic axis. However, for clarity we name the two axis separately.
# +
# Create the containers for input feature (x) and the label (y)
axis_qry = C.Axis.new_unique_dynamic_axis('axis_qry')
qry = C.sequence.input_variable(QRY_SIZE, sequence_axis=axis_qry)
axis_ans = C.Axis.new_unique_dynamic_axis('axis_ans')
ans = C.sequence.input_variable(ANS_SIZE, sequence_axis=axis_ans)
# -
# Before we can create the model we need to specify a few parameters associated with the network architecture.
EMB_DIM = 25 # Embedding dimension
HIDDEN_DIM = 50 # LSTM dimension
DSSM_DIM = 25 # Dense layer dimension
NEGATIVE_SAMPLES = 5  # NOTE(review): not referenced by create_loss, which hard-codes 5 — confirm intent
DROPOUT_RATIO = 0.2  # fraction of activations dropped in the Dropout layers
# +
def create_model(qry, ans):
    """Build twin LSTM encoders mapping query/answer sequences to DSSM vectors.

    Each branch is: embedding -> LSTM -> last hidden state ->
    dense(relu) -> dropout -> dense(tanh).  The two branches share
    hyperparameters but NOT weights — each Sequential creates its own
    parameters.  Returns a dict with the two encoded outputs.
    """
    with C.layers.default_options(initial_state=0.1):
        qry_vector = C.layers.Sequential([
            C.layers.Embedding(EMB_DIM, name='embed'),
            C.layers.Recurrence(C.layers.LSTM(HIDDEN_DIM), go_backwards=False),
            C.sequence.last,
            C.layers.Dense(DSSM_DIM, activation=C.relu, name='q_proj'),
            C.layers.Dropout(DROPOUT_RATIO, name='dropout qdo1'),
            C.layers.Dense(DSSM_DIM, activation=C.tanh, name='q_enc')
        ])
        ans_vector = C.layers.Sequential([
            C.layers.Embedding(EMB_DIM, name='embed'),
            C.layers.Recurrence(C.layers.LSTM(HIDDEN_DIM), go_backwards=False),
            C.sequence.last,
            C.layers.Dense(DSSM_DIM, activation=C.relu, name='a_proj'),
            C.layers.Dropout(DROPOUT_RATIO, name='dropout ado1'),
            C.layers.Dense(DSSM_DIM, activation=C.tanh, name='a_enc')
        ])
    return {
        'query_vector': qry_vector(qry),
        'answer_vector': ans_vector(ans)
    }
# Create the model and store reference in `network` dictionary
network = create_model(qry, ans)
network['query'], network['axis_qry'] = qry, axis_qry
network['answer'], network['axis_ans'] = ans, axis_ans
# -
# ## Training
#
# Now that we have created a network, the next step is to find a suitable loss function where if a `question` is paired with the correct `answer`, the loss would be 0 else it would be 1. In other words, this loss should maximize the similarity (dot product) between the question vector and the answer vector that correctly answers it, and minimize the similarity between the question vector and answer vectors that do not answer it.
#
# The use cases of DSSM often appear in information retrieval where for a given query or question there are few answers amongst an ocean of poor or non-answers. The input data as in this case is a pair of query and answer (document or advertisement) that attracted a click. A classical way to train would be a binary classifier to predict click / no-click (or equivalently a 2-class classifier - one class each for click or no click). One could generate pairs of query and incorrect answers (as no-click data). However, one way to simulate no-click data is to use query and answers for other queries within a minibatch. This is the concept behind `cosine_distance_with_negative_samples` function. Note: This function returns 1 for the correct question and answer pair and 0 for incorrect, which is referred to as *similarity*. Hence, we use 1- `cosine_distance_with_negative_samples` as our loss function.
def create_loss(vector_a, vector_b):
    """Training criterion: 1 - cosine similarity with negative samples.

    A matched query/answer pair (similarity near 1) yields a loss near
    0; mismatched pairs drawn from the minibatch yield a loss near 1.
    """
    similarity = C.cosine_distance_with_negative_samples(
        vector_a,
        vector_b,
        shift=1,
        num_negative_samples=5)
    return 1 - similarity
# Model parameters
MAX_EPOCHS = 5       # training epochs; each epoch is EPOCH_SIZE samples (see do_train)
EPOCH_SIZE = 10000   # samples per epoch
MINIBATCH_SIZE = 50  # samples per minibatch
# Create trainer
def create_trainer(reader, network):
    """Build a CNTK Trainer for the DSSM network.

    Wires up piecewise learning-rate/momentum schedules, the loss
    (cosine similarity with negative samples, dynamic axes reconciled)
    and a momentum-SGD learner.  The `reader` argument is unused here
    but kept for interface compatibility with callers.
    Side effects: stores 'loss', 'error' and 'learner' in `network`.
    """
    # Setup the progress updater
    progress_writer = C.logging.ProgressPrinter(tag='Training', num_epochs=MAX_EPOCHS)
    # Piecewise-constant per-sample learning-rate schedule.
    lr_per_sample = [0.0015625]*20 + \
                    [0.00046875]*20 + \
                    [0.00015625]*20 + \
                    [0.000046875]*10 + \
                    [0.000015625]
    lr_schedule = C.learning_parameter_schedule_per_sample(lr_per_sample, \
                                                           epoch_size=EPOCH_SIZE)
    # Momentum ramps up as training progresses.
    mms = [0]*20 + [0.9200444146293233]*20 + [0.9591894571091382]
    mm_schedule = C.learners.momentum_schedule(mms, \
                                               epoch_size=EPOCH_SIZE, \
                                               minibatch_size=MINIBATCH_SIZE)
    # (removed: unused local `l2_reg_weight` — no L2 regularization is
    # applied, as the print statement below already states)
    model = C.combine(network['query_vector'], network['answer_vector'])
    # Notify the network that the two dynamic axes are indeed the same.
    query_reconciled = C.reconcile_dynamic_axes(network['query_vector'], network['answer_vector'])
    network['loss'] = create_loss(query_reconciled, network['answer_vector'])
    # No separate evaluation metric is tracked.
    network['error'] = None
    print('Using momentum sgd with no l2')
    dssm_learner = C.learners.momentum_sgd(model.parameters, lr_schedule, mm_schedule)
    network['learner'] = dssm_learner
    print('Using local learner')
    # Create trainer
    return C.Trainer(model, (network['loss'], network['error']), network['learner'], progress_writer)
# Instantiate the trainer
trainer = create_trainer(train_source, network)
# Train
def do_train(network, trainer, train_source):
# define mapping from intput streams to network inputs
input_map = {
network['query']: train_source.streams.query,
network['answer']: train_source.streams.answer
}
t = 0
for epoch in range(MAX_EPOCHS): # loop over epochs
epoch_end = (epoch+1) * EPOCH_SIZE
while t < epoch_end: # loop over minibatches on the epoch
data = train_source.next_minibatch(MINIBATCH_SIZE, input_map= input_map) # fetch minibatch
trainer.train_minibatch(data) # update model with it
t += MINIBATCH_SIZE
trainer.summarize_training_progress()
do_train(network, trainer, train_source)
# ## Validate
#
# Once the model is trained we want to select a model that has similar error with the validation (hold-out set) as the error with the training set.
#
# **Suggested Activity**: Vary the number of epochs and check the training and the validation error.
#
# The chosen model would then be used for prediction.
# Validate
def do_validate(network, val_source):
    """Evaluate the trained loss on the held-out set and print a summary."""
    progress_printer = C.logging.ProgressPrinter(tag='Evaluation', num_epochs=0)
    stream_map = {
        network['query']: val_source.streams.query,
        network['answer']: val_source.streams.answer
    }
    evaluator = C.eval.Evaluator(network['loss'], progress_printer)
    batch_size = 100
    while True:
        batch = val_source.next_minibatch(batch_size, input_map=stream_map)
        if not batch:  # empty minibatch means the single sweep is done
            break
        evaluator.test_minibatch(batch)
    evaluator.summarize_test_progress()
do_validate(network, val_source)
# ## Predict
#
# We will now create a vector representation of the query and the answer. Then compute the cosine similarity between the two vectors. When the answer is close to the question one would get a high similarity, while an incorrect / partially relevant question / answer pair would result in a smaller similarity. These scores are often used for ranking web documents in response to a query.
# +
# load dictionaries
# Vocabulary files: one token per line.  Only the newline is stripped,
# so any trailing spaces in the file remain part of the token.
query_wl = [line.rstrip('\n') for line in open(data['query']['file'])]
answers_wl = [line.rstrip('\n') for line in open(data['answer']['file'])]
query_dict = {query_wl[i]:i for i in range(len(query_wl))}
answers_dict = {answers_wl[i]:i for i in range(len(answers_wl))}
# let's run a sequence through
qry = 'BOS what contribution did e1 made to science in 1665 EOS'
ans = 'BOS book author book_editions_published EOS'
ans_poor = 'BOS language human_language main_country EOS'
# NOTE(review): a trailing space is appended to each word before the
# lookup — presumably the .wl vocab entries carry trailing spaces;
# confirm against the files before changing.
qry_idx = [query_dict[w+' '] for w in qry.split()] # convert to query word indices
print('Query Indices:', qry_idx)
ans_idx = [answers_dict[w+' '] for w in ans.split()] # convert to answer word indices
print('Answer Indices:', ans_idx)
ans_poor_idx = [answers_dict[w+' '] for w in ans_poor.split()] # convert to fake answer word indices
print('Poor Answer Indices:', ans_poor_idx)
# -
# Convert the query, answer and the fake answer to one-hot representation. This is a necessary step since the input to our trained network takes one-hot encoded input.
# +
# Create the one hot representations
# One row per token; set column `idx[t]` of row t to 1 via advanced
# indexing (equivalent to the original per-element loops).
qry_onehot = np.zeros([len(qry_idx), len(query_dict)], np.float32)
qry_onehot[np.arange(len(qry_idx)), qry_idx] = 1
ans_onehot = np.zeros([len(ans_idx), len(answers_dict)], np.float32)
ans_onehot[np.arange(len(ans_idx)), ans_idx] = 1
ans_poor_onehot = np.zeros([len(ans_poor_idx), len(answers_dict)], np.float32)
ans_poor_onehot[np.arange(len(ans_poor_idx)), ans_poor_idx] = 1
# -
# For each of the query and the answer one-hot encoded input, create the embeddings. Note: we use the answer embedding for both the correct answer and the poor answer. We compute the cosine similarity between the query and answer pair. The relative value of the cosine similarity with a higher value indicating a better answer.
# +
# Encode the query and both candidate answers into the DSSM space.
qry_embedding = network['query_vector'].eval([qry_onehot])
ans_embedding = network['answer_vector'].eval([ans_onehot])
ans_poor_embedding = network['answer_vector'].eval([ans_poor_onehot])
from scipy.spatial.distance import cosine
# scipy's cosine() is a *distance*, so 1 - cosine(...) is the cosine
# similarity; the correct answer should score higher than the poor one.
print('Query to Answer similarity:', 1-cosine(qry_embedding, ans_embedding))
print('Query to poor-answer similarity:', 1-cosine(qry_embedding, ans_poor_embedding))
# -
|
7 - Natural Language Processing/Lab03 - CNTK_303_Deep_Structured_Semantic_Modeling_with_LSTM_Networks.ipynb
|
# +
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import probml_utils as pml

# Regression-tree split thresholds on X1 / X2.
x11 = 5
x12 = 3
x21 = 7
x22 = 3
# Leaf values assigned to the five regions.
r = np.arange(2, 12, 2)
h = 0.1  # grid resolution
M = 10   # domain upper bound
X1 = X2 = np.arange(0, M + h, h)
tree = np.zeros((len(X1), len(X2)))
# Region k is the intersection of an X1 condition (Lk) and an X2
# condition (Rk).
L1 = X1 <= x11
R1 = X2 <= x21
L2 = X1 > x11
R2 = X2 <= x22
L3 = X1 > x11
R3 = X2 > x22
# NOTE(review): min(x11, x22) equals x12 with the current constants;
# presumably `x12` was intended — confirm against the source figure.
L4 = X1 <= min(x11, x22)
R4 = X2 > x21
L5 = (X1 <= x11) & (X1 > x12)
R5 = X2 > x21
# Paint each grid cell with the leaf value of the region it falls in.
for i in range(len(tree)):
    for j in range(len(tree[0])):
        if L1[i] & R1[j]:
            tree[i, j] = r[0]
        if L2[i] & R2[j]:
            tree[i, j] = r[1]
        if L3[i] & R3[j]:
            tree[i, j] = r[2]
        if L4[i] & R4[j]:
            tree[i, j] = r[3]
        if L5[i] & R5[j]:
            tree[i, j] = r[4]
X, Y = np.meshgrid(X1, X2)
Z = tree
fig = plt.figure(figsize=(12, 7))
# fig.gca(projection=...) was deprecated in Matplotlib 3.4 and removed
# in 3.6; add_subplot is the supported way to obtain a 3D axes.
ax = fig.add_subplot(projection="3d")
ax.plot_surface(X, Y, Z, cmap="coolwarm", lw=0.5, rstride=1, cstride=1, edgecolor=["g"], color="w", antialiased=True)
ax.set_xlabel("X1")
ax.set_ylabel("X2")
ax.set_zlabel("Y")
ax.view_init(None, 50 + 180)
plt.tight_layout()
pml.savefig("regression_tree_surface.pdf", dpi=300)
plt.show()
|
notebooks/book1/18/regtreeSurfaceDemo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overview
# The Course Overview will explain the course. The course is divided into four parts:
#
# * Introduction - This part of the course covers history, features, installation, basic language usage and tools
# * Beginner - This part of the course covers fundamental types, builtins, functions and modules
# * Intermediate -
# * Advanced
#
#
#
# ## Prerequisites
#
# ## Python
#
# ### History
#
# ### Comparison
#
# ### Attributes
# * Object-Oriented
# * Dynamic/Run-time Typed
# * Case-sensitive
# * Indent Delineated
# * Batteries Included Philosophy
#
# ### Supported Paradigms
# * **Structured** - a programming paradigm aimed at improving the clarity, quality, and development time of a computer program by making extensive use of subroutines, block structures and for and while loops
# * **Functional** - a programming paradigm—a style of building the structure and elements of computer programs—that treats computation as the evaluation of mathematical functions and avoids changing-state and mutable data
# * **Procedural/Imperative** - a programming paradigm that uses statements that change a program's state
# * **Modular** - a software design technique that emphasizes separating the functionality of a program into independent, interchangeable modules
# * **Object-Oriented** - a programming paradigm organized around objects rather than "actions" and data rather than logic
#
# ### Implementations
# * CPython (standard Python) - written in C
# * Jython - written in Java
# * Iron Python - written in C#
# * PyPy - written in Python
#
# ### Who Uses Python and For What?
# * Who?
# ..* Google
# ..* Yahoo!
# ..* CERN
# ..* NASA
# ..* Industrial Light and Magic
# ..* Disney Animation Studios
# ..* Mozilla
# ..* PBS
# ..* Reddit
#
# * What?
# ..* Machine and Deep Learning (tensorflow, keras, scikit-learn, pandas)
# ..* Web Frameworks (django, pyramid, flask)
# ..* Scientific Programming (numpy, scipy, earthpy, astropy)
# ..* Gaming (pygame)
# ..* FreeCAD and Blender
#
# ### Environment
# You can program in Python in a number of ways:
#
# ### Python Executable
#
# #### REPL Shell (**REPL** - **R**ead, **E**valuate, **P**rint, **L**oop)
#
# #### IDLE
# [IDLE Documentation](https://docs.python.org/3/library/idle.html)
#
# #### Python Editors and IDEs
# * PyCharm
# * Eclipse
# * PyScripter
# * Python for VS Code
#
# #### Command Line
#
# ##### Text Editors
# * Notepad++
# * Atom
# * Vim
# * Emacs
# * Sublime Text
#
#
# #### Jupyter
# * [Jupyter](http://jupyter.org/)
#
# #### Comprehensive List of Editors/IDEs
# * [Python Editors](https://wiki.python.org/moin/PythonEditors)
#
#
# ### Installation
# Link to anaconda and python 3 setup: http://swcarpentry.github.io/workshop-template/#setup
#
# # Approach
# Learn by doing with lots of examples
#
# # Table of Contents
# * Level-01 - input, output, string, int, if/elif/else, project: guess the number
# * Level-02 - list, dict, function, format, project: rock, paper, scissors
# * Level-03 -
#
# # Using Jupyter Notebook
# Add references and provide basic commands here
#
|
Overview.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %load_ext load_style
# %load_style talk.css
# %matplotlib inline
# # Streamfunction and velocity potential from zonal and meridional wind component
# [windspharm](http://ajdawson.github.io/windspharm/) is a Python library developed by
# [<NAME>](http://ajdawson.github.io/) which provides a pythonic interface to the [pyspharm](https://code.google.com/p/pyspharm/) module, which is basically a binding to the [spherepack] Fortran library
# **Installation**
#
# 1) Download and unpack [pyspharm](https://code.google.com/p/pyspharm/)
#
#
# 2) Download **spherepack** from [http://www2.cisl.ucar.edu/resources/legacy/spherepack](http://www2.cisl.ucar.edu/resources/legacy/spherepack) and unpack
#
# 3) copy all the Fortran files in `[path_to_spherepack]/spherepack3.2/src` to `[path_to_pyspharm]/pyspharm-1.0.8/src`
#
# 4) install **pyspharm**:
#
# ```
# pyspharm-1.0.8 ᐅ python setup.py build
# pyspharm-1.0.8 ᐅ python setup.py install
#
# ```
#
# 5) install **windspharm**
#
# ```
# ᐅ pip install windspharm
# ```
#
# windspharm has 3 different `interfaces`:
#
#
# + **`standard`**: expects numpy arrays as inputs
# + **`cdms`**: [cdms2](http://esg.llnl.gov/cdat/cdms_html/cdms-2.htm) objects (cdms2 is a class for opening netcdf files [amongst other formats]) part of the [cdat-lite](http://proj.badc.rl.ac.uk/cedaservices/wiki/CdatLite) package or [UV-CDAT](http://uvcdat.llnl.gov/) distribution)
# + **`iris`**: [iris](http://scitools.org.uk/iris/index.html) cubes
#
# We are going to use [xray](https://github.com/xray/xray) here, and thus use the standard interface, passing the underlying numpy arrays
from windspharm.standard import VectorWind
from windspharm.tools import prep_data, recover_data, order_latdim
# ### usual imports
import os, sys
import pandas as pd
import numpy as np
from numpy import ma
from matplotlib import pyplot as plt
from mpl_toolkits.basemap import Basemap as bm
dpath = os.path.join(os.environ.get('HOME'), 'data/NCEP1')
# ### defines a function to plot a 2D field map
def plot_field(X, lat, lon, vmin, vmax, step, cmap=plt.get_cmap('jet'), ax=False, title=False, grid=False):
    """Draw a filled-contour map of the 2D field *X* on the global Basemap ``m``.

    Parameters
    ----------
    X : 2D array, the field to contour (shape (nlat, nlon)).
    lat, lon : 2D coordinate arrays (e.g. from ``np.meshgrid``), passed to
        Basemap's ``contourf`` with ``latlon=True``.
    vmin, vmax, step : contour range and interval for the filled levels.
    cmap : matplotlib colormap for the filled contours.
    ax : existing axes to draw on; if False, a new figure/axes is created
        with an aspect ratio matching the shape of X.
    title : optional title string for the axes.
    grid : if True, draw labelled meridians and parallels.

    NOTE(review): relies on the module-level Basemap instance ``m`` being
    defined before this function is called.
    """
    if not ax:
        # New figure whose height follows the field's aspect ratio.
        f, ax = plt.subplots(figsize=(10, (X.shape[0] / float(X.shape[1])) * 10))
    m.ax = ax
    # Fix: use the lat/lon arguments instead of the module-level
    # ``lats``/``lons`` globals the original silently depended on.
    im = m.contourf(lon, lat, X, np.arange(vmin, vmax+step, step), latlon=True, cmap=cmap, extend='both', ax=ax)
    m.drawcoastlines()
    if grid:
        m.drawmeridians(np.arange(0, 360, 60), labels=[0,0,0,1])
        m.drawparallels([-40, 0, 40], labels=[1,0,0,0])
    m.colorbar(im)
    if title:
        ax.set_title(title)
# ## load the wind data using xray
# NOTE(review): `xray` is the pre-2016 name of the `xarray` package.
import xray; print(xray.__version__)
# Open the NCEP1 2014 zonal (u) and meridional (v) wind files.
dset_u = xray.open_dataset(os.path.join(dpath,'uwnd.2014.nc'))
dset_v = xray.open_dataset(os.path.join(dpath,'vwnd.2014.nc'))
# Select the 200 hPa level and average over all 2014 time steps.
dset_u = dset_u.sel(level=200)
dset_v = dset_v.sel(level=200)
dset_u = dset_u.mean('time')
dset_v = dset_v.mean('time')
lats = dset_u['lat'].values
lons = dset_u['lon'].values
uwnd = dset_u['uwnd'].values
vwnd = dset_v['vwnd'].values
# +
# prep_data reshapes to the layout windspharm's standard interface expects
# and records the original ordering so it can be recovered later.
uwnd, uwnd_info = prep_data(uwnd, 'yx')
vwnd, vwnd_info = prep_data(vwnd, 'yx')
# It is also required that the latitude dimension is north-to-south. Again the
# bundled tools make this easy.
lats, uwnd, vwnd = order_latdim(lats, uwnd, vwnd)
# -
# 2D coordinate arrays for Basemap plotting with latlon=True.
lons, lats = np.meshgrid(lons, lats)
w = VectorWind(uwnd, vwnd)
# Streamfunction (sf) and velocity potential (vp) via spherical harmonics.
sf, vp = w.sfvp()
# NOTE(review): 10e-6 == 1e-5, while the plot titles advertise units of
# $10^6$ m^2 s^-1 (which would require a factor of 1e-6) -- confirm intent.
vp = vp * 10e-6
sf = sf * 10e-6
m = bm(projection='cyl',llcrnrlat=-90,urcrnrlat=90,\
            llcrnrlon=0,urcrnrlon=360,\
            lat_ts=0,resolution='c')
plot_field(sf.squeeze(), lats, lons, -1500, 1500, 100, cmap=plt.get_cmap('RdBu_r'), \
           title="Streamfunction at 200 hPa ($10^6$m$^2$s$^{-1}$)", grid=True)
plot_field(vp.squeeze(), lats, lons, -100, 100, 10, cmap=plt.get_cmap('RdBu_r'), \
           title="Velocity Potential at 200 hPa ($10^6$m$^2$s$^{-1}$)", grid=True)
|
notebooks/spharm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Hongchenglong/colab/blob/main/CertifiableBayesianInference/CNN_Experiments/analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="2jrbKgOuIEhB" outputId="cdf178b6-6671-4d5a-9aa7-767d829f8ec5"
# Put the notebook's parent directory on the import path.
import sys, os
from pathlib import Path
path = Path(os.getcwd())
sys.path.append(str(path.parent))
# Mount Google Drive and make the project folder importable.
from google.colab import drive
drive.mount('/content/drive')
sys.path.append('/content/drive/MyDrive/CertifiableBayesianInference')
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="0kOqcjAoKSXP" outputId="e42e3782-e16b-445d-997e-c581b16f4e79"
# Run the experiment driver script from the mounted Drive folder.
# !/bin/bash /content/drive/MyDrive/CertifiableBayesianInference/CNN_Experiments/runner.sh
|
CertifiableBayesianInference/CNN_Experiments/analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#this allows plots to be displayed inline with the notebook
# %matplotlib inline
import os.path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from losspager.models.semimodel import SemiEmpiricalFatality,make_test_semi_model,URBAN,RURAL,pop_dist
# Here we are demonstrating the "interactive" use of the SemiEmpirical PAGER Loss model. The first thing we have to do is find the necessary input files in the repository where this notebook lives.
# Paths to the PAGER semi-empirical model input files bundled with the
# repository's test data: building inventory, collapse rates, casualty
# rates, workforce distribution, and a UN population-growth spreadsheet.
homedir = os.getcwd()
invfile = os.path.join(homedir,'..','test','data','semi_inventory.hdf')
colfile = os.path.join(homedir,'..','test','data','semi_collapse_mmi.hdf')
fatfile = os.path.join(homedir,'..','test','data','semi_casualty.hdf')
workfile = os.path.join(homedir,'..','test','data','semi_workforce.hdf')
growthfile = os.path.join(homedir,'..','test','data','WPP2015_POP_F02_POPULATION_GROWTH_RATE.xls')
# Next, we create a SemiEmpiricalFatality object using those files.
semi = SemiEmpiricalFatality.loadFromFiles(invfile,colfile,fatfile,workfile,growthfile)
# Establish the parameters which we'd like to test - country code, time of day, population density, input population, and MMI.
ccode = 'ID' #Indonesia
timeofday = 'day'
density = URBAN
pop = 100000
mmi = 8.5
# The <a href="http://earthquake.usgs.gov/data/pager/prodandref/Jaiswal_Wald_2010_Semi.pdf">paper</a> describing this model in full says that Indonesia daytime residential/non-residential populations should be about 22% and 43% of the total respectively (the remainder are outdoors, and therefore relatively safe from shaking.)
# Split the total population into residential / non-residential / outdoor
# groups for the given country, time of day, and density class.
workforce = semi.getWorkforce(ccode)
res,nonres,outside = pop_dist(pop,workforce,timeofday,density)
print(workforce)
print('Daytime Residential Population of %s: %s' % (ccode,format(int(res[0]),',d')))
print('Daytime Non-Residential Population of %s: %s' % (ccode,format(int(nonres[0]),',d')))
# Retrieve the residential/non-residential urban inventories for Indonesia - these are returned as Pandas Series objects
resinv,nonresinv = semi.getInventories(ccode,density)
print('Residential:')
print(resinv)
print()
print('Non-Residential:')
print(nonresinv)
# Retrieve the collapse rates for both inventories at MMI 8.5.
res_collapse = semi.getCollapse(ccode,mmi,resinv)
nonres_collapse = semi.getCollapse(ccode,mmi,nonresinv)
print('Residential Collapse Rates:')
print(res_collapse)
print()
print('Non-Residential Collapse Rates:')
print(nonres_collapse)
# Retrieve the daytime fatality rates given collapse for both inventories.
res_fat_rates = semi.getFatalityRates(ccode,timeofday,resinv)
nonres_fat_rates = semi.getFatalityRates(ccode,timeofday,nonresinv)
print('Residential Fatality Rates:')
print(res_fat_rates)
print()
print('Non-Residential Fatality Rates:')
print(nonres_fat_rates)
# Now we can multiply the residential/non-residential populations through the inventory, collapse, and fatality rates.
# Element-wise product over building types: exposed population x inventory
# fraction x collapse rate x fatality rate given collapse.
res_fats = res * resinv * res_collapse * res_fat_rates
nonres_fats = nonres * nonresinv * nonres_collapse * nonres_fat_rates
print('Residential Fatalities:')
print(res_fats)
print()
print('Non-Residential Fatalities:')
print(nonres_fats)
print()
resfatsum = int(res_fats.sum())
nonresfatsum = int(nonres_fats.sum())
print('Total Residential Fatalities: %s' % (format(resfatsum,',d')))
print('Total Non-Residential Fatalities: %s' % (format(nonresfatsum,',d')))
print('Total Fatalities: %s' % (format(nonresfatsum+resfatsum,',d')))
# Because the production version of the model isn't exactly implemented as above (the code is vectorized to take advantage of numpy speed), the semimodel module has a test method that creates a 1x1 grid which is then used in the SemiEmpiricalFatality.getLosses() method. This is a better test of the full method than the "manual" methods above.
losses,resfat,nonresfat = make_test_semi_model(invfile,colfile,fatfile,workfile,growthfile,ccode,timeofday,density,pop,mmi)
print(losses)
# Series objects have a built-in plot() method, which can take many forms - in this case, a pie chart. Let's compare the *inventory* pie chart to the *fatality* pie chart.
# Normalize residential fatalities to fractions for the pie chart.
resfat2 = res_fats/resfatsum
fig,axlist = plt.subplots(nrows=1,ncols=2,figsize=(10,6))
ax1 = axlist[0]
ax2 = axlist[1]
plt.subplot(ax1)
resinv.plot(kind='pie',ax=ax1)
ph=plt.title('Building Inventory')
plt.axis('equal')
plt.subplot(ax2)
resfat2.plot(kind='pie',ax=ax2)
plt.axis('equal')
ph=plt.title('Fatalities by Building Type')
ph = plt.suptitle('Indonesia Residential')
# NOTE(review): divides by resfatsum rather than nonresfatsum -- harmless
# for a pie chart (which renormalizes) but looks like a copy/paste slip;
# confirm the intended denominator.
nonresfat2 = nonres_fats/resfatsum
fig,axlist = plt.subplots(nrows=1,ncols=2,figsize=(10,6))
ax1 = axlist[0]
ax2 = axlist[1]
plt.subplot(ax1)
nonresinv.plot(kind='pie',ax=ax1)
ph=plt.title('Building Inventory')
plt.axis('equal')
plt.subplot(ax2)
nonresfat2.plot(kind='pie',ax=ax2)
plt.axis('equal')
ph=plt.title('Fatalities by Building Type')
ph = plt.suptitle('Indonesia Non-Residential')
# So we have shown how to calculate fatalities for one population value in one "cell" with a given density class and MMI. How about for a whole ShakeMap? First we need to inform the model about our population, country code, and urban/rural grids.
# Gridded inputs for a full ShakeMap run: population raster, urban/rural
# raster, country-code (ISO) raster, and the Northridge ShakeMap grid.
popfile = os.path.join(homedir,'..','test','data','eventdata','northridge','northridge_lspop2012.flt')
urbfile = os.path.join(homedir,'..','test','data','eventdata','northridge','northridge_urban.bil')
isofile = os.path.join(homedir,'..','test','data','eventdata','northridge','northridge_isogrid.bil')
shakefile = os.path.join(homedir,'..','test','data','eventdata','northridge','northridge_grid.xml')
popyear = 2012
semi.setGlobalFiles(popfile,popyear,urbfile,isofile)
# Then we call the getLosses() method with the Northridge ShakeMap as input:
loss,resfbystruct,nonresfbystruct = semi.getLosses(shakefile)
print('Total estimated losses for Northridge: %s' % format(loss,',d'))
# It turns out for Northridge the model estimates 0 fatalities in non-residential buildings, but we can examine the residential structures:
# 'XF' is presumably the region code used here for California -- confirm
# against the semimodel country/region code table.
resinv,nresinv = semi.getInventories('XF',URBAN)
fig,axlist = plt.subplots(nrows=1,ncols=2,figsize=(10,6))
ax1 = axlist[0]
ax2 = axlist[1]
plt.subplot(ax1)
resinv.plot(ax=ax1,kind='pie')
plt.title('Building Inventory');
plt.axis('equal');
plt.subplot(ax2)
# Normalize fatalities-by-structure to fractions for the pie chart.
resfat = pd.Series(resfbystruct['XF'])
resfat2 = resfat/resfat.sum()
resfat2.plot(kind='pie',ax=ax2)
plt.title('Fatalities by Building Type');
plt.axis('equal');
plt.suptitle('California Residential');
# We have a method getBuildingDesc() that will expand the building codes:
for btype,numfat in resfbystruct['XF'].items():
    bdesc = semi.getBuildingDesc(btype,'operational')
    print('%40s (%3s) fatalities: %4s' % (bdesc.capitalize(),btype,format(int(numfat),',d')))
#df = pd.DataFrame(resfbystruct['XF'])
#df
|
notebooks/.ipynb_checkpoints/SemiEmpirical-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # COS-Dwarfs Updates (v1.1)
#
# Version History:
#
# v1.0 -- 07 Jul 2016 by JXP [update f-values, convert to JSON]
# v1.1 -- 09 July 2016 by JXP [add Kinematic measures]
# imports
from pyigm.cgm import cos_halos as pch
# ## Updated FITS from Mega structure (9 July 2016)
# unix> ls -l cosmetals_megastructure.sav*
# -rw-r--r-- 1 xavier staff 159K Jul 7 07:01 cosmetals_megastructure.sav
# IDL> mk_mega_fits
# ## Read in Megastructure (from FITS files)
#
# This step will update the f-values (several lines were
# using the old Morton 1991 reference) and associated column densities.
#
# The synthesize_colm() method of AbsComponent is used for final columns.
# These match the original to within 10% in all cases.
# NOTE(review): bare `reload` is a Python 2 builtin; this notebook predates
# Python 3 (where importlib.reload would be required).
reload(pch)
cos_dwarfs = pch.COSDwarfs()
cos_dwarfs.load_mega(skip_ions=False, chk_lowz=False)
# ### Notes on 'failures' [Fixed with July 7 megastructure]
#
# NV in J0212-0737.334_153.fits has vlim = [-650,-350] (i.e. beyond -600 km/s)
# HI in J0820+2334.260_17.fits has vlim = [ -50., 620.] km/s
# J1545+0936_285_81 has no HI measurement
# ### Write to a JSON tarball
# NOTE(review): the assignment after `cdir` is commented out, so this line
# only *reads* (and displays) the current data directory.
cos_dwarfs.cdir# = '/Users/xavier/Dropbox/COS-Halos-Data'
tarfil = cos_dwarfs.cdir+'/cos-dwarfs_systems.v1.0.tar.gz'
cos_dwarfs.write_survey(tarfil)
# ----
# ## Kinematics
from pyigm.cgm import cos_halos as pch
reload(pch)
# Reload the v1.0 survey written above from its JSON tarball.
cos_dwarfs_v10 = pch.COSDwarfs()
cos_dwarfs_v10.load_sys(tfile=cos_dwarfs_v10.cdir+'/cos-dwarfs_systems.v1.0.tar.gz', chk_lowz=False)
# ### Generate from Init file
cos_dwarfs_v10.kin_init_file
cos_dwarfs_v10.load_abskin(flg=1)
# ### Test writing
# Round-trip one system's kinematics through dict -> JSON to verify I/O.
cgm_sys = cos_dwarfs_v10.cgm_abs[0]
tdict = cgm_sys.igm_sys.to_dict()
tdict['kin']
from linetools import utils as ltu
jdict = ltu.jsonify(tdict)
ltu.savejson('tmp2.json',jdict)
# ### Write tarball
tarfil = cos_dwarfs_v10.cdir+'/cos-dwarfs_systems.v1.1.tar.gz'
cos_dwarfs_v10.write_survey(tarfil)
# ### Read test
cos_dwarfs_v11 = pch.COSDwarfs()
cos_dwarfs_v11.load_sys(tfile=cos_dwarfs_v11.cdir+'/cos-dwarfs_systems.v1.1.tar.gz', chk_lowz=False)
cos_dwarfs_v11.cgm_abs[0].igm_sys.kin
# ## Tests
cos_dwarfs_v11.halo_mass
cos_dwarfs_v11.kin
|
docs/datasets/COS_Dwarfs_updates.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Seasonality in time series data
#
# Consider the problem of modeling time series data with multiple seasonal components with different periodicities. Let us take the time series $y_t$ and decompose it explicitly to have a level component and two seasonal components.
#
# $$
# y_t = \mu_t + \gamma^{(1)}_t + \gamma^{(2)}_t
# $$
#
# where $\mu_t$ represents the trend or level, $\gamma^{(1)}_t$ represents a seasonal component with a relatively short period, and $\gamma^{(2)}_t$ represents another seasonal component of longer period. We will have a fixed intercept term for our level and consider both $\gamma^{(1)}_t$ and $\gamma^{(2)}_t$ to be stochastic so that the seasonal patterns can vary over time.
#
# In this notebook, we will generate synthetic data conforming to this model and showcase modeling of the seasonal terms in a few different ways under the unobserved components modeling framework.
# %matplotlib inline
# +
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
plt.rc("figure", figsize=(16,8))
plt.rc("font", size=14)
# -
# ### Synthetic data creation
#
# We will create data with multiple seasonal patterns by following equations (3.7) and (3.8) in Durbin and Koopman (2012). We will simulate 300 periods and two seasonal terms parametrized in the frequency domain having periods 10 and 100, respectively, and 3 and 2 number of harmonics, respectively. Further, the variances of their stochastic parts are 4 and 9, respectively.
# First we'll simulate the synthetic data
def simulate_seasonal_term(periodicity, total_cycles, noise_std=1.,
                           harmonics=None):
    """Simulate a stochastic trigonometric seasonal component.

    Generates ``periodicity * total_cycles`` observations of a seasonal
    term built from ``harmonics`` harmonic pairs (defaults to
    floor(periodicity / 2)), following Durbin & Koopman (2012),
    eqs. (3.7)-(3.8).  A long burn-in (100x the requested duration) is
    simulated first and discarded so the returned slice is past any
    transient behavior.
    """
    n_obs = periodicity * total_cycles
    assert n_obs == int(n_obs)
    n_obs = int(n_obs)
    if not harmonics:
        harmonics = int(np.floor(periodicity / 2))
    base_freq = 2 * np.pi / float(periodicity)
    # Seasonal state at time t: one (gamma, gamma*) pair per harmonic.
    state = noise_std * np.random.randn(harmonics)
    conj_state = noise_std * np.random.randn(harmonics)
    n_total = 100 * n_obs  # pad for burn-in
    out = np.zeros(n_total)
    for t in range(n_total):
        next_state = np.zeros_like(state)
        next_conj = np.zeros_like(conj_state)
        for idx in range(harmonics):
            freq = base_freq * (idx + 1)
            cos_f = np.cos(freq)
            sin_f = np.sin(freq)
            # Rotate each harmonic pair and add fresh noise.  The two
            # randn() draws per harmonic are kept in the original order
            # so results are identical for a fixed RNG seed.
            next_state[idx] = (state[idx] * cos_f
                               + conj_state[idx] * sin_f
                               + noise_std * np.random.randn())
            next_conj[idx] = (- state[idx] * sin_f
                              + conj_state[idx] * cos_f
                              + noise_std * np.random.randn())
        out[t] = np.sum(next_state)
        state = next_state
        conj_state = next_conj
    return out[-n_obs:]  # discard burn-in
# +
# Simulate 3 cycles of the long (period-100) component => 300 observations.
duration = 100 * 3
periodicities = [10, 100]
num_harmonics = [3, 2]
std = np.array([2, 3])  # noise std devs => stochastic variances 4 and 9
np.random.seed(8678309)
# Simulate each seasonal term with its own period / harmonics / noise std.
terms = []
for ix, _ in enumerate(periodicities):
    s = simulate_seasonal_term(
        periodicities[ix],
        duration / periodicities[ix],
        harmonics=num_harmonics[ix],
        noise_std=std[ix])
    terms.append(s)
# Fixed level (intercept) of 10 added on top of the two seasonal terms.
terms.append(np.ones_like(terms[0]) * 10.)
series = pd.Series(np.sum(terms, axis=0))
df = pd.DataFrame(data={'total': series,
                        '10(3)': terms[0],
                        '100(2)': terms[1],
                        'level':terms[2]})
h1, = plt.plot(df['total'])
h2, = plt.plot(df['10(3)'])
h3, = plt.plot(df['100(2)'])
h4, = plt.plot(df['level'])
plt.legend(['total','10(3)','100(2)', 'level'])
plt.show()
# -
# ### Unobserved components (frequency domain modeling)
#
# The next method is an unobserved components model, where the trend is modeled as a fixed intercept and the seasonal components are modeled using trigonometric functions with primary periodicities of 10 and 100, respectively, and number of harmonics 3 and 2, respectively. Note that this is the correct, generating model. The process for the time series can be written as:
#
# $$
# \begin{align}
# y_t & = \mu_t + \gamma^{(1)}_t + \gamma^{(2)}_t + \epsilon_t\\
# \mu_{t+1} & = \mu_t \\
# \gamma^{(1)}_{t} &= \sum_{j=1}^3 \gamma^{(1)}_{j, t} \\
# \gamma^{(2)}_{t} &= \sum_{j=1}^2 \gamma^{(2)}_{j, t}\\
# \gamma^{(1)}_{j, t+1} &= \gamma^{(1)}_{j, t}\cos(\lambda_j) + \gamma^{*, (1)}_{j, t}\sin(\lambda_j) + \omega^{(1)}_{j,t}, ~j = 1, 2, 3\\
# \gamma^{*, (1)}_{j, t+1} &= -\gamma^{(1)}_{j, t}\sin(\lambda_j) + \gamma^{*, (1)}_{j, t}\cos(\lambda_j) + \omega^{*, (1)}_{j, t}, ~j = 1, 2, 3\\
# \gamma^{(2)}_{j, t+1} &= \gamma^{(2)}_{j, t}\cos(\lambda_j) + \gamma^{*, (2)}_{j, t}\sin(\lambda_j) + \omega^{(2)}_{j,t}, ~j = 1, 2\\
# \gamma^{*, (2)}_{j, t+1} &= -\gamma^{(2)}_{j, t}\sin(\lambda_j) + \gamma^{*, (2)}_{j, t}\cos(\lambda_j) + \omega^{*, (2)}_{j, t}, ~j = 1, 2\\
# \end{align}
# $$
#
#
# where $\epsilon_t$ is white noise, $\omega^{(1)}_{j,t}$ are i.i.d. $N(0, \sigma^2_1)$, and $\omega^{(2)}_{j,t}$ are i.i.d. $N(0, \sigma^2_2)$, where $\sigma_1 = 2.$
# +
# Correct generating model: fixed intercept plus two trigonometric
# seasonal terms with the true periods (10, 100) and harmonics (3, 2).
model = sm.tsa.UnobservedComponents(series.values,
                                    level='fixed intercept',
                                    freq_seasonal=[{'period': 10,
                                                    'harmonics': 3},
                                                   {'period': 100,
                                                    'harmonics': 2}])
res_f = model.fit(disp=False)
print(res_f.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_f.smoother_results.smoothed_state[0,-1:][0]))
res_f.plot_components()
plt.show()
# -
# Inspect the state transition matrix (slice at t=0).
model.ssm.transition[:, :, 0]
# Observe that the fitted variances are pretty close to the true variances of 4 and 9. Further, the individual seasonal components look pretty close to the true seasonal components. The smoothed level term is kind of close to the true level of 10. Finally, our diagnostics look solid; the test statistics are small enough to fail to reject our three tests.
# ### Unobserved components (mixed time and frequency domain modeling)
#
# The second method is an unobserved components model, where the trend is modeled as a fixed intercept and the seasonal components are modeled using 10 constants summing to 0 and trigonometric functions with a primary periodicities of 100 with 2 harmonics total. Note that this is not the generating model, as it presupposes that there are more state errors for the shorter seasonal component than in reality. The process for the time series can be written as:
#
# $$
# \begin{align}
# y_t & = \mu_t + \gamma^{(1)}_t + \gamma^{(2)}_t + \epsilon_t\\
# \mu_{t+1} & = \mu_t \\
# \gamma^{(1)}_{t + 1} &= - \sum_{j=1}^9 \gamma^{(1)}_{t + 1 - j} + \omega^{(1)}_t\\
# \gamma^{(2)}_{j, t+1} &= \gamma^{(2)}_{j, t}\cos(\lambda_j) + \gamma^{*, (2)}_{j, t}\sin(\lambda_j) + \omega^{(2)}_{j,t}, ~j = 1, 2\\
# \gamma^{*, (2)}_{j, t+1} &= -\gamma^{(2)}_{j, t}\sin(\lambda_j) + \gamma^{*, (2)}_{j, t}\cos(\lambda_j) + \omega^{*, (2)}_{j, t}, ~j = 1, 2\\
# \end{align}
# $$
#
# where $\epsilon_t$ is white noise, $\omega^{(1)}_{t}$ are i.i.d. $N(0, \sigma^2_1)$, and $\omega^{(2)}_{j,t}$ are i.i.d. $N(0, \sigma^2_2)$.
# +
# Mixed model: a time-domain (dummy-variable) seasonal of period 10 plus
# a trigonometric seasonal of period 100 with 2 harmonics.
model = sm.tsa.UnobservedComponents(series,
                                    level='fixed intercept',
                                    seasonal=10,
                                    freq_seasonal=[{'period': 100,
                                                    'harmonics': 2}])
res_tf = model.fit(disp=False)
print(res_tf.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_tf.smoother_results.smoothed_state[0,-1:][0]))
res_tf.plot_components()
plt.show()
# -
# The plotted components look good. However, the estimated variance of the second seasonal term is inflated from reality. Additionally, we reject the Ljung-Box statistic, indicating we may have remaining autocorrelation after accounting for our components.
# ### Unobserved components (lazy frequency domain modeling)
#
# The third method is an unobserved components model with a fixed intercept and one seasonal component, which is modeled using trigonometric functions with primary periodicity 100 and 50 harmonics. Note that this is not the generating model, as it presupposes that there are more harmonics then in reality. Because the variances are tied together, we are not able to drive the estimated covariance of the non-existent harmonics to 0. What is lazy about this model specification is that we have not bothered to specify the two different seasonal components and instead chosen to model them using a single component with enough harmonics to cover both. We will not be able to capture any differences in variances between the two true components. The process for the time series can be written as:
#
# $$
# \begin{align}
# y_t & = \mu_t + \gamma^{(1)}_t + \epsilon_t\\
# \mu_{t+1} &= \mu_t\\
# \gamma^{(1)}_{t} &= \sum_{j=1}^{50}\gamma^{(1)}_{j, t}\\
# \gamma^{(1)}_{j, t+1} &= \gamma^{(1)}_{j, t}\cos(\lambda_j) + \gamma^{*, (1)}_{j, t}\sin(\lambda_j) + \omega^{(1)}_{j,t}, ~j = 1, 2, \dots, 50\\
# \gamma^{*, (1)}_{j, t+1} &= -\gamma^{(1)}_{j, t}\sin(\lambda_j) + \gamma^{*, (1)}_{j, t}\cos(\lambda_j) + \omega^{*, (1)}_{j, t}, ~j = 1, 2, \dots, 50\\
# \end{align}
# $$
#
# where $\epsilon_t$ is white noise, $\omega^{(1)}_{t}$ are i.i.d. $N(0, \sigma^2_1)$.
# +
# "Lazy" frequency-domain model: a single seasonal of period 100 with the
# default (full) number of harmonics, intended to cover both true terms.
model = sm.tsa.UnobservedComponents(series,
                                    level='fixed intercept',
                                    freq_seasonal=[{'period': 100}])
res_lf = model.fit(disp=False)
print(res_lf.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_lf.smoother_results.smoothed_state[0,-1:][0]))
res_lf.plot_components()
plt.show()
# -
# Note that one of our diagnostic tests would be rejected at the .05 level.
# ### Unobserved components (lazy time domain seasonal modeling)
#
# The fourth method is an unobserved components model with a fixed intercept and a single seasonal component modeled using a time-domain seasonal model of 100 constants. The process for the time series can be written as:
#
# $$
# \begin{align}
# y_t & =\mu_t + \gamma^{(1)}_t + \epsilon_t\\
# \mu_{t+1} &= \mu_{t} \\
# \gamma^{(1)}_{t + 1} &= - \sum_{j=1}^{99} \gamma^{(1)}_{t + 1 - j} + \omega^{(1)}_t\\
# \end{align}
# $$
#
# where $\epsilon_t$ is white noise, $\omega^{(1)}_{t}$ are i.i.d. $N(0, \sigma^2_1)$.
# +
# "Lazy" time-domain model: one dummy-variable seasonal of period 100.
model = sm.tsa.UnobservedComponents(series,
                                    level='fixed intercept',
                                    seasonal=100)
res_lt = model.fit(disp=False)
print(res_lt.summary())
# The first state variable holds our estimate of the intercept
print("fixed intercept estimated as {0:.3f}".format(res_lt.smoother_results.smoothed_state[0,-1:][0]))
res_lt.plot_components()
plt.show()
# -
# The seasonal component itself looks good--it is the primary signal. The estimated variance of the seasonal term is very high ($>10^5$), leading to a lot of uncertainty in our one-step-ahead predictions and slow responsiveness to new data, as evidenced by large errors in one-step ahead predictions and observations. Finally, all three of our diagnostic tests were rejected.
# ### Comparison of filtered estimates
#
# The plots below show that explicitly modeling the individual components results in the filtered state being close to the true state within roughly half a period. The lazy models took longer (almost a full period) to do the same on the combined true state.
# Assign better names for our seasonal terms
true_seasonal_10_3 = terms[0]
true_seasonal_100_2 = terms[1]
true_sum = true_seasonal_10_3 + true_seasonal_100_2
time_s = np.s_[:50] # After this they basically agree
# Filtered estimates of the short (period-10) seasonal component vs truth.
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
idx = np.asarray(series.index)
h1, = ax1.plot(idx[time_s], res_f.freq_seasonal[0].filtered[time_s], label='Double Freq. Seas')
h2, = ax1.plot(idx[time_s], res_tf.seasonal.filtered[time_s], label='Mixed Domain Seas')
h3, = ax1.plot(idx[time_s], true_seasonal_10_3[time_s], label='True Seasonal 10(3)')
plt.legend([h1, h2, h3], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth'], loc=2)
plt.title('Seasonal 10(3) component')
plt.show()
# Filtered estimates of the long (period-100) seasonal component vs truth.
time_s = np.s_[:50] # After this they basically agree
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
h21, = ax2.plot(idx[time_s], res_f.freq_seasonal[1].filtered[time_s], label='Double Freq. Seas')
h22, = ax2.plot(idx[time_s], res_tf.freq_seasonal[0].filtered[time_s], label='Mixed Domain Seas')
h23, = ax2.plot(idx[time_s], true_seasonal_100_2[time_s], label='True Seasonal 100(2)')
plt.legend([h21, h22, h23], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth'], loc=2)
plt.title('Seasonal 100(2) component')
plt.show()
# +
# Sum of both seasonal components: compare all four fitted models to truth.
time_s = np.s_[:100]
fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
h31, = ax3.plot(idx[time_s], res_f.freq_seasonal[1].filtered[time_s] + res_f.freq_seasonal[0].filtered[time_s], label='Double Freq. Seas')
h32, = ax3.plot(idx[time_s], res_tf.freq_seasonal[0].filtered[time_s] + res_tf.seasonal.filtered[time_s], label='Mixed Domain Seas')
# Fix: this series is the SUM of the two true components; it was
# mislabeled 'True Seasonal 100(2)' (copy/paste from the plot above).
h33, = ax3.plot(idx[time_s], true_sum[time_s], label='True Seasonal Sum')
h34, = ax3.plot(idx[time_s], res_lf.freq_seasonal[0].filtered[time_s], label='Lazy Freq. Seas')
h35, = ax3.plot(idx[time_s], res_lt.seasonal.filtered[time_s], label='Lazy Time Seas')
plt.legend([h31, h32, h33, h34, h35], ['Double Freq. Seasonal','Mixed Domain Seasonal','Truth', 'Lazy Freq. Seas', 'Lazy Time Seas'], loc=1)
plt.title('Seasonal components combined')
plt.show()
# -
# ##### Conclusions
#
# In this notebook, we simulated a time series with two seasonal components of different periods. We modeled them using structural time series models with (a) two frequency domain components of correct periods and numbers of harmonics, (b) time domain seasonal component for the shorter term and a frequency domain term with correct period and number of harmonics, (c) a single frequency domain term with the longer period and full number of harmonics, and (d) a single time domain term with the longer period. We saw a variety of diagnostic results, with only the correct generating model, (a), failing to reject any of the tests. Thus, more flexible seasonal modeling allowing for multiple components with specifiable harmonics can be a useful tool for time series modeling. Finally, we can represent seasonal components with fewer total states in this way, allowing for the user to attempt to make the bias-variance trade-off themselves instead of being forced to choose "lazy" models, which use a large number of states and incur additional variance as a result.
|
examples/notebooks/statespace_seasonal.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/FairozaAmira/AI_Programming_1_e/blob/master/Lesson06/NoneType_and_boolean.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="oqe0gWj1gMO5"
# ## NoneType
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="W6iqtYo1fKqN" outputId="32b7e978-7018-419f-e664-57f9ec845b86"
# None is the sole value of the NoneType type.
type(None)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="AAnAs1mofjxO" outputId="d9b7297c-34b1-49ee-b785-8ac66d579174"
# print() returns None, so `value` is bound to None.
value = print("abc")
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="H0QUv9ftfn1G" outputId="32e52bd5-2e95-4a3f-e877-5038e7ea3b52"
print(value)
# + [markdown] colab_type="text" id="BSAiBArUgD90"
# ## Boolean Type
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Dy4i75LRgF1e" outputId="7407bf1e-7f06-42d7-dcbb-378c6c3fc2cd"
# A comparison expression evaluates to a bool.
x = (4<5)
x
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="BvM4rbhEgIc4" outputId="13c3124b-fc0c-46bf-99ae-ac01a030ece6"
type(x)
|
Types/NoneType_and_boolean.ipynb
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .fs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (F#)
// language: F#
// name: .net-fsharp
// ---
// [this doc on github](https://github.com/dotnet/interactive/tree/master/samples/notebooks/fsharp/Docs)
//
// # Importing Packages, Libraries, and Scripts
// You can load packages into a .NET notebook from NuGet using the following syntax:
//
// ```fsharp
// #r "nuget:<package name>[,<version=package version>]"
// ```
//
// You can specify alternative sources using `#i`:
//
// ```fsharp
// #i "nuget:<nuget-source>"
// ```
//
//
// If you don't provide an explicit package version, the latest available non-preview version will be loaded.
//
// Here's an example:
#r "nuget:FSharp.Data"
// Now that the package is loaded, we can add some `using` statements and write some code.
// +
open FSharp.Data
// The URL must be a compile-time literal so it can parameterize the type provider.
[<Literal>]
let url = "https://en.wikipedia.org/wiki/2017_Formula_One_World_Championship"
// HtmlProvider generates a typed model of the page's tables at compile time.
type F1_2017 = HtmlProvider<url>
let f1Calendar = F1_2017.Load(url).Tables.``Season calendar``
// Project each calendar row to a (circuit, date) pair.
f1Calendar.Rows
|> Seq.map (fun x -> x.Circuit, x.Date)
// -
// If you want to load an assembly that's already on disk, you can do so using this syntax:
//
// ```fsharp
// #r "<path to .dll>"
// ```
// You can load an F# script (typically a `.fsx` file) into the notebook using this syntax:
//
// ```fsharp
// #load "<path to .fsx file>"
// ```
// Example:
#load "some-fsharp-script-file.fsx"
|
samples/notebooks/fsharp/Docs/Importing-packages.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv_playground
# language: python
# name: venv_playground
# ---
# # **Algoritmos Não-Supervisionados e k-means**
#
# Na aula de hoje, vamos explorar os seguintes tópicos em Python:
#
# - 1) Algoritmos não-supervisionados
# - 2) K-means
# - 3) Exemplo real
#
#
# ## **TOC:**
# Na aula de hoje, vamos explorar os seguintes tópicos em Python:
#
# - 1) [Algoritmos não-supervisionados](#nao_supervisionado)
# - 2) [K-means](#amostragem)
# - 2.1) [Construindo o modelo](#build_model)
# - 2.2) [Determinando o k](#find_k)
# - 2.3) [Algoritmo](#algorithm)
# - 2.4) [Quando uso algoritmos de clusterização](#use_case)
# - 3) [Exemplo real](#real)
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_circles
# -
# ---
# ## 1) **Algoritmos não-supervisionados** <a class="anchor" id="nao_supervisionado"></a>
#
#
# Um dos problemas que podem ser resolvidos com Machine Learning é o da __clusterização__!
#
# Este tipo de problema consiste em __agrupar__ itens semelhantes, isto é, criar __grupos__ (ou __clusters__) dos dados que são parecidos entre si.
#
# > O objetivo central é **dividir os dados em grupos distintos**, tais que **membros de cada grupo sejam similares entre si**
#
# Problemas como estes podem aparecer em diversos contextos:
#
# - Identificação de tipos de clientes parecidos, para o direcionamento de marketing;
# - Agrupamento de cidades próximas para melhor logística de entrega de produtos;
# - Identificação de padrões climáticos;
# - Identificação de genes relacionados à determinada doença;
# - Identificação de documentos semelhantes em processos legais;
#
# ...e qualquer outro problema em que você deseje **AGRUPAR DADOS SIMILARES** ou ainda **ENCONTRAR ALGUMA ESTRUTURA NOS SEUS DADOS!**
#
# Veremos agora um dos principais algoritmos de clusterização, o **k-means**
#
#
# ___
# ## 2) **K-means** <a class="anchor" id="k-means"></a>
#
# Documentação: [clique aqui!](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans)
#
# O k-means é utilizado para a determinação de um número **k de clusters em nossos dados**
#
# O primeiro passo pra aplicar o k-means é:
#
# - Determinar o número k de clusters!
#
# Por exemplo, só de olhar pros dados plotados a seguir, fica fácil de identificar 4 grupos distintos, não é mesmo?
#
# Ref: [make_blobs](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html)
#
#
# Mas, como o computador pode identificar estes grupos? É isso que o algoritmo responde!
#
# Uma vez determinado o número k de clusters, podemos construir nosso modelo!
# Primeiramente, lemos nossos dados e os armazenamos na variável X. Para os dados acima, usamos um dataset do próprio sklearn:
# ### **2.1) Construindo o modelo** <a class="anchor" id="build_model"></a>
#
# Note que temos apenas as **features** dos dados (no caso, coordenadas x e y). Iso caracteriza um problema de clusterização **não-supervisionado**: quando nossos dados **não têm targets**, apenas features!
# Temos vários argumentos na classe, mas os principais são:
#
# - n_clusters: quantos clusters queremos (o número k);
# - max_iter: é o número máximos de iterações que o algoritmo fará, se ele não convergir antes disso. É uma boa ideia não colocar um número tão grande, ou o algoritmo pode ficar bem lento. Algo da ordem de 1000, em geral é uma boa escolha.
#
# Por fim, pra fitar o modelo, fazemos:
# Em algoritmos **não supervisionados**, não existe a divisão em dados de treino e dados de teste, porque **não há o que testar!**. Queremos apenas **encontrar estrutura** nos dados!
#
# Então, basta fitar o modelo com nossos dados todos (no caso, o array X)
# Agora que o modelo está treinado, podemos fazer predições:
#
# Isto retorna uma lista com número de elementos igual ao número de pontos do dataset, e com valores entre 0 e k-1, indicando qual é o número do cluster (a contagem começa com zero).
#
# No nosso caso, como k = 4, teremos os clusters 0, 1, 2 e 3.
#
# Pra visualizarmos os clusters, podemos fazer:
# _____
# ### **2.2) - Determinando o k** <a class="anchor" id="find_k"></a>
#
# Mas e se não for tão fácil de plotar os dados para determinar o k?
#
# Pode ser que não consigamos visualizar nossos dados em 2D, se, por exemplo, tivermos mais de 3 features em nossos dados...
#
# Neste caso, podemos usar o __método do cotovelo__, que consiste em rodar o k-means várias vezes, para diferentes valores de k, e depois plotar um gráfico com a **inércia** de cada uma das rodadas.
#
# A inércia também é chamada de **WCSS** (Within-Cluster-Sum-of-Squares), isto é, "soma de quadrados intra-cluster", que é calculada como a soma das distâncias (ao quadrado) entre os pontos e os centróides dos clusters.
#
# Quanto menor o WCSS, mais eficiente foi a clusterização, **mas até certo ponto!**
#
# Conforme o número de clusters (k) aumenta, o WCSS diminui, sendo mínimo quando cada ponto é seu próprio cluster isolado (o que não é nada útil, pois se cada ponto for um cluster, não há clusterização alguma!).
#
# Assim, o que queremos não é encontrar um k que minimize o WCSS, mas sim um k a partir do qual o WCSS **para de decrescer tão rapidamente!**
#
# Quando encontramos este k, encontramos o número ideal de clusters!
#
# Ao plotarmos o WCSS (inércia) em função de k, o que buscaremos será então o valor de k onde **o gráfico deixa de ser tão inclinado**. Esses pontos são visualizados como "quinas", ou **cotovelos** no gráfico -- e daí vem o nome do método!
#
# Para aplicar o método, fazemos:
#
#
#
# O valor de k mais adequado é aquele em que o gráfico tem uma "quina" bem abrupta: no exemplo acima, k = 4, como já sabíamos!
#
#
#
#
# ___
# ### Vamos fazer o exemplo com mais features
# **Aplicando o método do cotovelo...**
# **Vamos tentar separadamente** $k = 3$ e $k = 6$
# As projeções em duas dimensões mostram que $k=6$ de fato é a melhor escolha! (O que faz sentido, pois nossos dados artificiais foram preparados para conter 6 clusters!)
# ____
# + [markdown] tags=[]
# ### **2.3) Algoritmo** <a class="anchor" id="algorithm"></a>
#
# Uma vez escolhido o número de clusters, o k-means segue as seguintes etapas:
#
# - 1) k pontos são escolhidos aleatoriamente como sendo os centroides dos clusters (centroide é o centro do cluster);
#
# - 2) Para cada ponto, vamos calcular qual é a distância entre ele e os k centroides. Aquele centroide que estiver mais perto, será o cluster ao qual este ponto pertencerá. Fazemos isso para todos os pontos!
#
# - 3) Ao fim do passo 2, teremos k clusters, cada um com seu centroide, e todos os pontos pertencerão a determinado cluster!
#
# - 4) Uma vez que temos os clusters, calculamos qual é de fato o centro de cada um deles. Isso é feito tomando a média da posição de todos os pontos;
#
# - 5) Após determinar os novos k centroides, repetimos o processo!
#
# - 6) E o processo se repete até que os centroides não mudem mais. Quando esta convergência for alcançada (ou após o número determinado de iterações), o algoritmo termina!
#
# <center><img src="https://stanford.edu/~cpiech/cs221/img/kmeansViz.png" width=700></center>
#
# <center><img src="https://miro.medium.com/max/1280/1*rwYaxuY-jeiVXH0fyqC_oA.gif" width=500></center>
#
# <center><img src="https://miro.medium.com/max/670/1*JUm9BrH21dEiGpHg76AImw.gif" width=500></center>
# -
# _____
# ### **2.4) Quando uso algoritmos de clusterização** <a class="anchor" id="use_case"></a>
#
#
# De certa forma, algoritmos de clusterização podem ser vistos como classificadores, uma vez que os clusters podem caracterizar um grupo, ou uma classe.
#
# No entanto, há uma diferença bem importante entre problemas de classificação e clusterização:
#
# - **Problemas de classificação** em geral são **supervisionados**, isto é, os dados que utilizamos têm tanto as features como os **targets**. Em outras palavras, neste tipo de problema, sabemos de antemão quais são as classes de interesse!
#
# - **Problemas de clusterização**, por outro lado, são **não-supervisionados**, ou seja, os dados **não têm** targets, temos apenas as features! O nosso objetivo é justamente descobrir **alguma estrutura de agrupamento** nos dados, mas sem qualquer informação prévia quanto aos grupos a serem formados.
#
# Foi exatamente o caso do nosso exemplo: nós tínhamos apenas as **features** dos dados (f1, f2, etc), e **nenhuma** informação quanto aos grupos que seriam formados.
#
# Foi só depois que fizemos a análise exploratória dos dados (plot), que pudemos identificar alguma estrutura (4 clusters), para então aplicar o k-means!
#
# No segundo caso, só pudemos determinar o número de clusters de forma segura utilizando o **método do cotovelo**.
#
# Assim sendo, via de regra, a utilização ou não de algoritmos de clusterização, além do tipo de problema, depende dos **dados disponíveis**:
#
# - Se os dados são previamente classificados (temos **features e targets**), a melhor estratégia é usar **algoritmos de classificação** (regressão logística, árvores, SVM, etc.);
#
# - Mas, se os dados não são previamente classificados (temos **apenas as features**), a melhor estratégia é usar **algoritmos de clusterização** (k-means, [DBSCAN](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html#sklearn.cluster.DBSCAN), [hierarchical clustering](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html#sklearn.cluster.AgglomerativeClustering), etc.)
#
# ### **2.5 Diferentes algoritmos de clusterização**
# Ref: [Sobre clusterização](https://scikit-learn.org/stable/modules/clustering.html)
# ____
# ## 3) **Exemplo real** <a class="anchor" id="real"></a>
#
# Vamos agora a um exemplo real de um problema de clusterização onde podemos usar o k-means?
#
# Usaremos [dados sobre faculdades americanas](https://www.kaggle.com/flyingwombat/us-news-and-world-reports-college-data?select=College.csv)
#
# Os dados são referentes ao ano de 1995, e contêm as seguintes variáveis:
#
# - Apps: número de aplicações (inscrições para processo seletivo) recebidas
#
# - Accept: número de aplicações aceitas
#
# - Enroll: número de novos alunos admitidos naquele ano
#
# - Top10perc: porcentagem de novos alunos que vieram das top 10% melhores escolas de ensino médio
#
# - Top25perc: porcentagem de novos alunos que vieram das top 25% melhores escolas de ensino médio
#
# - F.Undergrad: número de alunos de graduação que estudam em período integral
#
# - P.Undergrad: número de alunos de graduação que estudam meio período
#
# - Outstate: valor da anual da faculdade para alunos fora do estado
#
# - Room.Board: custos anuais de aluguel
#
# - Books: gasto estimado com livros
#
# - Personal: gastos pessoais de custo de vida
#
# - PhD: porcentagem do corpo docente com doutorado
#
# - Terminal: porcentagem do corpo docente com o maior grau de escolaridade possível
#
# - S.F.Ratio: razão de número de estudantes/corpo docente
#
# - perc.alumni: porcentagem de ex-alunos que doaram dinheiro para a universidade
#
# - Expend: gastos institucionais com os estudantes
#
# - Grad.Rate: taxa de graduação
#
#
# Obs.: originalmente, a base é **supervisionada**, com o target "Private" que indica se a faculdade é pública ou privada. Mas, como estamos estudando problemas não-supervisionados, eu modifiquei a base para não conter esta label.
#
#
# Vamos explorar os dados para ver se encontramos alguma estrutura neles?
# +
# Read the college dataset into a DataFrame.
# NOTE(review): the original instruction says the file should live at
# '../datasets/college.csv', but the code reads 'data/college.csv' —
# confirm which path matches this repo's layout before running.
df = pd.read_csv('data/college.csv')
# -
# Agora é sua vez!
#
# Lembre de tudo o que vimos no primeiro mês de curso, e **explore os dados**!
#
# Faça quantos gráficos você quiser, formule e responda quantas perguntas você quiser!
# Não é muito óbvio, de imediato, que existem dois grupos distintos, né?
#
# Mas vamos treinar nosso algoritmo para vermos se é possível encontrar alguma estrutura quando todas as features são consideradas juntas!
# Agora que nosso modelo foi criado, será que ganhamos algum novo insight olhando pro pairplot?
# ____
|
semana_6/kmeans.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=false editable=false
# Initialize Otter
import otter
grader = otter.Notebook()
# -
import matplotlib.pyplot as plt
import numpy as np
# + [markdown] deletable=false editable=false
# **Question 1.** Assign `x` to the smallest prime number.
#
# <!--
# BEGIN QUESTION
# name: q1
# manual: false
# points: 2
# -->
# -
x = ...
# + deletable=false editable=false
grader.check("q1")
# + [markdown] deletable=false editable=false
# <!-- BEGIN QUESTION -->
#
# **Question 2.** Visualize the answer
#
# <!--
# BEGIN QUESTION
# name: q2
# manual: true
# -->
# -
## solution ##
plt.plot(...);
# <!-- END QUESTION -->
#
#
#
# This cell is not part of a question.
y = 3
# + [markdown] deletable=false editable=false
# **Question 3.** Define `square` and assign `nine` to 3 squared.
#
# <!--
# BEGIN QUESTION
# name: q3
# private_points: 2
# -->
# +
def square(x):
    """Return x squared (student exercise: replace the ellipses)."""
    # The `...` placeholders are intentional — this is an autograder
    # (otter) template; students fill in the implementation.
    y = ...
    ...
nine = square(3)  # expected to equal 9 once square is implemented
# + deletable=false editable=false
grader.check("q3")
# + [markdown] deletable=false editable=false
# <!-- BEGIN QUESTION -->
#
# **Question 4.** What does equilateral mean?
#
# <!--
# BEGIN QUESTION
# name: q4
# points: 2
# manual: True
# -->
# -
# _Type your answer here, replacing this text._
# + [markdown] deletable=false editable=false
# <!-- END QUESTION -->
# +
# this isn't part of a question
# it's here to make sure that we get a MD cell above to close the export
# of question 4
# + [markdown] deletable=false editable=false
# **Question 5.** Approximate the area and circumference of a circle with radius 3.
#
# <!--
# BEGIN QUESTION
# name: question5
# -->
# +
# Rough approximation of pi used throughout this exercise.
pi = 3.14
if True:
    # `...` placeholder: the student is expected to define `radius` and
    # `area` here. NOTE(review): until then this cell raises NameError
    # at the print call below — intentional for the autograder template.
    ...
    print('A circle with radius', radius, 'has area', area)
def circumference(r):
    """Return the circumference of a circle of radius r (to be implemented)."""
    # Next, define a circumference function.
    pass
# +
# This question has no tests.
# + [markdown] deletable=false editable=false
# <!-- BEGIN QUESTION -->
#
# **Question 6.** Write something
#
# _This question has a custom prompt below, so that prompt should be in the output. It also has no solution!_
#
# <!--
# BEGIN QUESTION
# name: question6
# manual: true
# -->
# -
# _Write your thing here._
# + [markdown] deletable=false editable=false
# <!-- END QUESTION -->
#
# <!-- BEGIN QUESTION -->
#
# **Question 7:** What is the answer?
#
# <!--
# BEGIN QUESTION
# name: q7
# manual: true
# -->
# -
# _Type your answer here, replacing this text._
# + [markdown] deletable=false editable=false
# <!-- END QUESTION -->
#
# **Question 8:** Test intercell seeding by generating 10 random $N(4,2)$ numbers.
#
# <!--
# BEGIN QUESTION
# name: q8
# -->
# -
z = ...
z
# + deletable=false editable=false
grader.check("q8")
# -
# **You're done!**
#
# + [markdown] deletable=false editable=false
# ## Submission
#
# Make sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. The cell below will generate a zip file for you to submit. **Please save before exporting!**
# + deletable=false editable=false
# Save your notebook first, then run this cell to export your submission.
grader.export("generate-gradescope.ipynb")
# -
#
|
test/test-assign/gs-correct/student/generate-gradescope.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementing a Neural Network
#
# **ATTENTION**: When hand in your homework, all print info has to be kept which means that output results of each cell could be seen in your submissions. Homework scores will be judged by those print info. So show us the best result in your experiment. More details can be found in assignment1_tutor.pdf.
#
# In this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the Fashion_mnist dataset.
# +
# A bit of setup
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from MS325.classifiers.neural_net import TwoLayerNet
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
def rel_error(x, y):
    """Return the maximum elementwise relative error between x and y.

    The denominator is clamped at 1e-8 so near-zero entries do not
    cause division by zero.
    """
    diff = np.abs(x - y)
    scale = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return (diff / scale).max()
# -
# We will use the class `TwoLayerNet` in the file `MS325/classifiers/neural_net.py` to represent instances of our network. The network parameters are stored in the instance variable `self.params` where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.
# +
# Create a small net and some toy data to check your implementations.
# Note that we set the random seed for repeatable experiments.
# Dimensions of the toy problem used by the helpers below.
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5

def init_toy_model():
    """Build a small TwoLayerNet with a fixed RNG seed for reproducibility."""
    np.random.seed(0)
    return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)

def init_toy_data():
    """Return a deterministic toy dataset: 5 samples x 4 features, 3 classes."""
    np.random.seed(1)
    features = 10 * np.random.randn(num_inputs, input_size)
    labels = np.array([0, 1, 2, 2, 1])
    return features, labels
net = init_toy_model()
X, y = init_toy_data()
# -
# # Forward pass: compute scores
# Open the file `MS325/classifiers/neural_net.py` and look at the method `TwoLayerNet.loss`. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters.
#
# Implement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.
# +
# Forward pass only: with no labels supplied, net.loss returns the raw
# class scores instead of (loss, grads).
scores = net.loss(X)
print('Your scores:')
print(scores)
print()
print('correct scores:')
# Reference scores for the seeded toy model/data above.
correct_scores = np.asarray([
  [-0.81233741, -1.27654624, -0.70335995],
  [-0.17129677, -1.18803311, -0.47310444],
  [-0.51590475, -1.01354314, -0.8504215 ],
  [-0.15419291, -0.48629638, -0.52901952],
  [-0.00618733, -0.12435261, -0.15226949]])
print(correct_scores)
print()
# The difference should be very small. We get < 1e-7
print('Difference between your scores and correct scores:')
print(np.sum(np.abs(scores - correct_scores)))
# -
# # Forward pass: compute loss
# In the same function, implement the second part that computes the data and regularizaion loss.
# +
# With labels supplied, net.loss returns (loss, grads); compare the scalar
# loss against the known-correct reference value for the toy setup.
loss, _ = net.loss(X, y, reg=0.05)
correct_loss = 1.30378789133
# should be very small, we get < 1e-12
print('Difference between your loss and correct loss:')
print(np.sum(np.abs(loss - correct_loss)))
# -
# # Backward pass
# Implement the rest of the function. This will compute the gradient of the loss with respect to the variables `W1`, `b1`, `W2`, and `b2`. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:
# +
from MS325.gradient_check import eval_numerical_gradient
# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
# Numerically check every analytic gradient returned by the backward pass.
loss, grads = net.loss(X, y, reg=0.05)
# these should all be less than 1e-8 or so
for param_name in grads:
    # NOTE(review): f ignores its argument — eval_numerical_gradient
    # presumably perturbs net.params[param_name] in place and re-evaluates
    # the loss; confirm against MS325/gradient_check.py.
    f = lambda W: net.loss(X, y, reg=0.05)[0]
    param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)
    print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
# -
# # Train the network
# To train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function `TwoLayerNet.train` and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement `TwoLayerNet.predict`, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.
#
# Once you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.
# +
net = init_toy_model()
stats = net.train(X, y, X, y,
learning_rate=1e-1, reg=5e-6,
num_iters=100, verbose=False)
print('Final training loss: ', stats['loss_history'][-1])
# plot the loss history
plt.plot(stats['loss_history'])
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
# -
# # Load the data
# Now that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite Fashion_mnist data so we can use it to train a classifier on a real dataset.
# +
from MS325.datasets.fashion_mnist.utils import mnist_reader
def get_mnist_data(num_training=58000, num_validation=2000, num_test=10000):
    """
    Load the Fashion_mnist dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.
    """
    # Load the raw Fashion_mnist data
    X_train, y_train = mnist_reader.load_mnist('MS325/datasets/fashion_mnist/data/fashion', kind='train')
    X_test, y_test = mnist_reader.load_mnist('MS325/datasets/fashion_mnist/data/fashion', kind='t10k')
    # Work in float64 so the mean subtraction below is well-defined.
    X_train = X_train.astype('float64')
    X_test = X_test.astype('float64')
    # Subsample the data: rows [num_training, num_training + num_validation)
    # become the validation split; the first num_training rows are training.
    mask = list(range(num_training, num_training + num_validation))
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = list(range(num_training))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]
    # Normalize the data: subtract the mean image
    # NOTE(review): casting the float mean to uint8 truncates its fractional
    # part before subtraction — confirm this is intentional; keeping the
    # float mean is the more common preprocessing choice.
    mean_image = np.mean(X_train, axis=0).astype('uint8')
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    # Reshape data to rows
    X_train = X_train.reshape(num_training, -1)
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)
    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_mnist_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# -
# # Train a network
# To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.
# +
input_size = 28*28
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=1000, batch_size=200,
learning_rate=1e-4, learning_rate_decay=0.95,
reg=0.25, verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)
# -
# # Debug the training
# With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
#
# One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
#
# Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.
# +
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')  # fixed typo: 'Clasification'
plt.legend()  # without this call the 'train'/'val' line labels are never shown
plt.show()
# -
# # Tune your hyperparameters
#
# **What's wrong?**. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.
#
# **Tuning**. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, numer of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.
#
# **Approximate results**. You should aim to achieve a classification accuracy of greater than 70% on the validation set. Our best network gets over 73% on the validation set.
#
# **Experiment**: Your goal in this exercise is to get as good of a result on Fashion_mnist as you can, with a fully-connected Neural Network. For every 1% above 73% on the Test set we will award you with one extra bonus point. Feel free to implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.).
# +
best_net = None  # store the best model into this
#################################################################################
# Tune hyperparameters with a grid search over hidden size, learning rate and   #
# regularization strength; keep the model with the best validation accuracy     #
# in best_net.                                                                  #
#################################################################################
# Renamed from `hidden_size` to avoid shadowing the scalar hidden_size
# defined in the earlier training cell.
hidden_sizes = [100, 125]
learning_rates = [5e-3]
regularization_strengths = [0.25, 0.025]
results = {}       # (hidden_size, lr, reg) -> validation accuracy
best_val_acc = 0   # (duplicate `best_net = None` initializer removed)
for hs in hidden_sizes:
    for lr in learning_rates:
        for reg in regularization_strengths:
            net = TwoLayerNet(input_size, hs, num_classes)
            # Train the candidate network on the full training split.
            stats = net.train(X_train, y_train, X_val, y_val, num_iters=10000, batch_size=200,
                              learning_rate=lr, learning_rate_decay=0.98, reg=reg, verbose=False)
            val_acc = (net.predict(X_val) == y_val).mean()
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                best_net = net
            results[(hs, lr, reg)] = val_acc
# Print out results.
for hs, lr, reg in sorted(results):
    val_acc = results[(hs, lr, reg)]
    print ('hs %d lr %e reg %e val accuracy: %f' % (hs, lr, reg, val_acc))
print ('best validation accuracy achieved during cross-validation: %f' % best_val_acc)
#################################################################################
#                              END OF YOUR CODE                                 #
#################################################################################
# +
from MS325.vis_utils import visualize_grid
# Visualize the weights of the network
def show_net_weights(net):
    """Visualize the first-layer weights of `net` as a grid of 28x28 tiles."""
    # Each column of W1 corresponds to one hidden unit; reshape to
    # (units, 28, 28) so visualize_grid can tile them.
    weights = net.params['W1'].reshape(28, 28, -1).transpose(2, 0, 1)
    grid = visualize_grid(weights, padding=3).astype('uint8')
    plt.imshow(grid)
    plt.gca().axis('off')
    plt.show()
show_net_weights(best_net)
# -
# # Run on the test set
# When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.
#
# **We will give you extra bonus point for every 1% of accuracy above 73%.**
test_acc = (best_net.predict(X_test) == y_test).mean()
print('Test accuracy: ', test_acc)
|
two_layer_net.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="BW1qX94d-ot9"
# ## Hands-on Exercise: Mall Customers
#
# Source:
# https://www.kaggle.com/vjchoudhary7/customer-segmentation-tutorial-in-python
#
# `Background`: Suppose you own a supermarket mall and, through membership cards, you have some basic data about your customers like Customer ID, age, gender, annual income and spending score.
# Spending Score is something you assign to the customer based on your defined parameters like customer behavior and purchasing data.
#
# `Problem Statement`: You own the mall and want to understand the customers like who can be easily converge [Target Customers] so that the sense can be given to marketing team and plan the strategy accordingly.
#
#
# + id="ZchWW6hT_QqF"
import pandas as pd
from sklearn.metrics import silhouette_score
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# + id="EmJcYVablYI2"
# + [markdown] id="GmZPOrhV_rte"
# ### 1. Data Load In and Preprocess
# Load data from the link: https://raw.githubusercontent.com/XiaomengYan/MachineLearning_dataset/main/Mall_Customers.csv. The analysis is based on three columns: "Age", "Annual Income (k$)" and "Spending Score (1-100)" of the data frame.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="vyTw0jlS_MmM" executionInfo={"elapsed": 758, "status": "ok", "timestamp": 1618787700409, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="b659bde5-7522-470d-cd05-a8381baee196"
url = 'https://raw.githubusercontent.com/XiaomengYan/MachineLearning_dataset/main/Mall_Customers.csv'
dataset = pd.read_csv(url)
dataset.head(10) #Printing first 10 rows of the dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="jIMecaE5_9x7" executionInfo={"elapsed": 692, "status": "ok", "timestamp": 1618787700964, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="a31b384f-5b47-4ebe-e5a8-2bb39b124d2d"
X_market = dataset[['Age', 'Annual Income (k$)', 'Spending Score (1-100)']]
X_market.head()
# + [markdown] id="BQzxsJxI_g0R"
# ### 2. K-Means
#
# + [markdown] id="mR_RaXl_FiIj"
# Perform a K-Means algorithm towards the data frame (consisting of three columns mentioned above) and interpret the clustering results.
#
#
# #### 2.1 Select the parameter $K$
#
# To determine the best number of clusters parameter $K$, we will use grid search and select the parameter $K$ based on two metrics described below:
# 1. ELBOW method : ELBOW method looks at the total within-cluster sum of squares (WSS) as a function of number of clusters. The location of a knee in the plot is usually considered as an indicator of the appropriate number of clusters because it means that adding another cluster does not improve much better the partition.
#
# 2. Silhouette method: The Silhouette method measures the quality of a clustering and determines how well each point lies within its cluster. Its value ranges from -1 to 1, where a high value indicates that the object is well matched to its own cluster and poorly matched to neighboring clusters.
#
# + id="RvJGYxzOAxXB"
from sklearn.cluster import KMeans
# We assume the maximum sensible number of clusters is 10. Fit K-Means for
# each candidate K and record both the inertia (for the elbow method) and
# the average silhouette score, so the two criteria can be compared.
n_clusters = list(range(2, 11))  # candidate numbers of clusters
clusters_inertia = []            # inertia of each fitted model (elbow data)
s_scores = []                    # average silhouette score of each model
for k in n_clusters:
    model = KMeans(n_clusters=k, init='k-means++').fit(X_market)
    clusters_inertia.append(model.inertia_)
    s_scores.append(silhouette_score(X_market, model.labels_))
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="rrmfYCY6Axyw" executionInfo={"elapsed": 490, "status": "ok", "timestamp": 1618787703952, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="d5e2c5c2-9a74-488e-8dc1-09b032752a60"
# Visualizing the ELBOW method to get the optimal value of K.
# ELBOW: plots the sum of squared distances of samples to their closest cluster center.
# NOTE(review): seaborn (`sns`) is only imported further down in this notebook
# (DBSCAN section) -- confirm it is in scope before this cell runs.
fig, ax = plt.subplots(figsize=(12,5))
# Positional x/y arguments are the legacy seaborn lineplot call style.
ax = sns.lineplot(n_clusters, clusters_inertia, marker='o', ax=ax)
ax.set_title("Elbow method")
ax.set_xlabel("number of clusters")
ax.set_ylabel("clusters inertia")
# Mark the two plausible "elbow" candidates (K=5 and K=6).
ax.axvline(5, ls="--", c="red")
ax.axvline(6, ls="--", c="red")
plt.grid()
plt.show()
# + [markdown] id="29-cLb_iBdld"
# There is no clear "elbow" visible. A choice of 5 or 6 clusters seems to be fair. Let's see the Silhouette score.
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="1CtaF9mkBEFz" executionInfo={"elapsed": 617, "status": "ok", "timestamp": 1618787705981, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="7f180ad3-8b8c-4a36-8f16-a46aa2aefe2c"
# The silhouette plot displays a measure of how close each point in one cluster is to points in the neighboring clusters.
# This measure has a range of [-1, 1]; higher is better.
fig, ax = plt.subplots(figsize=(12,5))
ax = sns.lineplot(n_clusters, s_scores, marker='o', ax=ax)
ax.set_title("Silhouette score method")
ax.set_xlabel("number of clusters")
ax.set_ylabel("Silhouette score")
# Mark the K chosen for comparison with the elbow plot above.
ax.axvline(6, ls="--", c="red")
plt.grid()
plt.show()
# + [markdown] id="NGHbbawPBklW"
# Silhouette score method indicates the best options would be respectively 6 or 5 clusters. Let's compare by fixing 5 for the analysis.
# + [markdown] id="UJypdP2eI5G0"
# #### 2.2 Perform K-Means and Visualize the Output
# + id="jzQCW0psBhb6"
# Fit the final K=5 model and attach each point's cluster label to a copy of the data.
KM_5_clusters = KMeans(n_clusters=5, init='k-means++').fit(X_market) # initialise and fit K-Means model
KM5_clustered = X_market.copy()
KM5_clustered.loc[:,'Cluster'] = KM_5_clusters.labels_ # append labels to points
# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="0KmH9-H1Bnqq" executionInfo={"elapsed": 687, "status": "ok", "timestamp": 1618787710259, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="a5faf0c7-08db-4efc-bfbe-77204f0b69fa"
# Two 2-D projections of the clustering; blue squares are the fitted centers.
# Center columns follow X_market's column order:
# 0 = Age, 1 = Annual Income (k$), 2 = Spending Score (1-100).
fig1, (axes) = plt.subplots(1,2,figsize=(12,5))
scat_1 = sns.scatterplot('Annual Income (k$)', 'Spending Score (1-100)', data=KM5_clustered,
                hue='Cluster', ax=axes[0], palette='Set1', legend='full')
sns.scatterplot('Age', 'Spending Score (1-100)', data=KM5_clustered,
                hue='Cluster', palette='Set1', ax=axes[1], legend='full')
axes[0].scatter(KM_5_clusters.cluster_centers_[:,1],KM_5_clusters.cluster_centers_[:,2], marker='s', s=40, c="blue")
axes[1].scatter(KM_5_clusters.cluster_centers_[:,0],KM_5_clusters.cluster_centers_[:,2], marker='s', s=40, c="blue")
plt.show()
# + [markdown] id="vUHJJeg_Bshg"
# K-Means algorithm generated the following 5 clusters:
#
# * clients with low annual income and high spending score
# * clients with medium annual income and medium spending score
# * clients with high annual income and low spending score
# * clients with high annual income and high spending score
# * clients with low annual income and low spending score
#
# There are no distinct groups in terms of customers' age.
# + [markdown] id="rYcTsoH5BwWZ"
# Below there is a 3D projection of 5 generated clusters. It is not very helpful in terms of a visualisation in a static mode but if you run the code in an interactive environment (e.g. Spyder) you can rotate it!
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="J8LSkDigBpaR" executionInfo={"elapsed": 549, "status": "ok", "timestamp": 1618787713268, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="71c28ee0-37af-4a9f-c157-2468e86093f2"
import plotly as py
import plotly.graph_objs as go
def tracer(db, n, name):
    """Build a Plotly 3-D scatter trace for the points of cluster *n* in *db*."""
    # Filter once instead of repeating the boolean mask per axis.
    members = db[db['Cluster'] == n]
    return go.Scatter3d(
        x=members['Age'],
        y=members['Spending Score (1-100)'],
        z=members['Annual Income (k$)'],
        mode='markers',
        name=name,
        marker=dict(size=5),
    )
# Build one trace per K-Means cluster and assemble an interactive 3-D figure.
trace0 = tracer(KM5_clustered, 0, 'Cluster 0')
trace1 = tracer(KM5_clustered, 1, 'Cluster 1')
trace2 = tracer(KM5_clustered, 2, 'Cluster 2')
trace3 = tracer(KM5_clustered, 3, 'Cluster 3')
trace4 = tracer(KM5_clustered, 4, 'Cluster 4')
data = [trace0, trace1, trace2, trace3, trace4]
layout = go.Layout(
    title = 'Clusters by K-Means',
    scene = dict(
            xaxis = dict(title = 'Age'),
            yaxis = dict(title = 'Spending Score'),
            zaxis = dict(title = 'Annual Income')
        )
    )
fig = go.Figure(data=data, layout=layout)
# iplot renders inline; rotation only works in an interactive frontend.
py.offline.iplot(fig)
# + [markdown] id="3ZK7nvDjB7GT"
# ### 3. Gaussian Mixture Models
# + [markdown] id="n1EW5TnTJJbO"
# Fit a Gaussian Mixture Model to the dataset with 5 clusters and print out the labels.
# + colab={"base_uri": "https://localhost:8080/"} id="xQlBloMRB1EA" executionInfo={"elapsed": 182, "status": "ok", "timestamp": 1618787716933, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="a69440d1-2b9f-4a95-dd8f-3d5c1effed6a"
# Fit a 5-component Gaussian mixture (fixed seed for reproducibility) and
# predict a component label for every customer.
from sklearn.mixture import GaussianMixture
GMM_model = GaussianMixture(n_components=5, random_state=42).fit(X_market)
labels_gmm = GMM_model.predict(X_market)
labels_gmm
# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="BiQe_FlIP3yc" executionInfo={"elapsed": 639, "status": "ok", "timestamp": 1618787718298, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="38cf87fb-2b8c-4849-d13a-85a35817521f"
# Same two 2-D projections as for K-Means, coloured by GMM component.
GMM_clustered = X_market.copy()
GMM_clustered.loc[:,'Cluster'] = labels_gmm
fig1, (axes) = plt.subplots(1,2,figsize=(12,5))
scat_1 = sns.scatterplot('Annual Income (k$)', 'Spending Score (1-100)', data=GMM_clustered,
                hue='Cluster', ax=axes[0], palette='Set1', legend='full')
sns.scatterplot('Age', 'Spending Score (1-100)', data=GMM_clustered,
                hue='Cluster', palette='Set1', ax=axes[1], legend='full')
plt.show()
# + [markdown] id="jbeT7XqVCNkO"
# ### 4. DBSCAN
#
# In this section, a DBSCAN algorithm is utilized to cluster the Mall Customer dataset. The DBSCAN algorithm has two parameters, `min_samples` and `eps`. We can use a heatmap to look at which configurations of eps and min_samples produce our desired number of clusters and Silhouette score.
# + id="BQvVs1XzCD1V"
# Grid-search DBSCAN over all (eps, min_samples) pairs, recording the
# resulting cluster count and silhouette score for each combination.
from itertools import product
from sklearn.cluster import DBSCAN
from sklearn import metrics
eps_values = np.arange(8,12.75,0.25) # eps values to be investigated
min_samples = np.arange(3,10) # min_samples values to be investigated
DBSCAN_params = list(product(eps_values, min_samples))
no_of_clusters = []
sil_score = []
for p in DBSCAN_params:
    DBS_clustering = DBSCAN(eps=p[0], min_samples=p[1]).fit(X_market)
    # np.unique counts distinct labels, so DBSCAN's noise label (-1),
    # when present, is counted as one "cluster" here.
    no_of_clusters.append(len(np.unique(DBS_clustering.labels_)))
    sil_score.append(metrics.silhouette_score(X_market, DBS_clustering.labels_))
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="88FTk_FWCgib" executionInfo={"elapsed": 1037, "status": "ok", "timestamp": 1618787726862, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="d96c13dc-8dd3-4d3c-d4f8-41f62e910a6c"
import seaborn as sns
# The heatplot below shows how many clusters were generated by the algorithm for the respective parameter combinations.
tmp = pd.DataFrame.from_records(DBSCAN_params, columns =['Eps', 'Min_samples'])
tmp['No_of_clusters'] = no_of_clusters
pivot_1 = pd.pivot_table(tmp, values='No_of_clusters', index='Min_samples', columns='Eps')
fig, ax = plt.subplots(figsize=(12,6))
sns.heatmap(pivot_1, annot=True,annot_kws={"size": 16}, cmap="YlGnBu", ax=ax)
ax.set_title('Number of clusters')
plt.show()
# + [markdown] id="ShIXgOVJCt7s"
# As the heatplot above shows, the number of clusters vary from 4 to 17.
# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="So-F4GKTCqSq" executionInfo={"elapsed": 1067, "status": "ok", "timestamp": 1618787729757, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="812f07a7-d809-4814-ce24-dc9d85bd7142"
# The heatplot below shows the Silhouette score for the respective parameter combinations.
tmp = pd.DataFrame.from_records(DBSCAN_params, columns =['Eps', 'Min_samples'])
tmp['Sil_score'] = sil_score
pivot_1 = pd.pivot_table(tmp, values='Sil_score', index='Min_samples', columns='Eps')
fig, ax = plt.subplots(figsize=(18,6))
sns.heatmap(pivot_1, annot=True, annot_kws={"size": 10}, cmap="YlGnBu", ax=ax)
plt.show()
# + [markdown] id="dh8ENFTsCwwN"
# Global maximum is 0.26 for eps=12.5 and min_samples=4.
# + [markdown] id="0ppJF6H9Cwmt"
# ### 5. Hierarchical Clustering
# + [markdown] id="r57Gif6qTLCk"
# Perform hierarchical clustering and plot the dendrogram.
# + colab={"base_uri": "https://localhost:8080/"} id="fRE4x3McR8mC" executionInfo={"elapsed": 188, "status": "ok", "timestamp": 1618788327341, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="52b530a9-2869-4a7b-8a7d-e57e94157d4a"
from scipy.cluster import hierarchy
from sklearn.cluster import AgglomerativeClustering
# Define a clustering model with 5 clusters (matching the K-Means choice).
# linkage has multiple options: 'ward', 'single', 'complete', 'average'
HC_model = AgglomerativeClustering(n_clusters=5, linkage ='ward')
pred_labels = HC_model.fit_predict(X_market)
pred_labels
# + colab={"base_uri": "https://localhost:8080/", "height": 351} id="0zD8CEyoS2nT" executionInfo={"elapsed": 794, "status": "ok", "timestamp": 1618788328693, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="7ebc6393-c3ac-4121-c51f-d379d0665be0"
# Attach the hierarchical-cluster labels and show the same two 2-D
# projections used for the other models.
HC_clustered = X_market.copy()
HC_clustered.loc[:,'Cluster'] = pred_labels
fig1, (axes) = plt.subplots(1,2,figsize=(12,5))
scat_1 = sns.scatterplot('Annual Income (k$)', 'Spending Score (1-100)', data=HC_clustered,
                hue='Cluster', ax=axes[0], palette='Set1', legend='full')
sns.scatterplot('Age', 'Spending Score (1-100)', data=HC_clustered,
                hue='Cluster', palette='Set1', ax=axes[1], legend='full')
# + colab={"base_uri": "https://localhost:8080/", "height": 352} id="baBptWXGCslz" executionInfo={"elapsed": 5359, "status": "ok", "timestamp": 1618788334819, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11420231255337685715"}, "user_tz": 300} outputId="0d1e7a93-db56-412c-a80e-cf0edc6a316a"
# Full dendrogram of a Ward-linkage hierarchy (recomputed here with scipy).
plt.figure(figsize=(10,5))
dendrogram = hierarchy.dendrogram(hierarchy.linkage(X_market, method = "ward"))
plt.title('Dendrogram')
plt.ylabel('Euclidean distances')
# + id="eKaWqA-qDHNl"
|
Mod3_3-ML-USL-clustering-Hands-on-Exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Microsoft App Store Data Sci Project
# ## Opening / Viewing Data
# +
#importing libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# -
# Read the Microsoft Store apps dataset from the working directory.
df = pd.read_csv('msft.csv')
df.head()
# interesting..., what is ₹? (the Indian rupee sign; prices are cleaned further down)
df.tail()
# +
# checking shape (rows, columns)
df.shape
# +
# checking dtypes
df.dtypes
# -
# ## Data Organizing/Cleaning
df.head()
# +
# making all columns lowercase for easier data access
df.columns = df.columns.str.lower()
# -
df.head()
# +
# renaming 'name' to 'app' and 'no of people rated' to 'num_of_people_rated'
df.rename(columns={'name':'app','no of people rated' : 'num_of_people_rated'}, inplace=True)
# -
df.head()
# +
# change category datatype into a categorical variable
df.category = df.category.astype('category')
# +
# Convert date into datetime format
df.date = pd.to_datetime(df.date)
# -
# add a year column derived from the date
df['year'] = pd.DatetimeIndex(df.date).year
# +
# add a month-name column derived from the date
df['month'] = pd.DatetimeIndex(df.date).month_name()
# -
df.head()
# validate changes
df.dtypes
df.head()
# +
# Checking for NaN values in all rows
df.isnull().sum()
# -
# using boolean masking to see which rows contain NaN
df[df.isnull().any(axis=1)]
# drop rows with any NaN/None values
df.dropna(axis='rows', how='any', inplace=True)
# +
# change year from float64 to int64 and month to category, reassign to original dataframe
df = df.astype({'year': 'int64', 'month' : 'category'})
# -
df.isna().sum()
# checking for duplicates
df.duplicated().value_counts()
df.head()
# +
# round ratings to remove half-star values like 3.5
# (note: np.round rounds halves to the nearest EVEN integer: 3.5 -> 4.0, 2.5 -> 2.0)
df.rating = df.rating.apply(lambda x: np.round(x))
# -
df.head()
# +
# check if price values are all free or not
df.price.unique()
# +
# change value 'Free' to '0' , if int64 it will affect all the other values
df.loc[df['price'] == 'Free', 'price'] = '0'
# -
df.price.unique()
# +
# remove ₹ (the Indian Rupee currency sign) and surrounding whitespace with strip
df.price = df.price.apply(lambda x: x.replace("₹","").strip())
# -
df.price.unique()
# +
# substitute , with "" for numbers like 2,599.00
df.price = df.price.apply(lambda x: x.replace(",",""))
# -
df.price.unique()
# +
# convert everything to a float
df.price = df.price.astype('float32')
# -
# validate the values in the series
df.price.sample(10)
df.head()
# +
# 1) create two dataframes to analyze (free apps vs priced apps)
# 2) view sample sizes; they need to be equal to each other for a fair comparison
free_apps = df.loc[df['price'] == 0]
priced_apps = df.loc[df['price'] != 0]
# -
free_apps.rating.value_counts().sort_index()
priced_apps.rating.value_counts().sort_index()
# +
# because the number of apps are different, we will cut into two dataframes that will have equal sample sizes
# for free apps and priced apps
# +
# Build a balanced free-apps frame: start with the first 143 rows of the
# rating==1.0 subset, then append the first 143 rows of each remaining rating.
empty_df = free_apps[free_apps['rating'] == 1.0].reset_index(drop=True).iloc[0:143, :]
for i in [2.0, 3.0, 4.0, 5.0]:
    free_df = free_apps[free_apps['rating'] == i].reset_index(drop=True).iloc[0:143, :]
    empty_df = pd.concat([empty_df, free_df])
display(empty_df)
# +
# we can now reset the index of the new data frame
no_cost_df = empty_df.reset_index(drop=True)
# +
# validating that there is an equal sample size to analyze
no_cost_df['rating'].value_counts().sort_index()
# -
no_cost_df
# +
# apply the same approach to the priced apps, reduced to 10 rows per rating.
# NOTE(review): the rating==3.0 subset is NOT sliced to 10 rows like the
# others -- confirm it already contains exactly 10 rows.
priced_df = df[(df['price'] != 0) & (df['rating'] == 3.0)].reset_index(drop=True)
for i in [1.0, 2.0, 4.0, 5.0]:
    df_to_concat = df[(df['price'] != 0) & (df['rating'] == i)].reset_index(drop=True).iloc[0:10, :]
    priced_df = pd.concat([priced_df, df_to_concat])
# -
priced_df = priced_df.sort_values('rating').reset_index(drop=True)
# +
display(priced_df.head())
priced_df.shape
# +
# because price is in terms of indian rupees, need to multiply by 0.014 to obtain US dollars
# to both original df, and priced_df 'price' columns
# NOTE(review): 0.014 is a hard-coded INR->USD rate, and np.round drops the
# cents (whole-dollar prices) -- confirm both are intended.
# -
df['price'] = np.round(df['price'] * 0.014)
priced_df['price'] = np.round(priced_df['price'] * 0.014)
priced_df.head()
# +
# We now have three dataframes, the original dataframe w/ free + priced (df), free app dataframe(no_cost_df)
# and priced app dataframe (priced_df)
display(df.head())
df.shape
# +
# free app dataframe (no_cost_df)
display(no_cost_df.head())
no_cost_df.shape
# +
# and priced app dataframe (priced_df)
display(priced_df.head())
priced_df.shape
# -
# ## Visualizations / Analysis
# ### For a certain rating, which rating has the highest number of people involved in rating the app?
# +
# total number of people who rated a FREE app, grouped by rating value
free_apps_sum = no_cost_df.groupby('rating')['num_of_people_rated'].sum().reset_index()
# -
free_apps_sum
# +
# total number of people who rated a PRICED app, grouped by rating value
priced_apps_sum = priced_df.groupby('rating')['num_of_people_rated'].sum().reset_index()
# -
priced_apps_sum
# +
# mapping from numeric rating to a human-readable star category
rating_map = {1.0:'One Star', 2.0:'Two Star', 3.0:'Three Star', 4.0:'Four Star', 5.0:'Five Star'}
# +
# using the .map series function to make these ratings into categories
free_apps_sum.rating = free_apps_sum.rating.map(rating_map)
# -
# using the .map series function to make these ratings into categories
priced_apps_sum.rating = priced_apps_sum.rating.map(rating_map)
# +
# side-by-side bar charts: total rating counts for free vs priced apps
sns.set(style='darkgrid',palette = 'Paired',color_codes = True)
fig, axes = plt.subplots(1,2, figsize = (10,4), sharex = True, sharey = False )
ax1 = sns.barplot('rating','num_of_people_rated', data=free_apps_sum ,orient="v", edgecolor = 'black', ax=axes[0])
ax2 = sns.barplot('rating','num_of_people_rated', data=priced_apps_sum, orient="v", edgecolor = 'black', ax=axes[1])
ax1.set(title='Num. of Total Ratings For Free Apps', xlabel = 'Type of App', ylabel = 'Total Count')
ax2.set(title='Num. of Total Ratings For Priced Apps', xlabel = 'Type of App', ylabel = 'Total Count')
plt.tight_layout()
plt.show()
# -
# ### What is the average rating of each free/priced app? What are the max, min, and standard deviation values?
# +
# Replace numeric ratings with their star-category names in both frames
# (note: this mutates no_cost_df / priced_df for all later cells).
no_cost_df.rating = no_cost_df.rating.map(rating_map)
# -
priced_df.rating = priced_df.rating.map(rating_map)
# +
# Statistical Data on Free Apps
print('Statistical Data on Free Apps')
display(no_cost_df.groupby('rating')['num_of_people_rated'].describe().sort_values('mean',
                                                                ascending = False).reset_index())
# Statistical Data on Priced Apps
print('Statistical Data on Priced Apps')
display(priced_df.groupby('rating')['num_of_people_rated'].describe().sort_values('mean',
                                                                ascending = False).reset_index())
# Using Box Plots to display the distributions
sns.set(style='darkgrid', palette = 'Paired', color_codes = True)
fig, axes = plt.subplots(1,2, figsize = (13, 4))
ax1 = sns.boxplot('rating', 'num_of_people_rated',data = no_cost_df, ax=axes[0], linewidth = 2.0)
ax2 = sns.boxplot('rating', 'num_of_people_rated',data = priced_df, ax=axes[1], linewidth = 2.0)
ax1.set(title='Distribution of The Total Num. Of Ratings For A Type of Free App',
        xlabel = 'Type of App', ylabel = 'Overall Num. of Ratings')
ax2.set(title='Distribution of The Total Num. Of Ratings For A Type of Priced App',
        xlabel = 'Type of App', ylabel = 'Overall Num. of Ratings')
fig.tight_layout()
# -
# ### List the 5 months that have the highest average number of ratings for free/priced apps?
# +
# Free Apps groupby which groups the data by month, and
# returns the highest average number of ratings for a free app in a specific month.
no_cost_df.groupby('month')['num_of_people_rated'].agg(['mean']).sort_values('mean',
                                                        ascending = False).reset_index().head()
# +
# Priced Apps groupby which groups the data by month, and
# returns the highest average number of ratings for a priced app in a specific month.
priced_df.groupby('month')['num_of_people_rated'].agg(['mean']).sort_values('mean',
                                                        ascending = False).reset_index().head()
# -
# ### Does the number of ratings for an app increase when an app is priced?
# +
# Using the priced_df data to graph price against rating counts
price_num_df = priced_df[['price', 'num_of_people_rated']]
# -
# sort the values by price and reset the index so the frame is graphable
price_num_df = price_num_df.sort_values('price').reset_index(drop=True)
# +
# visualization: line plot with a standard-deviation band per price point
sns.set(style = 'darkgrid')
figure, ax = plt.subplots(figsize = (10,5))
ax1 = sns.lineplot('price', 'num_of_people_rated', data=price_num_df,
             color = "#2297E0", ax = ax, err_style = 'band', ci = 'sd')
ax1.set_ylim(0,900)
ax1.set_xlim(1,70)
ax1.set_xlabel('Price Of App', fontdict={'fontsize' : 13})
ax1.set_ylabel('Num. of Ratings', fontdict={'fontsize': 13})
ax1.set_title('Price of App vs. Num. of Ratings', fontdict = {'fontsize': 13})
plt.show()
# -
# ### For a certain year, which year accumulated the highest num. of ratings for free/priced apps?
# +
# free apps: total number of people who rated a free app, per year
year_free_df = no_cost_df.groupby('year')['num_of_people_rated'].sum().reset_index()
# -
year_free_df
# +
# priced apps: total number of people who rated a priced app, per year
year_price_df = priced_df.groupby('year')['num_of_people_rated'].sum().reset_index()
# -
year_price_df
# +
# visualization: yearly rating totals side by side (free vs priced)
sns.set(style='darkgrid',palette = 'Paired',color_codes = True)
fig, axes = plt.subplots(1,2, figsize = (10,5), sharex = True, sharey = False )
ax1 = sns.barplot('year','num_of_people_rated', data= year_free_df ,orient="v", edgecolor = 'black', ax=axes[0])
ax2 = sns.barplot('year','num_of_people_rated', data= year_price_df, orient="v", edgecolor = 'black', ax=axes[1])
ax1.set(title='Num. of Total Ratings For Free Apps In A Certain Year', xlabel = 'Year', ylabel = 'Total Count')
ax2.set(title='Num. of Total Ratings For Priced Apps In A Certain Year', xlabel = 'Year', ylabel = 'Total Count')
plt.tight_layout()
plt.show()
# -
# ### In a specific category, which free app had the highest/lowest number of ratings?
# +
# used google translator because the max values had apps in another language
import googletrans
from googletrans import Translator
# -
# NOTE(review): googletrans calls a web API at runtime -- requires network access.
translate = Translator()
# Subset of the balanced free-app frame with just the columns analysed here.
category_df = no_cost_df[['app', 'category', 'num_of_people_rated']]
# +
# BUG FIX: the previous version applied .max()/.min() to the 'app' and
# 'num_of_people_rated' columns at once; pandas takes each column's extreme
# independently, so the alphabetically last/first app NAME was paired with an
# unrelated rating COUNT. Instead, sort once by the count (stable sort) and
# keep, per category, the row with the largest / smallest count.
_by_count = no_cost_df.sort_values('num_of_people_rated', kind='mergesort')
max_values_free_apps = (_by_count.drop_duplicates('category', keep='last')
                        [['category', 'app', 'num_of_people_rated']]
                        .sort_values('category').reset_index(drop=True))
min_values_free_apps = (_by_count.drop_duplicates('category', keep='first')
                        [['category', 'app', 'num_of_people_rated']]
                        .sort_values('category').reset_index(drop=True))
# -
# Translate the (possibly non-English) app names to English for display.
max_values_free_apps.app = max_values_free_apps.app.apply(lambda x: translate.translate(x).text)
# +
# displaying dataframes, min and then max
print('Free Apps In a Specific Category With The Lowest Number of Ratings')
display(min_values_free_apps)
print('Free Apps In a Specific Category With The Highest Number of Ratings')
display(max_values_free_apps)
# -
# ### In a specific category, which priced app had the highest/lowest number of ratings?
# +
# subset of the priced apps with the columns used in this section
category_price_df = priced_df[['app', 'category','price','num_of_people_rated']]
# -
# finding that there are only three categories in the priced dataset
category_price_df.category.unique()
# +
# For each priced-app category, keep the single app with the HIGHEST number
# of ratings. The original if/elif chain had three byte-identical branches
# (one per hard-coded category index); one loop over the unique categories
# does the same work and also handles any number of categories.
best_price_app = pd.DataFrame()
for cat in category_price_df.category.unique():
    # Rows of this category, sorted so the most-rated app comes first.
    top_row = (category_price_df
               .loc[category_price_df['category'] == cat,
                    ['app', 'category', 'num_of_people_rated']]
               .sort_values('num_of_people_rated', ascending=False)
               .head(1))
    best_price_app = pd.concat([best_price_app, top_row])
# +
# For each priced-app category, keep the single app with the LOWEST number
# of ratings. As with best_price_app above, the original three identical
# if/elif branches are collapsed into one loop over the unique categories.
low_price_app = pd.DataFrame()
for cat in category_price_df.category.unique():
    # Rows of this category, sorted so the least-rated app comes first.
    bottom_row = (category_price_df
                  .loc[category_price_df['category'] == cat,
                       ['app', 'category', 'num_of_people_rated']]
                  .sort_values('num_of_people_rated', ascending=True)
                  .head(1))
    low_price_app = pd.concat([low_price_app, bottom_row])
# +
# Show the per-category extremes computed above.
print('Priced Apps In a Specific Category With The Lowest Number of Ratings')
display(low_price_app.reset_index(drop=True))
print('Priced Apps In a Specific Category With The Highest Number of Ratings')
display(best_price_app.reset_index(drop=True))
# -
# ### Final Thoughts
# While there could be other statistical questions that could be asked of this dataset, I think this is the best I could do with the data that I've been provided. However, I never took a statistics course, and I think my approach to editing the datasets could have been much more precise if I had statistical knowledge. Moreover, there are probably other statistical visualizations that I could have done, but I'm certain that the only visualizations I could do are categorical data plots.
#
# Lastly, I'm glad that I finished my first dataset project. I learned a lot of new skills and topics from this project, and I hope to apply what I learned to other projects that I will do in the future.
#
# Thank you for viewing this project :).
#
#
|
msft/msft.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bokeh Inverted Triangle Glyph
# +
# Draw four inverted-triangle glyphs that differ only in fill alpha, line
# dash style and line width, then save the figure as HTML and PNG.
from bokeh.plotting import figure, output_file, show
from bokeh.models import Range1d
from bokeh.io import export_png
fill_color = '#a6bddb'
line_color = '#1c9099'
output_file("../../figures/inverted_triangle.html")
p = figure(plot_width=400, plot_height=400)
p.inverted_triangle(x=0,y=0,size=100, fill_alpha=1,fill_color=fill_color,
          line_alpha=1, line_color=line_color, line_dash='dashed', line_width=5)
p.inverted_triangle(x=0,y=1,size=100, fill_alpha=0.8, fill_color=fill_color,
          line_alpha=1, line_color=line_color, line_dash='dotdash', line_width=8)
p.inverted_triangle(x=1,y=0,size=100, fill_alpha=0.6, fill_color = fill_color,
          line_alpha=1, line_color=line_color, line_dash='dotted', line_width=13)
p.inverted_triangle(x=1,y=1,size=100, fill_alpha=0.4, fill_color = fill_color,
          line_alpha=1, line_color=line_color, line_dash='solid', line_width=17)
# Clamp the view to the glyphs while allowing some panning within bounds.
p.x_range = Range1d(-0.5,1.5, bounds=(-1,2))
p.y_range = Range1d(-0.5,1.5, bounds=(-1,2))
show(p)
# Trailing semicolon suppresses the notebook echo of export_png's return value.
export_png(p, filename="../../figures/inverted_triangle.png");
|
visualizations/bokeh/notebooks/glyphs/inverted_triangle.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Project-local helpers; hello_world() is presumably a quick import smoke test.
from utils import hello_world, load_dataframes_fannie
hello_world()
# # Setting Up Quandl Interactions
#
# Quandl's About Us:
#
# >Our mission is to extract value from the world's data.
# We believe that data is one of the most important resources of the 21st century. It is ubiquitous. Data now arises from virtually everything humans and businesses do, every minute of every day. But how much of it has tangible value to capital markets? That is what we ask ourselves every day.
# >
# >We want to inspire customers to make new discoveries and incorporate them into trading strategies. We find opportunities in places people otherwise wouldn’t think about. We believe there are new and better ways to understand the complex information that creates markets and market movement.
# >
# >Quandl’s diverse team members have backgrounds in finance, technology, data science and astrophysics, just to name a few. Our team is purpose-built to extract signal from a world increasingly full of noise. We uncover valuable data that is difficult to find, shape it and share it with our clients.
#
# We believe that data, and alternative data in particular, is going to become the primary driver of active investment performance over the next decade.
#
# https://www.quandl.com/
#
# Examples Using: https://www.quandl.com/data/ZILLOW-Zillow-Real-Estate-Research
# # !pip install quandl
import quandl
# NOTE(review): the API key is assigned twice; the second assignment wins,
# so "YOURAPIKEY" is never used -- the first line can likely be removed.
quandl.ApiConfig.api_key = "YOURAPIKEY"
quandl.ApiConfig.api_key = '<KEY>'
# Fetch one Zillow time series by its Quandl code.
df = quandl.get('ZILLOW/M1300_MPPRSF')
df.head()
# NOTE(review): `plt` has no visible import in this notebook -- confirm
# matplotlib.pyplot is imported before this cell runs.
fig = plt.plot(df['Value'])
# ## Bulk Download of Entire Data Set
#
# >An entire time-series dataset's data can be downloaded.
# >
# >For example, to download the dataset ZEA: quandl.bulkdownload("ZEA")
# >
# >This call will download an entire time-series dataset as a ZIP file.
# +
# Takes a While, be careful
#quandl.bulkdownload("ZILLOW")
# -
# |Code | Description |
# |-----| ------|
# | C10012_ZHVI2B | Zillow Home Value Index (City): Zillow Home Value Index - Two Bedrooms - Pembroke, Merrimack, NH |
# | C10012_ZHVI3B | Zillow Home Value Index (City): Zillow Home Value Index - Three Bedrooms - Pembroke, Merrimack, NH |
# | C10012_ZHVI4B | Zillow Home Value Index (City): Zillow Home Value Index - Four Bedrooms - Pembroke, Merrimack, NH |
# | C10012_ZHVIAH | Zillow Home Value Index (City): Zillow Home Value Index - All Homes - Pembroke, Merrimack, NH |
# | C10012_ZHVIBT | Zillow Home Value Index (City): Zillow Home Value Index - Bottom Tier - Pembroke, Merrimack, NH |
# | C10012_ZHVIBTY | Zillow Home Value Index (City): Zillow Home Value Index - Bottom Tier Year-Over-Year - Pembroke, Merrimack, NH |
# | C10012_ZHVIMT | Zillow Home Value Index (City): Zillow Home Value Index - Middle Tier - Pembroke, Merrimack, NH |
# | C10012_ZHVIMTY | Zillow Home Value Index (City): Zillow Home Value Index - Middle Tier - Year-Over-Year - Pembroke, Merrimack, NH |
# | C10012_ZHVISF | Zillow Home Value Index (City): Zillow Home Value Index - Single-Family Residence - Pembroke, Merrimack, NH |
# | C10012_ZHVITT | Zillow Home Value Index (City): Zillow Home Value Index - Top Tier - Pembroke, Merrimack, NH |
# | C10012_ZHVITTY | Zillow Home Value Index (City): Zillow Home Value Index - Top Tier - Year-Over-Year - Pembroke, Merrimack, NH |
# | C10012_ZRIAH | Zillow Home Value Index (City): Zillow Rental Index - All Homes - Pembroke, Merrimack, NH |
# | C10012_ZRIAHMF | Zillow Home Value Index (City): Zillow Rental Index - All Homes Plus Multi-Family - Pembroke, Merrimack, NH |
# | C10012_ZRIFAH | Zillow Home Value Index (City): Zillow Rental Index Per Square Foot - All Homes - Pembroke, Merrimack, NH |
# | C10012_ZRIMFRR | Zillow Home Value Index (City): Zillow Rental Index - Multi-Family Residence - Pembroke, Merrimack, NH |
# | C10012_ZRISFRR | Zillow Home Value Index (City): Zillow Rental Index - Single-Family Residence - Pembroke, Merrimack, NH |
#
# Fetch a second Zillow series by code and plot its values.
df = quandl.get('ZILLOW/C10012_ZRISFRR')
df.columns
# NOTE(review): `plt` has no visible import in this notebook -- confirm
# matplotlib.pyplot is imported before this cell runs.
fig = plt.plot(df['Value'])
|
notebooks/DataLoading-Quandl.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score,recall_score,f1_score
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTETomek
from sklearn.model_selection import KFold,GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
import warnings
warnings.filterwarnings('ignore')
# Load the accident data; the target is Accident_Severity, everything else is a feature.
df = pd.read_csv('mydata.csv')
y=df.Accident_Severity
df = df.drop(['Accident_Severity'], axis=1)
X = df.values
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)
logistic_regression=LogisticRegression(class_weight="balanced",solver="newton-cg")
from sklearn.metrics import recall_score
from sklearn.metrics import make_scorer
# Micro-averaged recall as the model-selection metric.
recall_scorer = make_scorer(recall_score,average="micro")
# +
# 10-fold grid search over the regularisation strength C.
# NOTE(review): the search is fitted on the full X, y -- including the rows
# held out in X_test above -- confirm that is intended.
grid={"C":[0.00001,0.0001]}
logistic_regression_cv=GridSearchCV(logistic_regression,grid,cv=10,scoring=recall_scorer)
logistic_regression_cv.fit(X,y)
# -
print("tuned hpyerparameters :(best parameters) ",logistic_regression_cv.best_params_)
kf = KFold(n_splits=10)
def normalize(z):
    """Min-max scale a pandas Series to [0, 1]; returns an (n, 1) numpy array."""
    scaler = preprocessing.MinMaxScaler()
    # The scaler expects a 2-D input, so present the values as one feature column.
    column = z.values.reshape(-1, 1)
    return scaler.fit_transform(column)
# Min-max normalize the first nine feature columns of the frame in place.
for i in range(0,9):
    df.iloc[:,i]=normalize(df.iloc[:,i])
# +
# Grid search cross validation
# 10-fold CV: refit a fresh logistic regression per fold and print
# train/test accuracy side by side to gauge over/under-fitting.
print("Train \t\t\t Test")
for train_index, test_index in kf.split(X):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    # C=0.0001 comes from the earlier grid search; "balanced" reweights classes.
    logistic_regression=LogisticRegression(class_weight="balanced",C=0.0001,solver="newton-cg")
    logistic_regression.fit(X_train,y_train)
    y_ = logistic_regression.predict(X_test)
    y_pred = logistic_regression.predict(X_train)
    print(accuracy_score(y_train,y_pred),"\t",accuracy_score(y_test,y_))
# -
# Confusion matrix for the LAST CV fold only (severity classes 1-3).
confusion_matrix(y_test,y_,labels=[1,2,3])
clf = svm.SVC(class_weight="balanced")
# +
# Tune the SVM's C and gamma with 5-fold CV on micro-averaged recall.
grid={"C":[30,100],"gamma":[0,1]}
clf_cv=GridSearchCV(clf,grid,cv=5,scoring= recall_scorer)
clf_cv.fit(X,y)
# -
clf_cv.best_params_
# Let's check out SVM
# +
# Same 10-fold protocol as above, now with a linear-kernel SVM.
print("Train \t\t\t Test")
for train_index, test_index in kf.split(X):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    clf = svm.SVC(gamma=0.001,C=0.1,class_weight="balanced",kernel="linear")
    clf.fit(X_train, y_train)
    y_ = clf.predict(X_test)
    y_pred = clf.predict(X_train)
    print(accuracy_score(y_train,y_pred),"\t",accuracy_score(y_test,y_))
# -
# Confusion matrix for the last SVM fold (severity classes 1-3).
confusion_matrix(y_test,y_,labels=[1,2,3])
|
Logistic Regession.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Notas a la realización de la práctica
#
# Este documento ofrece algunas indicaciones para la realización de la práctica, que podéis tener en cuenta para evitar errores y para saber cómo recuperar en ciertas situaciones.
# ## Docker, imágenes y contenedores
#
# Docker permite tener imágenes de pequeñas máquinas virtuales. Estas imágenes son conjuntos de ficheros "inertes" que se pueden utilizar como base para lanzar **contenedores**. Los contenedores ejecutan los ficheros de una imagen para tener una instancia en ejecución (contenedor).
# Para saber las imágenes de las que disponemos, podemos hacer `docker images`:
#
# ```bash
# $ docker images
# REPOSITORY TAG IMAGE ID CREATED SIZE
# backupnode-image latest ea8e71ca5aee 2 weeks ago 2.18GB
# namenode-image latest e39efb460f05 2 weeks ago 2.18GB
# datanode-image latest 9f3e878eea85 2 weeks ago 2.18GB
# hadoop-base latest 67efd4a138e9 2 weeks ago 2.06GB
# ```
#
# Esto muestra las distintas imágenes, su tamaño y cuándo se crearon.
# Por otro lado, los contenedores en ejecución se pueden comprobar con `docker ps`. Algunos contenedores estarán en ejecución y otros parados. Si queremos ver todos los contenedores que se ejecutaron en algún momento y que no han sido eliminados, podemos hacer `docker ps -a`:
#
# ```bash
# $ docker ps -a
# CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
# e18adb742526 jupyter/scipy-notebook "tini -g -- start-no…" 4 days ago Up 5 minutes 0.0.0.0:8888->8888/tcp practicas-notebook-1
# c090609eead9 hadoop-base "/bin/bash" 2 weeks ago Exited (0) 4 days ago timelineserver
# f9436f4c87d9 backupnode-image "su hdadmin -c 'JAVA…" 2 weeks ago Exited (143) 4 days ago backupnode
# f8f93fd2a451 datanode-image "/inicio.sh" 2 weeks ago Exited (137) 4 days ago datanode4
# 3d75f2614562 datanode-image "/inicio.sh" 2 weeks ago Exited (137) 4 days ago datanode1
# 243ba043e9e7 namenode-image "/inicio.sh" 2 weeks ago Exited (137) 4 days ago namenode
# ```
#
# A veces un contenedor no funciona como debería, o hemos cometido un error. Para continuar con la práctica, lo más sencillo es eliminarlo (pararlo primero si no está parado con `docker stop contenedor`) con `docker rm contenedor`.
# Si por el contrario el contenedor está correctamente creado, podemos ponerlo a ejecutar como se especifica en la práctica. En este caso se lanzan a la vez el namenode y los cuatro datanodes:
#
# ```bash
# docker start namenode datanode{1..4}
# ```
#
# Al hacer `start`, los servicios del nodo comienzan a ejecutarse de nuevo, y si todos los nodos se ejecutan más o menos al mismo tiempo, la infraestructura de Hadoop hace que se empiecen a ver unos a otros y se pueda utilizar el clúster.
# ## Servicios dentro de un contenedor
# Una vez un contenedor se ha iniciado (con `start` como arriba), se puede "entrar dentro" haciendo uso de la construcción `docker exec` y ejecutando el *shell* de Linux, el bash, teniendo en cuenta pasarle los parámetros `-ti` para que el contenedor mantenga una terminal abierta y podamos interactuar con él:
#
# ```bash
# $ docker exec -ti namenode bash
# ```
#
# El contenedor nos mostrará un *prompt* que nos indicará que estamos dentro:
#
# ```bash
# root@namenode:~#
# ```
#
# Nótese que el contenedor se establece en el superusuario por defecto (`root`), y en algún momento deberemos pasar al usuario `hdadmin`:
#
# ```bash
# # su - hdadmin
# ```
#
# (el "`-`" se utiliza para que el sistema operativo lea el fichero `~/.bashrc`, que si recordáis, se crea en un momento dado con los valores de las variables que indican dónde está instalado Hadoop).
#
# Esto hará que cambie lo que el *shell* nos muestra y ahora sea:
#
# ```bash
# hdadmin@namenode:~$
# ```
#
# Los servicios que se ejecutan dentro de cada contenedor que pertenecen a Hadoop son normalmente servicios java, por lo que con la utilidad `jps` se puede comprobar cuáles tenemos en ejecución. Si no tenemos los servicios que se esperan, puede haber un error en la ejecución o en cómo se construyó el contenedor. Como usuario `hdadmin` (**ESTO ES MUY IMPORTANTE**), hay que ejecutar la utilidad `jps`:
#
# ```bash
# $ docker exec -ti datanode1 bash
# root@datanode1:/# su - hdadmin
# hdadmin@datanode1:~$ jps
# 306 Jps
# 101 NodeManager
# 40 DataNode
# ```
#
# Nótese cómo se cambia al usuario `hdadmin`, y después se ejecuta `jps`. Nótese también cómo en el datanode tienen que estar funcionando el `NodeManager` (gestor del nodo de cara a YARN para asignación de recursos) y el `DataNode` (programa que permite a HDFS almacenar bloques de ficheros).
#
# En el caso del namenode habrá más servicios en ejecución.
#
# Para salir del contenedor hay que escribir `exit` dos veces: una saldrá del usuario `hdadmin` de vuelta hacia `root`, y el último `exit` saldrá del `exec` hacia el contenedor (aunque el contenedor continúa ejecutándose en segundo plano):
#
# ```bash
# hdadmin@namenode:~$ exit
# logout
# root@namenode:/# exit
# exit
# usuario-linux-host:dir$
# ```
#
# ## Comprobación por web de los servicios
#
# Recordad que Hadoop ofrece unas direcciones que se pueden acceder a través del navegador. Podéis usarlas para comprobar los nodos disponibles, el estado de HDFS, etc. A veces la actualización de nuevos nodos o nuevos caídos puede tardar hasta 5 o 10 minutos.
#
# - http://localhost:9870 interfaz web del HDFS
# - http://localhost:8088 interfaz web de YARN
# ## Comprobación de los servicios con las utilidades de Hadoop
#
# Normalmente dentro del `namenode`, se pueden usar las utilidades de Hadoop para comprobar los nodos conectados y el estado de HDFS:
#
# ```bash
# $ docker exec -ti namenode bash
# root@namenode:/# su - hdadmin
# hdadmin@namenode:~$ hdfs dfsadmin -report
# ...
# hdadmin@namenode:~$ yarn node -list
# ...
# ```
#
# ## Refresco de los nodos al añadir o eliminar
#
# En puntos de la práctica, tenéis que añadir y eliminar nodos. Para forzar que Hadoop los reconsidere, hay dos funciones a utilizar, siempre como el usuario `hdadmin`:
#
# ```bash
# hdadmin@namenode:~$ hdfs dfsadmin -refreshNodes
# hdadmin@namenode:~$ yarn rmadmin -refreshNodes
# ```
|
practicas/p1/notas-p1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:bitathon]
# language: python
# name: conda-env-bitathon-py
# ---
def getBinaryNumTarget(text):
    """Map the 'Consumer disputed?' value to a binary target: 1 for 'Yes', else 0."""
    return 1 if text == 'Yes' else 0
import pandas as pd

# Load the raw complaints dump; low_memory=False avoids mixed-dtype chunked parsing.
df = pd.read_csv("bitathon.csv", low_memory=False)
df.tail()
# Keep only the narrative text and the dispute flag, dropping incomplete rows.
df = df[['Consumer complaint narrative', 'Consumer disputed?']].dropna()
# Encode the 'Yes'/'No' dispute flag as 1/0.
df['Consumer disputed?'] = df['Consumer disputed?'].apply(getBinaryNumTarget)
df.head()
df.isna().sum()
import spacy.cli
spacy.cli.download("en_core_web_sm")
import en_core_web_sm
nlp = en_core_web_sm.load()
# Fixed: the frame has no 'dispute' column — the flag lives in 'Consumer disputed?',
# so the original df['dispute'] raised a KeyError.
df['Consumer disputed?'].value_counts()
import spacy
from spacy.util import minibatch, compounding
import random
def load_data(train_data, limit=0, split=0.8):
    """Shuffle (text, label) pairs and split them into train/dev portions.

    The shuffle happens in place. ``limit=0`` keeps every pair (``[-0:]`` is a
    full slice); any other limit keeps only the last *limit* shuffled pairs.
    Labels are expanded into spaCy textcat dicts with mutually exclusive
    DISPUTED / NOT DISPUTED flags.
    """
    random.shuffle(train_data)
    kept = train_data[-limit:]
    texts, labels = zip(*kept)
    cats = []
    for y in labels:
        flag = bool(y)
        cats.append({"NOT DISPUTED": not flag, "DISPUTED": flag})
    cut = int(len(kept) * split)
    return (texts[:cut], cats[:cut]), (texts[cut:], cats[cut:])
def evaluate(tokenizer, textcat, texts, cats):
    """Score the classifier's DISPUTED predictions against gold annotations.

    Tokenizes *texts*, runs them through *textcat*, and compares each doc's
    DISPUTED score (threshold 0.5) with the matching gold dict in *cats*.
    The NOT DISPUTED label is skipped, since the classes are exclusive.
    Returns {"textcat_p", "textcat_r", "textcat_f"}.
    """
    true_pos = 0.0
    false_pos = 1e-8  # tiny epsilon keeps the divisions below well-defined
    false_neg = 1e-8
    true_neg = 0.0
    tokenized = (tokenizer(text) for text in texts)
    for gold, doc in zip(cats, textcat.pipe(tokenized)):
        for label, score in doc.cats.items():
            if label == "NOT DISPUTED" or label not in gold:
                continue
            predicted = score >= 0.5
            actual = gold[label] >= 0.5
            if predicted and actual:
                true_pos += 1.0
            elif predicted and not actual:
                false_pos += 1.0
            elif not predicted and not actual:
                true_neg += 1
            else:
                false_neg += 1
    precision = true_pos / (true_pos + false_pos)
    recall = true_pos / (true_pos + false_neg)
    if (precision + recall) == 0:
        f_score = 0.0
    else:
        f_score = 2 * (precision * recall) / (precision + recall)
    return {"textcat_p": precision, "textcat_r": recall, "textcat_f": f_score}
nlp = spacy.load('en_core_web_sm')
# Strip line breaks, 'XXXX' redaction markers and punctuation noise from the text.
df.replace(to_replace='[\n\r\tXXXX#''"")(!,;/]', value='', regex=True, inplace=True)
df.head()
# Build a CNN text-classifier pipe with mutually exclusive labels and append it.
textcat = nlp.create_pipe("textcat", config={"exclusive_classes": True, "architecture": "simplecnn"})
nlp.add_pipe(textcat, last=True)
nlp.pipe_names
textcat.add_label("DISPUTED")
textcat.add_label("NOT DISPUTED")
# Fixed: use the frame's real column names — there are no 'complaint'/'dispute'
# columns, so the original row['complaint'] / row['dispute'] raised a KeyError.
df['tuples'] = df.apply(
    lambda row: (row['Consumer complaint narrative'], row['Consumer disputed?']), axis=1)
train = df['tuples'].tolist()
# Number of passes over the training data.
n_iter = 20
# Disabling other components
# Freeze every pipe except the text classifier for the duration of training.
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']
with nlp.disable_pipes(*other_pipes): # only train textcat
    optimizer = nlp.begin_training()
    print("Training the model...")
    print('{:^5}\t{:^5}\t{:^5}\t{:^5}'.format('LOSS', 'P', 'R', 'F'))
    # Performing training
    for i in range(n_iter):
        losses = {}
        # Batch size grows from 4 toward 32 by a factor of 1.001 per batch.
        batches = minibatch(train_data, size=compounding(4., 32., 1.001))
        for batch in batches:
            texts, annotations = zip(*batch)
            # drop=0.2 applies dropout during the update to reduce overfitting.
            nlp.update(texts, annotations, sgd=optimizer, drop=0.2,
                       losses=losses)
        # Calling the evaluate() function and printing the scores
        # Evaluate with the moving-average weights rather than the raw ones.
        with textcat.model.use_params(optimizer.averages):
            scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
        print('{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}'
              .format(losses['textcat'], scores['textcat_p'],
                      scores['textcat_r'], scores['textcat_f']))
# Testing the model(Case of Non Dispute i.e 0)
test_text = "Transunion is continuing to report accounts have been recently deleted due to cancelled contract.Disputed via phone as well as fax and online."
doc=nlp(test_text)
doc.cats
# Testing the model(Case of Dispute i.e 1)
test_text1 = "I have been subjected to harrassment calls. The caller shows no mercy in disturbing throughout the day"
doc=nlp(test_text1)
doc.cats
# Persist the averaged-weight model to disk.
with nlp.use_params(optimizer.averages):
    nlp.to_disk('finalmodelmini')
|
Notebook/Training Notebook Bitathon.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Today's topics
# 1 - The DataFrame
#
# 2 - Index, and Slice
#
# 3 - Manipulating Columns
#
# 4 - Merging
#
# 5 - Calculating Unique And Missing Values
#
# 6 - Groupby
#
# Import pandas package
import pandas as pd
import numpy as np
import os
print(os.getcwd())
print(os.listdir())
# +
#os.chdir('C:/Users/koser/Documents/Floatint Courses/Machine Learning From Scratch/C01L10- Theatrics with dataframe')
# -
print(os.getcwd())
print(os.listdir())
# #### 1. The DataFrame
# loading Csv File
df=pd.read_csv('./data/phone_data.csv',sep=',') # Important to put dot
df.shape
df.head()
df.tail(10)
df.info()
# +
# example=pd.DataFrame({'a' : [2,34,5,67,np.nan],'b' : [45,43,56,56,65] })
# example.info()
# -
df.describe() # Columns numerical value
# #### 2. Index, and Slice
#
# .loc Subset by Column names
#
# .iloc subset by column number
df.head()
# Subset columns
# print(df['item'])
print(df.item.head())
# Works for one column - Series not for data frame
df[['item','duration','month']].head(7)
# #### .loc and .iloc function to the rescue
df.loc[:8,['item','month','duration']] # Can only Subset by column names [row,column]
# +
#df.loc[:8:2,:2]
# -
df.iloc[:8,:4] # Can only subset by column number # numpy indexing
# #### Boolean Indexing
# and &
#
# or |
#
# Equal to ==
#
# Not equal to !=
#
# Not in ~
#
# Equals: ==
#
# Not equals: !=
#
# Greater than, less than: > or <
#
# Greater than or equal to >=
#
# Less than or equal to <=
# Boolean Index
condition = (df.item == 'call') & (df.network == 'Tesco') # and = &
df[condition].head()
condition = (df.item == 'call') # & (df.network == 'Tesco') # and = &
df[~condition].head()
condition = (df.item == 'call') | (df.network == 'Tesco') # or = |
df[condition].head()
# #### 3 - Manipulating Columns
df.shape
# Add new column
# df['batch2'] = 1
df['new_column']=df['duration'] * 3
df.head()
#df.network.unique().sort
df.head()
# Remove column
df.drop(['new_column'],axis = 1, inplace=True)
df.head()
df.describe()
# +
#df.drop(['month','index'],axis=1,inplace=True)
# -
# Basic Operation
print(df['duration'].sum())
# df['duration'].sum()
# df['duration'].min
df['duration'].max()
df.head()
# +
# lambda function - Row wise operation
def new_duration(network, duration):
    """Return the adjusted call duration: doubled on the 'world' network, quadrupled otherwise."""
    multiplier = 2 if network == 'world' else 4
    return duration * multiplier
df['new_duration'] = df.apply(lambda x: new_duration(x['network'], x['duration']), axis=1)
# -
df.tail(10)
df.head()
# Sort values
df.sort_values('duration', ascending=False) # Ascending descending
df.sort_values(['duration','network'], ascending=[False,True]).head()
df.info()
# Date manipulation
sum(pd.to_datetime(df['date']) >= '2015-01-01')
# Directive
#
# <b>%a</b> Weekday as locale’s abbreviated name. Sun, Mon, …, Sat (en_US)
# So, Mo, …, Sa (de_DE)
#
# <b>%A</b> Weekday as locale’s full name. Sunday, Monday, …, Saturday (en_US)
# Sonntag, Montag, …, Samstag (de_DE)
#
# <b>%w</b> Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. 0, 1, 2, 3, 4, 5, 6
#
# <b>%d</b> Day of the month as a zero-padded decimal number. 01, 02, …, 31
#
# <b>%b</b> Month as locale’s abbreviated name. Jan, Feb, …, Dec (en_US)
# Jan, Feb, …, Dez (de_DE)
#
# <b>%B</b> Month as locale’s full name. January, February, …, December (en_US)
# Januar, Februar, …, Dezember (de_DE)
#
# <b>%m</b> Month as a zero-padded decimal number. 01, 02 … 12
#
# <b>%y</b> Year without century as a zero-padded decimal number. 01, 02, … 99
#
# <b>%Y</b> Year with century as a decimal number. 0001, 0002, … , 9999
#
# <b>%H</b> Hour (24-hour clock) as a zero-padded decimal number. 01, 02, … , 23
#
# <b>%I</b> Hour (12-hour clock) as a zero-padded decimal number. 01, 02, … , 12
#
# <b>%p</b> Locale’s equivalent of either AM or PM. AM, PM (en_US)
# am, pm (de_DE)
#
# <b>%M</b> Minute as a zero-padded decimal number. 01, 02, … , 59
#
# <b>%S</b> Second as a zero-padded decimal number. 01, 02, … , 59
#
# <b>%f</b> Microsecond as a decimal number, zero-padded on the left. 000000, 000001, …, 999999
# Not applicable with time module.
#
# <b>%z</b> UTC offset in the form ±HHMM[SS] (empty string if the object is naive). (empty), +0000, -0400, +1030
#
# <b>%Z</b> Time zone name (empty string if the object is naive). (empty), UTC, IST, CST
#
# <b>%j</b> Day of the year as a zero-padded decimal number. 001, 002, …, 366
#
# <b>%U</b> Week number of the year (Sunday as the first day of the week) as a zero padded decimal number.
#
# All days in a new year preceding the first Sunday are considered to be in week 0. 00, 01, …, 53
#
# <b>%W</b> Week number of the year (Monday as the first day of the week) as a decimal number.
#
# All days in a new year preceding the first Monday are considered to be in week 0. 00, 01, …, 53
# <b>%c</b> Locale’s appropriate date and time representation. Tue Aug 16 21:30:00 1988 (en_US)
#
#
df.head()
df['date'][0]
from datetime import datetime
from datetime import date
df['new_date']=[datetime.strptime(x,'%d/%m/%y %H:%M') for x in df['date']]
type(df['new_date'][0])
df.head()
# Month year date
print(df['new_date'][0].month)
print(df['new_date'][0].year)
print(df['new_date'][0].day)
print(df['new_date'][0].hour)
print(df['new_date'][0].minute)
print(date.today())
print(datetime.now())
# Datetime arithmetic needs a timedelta; adding a bare int to a datetime
# raises TypeError, so the offset is expressed as timedelta(days=1).
from datetime import timedelta
datetime.now() + timedelta(days=1)
#Operations
print(date.today() - timedelta(days=1))        # yesterday
print(datetime.now() + timedelta(hours=1))     # one hour from now
print(datetime.now() + timedelta(seconds=60))  # one minute from now
print(df['new_date'][0])
print(df['new_date'][5])
df.head(6)
# Difference in two datetime
time_delta=df['new_date'][0]-df['new_date'][5]
type(time_delta)
time_delta.total_seconds()
# ### 4 - Merging
#
# <code> pd.concat( [df] , axis) </code>
#
# <code> pd.merge( df1, df2 ,on,how,suffixes) </code>
# +
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},
index=[8, 9, 10, 11])
# -
# Concat both axis
pd.concat([df1, df2,df3])
pd.concat([df1, df2],axis=1)
# +
# Join - Left, Right , inner, outer
raw_data = {
'subject_id': ['1', '2', '3', '4', '5'],
'first_name': ['Alex', 'Amy', 'Allen', 'Alice', 'Ayoung'],
'last_name': ['Anderson', 'Ackerman', 'Ali', 'Aoni', 'Atiches']}
df_a = pd.DataFrame(raw_data, columns = ['subject_id', 'first_name', 'last_name'])
df_a
# -
raw_data = {
'subject_id': ['4', '5', '6', '7', '8'],
'first_name': ['Billy', 'Brian', 'Bran', 'Bryce', 'Betty'],
'last_name': ['Bonder', 'Black', 'Balwner', 'Brice', 'Btisan']}
df_b = pd.DataFrame(raw_data, columns = ['subject_id', 'first_name', 'last_name'])
df_b
df_new=pd.concat([df_a,df_b])
df_new
raw_data = {
'subject_id': ['1', '2', '3', '4', '5', '7', '8', '9', '10', '11'],
'test_id': [51, 15, 15, 61, 16, 14, 15, 1, 61, 16]}
df_n = pd.DataFrame(raw_data, columns = ['subject_id','test_id'])
df_n
df_new.head()
pd.merge(df_new, df_n, on='subject_id') # On - Key column
df_n.rename({'subject_id':'id'},axis=1,inplace=True)
pd.merge(df_new,df_n , left_on='subject_id', right_on='id')
df_a
df_b
# Outer Join
pd.merge(df_a, df_b, on='subject_id', how='outer')
# Inner Join - Common in both
pd.merge(df_a, df_b, on='subject_id', how = 'inner',suffixes=['_tb1','_tb2'])
# right Join
pd.merge(df_a, df_b, on='subject_id', how='right',suffixes=['_tb1','_tb2'])
# left Join
df=pd.merge(df_a, df_b, on='subject_id', how='left',suffixes=['_tb1','_tb2'])
print(df)
# ### 5 - Calculating Unique And Missing Values
# .unique()
#
# .nunique()
#
# .value_counts()
#
# .isnull()
#
# .fillna()
df.info()
df
df.subject_id.unique()
df.subject_id.nunique()
df.subject_id.value_counts()
sum(df.first_name_tb2.isnull())# sum
df.first_name_tb2.fillna(-999,inplace = True) #,inplace = True
df
# ### 6 - Groupby
df.head()
df=pd.read_csv('./data/phone_data.csv',sep=',') # Important to put dot
df.head()
df.month.unique()
df[df.month=='2014-12']['duration'].sum()
df.groupby('month')['duration'].sum().reset_index()
df2=df.groupby('month')['duration'].sum()
# df2.reset_index()
df.groupby('month')['date'].count()
df.head(10)
# What is the sum of durations, for calls only, to each network
df[df['item'] == 'call'].groupby('network')['duration'].sum()
df.groupby(['month', 'item'])['duration'].sum().reset_index()
df.groupby('month', as_index=False).agg({"duration": "sum"})
# Group the data frame by month and item and extract a number of stats from each group
df.groupby(
['month', 'item']
).agg(
{
'duration':sum, # Sum duration per group
'network_type': "count", # get the count of networks
'date': 'first' # get the first date per group
})
# Define the aggregation procedure outside of the groupby operation
aggregations = {
'duration' : {
'duration_sum':'sum',
'duration_scnd_max': lambda x: max(x) - 1
}}
df2=df.groupby('month',as_index=False).agg(aggregations)
df2.head()
df2.columns=df2.columns.droplevel(level=0)
df2.rename({'':'month'},axis=1,inplace=True)
df2.head().columns
# ### Assignment
# ### Q1. Import Pandas and Numpy in the notebook
# ### Q2. Load Automobile_data.csv from data folder
# ### Q3. From given data set print first and last 10 rows
# ### Q4. Clean data and update the CSV file
# Replace all column values which contain ‘?’ and n.a with NaN.
# ### Q5. Find the most expensive car company name
# Print most expensive car’s company name and price.
# ### Q6. Find date with highest price and different of dates in days in highest and second highest
# ### Q7. Calculate company wise maximum horsepower
# ### Q8. Count total cars per company
# ### Q9. Find each company’s Higesht price car
# ### Q10: Update Price of audi cars with twice the amount (Multiply price by 2 for audi in the same column)
# Try to use lambda function
# ### Q11: Sort all data by Car and Price column
# ### Q12 : Find the average mileage of each car making company
# ### Q13: Concatenate two data frames using the following conditions
# GermanCars = {'Company': ['Ford', 'Mercedes', 'BMV', 'Audi'], 'Price': [23845, 171995, 135925 , 71400]}
#
# japaneseCars = {'Company': ['Toyota', 'Honda', 'Nissan', 'Mitsubishi '], 'Price': [29995, 23600, 61500 , 58900]}
#
#
# Merge these two dfs row wise
# ### Thank You. See you in Next Class
|
Pandas - Operations on Dataframes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Name
#
# Deploying a trained model to Cloud Machine Learning Engine
#
#
# # Label
#
# Cloud Storage, Cloud ML Engine, Kubeflow, Pipeline
#
#
# # Summary
#
# A Kubeflow Pipeline component to deploy a trained model from a Cloud Storage location to Cloud ML Engine.
#
#
# # Details
#
#
# ## Intended use
#
# Use the component to deploy a trained model to Cloud ML Engine. The deployed model can serve online or batch predictions in a Kubeflow Pipeline.
#
#
# ## Runtime arguments
#
# | Argument | Description | Optional | Data type | Accepted values | Default |
# |--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------|-----------------|---------|
# | model_uri | The URI of a Cloud Storage directory that contains a trained model file.<br/> Or <br/> An [Estimator export base directory](https://www.tensorflow.org/guide/saved_model#perform_the_export) that contains a list of subdirectories named by timestamp. The directory with the latest timestamp is used to load the trained model file. | No | GCSPath | | |
# | project_id | The ID of the Google Cloud Platform (GCP) project of the serving model. | No | GCPProjectID | | |
# | model_id | The name of the trained model. | Yes | String | | None |
# | version_id | The name of the version of the model. If it is not provided, the operation uses a random name. | Yes | String | | None |
# | runtime_version | The Cloud ML Engine runtime version to use for this deployment. If it is not provided, the default stable version, 1.0, is used. | Yes | String | | None |
# | python_version | The version of Python used in the prediction. If it is not provided, version 2.7 is used. You can use Python 3.5 if runtime_version is set to 1.4 or above. Python 2.7 works with all supported runtime versions. | Yes | String | | 2.7 |
# | model | The JSON payload of the new [model](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models). | Yes | Dict | | None |
# | version | The new [version](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models.versions) of the trained model. | Yes | Dict | | None |
# | replace_existing_version | Indicates whether to replace the existing version in case of a conflict (if the same version number is found.) | Yes | Boolean | | FALSE |
# | set_default | Indicates whether to set the new version as the default version in the model. | Yes | Boolean | | FALSE |
# | wait_interval | The number of seconds to wait in case the operation has a long run time. | Yes | Integer | | 30 |
#
#
#
# ## Input data schema
#
# The component looks for a trained model in the location specified by the `model_uri` runtime argument. The accepted trained models are:
#
#
# * [Tensorflow SavedModel](https://cloud.google.com/ml-engine/docs/tensorflow/exporting-for-prediction)
# * [Scikit-learn & XGBoost model](https://cloud.google.com/ml-engine/docs/scikit/exporting-for-prediction)
#
# The accepted file formats are:
#
# * *.pb
# * *.pbtext
# * model.bst
# * model.joblib
# * model.pkl
#
# `model_uri` can also be an [Estimator export base directory, ](https://www.tensorflow.org/guide/saved_model#perform_the_export)which contains a list of subdirectories named by timestamp. The directory with the latest timestamp is used to load the trained model file.
#
# ## Output
# | Name | Description | Type |
# |:------- |:---- | :--- |
# | job_id | The ID of the created job. | String |
# | job_dir | The Cloud Storage path that contains the trained model output files. | GCSPath |
#
#
# ## Cautions & requirements
#
# To use the component, you must:
#
# * [Set up the cloud environment](https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction#setup).
# * The component can authenticate to GCP. Refer to [Authenticating Pipelines to GCP](https://www.kubeflow.org/docs/gke/authentication-pipelines/) for details.
# * Grant read access to the Cloud Storage bucket that contains the trained model to the Kubeflow user service account.
#
# ## Detailed description
#
# Use the component to:
# * Locate the trained model at the Cloud Storage location you specify.
# * Create a new model if a model provided by you doesn’t exist.
# * Delete the existing model version if `replace_existing_version` is enabled.
# * Create a new version of the model from the trained model.
# * Set the new version as the default version of the model if `set_default` is enabled.
#
# Follow these steps to use the component in a pipeline:
#
# 1. Install the Kubeflow Pipeline SDK:
#
#
# +
# %%capture --no-stderr
# !pip3 install kfp --upgrade
# -
# 2. Load the component using KFP SDK
# +
import kfp.components as comp
mlengine_deploy_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/v1.7.0-alpha.3/components/gcp/ml_engine/deploy/component.yaml')
help(mlengine_deploy_op)
# -
# ### Sample
# Note: The following sample code works in IPython notebook or directly in Python code.
#
# In this sample, you deploy a pre-built trained model from `gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/` to Cloud ML Engine. The deployed model is `kfp_sample_model`. A new version is created every time the sample is run, and the latest version is set as the default version of the deployed model.
#
# #### Set sample parameters
# + tags=["parameters"]
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
# Optional Parameters
EXPERIMENT_NAME = 'CLOUDML - Deploy'
TRAINED_MODEL_PATH = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/'
# -
# #### Example pipeline that uses the component
import kfp.dsl as dsl
import json
@dsl.pipeline(
    name='CloudML deploy pipeline',
    description='CloudML deploy pipeline'
)
def pipeline(
    model_uri = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/',
    project_id = PROJECT_ID,
    model_id = 'kfp_sample_model',
    version_id = '',
    runtime_version = '1.10',
    python_version = '',
    version = {},
    replace_existing_version = 'False',
    set_default = 'True',
    wait_interval = '30'):
    """Single-step pipeline deploying the pre-trained census model to Cloud ML Engine.

    Every parameter is forwarded verbatim to the ``mlengine_deploy_op`` component;
    empty strings mean "use the component's default". Booleans and the wait
    interval are passed as strings, matching the component's declared inputs.
    """
    # The deploy op creates/updates the model version on Cloud ML Engine.
    task = mlengine_deploy_op(
        model_uri=model_uri,
        project_id=project_id,
        model_id=model_id,
        version_id=version_id,
        runtime_version=runtime_version,
        python_version=python_version,
        version=version,
        replace_existing_version=replace_existing_version,
        set_default=set_default,
        wait_interval=wait_interval)
# #### Compile the pipeline
# Derive the output archive name from the pipeline function's name.
pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
# Compile the pipeline function into a deployable archive.
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# #### Submit the pipeline for execution
# +
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
# -
# ## References
# * [Component python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_deploy.py)
# * [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
# * [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/ml_engine/deploy/sample.ipynb)
# * [Cloud Machine Learning Engine Model REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.models)
# * [Cloud Machine Learning Engine Version REST API](https://cloud.google.com/ml-engine/reference/rest/v1/projects.versions)
#
# ## License
# By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
|
components/gcp/ml_engine/deploy/sample.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by <NAME>. Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Solution Notebook
# ## Problem: Implement a binary search tree with insert, delete, different traversals & max/min node values
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
# * Is this a binary tree?
# * Yes
# * Is the root set to None initially?
# * Yes
# * Do we care if the tree is balanced?
# * No
# * What do we return for the traversals?
# * Return a list of the data in the desired order
# * What type of data can the tree hold?
# * Assume the tree only takes ints. In a realistic example, we'd use a hash table to convert other types to ints.
# ## Test Cases
#
# ### Insert
#
# * Always start with the root
# * If value is less than the root, go to the left child
# * if value is more than the root, go to the right child
#
#
# ### Delete
#
# * Deleting a node from a binary tree is tricky. Make sure you arrange the tree correctly when deleting a node.
# * Here are some basic [instructions](http://www.algolist.net/Data_structures/Binary_search_tree/Removal)
# * If the value to delete isn't on the tree return False
#
#
# ### Traversals
#
# * In order traversal - left, center, right
# * Pre order traversal - center, left, right
# * Post order traversal - left, right, center
# * Return list for all traversals
#
# ### Max & Min
# * Find the max node in the binary search tree
# * Find the min node in the binary search tree
#
# ### treeIsEmpty
# * check if the tree is empty
#
#
# ## Algorithm
#
# Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/binary_tree_implementation/binary_tree_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Algorithm
#
# ### Insert
#
# * If root is none, insert at root
# * Else
# * While node is not None
# * if value is less go left child
# * If value is more go right child
#
#
# * Time complexity: O(log(n))
# * Space complexity: O(n)
#
# ### Min Node
#
# * Keep going to the left child until you reach None and return the value
#
#
# * Time complexity: O(log(n))
# * Space complexity: O(n)
#
# ### Max Node
#
# * Keep going to the right child until you reach None and return the value
#
#
# * Time complexity: O(log(n))
# * Space complexity: O(n)
#
# ### Traversals
#
# * In order
# * While the node is not None
# * Call left child recursively
# * Append data
# * Call right child recursively
#
# * Post order
# * While the node is not None
# * Call left child recursively
# * Call right child recursively
# * Append data
#
# * Pre order
# * While the node is not None
# * Append data
# * Call left child recursively
# * Call right child recursively
#
#
# * Time complexity: O(n) for all traversals
# * Space complexity: O(n)
#
# ### Delete
#
# * First, find value to delete
# * If value is not in tree
# * Return False
# * If value found
# * Check if the node is a left child or right child
# * If node is left child
# * Find the biggest value in all the node's children and replace it with it
# * If node is right child
# * Find the smallest value in all the node's children and replace it with it
#
#
# * Time complexity: O(log(n))
# * Space complexity: O(n)
#
#
# ## Code
# +
# %%writefile binary_search_tree.py
class Node (object):
    """A single tree node holding an int and links to its two children."""

    def __init__ (self, data):
        self.data = data
        self.rightChild = None
        self.leftChild = None


class BinaryTree (object):
    """Unbalanced binary search tree of ints.

    Supports insert, delete, in/pre/post-order traversals and min/max
    lookup. Duplicates are inserted into the right subtree.
    """

    def __init__ (self):
        self.root = None

    def insert (self, newData):
        """Insert newData as a new leaf, preserving BST ordering."""
        leaf = Node(newData)
        if self.root is None:
            self.root = leaf
            return
        current = self.root
        parent = self.root
        # Walk down to the attachment point, remembering the parent.
        while current is not None:
            parent = current
            if newData < current.data:
                current = current.leftChild
            else:
                current = current.rightChild
        if newData < parent.data:
            parent.leftChild = leaf
        else:
            parent.rightChild = leaf

    def delete (self, data):
        """Remove the node whose value equals data.

        Returns True on success, False when the value is not in the tree.
        """
        current = self.root
        parent = self.root
        isLeft = False
        if current is None:
            return False
        # Fix: compare with != (value equality), not `is not` (identity).
        # CPython only caches small ints, so the old identity test silently
        # failed to find present values outside roughly [-5, 256].
        while current is not None and current.data != data:
            parent = current
            if data < current.data:
                current = current.leftChild
                isLeft = True
            else:
                current = current.rightChild
                isLeft = False
        if current is None:
            return False
        if current.leftChild is None and current.rightChild is None:
            # Case 1: leaf -- just unlink it from its parent.
            if current is self.root:
                self.root = None
            elif isLeft:
                parent.leftChild = None
            else:
                parent.rightChild = None
        elif current.rightChild is None:
            # Case 2: only a left child -- splice the left subtree in.
            if current is self.root:
                self.root = current.leftChild
            elif isLeft:
                parent.leftChild = current.leftChild
            else:
                parent.rightChild = current.leftChild
        elif current.leftChild is None:
            # Case 3: only a right child -- splice the right subtree in.
            # Fix: this branch previously re-tested `current.rightChild is
            # None` (making it unreachable dead code) and assigned to the
            # non-existent attribute `parent.lChild`.
            if current is self.root:
                self.root = current.rightChild
            elif isLeft:
                parent.leftChild = current.rightChild
            else:
                parent.rightChild = current.rightChild
        else:
            # Case 4: two children -- replace with the in-order successor
            # (the leftmost node of the right subtree).
            successor = current.rightChild
            successorParent = current
            while successor.leftChild is not None:
                successorParent = successor
                successor = successor.leftChild
            if current is self.root:
                self.root = successor
            elif isLeft:
                parent.leftChild = successor
            else:
                parent.rightChild = successor
            successor.leftChild = current.leftChild
            if successor is not current.rightChild:
                successorParent.leftChild = successor.rightChild
                successor.rightChild = current.rightChild
        return True

    def minNode (self):
        """Return the smallest value. Assumes a non-empty tree."""
        current = self.root
        while current.leftChild is not None:
            current = current.leftChild
        return current.data

    def maxNode (self):
        """Return the largest value. Assumes a non-empty tree."""
        current = self.root
        while current.rightChild is not None:
            current = current.rightChild
        return current.data

    def printPostOrder (self):
        """Return the values in post-order (left, right, center)."""
        # Accumulate into a local list instead of the previous module-level
        # `global` list, which leaked state across trees and calls.
        postOrder = []
        def PostOrder(node):
            if node is not None:
                PostOrder(node.leftChild)
                PostOrder(node.rightChild)
                postOrder.append(node.data)
        PostOrder(self.root)
        return postOrder

    def printInOrder (self):
        """Return the values in sorted (in-) order."""
        inOrder = []
        def InOrder (node):
            if node is not None:
                InOrder(node.leftChild)
                inOrder.append(node.data)
                InOrder(node.rightChild)
        InOrder(self.root)
        return inOrder

    def printPreOrder (self):
        """Return the values in pre-order (center, left, right)."""
        preOrder = []
        def PreOrder (node):
            if node is not None:
                preOrder.append(node.data)
                PreOrder(node.leftChild)
                PreOrder(node.rightChild)
        PreOrder(self.root)
        return preOrder

    def treeIsEmpty (self):
        """True when the tree has no nodes."""
        return self.root is None
# -
# %run binary_search_tree.py
# +
# %%writefile test_binary_search_tree.py
from nose.tools import assert_equal
class TestBinaryTree(object):
    """Smoke tests for BinaryTree.

    Relies on binary_search_tree.py having been %run first and on nose's
    assert_equal being imported.
    """

    def test_insert_traversals (self):
        # myTree is built from shuffled values; myTree2 from an ascending
        # range, which degenerates into a right-leaning chain.
        myTree = BinaryTree()
        myTree2 = BinaryTree()
        for num in [50, 30, 70, 10, 40, 60, 80, 7, 25, 38]:
            myTree.insert(num)
        [myTree2.insert(num) for num in range (1, 100, 10)]
        print("Test: insert checking with in order traversal")
        expectVal = [7, 10, 25, 30, 38, 40, 50, 60, 70, 80]
        assert_equal(myTree.printInOrder(), expectVal)
        expectVal = [1, 11, 21, 31, 41, 51, 61, 71, 81, 91]
        assert_equal(myTree2.printInOrder(), expectVal)
        print("Test: insert checking with post order traversal")
        expectVal = [7, 25, 10, 38, 40, 30, 60, 80, 70, 50]
        assert_equal(myTree.printPostOrder(), expectVal)
        expectVal = [91, 81, 71, 61, 51, 41, 31, 21, 11, 1]
        assert_equal(myTree2.printPostOrder(), expectVal)
        print("Test: insert checking with pre order traversal")
        expectVal = [50, 30, 10, 7, 25, 40, 38, 70, 60, 80]
        assert_equal(myTree.printPreOrder(), expectVal)
        # For a right-leaning chain, pre-order equals in-order.
        expectVal = [1, 11, 21, 31, 41, 51, 61, 71, 81, 91]
        assert_equal(myTree2.printPreOrder(), expectVal)
        print("Success: test_insert_traversals")

    def test_max_min_nodes (self):
        myTree = BinaryTree()
        myTree.insert(5)
        myTree.insert(1)
        myTree.insert(21)
        print("Test: max node")
        assert_equal(myTree.maxNode(), 21)
        myTree.insert(32)
        assert_equal(myTree.maxNode(), 32)
        print("Test: min node")
        assert_equal(myTree.minNode(), 1)
        print("Test: min node inserting negative number")
        myTree.insert(-10)
        assert_equal(myTree.minNode(), -10)
        print("Success: test_max_min_nodes")

    def test_delete (self):
        myTree = BinaryTree()
        myTree.insert(5)
        print("Test: delete")
        # Deleting the only node must leave an empty tree.
        myTree.delete(5)
        assert_equal(myTree.treeIsEmpty(), True)
        print("Test: more complex deletions")
        # 1..4 forms a right-leaning chain; deleting 2 splices 3 under root 1.
        [myTree.insert(x) for x in range(1, 5)]
        myTree.delete(2)
        assert_equal(myTree.root.rightChild.data, 3)
        print("Test: delete invalid value")
        assert_equal(myTree.delete(100), False)
        print("Success: test_delete")
def main():
    """Instantiate the suite and run every test group in order."""
    suite = TestBinaryTree()
    for run_test in (suite.test_insert_traversals,
                     suite.test_max_min_nodes,
                     suite.test_delete):
        run_test()

if __name__=='__main__':
    main()
# -
# %run -i test_binary_search_tree.py
|
staging/graphs_trees/binary_tree/binary_tree_solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#IMPORT SEMUA LIBRARY DISINI
# -
#IMPORT LIBRARY PANDAS
import pandas as pd
#IMPORT LIBRARY POSTGRESQL
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
#IMPORT LIBRARY CHART
from matplotlib import pyplot as plt
from matplotlib import style
#IMPORT LIBRARY PDF
from fpdf import FPDF
#IMPORT LIBRARY BASEPATH
import io
#IMPORT LIBRARY BASE64 IMG
import base64
#IMPORT LIBRARY NUMPY
import numpy as np
#IMPORT LIBRARY EXCEL
import xlsxwriter
#IMPORT LIBRARY SIMILARITAS
import n0similarities as n0
# +
#FUNGSI UNTUK MENGUPLOAD DATA DARI CSV KE POSTGRESQL
# -
def uploadToPSQL(host, username, password, database, port, table, judul, filePath, name, subjudul, dataheader, databody):
    """Create one PostgreSQL table per entry in `table` and insert
    (tanggal, total) rows built by zipping dataheader with databody[t].

    Returns True when every table was written, or the psycopg2 error
    object when the connection or a query fails.

    NOTE(review): table names are interpolated directly into SQL strings;
    this is only safe because they are generated locally, never from user
    input -- confirm before reusing with external data.
    """
    # Attempt the upload; any failure is returned to the caller.
    try:
        for t in range(0, len(table)):
            # Pair each header entry (a date) with this table's body values.
            rawstr = [tuple(x) for x in zip(dataheader, databody[t])]
            # Fresh connection per table; autocommit so DDL applies at once.
            connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=database)
            cursor = connection.cursor()
            connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT);
            # Does the table already exist?
            cursor.execute("SELECT * FROM information_schema.tables where table_name=%s", (table[t],))
            exist = bool(cursor.rowcount)
            # If it exists, drop it first, then recreate.
            if exist == True:
                cursor.execute("DROP TABLE "+ table[t] + " CASCADE")
                cursor.execute("CREATE TABLE "+table[t]+" (index SERIAL, tanggal date, total varchar);")
            # Otherwise just create it.
            else:
                cursor.execute("CREATE TABLE "+table[t]+" (index SERIAL, tanggal date, total varchar);")
            # Bulk-insert the prepared rows via a literal VALUES list.
            cursor.execute('INSERT INTO '+table[t]+'(tanggal, total) values ' +str(rawstr)[1:-1])
        # All tables written successfully.
        return True
    # Connection/query failure: hand the error object back instead of raising.
    except (Exception, psycopg2.Error) as error :
        return error
    # Close the (last) connection either way.
    # NOTE(review): if the very first connect() raises, `connection` is
    # unbound here and this finally-block itself raises NameError.
    finally:
        if(connection):
            cursor.close()
            connection.close()
# +
#FUNGSI UNTUK MEMBUAT CHART, DATA YANG DIAMBIL DARI DATABASE DENGAN MENGGUNAKAN ORDER DARI TANGGAL DAN JUGA LIMIT
#DISINI JUGA MEMANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF
# -
def makeChart(host, username, password, db, port, table, judul, filePath, name, subjudul, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, limitdata, wilayah, tabledata, basePath):
    """For each table: fetch the `limitdata` most recent rows from PostgreSQL,
    draw bar/line/pie charts (saved as PNG under .../img/), then build the
    Excel workbook and the PDF report via makeExcel/makePDF.
    """
    try:
        datarowsend = []
        for t in range(0, len(table)):
            # Fresh database connection per table.
            connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=db)
            cursor = connection.cursor()
            # Latest rows first, bounded by limitdata.
            postgreSQL_select_Query = "SELECT * FROM "+table[t]+" ORDER BY tanggal DESC LIMIT " + str(limitdata)
            cursor.execute(postgreSQL_select_Query)
            mobile_records = cursor.fetchall()
            uid = []
            lengthx = []
            lengthy = []
            # Unpack the (index, tanggal, total) rows into plot vectors.
            for row in mobile_records:
                uid.append(row[0])
                lengthx.append(row[1])
                lengthy.append(row[2])
            datarowsend.append(mobile_records)
            # Chart title: category + region name.
            judulgraf = A2 + " " + wilayah[t]
            # --- bar chart ---
            style.use('ggplot')
            fig, ax = plt.subplots()
            ax.bar(uid, lengthy, align='center')
            ax.set_title(judulgraf)
            ax.set_ylabel('Total')
            ax.set_xlabel('Tanggal')
            ax.set_xticks(uid)
            ax.set_xticklabels((lengthx))
            b = io.BytesIO()
            # Render to an in-memory PNG...
            plt.savefig(b, format='png', bbox_inches="tight")
            # ...and keep it as base64 for later file output.
            barChart = base64.b64encode(b.getvalue()).decode("utf-8").replace("\n", "")
            plt.show()
            # --- line chart ---
            plt.plot(lengthx, lengthy)
            plt.xlabel('Tanggal')
            plt.ylabel('Total')
            plt.title(judulgraf)
            plt.grid(True)
            l = io.BytesIO()
            plt.savefig(l, format='png', bbox_inches="tight")
            lineChart = base64.b64encode(l.getvalue()).decode("utf-8").replace("\n", "")
            plt.show()
            # --- pie chart ---
            plt.title(judulgraf)
            plt.pie(lengthy, labels=lengthx, autopct='%1.1f%%',
                shadow=True, startangle=180)
            plt.plot(legend=None)
            plt.axis('equal')
            p = io.BytesIO()
            plt.savefig(p, format='png', bbox_inches="tight")
            pieChart = base64.b64encode(p.getvalue()).decode("utf-8").replace("\n", "")
            plt.show()
            # Persist the three charts as PNG files (consumed by makePDF).
            bardata = base64.b64decode(barChart)
            barname = basePath+'jupyter/CEIC/21. Sektor Asuransi/img/'+name+''+table[t]+'-bar.png'
            with open(barname, 'wb') as f:
                f.write(bardata)
            linedata = base64.b64decode(lineChart)
            linename = basePath+'jupyter/CEIC/21. Sektor Asuransi/img/'+name+''+table[t]+'-line.png'
            with open(linename, 'wb') as f:
                f.write(linedata)
            piedata = base64.b64decode(pieChart)
            piename = basePath+'jupyter/CEIC/21. Sektor Asuransi/img/'+name+''+table[t]+'-pie.png'
            with open(piename, 'wb') as f:
                f.write(piedata)
        # After all tables are charted, build the Excel workbook...
        # NOTE(review): placed after the loop so the reports cover every
        # table -- confirm against the original notebook's indentation.
        makeExcel(datarowsend, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, name, limitdata, table, wilayah, basePath)
        # ...and the PDF report.
        makePDF(datarowsend, judul, barChart, lineChart, pieChart, name, subjudul, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, limitdata, table, wilayah, basePath)
    # Connection/query failure: report and fall through to cleanup.
    except (Exception, psycopg2.Error) as error :
        print (error)
    # Close the (last) connection either way.
    finally:
        if(connection):
            cursor.close()
            connection.close()
# +
#FUNGSI UNTUK MEMBUAT PDF YANG DATANYA BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2
#PLUGIN YANG DIGUNAKAN ADALAH FPDF
# -
def makePDF(datarow, judul, bar, line, pie, name, subjudul, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, lengthPDF, table, wilayah, basePath):
    """Render the report PDF: title block, metadata table, the F2 data table,
    and one page of bar/line/pie charts per region. Built with FPDF.

    NOTE(review): the `bar`/`line`/`pie` base64 arguments are never used --
    the images are re-read from the PNG files written by makeChart.
    """
    # A4-sized landscape page.
    pdf = FPDF('L', 'mm', [210,297])
    pdf.add_page()
    # Title.
    pdf.set_font('helvetica', 'B', 20.0)
    pdf.set_xy(145.0, 15.0)
    pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=judul, border=0)
    # Subtitle.
    pdf.set_font('arial', '', 14.0)
    pdf.set_xy(145.0, 25.0)
    pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=subjudul, border=0)
    # Horizontal rule under the subtitle.
    pdf.line(10.0, 30.0, 287.0, 30.0)
    pdf.set_font('times', '', 10.0)
    pdf.set_xy(17.0, 37.0)
    pdf.set_font('Times','B',11.0)
    pdf.ln(0.5)
    th1 = pdf.font_size
    # Two-column metadata table: label cell + value cell per row.
    pdf.cell(100, 2*th1, "Kategori", border=1, align='C')
    pdf.cell(177, 2*th1, A2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Region", border=1, align='C')
    pdf.cell(177, 2*th1, B2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Frekuensi", border=1, align='C')
    pdf.cell(177, 2*th1, C2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Unit", border=1, align='C')
    pdf.cell(177, 2*th1, D2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Sumber", border=1, align='C')
    pdf.cell(177, 2*th1, E2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Status", border=1, align='C')
    pdf.cell(177, 2*th1, F2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "ID Seri", border=1, align='C')
    pdf.cell(177, 2*th1, G2, border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Kode SR", border=1, align='C')
    pdf.cell(177, 2*th1, H2, border=1, align='C')
    pdf.ln(2*th1)
    # The three date fields are rendered as ISO dates.
    pdf.cell(100, 2*th1, "Tanggal Obs. Pertama", border=1, align='C')
    pdf.cell(177, 2*th1, str(I2.date()), border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Tanggal Obs. Terakhir ", border=1, align='C')
    pdf.cell(177, 2*th1, str(J2.date()), border=1, align='C')
    pdf.ln(2*th1)
    pdf.cell(100, 2*th1, "Waktu pembaruan terakhir", border=1, align='C')
    pdf.cell(177, 2*th1, str(K2.date()), border=1, align='C')
    pdf.ln(2*th1)
    pdf.set_xy(17.0, 125.0)
    pdf.set_font('Times','B',11.0)
    # Spread the F2 table across the full printable width.
    epw = pdf.w - 2*pdf.l_margin
    col_width = epw/(lengthPDF+1)
    pdf.ln(0.5)
    th = pdf.font_size
    # Header row: region column followed by one column per date.
    pdf.cell(col_width, 2*th, str("Wilayah"), border=1, align='C')
    for row in datarow[0]:
        pdf.cell(col_width, 2*th, str(row[1]), border=1, align='C')
    pdf.ln(2*th)
    # Body: one row per region, values ordered like the header dates.
    for w in range(0, len(table)):
        data=list(datarow[w])
        pdf.set_font('Times','B',10.0)
        pdf.set_font('Arial','',9)
        pdf.cell(col_width, 2*th, wilayah[w], border=1, align='C')
        for row in data:
            pdf.cell(col_width, 2*th, str(row[2]), border=1, align='C')
        pdf.ln(2*th)
    # One chart page per region, using the PNGs makeChart wrote earlier.
    for s in range(0, len(table)):
        col = pdf.w - 2*pdf.l_margin
        pdf.ln(2*th)
        widthcol = col/3
        pdf.add_page()
        pdf.image(basePath+'jupyter/CEIC/21. Sektor Asuransi/img/'+name+''+table[s]+'-bar.png', link='', type='',x=8, y=80, w=widthcol)
        pdf.set_xy(17.0, 144.0)
        col = pdf.w - 2*pdf.l_margin
        pdf.image(basePath+'jupyter/CEIC/21. Sektor Asuransi/img/'+name+''+table[s]+'-line.png', link='', type='',x=103, y=80, w=widthcol)
        pdf.set_xy(17.0, 144.0)
        col = pdf.w - 2*pdf.l_margin
        pdf.image(basePath+'jupyter/CEIC/21. Sektor Asuransi/img/'+name+''+table[s]+'-pie.png', link='', type='',x=195, y=80, w=widthcol)
        pdf.ln(4*th)
    # Write the finished document to disk.
    pdf.output(basePath+'jupyter/CEIC/21. Sektor Asuransi/pdf/'+A2+'.pdf', 'F')
# +
#FUNGSI MAKEEXCEL GUNANYA UNTUK MEMBUAT DATA YANG BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2
#PLUGIN YANG DIGUNAKAN ADALAH XLSXWRITER
# -
def makeExcel(datarow, A2, B2, C2, D2, E2, F2, G2, H2, I2, J2, K2, name, limit, table, wilayah, basePath):
    """Write the report data into an .xlsx workbook (single sheet, F2 layout).

    Row 0 holds the static metadata labels plus one column per date taken
    from the first table's rows; each following row holds one region's
    metadata and values.
    """
    # Create the workbook and its single worksheet.
    workbook = xlsxwriter.Workbook(basePath+'jupyter/CEIC/21. Sektor Asuransi/excel/'+A2+'.xlsx')
    worksheet = workbook.add_worksheet('sheet1')
    # Cell formats: bold + border for the header row, border-only for bodies.
    header_fmt = workbook.add_format({'border': 2, 'bold': 1})
    body_fmt = workbook.add_format({'border': 2})
    # Static metadata labels, then one column per date in the data.
    header = ["Wilayah", "Kategori","Region","Frekuensi","Unit","Sumber","Status","ID Seri","Kode SR","Tanggal Obs. Pertama","Tanggal Obs. Terakhir ","Waktu pembaruan terakhir"]
    header.extend(str(record[1]) for record in datarow[0])
    # Row 0: header cells.
    for column, cell_value in enumerate(header):
        worksheet.write(0, column, cell_value, header_fmt)
    # One body row per table/region: metadata first, then the data values.
    for region_idx in range(len(table)):
        row_values = [wilayah[region_idx], A2, B2, C2, D2, E2, F2, G2, H2, str(I2.date()), str(J2.date()), str(K2.date())]
        row_values.extend(str(record[2]) for record in datarow[region_idx])
        for column, cell_value in enumerate(row_values):
            worksheet.write(region_idx + 1, column, cell_value, body_fmt)
    # Flush and close the workbook file.
    workbook.close()
# +
#DISINI TEMPAT AWAL UNTUK MENDEFINISIKAN VARIABEL VARIABEL SEBELUM NANTINYA DIKIRIM KE FUNGSI
#PERTAMA MANGGIL FUNGSI UPLOADTOPSQL DULU, KALAU SUKSES BARU MANGGIL FUNGSI MAKECHART
#DAN DI MAKECHART MANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF
# +
#BASE PATH UNTUK NANTINYA MENGCREATE FILE ATAU MEMANGGIL FILE
#Base path used both for reading inputs and writing all outputs.
basePath = 'C:/Users/ASUS/Documents/bappenas/'
#Region-similarity reference workbook.
filePathwilayah = basePath+'data mentah/CEIC/allwilayah.xlsx';
#Read the region workbook with pandas.
readexcelwilayah = pd.read_excel(filePathwilayah)
dfwilayah = list(readexcelwilayah.values)
readexcelwilayah.fillna(0)
allwilayah = []
#Administrative level of the data: province / regency / district / village.
tipewilayah = 'prov'
if tipewilayah == 'prov':
    for x in range(0, len(dfwilayah)):
        allwilayah.append(dfwilayah[x][1])
elif tipewilayah=='kabkot':
    for x in range(0, len(dfwilayah)):
        allwilayah.append(dfwilayah[x][3])
elif tipewilayah == 'kec':
    for x in range(0, len(dfwilayah)):
        allwilayah.append(dfwilayah[x][5])
elif tipewilayah == 'kel':
    for x in range(0, len(dfwilayah)):
        allwilayah.append(dfwilayah[x][7])
#Unique region names for the similarity search below.
semuawilayah = list(set(allwilayah))
#Database credentials and report metadata handed to the functions above.
name = "02. Statistik Asuransi Bukan Jiwa dan Reasuransi (RGB001-RGB040)"
host = "localhost"
username = "postgres"
password = "1234567890"
port = "5432"
database = "ceic"
judul = "Produk Domestik Bruto (AA001-AA007)"
subjudul = "Badan Perencanaan Pembangunan Nasional"
filePath = basePath+'data mentah/CEIC/21. Sektor Asuransi/'+name+'.xlsx';
limitdata = int(8)
readexcel = pd.read_excel(filePath)
tabledata = []
wilayah = []
databody = []
#Load the source workbook with pandas.
df = list(readexcel.values)
head = list(readexcel)
body = list(df[0])
readexcel.fillna(0)
#Row range of the sheet to process (here a single row).
rangeawal = 106
rangeakhir = 107
rowrange = range(rangeawal, rangeakhir)
#'Wilayah' enables fuzzy region matching against semuawilayah;
#any other value is used verbatim as the region label.
jenisdata = "Indonesia"
#When region matching is requested, score every known region name.
if jenisdata == 'Wilayah':
    for x in rowrange:
        rethasil = 0
        big_w = 0
        #Pick the reference region with the highest Levenshtein similarity
        #against the tail of this row's label.
        for w in range(0, len(semuawilayah)):
            namawilayah = semuawilayah[w].lower().strip()
            nama_wilayah_len = len(namawilayah)
            hasil = n0.get_levenshtein_similarity(df[x][0].lower().strip()[nama_wilayah_len*-1:], namawilayah)
            if hasil > rethasil:
                rethasil = hasil
                big_w = w
        wilayah.append(semuawilayah[big_w].capitalize())
        tabledata.append('produkdomestikbruto_'+semuawilayah[big_w].lower().replace(" ", "") + "" + str(x))
        testbody = []
        #NaN cells become the string '0'; everything else is stringified.
        #NOTE(review): `~np.isnan(...) == False` relies on numpy-bool `~`
        #being logical NOT -- it would misbehave on plain Python bools.
        for listbody in df[x][11:]:
            if ~np.isnan(listbody) == False:
                testbody.append(str('0'))
            else:
                testbody.append(str(listbody))
        databody.append(testbody)
#Otherwise label every row with the fixed jenisdata value.
else:
    for x in rowrange:
        wilayah.append(jenisdata.capitalize())
        tabledata.append('produkdomestikbruto_'+jenisdata.lower().replace(" ", "") + "" + str(x))
        testbody = []
        for listbody in df[x][11:]:
            if ~np.isnan(listbody) == False:
                testbody.append(str('0'))
            else:
                testbody.append(str(listbody))
        databody.append(testbody)
#Metadata columns consumed by the PDF/Excel headers (columns 1..10).
A2 = "Data Migas"
B2 = df[rangeawal][1]
C2 = df[rangeawal][2]
D2 = df[rangeawal][3]
E2 = df[rangeawal][4]
F2 = df[rangeawal][5]
G2 = df[rangeawal][6]
H2 = df[rangeawal][7]
I2 = df[rangeawal][8]
J2 = df[rangeawal][9]
K2 = df[rangeawal][10]
#Date headers start at column 11 of the sheet.
dataheader = []
for listhead in head[11:]:
    dataheader.append(str(listhead))
#Upload first; only on success build the charts/Excel/PDF, otherwise
#print the error object returned by uploadToPSQL.
sql = uploadToPSQL(host, username, password, database, port, tabledata, judul, filePath, name, subjudul, dataheader, databody)
if sql == True:
    makeChart(host, username, password, database, port, tabledata, judul, filePath, name, subjudul, A2, B2, C2, D2, E2, F2, G2, H2,I2, J2, K2, limitdata, wilayah, tabledata, basePath)
else:
    print(sql)
# -
|
jupyter/CEIC/21. Sektor Asuransi/script/02. Statistik Asuransi Bukan Jiwa dan Reasuransi (RGB001-RGB040).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.3.11
# language: julia
# name: julia-0.3
# ---
# ## Load modules
# +
push!(LOAD_PATH, "../dvi")
addprocs(int(CPU_CORES / 2))
using DiscreteValueIteration
using SCAs, SCAIterators, SCAConst
using GridInterpolations
mdp = SCA()
# -
# ## Check size of MDP
# +
# Recursively sum the size in bytes of `x` and everything reachable
# through its fields. Values with no named fields contribute sizeof(x).
# NOTE(review): uses the Julia 0.3-era reflection API (typeof(x).names);
# modern Julia would use fieldnames/Base.summarysize instead.
function getBytes(x)
    total = 0;
    fieldNames = typeof(x).names;
    if fieldNames == ()
        # Leaf value: just its own size.
        return sizeof(x);
    else
        # Composite: recurse into every field and accumulate.
        for fieldName in fieldNames
            total += getBytes(getfield(x,fieldName));
        end
        return total;
    end
end

# Report the approximate in-memory footprint of the MDP.
println("mdp of type ", typeof(mdp), " takes up ", getBytes(mdp) / 1000.0, " kB")
# -
# ## Informal validation of state and action iterators and transition function
# +
testActions = ActionIterator(Actions)
actionCount = 0
for action in testActions
actionCount += 1
if actionCount == 1 || actionCount == NActions
println("action #", actionCount, ": ", typeof(action), " = ", action)
end # if
end # for action
println("nActions = ", NActions)
println("actionCount = ", actionCount, "\n")
testStates = StateIterator(Xs, Ys, Bearings, Speeds, Speeds)
stateCount = 0
for state in testStates
stateCount += 1
if stateCount == 1 || stateCount == NStates
println("state #", stateCount, ": ", typeof(state), " = ", state)
end # if
end # for state
println("nStates = ", NStates)
println("stateCount = ", stateCount)
# -
nextStateIndices, probs = nextStates(mdp, 1, 15)
println("next state indices:\n", nextStateIndices, "\n")
println("probabilities:\n", probs, "\n")
println("probabilities sum to ", sum(probs))
# ## Parallel solution
numProcs = int(CPU_CORES / 2)
solver = ParallelSolver(
numProcs,
maxIterations = 1000,
tolerance = 1e-6,
gaussSiedel = false,
includeV = true,
includeQ = true,
includeA = true)
policy = solve(solver, mdp, verbose = true)
println("")
# Copy a 2-D SharedArray element-by-element into a plain dense Array so
# the result can be saved with JLD (SharedArrays do not serialize directly).
function sharray2array(sharray::SharedArray{Float64, 2})
    result = zeros(sharray.dims)
    for i = 1:sharray.dims[1]
        for j = 1:sharray.dims[2]
            result[i, j] = sharray[i, j]
        end # for j
    end # for i
    return result
end # function sharray2array
# +
using JLD
solQ = sharray2array(policy.Q')
save("../../data/par-alpha.jld", "solQ", solQ)
# -
# ## Check against reference solution by visual inspection
# +
push!(LOAD_PATH, "../serial/")
using Pairwise
d = get_pomdp()
refQ = load("../../data/alpha.jld", "alpha")
solQ = load("../../data/par-alpha.jld", "solQ")
# -
# ### Reference policy plot
viz_pairwise_policy(d, refQ)
# ### Parallel solver policy plot
viz_pairwise_policy(d, solQ)
|
src/parallel/ParSCA.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# !ls Applied-Deep-Learning-with-Keras/Lesson06/data
# !head Applied-Deep-Learning-with-Keras/Lesson06/data/Health_Data.csv
patient_data = pd.read_csv("Applied-Deep-Learning-with-Keras/Lesson06/data/Health_Data.csv")
patient_data.head(30)
# +
# pandas. 숫자로 이루어진 필드의 정보들 보여줌
patient_data.describe()
# 평균을 잡는 방법 4가지
# 1. 산술평균
# 2. 사분위
# 3. 중앙값
# 4. 빈도수가 높은 것
# -
# 범주형 데이터(숫자 외 클래스,카테고리)의 정보 보여줌
patient_data.describe(include=['object'])
patient_data.head()
help(pd.get_dummies)
X = patient_data.iloc[:,1:9] # 0번칼럼은 환자 번호라 제외함. 1~8행이 독립변수.
print(X)
y = patient_data.iloc[:, 9] #정앞의 정보로 9번째 컬럼인 Readmitted에 관한 예측을 할 예정(종속변수)
print(y)
print(X.iloc[:, 1])
# 첫 줄 버리기 제목앞에 붙이기
SpecType = pd.get_dummies(X.iloc[:, 1], drop_first = True, prefix = 'SpecType')
print(SpecType)
preDie = pd.get_dummies(X.iloc[:,2], drop_first = True, prefix = 'PreDie')
print(preDie)
# +
# 안 볼 데이터들 버리기 1열씩 메모리에 반영 안될 수도 있으므로 다시 새기기
X.drop(['Admission_type', 'PreExistingDisease', 'Gender'], axis = 1, inplace = True)
print(X)
# -
X = pd.concat([X, SpecType, preDie], axis = 1)
print(X)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 110)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# X_train값의 폭이 다양하므로 정규분포화하기 위해 StandardSacler소환
from sklearn.preprocessing import StandardScaler
print(X_train)
# +
# 가우시안정규분포로 바꿈
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
print(X_train)
# +
X_train = pd.DataFrame(X_train, columns = X_test.columns)
print(X_train)
# -
X_train.head()
X_test = sc.transform(X_test)
X_test = pd.DataFrame(X_test, columns = X_train.columns)
# 데이터를 넘파이로 바꿈
X_train_np = X_train.values
y_train_np = y_train.values
X_test_np = X_test.values
y_test_np = y_test.values
print(type(X_train_np))
# numpy는 C++로 만든 라이브러리. 데이터가 십만개가 넘으면 속도가 빨라짐. 그전은 느림
print(X_train.shape)
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
# Feed-forward binary classifier: 10 input features -> 1 sigmoid output.
# Dropout (rate 0.3) follows every hidden layer to curb overfitting.
model = Sequential()
model.add(Dense(units = 6, activation = 'relu',kernel_initializer = 'uniform',input_dim = 10))
model.add(Dropout(rate = 0.3))
model.add(Dense(units = 6, activation = 'relu', kernel_initializer = 'uniform',))
model.add(Dropout(rate = 0.3))
model.add(Dense(units = 12, activation = 'tanh', kernel_initializer = 'uniform',))
model.add(Dropout(rate = 0.3))
model.add(Dense(units = 6, activation = 'tanh',kernel_initializer = 'uniform',))
model.add(Dropout(rate = 0.3))
# Single sigmoid unit: probability of readmission (the binary label).
model.add(Dense(units = 1, activation = 'sigmoid', kernel_initializer = 'uniform',))
# Binary cross-entropy matches the 0/1 label; accuracy tracked for reference.
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(X_train, y_train, epochs = 400, batch_size = 20)
# y_pred_class는 예측결과, y_pred_prob은 예측확률
y_pred_class = model.predict(X_test_np)
y_pred_prob = model.predict_proba(X_test_np)
print(y_pred_class)
print(y_pred_prob)
y_pred_class[:5]
# +
# 클래스 예측을 위한 임계치 설정
# 예측결과가 0.5이상이면 1(True), 0.5미만은 0(False)
y_pred_class = y_pred_class > 0.5
print(y_pred_class)
# -
# 인트타입으로 보기
print(y_pred_class.astype(int)[:5])
from sklearn.metrics import accuracy_score
# +
# 사이킷런의 accuracy_score함수로 정확성 계산
res = accuracy_score(y_test, y_pred_class)
print(res)
# -
# 널정확을 계산해서 비교해보자
print(type(y_test))
y_test.value_counts()
print(91 / 108)
print(17 / 108)
# 널정확성이 높다는 뜻은 알고리즘 평가에 사용할 측정지표로 정확성이 적절하지 않다는 뜻이다.
y_test.value_counts().head(1) / len(y_test)
from sklearn.metrics import roc_curve
# fpr(거짓 양성률): 실제로는 해당 사항이 없지만 해당 사항이 있다고 판정을 내린 경우
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
print(fpr)
print(tpr)
print(thresholds)
import matplotlib.pyplot as plt
plt.plot(fpr, tpr)
plt.title("ROC Curve")
plt.xlabel("False Positive rate")
plt.ylabel("True Positive rate")
plt.grid(True)
plt.show
def optimum_threshold(my_threshold):
    """Print sensitivity and specificity at the last ROC point whose
    threshold still exceeds my_threshold (uses module-level fpr/tpr/thresholds).
    """
    above = thresholds > my_threshold
    print("Sensitivity:", tpr[above][-1])
    print("Specificity:", 1 - fpr[above][-1])
print(tpr[thresholds > 0.5])
# [-1]은 배열을 거꾸로 셈(파이썬만 가능) 맨 뒤의 값을 인덱스
print(tpr[thresholds > 0.5][-1])
optimum_threshold(0.3)
optimum_threshold(0.5)
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test, y_pred_prob)
|
16_Keras05.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1><Center> Machine Learning with Spark</Center></h1>
# # Lesson Goals
# In this lesson we will develop a machine learning model with PySpark. I will create a classification model and a regression model using Pipelines.
#
# # Prerequisites
# 1. install Apache Spark
# 2. Install Anaconda
#
# NB: make sure Jupyter Notebook and Spark are running
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# -
# ### Import Spark SQL and Spark ML Libraries
#
# We'll train a **LogisticRegression** model with a **Pipleline** preparing the data, a **CrossValidator** to tuene the parameters of the model, and a **BinaryClassificationEvaluator** to evaluate our trained model.
# +
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
#from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import VectorAssembler, StringIndexer, VectorIndexer, MinMaxScaler
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml.evaluation import BinaryClassificationEvaluator
# Create (or reuse) a local SparkSession using all available cores.
spark = SparkSession.builder.master("local[*]").getOrCreate()
# -
# ### Load Source Data
# The data from the flight.csv file data includes specific characteristics (or features) for each flight, as well as a column indicating how many minutes late or early the flight arrived.
csv = spark.read.csv('../input/flights.csv', inferSchema=True, header=True)
csv.show(10)
# ### Prepare the Data for a Classification Model (Decision Tree Learning Model)
# I select a subset of columns to use as features and create a Boolean label field named *label* with values 1 or 0. Specifically, **1** for flight that arrived late, **0** for flight was early or on-time.
# label = 1 when arrival delay exceeds 15 minutes, else 0.
data = csv.select("DayofMonth", "DayOfWeek", "Carrier", "OriginAirportID", "DestAirportID", "DepDelay", ((col("ArrDelay") > 15).cast("Int").alias("label")))
data.show(10)
# ### Split the Data
#
# I will use 70% of the data for training, and reserve 30% for testing. In the testing data, the *label* column is renamed to *trueLabel* so I can use it later to compare predicted labels with known actual values.
splits = data.randomSplit([0.7, 0.3])
train = splits[0]
test = splits[1].withColumnRenamed("label", "trueLabel")
train_rows = train.count()
test_rows = test.count()
print("Training Rows:", train_rows, " Testing Rows:", test_rows)
# ### Define the Pipeline
#
# A pipeline consists of a series of transformer and estimator stages that typically prepare a DataFrame for modeling and then train a predictive model. In this case, you will create a pipeline with seven stages:
# * A **StringIndexer estimator** that converts string values to indexes for categorical features
# * A **VectorAssembler** that combines categorical features into a single vector
# * A **VectorIndexer** that creates indexes for a vector of categorical features
# * A **VectorAssembler** that creates a vector of continuous numeric features
# * A **MinMaxScaler** that normalizes continuous numeric features
# * A **VectorAssembler** that creates a vector of categorical and continuous features
# * A **DecisionTreeClassifier** that trains a classification model.
# Index the carrier string into a numeric categorical feature.
strIdx = StringIndexer(inputCol = "Carrier", outputCol = "CarrierIdx")
# Bundle the categorical columns into one vector, then index them.
catVect = VectorAssembler(inputCols = ["CarrierIdx", "DayofMonth", "DayOfWeek", "OriginAirportID", "DestAirportID"], outputCol="catFeatures")
catIdx = VectorIndexer(inputCol = catVect.getOutputCol(), outputCol = "idxCatFeatures")
# Continuous feature(s), normalized to [0, 1].
numVect = VectorAssembler(inputCols = ["DepDelay"], outputCol="numFeatures")
minMax = MinMaxScaler(inputCol = numVect.getOutputCol(), outputCol="normFeatures")
# Final feature vector = indexed categoricals + normalized numerics.
featVect = VectorAssembler(inputCols=["idxCatFeatures", "normFeatures"], outputCol="features")
lr = LogisticRegression(labelCol="label",featuresCol="features",maxIter=10,regParam=0.3)
#dt = DecisionTreeClassifier(labelCol="label", featuresCol="features")
pipeline = Pipeline(stages=[strIdx, catVect, catIdx, numVect, minMax, featVect, lr])
# ### Run the Pipeline to train a model
# Run the pipeline as an Estimator on the training data to train a model.
piplineModel = pipeline.fit(train)
# ### Generate label predictions
# Transform the test data with all of the stages and the trained model in the pipeline to generate label predictions.
prediction = piplineModel.transform(test)
predicted = prediction.select("features", "prediction", "trueLabel")
predicted.show(100, truncate=False)
# Looking into the results, some trueLabel 1s are predicted as 0. Let's evaluate the model.
# ## Evaluating a Classification Model
# We'll calculate a *Confusion Matrix* and the *Area Under ROC* (Receiver Operating Characteristic) to evaluate the model.
# ### Compute Confusion Matrix
# Classifiers are typically evaluated by creating a *confusion matrix*, which indicates the number of:
# - True Positives
# - True Negatives
# - False Positives
# - False Negatives
#
# From these core measures, other evaluation metrics such as *precision*, *recall* and *F1* can be calculated.
# Count each confusion-matrix cell by filtering on predicted vs. true label.
tp = float(predicted.filter("prediction == 1.0 AND truelabel == 1").count())
fp = float(predicted.filter("prediction == 1.0 AND truelabel == 0").count())
tn = float(predicted.filter("prediction == 0.0 AND truelabel == 0").count())
fn = float(predicted.filter("prediction == 0.0 AND truelabel == 1").count())
# Precision: share of predicted positives that are correct.
pr = tp / (tp + fp)
# Recall: share of actual positives that were detected.
re = tp / (tp + fn)
metrics = spark.createDataFrame([
    ("TP", tp),
    ("FP", fp),
    ("TN", tn),
    ("FN", fn),
    ("Precision", pr),
    ("Recall", re),
    ("F1", 2*pr*re/(re+pr))],["metric", "value"])  # F1: harmonic mean of precision and recall
metrics.show()
# Looks like we've got a good *Precision*, but a low *Recall*, therefore our *F1* is not that good.
# ### Review the Area Under ROC
# Another way to assess the performance of a classification model is to measure the area under a ROC (Receiver Operating Characteristic) curve for the model. the spark.ml library includes a **BinaryClassificationEvaluator** class that we can use to compute this. The ROC curve shows the True Positive and False Positive rates plotted for varying thresholds.
# Area under the ROC curve, computed from the raw prediction scores.
evaluator = BinaryClassificationEvaluator(labelCol="trueLabel", rawPredictionCol="rawPrediction", metricName="areaUnderROC")
aur = evaluator.evaluate(prediction)
print ("AUR = ", aur)
# So the AUR shows that our model is ok.
# Let's look deeper.
# ### View the Raw Prediction and Probability
# The prediction is based on a raw prediction score that describes a labelled point in a logistic function. This raw prediction is then converted to a predicted label of 0 or 1 based on a probability vector that indicates the confidence for each possible label value (in this case, 0 and 1). The value with the highest confidence is selected as the prediction.
prediction.select("rawPrediction", "probability", "prediction", "trueLabel").show(100, truncate=False)
# Note that the results include rows where the probability for 0 (the first value in the probability vector) is only slightly higher than the probability for 1 (the second value in the probability vector). The default discrimination threshold (the boundary that decides whether a probability is predicted as a 1 or a 0) is set to 0.5; so the prediction with the highest probability is always used, no matter how close to the threshold.
#
# And we can see from the results above that for those *truelabel* 1s that we predicted 0s, many of them the problibilty of 1 is just slightly less than the threshold 0.5.
# ## Tune Parameters
# To find the best performing parameters, we can use the **CrossValidator** class to evaluate each combination of parameters defined in a **ParameterGrid** against multiple *folds* of the data split into training and validation datasets. Note that this can take a long time to run because every parameter combination is tried multiple times.
# ### Change the Discrimination Threshold
# The AUC score seems to indicate a reasonably good model, but the performance metrics seem to indicate that it predicts a high number of *False Negative* labels (i.e. it predicts 0 when the true label is 1), leading to a low *Recall*. We can improve this by lowering the threshold. Conversely, sometimes we may want to address a large number of *False Positive* by raising the threshold.
#
# In this case, I'll let the **CrossValidator** find the best threshold from 0.45, 0.4 and 0.35, regularization parameter from 0.3 and 0.1, and the maximum number of iterations allowed from 10 and 5.
# +
# Grid of 2 regParam x 2 maxIter x 2 threshold = 8 parameter combinations.
paramGrid = ParamGridBuilder().addGrid(lr.regParam, [0.3, 0.1]).addGrid(lr.maxIter, [10, 5]).addGrid(lr.threshold,
                                                                                                    [0.4, 0.3]).build()
# 2-fold cross-validation over the whole pipeline; keeps the best-scoring model.
cv = CrossValidator(estimator=pipeline, evaluator=BinaryClassificationEvaluator(), estimatorParamMaps=paramGrid,
                    numFolds=2)
model = cv.fit(train)
# -
# Score the test set with the cross-validated (tuned) model.
newPrediction = model.transform(test)
# Bug fix: select from the tuned model's output (`newPrediction`), not the
# stale `prediction` DataFrame from the untuned model, so the displayed rows
# actually reflect the new model.
newPredicted = newPrediction.select("features", "prediction", "trueLabel")
newPredicted.show()
# Note that some of the **rawPrediction** and **probability** values that were previously predicted as 0 are now predicted as 1
# Recalculate confusion matrix for the tuned model's predictions.
tp2 = float(newPrediction.filter("prediction == 1.0 AND truelabel == 1").count())
fp2 = float(newPrediction.filter("prediction == 1.0 AND truelabel == 0").count())
tn2 = float(newPrediction.filter("prediction == 0.0 AND truelabel == 0").count())
fn2 = float(newPrediction.filter("prediction == 0.0 AND truelabel == 1").count())
pr2 = tp2 / (tp2 + fp2)  # precision
re2 = tp2 / (tp2 + fn2)  # recall
metrics2 = spark.createDataFrame([
    ("TP", tp2),
    ("FP", fp2),
    ("TN", tn2),
    ("FN", fn2),
    ("Precision", pr2),
    ("Recall", re2),
    ("F1", 2*pr2*re2/(re2+pr2))],["metric", "value"])
metrics2.show()
# Recalculate the Area Under ROC for the tuned model.
evaluator2 = BinaryClassificationEvaluator(labelCol="trueLabel", rawPredictionCol="prediction", metricName="areaUnderROC")
# Bug fix: the original re-evaluated the OLD evaluator on the OLD `prediction`
# DataFrame, so AUR2 could never differ from AUR. Evaluate the tuned model's
# predictions with the new evaluator instead.
aur2 = evaluator2.evaluate(newPrediction)
print( "AUR2 = ", aur2)
# Looks pretty good! The new model improves the *Recall* from 0.11 to 0.37, the *F1* score from 0.20 to 0.54, without compromising other metrics.
#
#
# # Further reading
#
#
# 1.Classification and regression [link](https://spark.apache.org/docs/latest/ml-classification-regression.html).
#
# 2.Advanced data exploration and modeling with Spark [link](https://docs.microsoft.com/en-us/azure/machine-learning/team-data-science-process/spark-advanced-data-exploration-modeling).
#
# 3.Machine Learning with PySpark [link](https://link.springer.com/book/10.1007%2F978-1-4842-4131-8)
#
#
#
# # Summary
# In this tutorial, you discovered how to build and evaluate a classification model with Spark, focusing on the Python API of the pyspark module.
#
# Specifically, you learned:
#
# * How to Prepare the Data for classification.
# * How to define the pipeline.
# * How to generate label predictions.
# * How to evaluate classification models.
# * How to Tune Parameters.
#
#
# ## Next Step
#
# There is still much room to improve the model. For example, I can try more options of lower threshold, or use different classfication models, or prepare data better like adding new features. I'll write another one for this.
|
Machine Learning with Spark.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # 优化算法
# :label:`chap_optimization`
#
# 如果您在此之前按顺序阅读这本书,则已经使用了许多优化算法来训练深度学习模型。这些工具使我们能够继续更新模型参数并最大限度地减少损失函数的价值,正如培训集评估的那样。事实上,任何人满意将优化视为黑盒装置,以便在简单的环境中最大限度地减少客观功能,都可能会知道存在着一系列此类程序的咒语(名称如 “SGD” 和 “亚当”)。
#
# 但是,为了做得好,还需要更深入的知识。优化算法对于深度学习非常重要。一方面,训练复杂的深度学习模型可能需要数小时、几天甚至数周。优化算法的性能直接影响模型的训练效率。另一方面,了解不同优化算法的原则及其超参数的作用将使我们能够以有针对性的方式调整超参数,以提高深度学习模型的性能。
#
# 在本章中,我们深入探讨常见的深度学习优化算法。深度学习中出现的几乎所有优化问题都是 * nonconvex*。尽管如此,在 *CONVex* 问题背景下设计和分析算法是非常有启发性的。正是出于这个原因,本章包括了凸优化的入门,以及凸目标函数上非常简单的随机梯度下降算法的证明。
#
# :begin_tab:toc
# - [optimization-intro](optimization-intro.ipynb)
# - [convexity](convexity.ipynb)
# - [gd](gd.ipynb)
# - [sgd](sgd.ipynb)
# - [minibatch-sgd](minibatch-sgd.ipynb)
# - [momentum](momentum.ipynb)
# - [adagrad](adagrad.ipynb)
# - [rmsprop](rmsprop.ipynb)
# - [adadelta](adadelta.ipynb)
# - [adam](adam.ipynb)
# - [lr-scheduler](lr-scheduler.ipynb)
# :end_tab:
#
|
d2l/chapter_optimization/index.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score, confusion_matrix
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, f1_score
import matplotlib.pyplot as plt
import seaborn as sns
# +
def evaluate_binary_classification(model_name, y_test, y_pred, y_proba=None):
    """Print and plot standard binary-classification metrics.

    Draws a confusion-matrix heatmap titled with *model_name*, then prints
    accuracy, precision, recall, F1 and (when probabilities are supplied)
    the ROC AUC.

    Parameters
    ----------
    model_name : str
        Name used as the plot title.
    y_test : array-like
        True labels.
    y_pred : array-like
        Predicted class labels.
    y_proba : array-like or None
        Predicted positive-class probabilities; when None, AUC is skipped.
    """
    accuracy = accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred)
    recall = recall_score(y_test, y_pred)
    f1 = f1_score(y_test, y_pred)
    # Bug fix: `y_proba != None` compares element-wise for array-likes and can
    # raise "truth value is ambiguous"; an identity check is the correct test.
    if y_proba is not None:
        rocauc_score = roc_auc_score(y_test, y_proba)
    else:
        rocauc_score = "no roc"
    cm = confusion_matrix(y_test, y_pred)
    sns.heatmap(cm, annot=True)
    plt.tight_layout()
    plt.title(f'{model_name}', y=1.1)
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')
    plt.show()
    print("accuracy: ", accuracy)
    print("precision: ", precision)
    print("recall: ", recall)
    print("f1 score: ", f1)
    print("rocauc: ", rocauc_score)
    print(cm)
# -
# +
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, f1_score
def evaluate_regression(y_test, y_pred):
    """Print MAE, MSE and R^2 for a set of regression predictions."""
    scores = {
        "mae": mean_absolute_error(y_test, y_pred),
        "mse": mean_squared_error(y_test, y_pred),
        "r2": r2_score(y_test, y_pred),
    }
    for label, score in scores.items():
        print(label, score)
# -
# +
##note it is against, for .. then sometimes *by itself* stat% = for/(for+against); stat%_against would be 1-stat%
##basic features to add:
##pp %
##pk stats,
##pk%
##sh%
##sv%
##goal_diff = gf -ga
##goal% = gf/(gf+ga)
##total points (prolly adjust for stupid OT, SO shit)
##pts %
##league rank (based on pts%)
feat_drop = [
'startRinkSide',
'HoA', #this is mp ###many of these are repeated from mp_data
'HoA_bet',
'VH',
'home_or_away',
'team',
'name',
'Team',
'Unnamed: 0',
'playerTeam',
'position',
'blocked', ## Same as bSAAgainst
'pim', ## same as penaltyminFor
'goals', ##goalsFor
'shots',
'giveaways',
'hits',
]
#################################first round
feat_goals = [
'goalsAgainst',
'goalsFor',]
##do ga - gf and maybe gf/(gf+ga) ... can I get rid of OT and SO ? Not so easy ... would need to use situation stuff.
##
feat_SOG = [
'shotsOnGoalAgainst',
'shotsOnGoalFor',
]
##sh%, sv%
feat_saves = [
'savedShotsOnGoalAgainst',
'savedShotsOnGoalFor',
#pair with shots for sv%, sh%
]
##pp, pk, penalties
feat_pen_pp_pk = [
'penalityMinutesAgainst', #Penalties
'penalityMinutesFor',
# 'penaltiesAgainst',
# 'penaltiesFor', not sure so useful compared to minutes
'powerPlayGoals',
'powerPlayOpportunities', #Powerplay
]
# ##! Need to create pk stat and pp%, pk%
##xgoals
feat_xgoals =[
'xGoalsAgainst', #(measure of quality of chances for and against)
'xGoalsFor',
'xGoalsPercentage', #derived from above two
]
##possession
feat_SA = [
'unblockedShotAttemptsAgainst',
'unblockedShotAttemptsFor',
'shotAttemptsAgainst',
'shotAttemptsFor',
'corsiPercentage', ##derived from 4 above
'fenwickPercentage',
]
##a way to get possession
feat_FO = [
'faceOffsWonAgainst',
'faceOffsWonFor',
'faceOffWinPercentage',] #has missing nan ... re-do it using last 2.
##measures of possession loss/gain
feat_give_aways = [
'giveawaysAgainst',
'giveawaysFor',
]
feat_dzone_give_aways = [
'dZoneGiveawaysAgainst',
'dZoneGiveawaysFor',]
##should cause more give aways and recoveries
feat_hits = [
'hitsAgainst',
'hitsFor',
]
#measures defensive stat ... also ability to get shots thru
feat_blocked = [
'blockedShotAttemptsAgainst',
'blockedShotAttemptsFor',]
##measures shooting skill to hit the net or ability to make guys shoot wide if you are in lane (kind of like block)
feat_missed = [
'missedShotsAgainst',
'missedShotsFor',]
##measures how many rebounds you give up (degense)... and how many you generate (offense)
##g/rb
##sht/rb
##hml sht/rb
## xg/rb
feat_rebounds = [
'reboundGoalsAgainst', #could put with goals ... prolly want g/rb; pair with high rebounds for
'reboundGoalsFor',
'reboundsAgainst',
'reboundsFor',
]
##ability to maintain pressure ...
feat_pressure = [
'playContinuedInZoneAgainst', #after a shot is next shot in zone (no events outside+ same players on ice)
'playContinuedInZoneFor',
'playContinuedOutsideZoneAgainst',
'playContinuedOutsideZoneFor',
]
feat_pressure_stoppage = [
'freezeAgainst', # "freeze after shot attempt For/Against"
'freezeFor',
'playStoppedAgainst',
'playStoppedFor', #non-freeze reason
]
################################second round
feat_goals_hml_danger = [
'highDangerGoalsAgainst',
'highDangerGoalsFor',
'mediumDangerGoalsAgainst',
'mediumDangerGoalsFor',
'lowDangerGoalsAgainst',
'lowDangerGoalsFor',
]
feat_saves_fen = [
'savedUnblockedShotAttemptsAgainst', ##mised shots plus saved SOG
'savedUnblockedShotAttemptsFor', #pair with unblocked shots for Fsv%
]
feat_xgoals_adj = [
'scoreVenueAdjustedxGoalsAgainst', ##probably select one of these 3 versions?
'scoreVenueAdjustedxGoalsFor',
'flurryAdjustedxGoalsAgainst',
'flurryAdjustedxGoalsFor',
'flurryScoreVenueAdjustedxGoalsAgainst',
'flurryScoreVenueAdjustedxGoalsFor',
]
feat_xgoals_hml_danger = [
'highDangerxGoalsAgainst',
'highDangerxGoalsFor',
'mediumDangerxGoalsAgainst',
'mediumDangerxGoalsFor',
'lowDangerxGoalsAgainst',
'lowDangerxGoalsFor',
]
feat_xgoals_rebounds = [
'xGoalsFromActualReboundsOfShotsAgainst',
'xGoalsFromActualReboundsOfShotsFor',
'xGoalsFromxReboundsOfShotsAgainst',
'xGoalsFromxReboundsOfShotsFor',
'totalShotCreditAgainst', ##xgoals + xgoalsfromxreb -reboundxgoals ?
'totalShotCreditFor',
]
feat_SA_adj = [
'scoreAdjustedShotsAttemptsAgainst',
'scoreAdjustedShotsAttemptsFor',
'scoreAdjustedUnblockedShotAttemptsAgainst',
'scoreAdjustedUnblockedShotAttemptsFor',
]
feat_SOG_hml_danger = [
'highDangerShotsAgainst',
'highDangerShotsFor',
'mediumDangerShotsAgainst',
'mediumDangerShotsFor',
'lowDangerShotsAgainst',
'lowDangerShotsFor',
]
feat_xrebounds = [
'reboundxGoalsAgainst',
'reboundxGoalsFor',
'xReboundsAgainst',
'xReboundsFor']
feat_xpressure = [
'xPlayStoppedAgainst',
'xPlayStoppedFor',
'xPlayContinuedInZoneAgainst', ##maybe do PCIZA and PCIZA - xPCIZA (measures lucky/unlucky)
'xPlayContinuedInZoneFor',
'xPlayStoppedAgainst',
'xPlayStoppedFor',
]
# +
def perc_null(X):
    """Summarize missing values per column of *X*.

    Returns a DataFrame indexed by column name, sorted by descending null
    count, with columns Total (null count), Type (column dtype) and
    Percent (fraction of rows that are null).
    """
    null_counts = X.isnull().sum().sort_values(ascending=False)
    null_fraction = (X.isnull().sum() / X.isnull().count()).sort_values(ascending=False)
    summary = pd.concat(
        [null_counts, X.dtypes, null_fraction],
        axis=1,
        keys=['Total', 'Type', 'Percent'],
    )
    return summary
# -
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.linear_model import Ridge
# +
##raw all the data
data = pd.read_csv("/Users/joejohns/data_bootcamp/GitHub/final_project_nhl_prediction/Data/Shaped_Data/data_bet_stats_mp.csv")
# -
list(data.columns)
# Inspect the ten columns with the most missing values.
null_X = perc_null(data).copy()
ind = list(null_X.index)
null_X.loc[ind[0:10],:]
##only nan are rinkside and fo% --> drop and re-do resp
# Drop redundant/duplicated feature columns and coerce the target to int.
data.drop(columns =feat_drop, inplace = True)
data['won'] = data['won'].apply(int)
data_playoffs = data.loc[data['playoffGame'] == 1, :].copy() #set aside playoff games ... probably won't use them.
data= data.loc[data['playoffGame'] == 0, :].copy() #X will just be reg season games
# +
#sorted(list(X.columns))
# -
X_2016 = data.loc[(data['season'] ==20162017) , :].copy()
# +
X_2016.sort_values(by = ['mp_date', 'game_id'], inplace = True, ascending = True)
#X_2016.loc[~(X_2016.sort_values(by = ['game_id'])['game_id'].values == X_2016.sort_values(by = ['mp_date', 'game_id'])['game_id'].values), :]
# -
results_dic = {}
results_dic['model_version'] =[]
results_dic['accuracy'] = []
results_dic['f1_score'] = []
results_dic['precision'] = []
results_dic['recall'] = []
results_dic['mae'] = []
results_dic['mse'] = []
results_dic['r2'] = []
X_2016HA = X_2016.loc[:, ['Date',
'season',
'Open',
'mp_date',
'nhl_name',
'game_id',
'team_id',
'HoA_gm_stats',
'won',
'settled_in','goalsFor', 'goalsAgainst']].copy()
dates_2016_1 = list(set(X_2016.loc[(X_2016['Date'] >= 900) & (X_2016['Date'] <= 1231) , :]['mp_date'])) #2016 part
dates_2016_2 = list(set(X_2016.loc[(X_2016['Date'] >= 100) & (X_2016['Date'] <= 800) , :]['mp_date'])) #2017 part
dates_2016 = dates_2016_1 + dates_2016_2 #all dates in order
list(X_2016HA.columns)
X_2016H = X_2016HA.loc[X_2016HA['HoA_gm_stats'] == 'home',:].copy()
X_2016A = X_2016HA.loc[X_2016HA['HoA_gm_stats'] == 'away',:].copy()
X_2016H['goal_difference'] = X_2016H['goalsFor'] - X_2016H['goalsAgainst'] ##note every thing is based in home data
X_2016H.reset_index(drop = True, inplace = True)
X_2016A.reset_index(drop = True, inplace = True)
(X_2016A['game_id'] == X_2016H['game_id']).sum() ##games match! hurray! shape = (1230, ..)
X_2016H.reset_index(drop = True, inplace = True)
df_visitor = pd.get_dummies(X_2016H['nhl_name'], dtype=np.int64)
df_home = pd.get_dummies(X_2016A['nhl_name'], dtype=np.int64)
# +
df_model = df_home.sub(df_visitor)
df_model['mp_date'] = X_2016H['mp_date']
df_model['game_id'] = X_2016H['game_id']
# +
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.linear_model import Ridge
lr1 = Ridge(alpha=0.001)
lr2 = Ridge(alpha=0.001)
y = X_2016H.loc[:,['mp_date', 'game_id','goal_difference', 'won']].copy()
X = df_model.copy()
# -
dates[:1]
# +
L = [1,32]
L.append([3,4])
int(np.floor(2.1))
# +
def make_win(x):
    """Map a goal difference to a binary win flag (1 = positive difference)."""
    if x > 0:
        return 1
    if x <= 0:
        return 0
v_make_win = np.vectorize(make_win)
# -
y1_pred = lr1.predict(X.loc[X['mp_date'].isin(dates[i:i+5]), :])
y2_pred = lr2.predict(X.loc[X['mp_date'].isin(dates[i:i+5]), :])
y1_pred_win = v_make_win(y1_pred)
y2_pred_win = v_make_win(y2_pred)
y_test = y.loc[y['mp_date'].isin(dates[i:i+5]),'goal_difference' ]
y_test_win = y.loc[y['mp_date'].isin(dates[i:i+5]),'won' ]
# +
precision_score(y_test_win, y1_pred_win)
predictions_dic ={}
# +
preds_dic1 ={}
#preds_dic['model_version'] =[]
#preds_dic['date'] = []
#preds_dic['game_id'] = []
preds_dic1['predictions'] = []
preds_dic1['actual'] = []
preds_dic1['predictions_win'] = []
preds_dic1['actual_win'] = []
preds_dic2={}
#preds_dic['model_version'] =[]
#preds_dic['date'] = []
#preds_dic['game_id'] = []
preds_dic2['predictions'] = []
preds_dic2['actual'] = []
preds_dic2['predictions_win'] = []
preds_dic2['actual_win'] = []
results_dic = {}
results_dic['model_version'] =[]
results_dic['date'] = []
#preds_dic['game_id'] = []
results_dic['accuracy'] = []
results_dic['f1_score'] = []
results_dic['precision'] = []
results_dic['recall'] = []
results_dic['mae'] = []
results_dic['mse'] = []
results_dic['r2'] = []
num_dates = len(dates_2016)
# +
##version 1 ... retrain like mad ...
# %time
# NOTE(review): the original loop indexed the undefined name `dates`; the
# ordered date list built above is `dates_2016`, which also bounds the loop,
# so it is used consistently here.
for i in range(len(dates_2016)):
    # lr1 retrains on the current day only; lr2 accumulates all days so far.
    lr1.fit(X.loc[X['mp_date'] == dates_2016[i], :], y.loc[y['mp_date'] == dates_2016[i], 'goal_difference'])
    lr2.fit(X.loc[X['mp_date'].isin(dates_2016[:i+1]), :], y.loc[y['mp_date'].isin(dates_2016[:i+1]), 'goal_difference'])
    # Predict goal differences over the next 5 dates, then threshold to wins.
    y1_pred = lr1.predict(X.loc[X['mp_date'].isin(dates_2016[i:i+5]), :])
    y2_pred = lr2.predict(X.loc[X['mp_date'].isin(dates_2016[i:i+5]), :])
    y1_pred_win = v_make_win(y1_pred)
    y2_pred_win = v_make_win(y2_pred)
    y_test = y.loc[y['mp_date'].isin(dates_2016[i:i+5]), 'goal_difference']
    y_test_win = y.loc[y['mp_date'].isin(dates_2016[i:i+5]), 'won']
    # Record predictions and metrics for the one-day model (lr1).
    preds_dic1['predictions'].append(y1_pred)
    preds_dic1['actual'].append(y_test)
    preds_dic1['predictions_win'].append(y1_pred_win)
    preds_dic1['actual_win'].append(y_test_win)
    results_dic['model_version'].append("RidgeReg(C=0.001)_ONE_day")
    results_dic['date'].append(dates_2016[i])
    # Regression metrics on goal difference; classification metrics on wins.
    mae = mean_absolute_error(y_test, y1_pred)
    mse = mean_squared_error(y_test, y1_pred)
    r2 = r2_score(y_test_win, y1_pred_win)
    accuracy = accuracy_score(y_test_win, y1_pred_win)
    precision = precision_score(y_test_win, y1_pred_win, zero_division = 0)
    recall = recall_score(y_test_win, y1_pred_win)
    f1 = f1_score(y_test_win, y1_pred_win)
    results_dic['mae'].append(mae)
    results_dic['mse'].append(mse)
    results_dic['r2'].append(r2)
    results_dic['accuracy'].append(accuracy)
    results_dic['f1_score'].append(f1)
    results_dic['precision'].append(precision)
    results_dic['recall'].append(recall)
    # Record predictions and metrics for the all-days model (lr2).
    preds_dic2['predictions'].append(y2_pred)
    preds_dic2['actual'].append(y_test)
    preds_dic2['predictions_win'].append(y2_pred_win)
    preds_dic2['actual_win'].append(y_test_win)
    results_dic['model_version'].append("RidgeReg(C=0.001)_ALL_days")
    results_dic['date'].append(dates_2016[i])
    mae = mean_absolute_error(y_test, y2_pred)
    mse = mean_squared_error(y_test, y2_pred)
    r2 = r2_score(y_test_win, y2_pred_win)
    accuracy = accuracy_score(y_test_win, y2_pred_win)
    precision = precision_score(y_test_win, y2_pred_win, zero_division = 0)
    recall = recall_score(y_test_win, y2_pred_win)
    f1 = f1_score(y_test_win, y2_pred_win)
    results_dic['mae'].append(mae)
    results_dic['mse'].append(mse)
    results_dic['r2'].append(r2)
    results_dic['accuracy'].append(accuracy)
    results_dic['f1_score'].append(f1)
    results_dic['precision'].append(precision)
    results_dic['recall'].append(recall)
# +
for key in results_dic.keys():
print(key, len(results_dic[key]))
#df_results = pd.DataFrame(results_dic)
# -
df_results =pd.DataFrame(results_dic)
df_results
# +
dates[:5]
L = []
win = 1
# +
##simplified version
# %time
step = 5    # number of dates in lr1's sliding training window
win = 92    # look-back window (in dates) for lr2
pred = 3    # number of future dates to predict each round
v1_sum = 0
v2_sum = 0
counter = 0
# NOTE(review): the original code indexed the undefined name `dates`; the
# ordered date list built above is `dates_2016`, which also bounds the loop,
# so it is used consistently here.
for i in range(step, len(dates_2016), step):
    # lr1 trains on the last `step` dates; lr2 on up to `win` past dates.
    lr1.fit(X.loc[X['mp_date'].isin(dates_2016[i-step:i]), :],
            y.loc[y['mp_date'].isin(dates_2016[i-step:i]), 'goal_difference'])
    lr2.fit(X.loc[X['mp_date'].isin(dates_2016[max(i-win, 0):i]), :],
            y.loc[y['mp_date'].isin(dates_2016[max(i-win, 0):i]), 'goal_difference'])
    y1_pred = lr1.predict(X.loc[X['mp_date'].isin(dates_2016[i:i+pred]), :])
    y2_pred = lr2.predict(X.loc[X['mp_date'].isin(dates_2016[i:i+pred]), :])
    y1_pred_win = v_make_win(y1_pred)
    y2_pred_win = v_make_win(y2_pred)
    y_test = y.loc[y['mp_date'].isin(dates_2016[i:i+pred]), 'goal_difference']
    y_test_win = y.loc[y['mp_date'].isin(dates_2016[i:i+pred]), 'won']
    mae = mean_absolute_error(y_test, y1_pred)
    mse = mean_squared_error(y_test, y1_pred)
    r2 = r2_score(y_test_win, y1_pred_win)
    accuracy1 = accuracy_score(y_test_win, y1_pred_win)
    precision = precision_score(y_test_win, y1_pred_win, zero_division=0)
    recall = recall_score(y_test_win, y1_pred_win)
    f11 = f1_score(y_test_win, y1_pred_win)
    mae = mean_absolute_error(y_test, y2_pred)
    mse = mean_squared_error(y_test, y2_pred)
    r2 = r2_score(y_test_win, y2_pred_win)
    accuracy2 = accuracy_score(y_test_win, y2_pred_win)
    precision = precision_score(y_test_win, y2_pred_win, zero_division=0)
    recall = recall_score(y_test_win, y2_pred_win)
    f12 = f1_score(y_test_win, y2_pred_win)
    counter += 1
    v1_sum += accuracy1
    v2_sum += accuracy2
    # Bug fix: the original printed dates[i-10], a leftover from a step=10
    # version; the window actually starts at i-step.
    print('round ', i, 'date', dates_2016[i-step], 'to', dates_2016[i], accuracy1, accuracy2)
# Average accuracies over all rounds for the two training strategies.
v1_avg = v1_sum/counter
v2_avg = v2_sum/counter
L.append(v2_avg)
print(win, 'avg1', v1_avg, 'avg2', v2_avg, L)
# NOTE(review): `win += 3` looks like a leftover from an outer loop sweeping
# window sizes; it only runs once here. Kept for behavioral parity.
win += 3
# -
17*3
|
Note_books/Explore_Models/.ipynb_checkpoints/model1_v3_concat_use_new_data-Copy1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spectrally-resolved Outgoing Longwave Radiation (OLR) with `RRTMG_LW`
#
# In this notebook we will demonstrate how to use `climlab.radiation.RRTMG_LW` to investigate the clear-sky, longwave response of the atmosphere to perturbations in $CO_{2}$ and SST. In particular, we will use the new `return_spectral_olr` feature to explain the behaviour of the OLR to these changes.
#
# Originally contributed by [<NAME>](https://github.com/AndrewWilliams3142)
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import climlab
import xarray as xr
import scipy.integrate as sp #Gives access to the ODE integration package
# ## Set up idealized atmospheric profiles of temperature and humidity
#
# In this example, we will use a temperature profile which is a moist adiabat, pegged to an isothermal stratosphere at $T_{strat}=200 \mathrm{K}$. We will also assume that relative humidity is fixed (a decent first-order assumption) at a constant value of $\mathrm{RH}=0.8$, with a profile given by [climlab.radiation.water_vapor.ManabeWaterVapor](https://climlab.readthedocs.io/en/latest/api/climlab.radiation.water_vapor.html#climlab.radiation.water_vapor.ManabeWaterVapor).
# +
from climlab.utils.thermo import pseudoadiabat
def generate_idealized_temp_profile(SST, plevs, Tstrat=200):
    """
    Build an idealized temperature profile: a pseudoadiabat pegged to SST,
    capped below by an isothermal stratosphere at Tstrat.
    """
    # Integrate the pseudoadiabat from the surface upward; odeint wants the
    # pressure coordinate reversed, hence the flips on input and output.
    profile = sp.odeint(pseudoadiabat, SST, np.flip(plevs)).reshape(-1)
    # Clamp everything colder than the stratospheric temperature.
    profile[profile < Tstrat] = Tstrat
    return np.flip(profile)
def make_idealized_column(SST, num_lev=100, Tstrat=200):
    """Create a climlab column state holding the idealized temperature profile."""
    column = climlab.column_state(num_lev=num_lev, num_lat=1)
    # Pressure levels come from the column's vertical axis.
    pressure_levels = column['Tatm'].domain.axes['lev'].points
    column['Ts'][:] = SST  # surface temperature
    column['Tatm'][:] = generate_idealized_temp_profile(
        SST=SST, plevs=pressure_levels, Tstrat=Tstrat)
    return column
# +
state = make_idealized_column(300)
# Plot the profile
fig, ax = plt.subplots(dpi=100)
state['Tatm'].to_xarray().plot(ax=ax, y='lev', yincrease=False)
ax.set_xlabel("Temperature (K)")
ax.set_ylabel("Pressure (hPa)")
ax.grid()
# -
# Now, compute specific humidity profile using [climlab.radiation.water_vapor.ManabeWaterVapor](https://climlab.readthedocs.io/en/latest/api/climlab.radiation.water_vapor.html#climlab.radiation.water_vapor.ManabeWaterVapor)
h2o = climlab.radiation.water_vapor.ManabeWaterVapor(state=state,
relative_humidity=0.8)
# +
fig, ax = plt.subplots(dpi=100)
h2o.q.to_xarray().plot(ax=ax, y='lev', yincrease=False)
ax.set_xlabel("Specific humidity (g/g)")
ax.set_ylabel("Pressure (hPa)")
ax.grid()
# -
# ## Run the profiles through `RRTMG_LW`
#
# With $CO_{2}=280\mathrm{ppmv}$ and all other radiatively active gases (aside from water vapour) set to zero.
# +
absorber_vmr = {'CO2':280/1e6,
'CH4':0.,
'N2O':0.,
'O2':0.,
'CFC11':0.,
'CFC12':0.,
'CFC22':0.,
'CCL4':0.,
'O3':0.}
# RRTMG radiation
rad = climlab.radiation.RRTMG_LW(state=state, specific_humidity=h2o.q,
icld=0, # Clear-sky only!
return_spectral_olr=False, # Just return total OLR
absorber_vmr = absorber_vmr)
# -
rad.compute_diagnostics()
rad.OLR
# ## Now, wrap it all into a simple function
#
# This will make it easier to explore the behaviour of the OLR as a function of temperature and $CO_{2}$.
def calc_olr(SST, CO2ppmv, return_spectral_olr=False, RH=0.8, Tstrat=200, qStrat=5e-06):
    """Run a clear-sky RRTMG_LW calculation for an idealized column.

    Parameters
    ----------
    SST : float
        Surface temperature (K).
    CO2ppmv : float
        CO2 concentration in ppmv.
    return_spectral_olr : bool, optional
        If True, the process also produces band-by-band OLR diagnostics.
    RH : float, optional
        Relative humidity for the Manabe water-vapour model (default 0.8).
    Tstrat : float, optional
        Stratospheric temperature (K) for the idealized profile.
    qStrat : float, optional
        Minimum (stratospheric) specific humidity.  Set RH=0. and qStrat=0.
        for a fully dry column.

    Returns
    -------
    The RRTMG_LW process with diagnostics computed (e.g. ``.OLR``).
    """
    # Idealized temperature profile on a fresh column
    state = make_idealized_column(SST, Tstrat=Tstrat)
    # Fixed-relative-humidity water vapour profile
    h2o = climlab.radiation.water_vapor.ManabeWaterVapor(state=state,
                                                         relative_humidity=RH,
                                                         qStrat=qStrat,
                                                         )
    # Work on a copy so the module-level absorber_vmr dict is not mutated as
    # a side effect of calling this function (the original wrote CO2 back
    # into the shared dict on every call).
    vmr = dict(absorber_vmr)
    vmr['CO2'] = CO2ppmv/1e6
    # RRTMG longwave radiation, clear-sky only (icld=0)
    rad = climlab.radiation.rrtm.rrtmg_lw.RRTMG_LW(state=state,
                                                   specific_humidity=h2o.q,
                                                   icld=0,
                                                   return_spectral_olr=return_spectral_olr,
                                                   absorber_vmr=vmr)
    rad.compute_diagnostics()
    return rad
# Test this gives the same as before...
calc_olr(SST=300, CO2ppmv=280).OLR
# Now, lets iterate over a few `(SST, CO2)` pairs
# +
# %%time
n=20
OLRS = np.zeros((n,n))
temparray = np.linspace(280, 290, n)
co2array = np.linspace(280, 1200, n)
for idx1, temp in enumerate(temparray):
for idx2, co2 in enumerate(co2array):
OLRS[idx1, idx2] = calc_olr(temp, co2).OLR
# +
da = xr.DataArray(OLRS, dims=['temp', 'co2'],
coords={'temp':temparray,
'co2':co2array},
)
fig, ax = plt.subplots(dpi=100)
p = da.plot.contourf(ax=ax,
cmap='viridis',
levels=20,
add_colorbar=False)
fig.colorbar(p, label="OLR (W m$^{-2}$)")
ax.set_xlabel("$CO_{2}$ (ppmv)")
ax.set_ylabel("SST (K)")
# -
# ### Okay then! As expected we can see that, all else being equal, increasing CO$_{2}$ <span style="color:blue">decreases the OLR</span>, whereas increasing the SST <span style="color:red">increases the OLR</span> in the model.
#
# So then, what do these changes look like in `wavenumber` space? We can investigate this using the new `return_spectral_olr` argument to `RRTMG_LW`!
#
# First though, let's check the model reproduces the Planck curve!
# +
# To do this, we'll run the model with the idealized temperature profile
# but not include the effects of water vapour (i.e., set RH=0 and qStrat=0)
# We've already set all other absorbing species to 0.
rad1 = calc_olr(SST=300, CO2ppmv=0., RH=0., return_spectral_olr=True, qStrat=0.)
# +
# check that the different OLRs match up...
print(rad1.OLR_spectral.to_xarray().sum('wavenumber').values)
print(rad1.OLR)
# -
# Now, lets check to see if we get the familiar Planck curve
# +
wavenumbers = np.linspace(0.1, 3000) # don't start from zero to avoid divide by zero warnings
# Centers and Widths of the spectral bands, cm-1
spectral_centers = rad1.OLR_spectral.domain.axes['wavenumber'].points
spectral_widths = rad1.OLR_spectral.domain.axes['wavenumber'].delta
def planck_curve(wavenumber, T):
    '''Return the Planck blackbody emission curve in units of W/m2/cm-1.

    Inputs: wavenumber in cm-1
            temperature T in units of K'''
    # 100pi factor converts from steradians/m to 1/cm
    return (climlab.utils.thermo.Planck_wavenumber(wavenumber, T)*100*np.pi)
def make_planck_curve(ax, T, color='orange'):
    '''Plot the Planck curve (W/m2/cm-1) at temperature T on the given ax object.

    Uses the module-level `wavenumbers` array as the abscissa.'''
    ax.plot(wavenumbers, planck_curve(wavenumbers, T),
            lw=2, color=color, label="Planck curve, {}K".format(T))
def make_planck_feedback(ax, T, color='orange'):
    '''Plot the Planck spectral feedback parameter (mW/m2/cm-1/K) on the given ax object.

    Approximated as the finite difference of the Planck curve between T+1 and T,
    converted from W to mW by the factor 1000.'''
    ax.plot(wavenumbers, (planck_curve(wavenumbers, T+1)-planck_curve(wavenumbers, T))*1000,
            lw=2, color=color, label="Planck feedback, {}K".format(T))
def make_rrtmg_spectrum(ax, OLR_spectral, color='blue', alpha=0.5, label='RRTMG - 300K'):
    '''Bar-plot an RRTMG spectral OLR field on ax, normalized per cm-1.

    Uses the module-level `spectral_centers` and `spectral_widths` arrays;
    each band's integrated flux is divided by the band width to give W/m2/cm-1.'''
    # Need to normalize RRTMG spectral outputs by width of each wavenumber band
    ax.bar(spectral_centers, np.squeeze(OLR_spectral)/spectral_widths,
           width=spectral_widths, color=color, edgecolor='black', alpha=alpha, label=label)
# +
""" Plot ! """
fig, ax = plt.subplots(dpi=100)
make_planck_curve(ax, 300, color='orange')
make_rrtmg_spectrum(ax, rad1.OLR_spectral, label='RRTMG - 300K')
ax.legend(frameon=False)
ax.set_xlabel("Wavenumber (cm$^{-1}$)")
ax.set_ylabel("TOA flux (W/m$^{2}$/cm$^{-1}$)")
ax.grid()
# -
# ## Now, what happens when we include $CO_{2}$?
# +
# Same calculation as above but with some well-mixed CO2 in the column
rad2 = calc_olr(SST=300, CO2ppmv=10, RH=0., qStrat=0., return_spectral_olr=True, )
rad3 = calc_olr(SST=300, CO2ppmv=280, RH=0., qStrat=0., return_spectral_olr=True, )
# +
fig, ax = plt.subplots(dpi=100)
make_planck_curve(ax, 300, color='orange')
make_rrtmg_spectrum(ax, rad1.OLR_spectral, label='RRTMG - 300K, 0ppmv CO2', color='blue')
make_rrtmg_spectrum(ax, rad2.OLR_spectral, label='RRTMG - 300K, 10ppmv CO2', color='orange')
make_rrtmg_spectrum(ax, rad3.OLR_spectral, label='RRTMG - 300K, 280ppmv CO2', color='green')
ax.legend(frameon=False)
ax.set_xlabel("Wavenumber (cm$^{-1}$)")
ax.set_ylabel("TOA flux (W/m$^{2}$/cm$^{-1}$)")
ax.grid()
# -
# As we saw before, including $CO_{2}$ in the radiative transfer calculation reduces the total OLR (i.e., the spectral integral over what we've plotted). This happens predominantly due to absorption at the center of the $15 \mu\mathrm{m}$ $CO_{2}$ band (around $667.5 \mathrm{cm}^{-1}$).
#
# Note that increasing the $CO_{2}$ concentration causes a greater reduction at the center of the band, with increasing absorption at the edges (commonly referred to as the 'wings') of the band.
# ## What about water vapour?
#
# Now, we'll redo the calculation, but include the specific humidity of water vapour in the call to `RRTMG_LW`.
# +
# Our calc_olr() function handles water vapor by setting the RH parameter
rad4 = calc_olr(SST=300, CO2ppmv=0., RH=0.8, return_spectral_olr=True, )
# +
fig, ax = plt.subplots(dpi=100, figsize=(7,4))
make_planck_curve(ax, 300, color='orange')
make_rrtmg_spectrum(ax, rad1.OLR_spectral, label="RRTMG - 300K, 0ppmv CO2", color='blue')
make_rrtmg_spectrum(ax, rad4.OLR_spectral, label="RRTMG - 300K, water vapour, 0ppmv CO2", color='orange')
ax.legend(frameon=False, loc='upper right')
ax.set_xlabel("Wavenumber (cm$^{-1}$)")
ax.set_ylabel("TOA flux (W/m$^{2}$/cm$^{-1}$)")
ax.grid()
# -
# #### Water vapour clearly also influences the OLR spectrum quite a bit! Two interesting things to note:
#
# Firstly, water vapour is a strong absorber at a much wider range of wavelengths than $CO_{2}$!
#
# Secondly, there is a region around 800-1500 $\mathrm{cm}^{-1}$ where water vapour doesn't cause much absorption at all! This is the well-known water vapour *window*, and it is a region where radiation emitted near the surface can efficiently escape to space. The behaviour of these *window* regions is extremely important in understanding the temperature dependence of Earth's OLR, and thus climate sensitivity (see, for example, Koll and Cronin (2018)).
# ## $\textit{"Last call for orders! The water vapour window is closing!"}$
#
# Clausius-Clapeyron tells us that the saturation water vapor pressure of water (i.e., the water-holding capacity of the atmosphere) increases by about 6-7% for every 1°C rise in temperature. One important consequence of this is that the optical depth of water vapour increases with temperature, which causes these spectral 'window' regions to eventually become optically thick. When this happens, the OLR in these regions becomes fixed and can't increase with warming. Can we see this in our model?
#
# To do this, we'll run the model again at 280K, 300K and 320K, with a varying water vapour profile. We should see that the OLR in this window region eventually saturates to a constant value.
# +
SSTcolors = {320: 'green',
300: 'orange',
280: 'blue',
}
rad = {}
for SST in SSTcolors:
rad[SST] = calc_olr(SST=SST, CO2ppmv=0., RH=0.8, return_spectral_olr=True, )
# +
""" Plot ! """
fig, ax = plt.subplots(dpi=100, figsize=(7,4))
for SST in SSTcolors:
make_planck_curve(ax, SST, color=SSTcolors[SST])
make_rrtmg_spectrum(ax, rad[SST].OLR_spectral,
label="RRTMG - {}K, water vapour, no CO2".format(SST),
color=SSTcolors[SST])
ax.set_xlim(0, 4000)
ax.legend(frameon=False, loc='upper right')
ax.set_xlabel("Wavenumber (cm$^{-1}$)")
ax.set_ylabel("TOA flux (W/m$^{2}$/cm$^{-1}$)")
ax.grid()
# -
# ## Nice!
#
# We can clearly see from this plot that the OLR in the water vapour windows saturates between 300K and 320K
#
# To make this more quantitative, lets consider the 'spectral' feedback parameter $\lambda_{\nu}$ for each SST, which is defined as the change in OLR per degree of warming, which we calculate as:
#
# $$\lambda_{\nu} = \frac{\mathrm{OLR}_{\nu}(\mathrm{SST}+1)- \mathrm{OLR}_{\nu}(\mathrm{SST})}{1\mathrm{K}}$$
#
# Hence, because OLR eventually becomes decoupled from the SST at high enough temperatures, we should expect the feedback parameter to rapidly decline (eventually to zero) in these window regions.
feedback = {}
for SST in SSTcolors:
# Calculate perturbation (+1K) state diagnostics
rad_p1 = calc_olr(SST=SST+1, CO2ppmv=0., RH=0.8, return_spectral_olr=True, )
# Calculate spectral feedback parameter
feedback[SST] = (rad_p1.OLR_spectral-rad[SST].OLR_spectral)
# ## At low temperatures, the feedback parameter in the window region is close the the Planck feedback, indicating efficient emission to space from these wavenumbers.
# +
""" Plot ! """
fig, ax = plt.subplots(dpi=100, figsize=(7,4))
SST=280
make_planck_feedback(ax, SST, color=SSTcolors[SST])
make_rrtmg_spectrum(ax, feedback[SST]*1000,
label="RRTMG - {}K, water vapour, no CO2".format(SST),
color=SSTcolors[SST])
ax.set_xlim(0, 4000)
ax.set_ylim(-0.5, 6)
ax.legend(frameon=False, loc='upper right')
ax.set_xlabel("Wavenumber (cm$^{-1}$)")
ax.set_ylabel(r"$\lambda_{\nu}$ (mW/m$^{2}$/cm$^{-1}/K$)")
ax.grid()
# -
# ### At higher temperatures, water vapour becomes optically thick in the window region, causing the OLR to become less sensitive to changes in surface temperature. As such, the feedback parameter reduces rapidly.
# +
""" Plot ! """
fig, ax = plt.subplots(dpi=100, figsize=(7,4))
SST=300
make_planck_feedback(ax, SST, color=SSTcolors[SST])
make_rrtmg_spectrum(ax, feedback[SST]*1000,
label="RRTMG - {}K, water vapour, no CO2".format(SST),
color=SSTcolors[SST])
ax.set_xlim(0, 4000)
ax.set_ylim(-0.5, 6)
ax.legend(frameon=False, loc='upper right')
ax.set_xlabel("Wavenumber (cm$^{-1}$)")
ax.set_ylabel(r"$\lambda_{\nu}$ (mW/m$^{2}$/cm$^{-1}/K$)")
ax.grid()
# +
""" Plot ! """
fig, ax = plt.subplots(dpi=100, figsize=(7,4))
SST=320
make_planck_feedback(ax, SST, color=SSTcolors[SST])
make_rrtmg_spectrum(ax, feedback[SST]*1000,
label="RRTMG - {}K, water vapour, no CO2".format(SST),
color=SSTcolors[SST])
ax.set_xlim(0, 4000)
ax.set_ylim(-1, 6.5)
ax.legend(frameon=False, loc='upper right')
ax.set_xlabel("Wavenumber (cm$^{-1}$)")
ax.set_ylabel(r"$\lambda_{\nu}$ (mW/m$^{2}$/cm$^{-1}/K$)")
ax.grid()
|
docs/source/courseware/Spectral_OLR_with_RRTMG.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.io import loadmat
# %matplotlib inline
# Load the MATLAB-format digit dataset: X holds (m, 400) pixel features,
# y holds the class labels 1-10.
data = loadmat("E://mldata//ex3data1 (2)")
X = data['X']
y = data['y']
# +
# Prepend a column of ones for the bias/intercept term.
# BUG FIX: X is a numpy array here, so the original `X.insert(0, "ones", 1)`
# (a pandas DataFrame method) raises AttributeError unless X was first
# converted to a DataFrame; np.insert does the same job directly.
X = np.insert(X, 0, 1, axis=1)
# -
X = np.matrix(X)
y = np.matrix(y)
# One parameter per column: 400 pixel features + bias
theta = np.matrix(np.zeros((1, 401)))
X.shape, theta.shape, y.shape
type(X), type(y), type(theta)
np.unique(y)
def modify_y(y, i):
    """Return a one-vs-all binary label column for class ``i``.

    Parameters
    ----------
    y : array-like of shape (m, 1)
        Original class labels (1..10 for this dataset).
    i : int
        The class to treat as the positive class.

    Returns
    -------
    np.matrix of shape (m, 1) with 1.0 where ``y == i`` and 0.0 elsewhere.
    """
    # Size the output from the input rather than the hard-coded 4000 used
    # previously: the ex3 dataset has 5000 examples, so the original loop
    # over range(len(y)) would index past the 4000-row buffer (IndexError).
    y_mod = np.zeros((len(y), 1))
    for j in range(len(y)):
        if y[j] == i:
            y_mod[j] = 1
    return np.matrix(y_mod)
def sigmoid(z):
    """Logistic function 1 / (1 + e^(-z)), applied elementwise."""
    return 1.0 / (1.0 + np.exp(-z))
def computeCost(X, y, theta):
    """Unregularized logistic-regression cost (mean cross-entropy).

    X: np.matrix (m, n); y: np.matrix (m, 1); theta: np.matrix (1, n).
    """
    h = sigmoid(X * theta.T)                      # predicted probabilities, shape (m, 1)
    positive = np.multiply(-y, np.log(h))         # loss contribution where y == 1
    negative = np.multiply(1 - y, np.log(1 - h))  # loss contribution where y == 0
    return np.sum(positive - negative) / len(X)
# +
#computeCost(X,y,theta)
# -
def gradientDescent(X, y, theta, alpha, iters):
    """Batch gradient descent for logistic regression.

    Parameters
    ----------
    X : np.matrix, shape (m, n)
        Design matrix (bias column already included).
    y : np.matrix, shape (m, 1)
        Binary labels for the current one-vs-all class.
    theta : np.matrix, shape (1, n)
        Initial parameter row vector.
    alpha : float
        Learning rate.
    iters : int
        Number of gradient steps.

    Returns
    -------
    np.matrix, shape (1, n): the fitted parameters.
    """
    m = len(X)
    for _ in range(iters):
        # Prediction error for the whole batch, shape (m, 1)
        error = sigmoid(X * theta.T) - y
        # Vectorized gradient step: (error.T * X) has shape (1, n) and equals
        # the original per-parameter Python loop (which made n passes over the
        # data each iteration).  The per-iteration computeCost() call whose
        # result was never used has been dropped, and the hard-coded
        # parameter count (401) is now inferred from theta's shape.
        theta = theta - (alpha / m) * (error.T * X)
    return theta
all_theta=np.matrix(np.zeros((10,401)))
for i in range (1,11):
theta=np.matrix(np.zeros((1,401)))
y_mod=modify_y(y,i)
all_theta[i-1,:]=gradientDescent(X,y_mod,theta,0.01,1000)
all_theta
p=sigmoid(X*all_theta.T)
p[500]
y_pred=np.argmax(p,axis=1)+1
y_pred
def accuracy(y, y_pred):
    """Percentage of positions where the labels in y and y_pred agree."""
    matches = sum(1 for actual, predicted in zip(y, y_pred) if actual == predicted)
    return matches / len(y) * 100
accuracy(y,y_pred)
|
DigitRecognizationProcess.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Simpson's Rule
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Image
# ### Part 1: Implementing Simpson's Rule
# The trapeziodal rule approximates the area under a curve by approximating the curve with straight line segments. We can often get a much better result if we approximate the function instead with a curve of some kind. Simpson's rule does exactly this, using quadratic curves. In order to specify a quadratic completely, you need three points, not just two as with a straight line. So in this method, we take a pair of adjacent slices and fit a quadratic through the three points that mark the boundaries of those slices. Simpson's rule involves approximating the integrand with quadratics this way, then calculating the area under those quadratics, which gives an approximation to the area under the true curve.
Image(url="./SimpsonsRule.png", width=400)
# Suppose that our integrand is denoted $f(x)$ and the spacing of adjacent points is $h$, and suppose, for the purpose of argument, that we have three points $-h, 0, h$. If we fit a quadratic $Ax^2 + Bx + C$ through these points, then by definition:
#
# $$ f(-h) = Ah^2 - Bh + C$$
#
# $$ f(0) = C$$
#
# $$ f(h) = Ah^2 + Bh + C$$
# Solving these equations simultaneously for $A$, $B$ and $C$ gives:
#
# $$ A = \frac{1}{h^2} \left[ \frac{1}{2} f(-h) - f(0) + \frac{1}{2} f(h) \right] $$
#
# $$ B = \frac{1}{2h} \left[ f(h) - f(-h) \right] $$
#
# $$ C = f(0) $$
# The area under the curve of $f(x)$ from $-h$ to $h$ is given by the approximation:
#
# $$ \int^{h}_{-h} (Ax^2 + Bx + C)\, dx = \frac{2}{3} Ah^3 + 2Ch = \frac{h}{3} \left[ f(-h) + 4f(0) + f(h) \right] $$
# To use Simpson's rule to perform a general integral, note that the equation above does not depend on the fact that our three points are $-h, 0, h$. If we were to slide the curve along the x-axis to higher or lower values, the area underneath would not change. So we can use Simpson's rule for any three uniformly spaced points. If we are integrating from $x=a$ to $x=b$ in slices of width $h$, then the three points bounding the first pair of slices fall at $x=a$, $x=a+h$, and $x = a + 2h$.
# $$I(a, b) \approx \frac{1}{3}h \left[ f(a) + f(b) + 4\sum_{k\ odd} f(a + kh) + 2\sum_{k\ even} f(a + kh) \right]$$
# Where $k\ odd = 1, 3 ... N - 1$ and $k\ even = 2, 4 ... N - 2$ (the endpoints $k = 0$ and $k = N$ enter separately, each with weight 1).
# <b> Note, $N$ must be an even number. </b>
def my_integrand(x, a, b, c):
    """Quadratic test integrand a*x**2 + b*x + c — easy to integrate by hand."""
    return a * x ** 2 + b * x + c
def simpsons_rule(f, a, b, N, *args):
    """
    Approximate the integral of `f` over [a, b] using composite Simpson's Rule.

    Parameters:
    f (function): the integrand in the form f(x, *args)
    a (float): the lower bound on the integral
    b (float): the upper bound on the integral
    N (int): number of slices to use (must be even)
    args: any additional arguments f needs to be evaluated

    Returns:
    (float) approximate area under the curve
    """
    h = (b - a) / N
    # Interior points run k = 1 .. N-1: odd k carry weight 4, even k weight 2.
    # BUG FIX: the original loop ran k up to N, so the even endpoint term
    # added an extra 2*f(b) and f(b) was counted with total weight 3 instead
    # of 1.  The endpoints belong outside the sum with weight 1 each.
    interior = 0
    for k in range(1, N):
        weight = 4 if k % 2 == 1 else 2
        interior += weight * f(a + k * h, *args)
    return h / 3 * (f(a, *args) + f(b, *args) + interior)
# Sanity checks.  The differences are wrapped in abs() so that a negative
# error cannot make the assert trivially true (the originals tested
# `approx - exact < 0.01`, which always passes when the approximation
# underestimates the integral).
assert(abs(simpsons_rule(lambda x: x**2, 3, 5, 10000) - ((5**3)/3 - (3**3)/3)) < 0.01)
assert(abs(simpsons_rule(lambda x: np.sin(x), 0, np.pi/2, 10000) - 1) < 0.01)
assert(abs(simpsons_rule(lambda x: x**3, 1, 3, 10000) - (1/4)*(3**4 - 1**4)) < 0.01)
# The integral of 5*x**2 from 1 to 3 is (5/3)*(3**3 - 1**3), not (1/6)*(3**6 - 1**6)
# as the original expected value claimed.
assert(abs(simpsons_rule(my_integrand, 1, 3, 100000, 5, 0, 0) - (5/3)*(3**3 - 1**3)) < 0.01)
# ### Part 2: Applying Simpson's Rule
# Planetary nebula are the outer atmospheres of dying stars ejected into space. Astronomers model these nebulae to learn about the total mass they contain, and the details of how they were ejected. The image is of a rare, spherical-shell planetary nebula, Abell 38, photographed by astronomer George Jacoby (WIYN Observatory) and his colleagues using the giant, 4-meter Mayall Telescope at Kitt Peak, Arizona. Abell-38 is located 7,000 light years away in the constellation Hercules. The nebula is 5 light years in diameter and 1/3 light year thick. For other spectacular nebula images, visit the Hubble Space Telescope archive at
# http://hubblesite.org/newscenter/archive/releases/nebula
Image(url="https://www.researchgate.net/profile/Stefan_Guthe/publication/233532800/figure/fig3/AS:670022301151234@1536757173124/The-planetary-nebula-Abell-39-and-reconstructions-assuming-spherical-symmetry-The.ppm", width=300)
# We want to calculate the intensity of the nebula (shaded shell) at different radii from its center $(b)$ along a series of chords through the nebula $(AB)$. The intensity, $I(b)$, will be proportional to the density of gas within the nebula, which we define as $D(r)$. The shell is spherically-symmetric, as is $D(r)$, so there are obvious symmetries in the geometry of the problem. Because $D(r)$ varies along the chord AB, we have to sum up the contribution to $I(b)$ from each spot along AB.
Image(url="./Diagram.png")
# I'll spare you the effort of creating this integral. The intensity can be written as a function of $b$:
#
# $$ I(b) = \int^{Ro}_{Ri} \frac{D_0 r}{\sqrt{(r^2 - b^2)}} dr $$
#
# For the Abell 38 nebula we are interested in, $Ri = 2.2$ light years, $Ro = 2.5$ light years, and $D_0$ is a constant (for simplicity, just let $D_0 = 1$).
def integrand(r, b, D0):
    """Nebula intensity integrand D0 * r / sqrt(r^2 - b^2).

    Works for complex-valued b (the caller passes a complex array so the
    square root stays defined when b^2 > r^2)."""
    root = (r * r - b * b) ** 0.5
    return D0 * r / root
def intensity(b):
    # Intensity I(b) at impact parameter b: the integral of D0*r/sqrt(r^2-b^2)
    # over the shell radius r in [Ri, Ro], evaluated with Simpson's rule using
    # 1000 slices (Abell 38 geometry).
    Ri = 2.2   # inner shell radius, light years
    Ro = 2.5   # outer shell radius, light years
    D0 = 1     # density constant (arbitrary units)
    return simpsons_rule(integrand, Ri, Ro, 1000, b, D0)
# Now let's plot the intensity versus $b$ (the radial distance from the center of the nebula) from $b = 0$ to $b = 3.0$. Notice that once $b^2 > r^2$ the value inside the square root will be negative. This is OK. We can handle this by letting `b` be a complex array and just plotting the real values of `b` and `I`. I've created the `b` array for you below.
# TO DO: Create a line plot of the intensity using b values from 0 to 3.0
b = np.linspace(0, 3.0, 100, dtype=np.complex128)
plt.plot(b, intensity(b))
# Now, let's create a density plot of this intensity. Since `b` is a radial value, you'll have to `meshgrid` X and X arrays and then convert them to a radius $(r = \sqrt{x^2 + y^2})$. Then pass this "radial matrix" into your intensity function. When you plot the output it should look like the nebula image above!
# Hint: Your nebula will be centered at (0,0) so in order to get the 'full nebula' in the picture, you'll want your X and Y arrays to go from [-3.0, 3.0].
# TO DO: Create a density plot of your intensity values for b values from 0 to 3.0
x = np.linspace(-3.0,3.0,400, dtype = np.complex128)
y = np.linspace(-3.0,3.0,400, dtype = np.complex128)
xx, yy = np.meshgrid(x,y)
r = (xx**2 + yy**2)**(1/2)
plt.figure(figsize=(10,10))
plt.imshow(intensity(r).real, vmax = 4, cmap = "gnuplot2")
# print(0)
|
SimpsonsRule.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Simple housing version
# * State: $[w, n, M, e, \hat{S}, z]$, where $z$ is the stock trading experience, which took value of 0 and 1. And $\hat{S}$ now contains 27 states.
# * Action: $[c, b, k, q]$ where $q$ only takes 2 value: $1$ or $\frac{1}{2}$
from scipy.interpolate import interpn
from multiprocessing import Pool
from functools import partial
from constant import *
import warnings
warnings.filterwarnings("ignore")
# +
#Define the utility function
def u(c):
    # CRRA (isoelastic) utility of consumption; `gamma` is the risk-aversion
    # coefficient imported from constant.py.  Undefined at gamma == 1.
    return (np.float_power(c, 1-gamma) - 1)/(1 - gamma)
#Define the bequeath function, which is a function of wealth
def uB(tb):
    # Bequest utility: consumption utility of total bequeathed wealth tb,
    # scaled by the bequest weight `B` from constant.py.
    return B*u(tb)
#Calcualte HE
def calHE(x):
    # Home equity for a batch of states: house value (H*pt) minus the
    # outstanding mortgage M (column 2).
    # The input x is a 2-D numpy array of states with columns [w, n, M, e, s, z].
    HE = H*pt - x[:,2]
    return HE
#Calculate TB
def calTB(x):
    # Total bequest for a batch of states: liquid wealth w (col 0) plus the
    # 401k balance n (col 1) plus home equity.
    # The input x is a 2-D numpy array of states with columns [w, n, M, e, s, z].
    TB = x[:,0] + x[:,1] + calHE(x)
    return TB
#The reward function
def R(x, a):
    '''
    Per-period reward: utility of a Cobb-Douglas aggregate of non-housing
    consumption c and housing services Vh.

    Input:
        state x: w, n, M, e, s, z
        action a: c, b, k, q = a which is a np array (one row per action)
    Output:
        reward value: the length of return should be equal to the length of a
    '''
    w, n, M, e, s, z = x
    reward = np.zeros(a.shape[0])
    # actions keeping the whole house (q == 1, not renting out)
    nrent_index = (a[:,3]==1)
    # actions renting out half the house (q != 1)
    rent_index = (a[:,3]!=1)
    # housing services when not renting out; kappa scales services up when
    # owner-occupied and down when shared (see constant.py — TODO confirm)
    nrent_Vh = (1+kappa)*H
    # housing services when renting out half the house
    rent_Vh = (1-kappa)*(H/2)
    # Cobb-Douglas aggregate of consumption (weight alpha) and housing services
    nrent_C = np.float_power(a[nrent_index][:,0], alpha) * np.float_power(nrent_Vh, 1-alpha)
    rent_C = np.float_power(a[rent_index][:,0], alpha) * np.float_power(rent_Vh, 1-alpha)
    reward[nrent_index] = u(nrent_C)
    reward[rent_index] = u(rent_C)
    return reward
# -
def transition(x, a, t):
    '''
    Enumerate all possible successor states and their probabilities.

    Input: state x = [w, n, M, e, s, z], action array a (rows [c, b, k, q]),
           and time t.
    Output: (future_states, future_probs).  For each action the successors
            are laid out contiguously: nS economic states after retirement
            (t >= T_R), or 2*nS (employment 0/1 x economic state) before.
            Globals (D, T_max, rh, nS, T_R, r_k, r_b, Ps, Pe, gn, ...) come
            from constant.py.
    '''
    w, n, M, e, s, z = x
    s = int(s)
    e = int(e)
    nX = len(x)
    aSize = len(a)
    # mortgage payment due this period; remaining balance accrues interest rh
    m = M/D[T_max-t]
    M_next = M*(1+rh) - m
    # action components: bond holding, stock holding, housing share kept
    b = a[:,1]
    k = a[:,2]
    q = a[:,3]
    # transition of stock-market experience z: once 1, stays 1; from 0 it
    # becomes 1 only if the agent buys stock (k > 0) this period
    z_next = np.ones(aSize)
    if z == 0:
        z_next[k==0] = 0
    # we want the output format to be array of all possible future states and corresponding
    # probability. x = [w_next, n_next, M_next, e_next, s_next, z_next]
    # create the empty numpy array to collect future states and probability
    if t >= T_R:
        # Retired: employment no longer varies (e_next = 0), nS successors per action
        future_states = np.zeros((aSize*nS,nX))
        n_next = gn(t, n, x, (r_k+r_b)/2)
        # next-period wealth: bond return is known given s; stock return varies over the nS states
        future_states[:,0] = np.repeat(b*(1+r_b[s]), nS) + np.repeat(k, nS)*(1+np.tile(r_k, aSize))
        future_states[:,1] = np.tile(n_next,aSize)
        future_states[:,2] = M_next
        future_states[:,3] = 0
        future_states[:,4] = np.tile(range(nS),aSize)
        future_states[:,5] = np.repeat(z_next,nS)
        future_probs = np.tile(Ps[s],aSize)
    else:
        # Working age: employment can switch, so 2*nS successors per action
        future_states = np.zeros((2*aSize*nS,nX))
        n_next = gn(t, n, x, (r_k+r_b)/2)
        future_states[:,0] = np.repeat(b*(1+r_b[s]), 2*nS) + np.repeat(k, 2*nS)*(1+np.tile(r_k, 2*aSize))
        future_states[:,1] = np.tile(n_next,2*aSize)
        future_states[:,2] = M_next
        future_states[:,3] = np.tile(np.repeat([0,1],nS), aSize)
        future_states[:,4] = np.tile(range(nS),2*aSize)
        future_states[:,5] = np.repeat(z_next,2*nS)
        # employment transition probabilities conditional on current status e
        if e == 1:
            future_probs = np.tile(np.append(Ps[s]*Pe[s,e], Ps[s]*(1-Pe[s,e])),aSize)
        else:
            future_probs = np.tile(np.append(Ps[s]*(1-Pe[s,e]), Ps[s]*Pe[s,e]),aSize)
    return future_states, future_probs
# Use to approximate the discrete values in V
class Approxy(object):
    """Approximate V on the continuous part of the state (w, n, M) by
    multilinear interpolation, matching exactly on the discrete part (e, s, z)."""
    def __init__(self, points, Vgrid):
        # points: tuple of 1-D grids (ws, ns, Ms); Vgrid: value array indexed
        # as [w, n, M, e, s, z]
        self.V = Vgrid
        self.p = points
    def predict(self, xx):
        # Interpolated values for a batch of states xx (rows [w, n, M, e, s, z])
        pvalues = np.zeros(xx.shape[0])
        # interpolate separately within each discrete cell (e, s, z)
        for e in [0,1]:
            for s in range(nS):
                for z in [0,1]:
                    index = (xx[:,3] == e) & (xx[:,4] == s) & (xx[:,5] == z)
                    # fill_value=None => extrapolate outside the grid
                    pvalues[index]=interpn(self.p, self.V[:,:,:,e,s,z], xx[index][:,:3],
                                           bounds_error = False, fill_value = None)
        return pvalues
# used to calculate dot product
def dotProduct(p_next, uBTB, t):
    # Expected value over successor states: elementwise product of transition
    # probabilities and values, summed within each action's contiguous group
    # of successors (nS states after retirement, 2*nS before), matching the
    # layout produced by transition().
    if t >= T_R:
        return (p_next*uBTB).reshape((len(p_next)//(nS),(nS))).sum(axis = 1)
    else:
        return (p_next*uBTB).reshape((len(p_next)//(2*nS),(2*nS))).sum(axis = 1)
# Value function is a function of state and time t < T
def V(x, t, NN):
    """Value function at state x = [w, n, M, e, s, z] and time t (t < T_max).

    NN is the interpolator approximating V_{t+1} (None at the terminal
    period).  Returns np.array([fun, action]) holding the optimal value and
    the maximizing action [c, b, k, q], found by grid search.
    """
    w, n, M, e, s, z = x
    # after-tax income (helper from constant.py)
    yat = yAT(t,x)
    # mortgage payment due this period
    m = M/D[T_max - t]
    # If the agent can not pay for the mortgage: zero value, null action
    if yat + w < m:
        return [0, [0,0,0,0,0]]
    # The agent can pay for the mortgage
    if t == T_max-1:
        # Terminal period: continuation value is bequest utility only
        def obj(actions):
            # a = [c, b, k, q]
            x_next, p_next = transition(x, actions, t)
            uBTB = uB(calTB(x_next)) # conditional on being dead in the future
            # NOTE(review): arguments are passed as (uBTB, p_next) while
            # dotProduct declares (p_next, uBTB); harmless since only their
            # elementwise product is used.
            return R(x, actions) + beta * dotProduct(uBTB, p_next, t)
    else:
        # Interior period: survive with prob Pa[t] (interpolated V_{t+1}),
        # die with prob 1 - Pa[t] (bequest utility)
        def obj(actions):
            # a = [c, b, k, q]
            x_next, p_next = transition(x, actions, t)
            V_tilda = NN.predict(x_next) # V_{t+1} conditional on being alive, approximation here
            uBTB = uB(calTB(x_next)) # conditional on being dead in the future
            return R(x, actions) + beta * (Pa[t] * dotProduct(V_tilda, p_next, t) + (1 - Pa[t]) * dotProduct(uBTB, p_next, t))
    def obj_solver(obj):
        # Grid search over the feasible action space.
        # Constrain (not renting out): yat + w - m = c + b + kk
        actions = []
        budget1 = yat + w - m
        for cp in np.linspace(0.001,0.999,11):
            c = budget1 * cp
            budget2 = budget1 * (1-cp)
            #.....................stock participation cost...............
            for kp in np.linspace(0,1,11):
                # If z == 1 pay the maintenance cost Km = 0.5
                if z == 1:
                    # kk is the gross stock allocation
                    kk = budget2 * kp
                    if kk > Km:
                        k = kk - Km
                        b = budget2 * (1-kp)
                    else:
                        k = 0
                        b = budget2
                # If z == 0 and k > 0 pay the participation fee Kc = 5
                else:
                    kk = budget2 * kp
                    if kk > Kc:
                        k = kk - Kc
                        b = budget2 * (1-kp)
                    else:
                        k = 0
                        b = budget2
                #..............................................................
                # q = 1: not renting out in this case
                actions.append([c,b,k,1])
        # Constrain (with rental income): yat + w - m + (1-q)*H*pr = c + b + kk
        # NOTE(review): the q = 1 pass here has zero rental income and so
        # duplicates the action block generated above — harmless, but it
        # doubles that part of the grid.
        for q in [1,0.5]:
            budget1 = yat + w - m + (1-q)*H*pr
            for cp in np.linspace(0.001,0.999,11):
                c = budget1*cp
                budget2 = budget1 * (1-cp)
                #.....................stock participation cost...............
                for kp in np.linspace(0,1,11):
                    # If z == 1 pay the maintenance cost Km = 0.5
                    if z == 1:
                        # kk is the gross stock allocation
                        kk = budget2 * kp
                        if kk > Km:
                            k = kk - Km
                            b = budget2 * (1-kp)
                        else:
                            k = 0
                            b = budget2
                    # If z == 0 and k > 0 pay the participation fee Kc = 5
                    else:
                        kk = budget2 * kp
                        if kk > Kc:
                            k = kk - Kc
                            b = budget2 * (1-kp)
                        else:
                            k = 0
                            b = budget2
                    #..............................................................
                    # i = 0, no housing improvement when renting out
                    actions.append([c,b,k,q])
        actions = np.array(actions)
        values = obj(actions)
        fun = np.max(values)
        ma = actions[np.argmax(values)]
        return fun, ma
    fun, action = obj_solver(obj)
    return np.array([fun, action])
# +
# wealth discretization
ws = np.array([10,25,50,75,100,125,150,175,200,250,500,750,1000,1500,3000])
w_grid_size = len(ws)
# 401k amount discretization
ns = np.array([1, 5, 10, 15, 25, 50, 100, 150, 400, 1000])
n_grid_size = len(ns)
# Mortgage amount
Ms = np.array([0.01*H,0.05*H,0.1*H,0.2*H,0.3*H,0.4*H,0.5*H,0.8*H]) * pt
M_grid_size = len(Ms)
points = (ws,ns,Ms)
# dimentions of the state
dim = (w_grid_size, n_grid_size,M_grid_size,2,nS,2)
dimSize = len(dim)
xgrid = np.array([[w, n, M, e, s, z]
for w in ws
for n in ns
for M in Ms
for e in [0,1]
for s in range(nS)
for z in [0,1]
]).reshape(dim + (dimSize,))
# reshape the state grid into a single line of states to facilitate multiprocessing
xs = xgrid.reshape((np.prod(dim),dimSize))
Vgrid = np.zeros(dim + (T_max,))
cgrid = np.zeros(dim + (T_max,))
bgrid = np.zeros(dim + (T_max,))
kgrid = np.zeros(dim + (T_max,))
qgrid = np.zeros(dim + (T_max,))
print("The size of the housing: ", H)
print("The size of the grid: ", dim + (T_max,))
# +
# %%time
# value iteration part, create multiprocesses 32
pool = Pool()
for t in range(T_max-1,T_max-3, -1):
print(t)
if t == T_max - 1:
f = partial(V, t = t, NN = None)
results = np.array(pool.map(f, xs))
else:
approx = Approxy(points,Vgrid[:,:,:,:,:,:,t+1])
f = partial(V, t = t, NN = approx)
results = np.array(pool.map(f, xs))
Vgrid[:,:,:,:,:,:,t] = results[:,0].reshape(dim)
cgrid[:,:,:,:,:,:,t] = np.array([r[0] for r in results[:,1]]).reshape(dim)
bgrid[:,:,:,:,:,:,t] = np.array([r[1] for r in results[:,1]]).reshape(dim)
kgrid[:,:,:,:,:,:,t] = np.array([r[2] for r in results[:,1]]).reshape(dim)
qgrid[:,:,:,:,:,:,t] = np.array([r[3] for r in results[:,1]]).reshape(dim)
pool.close()
# np.save("Vgrid" + str(H), Vgrid)
# np.save("cgrid" + str(H), cgrid)
# np.save("bgrid" + str(H), bgrid)
# np.save("kgrid" + str(H), kgrid)
# np.save("qgrid" + str(H), qgrid)
|
20201030/simpleHousing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_mi)
# language: python
# name: conda_mi
# ---
# # Lasso
# - シンプルな線形回帰ですが、非常に分かり易くて便利です
# Load libraries
import pandas as pd
import matplotlib.pyplot as plt
# Read the data
df=pd.read_csv("muki.csv")
df
# Select the columns to use
df=df[["Tc",'C_R', 'C_T', 'vol_per_atom', 'Z', 'f4', 'd5', 'L4f', 'S4f', 'J4f',
       '(g-1)J4f', '(2-g)J4f']]
# Drop records that contain missing values (NaN)
df=df.dropna()
# +
# Set up X and Y
# X holds the explanatory (feature) variables
X=df[['C_R', 'C_T', 'vol_per_atom', 'Z', 'f4', 'd5', 'L4f', 'S4f', 'J4f',
      '(g-1)J4f', '(2-g)J4f']]
# Y holds the target variable
Y=df[["Tc"]]
# -
# # X,Yを正規化します
# - 殆どの回帰モデル(除 Random forest系)は、X,Yを正規化した方が正しく回帰出来ます
# - 正規化: 平均が0、標準偏差が1になるように変換すること
# - 今回はX、Yをそれぞれsc_X、sc_Yに正規化して代入します
# +
# StandardScaler normalizes (zero mean, unit variance) in one step
from sklearn.preprocessing import StandardScaler
# X
X_scaler=StandardScaler()
sc_X=X_scaler.fit_transform(X)
# Y
Y_scaler=StandardScaler()
sc_Y=Y_scaler.fit_transform(Y)
# -
# # Lassoで回帰します
# - ハイパーパラメータalphaを変えると回帰の結果が色々と変わります
# +
from sklearn.linear_model import Lasso
model=Lasso(alpha=0.01)
model.fit(sc_X,sc_Y)
# Predict on the training data
pred_Y=model.predict(sc_X)
plt.scatter(sc_Y,pred_Y)
# Regression coefficients
model.coef_
pd.DataFrame([X.columns,model.coef_]).T
# +
# Same fit with a stronger penalty (larger alpha drives more coefficients to zero)
model=Lasso(alpha=0.5)
model.fit(sc_X,sc_Y)
# Predict on the training data
pred_Y=model.predict(sc_X)
plt.scatter(sc_Y,pred_Y)
# Regression coefficients
model.coef_
pd.DataFrame([X.columns,model.coef_]).T
# -
|
2_python_tutorial/3/3.Lasso.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from scipy import stats
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
import xgboost as xgb
#conda install -c jjhelmus py-xgboost-gpu
import os
import math
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import cross_validate
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
# Though the following import is not directly being used, it is required
# for 3D projection to work
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
import gensim
# -
os.getcwd()
os.chdir('C:/Users/Hoc/OneDrive/My Documents/Machine Learning/kaggle/ames/')
#os.chdir('C:/Users/Richard/OneDrive/My Documents/Machine Learning/kaggle/ames/')
# Load the Ames housing train/test data ('NA' strings treated as missing)
train = pd.read_csv('train.csv', na_values='NA')
#print(train.head())
test = pd.read_csv('test.csv', na_values='NA')
#print(test.head())
train.columns
print(train.OverallCond.dtypes)
print(train.OverallQual.dtypes)
# Treat the ordinal quality/condition codes as categoricals
train['OverallCond']= train.OverallCond.astype('object')
train['OverallQual']= train.OverallQual.astype('object')
print(train.OverallCond.dtypes)
print(train.OverallQual.dtypes)
# Lookup table mapping categorical codes (e.g. 'RL') to text descriptions
text=pd.read_table('PropDes.txt',names=['Id2','Des'])
text.head()
train.dtypes
text.dtypes
# Strip whitespace so codes join cleanly
text['Id2'] = text.Id2.str.strip()
text[text.Id2=='RL']
# Spot-check the join on one column
pd.merge(train[['Id','MSZoning']], text, left_on=train.MSZoning, right_on=text.Id2, how='left').head().iloc[:,-1]
df = pd.DataFrame({"Des":''},index=train.Id)
categorical_features = train.select_dtypes(include = ["object"]).columns
numerical_features = train.select_dtypes(exclude = ["object"]).columns
# For each categorical column, look up its text description as a new column.
# NOTE(review): pd.merge resets the index, so this relies on the merged row
# order aligning with df's Id-based index — verify alignment.
for i, c in enumerate(categorical_features):
    df['Des'+str(i)] = pd.merge(train, text, left_on=train[c], right_on=text.Id2, how='left').iloc[:,-1]
for c in df.columns:
    df[c] = '. '+df[c]
# Concatenate all per-column descriptions into one text blob per house
df['Des']=df.fillna('').sum(axis=1)
df = pd.DataFrame(df.Des)
df.to_csv('des.csv', header=df.columns)
|
test/.ipynb_checkpoints/Ames_nlp-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# # Portfolio Project: Berlin Restaurants
#
# For this Data analysis project, I used the [YELP-FUSION](https://www.yelp.com/developers/documentation/v3) API to extract the restaurants based in the urban area of [Berlin](https://www.yelp.de/berlin) with all the information that comes with it. This project showcases the ETL process that is followed for each data-based project.
#
#
# ## Enter Pipeline
#
# The solution to this problem is a data pipeline. Like a physical pipeline system that drives the flow of any material
# from the source to the destination, a data pipeline comes to transfer the flow of data from any source to the desired
# destination. But not only that, it can make all the necessary transformations -while transferring the data- so the data reach their destination in the desired format and ready to be analyzed.
#
# <p align="center">
# <img alt="Pipeline" src="https://mermaid.ink/img/eyJjb2RlIjoiXG5ncmFwaCBURFxuc3ViZ3JhcGggRGVzdGluYXRpb25cbkUoRGF0YSBXYXJlaG91c2UpXG5lbmRcbkEoQ2xvdWQgQnVja2V0KSAtLVBpcGVsaW5lLS0-IEVcbkIoTW9iaWxlIEFwcCkgLS1QaXBlbGluZS0tPiBFXG5DKEFQSSkgLS1QaXBlbGluZS0tPiBFXG5EKExvY2FsIERhdGFiYW5rKSAtLVBpcGVsaW5lLS0-IEVcblxuIiwibWVybWFpZCI6eyJ0aGVtZSI6ImRlZmF1bHQifSwidXBkYXRlRWRpdG9yIjpmYWxzZX0">
# </p>
#
# For this project, the source is just an API and the destination would be a Postgres SQL database. So the pipeline for this project would look like this
#
# <p align="center">
# <img alt="Pipeline" src="https://mermaid.ink/<KEY>">
# </p>
#
#
# Each API lets us make some GET/POST requests and sends back to us a payload with the appropriate information depending on what we send it. The logic behind each API is the same but each one uses a unique style of commands that need to be sent with each request to receive the correct response.
#
# Yelp's API has the following format
#
# ```python
# response = requests.get(api_url, params=params, headers=headers)
# ```
#
# In the params dictionary, we have to define what do we want to look for, for this case it would be the following
#
# ```python
# params = {"term": "restaurants",
# "location": "berlin"}
# ```
#
# Passing to the key terms: `term`:`restaurants` and `location`:`Berlin`, we ask the API to look for the term restaurants located in Berlin.
#
# The request is almost ready, it requires more information. Although it's a public API, someone must be registered to use the API. After registration you will receive a KEY, this key should be passed to the request with the following text into the headers like this
#
# ```python
# headers = {"Authorization": "Bearer {}".format(api_key)}
# ```
#
#
#
#
#
#
#
#
# <div class="list-group" id="list-tab" role="tablist">
# <h3 class="list-group-item list-group-item-action active" data-toggle="list" role="tab" aria-controls="home">
# Notebook Content!</h3>
# <a class="list-group-item list-group-item-action" data-toggle="list" href="#libraries" role="tab"
# aria-controls="profile">Import Libraries<span class="badge badge-primary badge-pill">1</span></a>
# <a class="list-group-item list-group-item-action" data-toggle="list" href="#connect-API" role="tab"
# aria-controls="profile">Connect to the API <span class="badge badge-primary badge-pill">2</span></a>
# <a class="list-group-item list-group-item-action" data-toggle="list" href="#connect-db" role="tab"
# aria-controls="messages">Connect to the SQL Database<span
# class="badge badge-primary badge-pill">3</span></a>
# <a class="list-group-item list-group-item-action" data-toggle="list" href="#data-cleaning" role="tab"
# aria-controls="messages">Data Cleaning<span
# class="badge badge-primary badge-pill">4</span></a>
# <a class="list-group-item list-group-item-action" data-toggle="list" href="#sales-analysis" role="tab"
# aria-controls="messages">Data Analysis<span
# class="badge badge-primary badge-pill">5</span></a>
# <a class="list-group-item list-group-item-action" data-toggle="list" href="#final-coclusion" role="tab"
# aria-controls="messages">Conclusion<span
# class="badge badge-primary badge-pill">5</span></a>
# </div>
# ### Import libraries and set constants <a name='libraries' href='#list-tab'><span>🔼</span></a>
# +
# Libraries
import os
import psycopg2
import requests
import numpy as np
import pandas as pd
from IPython.display import display
from pandas import json_normalize
from sqlalchemy import create_engine
# %load_ext nb_black
# -
# ### Connect to the API <a name='connect-API' href='#list-tab'><span>🔼</span></a>
# As was said before, to make a request we need some credentials that it is wise not to share in public projects. There are a lot of ways to hide them from the public. I will demonstrate two of them.
#
# **Extract credentials to the environment**
#
# This way demands the creation of a bat file with all the commands needed to set a variable to the local environment, after doing that it is easy to retrieve this information in our code without fear of revealing it to the public.
#
# *.bat file
# >```cmd
# >set API_KEY = 'my-private-key'
# >```
#
# Run the file
# >```cmd
# >my_bat.bat
# >```
#
# **Make a file and import it in your project**
#
# This way is kind of more pythonic. It takes only the creation of a new file where each variable will have the values of some value needed for our project and has to stay private.
#
# environment_consts.py file
# >```python
# >API_KEY = 'api key'
# >```
#
# Project file
# >```python
# >from environment_consts import API_KEY
# >```
#
# +
# Set the values
import constants
os.environ["API_KEY"] = constants.API_KEY
os.environ["HOST"] = constants.HOST
os.environ["DATABASE_NAME"] = constants.DATABASE_NAME
os.environ["DB_USER"] = constants.DB_USER
os.environ["DB_PASSWORD"] = constants.DB_PASSWORD
# +
api_url = "https://api.yelp.com/v3/businesses/search"
params = {"term": "restaurants", "location": "berlin"}
api_key = os.environ.get("API_KEY", "")
headers = {"Authorization": "Bearer {}".format(api_key)}
def extract_data(offset=(0,)):
    """
    Make one GET request to the Yelp API per offset value and collect the payloads.

    Each call returns around 20 entries, which is why the offset parameter is
    used to page through the results.

    :param offset: iterable of integer page offsets to request.
        (A tuple default avoids the mutable-default-argument pitfall.)
    :returns: data_list, a list in which each element is the parsed JSON
        response (a dict with a "businesses" key) for one page.
    """
    data_list = []
    print("Extracting data from the API...")
    for each in offset:
        # Module-level `params` is reused; only the paging offset changes per call.
        params["offset"] = each
        response = requests.get(api_url, params=params, headers=headers)
        data = response.json()
        data_list.append(data)
    return data_list
# -
# Call the function: fetch 12 pages (offsets 0, 20, ..., 220) of restaurants.
restaurants = extract_data(offset=list(range(0, 240, 20)))
print(f"Length of the list: {len(restaurants)}")
print()
print("First entry:")
# Bare expression: displays the first restaurant dict in the notebook output.
(restaurants[0]["businesses"][0])
# We see that each entry has a dictionary format. Now it's time to extract from this dictionary the information we need. We can manage this with the `json_normalize` method of the panda's library. The method returns a pandas data frame where each column is a key of the dictionary and each value is the value of that key.
#
# The problem with using this method is that our dictionary is deeply nested. This means that there are other dictionaries nested inside the dictionary. To capture this morphology correctly we will use some attributes of the json_normalize method.
# +
# Raw transform of the dictionary to dataframe
display(json_normalize(restaurants[0]["businesses"][0]).columns)
display(json_normalize(restaurants[0]["businesses"][0]))
# -
record_path = "categories"
meta = [
"id",
"name",
"price",
"review_count",
"rating",
"phone",
"distance",
["coordinates", "latitude"],
["coordinates", "longitude"],
["location", "city"],
["location", "zip_code"],
["location", "address1"],
]
meta_prefix = "restaurant_"
record_prefix = "category_"
# clean transform of the dictionary to dataframe
json_normalize(
restaurants[0]["businesses"][0],
sep="_",
record_path=record_path,
meta=meta,
errors="ignore",
meta_prefix=meta_prefix,
record_prefix=record_prefix,
)
def transform_data(data):
    """
    Transform the raw API responses into one flat DataFrame.

    Each element of *data* is a JSON page returned by extract_data(); its
    "businesses" entries are flattened with json_normalize using the
    module-level record_path/meta/prefix settings, and all pages are
    concatenated.

    :param data: list of parsed JSON responses from the API.
    :returns: a pandas DataFrame with one row per (restaurant, category) pair.
    """
    df_list = []
    for each in data:
        df_list.append(
            json_normalize(
                each["businesses"],
                sep="_",
                record_path=record_path,
                meta=meta,
                errors="ignore",
                meta_prefix=meta_prefix,
                record_prefix=record_prefix,
            )
        )
    # ignore_index gives the combined frame a clean 0..n-1 index.
    final_df = pd.concat(df_list, ignore_index=True)
    print("Final data frame created...")
    # NOTE(review): DataFrame.info() prints its report and returns None,
    # so this line also prints a trailing "None".
    print(final_df.info())
    return final_df
restaurants_df = transform_data(restaurants)
display(restaurants_df.sample(10))
# Check if there are duplicated entries
assert sum(restaurants_df.duplicated()) == 0
# It worked, we have an organized data frame with no duplicated values that holds all the information needed for our purposes. We managed to declutter the noise and keep only what is important for our analysis.
#
# ✔️Load the data
#
# ✔️Transform the data
#
# We have checked two very important steps, now next step is to load the data into a SQL database
#
#
# ### Connect to the database <a name='connect-db' href='#list-tab'><span>🔼</span></a>
# For this project, I choose to work with PostgreSQL and I used the [Psycopg](https://www.psycopg.org/docs/index.html)
# python library to connect to and execute queries to my database. We can execute SQL queries either using the [pgAdmin](https://www.pgadmin.org/) tool or using python.
#
# If we want to run out DB from python we need to export into the environment all the parameters used for the
# a connection such as: *the database name*, *password*, *host*, *, etc*, but this step is already completed.
#
# To execute a query from python we need to make a connection with the DB, after that, we create a `cursor`, an object that deals with the execution of each query. In the end, we need to close the connection. For our luck pandas supports the transfer of a data frame to SQL database automatically but only using the SQLAlchemy library. For only this step we have to use this specific library
host = os.environ.get("HOST", "")
dbname = os.environ.get("DATABASE_NAME", "")
user = os.environ.get("DB_USER", "")
password = os.environ.get("DB_PASSWORD", "")
# Build the template for the connection
connection_template = (
f"host={host} dbname={dbname} user={user} password={password} sslmode=disable"
)
# Connect
connection = psycopg2.connect(connection_template)
connection.set_session(autocommit=True)
cursor = connection.cursor()
cursor.execute("SELECT current_database()")
db_name = cursor.fetchone()
print("You are connected with {} database".format(db_name[0]))
# The message proves that we are connected to the database with the name yelp. This means that now we can run queries from python
def load():
    """
    Persist the cleaned restaurants data: a local CSV file plus a PostgreSQL
    table named "raw_table" (replaced if it already exists).

    Relies on the module-level `restaurants_df` and on the database
    credentials exported to the environment earlier in the notebook.
    """
    # Save DF to a CSV file locally
    restaurants_df.to_csv('./restaurants_df.csv', header=True, index_label='id', index=True, encoding='utf-8')
    # Make a connection with the SQLalchemy package
    # (psycopg2 driver; assumes the default Postgres port 5432)
    connect = "postgresql+psycopg2://%s:%s@%s:5432/%s" % (
        os.environ.get("DB_USER"),
        os.environ.get("DB_PASSWORD"),
        os.environ.get("HOST"),
        os.environ.get('DATABASE_NAME')
    )
    # Create engine
    engine = create_engine(connect)
    # if_exists="replace" drops and recreates the table on every run.
    restaurants_df.to_sql(name="raw_table",con=engine,if_exists="replace",index=True)
# Create a local file with the data, and also push all data to sql database
load()
# Test if everything is fine
q = """
SELECT * FROM raw_table
"""
cursor.execute(q)
cursor.fetchmany(2)
# ✔️ Load the data
#
# We have successfully loaded the raw data into a SQL database. The next step will be to normalize the database into its final form. Using a database manager we can commit different SQL commands and manipulate the tables easier than using python code from a jupyter notebook
# <p align='center'>
# <img alt="database" src="./images/database.png">
# </p>
|
ETL_Pipeline/yelp_api.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ML-NNet
#
# This is a test of the ML-NNet package from [GitHub](https://github.com/randysimpson/ml-nnet). We'll be setting up a few examples of how to use the microservice. In these examples the microservice has already been started using the command `docker run -d -p 9000:9000 randysimpson/ml-nnet:latest`.
import requests
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas
import json
# ## Neural Network Example
#
# This example will use a Neural Network with hidden layers of [10, 10].
# +
#test
url = "http://localhost:9000/api/v1/setup"
data = {
"xShape": 1,
"hiddenLayers": [10, 10],
"tShape": 1,
"nnType": "nn"
}
r = requests.post(url, data = json.dumps(data))
# -
# Generate the data and train the model.
# +
#now train.
url = "http://localhost:9000/api/v1/train"
np.random.seed(42)
X = np.arange(100).reshape((-1, 1))
T = np.sin(X * 0.04)
data = {
"epochs": 2000,
"method": "adam",
"learningRate": 0.01,
"x": X.tolist(),
"t": T.tolist()
}
r = requests.post(url, data = json.dumps(data))
# -
# Wait for the model to train, this can take a while but for the data set we used in this example it is about 2 minutes or less.
# +
#now use.
url = "http://localhost:9000/api/v1/use"
data = {
"x": X.tolist()
}
r = requests.get(url, data = json.dumps(data))
data = r.json()
# -
# Let's take a look at the data after using the trained model:
print(data)
# Now we want to see a plot of the trained data vs the target data we used to train the model
# +
plt.figure(figsize=(15,10))
plt.subplot(1, 3, 1)
plt.plot(T, 'o-', label='Target')
plt.plot(data["y"], 'o-', label='Adam')
plt.xlabel('Sample')
plt.ylabel('Target or Predicted')
plt.legend()
plt.tight_layout()
|
notebooks/Neural Network.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
# %matplotlib inline
target_types={6:'Microlensing', 15:'Explosive Type V', 16:'Transits', 42:'Explosive type W', 52:'Explosive Type X',
53:'Long periodic', 62:'Explosive Type Y', 64:'Near Burst', 65:'Flare', 67:'Explosive Type Z',
88:'AGN', 90:'SN Type U', 92:'Periodic', 95:'SN Type T'}
train_series = pd.read_csv('../input/training_set.csv')
train_metadata = pd.read_csv('../input/training_set_metadata.csv')
groups = train_series.groupby(['object_id', 'passband'])
times = groups.apply(
lambda block: block['mjd'].values).reset_index().rename(columns={0: 'seq'})
flux = groups.apply(
lambda block: block['flux'].values
).reset_index().rename(columns={0: 'seq'})
err = groups.apply(
lambda block: block['flux_err'].values
).reset_index().rename(columns={0: 'seq'})
det = groups.apply(
lambda block: block['detected'].astype(bool).values
).reset_index().rename(columns={0: 'seq'})
times_list = times.groupby('object_id').apply(lambda x: x['seq'].tolist()).tolist()
flux_list = flux.groupby('object_id').apply(lambda x: x['seq'].tolist()).tolist()
err_list = err.groupby('object_id').apply(lambda x: x['seq'].tolist()).tolist()
det_list = det.groupby('object_id').apply(lambda x: x['seq'].tolist()).tolist()
object_list=times.groupby('object_id').apply(lambda x: x['object_id'].unique()[0]).tolist()
colors = ['purple', 'blue', 'green', 'orange', 'red', 'black']
def plot_one_object(obj_id):
    # Plot the light curve of a single object: one errorbar series per passband
    # (flux vs. MJD), colour-coded via the module-level `colors` list.
    # Reads the module-level `train_series` DataFrame.
    for band in range(len(colors)):
        sample = train_series[(train_series['object_id'] == obj_id) & (train_series['passband']==band)]
        plt.errorbar(x=sample['mjd'],y=sample['flux'],yerr=sample['flux_err'],c = colors[band],fmt='o',alpha=0.7)
plot_one_object(20222858)
#plt.xlim(59900,60100)
# ## Periodicity
from astropy.stats import LombScargle
# +
iobj=0
time_obj = np.concatenate(times_list[iobj])
flux_obj = np.concatenate(flux_list[iobj])
err_obj = np.concatenate(err_list[iobj])
frequency, power = LombScargle(time_obj, flux_obj,err_obj).autopower(maximum_frequency=16.0)
#frequency, power = LombScargle(times_list[iobj][1], flux_list[iobj][1],err_list[iobj][1]).autopower()
plt.plot(frequency, power)
plt.figure()
plot_one_object(object_list[iobj])
plt.title(target_types[train_metadata.loc[iobj,'target']])
# -
sorted(train_metadata['target'].unique())
for t in sorted(train_metadata['target'].unique()):
print t,target_types[t],train_metadata[train_metadata['target']== t]['hostgal_specz'].mean()
train_metadata[train_metadata['target']== 92]
# # Interpolation
# 231 bins in time...
time_grid=(np.arange(59550,60705,5.))
# ## Kernel interpolation
# +
iobj=1
band=2
length=5.4
kernel=np.exp(-(np.reshape(times_list[iobj][band],(-1,1)) - times_list[iobj][band])**2/2/length**2)
np.fill_diagonal(kernel,0)
sumw=kernel.dot(1./err_list[iobj][band]**2)
pred=kernel.dot(flux_list[iobj][band]/err_list[iobj][band]**2) / sumw
chi2 = (pred - flux_list[iobj][band])**2 / ( err_list[iobj][band]**2 + 1./sumw )
logl = chi2 + np.log(err_list[iobj][band]**2 + 1./sumw)
plt.errorbar(times_list[iobj][band],flux_list[iobj][band],yerr=err_list[iobj][band],color=colors[band],fmt='o')
plt.errorbar(times_list[iobj][band],pred,yerr=1./np.sqrt(sumw))
plt.ylim(np.min(flux_list[iobj][band]*1.2,0),np.max(flux_list[iobj][band]*1.2,0))
plt.xlim(59800,60000)
len(sumw),np.sum(chi2),np.sum(logl)
# +
def fit_kernel_params(times_band,flux_band,err_band):
    """
    Fit the two kernel-smoothing hyper-parameters (sigma, length) for one
    object/passband by minimizing a leave-one-out -2 ln likelihood.

    times_band, flux_band, err_band: 1-D arrays of observation times, fluxes
        and flux errors.
    Returns (xopt, fopt) from scipy.optimize.fmin: the best-fit
    (sigma, length) array and the minimized -2 ln likelihood value.
    """
    def _kernel_likelihood(params):
        sigma=params[0]
        length=params[1]
        # Gaussian weights between every pair of observation times.
        kernel=np.exp(-(np.reshape(times_band,(-1,1)) - times_band)**2/2/length**2)
        # Zero the diagonal: each point is predicted from the *other* points
        # (leave-one-out), so the fit cannot trivially reproduce itself.
        np.fill_diagonal(kernel,0)
        # Inverse-variance weights, regularized by the intrinsic scatter sigma.
        sumw=kernel.dot(1./err_band**2) + 1./sigma**2
        pred=kernel.dot(flux_band/err_band**2) / sumw
        chi2 = (pred - flux_band)**2 / ( err_band**2 + 1./sumw )
        # -2 ln likelihood
        logl=np.sum(chi2 + np.log(err_band**2 + 1./sumw))
        return logl
    # Initial guesses: mean time spacing for the length, half the flux
    # standard deviation for sigma.
    lguess=(np.max(times_band)-np.min(times_band))/len(times_band)
    siguess=np.std(flux_band)/2.
    output=optimize.fmin(_kernel_likelihood,(siguess,lguess),disp=False,xtol=0.01,full_output=1)
    # full_output=1 returns (xopt, fopt, iterations, funcalls, warnflag).
    return output[0], output[1]
def kernel_predict(params, times_band, flux_band, err_band, grid=None):
    """
    Evaluate the fitted Gaussian smoothing kernel on a regular time grid.

    params: (sigma, length) as returned by fit_kernel_params — intrinsic
        scatter and kernel width.
    times_band, flux_band, err_band: 1-D arrays of observation times, fluxes
        and flux errors for one object/passband.
    grid: 1-D array of evaluation times; defaults to the module-level
        `time_grid` for backward compatibility.

    Returns (predicted flux on the grid, 1-sigma uncertainty per grid point).
    """
    sigma = params[0]
    length = params[1]
    if grid is None:
        grid = time_grid  # module-level default evaluation grid
    # Gaussian weights between every grid point and every observation.
    kernel = np.exp(-(np.reshape(grid, (-1, 1)) - times_band)**2 / 2 / length**2)
    # Inverse-variance weighted average, regularized by the intrinsic scatter sigma.
    sumw = kernel.dot(1. / err_band**2) + 1. / sigma**2
    pred = kernel.dot(flux_band / err_band**2) / sumw
    return pred, np.sqrt(1. / sumw)
# +
def make_kernel(tlist, flist, elist):
    """
    Fit a smoothing kernel for every (object, band) light curve and evaluate
    it on the shared time grid.

    tlist, flist, elist: per-object lists of per-band arrays of times,
        fluxes and flux errors.
    Returns five parallel nested lists (object -> band):
        interpolated fluxes, their errors, fitted sigma, fitted length,
        and the minimized -2 ln likelihood.
    """
    flux_grid, err_grid = [], []
    kernel_sigma, kernel_length, kernel_logl = [], [], []
    for obj_times, obj_flux, obj_err in zip(tlist, flist, elist):
        obj_flux_grid, obj_err_grid = [], []
        obj_sigma, obj_length, obj_logl = [], [], []
        for t_band, f_band, e_band in zip(obj_times, obj_flux, obj_err):
            # Fit (sigma, length) for this band, then interpolate with them.
            (sigma, length), logl = fit_kernel_params(t_band, f_band, e_band)
            pred_flux, pred_err = kernel_predict((sigma, length), t_band, f_band, e_band)
            obj_flux_grid.append(pred_flux)
            obj_err_grid.append(pred_err)
            obj_sigma.append(sigma)
            obj_length.append(length)
            obj_logl.append(logl)
        flux_grid.append(obj_flux_grid)
        err_grid.append(obj_err_grid)
        kernel_sigma.append(obj_sigma)
        kernel_length.append(obj_length)
        kernel_logl.append(obj_logl)
    return flux_grid, err_grid, kernel_sigma, kernel_length, kernel_logl
# -
#Remark : not fitting for the sigma accelerates by a factor 3 !
# but it will produce nans...
# The warning "divide by zero" seems to be harmless...
kernel_flux_grid,kernel_err_grid,kernel_sigma,kernel_length,kernel_logl=make_kernel(times_list,flux_list,err_list)
def plot_interpolations(iobj,times_list,flux_list,err_list,flux_grid,err_grid):
    """
    For object index *iobj*, plot the observed light curve and the kernel
    interpolation (with its 1-sigma band) in all six passbands, one subplot
    per band, evaluated on the module-level `time_grid`.
    """
    fig, axes = plt.subplots(2, 3, sharex=True, sharey=True, figsize=(12, 8))
    plt.title(target_types[train_metadata.loc[iobj,'target']])
    for band in range(6):
        ax = axes[band // 3, band % 3]
        ax.errorbar(times_list[iobj][band],flux_list[iobj][band],yerr=err_list[iobj][band],color=colors[band],fmt='o')
        ax.plot(time_grid,flux_grid[iobj][band],color=colors[band])
        ax.fill_between(time_grid,flux_grid[iobj][band]-err_grid[iobj][band],
                        flux_grid[iobj][band]+err_grid[iobj][band],alpha=0.3,color=colors[band])
        ax.set_xlabel('MJD')
        ax.set_ylabel('Flux')
    # NOTE(review): plt.title applies to the current (last) axes, so only one
    # subplot is titled — confirm whether fig.suptitle was intended.
    plt.title(target_types[train_metadata.loc[iobj,'target']])
plot_interpolations(31,times_list,flux_list,err_list,kernel_flux_grid,kernel_err_grid)
plt.ylim(-50,200)
plt.xlim(60100,60300)
# Parameters depend also on observation conditions
ddf = train_metadata['ddf']==1
plt.scatter(np.log10(np.abs(kernel_sigma))[ddf==0],np.log10(np.abs(kernel_length))[ddf==0],alpha=0.02)
plt.scatter(np.log10(np.abs(kernel_sigma))[ddf],np.log10(np.abs(kernel_length))[ddf],alpha=0.01)
plt.xlabel('log sigma')
plt.ylabel('log length')
plt.figure()
plt.scatter(np.log10(np.abs(kernel_sigma))[ddf==0],np.log10(np.abs(kernel_length))[ddf==0],alpha=0.02)
plt.scatter(np.log10(np.abs(kernel_sigma))[ddf],np.log10(np.abs(kernel_length))[ddf],alpha=0.01)
plt.xlabel('log sigma')
plt.ylabel('log length')
plt.xlim((-1,4))
plt.ylim(-2,3)
#plt.xlim()
|
TimeInterpolation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>2b. Machine Learning using tf.estimator </h1>
#
# In this notebook, we will create a machine learning model using tf.estimator and evaluate its performance. The dataset is rather small (7700 samples), so we can do it all in-memory. We will also simply pass the raw data in as-is.
# +
import tensorflow as tf
import pandas as pd
import numpy as np
import shutil
print(tf.__version__)
# -
# Read data created in the previous chapter.
# +
# In CSV, label is the first column, after the features, followed by the key
CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key']
FEATURES = CSV_COLUMNS[1:len(CSV_COLUMNS) - 1]
LABEL = CSV_COLUMNS[0]
df_train = pd.read_csv('./taxi-train.csv', header = None, names = CSV_COLUMNS)
df_valid = pd.read_csv('./taxi-valid.csv', header = None, names = CSV_COLUMNS)
df_test = pd.read_csv('./taxi-test.csv', header = None, names = CSV_COLUMNS)
# -
# <h2> Train and eval input functions to read from Pandas Dataframe </h2>
# TODO: Create an appropriate input_fn to read the training data
def make_train_input_fn(df, num_epochs):
    # Lab placeholder: intentionally incomplete — the student fills in the
    # pandas_input_fn arguments (features, label, epochs, shuffle) here.
    return tf.estimator.inputs.pandas_input_fn(
        #ADD CODE HERE
    )

# TODO: Create an appropriate input_fn to read the validation data
def make_eval_input_fn(df):
    # Lab placeholder: should build a one-pass, non-shuffled input_fn over *df*.
    return tf.estimator.inputs.pandas_input_fn(
        #ADD CODE HERE
    )

# Our input function for predictions is the same except we don't provide a label
# TODO: Create an appropriate prediction_input_fn
def make_prediction_input_fn(df):
    # Lab placeholder: like the eval input_fn but without a label column.
    return tf.estimator.inputs.pandas_input_fn(
        #ADD CODE HERE
    )
# ### Create feature columns for estimator
# +
# TODO: Create feature columns
# -
# <h3> Linear Regression with tf.Estimator framework </h3>
# +
tf.logging.set_verbosity(tf.logging.INFO)
OUTDIR = 'taxi_trained'
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
# TODO: Train a linear regression model
model = #ADD CODE HERE
model.train(#ADD CODE HERE
)
# -
# Evaluate on the validation data (we should defer using the test data to after we have selected a final model).
def print_rmse(model, df):
    """Evaluate *model* on *df* and print the resulting RMSE."""
    eval_metrics = model.evaluate(input_fn=make_eval_input_fn(df))
    rmse = np.sqrt(eval_metrics['average_loss'])
    print('RMSE on dataset = {}'.format(rmse))
print_rmse(model, df_valid)
# This is nowhere near our benchmark (RMSE of $6 or so on this data), but it serves to demonstrate what TensorFlow code looks like. Let's use this model for prediction.
# +
# TODO: Predict from the estimator model we trained using test dataset
# -
# This explains why the RMSE was so high -- the model essentially predicts the same amount for every trip. Would a more complex model help? Let's try using a deep neural network. The code to do this is quite straightforward as well.
# <h3> Deep Neural Network regression </h3>
# TODO: Copy your LinearRegressor estimator and replace with DNNRegressor. Remember to add a list of hidden units i.e. [32, 8, 2]
# We are not beating our benchmark with either model ... what's up? Well, we may be using TensorFlow for Machine Learning, but we are not yet using it well. That's what the rest of this course is about!
#
# But, for the record, let's say we had to choose between the two models. We'd choose the one with the lower validation error. Finally, we'd measure the RMSE on the test data with this chosen model.
# <h2> Benchmark dataset </h2>
#
# Let's do this on the benchmark dataset.
# +
from google.cloud import bigquery
import numpy as np
import pandas as pd
def create_query(phase, EVERY_N):
    """
    Build the BigQuery SQL for the NYC taxi-fare benchmark dataset.

    phase: 1 = train, 2 = valid. When EVERY_N is None the data are split by
        hashing the pickup time into 4 buckets: training takes buckets < 2,
        validation takes bucket == phase (i.e. 2).
    EVERY_N: if not None, sample roughly 1/EVERY_N of the data by keeping rows
        whose hash modulo EVERY_N equals phase.
    Returns the full query string.
    """
    base_query = """
  SELECT
    (tolls_amount + fare_amount) AS fare_amount,
    EXTRACT(DAYOFWEEK FROM pickup_datetime) * 1.0 AS dayofweek,
    EXTRACT(HOUR FROM pickup_datetime) * 1.0 AS hourofday,
    pickup_longitude AS pickuplon,
    pickup_latitude AS pickuplat,
    dropoff_longitude AS dropofflon,
    dropoff_latitude AS dropofflat,
    passenger_count * 1.0 AS passengers,
    CONCAT(CAST(pickup_datetime AS STRING), CAST(pickup_longitude AS STRING), CAST(pickup_latitude AS STRING), CAST(dropoff_latitude AS STRING), CAST(dropoff_longitude AS STRING)) AS key
  FROM
    `nyc-tlc.yellow.trips`
  WHERE
    trip_distance > 0
    AND fare_amount >= 2.5
    AND pickup_longitude > -78
    AND pickup_longitude < -70
    AND dropoff_longitude > -78
    AND dropoff_longitude < -70
    AND pickup_latitude > 37
    AND pickup_latitude < 45
    AND dropoff_latitude > 37
    AND dropoff_latitude < 45
    AND passenger_count > 0
  """
    # `is None` is the idiomatic identity check (was `== None`).
    if EVERY_N is None:
        if phase < 2:
            # Training: hash buckets 0 and 1 of 4.
            query = "{0} AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 4) < 2".format(base_query)
        else:
            # Validation: hash bucket == phase (2).
            query = "{0} AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 4) = {1}".format(base_query, phase)
    else:
        # Subsample: keep ~1/EVERY_N of rows, offset by phase for disjoint splits.
        query = "{0} AND MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), {1}) = {2}".format(base_query, EVERY_N, phase)
    return query
query = create_query(2, 100000)
df = bigquery.Client().query(query).to_dataframe()
# -
print_rmse(model, df)
# RMSE on benchmark dataset is <b>9.41</b> (your results will vary because of random seeds).
#
# This is not only way more than our original benchmark of 6.00, but it doesn't even beat our distance-based rule's RMSE of 8.02.
#
# Fear not -- you have learned how to write a TensorFlow model, but not to do all the things that you will have to do to your ML model performant. We will do this in the next chapters. In this chapter though, we will get our TensorFlow model ready for these improvements.
#
# In a software sense, the rest of the labs in this chapter will be about refactoring the code so that we can improve it.
# ## Challenge Exercise
#
# Create a neural network that is capable of finding the volume of a cylinder given the radius of its base (r) and its height (h). Assume that the radius and height of the cylinder are both in the range 0.5 to 2.0. Simulate the necessary training dataset.
# <p>
# Hint (highlight to see):
# <p style='color:white'>
# The input features will be r and h and the label will be $\pi r^2 h$
# Create random values for r and h and compute V.
# Your dataset will consist of r, h and V.
# Then, use a DNN regressor.
# Make sure to generate enough data.
# </p>
# Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
courses/machine_learning/deepdive/03_tensorflow/labs/b_estimator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (timeatlas)
# language: python
# name: pycharm-9df7f84f
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# Available Data Structures
# =========================
# + pycharm={"name": "#%%\n"}
import timeatlas as ta
import pandas as pd
# + [markdown] pycharm={"name": "#%% md\n"}
# TimeSeries
# ----------
#
# > A time series is a series of data points indexed (or listed or graphed)
# > in time order. Most commonly, a time series is a sequence taken at successive
# > equally spaced points in time. - [Wikipedia](https://en.wikipedia.org/wiki/Time_series)
#
# In TimeAtlas, a time series is based on a Pandas DataFrame with a DatetimeIndex.
# There are multiple ways to create a TimeSeries directly in a notebook. For
# instance, directly with the objects :
#
# 1. Create an DatetimeIndex
# 2. Create the DataFrame
# 3. Build the TimeSeries
# + pycharm={"name": "#%%\n"}
index = pd.DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04'])
my_series = pd.DataFrame([0.4, 1.0, 0.7, 0.6], index=index)
ts = ta.TimeSeries(my_series)
ts
# + [markdown] pycharm={"name": "#%% md\n"}
# With `TimeSeries.create()` :
#
# 1. Create the empty TimeSeries, by specifying start, end and the frequency
# 2. Add values
#
# .. warning::
# A TimeSeries in TimeAtlas is immutable.
# + pycharm={"name": "#%%\n"}
ts = ta.TimeSeries.create('2019-01-01', '2019-01-04', freq='1D')
ts = ts.fill([0.4, 1.0, 0.7, 0.6])
ts
# + [markdown] pycharm={"name": "#%% md\n"}
# Metadata
# --------
#
# Usually there are some important data that we would like to keep close
# the TimeSeries but are not directly part of the values of the TimeSeries.
#
# TimeAtlas has an object Metadata, where we can store these additional data.
# You can add whatever data you feel is important to you by providing a dictionary.
# + pycharm={"name": "#%%\n"}
items = {'Name': "Test",
"Year": 2019,
"Nested": {1: "test",
2: "test",
3: "test",}}
metadata = ta.Metadata(items=items)
metadata
# + [markdown] pycharm={"name": "#%% md\n"}
# There are also some predefined information that might come in handy:
#
# - `Unit` : unit of the data in the TimeSeries
# - `Sensor` : Name of the sensor recording the data
# - `Coords` : coordinates/location, where the data is recorded
# + pycharm={"name": "#%%\n"}
from timeatlas.types import Unit, Sensor, Coords
unit = Unit(name="Volt", symbol="V", data_type="float")
sensor = Sensor(id=1337, name="Temperatur Sensor A1")
location = Coords(long=7.1591, lat=46.7933)
# + pycharm={"name": "#%%\n"}
unit
# + pycharm={"name": "#%%\n"}
sensor
# + pycharm={"name": "#%%\n"}
location
# + [markdown] pycharm={"name": "#%% md\n"}
# Adding data to an existing Metadata object.
# + pycharm={"name": "#%%\n"}
metadata.add(items=unit)
metadata.add(items=sensor)
metadata.add(items=location)
# + pycharm={"name": "#%%\n"}
metadata
# + pycharm={"name": "#%%\n"}
[a for a in dir(unit) if not a.startswith('__')]
# + [markdown] pycharm={"name": "#%% md\n"}
# TimeSeriesDataset
# -----------------
#
# In TimeAtlas, a TimeSeriesDataset is a collection of TimeSeries. The behaviour in
# most cases is similar to a classical list, with some additional functionalities.
#
# To create a TimeSeriesDataset we first need some TimeSeries. The
# TimeSeriesDataset will be represented by a small overview of statistic on each
# TimeSeries in it.
# + pycharm={"name": "#%%\n"}
ts = ta.TimeSeries.create('2019-01-01', '2019-01-04', freq='1D')
ts = ts.fill([i for i in range(len(ts))])
ts2 = ta.TimeSeries.create('2019-01-01', '2019-01-04', freq='H')
ts2 = ts2.fill([i for i in range(len(ts2))])
ts3 = ta.TimeSeries.create('2019-01-01', '2019-01-10', freq='1D')
ts3 = ts3.fill([i for i in range(len(ts3))])
tsd = ta.TimeSeriesDataset([ts, ts2, ts3])
tsd
# -
# Like in TimeSeries we can also use `TimeSeriesDataset.create()`. This will
# create the specified numbers of TimeSeries all with the same start, end and
# frequency.
# + pycharm={"name": "#%%\n"}
tsd = ta.TimeSeriesDataset.create(length=3, start='2019-01-01', end='2019-01-04', freq="1D")
print(f"TimeSeriesDataset.create() made a TimeSeriesDataset of length: {len(tsd)}")
# + pycharm={"name": "#%%\n"}
|
docs/user_guide/0_available_data_structures.ipynb
|