code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to generate publication quality tables
#
# estimagic helps you generate publication quality html and LaTeX tables, given a list of estimation results.
# ## Set up
# +
import io
import re
from collections import namedtuple
from copy import copy
from copy import deepcopy
import numpy as np
import pandas as pd
import statsmodels.api as sm
from IPython.core.display import HTML
from IPython.core.display import Latex
import estimagic.visualization.estimation_table as et
from estimagic.config import TEST_DIR
# -
# Load dataset
df = pd.read_csv(TEST_DIR / "visualization" / "diabetes.csv", index_col=0)
df.head()
# Fit regressions
est = sm.OLS(endog=df["target"], exog=sm.add_constant(df[df.columns[0:4]])).fit()
est2 = sm.OLS(endog=df["target"], exog=sm.add_constant(df[df.columns[0:6]])).fit()
# The estimation results can be passed as `statsmodels` regression results, or as a tuple with attributes `params` (pandas DataFrame), with parameter values, standard errors and/or confidence intervals and p-values, and `info` (dict) with summary statistics of the model.
# +
# Extract `params` and `info`
namedtuplee = namedtuple("namedtuplee", "params info")
est3 = namedtuplee(
params=et._extract_params_from_sm(est),
info={**et._extract_info_from_sm(est)},
)
# Remove redundant information
del est3.info["df_model"]
del est3.info["df_resid"]
# -
# The resulting dictionary contains all the information we need:
est3[0]
est3[1]
# +
# Make copy of estimation results
est4 = {}
est4["params"] = deepcopy(est3.params)
est4["info"] = deepcopy(est3.info)
est5 = {}
est5["params"] = deepcopy(est3.params)
est5["info"] = deepcopy(est3.info)
# -
# ## Basics
# Basic features include custom title and custom names for models, columns, index and parameters.
# ### Basic table, without title
ex_html = et.estimation_table([est, est2, est3, est4, est5], return_type="html")
HTML(ex_html)
ex_latex = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="latex",
left_decimals=4,
alignment_warning=False,
siunitx_warning=False,
)
Latex(ex_latex)
# ### Basic table, with title
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption"},
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4, est5],
left_decimals=4,
return_type="latex",
render_options={"caption": "This is a caption"},
alignment_warning=False,
siunitx_warning=False,
)
Latex(ex_tex)
# ### Column names
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption"},
custom_col_names=list("abcde"),
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="latex",
render_options={"caption": "This is a caption"},
left_decimals=4,
custom_col_names=list("abcde"),
alignment_warning=False,
siunitx_warning=False,
)
Latex(ex_tex)
# Column names can be hidden by passing `show_col_names=False`:
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption"},
show_col_names=False,
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="latex",
render_options={"caption": "This is a caption"},
left_decimals=4,
show_col_names=False,
alignment_warning=False,
siunitx_warning=False,
)
Latex(ex_tex)
# ### Model names
custom_mod_names = {"M a": [0], "M b-d": [1, 2, 3], "M e": [4]}
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption"},
custom_model_names=custom_mod_names,
custom_col_names=list("abcde"),
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="latex",
render_options={"caption": "This is a caption"},
left_decimals=4,
custom_model_names=custom_mod_names,
custom_col_names=list("abcde"),
alignment_warning=False,
siunitx_warning=False,
)
Latex(ex_tex)
# ### Index name
# By default, the index name is "index":
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption", "index_names": True},
custom_model_names=custom_mod_names,
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="latex",
render_options={"caption": "This is a caption", "index_names": True},
left_decimals=4,
custom_model_names=custom_mod_names,
alignment_warning=False,
siunitx_warning=False,
)
Latex(ex_tex)
# This can be customized by passing a different index name to `custom_index_names`:
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={
"caption": "This is a caption",
},
custom_index_names=["Variables"],
custom_model_names=custom_mod_names,
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="latex",
render_options={
"caption": "This is a caption",
},
custom_index_names=["Variables"],
left_decimals=4,
custom_model_names=custom_mod_names,
alignment_warning=False,
siunitx_warning=False,
)
Latex(ex_tex)
# ### Parameter names
# Custom parameter names can be specified by passing a dictionary to `custom_param_names`:
cust_par_names = {"const": "Intercept", "Sex": "Gender"}
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={
"caption": "This is a caption",
},
custom_index_names=["Variables"],
custom_model_names=custom_mod_names,
custom_param_names=cust_par_names,
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="latex",
render_options={
"caption": "This is a caption",
},
custom_index_names=["Variables"],
left_decimals=4,
custom_model_names=custom_mod_names,
custom_param_names=cust_par_names,
alignment_warning=False,
siunitx_warning=False,
)
Latex(ex_tex)
# ## Advanced
# ### Confidence intervals
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={
"caption": "This is a caption",
},
custom_index_names=["Variables"],
custom_model_names=custom_mod_names,
custom_param_names=cust_par_names,
confidence_intervals=True,
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4],
return_type="latex",
render_options={
"caption": "This is a caption",
},
custom_index_names=["Variables"],
left_decimals=4,
custom_model_names=None,
custom_param_names=cust_par_names,
alignment_warning=False,
siunitx_warning=False,
confidence_intervals=True,
)
Latex(ex_tex)
# Passing `confidence_intervals=False` prints standard errors. To hide both standard errors and confidence intervals you need to pass `show_inference=False`:
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption"},
custom_index_names=["Variables"],
custom_model_names=custom_mod_names,
custom_param_names=cust_par_names,
show_inference=False,
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4],
return_type="latex",
render_options={
"caption": "This is a caption",
},
custom_index_names=["Variables"],
left_decimals=4,
custom_model_names=None,
custom_param_names=cust_par_names,
alignment_warning=False,
siunitx_warning=False,
show_inference=False,
)
Latex(ex_tex)
# ### Footer
# To avoid printing statistics, such as R-squared and number of observations, pass `show_footer=False`.
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption"},
custom_param_names=cust_par_names,
show_footer=False,
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4],
return_type="latex",
render_options={
"caption": "This is a caption",
},
left_decimals=4,
custom_model_names=None,
custom_param_names=cust_par_names,
alignment_warning=False,
siunitx_warning=False,
show_footer=False,
)
Latex(ex_tex)
# ### Custom notes
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption"},
custom_param_names=cust_par_names,
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est, est2, est3, est4],
return_type="latex",
render_options={
"caption": "This is a caption",
},
left_decimals=4,
custom_model_names=None,
custom_param_names=cust_par_names,
alignment_warning=False,
siunitx_warning=False,
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
)
Latex(ex_tex)
# ### Custom names for summary statistics
ex_html = et.estimation_table(
[est, est2, est3, est4, est5],
return_type="html",
render_options={"caption": "This is a caption"},
custom_param_names=cust_par_names,
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
stats_dict={"R$^2$": "rsquared", "N. Obs": "n_obs"},
)
HTML(ex_html)
# ## MultiIndex
# ### Set up
# Convert `params` DataFrame to MultiIndex
df = et._extract_params_from_sm(est)
df.index = pd.MultiIndex.from_arrays(
np.array([["Intercept", "Slope", "Slope", "Slope", "Slope"], df.index.values])
)
df
# Extract info and generate tuple of estimation results for `est1`
info = et._extract_info_from_sm(est)
est_mi = namedtuplee(params=df, info=info)
# Repeat for `est2`
df = et._extract_params_from_sm(est2)
df.index = pd.MultiIndex.from_arrays(
np.array(
[
["Intercept", "Slope", "Slope", "Slope", "Slope", "Else", "Else"],
df.index.values,
]
)
)
info = et._extract_info_from_sm(est2)
est_mi2 = namedtuplee(params=df, info=info)
# ### Basics
ex_html = et.estimation_table(
[est_mi, est_mi2],
return_type="html",
render_options={
"caption": "This is a caption",
},
custom_param_names=cust_par_names,
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est_mi, est_mi2],
return_type="latex",
render_options={
"caption": "This is a caption",
},
left_decimals=3,
custom_model_names=None,
custom_param_names=cust_par_names,
alignment_warning=False,
siunitx_warning=False,
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
)
Latex(ex_tex)
# ### Parameter names
ex_html = et.estimation_table(
[est_mi, est_mi2],
return_type="html",
render_options={
"caption": "This is a caption",
},
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
custom_param_names={"Age": "Maturity", "Else": "Additionally"},
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est_mi, est_mi2],
return_type="latex",
render_options={
"caption": "This is a caption",
},
left_decimals=3,
custom_model_names=None,
alignment_warning=False,
siunitx_warning=False,
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
custom_param_names={"Age": "Maturity", "Else": "Additionally"},
)
Latex(ex_tex)
# ### Index and model names
stats_dict = {
"Observations": "n_obs",
"R$^2$": "rsquared",
"Adj. R$^2$": "rsquared_adj",
"Residual Std. Error": "resid_std_err",
"F Statistic": "fvalue",
"show_dof": True,
}
ex_html = et.estimation_table(
[est_mi, est_mi2, est_mi],
return_type="html",
render_options={
"caption": "This is a caption",
},
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
custom_param_names={"Age": "Maturity", "Else": "Additionally"},
custom_index_names=["Category", "Variable"],
custom_model_names={"M1-2": [0, 1], "M3": [2]},
stats_dict=stats_dict,
)
HTML(ex_html)
ex_tex = et.estimation_table(
[est_mi, est_mi2],
return_type="latex",
render_options={
"caption": "This is a caption",
},
left_decimals=4,
alignment_warning=False,
siunitx_warning=False,
custom_notes=[
"This is the first note of some length",
"This is the second note probably of larger length",
],
custom_param_names={"Age": "Maturity", "Else": "Additionally"},
custom_index_names=["Category", "Variable"],
custom_model_names={"M1": [0], "M2": [1]},
stats_dict=stats_dict,
)
Latex(ex_tex)
|
docs/source/how_to_guides/miscellaneous/how_to_generate_publication_quality_tables.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scipy.stats import shapiro
import numpy as np
# #### Shapiro-Wilk Test
# * Null: sample is drawn from normal distribution.
# reject null
expo = np.random.exponential(3, 100)
w, p = shapiro(expo)
p
# cannot reject null
gaus = np.random.normal(3, 1, 100)
w, p = shapiro(gaus)
p
|
statistics_test/shapiro_wilk.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
# -
# ## Named Tuples
#
# Approaching interpolation using named tuples
Student = namedtuple('Student', ['name', 'surname', 'mark'])
s1 = Student('John', 'Smith', 30)
s1.surname
Circle = namedtuple('Circle', ['cx', 'cy', 'r'])
my_circle = Circle(0, 0, 7)
my_circle.r
# When interpolating a function we need three things:
# - nodes
# - points
# - matrix
#
# We can use named tuples to pack them all together
ApproximationOperator = namedtuple('ApproximationOperator', ['q', 'x', 'M'])
# +
def build_lagrangian_basis(q,x):
    """
    Take two 1D numpy arrays, q (interpolation nodes) and x (evaluation
    points), and return a matrix M such that M[i, j] is the evaluation of
    the j-th Lagrange basis polynomial ell_j (built on the nodes q) at
    the point x[i].

    Properties of a correct basis: each row of M sums to 1, and
    evaluating at the nodes themselves (x = q) yields the identity
    matrix, since ell_j(q_i) = delta_ij.
    """
    n_nodes = q.shape[0]
    n_evaluation_points = x.shape[0]
    M = np.zeros((n_evaluation_points, n_nodes), dtype=np.float64)
    for i in range(n_evaluation_points):
        for j in range(n_nodes):
            M[i,j] = 1
            for k in range(n_nodes):
                if j==k:
                    continue
                # ell_j(x) = prod_{k != j} (x - q_k) / (q_j - q_k).
                # The original computed (x - q_j) / (q_k - q_j), which is
                # not the Lagrange basis: it vanishes at its own node q_j
                # instead of equaling 1 there.
                M[i,j] = M[i,j] * (x[i]-q[k])/(q[j]-q[k])
    return M
# -
my_q = np.linspace(0,1,5)
my_x = np.linspace(0,1,7)
build_lagrangian_basis(my_q, my_x)
def build_equispace_interpolator(n_nodes, x):
    """Return the ApproximationOperator that interpolates a function on
    [0, 1] using n_nodes equally spaced nodes, evaluated at points x."""
    nodes = np.linspace(0, 1, n_nodes)
    basis = build_lagrangian_basis(nodes, x)
    return ApproximationOperator(q=nodes, x=x, M=basis)
def f1(x):
    """Evaluate sin(pi * x), the test function being interpolated."""
    return np.sin(x * np.pi)
evaluation_points = np.linspace(0,1,257)
I_4 = build_equispace_interpolator(4, evaluation_points)
evaluation_of_interpolation = I_4.M @ f1(I_4.q) # this is matrix prod
plt.plot(evaluation_points, f1(evaluation_points))
plt.plot(evaluation_points, evaluation_of_interpolation, color='red')
|
notebooks/LH_lab03_convergence.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py36]
# language: python
# name: conda-env-py36-py
# ---
# +
import sys
import glob
import re
import fnmatch
import math
import os
from os import listdir
from os.path import join, isfile, basename
import itertools
import numpy as np
from numpy import float32, int32, uint8, dtype, genfromtxt
from scipy.stats import ttest_ind
import pandas as pd
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import colorsys
# +
# VFB labels and label names
labels = [16,64,8,32,2,4,65,66,33,67,34,17,69,70,35,71,9,18,72,36,73,74,37,75,19,76,38,77,39,78,79,20,5,40,80,10,81,82,83,84,85,86,11,22,23,24,12,3,6,49,50,25,51,13,52,26,53,27,54,55,56,28,7,14,57,58,29,59,30,60,15,61,31,62,63]
label_names_file = '/groups/saalfeld/home/bogovicj/vfb/DrosAdultBRAINdomains/refData/Original_Index.tsv'
label_names = pd.read_csv( label_names_file, delimiter='\t', header=0 )
# print label_names[ label_names['Stack id'] == 11 ]['JFRCtempate2010.mask130819' ].iloc[0]
# print label_names[ label_names['Stack id'] == 70 ]['JFRCtempate2010.mask130819' ].iloc[0]
def get_label_name( label_id ):
    """Look up the shorthand name for a VFB label id in the label_names table."""
    match = label_names[label_names['Stack id'] == label_id]
    return match['JFRCtempate2010.mask130819'].iloc[0]
# label_names['JFRCtempate2010.mask130819']
label_shorthand_col ='JFRCtempate2010.mask130819'
label_id_col ='Stack id'
# Find left-right matching labels
rnames = label_names[ label_names.apply( lambda x : x[label_shorthand_col].endswith('_R'), axis=1 )]
lr_pair_list = []
for rn in rnames.loc[:,label_shorthand_col]:
ln = rn.replace('_R','_L')
id_R = label_names[ label_names[label_shorthand_col]==rn ].loc[:,label_id_col]
id_L = label_names[ label_names[label_shorthand_col]==ln ].loc[:,label_id_col]
lr_pair_list += [[id_R.values[0], id_L.values[0]]]
lr_pair_list = np.array( lr_pair_list )
# +
path='/nrs/saalfeld/john/projects/flyChemStainAtlas/all_evals/F-antsFlip/cmtkCow/evalComp/stats0.txt'
def readlines( f ):
    """Return the list of (newline-terminated) lines of text file *f*.

    Uses a context manager so the file handle is closed even if reading
    raises, instead of the original open / readlines / close sequence,
    which leaked the handle on error.
    """
    with open(f, 'r') as fh:
        return fh.readlines()
def labelstat( lines, label, stat='MEDIAN'):
    """Return the value of statistic *stat* for *label* from stats-file lines.

    Each relevant line has the form "<label> <STAT> <value>"; the value is
    returned as a float, or None (implicitly) if no matching line exists.

    The original used ``l.startswith(str(label))``, so e.g. label 7 could
    match the line for label 70 (both ids occur in the VFB label list).
    Compare the first whitespace-separated token exactly instead.
    """
    statupper = stat.upper()
    target = str(label)
    for l in lines:
        parts = l.split()
        # Token-exact label match avoids prefix collisions (7 vs 70, 73, ...).
        if len(parts) >= 3 and parts[0] == target and parts[1].upper() == statupper:
            return float(parts[2])
# +
base_dir = '/nrs/saalfeld/john/projects/flyChemStainAtlas/all_evals'
# templates = ['JFRCtemplate2010']
# reg_methods = [ 'cmtkCow', 'cmtkCOG', 'antsRegYang' ]
templates = ['JFRCtemplate2010', 'JFRC2013_lo', 'F-antsFlip_lo', 'F-cmtkFlip_lof', 'TeforBrain_f']
# reg_methods = [ 'cmtkCow', 'cmtkCOG', 'cmtkHideo' ]
# reg_methods = [ 'antsRegOwl', 'antsRegDog', 'antsRegYang' ]
reg_methods = [ 'cmtkCow', 'cmtkCOG', 'cmtkHideo', 'antsRegOwl', 'antsRegDog', 'antsRegYang' ]
# +
# parse all data into a data frame
expnamelist=[]
algnamelist=[]
templatelist=[]
statlist=[]
datalist=[]
labellist=[]
labelnamelist=[]
linelist=[]
linelabellist=[]
for template,reg in itertools.product( templates, reg_methods ):
exp_dir = join( base_dir, template, reg )
# print exp_dir
name_f = join(exp_dir,'name')
if os.path.isfile( name_f ):
expname = readlines( join(exp_dir,'name'))[0]
else:
expname = '{},{}'.format(template,reg)
# print expname
for line in [0,1,2,3]:
eval_dir=join(exp_dir,'evalComp')
# print eval_dir
# Read label stats
statFile = '{}/stats{}.txt'.format(eval_dir,line)
stat_lines = readlines( statFile )
for sl in stat_lines:
dat = sl.split()
label = int(dat[0])
line_label = str(line) + '_' + str(label)
expnamelist += [ expname.replace('\n','') ]
algnamelist += [ reg ]
templatelist += [ template ]
linelist += [ line ]
labellist += [ label ]
linelabellist += [ line_label ]
labelnamelist += [ get_label_name(label) ]
statlist += [ dat[1] ]
datalist += [ float(dat[2]) ]
# Read total brain mask stats
allStatFile = '{}/all_stats{}.txt'.format(eval_dir,line)
all_stat_lines = readlines( allStatFile )
label = -1
for sl in all_stat_lines:
if( sl.startswith('all ')):
line_label = str(line) + '_' + str(label)
dat = sl.split()
expnamelist += [ expname.replace('\n','') ]
algnamelist += [ reg ]
templatelist += [ template ]
linelist += [ line ]
labellist += [ label ] # -1 indicates total mask label
linelabellist += [ line_label ]
labelnamelist += [ 'ALL' ]
statlist += [ dat[1] ]
datalist += [ float(dat[2]) ]
dist_df = pd.DataFrame( {'EXP':expnamelist,
'ALG':algnamelist,
'TEMPLATE':templatelist,
'STAT':statlist,
'LINE':linelist,
'LABEL':labellist,
'LINELABEL':linelabellist,
'LABELNAME':labelnamelist,
'VALUE':datalist })
# -
# ## Determine the best algorithm
#
# Below is average over median and mean distances per line-label, though it may be worth it to also
# filter by "useful" line-labels, but this should not matter.
# +
# Group by algorithm, then average over mean distances
avg_avg_dists = dist_df.loc[(dist_df.STAT == 'MEAN'),['ALG','VALUE']].groupby(['ALG'],as_index=False).mean()
aads = avg_avg_dists.sort_values('VALUE', ascending=True)
print( 'best by avg_avg is: ',aads.iloc[0]['ALG'])
# Group by algorithm, then average over median distances
avg_med_dists = dist_df.loc[(dist_df.STAT == 'MEDIAN'),['ALG','VALUE']].groupby(['ALG'],as_index=False).mean()
amds = avg_med_dists.sort_values('VALUE', ascending=True)
print( 'best by avg_med is: ', amds.iloc[0]['ALG'])
print( ' ' )
print( ' ' )
print( aads )
print( ' ' )
print ( ' ' )
print( amds )
# -
# ## Significance tests
# and other tests
# +
# Load combined data
# dist_samples_f = '/nrs/saalfeld/john/projects/flyChemStainAtlas/all_evals/label_data_line3.csv.gz'
# dist_samples_df = pd.read_csv( dist_samples_f, header=None, names=['TEMPLATE','ALG','LINE','LABEL','DISTANCE'] )
# +
# dist_0_70 = dist_samples_df[ ((dist_samples_df.ALG == 'cmtkCOG') | (dist_samples_df.ALG == 'antsRegDog')) &
# (dist_samples_df.LINE == 0) & (dist_samples_df.LABEL == 70)]
# +
# # dist_0_70.sample(500)
# print( dist_0_70.shape )
# dist_0_70_fantsgrp = dist_0_70[(dist_0_70.TEMPLATE == 'F-antsFlip_lo')]
# print( dist_0_70_fantsgrp.shape )
# +
# d_cog = dist_0_70_fantsgrp[ dist_0_70_fantsgrp.ALG == 'cmtkCOG']
# print( d_cog.shape )
# d_yang = dist_0_70_fantsgrp[ dist_0_70_fantsgrp.ALG == 'antsRegDog']
# print( d_yang.shape )
# t,p = ttest_ind( d_cog['DISTANCE'], d_yang['DISTANCE'])
# print( 't:', t )
# print( 'p:', p )
# +
# print( 'cog: ', d_cog.median())
# print( 'dog: ', d_yang.median())
# +
# print( 'cog: ', d_cog.DISTANCE.mean())
# print( 'dog: ', d_yang.DISTANCE.mean())
# +
# d_cog_yang = dist_0_70_fantsgrp[ (dist_0_70_fantsgrp.ALG == 'cmtkCOG') | (dist_0_70_fantsgrp.ALG == 'antsRegDog') ]
# sns.violinplot( y=d_cog_yang.DISTANCE, x=d_cog_yang.ALG )
# -
# ## Determine the best algorithm
# ### Without averaging across labels
#
# +
# dist_samples_df.head()
# tmp = dist_samples_df.sample(100000)
# tmp
for line in [0,1,2,3]:
# for line in [3]:
print( 'line: ', line )
dist_samples_f = '/nrs/saalfeld/john/projects/flyChemStainAtlas/all_evals/label_data_line{}.csv.gz'.format( line )
dist_samples_df = pd.read_csv( dist_samples_f, header=None, names=['TEMPLATE','ALG','LINE','LABEL','DISTANCE'] )
line_templAlg_sorted = dist_samples_df.groupby( ['ALG','TEMPLATE'], as_index=False ).mean().sort_values( 'DISTANCE', ascending=True )
line_templAlg_sorted = line_templAlg_sorted.reset_index(drop=True).reset_index()
# Rank the algs and templates above
# print( line3_templAlg_sorted.index[ line3_templAlg_sorted.ALG == 'antsRegDog' ].tolist())
# print( line3_templAlg_sorted.index[ line3_templAlg_sorted.ALG == 'cmtkCOG' ].tolist())
# line3_templAlg_sorted[['ALG','index']]
print( 'rank by alg' )
print( line_templAlg_sorted[['ALG','index']].groupby(['ALG'], as_index=False ).mean().sort_values('index', ascending=True ))
print()
print( 'rank by template' )
print( line_templAlg_sorted[['TEMPLATE','index']].groupby(['TEMPLATE'], as_index=False ).mean().sort_values('index', ascending=True ))
print( ' ' )
print( 'avg by alg' )
print( line_templAlg_sorted.groupby( ['ALG'], as_index=False ).mean().sort_values('DISTANCE', ascending=True ))
print( ' ' )
print( 'avg by template' )
print( line_templAlg_sorted.groupby( ['TEMPLATE'], as_index=False ).mean().sort_values('DISTANCE', ascending=True ))
print( ' ' )
print( ' ' )
print( '##################################################' )
print( ' ' )
print( ' ' )
# +
dist_samples_f = '/nrs/saalfeld/john/projects/flyChemStainAtlas/all_evals/label_data.csv.gz'
dist_samples_df = pd.read_csv( dist_samples_f, header=None, names=['TEMPLATE','ALG','LINE','LABEL','DISTANCE'] )
line_templAlg_sorted = dist_samples_df.groupby( ['ALG','TEMPLATE'], as_index=False ).mean().sort_values( 'DISTANCE', ascending=True )
line_templAlg_sorted = line_templAlg_sorted.reset_index(drop=True).reset_index()
# Rank the algs and templates above
print( 'rank by alg' )
print( line_templAlg_sorted[['ALG','index']].groupby(['ALG'], as_index=False ).mean().sort_values('index', ascending=True ))
print()
print( 'rank by template' )
print( line_templAlg_sorted[['TEMPLATE','index']].groupby(['TEMPLATE'], as_index=False ).mean().sort_values('index', ascending=True ))
print( ' ' )
print( 'avg by alg' )
print( line_templAlg_sorted.groupby( ['ALG'], as_index=False ).mean().sort_values('DISTANCE', ascending=True ))
print( ' ' )
print( 'avg by template' )
print( line_templAlg_sorted.groupby( ['TEMPLATE'], as_index=False ).mean().sort_values('DISTANCE', ascending=True ))
print( ' ' )
print( ' ' )
print( '##################################################' )
print( ' ' )
print( ' ' )
# -
# ## Determine the best template (using the best algorithm)
# +
# Statistics for the best algorithm
best_alg_dists = dist_df[dist_df.ALG == 'cmtkCOG']
best_alg_avg_avg_dists = dist_df.loc[(dist_df.STAT == 'MEAN'),['TEMPLATE','VALUE']].groupby(['TEMPLATE'],as_index=False).mean()
sorted_best_by_avg = best_alg_avg_avg_dists.sort_values('VALUE', ascending=True)
print( 'best by avg_avg is: ', sorted_best_by_avg.iloc[0]['TEMPLATE'])
print(' ')
print('ordering:')
print(sorted_best_by_avg)
print(' ')
print(' ')
best_alg_avg_med_dists = dist_df.loc[(dist_df.STAT == 'MEDIAN'),['TEMPLATE','VALUE']].groupby(['TEMPLATE'],as_index=False).mean()
sorted_best_by_med = best_alg_avg_med_dists.sort_values('VALUE', ascending=True)
print( 'best by avg_med is: ', sorted_best_by_med.iloc[0]['TEMPLATE'] )
print(' ')
print('ordering:')
print(sorted_best_by_med)
# -
# ## Determining the line-labels with the most samples
dist_by_linelabel = (dist_df.loc[ (dist_df.LABEL > 0) & (dist_df.STAT == 'COUNT'), ['LINELABEL','VALUE'] ]).sort_values('VALUE', ascending=False)
# dist_by_linelabel.tail(50)
# ## Cluster line-labels
# +
# get line-label wise statistics for the best algorithm and template
best_exp_df = dist_df[ (dist_df.ALG == 'cmtkCOG') & (dist_df.TEMPLATE == 'F-antsFlip_lo') & (dist_df.LABEL > 0 )]
# Reorganize
best_exp_df_4cluster = best_exp_df.pivot(index='LINELABEL', columns='STAT', values='VALUE')
# print( best_exp_df_4cluster.head() )
log_counts = best_exp_df_4cluster.COUNT.map( lambda x: math.log(x))
# sns.kdeplot( best_exp_df_4cluster.MEAN, best_exp_df_4cluster.STDDEV )
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
sns.set_style("darkgrid", {"axes.facecolor": ".9"})
# points = plt.scatter( best_exp_df_4cluster['MEAN'], best_exp_df_4cluster['STDDEV'],
# c=log_counts, alpha=0.4, cmap="viridis")
points = plt.scatter( log_counts, best_exp_df_4cluster['MEAN'],
c=best_exp_df_4cluster['STDDEV'], alpha=0.4, cmap="viridis")
plt.xlabel('COUNT')
plt.ylabel('MEAN')
plt.title('Colored by STDDEV')
plt.colorbar( points )
fig = plt.gcf()
a = fig.set_size_inches( 16, 12 )
# -
# The above isn't so clearly "clustered" to my eye, but I do wonder about those line-labels with very high mean distance
# +
# What line-labels have such a high mean?
best_exp_df_4cluster[best_exp_df_4cluster.MEAN > 20 ]
# -
# Okay, so some of these have small or very small sample size, but what of the two with > 10k samples..?
# For each line, Sort labels by average size
gb_line_label = dist_df[ (dist_df.STAT == 'COUNT') ].groupby(['LINE','LABEL','LABELNAME','LINELABEL'], as_index=False )
line_label_avgs = gb_line_label.mean().sort_values('VALUE', ascending=False)
# +
# Figure out colors
numTemplates = 4
numRegalgs = 6
plot_colors = []
for ti,ai in itertools.product( range(numTemplates), range(numRegalgs)):
# print ti,ai
plot_colors += [ colorsys.hsv_to_rgb( float(ti)/numTemplates, 0.25 + 0.75*float(ai)/numRegalgs, 1.0 )]
# +
# For each line, plot the median distance over templates/regMethod
# Only plot for the top N labels
N = 10
line = 0
plt.rc('legend',fontsize=7) # using a size in points
for line in [0,1,2,3]:
# print line
# Get the top N labels for this line
# l = line_label_avgs[ (line_label_avgs.LINE == line ) & (line_label_avgs.LABEL > 0) ].head( N ).LABEL
ln = line_label_avgs[ (line_label_avgs.LINE == line ) & (line_label_avgs.LABEL > 0) ].head( N ).LABELNAME
med_df = dist_df[(dist_df.STAT == 'MEDIAN') & (dist_df.LINE == line) & (dist_df.LABEL > 0) ]
# print med_df.head()
# med_df_piv = med_df[['EXP','LABEL','VALUE']].pivot( index='LABEL', columns='EXP', values='VALUE' )
# med_df_piv.loc[ l ].plot.bar( color=plot_colors )
med_df_piv = med_df[['EXP','LABELNAME','VALUE']].pivot( index='LABELNAME', columns='EXP', values='VALUE' )
med_df_piv.loc[ ln ].plot.bar( color=plot_colors )
fig = plt.gcf()
a = fig.set_size_inches( 18, 8 )
# +
# For each line, plot the median distance over all labels for templates/regMethod
for line in [0,1,2,3]:
med_df = dist_df[(dist_df.STAT == 'MEDIAN') & (dist_df.LINE == line) & (dist_df.LABEL == -1)]
med_df[['EXP','VALUE']].set_index('EXP').plot.bar()
# +
#sz_templates = [ 'F-antsFlip', 'F-antsFlip_lo', 'F-antsFlip_1p52', 'F-antsFlip_2p4']
sz_templates = [ 'F-antsFlip_2p4iso', 'F-antsFlip_1p2iso', 'F-antsFlip_lo', 'F-antsFlip' ]
sz_reg_methods = ['cmtkCOG']
exp_order = []
# parse all data into a data frame
expnamelist=[]
statlist=[]
datalist=[]
labellist=[]
linelist=[]
for template,reg in itertools.product( sz_templates, sz_reg_methods ):
exp_dir = join( base_dir, template, reg )
# print exp_dir
name_f = join(exp_dir,'name')
if os.path.isfile( name_f ):
expname = readlines( join(exp_dir,'name'))[0].rstrip()
else:
expname = '{},{}'.format(template,reg)
exp_order += [expname]
for line in [0,1,2,3]:
eval_dir=join(exp_dir,'evalComp')
# Read label stats
statFile = '{}/stats{}.txt'.format(eval_dir,line)
stat_lines = readlines( statFile )
for sl in stat_lines:
dat = sl.split()
expnamelist += [ expname.replace('\n','') ]
linelist += [ line ]
labellist += [ int(dat[0]) ]
statlist += [ dat[1] ]
datalist += [ float(dat[2]) ]
# Read total brain mask stats
allStatFile = '{}/all_stats{}.txt'.format(eval_dir,line)
all_stat_lines = readlines( allStatFile )
for sl in all_stat_lines:
if( sl.startswith('all ')):
dat = sl.split()
expnamelist += [ expname.replace('\n','') ]
linelist += [ line ]
labellist += [ -1 ] # -1 indicates total mask label
statlist += [ dat[1] ]
datalist += [ float(dat[2]) ]
sz_dist_df = pd.DataFrame( {'EXP':expnamelist,
'STAT':statlist,
'LINE':linelist,
'LABEL':labellist,
'VALUE':datalist })
# +
# For each line, Sort labels by average size
sz_gb_line_label = sz_dist_df[ (sz_dist_df.STAT == 'COUNT') ].groupby(['LINE','LABEL'], as_index=False )
sz_line_label_avgs = sz_gb_line_label.mean().sort_values('VALUE', ascending=False)
# Figure out colors
numTemplates = len(exp_order)
numRegalgs = 1
plot_colors = []
for ti,ai in itertools.product( range(numTemplates), range(numRegalgs)):
# print ti,ai
plot_colors += [ colorsys.hsv_to_rgb( float(ti)/numTemplates, 0.7 + float(ai)/numRegalgs, 1.0 )]
# For each line, plot the median distance over templates/regMethod
# Only plot for the top N labels
N = 10
line = 0
plt.rc('legend',fontsize=7) # using a size in points
for line in [0,1,2,3]:
# print line
# Get the top N labels for this line
l = sz_line_label_avgs[ (sz_line_label_avgs.LINE == line ) & (sz_line_label_avgs.LABEL > 0) ].head( N ).LABEL
med_df = sz_dist_df[(sz_dist_df.STAT == 'MEDIAN') & (sz_dist_df.LINE == line) & (sz_dist_df.LABEL > 0) ]
med_df_piv = med_df[['EXP','LABEL','VALUE']].pivot( index='LABEL', columns='EXP', values='VALUE' )
med_df_piv[exp_order].loc[ l ].plot.bar( color=plot_colors )
fig = plt.gcf()
a = fig.set_size_inches( 18, 8 )
|
python/labelwise_skeleton_distance_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ranjith-arch/Letsupgradepython/blob/master/Day3_Assignment_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="AJ2Y7W-xlCxd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 799} outputId="f2e0ce0a-de8c-4364-e965-0082afed863a"
"Prime numbers from 1 to 200 are:"
a=1
b=200
for num in range(a,b):
if(num>1):
for i in range(2,num):
if(num%i==0):
break
else:
print(num)
|
Day3_Assignment_2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# In this exercise, try to write a program that
# will resample particles according to their weights.
# Particles with higher weights should be sampled
# more frequently (in proportion to their weight).
# Don't modify anything below. Please scroll to the
# bottom to enter your code.
from math import *
import random
landmarks = [[20.0, 20.0], [80.0, 80.0], [20.0, 80.0], [80.0, 20.0]]  # fixed landmark positions
world_size = 100.0  # the world is a cyclic square with sides of this length


class robot:
    """A 2-D robot (or particle) with noisy motion and noisy landmark ranging."""

    def __init__(self):
        # Start at a uniformly random pose; all noise levels default to zero.
        self.x = random.random() * world_size
        self.y = random.random() * world_size
        self.orientation = random.random() * 2.0 * pi
        self.forward_noise = 0.0
        self.turn_noise = 0.0
        self.sense_noise = 0.0

    def set(self, new_x, new_y, new_orientation):
        """Place the robot at an exact pose, validating the bounds.

        Raises ValueError if the position lies outside the world or the
        orientation is outside [0, 2*pi).
        """
        # Bug fix: the original used Python 2 `raise ValueError, '...'`
        # statements, which are syntax errors under the Python 3 kernel
        # this notebook declares.
        if new_x < 0 or new_x >= world_size:
            raise ValueError('X coordinate out of bound')
        if new_y < 0 or new_y >= world_size:
            raise ValueError('Y coordinate out of bound')
        if new_orientation < 0 or new_orientation >= 2 * pi:
            raise ValueError('Orientation must be in [0..2pi]')
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation)

    def set_noise(self, new_f_noise, new_t_noise, new_s_noise):
        # makes it possible to change the noise parameters
        # this is often useful in particle filters
        self.forward_noise = float(new_f_noise)
        self.turn_noise = float(new_t_noise)
        self.sense_noise = float(new_s_noise)

    def sense(self):
        """Return a noisy distance measurement to each landmark."""
        Z = []
        for i in range(len(landmarks)):
            dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
            dist += random.gauss(0.0, self.sense_noise)
            Z.append(dist)
        return Z

    def move(self, turn, forward):
        """Return a NEW robot moved by (turn, forward) with motion noise applied."""
        if forward < 0:
            raise ValueError('Robot cant move backwards')
        # turn, and add randomness to the turning command
        orientation = self.orientation + float(turn) + random.gauss(0.0, self.turn_noise)
        orientation %= 2 * pi
        # move, and add randomness to the motion command
        dist = float(forward) + random.gauss(0.0, self.forward_noise)
        x = self.x + (cos(orientation) * dist)
        y = self.y + (sin(orientation) * dist)
        x %= world_size    # cyclic truncate
        y %= world_size
        # set particle
        res = robot()
        res.set(x, y, orientation)
        res.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
        return res

    def Gaussian(self, mu, sigma, x):
        # calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
        return exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / sqrt(2.0 * pi * (sigma ** 2))

    def measurement_prob(self, measurement):
        # calculates how likely a measurement should be (importance weight)
        prob = 1.0
        for i in range(len(landmarks)):
            dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
            prob *= self.Gaussian(dist, self.sense_noise, measurement[i])
        return prob

    def __repr__(self):
        return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
#myrobot = robot()
#myrobot.set_noise(5.0, 0.1, 5.0)
#myrobot.set(30.0, 50.0, pi/2)
#myrobot = myrobot.move(-pi/2, 15.0)
#print myrobot.sense()
#myrobot = myrobot.move(-pi/2, 10.0)
#print myrobot.sense()
# Simulate the "true" robot: one noisy move, then one landmark range reading.
myrobot = robot()
myrobot = myrobot.move(0.1, 5.0)
Z = myrobot.sense()
N = 1000  # number of particles
# Create N particles at random poses with small motion noise and large
# sensing noise (arguments: forward, turn, sense).
p = []
for i in range(N):
    x = robot()
    x.set_noise(0.05, 0.05, 5.0)
    p.append(x)
# Apply the same control (turn=0.1, forward=5.0) to every particle.
p2 = []
for i in range(N):
    p2.append(p[i].move(0.1, 5.0))
p = p2
# Importance weights: likelihood of the robot's measurement Z under each
# particle's pose (via measurement_prob).
w = []
for i in range(N):
    w.append(p[i].measurement_prob(Z))
#### DON'T MODIFY ANYTHING ABOVE HERE! ENTER CODE BELOW ####
# You should make sure that p3 contains a list with particles
# resampled according to their weights.
# Also, DO NOT MODIFY p.
p3 = []
|
particle filter/new_particle.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import BasicAer
from qiskit import execute
from qiskit.tools.visualization import plot_histogram
from math import *
# +
# Quantum teleportation demo: teleport the state of q[0] onto q[2].
# 3 qubits and 3 classical bits (one classical register per measurement)
q = QuantumRegister(3)
c0 = ClassicalRegister(1, "c0")
c1 = ClassicalRegister(1, "c1")
c2 = ClassicalRegister(1, "c2")
qc = QuantumCircuit(q, c0, c1, c2, name="teleport")
# Prepare the initial state on q[0]; u1(y) = u3(0, 0, y)
#qc.u3(0.3, 0.2, 0.1, q[0])
qc.u1(0.5, q[0])
# Prepare an entangled (Bell) pair between qubit 1 and qubit 2
qc.h(q[1])
qc.cx(q[1], q[2])
# The barrier instruction prevents optimizations from reordering gates
# across this point of the circuit.
qc.barrier(q)
# Bell-basis measurement part 1: CNOT between qubit 0 and qubit 1
qc.cx(q[0], q[1])
# Bell-basis measurement part 2: Hadamard on qubit 0
qc.h(q[0])
# measure qubit 0
qc.measure(q[0], c0[0])
# measure qubit 1
qc.measure(q[1], c1[0])
# If needed, apply the classically-controlled phase correction on qubit 2
qc.z(q[2]).c_if(c0, 1)
# If needed, apply the classically-controlled bit flip on qubit 2
qc.x(q[2]).c_if(c1, 1)
# measure qubit 2 (should now carry the teleported state)
qc.measure(q[2], c2[0])
backend = BasicAer.get_backend('qasm_simulator')
result = execute(qc, backend, shots=1000).result()
counts = result.get_counts(qc)
print(counts)
plot_histogram(counts)
# -
qc.draw(output='mpl')
|
teleport.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Transfer Learning
#
# In this notebook, you'll learn how to use pre-trained networks to solved challenging problems in computer vision. Specifically, you'll use networks trained on [ImageNet](http://www.image-net.org/) [available from torchvision](http://pytorch.org/docs/0.3.0/torchvision/models.html).
#
# ImageNet is a massive dataset with over 1 million labeled images in 1000 categories. It's used to train deep neural networks using an architecture called convolutional layers. I'm not going to get into the details of convolutional networks here, but if you want to learn more about them, please [watch this](https://www.youtube.com/watch?v=2-Ol7ZB0MmU).
#
# Once trained, these models work astonishingly well as feature detectors for images they weren't trained on. Using a pre-trained network on images not in the training set is called transfer learning. Here we'll use transfer learning to train a network that can classify our cat and dog photos with near perfect accuracy.
#
# With `torchvision.models` you can download these pre-trained networks and use them in your applications. We'll include `models` in our imports now.
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
# -
# Most of the pretrained models require the input to be 224x224 images. Also, we'll need to match the normalization used when the models were trained. Each color channel was normalized separately, the means are `[0.485, 0.456, 0.406]` and the standard deviations are `[0.229, 0.224, 0.225]`.
# +
data_dir = 'Cat_Dog_data'
# TODO: Define transforms for the training data and testing data
# Training transforms add augmentation (rotation, random crop, flip); the
# Normalize constants are the ImageNet channel statistics the pretrained
# torchvision models expect.
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
# Test transforms are deterministic: resize then center-crop to 224x224.
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
# -
# We can load in a model such as [DenseNet](http://pytorch.org/docs/0.3.0/torchvision/models.html#id5). Let's print out the model architecture so we can see what's going on.
model = models.densenet121(pretrained=True)
model
# This model is built out of two main parts, the features and the classifier. The features part is a stack of convolutional layers and overall works as a feature detector that can be fed into a classifier. The classifier part is a single fully-connected layer `(classifier): Linear(in_features=1024, out_features=1000)`. This layer was trained on the ImageNet dataset, so it won't work for our specific problem. That means we need to replace the classifier, but the features will work perfectly on their own. In general, I think about pre-trained networks as amazingly good feature detectors that can be used as the input for simple feed-forward classifiers.
# +
# Freeze parameters so we don't backprop through them
for param in model.parameters():
    param.requires_grad = False

from collections import OrderedDict

# Replacement 2-class head; the OrderedDict keys become the layer names.
# 1024 matches densenet121's feature dimension; LogSoftmax pairs with NLLLoss.
classifier = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(1024, 500)),
    ('relu', nn.ReLU()),
    ('fc2', nn.Linear(500, 2)),
    ('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
# -
# With our model built, we need to train the classifier. However, now we're using a **really deep** neural network. If you try to train this on a CPU like normal, it will take a long, long time. Instead, we're going to use the GPU to do the calculations. The linear algebra computations are done in parallel on the GPU leading to 100x increased training speeds. It's also possible to train on multiple GPUs, further decreasing training time.
#
# PyTorch, along with pretty much every other deep learning framework, uses [CUDA](https://developer.nvidia.com/cuda-zone) to efficiently compute the forward and backwards passes on the GPU. In PyTorch, you move your model parameters and other tensors to the GPU memory using `model.to('cuda')`. You can move them back from the GPU with `model.to('cpu')` which you'll commonly do when you need to operate on the network output outside of PyTorch. As a demonstration of the increased speed, I'll compare how long it takes to perform a forward and backward pass with and without a GPU.
import time

# Compare training speed of a few batches on CPU vs GPU.
for device in ['cpu', 'cuda']:
    criterion = nn.NLLLoss()
    # Only train the classifier parameters, feature parameters are frozen
    optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
    model.to(device)
    for ii, (inputs, labels) in enumerate(trainloader):
        # Move input and label tensors to the GPU
        inputs, labels = inputs.to(device), labels.to(device)
        # NOTE(review): `start` is reset on every batch, so the print below
        # effectively times only the last batch (then divides by 3) — confirm intent.
        start = time.time()
        outputs = model.forward(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        if ii == 3:
            break  # only 4 batches are needed for the comparison
    print(f"Device = {device}; Time per batch: {(time.time() - start)/3:.3f} seconds")
# You can write device agnostic code which will automatically use CUDA if it's enabled like so:
# ```python
# # at beginning of the script
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#
# ...
#
# # then whenever you get a new Tensor or Module
# # this won't copy if they are already on the desired device
# input = data.to(device)
# model = MyModule(...).to(device)
# ```
#
# From here, I'll let you finish training the model. The process is the same as before except now your model is much more powerful. You should get better than 95% accuracy easily.
#
# >**Exercise:** Train a pretrained models to classify the cat and dog images. Continue with the DenseNet model, or try ResNet, it's also a good model to try out first. Make sure you are only training the classifier and the parameters for the features part are frozen.
# +
# Use GPU if it's available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = models.densenet121(pretrained=True)

# Freeze parameters so we don't backprop through them
for param in model.parameters():
    param.requires_grad = False

# Replace the 1000-class ImageNet head with a small 2-class head (cat vs dog);
# 1024 is densenet121's feature dimension.
model.classifier = nn.Sequential(nn.Linear(1024, 256),
                                 nn.ReLU(),
                                 nn.Dropout(0.2),
                                 nn.Linear(256, 2),
                                 nn.LogSoftmax(dim=1))

# NLLLoss pairs with the LogSoftmax output layer above.
criterion = nn.NLLLoss()

# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=0.003)

model.to(device);
# -
epochs = 1
steps = 0
running_loss = 0
print_every = 5  # evaluate on the test set every 5 training steps

for epoch in range(epochs):
    for inputs, labels in trainloader:
        steps += 1
        # Move input and label tensors to the default device
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        logps = model.forward(inputs)
        loss = criterion(logps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if steps % print_every == 0:
            test_loss = 0
            accuracy = 0
            model.eval()  # disable dropout for evaluation
            with torch.no_grad():
                for inputs, labels in testloader:
                    inputs, labels = inputs.to(device), labels.to(device)
                    logps = model.forward(inputs)
                    batch_loss = criterion(logps, labels)
                    test_loss += batch_loss.item()
                    # Calculate accuracy
                    ps = torch.exp(logps)  # log-probabilities -> probabilities
                    top_p, top_class = ps.topk(1, dim=1)
                    equals = top_class == labels.view(*top_class.shape)
                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            print(f"Epoch {epoch+1}/{epochs}.. "
                  f"Train loss: {running_loss/print_every:.3f}.. "
                  f"Test loss: {test_loss/len(testloader):.3f}.. "
                  f"Test accuracy: {accuracy/len(testloader):.3f}")
            running_loss = 0
            model.train()  # restore training mode (re-enables dropout)
|
Udacity/PyTorch Scholarship Challenge Nanodegree Program/Week-4-Introduction to PyTorch/Part-7-Loading Image Data/.ipynb_checkpoints/Part 8 - Transfer Learning (Solution)-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Analyzing m5C patterns in Human oocyte and embryonic stem cells (CPU-based)
#
# ---
#
# Author: <NAME> @ Rui Zhang's Lab, SYSU, China
#
# Email: <EMAIL>
#
# Date: Jan, 2022
# ## Packages
#
# - Python==3.7.8
# - numpy==1.20.0
# - scipy==1.5.1
# - umap-learn==0.5.2
# - sklearn==0.23.1
# - matplotlib==3.2.2
# - seaborn==0.10.1
# - hdbscan==0.8.27
# ## 0. Configure environment
# +
import time
import sys, os
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder
import scipy.stats
import tracemalloc
import umap
import hdbscan
# for Linux only, load font file
mpl.font_manager.fontManager.addfont("./arial.ttf")
# configure matplotlib
mpl.rcParams['font.family'] = 'Arial'
mpl.rcParams['pdf.fonttype'] = 42
tracemalloc.start()
# -
# This is a helper function useful for export fasta and motifs.
def extract_fasta_and_draw_motifs(prefix, df, cluster_col="Cluster", filter=None, motif_column=("INFO", "motif_F10"), draw_logos=True):
    """Write one FASTA file per cluster and optionally render weblogo motif plots.

    Args:
        prefix: output directory (created under the current working directory).
        df: dataframe of sites; must contain `cluster_col` and `motif_column`.
        cluster_col: column holding the cluster id of each site.
        filter: optional boolean column name; keep only rows where it is True.
                (Parameter name shadows the builtin, kept for API compatibility.)
        motif_column: column holding the motif sequence (DNA alphabet).
        draw_logos: when True, shell out to `weblogo` for each cluster.

    Side effects: creates/overwrites ./<prefix>/ and its subdirectories,
    writes FASTA files, and runs external `weblogo` commands.
    """
    print("=============== {} ===============".format(prefix))
    # The original duplicated the six mkdir calls verbatim in both branches;
    # consolidated into one list.  NOTE: the else branch wipes previous output
    # with a shell `rm -r` — `prefix` must be trusted.
    subdirs = ["fasta", "logos_bits", "logos_bits_no_axis",
               "logos_freq", "logos_freq_png", "logos_bits_png"]
    if os.path.isdir("{}".format(prefix)) == False:
        os.mkdir("./{}".format(prefix))
    else:
        os.system("rm -r ./{}/*".format(prefix))
    for sub in subdirs:
        os.mkdir("./{}/{}".format(prefix, sub))
    if filter is not None:
        df = df[df[filter] == True].copy()
    clusters = set(df[cluster_col].tolist())
    for g in clusters:
        subdf = df[df[cluster_col] == g]
        with open("./{}/fasta/cluster_{}.fa".format(prefix, g), "w") as output:
            N = 0
            for idx, row in subdf.iterrows():
                # T -> U: weblogo is run with the RNA alphabet below.
                output.write(">{}\n{}\n".format(idx, row[motif_column].replace("T", "U")))
                N += 1
            print("Cluster #{}: {}".format(g, N))
    if draw_logos == True:
        for g in clusters:
            # Four renderings per cluster: bits (pdf/png), frequency (pdf/png),
            # and an axis-free bits pdf.
            os.system("weblogo -A rna -D fasta -F pdf --resolution 1000 --color-scheme classic --composition none -i -10 -P cluster_{g} -f ./{prefix}/fasta/cluster_{g}.fa > ./{prefix}/logos_bits/cluster_{g}.pdf".format(prefix=prefix, g=g))
            os.system("weblogo -A rna -D fasta -F png --resolution 1000 --color-scheme classic --composition none -i -10 -P cluster_{g} -f ./{prefix}/fasta/cluster_{g}.fa > ./{prefix}/logos_bits_png/cluster_{g}.png".format(prefix=prefix, g=g))
            os.system("weblogo -A rna -D fasta -F pdf -y Frequency --resolution 1000 --color-scheme classic --units probability --composition none -i -10 -P cluster_{g} -f ./{prefix}/fasta/cluster_{g}.fa > ./{prefix}/logos_freq/cluster_{g}.pdf".format(prefix=prefix, g=g))
            os.system("weblogo -A rna -D fasta -X no -Y no -P \"\" -F pdf --resolution 1000 --color-scheme classic --composition none -i -10 -f ./{prefix}/fasta/cluster_{g}.fa > ./{prefix}/logos_bits_no_axis/cluster_{g}.pdf".format(prefix=prefix, g=g))
            os.system("weblogo -A rna -D fasta -F png -y Frequency --resolution 1000 --color-scheme classic --units probability --composition none -i -10 -P cluster_{g} -f ./{prefix}/fasta/cluster_{g}.fa > ./{prefix}/logos_freq_png/cluster_{g}.png".format(prefix=prefix, g=g))
def show_logos(prefix, names=None):
    """Display every PNG logo found in directory *prefix*, one figure each.

    Args:
        prefix: directory to scan for *.png files.
        names: optional whitelist of file names; other files are skipped.
    """
    for img in os.listdir(prefix):
        if img.endswith(".png") == False:
            continue
        # Bug fix: the original tested the undefined name `fn` here, which
        # raised NameError whenever `names` was supplied.
        if names is not None and img not in names:
            continue
        plt.figure()
        image = plt.imread(prefix + "/" + img)
        plt.imshow(image)
        plt.axis("off")
def show_logos_cols(prefix, names=None, cols=2, figsize=(8,8), auto_size=True):
    """Show cluster logo PNGs from *prefix* in a grid with *cols* columns.

    Args:
        prefix: directory containing files named "cluster_<id>.png".
        names: optional whitelist of file names; other files are skipped.
        cols: number of grid columns.
        figsize: figure size used only when auto_size is False.
        auto_size: when True, derive the figure size from the grid shape.
    """
    # Collect candidate PNG files.
    file_list = []
    for img in os.listdir(prefix):
        if img.endswith(".png") == False:
            continue
        # Bug fix: the original tested the undefined name `fn` here, which
        # raised NameError whenever `names` was supplied.
        if names is not None and img not in names:
            continue
        file_list.append(img)
    # Pair each file with its numeric cluster id and sort numerically
    # (lexicographic sort would put cluster_10 before cluster_2).
    file_list_format = []
    for name in file_list:
        cluster_id = int(name.replace("cluster_", "").replace(".png", ""))
        file_list_format.append((name, cluster_id))
    file_list_format = sorted(file_list_format, key=lambda x: x[1])
    # ceil(len / cols) rows.
    if len(file_list_format) % cols == 0:
        rows = len(file_list_format) // cols
    else:
        rows = len(file_list_format) // cols + 1
    if auto_size == False:
        figsize = figsize
    else:
        figsize = (4 * cols, 1.5 * rows)
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    # zip truncates to the shorter sequence, so trailing empty axes are fine.
    for ax, image in zip(*[axes.reshape(-1), file_list_format]):
        fn, cluster_id = image
        img = plt.imread(prefix + "/" + fn)
        _ = ax.imshow(img)
        ax.set_title("cluster_{}".format(cluster_id))
    for ax in axes.reshape(-1):
        ax.axis("off")
    plt.tight_layout()
# This is a helper function useful for MEME analysis.
def run_meme(prefix, fasta_prefix, fn, mode=["streme", "meme", "ce", "cd"], nmotifs=5):
    """Run STREME/MEME motif discovery on one FASTA file.

    Args:
        prefix: project directory; results go under ./<prefix>/meme/.
        fasta_prefix: directory containing the input FASTA file.
        fn: FASTA file name (typically "cluster_<id>.fa").
        mode: which discovery tools/objective functions to run.
              (Mutable default kept for API compatibility; it is never mutated.)
        nmotifs: maximum number of motifs per tool.

    Side effects: creates/wipes ./<prefix>/meme/ and shells out to the
    external MEME Suite binaries.
    """
    if os.path.isdir("./{}/meme".format(prefix)) == False:
        os.mkdir("./{}/meme".format(prefix))
    else:
        os.system("rm -r ./{}/meme/*".format(prefix))
    # Bug fix: the original used fn.strip(".fa"), which strips the *characters*
    # '.', 'f', 'a' from both ends (e.g. "fam_a.fa" -> "m_"); remove the
    # ".fa" suffix instead.
    out = fn[:-3] if fn.endswith(".fa") else fn
    for m in mode:
        if m == "meme":
            # Normal mode
            print("Runing MEME...")
            os.system("meme -rna -nmotifs {nmotifs} -minw 5 -maxw 10 -oc ./{prefix}/meme/{out}_meme ./{fasta_prefix}/{fn}".format(fn=fn, prefix=prefix, nmotifs=nmotifs, fasta_prefix=fasta_prefix, out=out))
        elif m == "ce":
            # CE (central enrichment) objective
            print("Runing MEME in CE mode...")
            os.system("meme -rna -objfun ce -cefrac 0.1 -nmotifs {nmotifs} -minw 5 -maxw 10 -oc ./{prefix}/meme/{out}_meme_ce ./{fasta_prefix}/{fn}".format(fn=fn, prefix=prefix, nmotifs=nmotifs, fasta_prefix=fasta_prefix, out=out))
        elif m == "cd":
            # CD (central distance) objective
            print("Runing MEME in CD mode...")
            os.system("meme -rna -objfun cd -cefrac 0.1 -nmotifs {nmotifs} -minw 5 -maxw 10 -oc ./{prefix}/meme/{out}_meme_cd ./{fasta_prefix}/{fn}".format(fn=fn, prefix=prefix, nmotifs=nmotifs, fasta_prefix=fasta_prefix, out=out))
        elif m == "streme":
            print("Runing STREME")
            os.system("streme --rna --pvt 0.05 -nmotifs {nmotifs} --minw 5 --maxw 10 --objfun cd --oc ./{prefix}/meme/{out}_streme --p ./{fasta_prefix}/{fn}".format(fn=fn, prefix=prefix, fasta_prefix=fasta_prefix, nmotifs=nmotifs, out=out))
# ## 1. Load data and one-hot encode
#
# In practice, the alphabets 'ATCG' and 'ATCGN' do not make a huge difference. We can also mask 'N' as [0,0,0,0] rather than treat it as an extra base.
#
# Notably, discarding 'N' saves up to 20% of memory usage!
# +
# Load the site table: 3-level row index, 2-level column index.
df = pd.read_csv("./Human_dev.motif.F10.csv", index_col=[0,1,2], header=[0,1])

# One-hot encoder over the 5-letter alphabet; int8 keeps memory low.
enc = OneHotEncoder(dtype=np.int8)
enc.fit([[i] for i in "ATCGN"])

def onehot_enc(row):
    # Encode one site's flanking-sequence motif as a flat 0/1 vector
    # (one 5-wide one-hot slot per base).
    seq = [[i] for i in row[("INFO", "motif_F10")].upper()]
    return enc.transform(seq).toarray().reshape(-1)

# One encoded row per site; stacked into a (n_sites, seq_len * 5) matrix.
onehot_input = []
for idx, row in df.iterrows():
    onehot_input.append(onehot_enc(row))
onehot_input = np.array(onehot_input)
# -
# ## 2. Dimensional reduction wit UMAP
#
# In UMAP, we can adjust `min_dist`, `n_neighbors`, and `densmap` for different strength of decomposition. Lower (close to zero) `min_dist` is recommended for motif analysis. Higher `n_neighbors` can result in much condensed clusters, however, this will consume more time and memory, and its effect diminishes marginally. `densmap=True` can result in much condensed clusters, and of course, 2x computation time. With `densmap=True` you might ignore some patterns different but similar (e.g. motifs of NSUN2 and Nop2), hence it is not recommanded.
#
# It is known that duplication of input might reduce the efficiency of UMAP. You can use `unique=True` in UMAP and use `_unique_inverse_` to get back the data. You can also pre-process the data you use and then recover it before HDBSCAN (density is matter, we don't only care about the k-mers feature). [Ignore it if duplication rate is not high.]
#
#
# +
def UMAP(onehot_input, df):
    """Embed the one-hot motif matrix into 2-D with UMAP.

    Args:
        onehot_input: (n_sites, n_features) one-hot matrix.
        df: site dataframe; a copy gains "X"/"Y" embedding columns.

    Returns:
        (elapsed_seconds, peak_memory_MB, df_with_XY).
    """
    df = df.copy()
    # the embedding itself should take ~20 sec on this dataset
    print("UMAP")
    current, _ = tracemalloc.get_traced_memory()
    time0 = time.time()
    # min_dist near 0 gives tight clusters (recommended for motif analysis);
    # fixed random_state keeps the embedding reproducible.
    model = umap.UMAP(init="random", random_state=42, n_components=2, min_dist=0.01, n_neighbors=20, verbose=True, densmap=False)
    umap_output = model.fit_transform(onehot_input)
    time1 = time.time() - time0
    _, peak = tracemalloc.get_traced_memory()
    mem = (peak - current)/1024./1024.  # bytes -> MB
    print("UMAP time: {} sec".format(time1))
    print("UMAP RAM: {} MB".format(mem))
    print("==================================================")
    print()
    df["X"] = umap_output[:, 0]
    df["Y"] = umap_output[:, 1]
    del model  # release the fitted model before returning
    return time1, mem, df

_, _, df_UMAP = UMAP(onehot_input, df)
# -
# ## 3. Cluster sites with HDBSCAN
#
# In HDBSCAN, we can adjust `min_cluster_size`, `min_samples`, `cluster_selection_epsilon` and `cluster_selection_method` to adjust the strength of clustering. Check https://hdbscan.readthedocs.io/en/latest/index.html for more information.
#
# +
def cluster_HDBSCAN_softclustering(df):
    """Cluster the 2-D UMAP embedding with HDBSCAN soft clustering.

    Args:
        df: dataframe with "X"/"Y" embedding columns.

    Returns:
        (elapsed_seconds, peak_memory_MB, df_with_Cluster_column, fitted_model).
    """
    # use multi-core here (core_dist_n_jobs=4)
    df = df.copy()
    X = np.stack([df["X"], df["Y"]], axis=1)
    current, _ = tracemalloc.get_traced_memory()
    time0 = time.time()
    model = hdbscan.HDBSCAN(min_cluster_size=100, min_samples=1, cluster_selection_method='eom', core_dist_n_jobs=4, prediction_data=True)
    yhat = model.fit(X)
    # Soft clustering: assign every point (including HDBSCAN "noise") to its
    # most probable cluster via the membership vectors.
    soft_clusters = hdbscan.all_points_membership_vectors(yhat)
    labels = [np.argmax(x) for x in soft_clusters]
    time1 = time.time() - time0
    _, peak = tracemalloc.get_traced_memory()
    mem = (peak - current)/1024./1024.  # bytes -> MB
    df["Cluster"] = [i+1 if i > -1 else -1 for i in labels ] # re-number labels to make them human-readable (1-based)
    print("HDBSCAN soft clustering time: {} sec".format(time1))
    print("HDBSCAN soft clustering RAM: {} Mb".format(mem))
    print("HDBSCAN cluster number: {}".format(df["Cluster"].max()))
    print("==================================================")
    # check cluster sizes
    print(df.groupby("Cluster")["Cluster"].count())
    return time1, mem, df, model

_, _, df_HDBSCAN, HDBSCAN_model = cluster_HDBSCAN_softclustering(df_UMAP)
# -
tracemalloc.stop()
# ## 3.1 Draw single_linkage_tree of HDBSCAN (optional, slow)
fig, ax = plt.subplots()
HDBSCAN_model.single_linkage_tree_.plot(cmap='viridis', colorbar=True)
plt.savefig("HDBSCAN_single_linkage_tree.pdf")
plt.show()
# ## 3.2 Draw condensed_tree of HDBSCAN (optional, suggested)
#
# The condensed tree helps us understand how well HDBSCAN worked. In this tree, we can clearly see that HDBSCAN split the dataset into three major leaves/groups (blue, orange, and green). We can also notice that the blue and green leaves are more condensed, which means the sequences within each of those two leaves are more similar to one another. In contrast, the remaining leaf has a more degenerate pattern.
fig, ax = plt.subplots()
HDBSCAN_model.condensed_tree_.plot(select_clusters=True, selection_palette=sns.color_palette())
plt.savefig("HDBSCAN_condensed_tree.pdf")
plt.show()
# ## 4 Draw figure
# +
fig, ax = plt.subplots(1, figsize=(4,4))

def draw_clusters(ax, df, fn):
    """Scatter the UMAP embedding colored by cluster, annotate cluster ids,
    overlay a KDE density contour, and save *df* to CSV file *fn* (side effect)."""
    clusters = set(df["Cluster"].tolist())
    for i in clusters:
        subdf = df[df["Cluster"]==i]
        if i == -1:
            # Noise points: gray, no cluster-id annotation.
            ax.scatter(subdf["X"], subdf["Y"], s=1, c="lightgray", lw=None, label=i)
        else:
            ax.scatter(subdf["X"], subdf["Y"], s=1, alpha=1, lw=None, label=i)
            # Annotate the cluster id at the cluster centroid.
            c_X = subdf["X"].mean()
            c_Y = subdf["Y"].mean()
            ax.annotate("{}".format(i), xy=(c_X, c_Y), color="k", ha="center", va="center", size=9) # , size=13
    xmin, xmax = ax.get_xlim()
    ymin, ymax = ax.get_ylim()
    # draw density contour from a Gaussian KDE on a 100x100 grid
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([df["X"], df["Y"]])
    kernel = scipy.stats.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)
    c = ax.contour(xx, yy, f, linewidths=0.5, colors="k")
    ax.set_xlabel("UMAP-1")
    ax.set_ylabel("UMAP-2")
    ax.xaxis.set_major_locator(ticker.MultipleLocator(3))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(3))
    # NOTE(review): writing the CSV inside a drawing helper is a surprising
    # side effect — callers rely on it, so it is kept.
    df.to_csv("{}".format(fn))

draw_clusters(ax, df_HDBSCAN, "Human_dev.motif.F10.clustered.csv")
plt.tight_layout()
plt.savefig("Human_dev.pdf")
plt.show()
# -
# ## 5.1 Extract fasta and draw motif (require MEME)
#
# Here we use a automatic function to finish fasta extraction and motif drawing.
#
# Our strategy is very fast. With clustering, we can focus on much clear targets in configuration of a run of MEME. We can learn the window range, the position, and other important information about the motif candidates.
extract_fasta_and_draw_motifs("Human_dev", df_HDBSCAN, motif_column=("INFO", "motif_F10"))
show_logos_cols("./Human_dev/logos_bits_png/", cols=3)
# ## 5.2 Motif discovery with MEME (require MEME)
#
# Of course, no method is 100% perfect. With MEME, we can check if we missed something in our clusters. We can also compare the results from MEME **before** and **after** clustering. Without the aid of clustering, MEME works hard in comprehensively uncover all of the motifs.
# +
# run meme with all fasta output
# skipped
#for fn in os.listdir("./Human_dev/fasta/"):
#run_meme("Human_dev", "./Human_dev/fasta/", fn, mode=["streme", "meme", "ce", "cd"], nmotifs=2)
# +
# check all sequences
# skipped
#os.system("cat ./Human_dev/fasta/*fa > ./Human_dev/meme/merged.fa")
# STREME
# #!streme --rna --pvt 0.05 -nmotifs 10 --minw 5 --maxw 10 --objfun cd --oc ./Human_dev/meme/merged_streme -p ./Human_dev/meme/merged.fa
# Normal mode
# #!meme -rna -nmotifs 8 -minw 5 -maxw 10 -oc ./Human_dev/meme/merged_meme ./Human_dev/meme/merged.fa
# CE mode
# #!meme -rna -objfun ce -nmotifs 8 -cefrac 0.1 -minw 5 -maxw 10 -oc ./Human_dev/meme/merged_meme_ce ./Human_dev/meme/merged.fa
# CD mode
# #!meme -rna -objfun cd -nmotifs 8 -minw 5 -maxw 10 -oc ./Human_dev/meme/merged_meme_cd ./Human_dev/meme/merged.fa
|
m5C study/Human_oocyte/Human_development.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Federated Keras MNIST Tutorial
# +
#Install Tensorflow and MNIST dataset if not installed
# !pip install tensorflow==2.5.0
#Alternatively you could use the intel-tensorflow build
# # !pip install intel-tensorflow==2.3.0
# +
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import backend as K
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPool2D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import mnist
import openfl.native as fx
from openfl.federated import FederatedModel,FederatedDataSet
tf.config.run_functions_eagerly(True)
tf.random.set_seed(0)
np.random.seed(0)
# +
def test_intel_tensorflow():
    """
    Check if Intel version of TensorFlow is installed
    """
    import tensorflow as tf
    print("We are using Tensorflow version {}".format(tf.__version__))
    major_version = int(tf.__version__.split(".")[0])
    if major_version >= 2:
        # TF2 exposes the oneDNN/MKL probe via this private pywrap module.
        from tensorflow.python import _pywrap_util_port
        print("Intel-optimizations (DNNL) enabled:",
              _pywrap_util_port.IsMklEnabled())
    else:
        # NOTE(review): on TF1 no flag value is printed after the colon —
        # confirm whether this was intentional.
        print("Intel-optimizations (DNNL) enabled:")

test_intel_tensorflow()
# -
# After importing the required packages, the next step is setting up our openfl workspace. To do this, simply run the `fx.init()` command as follows:
#Setup default workspace, logging, etc.
fx.init('keras_cnn_mnist')
# Now we are ready to define our dataset and model to perform federated learning on. The dataset should be composed of numpy arrays. We start with a simple fully connected model that is trained on the MNIST dataset.
# +
#Import and process training, validation, and test images/labels
# Set the ratio of validation imgs, can't be 0.0
VALID_PERCENT = 0.3
(X_train, y_train), (X_test, y_test) = mnist.load_data()
split_on = int((1 - VALID_PERCENT) * len(X_train))
train_images = X_train[0:split_on,:,:]
train_labels = to_categorical(y_train)[0:split_on,:]
valid_images = X_train[split_on:,:,:]
valid_labels = to_categorical(y_train)[split_on:,:]
test_images = X_test
test_labels = to_categorical(y_test)
def preprocess(images):
    """Scale pixel values into [-0.5, 0.5] and flatten each image to a 1-D vector."""
    scaled = (images / 255) - 0.5
    return scaled.reshape(scaled.shape[0], -1)
# Preprocess the images.
train_images = preprocess(train_images)
valid_images = preprocess(valid_images)
test_images = preprocess(test_images)
feature_shape = train_images.shape[1:]
classes = 10
class UnbalancedFederatedDataset(FederatedDataSet):
    """FederatedDataSet that shards the data across collaborators with a
    lognormal (non-IID, unbalanced) label distribution."""

    def split(self, num_collaborators, shuffle=True, equally=False):
        # Partition train and validation sets with the same lognormal scheme,
        # then wrap each shard in its own FederatedDataSet.
        # (shuffle/equally are accepted for interface compatibility but unused.)
        train_idx = self.split_lognormal(self.y_train, num_collaborators)
        X_train = np.array([self.X_train[idx] for idx in train_idx])
        y_train = np.array([self.y_train[idx] for idx in train_idx])
        valid_idx = self.split_lognormal(self.y_valid, num_collaborators)
        X_valid = np.array([self.X_valid[idx] for idx in valid_idx])
        y_valid = np.array([self.y_valid[idx] for idx in valid_idx])
        return [
            FederatedDataSet(
                X_train[i],
                y_train[i],
                X_valid[i],
                y_valid[i],
                batch_size=self.batch_size,
                num_classes=self.num_classes
            ) for i in range(num_collaborators)
        ]

    def split_lognormal(self, labels, num_collaborators):
        """Return one index array per collaborator.

        Each collaborator first receives 5 samples from each of 2 consecutive
        classes, then extra samples in lognormally-distributed proportions.
        NOTE(review): the ranges below hard-code one-hot `labels` and
        num_collaborators == 1000 (trange(1000), shape (10,100,2)) — confirm
        before reusing with other settings.
        """
        from tqdm import trange
        labels = np.argmax(labels, axis=1)  # one-hot -> integer class ids
        # 10 deterministic seed samples per collaborator: 5 from each of
        # classes col % num_classes and (col + 1) % num_classes.
        idx = [[np.nonzero(labels == (col + j) % self.num_classes)[0][np.arange(5) + (col // 10 * 10 + 5 * j)] \
            for j in range(2)] for col in range(num_collaborators)]
        idx = [np.hstack(tup) for tup in idx]
        assert all([len(i) == 10 for i in idx]), 'All collaborators should have 10 elements at this stage'
        # Lognormal proportions over the remaining (unseeded) samples per class.
        props = np.random.lognormal(0, 2.0, (10,100,2))
        props = np.array([[[len(np.nonzero(labels==label)[0])-1000]] for label in range(10)])*props/np.sum(props,(1,2), keepdims=True)
        #idx = 1000*np.ones(10, dtype=np.int64)
        for user in trange(1000):
            for j in range(2):
                l = (user+j)%10
                num_samples = int(props[l,user//10,j])
                # Only append while enough samples of class l remain unassigned.
                if np.count_nonzero(labels[np.hstack(idx)] == l) + num_samples < len(np.nonzero(labels==l)[0]):
                    idx_to_append = np.nonzero(labels == (user + j) % 10)[0][np.arange(num_samples) + np.count_nonzero(labels[np.hstack(idx)] == l)]
                    idx[user] = np.append(idx[user], idx_to_append)
        return idx
fl_data = UnbalancedFederatedDataset(train_images,train_labels,valid_images,valid_labels,batch_size=32,num_classes=classes)
# -
from openfl.utilities.optimizers.keras import FedProxOptimizer

def build_model(input_shape,
                num_classes,
                **kwargs):
    """
    Define the model architecture.
    Args:
        input_shape (numpy.ndarray): The shape of the data
        num_classes (int): The number of classes of the dataset
    Returns:
        tensorflow.python.keras.engine.sequential.Sequential: The model defined in Keras
    """
    # A single softmax layer (multinomial logistic regression). FedProx adds
    # a proximal term (strength mu) that keeps each collaborator's local
    # update close to the global model.
    model = Sequential()
    model.add(tf.keras.Input(shape=input_shape))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=FedProxOptimizer(mu=1),
                  metrics=['accuracy'])
    return model
#Create a federated model using the build model function and dataset
fl_model = FederatedModel(build_model, data_loader=fl_data)
# The `FederatedModel` object is a wrapper around your Keras, Tensorflow or PyTorch model that makes it compatible with openfl. It provides built in federated training and validation functions that we will see used below. Using it's `setup` function, collaborator models and datasets can be automatically defined for the experiment.
# +
collaborator_models = fl_model.setup(num_collaborators=1000)
collaborators = {f'col{col}':collaborator_models[col] for col in range(len(collaborator_models))}#, 'three':collaborator_models[2]}
# +
#Original MNIST dataset
print(f'Original training data size: {len(train_images)}')
print(f'Original validation data size: {len(valid_images)}\n')
#Collaborator one's data
print(f'Collaborator one\'s training data size: {len(collaborator_models[0].data_loader.X_train)}')
print(f'Collaborator one\'s validation data size: {len(collaborator_models[0].data_loader.X_valid)}\n')
#Collaborator two's data
print(f'Collaborator two\'s training data size: {len(collaborator_models[1].data_loader.X_train)}')
print(f'Collaborator two\'s validation data size: {len(collaborator_models[1].data_loader.X_valid)}\n')
#Collaborator three's data
#print(f'Collaborator three\'s training data size: {len(collaborator_models[2].data_loader.X_train)}')
#print(f'Collaborator three\'s validation data size: {len(collaborator_models[2].data_loader.X_valid)}')
# -
# We can see the current plan values by running the `fx.get_plan()` function
#Get the current values of the plan. Each of these can be overridden
import json
print(json.dumps(fx.get_plan(), indent=4, sort_keys=True))
# Now we are ready to run our experiment. If we want to pass in custom plan settings, we can easily do that with the `override_config` parameter
#Run experiment, return trained FederatedModel
final_fl_model = fx.run_experiment(collaborators,override_config={'aggregator.settings.rounds_to_train':5, 'collaborator.settings.opt_treatment': 'CONTINUE_GLOBAL'})
#Save final model and load into keras
final_fl_model.save_native('final_model')
model = tf.keras.models.load_model('./final_model')
#Test the final model on our test set
model.evaluate(test_images,test_labels)
# +
import matplotlib.pyplot as plt
import numpy as np
plt.figure(figsize=(9,6), dpi=150)
plt.title('Keras MNIST unbalanced split')
plt.plot([0.07627802075538784, 0.07518334008473902, 0.09541350667830556, 0.13141966053564103, 0.15887578643299638], label='FedAvg')
plt.plot([0.07627802075538784, 0.07518334008473902, 0.09541350667830556, 0.1314459763141349, 0.15887578643299638], linestyle='--', label='FedProx (mu=1e-2)')
plt.plot([0.07627802075538784, 0.0751056043850258, 0.09555227747093886, 0.131649036151357, 0.15966261748969554], linestyle='--', label='FedProx (mu=1e-1)')
plt.plot([0.07627802075538784, 0.07517912408802659, 0.09641592293512076, 0.13676991989742965, 0.1684917744528502], linestyle='--', label='FedProx (mu=1e1)')
plt.legend()
plt.xticks(range(5))
plt.show()
# -
|
openfl-tutorials/Federated_FedProx_Keras_MNIST_Tutorial.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Tutorial de instalación de Anaconda y uso de jupyter notebooks con Bash kernel
#
# Para instalar anaconda pueden revisar los requisitos específicos para su sistema operativo en la página oficial https://www.anaconda.com/products/individual. Yo me instalé esto en Ubuntu 20 y no me dio problemas. Si tienes problemas con Windows, escríbenos y te guiamos, pero preferiblemente usa Linux.
#
# Este documento se escribe con **Markdown**, una simplificación para HTML usado en programación web. https://www.markdownguide.org/
# Mi procedimiento para **instalar Anaconda**, **crear un ambiente usando conda** e instalar **jupyter lab** y el **kernell de Bash** en el ambiente creado fue el siguiente:
# 1. Descargué el file **Anaconda3-2020.11-Linux-x86_64.sh** desde https://www.anaconda.com/products/individual en mi carpeta de descargas `/home/ju/Downloads/`. También pueden hacerlo con **curl** .
#
# 2. `$ sudo apt-get update`
#
# 3. `$ sha256sum /path/to/file/Anaconda3-2020.11-Linux-x86_64.sh` para comprobar la integridad del archivo de instalación. Este comando va a arrojar un número que es un código hash que se puede comparar con el de Anaconda en https://docs.anaconda.com/anaconda/install/hashes/.
#
# 4. `$ bash /path/to/file/Anaconda3-2020.11-Linux-x86_64.sh` para empezar la instalación. Aceptamos todas las condiciones de instalación hasta que salga el mensaje **Thank you for installing Anaconda 3!**.
#
# 5. `$ source ~/.bashrc` para actualizar el Terminal.
#
# 6. `$ conda info` para verificar la instalación de Anaconda.
#
# 7. Vamos a ver que entre paréntesis al inicio de la primera línea del prompt del Terminal les sale **(base)**. A mi me sale algo así `(base) ju@JU:`. Puede que sea diferente si tienen otra configuración visual de su shell. Esto indica que la shell tiene activa la versión de Python que vino con su versión de Anaconda. En caso de que no quisieran esto porque tienen más versiones de Python instaladas en su computador pueden usar el comando `$ conda config --set auto_activate_base False`. Yo si lo usé porque sino se me dañan unas configuraciones.
#
# 8. `$ conda create --name bash python=3.8` para crear un ambiente de nombre **bash**. Ese es el nombre que escogí para el ambiente porque ahí solo planeo tener cosas relacionadas a bash, pero puedes poner otro nombre si gustas. Es importante colocar la versión de Python porque al dejar vacío ese campo automáticamente se crea un ambiente para Python 2.7, pero esta versión está descontinuada. Usé la versión Python 3.8 porque es la más reciente que he usado y que es estable. Quizá luego debamos cambiar a Python 3.7 pero espero que no. Igual solo implica crear un ambiente.
#
# 9. `$ conda env list` para ver todos los ambientes creados. Debería estar en la lista **bash** o con el nombre que le hayan puesto.
#
# 10. Ahora vamos a crear una carpeta exclusiva para trabajar con este ambiente. En mi caso yo creé la carpeta `/home/ju/Documents/rsg`. Aquí guardo todo lo del RSG. Luego accedemos `$ cd /home/ju/Documents/rsg`.
#
# 11. `$ conda activate bash` dentro de la carpeta que acabamos de crear. Para desactivar el ambiente dentro de la carpeta que creamos solo basta con `$ conda deactivate`. Lo importante de usar ambientes de Python es que se modulariza el uso del software. Es importante porque ya que Python es un lenguaje de programación open source, siempre hay actualizaciones al software y a veces es necesario usar una versión no tan actualizada. Así, cada ambiente contiene lo estrictamente necesario para algún proyecto en particular y se evita dañar los proyectos por conflictos de versiones. Podríamos **no haber creado** un ambiente para instalar todo directamente en el ambiente **base**. En mi experiencia es mejor siempre crear un nuevo ambiente para cada proyecto y evitar topar el ambiente base porque así se evitan conflictos que pueden ser complicados de resolver debido a dependencias.
#
# 12. `$ conda install -n bash -c conda-forge jupyterlab` para **instalar jupyterlab**, una versión moderna de jupyter notebooks que se parece más a un IDE. El comando `-n bash` especifica que vamos a instalar el paquete de jupyterlab en el ambiente que creamos antes. El comando `-c conda-forge` indica de qué *channel* conda va a obtener el paquete jupyterlab. En este caso es **conda-forge**, como se especifica en https://jupyter.org/install. Daba lo mismo usar el comando `$ conda install --name bash --channel conda-forge jupyterlab`.
#
# 13. `$ conda install -n bash -c conda-forge bash_kernel` para instalar el **kernel de Bash** en el ambiente que creamos, como se indica en https://anaconda.org/conda-forge/bash_kernel
#
# 14. `$ jupyter lab` dentro de la carpeta donde hayan activado el ambiente en donde instalaron jupyter. También pueden intentar `$ jupyter notebook`.
# Ahora ya podemos crear un notebook :) Podemos también probar el funcionamiento del kernel. Jupyter se abrirá en el explorador de Internet default. Debe quedar algo así:
#
# 
# Como se puede ver, hay 2 opciones para crear un Notebook: Python 3 y Bash. En este caso dan click en Bash y crean un nuevo jupyter notebook. Este tiene el formato `.ipynb`. Ahora vamos a correr un script de bash. Primero colocamos el cursor en la siguiente celda de código y damos click en el **boton de play** si estamos en `jupyterlab` y en el botón de **Run** si estamos en `jupyter notebook`.
#!/bin/bash
# Basic while loop
counter=1
while [ $counter -le 10 ]
do
echo $counter
((counter++))
done
echo All done
# Y eso es todo :)
|
setup-conda/.ipynb_checkpoints/setup_conda-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/madsouza10/Firmware/blob/master/RegressaoLinear_2_Curos.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="RNf05wpefVLt" colab_type="code" colab={}
import numpy as np
# + id="gdAeoPtffxRD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="cc7b7516-c7f1-46d3-9473-dbc0e4217546"
X = np.array( [[18], [23], [28], [33], [38], [43], [48], [53], [58], [63] ])
X
# + id="foPjjo2yfz9B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="f0d6b174-b63d-46d8-a9cd-3de6426cac49"
y = np.array([[871], [1132], [1042], [1356], [1488], [1638], [1569], [1754], [1866], [1900]])
y
# + id="TzQNiz8ZgTJ2" colab_type="code" colab={}
from sklearn.preprocessing import StandardScaler
Scaler_X = StandardScaler()
X = Scaler_X.fit_transform(X)
# + id="QxYdkmiBhZ_B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="2566a0a9-4754-41f0-9843-04b8105787be"
X
# + id="qJ-OuPGIhe_Z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="6dcd8f67-cd04-47cb-fef7-149da1537219"
Scaler_y = StandardScaler()
y = Scaler_y.fit_transform(y)
y
# + id="Rpznf3CHh4PN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="0cadbd9e-dac3-45a7-e56f-d09c9348fd55"
import matplotlib.pyplot as plt
plt.scatter(X, y)
# + id="tq348HHtiPBn" colab_type="code" colab={}
#formula de regressão linear simple
#y =b0 + b1 * x
# + id="3d5WmEhQi0Ly" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3d408d0b-2b4c-4341-95d3-6fcbdb00e90d"
np.random.seed(0)
np.random.rand(2)
# + id="xLfD6HWMjGXg" colab_type="code" colab={}
import tensorflow as tf
# + id="UKTYCPuJjGq6" colab_type="code" colab={}
b0 = tf.Variable(0.54)
b1 = tf.Variable(0.71)
# + id="E8SnVUu3jQ76" colab_type="code" colab={}
erro = tf.losses.mean_squared_error(y, (b0 + b1*X))
otimizador = tf.train.GradientDescentOptimizer(learning_rate = 0.001)
treinamento = otimizador.minimize(erro)
init = tf.global_variables_initializer()
# + id="hCQP8quIlXaG" colab_type="code" colab={}
with tf.Session() as sess:
sess.run(init)
#print(sess.run(b0))
#print(sess.run(b1))
for i in range(1000):
sess.run(treinamento)
b0_final, b1_final = sess.run([b0, b1])
# + id="RCKIOHV1pJFJ" colab_type="code" colab={}
# NOTE(review): this cell previously contained the bare name `b`, which is
# undefined (only `b0`/`b1` and `b0_final`/`b1_final` exist) and raised a
# NameError when the cell ran; removed.
# + id="Sp-4nLwGpJWF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="34978a8f-0a8f-42ac-ff98-f3cc5157dc93"
b0_final
# + id="a06MFujzpNpy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e56010ef-d861-4400-a47d-a8dc63da1a05"
b1_final
# + id="JhCOaNehpPvW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="494e1c33-a732-4ea8-f409-3382a1ce21a3"
previsoes = b0_final + b1_final*X
previsoes
# + id="h277Jqf0pYy7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="33c39c54-0520-47f8-9e6b-0bca79bb6c27"
type(previsoes)
# + id="fhtzatkMpcx8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="bd7e283e-a163-4df0-f7b8-730ca7e97d82"
plt.plot(X, previsoes, color = 'red')
plt.plot(X,y,'o')
# + id="rddVLA3iqERm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="31d4b889-6f9e-4664-cb20-be284ff0a8c8"
Scaler_X.transform([[40]])
# + id="_SDRkflfpc2n" colab_type="code" colab={}
previsao= Scaler_y.inverse_transform(b0_final + b1_final*Scaler_X.transform([[40]]))
# + id="SBoEQJULpdAK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3c0c8b93-bc35-400b-8643-8167e29363be"
previsao
# + id="ZjcEHv7lp9Q1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="9320f8fb-f18c-4561-cb66-a9fa90e2e577"
y1 = Scaler_y.inverse_transform(y)
y1
# + id="U5zrzmVVsSaG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 185} outputId="314bd376-3553-4314-9149-aad96c52d224"
previsoes1 = Scaler_y.inverse_transform(previsoes)
previsoes1
# + id="CcUkl4MTsfsp" colab_type="code" colab={}
from sklearn.metrics import mean_absolute_error, mean_squared_error
mae = mean_absolute_error(y1, previsoes1)
mse = mean_squared_error(y1, previsoes1)
# + id="piU6LyM_s-5g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fb0d1687-bfe1-4020-993a-e7b89084f02e"
mae
# + id="weoQUZxUtCRW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="02ef3a99-920a-4728-e1b2-bf55304279a1"
mse
|
RegressaoLinear_2_Curos.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import matplotlib as mpl
import seaborn as sns
import statsmodels.api as sm
import os
from scipy.stats import ttest_ind,norm
# %matplotlib inline
pd.options.display.float_format = '{:.3f}'.format
bbb=pd.read_csv('BBB.csv')
bbb.head()
bbb['buyer'].value_counts()['yes']
bbb.describe()
bbb.info()
bbb['total_'].sum()
buy_book=bbb[['child','youth','cook','do_it','refernce','art','geog']]
corrMatrix = buy_book.corr()
plt.figure(figsize=(8, 8))
sns.heatmap(corrMatrix, annot=True, square=True) # annot->put numbers, square->make it square
bbb[['total_','purch']].corr()
bbb['female']=(bbb['gender']=="F")*1
bbb['buyer_dummy']=(bbb['buyer']=="yes")*1
bbb.describe()
x=bbb[['last','total_','female','child','youth','cook','do_it','refernce','art','geog']]
y=bbb['buyer_dummy']
x=sm.add_constant(x)
logit = sm.Logit(y, x)
result=logit.fit()
summary=result.summary()
fig,ax= plt.subplots(figsize=(7,10))
ax.axis('off')
ax.axis('tight')
plt.text(0.01, 0.05, str(summary), {'fontsize': 15}, fontproperties = 'monospace')
plt.savefig('output.png')
bbb['predicted']=result.predict()
bbb.head()
bbb.describe()
def odds_ratios(result_logit):
    """Build an odds-ratio table from a fitted logit result.

    Parameters
    ----------
    result_logit : fitted-logit-like object exposing ``params`` and ``bse``
        as pandas Series. The first entry (the intercept) is skipped.

    Returns
    -------
    pd.DataFrame
        Odds ratios, delta-method standard errors, z statistics, two-sided
        p-values (rounded to 3 decimals) and a 95% confidence interval.
    """
    # Odds ratio for each coefficient, intercept excluded.
    # (.iloc makes the positional slice explicit; Series[1:] is deprecated.)
    odds = np.exp(result_logit.params.iloc[1:])
    # Delta-method standard error of the odds ratio: exp(b) * se(b).
    se = odds * result_logit.bse.iloc[1:]
    # z statistic for H0: odds ratio == 1.
    z = abs(odds - 1) / se
    # Two-sided p-value, rounded to three decimals.
    pvals = np.round(norm.sf(z) * 2 * 1000) / 1000
    # 95% CI bounds use z_{0.975} = 1.96 (the original used 1.94, a typo,
    # which silently narrowed the interval labeled [0.025, 0.975]).
    lconf = odds - 1.96 * se
    rconf = odds + 1.96 * se
    return pd.DataFrame(
        {'Odds ratios': odds, 'std err': se, 'z': z, 'P>|z|': pvals,
         '[0.025': lconf, '0.975]': rconf},
        index=result_logit.params.index[1:],
    )
odds_ratioodds_ratios(result)
def ntile(a,n):
    """Assign each value of *a* to one of *n* quantile bins.

    Bin k (0-based) contains values greater than the k-th quantile cutoff
    and at most the (k+1)-th. NaN inputs map to NaN (which also makes the
    returned array float-typed).
    """
    cutoffs = a.quantile(np.linspace(1/n, 1, n))
    bins = []
    for value in a:
        if np.isnan(value):
            bins.append(np.nan)
            continue
        # Count how many cutoffs this value strictly exceeds.
        rank = 0
        for cut in cutoffs:
            if value <= cut:
                break
            rank += 1
        bins.append(rank)
    return np.array(bins)
bbb['prob_dec']=ntile(bbb['predicted'],10)
bbb['prob_dec'] = 9-bbb['prob_dec']
bbb[ ['buyer_dummy','prob_dec'] ].groupby('prob_dec').describe()
bbb[ ['predicted','prob_dec'] ].groupby('prob_dec').mean().plot(kind='bar')
plt.savefig('response_rate_by_decile.png')
# 3. Generate a report showing number of customers, the number of buyers of "The Art
# History of Florence' and the response rate to the offer by decile for the random sample
# (i.e. the 50,000 customers) in the dataset.
Cust_report=bbb[['buyer','prob_dec']].groupby('prob_dec').count()
Cust_report['#ofcust']=bbb[['buyer','prob_dec']].groupby('prob_dec').count()
Cust_report['#ofbuyer']=bbb[['buyer_dummy','prob_dec']].groupby('prob_dec').sum()
Cust_report['responserate']=Cust_report['#ofbuyer']/Cust_report['#ofcust']
Cust_report=Cust_report.drop("buyer", axis=1)
Cust_report
fig,ax= plt.subplots(figsize=(8,8))
ax.axis('off')
ax.axis('tight')
ax.table(cellText=Cust_report.values,
colLabels=Cust_report.columns,
loc='center',
bbox=[0,0,1,1])
plt.savefig('table.png')
# 4. For the 50,000 customers in the dataset, generate a report showing the mean values of
# the following variables by probability of purchase decile:
# Total $ spent
# Months since last purchase, and
# Number of books purchased for each of the seven categories (i.e., children, youth,
# cookbooks, do-it-yourself, reference, art and geography).
bbb[ ['total_','last','prob_dec','child','youth','cook','do_it','refernce','art','geog'] ].groupby('prob_dec').mean()
# Part III: Lifts and Gains
# 1. Use the information from the report in II.3 above to create a table showing the lift and
# cumulative lift for each decile. You may want to use Excel for these calculations.
# 2. Create a chart showing the cumulative lift by decile.
# 3. Use the information from the report in II.3 above to create a table showing the gains and
# cumulative gains for each decile. You may want to use Excel for these calculations.
# 4. Create a chart showing the cumulative gains by decile along with a reference line
# corresponding to 'no model'.
#
# Decile-level lift table: per-decile counts, cumulative counts, response
# rates, and (cumulative) lift relative to the overall response rate.
Lift_calc=bbb[['buyer','prob_dec']].groupby('prob_dec').count()
Lift_calc['#ofcust']=bbb[['buyer','prob_dec']].groupby('prob_dec').count()
Lift_calc['cum#ofcust']=Lift_calc['#ofcust'].cumsum()
Lift_calc['%cum#ofcust']=Lift_calc['cum#ofcust']/(Lift_calc['#ofcust'].sum())
Lift_calc['#ofbuyer']=bbb[['buyer_dummy','prob_dec']].groupby('prob_dec').sum()
Lift_calc['cum#ofbuyer']=Lift_calc['#ofbuyer'].cumsum()
# Use Lift_calc's own counts here (the original divided by Cust_report's
# column; the values are identical, but the cross-table dependency was
# accidental and breaks if Cust_report changes).
Lift_calc['responserate']=Lift_calc['#ofbuyer']/Lift_calc['#ofcust']
Lift_calc['cum_responserate']=Lift_calc['cum#ofbuyer']/Lift_calc['cum#ofcust']
# Overall (no-model) response rate used as the lift baseline.
avr_resrate=(bbb['buyer_dummy'].sum())/(bbb['buyer'].count())
Lift_calc['Lift']=(Lift_calc['responserate']/avr_resrate)*100
Lift_calc['CumLift']=(Lift_calc['cum_responserate']/avr_resrate)*100
Lift_calc=Lift_calc.drop('buyer', axis=1)
Lift_calc
Lift_calc['CumLift'].plot(kind='line',marker='o')
plt.savefig('cumlift.png')
Lift_calc['Gains']=Lift_calc['#ofbuyer']/(Lift_calc['#ofbuyer'].sum())
Lift_calc['CumGains']=Lift_calc['Gains'].cumsum()
Lift_calc['prob_dec']=bbb['prob_dec']
CumGainData=Lift_calc[['cum#ofcust','Gains','CumGains']]
CumGainData
df1=Lift_calc[['cum#ofcust','Gains','CumGains']]
df2= pd.DataFrame(data=[[0,0,0]],columns=['cum#ofcust','Gains','CumGains'],index=['origin'])
df1=df1.append(df2, ignore_index = False)
df1=df1.sort_values(by=['CumGains'])
df1=df1.rename(index={9: 10,8: 9,7: 8,6: 7,5: 6,4: 5,3:4,2:3,1:2,0: 1,'origin':0})
df1
plt.plot(df1['CumGains'],marker='o' )
plt.plot([0,10],[0,1],'g',)
plt.xlabel('prob_dec')
plt.ylabel('Cum Gains')
plt.legend(['prob_dec model',"No model"], loc=4)
plt.savefig('compare.png')
# Part IV: Profitability Analysis
# Use the following cost information to assess the profitability of using logistic regression to
# determine which of the remaining 500,000 customers should receive a specific offer:
#
# Cost to mail offer to customer:$.50
#
# Selling price (shipping included):$18.00
#
# Wholesale price paid by BookBinders:$9.00
#
# Shipping costs:$3.00
# 1. What is the breakeven response rate?
# 2. For the customers in the dataset, create a new variable (call it "target") with a value of 1
# if the customer's predicted probability is greater than or equal to the breakeven response
# rate and 0 otherwise. PTO
# Page 4
# 3. Considering that there are 500,000 remaining customers, generate a report summarizing
# the number of customers, the expected number of buyers of 'The Art History of Florence' and the
# expected response rate to the offer by the "target" variable.
# 4. For the 500,000 remaining customers, what would the expected gross profit (in dollars,
# and also as a percentage of gross sales) and the expected return on marketing
# expenditures have been if BookBinders had mailed the offer to buy "The Art History of
# Florence" only to customers with a predicted probability of buying that was greater than
# or equal to the breakeven rate?
# The campaign is effective as lons as (18-9-3)*response rate > 0.5
# thus, response rate must be higher than 0.5/6
# Break-even: mailing is profitable while (18 - 9 - 3) * response > 0.50,
# i.e. the $6 unit margin must cover the $0.50 mailing cost.
BER=0.5/6
print("Break even response rate is", end=' '), print('{:.3g}'.format(BER))
# Target customers whose predicted purchase probability covers the cost.
bbb['target']=(bbb['predicted']>=BER)*1
bbb['target'].mean()
# Scale the 50,000-customer sample counts by 10 to project onto the
# 500,000 remaining customers.
target=(bbb[['acctnum','target']].groupby('target').count())*10
target['#ofCust']=(bbb[['acctnum','target']].groupby('target').count())*10
target['E#ofbuyer']=(bbb[['buyer_dummy','target']].groupby('target').sum())*10
target['E_res_rate']=target['E#ofbuyer']/target['#ofCust']
target=target.drop('acctnum',axis=1)
target
# Expected profit / sales / ROI for mailing only the targeted group.
E_profit=(155600*0.21356*6)-(0.5*155600)
E_sales=344400*0.0348*6
ROI=(E_profit/(0.5*155600))*100
print('Expected profit is', end=" "), print('{:.3f}'.format(E_profit))
print('Expected sales is', end=" "), print('{:.3f}'.format(E_sales))
print('Expected ROI is', end=" "), print('{:.3f}'.format(ROI), end=""), print('%')
|
BBB_logit.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to calculate Sharpness from .wav file (time-varying signal)
# MoSQITo is a unified and modular development framework of key sound quality metrics favoring reproducible science and efficient shared scripting among engineers, teachers and researchers community.
#
# This tutorial explains how to calculate the acoustic sharpness of a time-varying signal from its loudness and specific loudness values (Zwicker method) as described in DIN 45692_2009E.
# +
# Add MoSQITo to the Python path
import sys
sys.path.append('..')
# Import useful packages
import numpy as np
import matplotlib.pyplot as plt
import IPython
# Import MoSQITo module
from mosqito.functions.shared.load import load
from mosqito.functions.sharpness.comp_sharpness import comp_sharpness
# -
# For this tutorial, the test signal n°16 from ISO 532-1 annex B5 is used. It is the recording of a hairdryer that can be heard by using the command below. According to the standard, for the calibration of the WAVE file, 0 dB (relative to full scale) shall correspond to a sound pressure level of 100 dB.
IPython.display.Audio("../mosqito/validations/loudness_zwicker/data/ISO_532-1/Annex B.5/Test signal 16 (hairdryer).wav")
# The signal is loaded with the 'load' function, with the parameter is_stationary = False to indicate it is a time-varying signal.
# Load signal
sig, fs = load(False,"../mosqito/validations/loudness_zwicker/data/ISO_532-1/Annex B.5/Test signal 16 (hairdryer).wav", calib = 2 * 2**0.5 )
# Then the sharpness is calculated with the 'comp_sharpness' function which automatically does the loudness calculation needed. The method chosen to do the calculation is given as a parameter (the parameter 'all' launches all the different calculation methods).
#
# It is necessary to cut the transient effect at the beginning of the analysis for the number of seconds specified (parameter' skip'). This effect is due to the digital filters which can modify the result.
# Sharpness calculation
sharpness = comp_sharpness(False, sig, fs, method='din', skip=0.2)
# The calculation being done, the results can be plotted or used for further analysis.
# Results plotting
S = sharpness['values']
method = sharpness['method']
time = np.linspace(sharpness['skip'], len(sig)/fs, len(S))
plt.title('Sharpness along time')
plt.plot(time, S, label=method)
plt.xlabel('Time [s]')
plt.ylabel('Sharpness [acum]')
plt.legend()
plt.show()
# Mosqito offers several other calculation methods for the sharpness, which can be computed by changing the method parameter.
# +
# Sharpness calculation
sharpness_a = comp_sharpness(False, sig, fs, method='aures', skip=0.2)
sharpness_b = comp_sharpness(False, sig, fs, method='bismarck', skip=0.2)
sharpness_f = comp_sharpness(False, sig, fs, method='fastl', skip=0.2)
# Comparison plot
plt.title('Sharpnesses comparison along time')
plt.plot(time, S, label=method)
plt.plot(time, sharpness_a['values'], label=sharpness_a['method'])
plt.plot(time, sharpness_b['values'], label=sharpness_b['method'])
plt.plot(time, sharpness_f['values'], label=sharpness_f['method'])
plt.xlabel('Time [s]')
plt.ylabel('Sharpness [acum]')
plt.legend()
plt.show()
# -
# The values slightly differ due to the weighting functions differences, except for Aures calculation which differ more (the weighting function takes into account the global loudness value).
|
tutorials/tuto5_Sharpness-time-varying.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
import matplotlib.pyplot as plt
import spnspecs
spnspecs.set_graph_specifications()
figpth = '../Figures'
width = 6.8
dpi = 300
# +
#spnspecs.set_graph_specifications()
# -
def calc_err(tp, datum=1000., ss=1e-5, bt=0., sf0=0.01, sf1=0.99, dt=1., area=1.):
    """Difference in the specific-storage flow term caused by a datum shift.

    Evaluates Q = rho * (sf0*h_old - sf1*h_new) twice -- once with heads
    relative to the cell bottom, once with both heads raised by *datum* --
    and returns Q0 - Q1. Accepts scalars or numpy arrays for *tp*.
    """
    thickness = tp - bt
    head_old = sf0 * thickness + bt   # head at the old saturated fraction
    head_new = sf1 * thickness + bt   # head at the new saturated fraction
    rho = ss * area * thickness / dt  # SS storage coefficient per unit time
    q_local = rho * (sf0 * head_old - sf1 * head_new)
    q_shifted = rho * (sf0 * (head_old + datum) - sf1 * (head_new + datum))
    return q_local - q_shifted
def err_function(tp, datum=1000., ss=1e-5, bt=0., sf0=0.01, sf1=0.99, dt=1., area=1.):
    """Storage-term error from referencing heads to an elevated datum.

    Returns rho*(sf1*b' - sf0*b') - rho*(sf1*b - sf0*b), where b' = bt + datum:
    the datum-shifted minus the local bottom-elevation contribution.
    """
    rho = ss * area * (tp - bt) / dt  # SS storage coefficient per unit time
    shifted_bottom = bt + datum
    q_shifted = rho * (sf1 * shifted_bottom - sf0 * shifted_bottom)
    q_local = rho * (sf1 * bt - sf0 * bt)
    return q_shifted - q_local
def err_function_simp(tp, datum=1000., ss=1e-5, bt=0., sf0=0.01, sf1=0.99, dt=1., area=1.):
    """Simplified closed form of the datum-shift storage error.

    Algebraically equal to rho * datum * (sf1 - sf0), i.e. the error grows
    linearly with the datum offset and the change in saturated fraction.
    """
    rho = ss * area * (tp - bt) / dt  # SS storage coefficient per unit time
    sf_change = sf0 - sf1
    return -rho * datum * sf_change
top = np.array([0.1, 0.5, 1., 5., 10., 50., 100., 500., 1000.])
err = calc_err(top, ss=1e-5)
plt.plot(top, err)
# +
fig, axes = plt.subplots(
nrows=1,
ncols=1,
constrained_layout=True,
figsize=(width, 0.3 * width),
)
ax = axes
for (sf0, color) in zip(
(0.0, 0.1, 0.2, 0.3, 0.4, 0.5),
("red", "orange", "green", "cyan", "blue", "black")
):
sf1 = 1. - sf0
label = r"S$_F^t$ = {:.2f} S$_F^{{told}}$ = {:.2f}".format(sf1, sf0)
ax.plot(
top,
calc_err(top, ss=1, sf0=sf0, sf1=sf1),
lw=1.25,
color=color,
label=label,
clip_on=False,
)
ax.set_xlim(0, 1000)
ax.set_ylim(0, 1e6)
ax.set_xlabel("Cell thickness, in length units")
ax.set_ylabel(r"Q$_{SS}$" + " Differences,\nin cubic length per time units")
spnspecs.graph_legend(
ax,
ncol=2,
labelspacing=0.2,
columnspacing=0.5,
)
spnspecs.remove_edge_ticks(ax);
# fpth = os.path.join(figpth, 'STOSsError.pdf')
# fig.savefig(fpth, dpi=dpi);
# +
fig, axes = plt.subplots(nrows=1, ncols=1, tight_layout=True,
figsize=(width, width/2))
ax = axes
for (sf0, color) in zip(
(0.0, 0.1, 0.2, 0.3, 0.4, 0.5),
("red", "orange", "green", "cyan", "blue", "black")
):
sf1 = 1. - sf0
label = r"S$_F^t$ = {:.2f} S$_F^{{told}}$ = {:.2f}".format(sf1, sf0)
ax.plot(
top,
err_function(top, ss=1, sf0=sf0, sf1=sf1),
lw=1.25,
color=color,
label=label,
clip_on=False,
)
ax.set_xlim(0, 1000)
ax.set_ylim(0, 1e6)
ax.set_xlabel("Cell thickness, in length units")
ax.set_ylabel("Error, in cubic length per time units")
spnspecs.graph_legend(ax)
spnspecs.remove_edge_ticks(ax);
# +
fig, axes = plt.subplots(
nrows=1,
ncols=1,
constrained_layout=True,
figsize=(width, 0.3 * width),
)
ax = axes
for (sf0, color) in zip(
(0.0, 0.1, 0.2, 0.3, 0.4, 0.5),
("red", "orange", "green", "cyan", "blue", "black")
):
sf1 = 1. - sf0
label = r"S$_F^t$ = {:.2f} S$_F^{{told}}$ = {:.2f}".format(sf1, sf0)
ax.plot(
top,
err_function_simp(top, ss=1e-5, sf0=sf0, sf1=sf1),
lw=1.25,
color=color,
label=label,
clip_on=False,
)
ax.set_xlim(0, 1000)
ax.set_ylim(0, 10)
ax.set_xlabel("Cell thickness, in meters")
ax.set_ylabel(r"$\Delta$ Q$_{SS}$, in meters$^3$/ day")
spnspecs.graph_legend(
ax,
ncol=2,
labelspacing=0.2,
columnspacing=0.5,
)
spnspecs.remove_edge_ticks(ax);
fpth = os.path.join(figpth, 'STOSsError.pdf')
fig.savefig(fpth, dpi=dpi);
# -
|
doc/SuppTechInfo/python/STO-SpecificStorage.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] editable=true
# # ETL Processes
# Use this notebook to develop the ETL process for each of your tables before completing the `etl.py` file to load the whole datasets.
# + editable=true
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
# + editable=true
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=<PASSWORD>")
cur = conn.cursor()
# + editable=true
def get_files(filepath):
    """Recursively collect absolute paths of all ``.json`` files under *filepath*."""
    collected = []
    for root, _dirs, _files in os.walk(filepath):
        # Match JSON files directly in this directory; os.walk supplies
        # the recursion into subdirectories.
        for match in glob.glob(os.path.join(root, '*.json')):
            collected.append(os.path.abspath(match))
    return collected
# + [markdown] editable=true
# # Process `song_data`
# In this first part, you'll perform ETL on the first dataset, `song_data`, to create the `songs` and `artists` dimensional tables.
#
# Let's perform ETL on a single song file and load a single record into each table to start.
# - Use the `get_files` function provided above to get a list of all song JSON files in `data/song_data`
# - Select the first song in this list
# - Read the song file and view the data
# + editable=true
song_files = get_files("data/song_data")
# + editable=true
filepath = song_files[0]
# + editable=true
df = pd.read_json(filepath,typ='series')
df.head()
# + [markdown] editable=true
# ## #1: `songs` Table
# #### Extract Data for Songs Table
# - Select columns for song ID, title, artist ID, year, and duration
# - Use `df.values` to select just the values from the dataframe
# - Index to select the first (only) record in the dataframe
# - Convert the array to a list and set it to `song_data`
# + editable=true
song_data = list(df[['song_id', 'title', 'artist_id', 'year','duration']])
song_data
# + [markdown] editable=true
# #### Insert Record into Song Table
# Implement the `song_table_insert` query in `sql_queries.py` and run the cell below to insert a record for this song into the `songs` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/resetted the `songs` table in the sparkify database.
# + editable=true
cur.execute(song_table_insert, song_data)
conn.commit()
# + [markdown] editable=true
# Run `test.ipynb` to see if you've successfully added a record to this table.
# + [markdown] editable=true
# ## #2: `artists` Table
# #### Extract Data for Artists Table
# - Select columns for artist ID, name, location, latitude, and longitude
# - Use `df.values` to select just the values from the dataframe
# - Index to select the first (only) record in the dataframe
# - Convert the array to a list and set it to `artist_data`
# + editable=true
artist_data = list(df[['artist_id', 'name', 'location', 'latitude', 'longitude']])
artist_data
# + [markdown] editable=true
# #### Insert Record into Artist Table
# Implement the `artist_table_insert` query in `sql_queries.py` and run the cell below to insert a record for this song's artist into the `artists` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/resetted the `artists` table in the sparkify database.
# + editable=true
cur.execute(artist_table_insert, artist_data)
conn.commit()
# + [markdown] editable=true
# Run `test.ipynb` to see if you've successfully added a record to this table.
# + [markdown] editable=true
# # Process `log_data`
# In this part, you'll perform ETL on the second dataset, `log_data`, to create the `time` and `users` dimensional tables, as well as the `songplays` fact table.
#
# Let's perform ETL on a single log file and load a single record into each table.
# - Use the `get_files` function provided above to get a list of all log JSON files in `data/log_data`
# - Select the first log file in this list
# - Read the log file and view the data
# + editable=true
log_files = get_files("data/log_data")
# + editable=true
filepath = log_files[0]
# + editable=true
df = pd.read_json(filepath,lines=True)
df.head()
# + [markdown] editable=true
# ## #3: `time` Table
# #### Extract Data for Time Table
# - Filter records by `NextSong` action
# - Convert the `ts` timestamp column to datetime
# - Hint: the current timestamp is in milliseconds
# - Extract the timestamp, hour, day, week of year, month, year, and weekday from the `ts` column and set `time_data` to a list containing these values in order
# - Hint: use pandas' [`dt` attribute](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.dt.html) to access easily datetimelike properties.
# - Specify labels for these columns and set to `column_labels`
# - Create a dataframe, `time_df,` containing the time data for this file by combining `column_labels` and `time_data` into a dictionary and converting this into a dataframe
# + editable=true
df.dtypes
# + editable=true
df = df[df.page == 'NextSong']
df.head()
# + editable=true
df.shape
# + editable=true
t = pd.to_datetime(df.ts, unit='ms')
t.head()
# + editable=true
# Build the time-dimension columns, in the same order as column_labels below.
# Fixed: Series.dt.week was deprecated in pandas 1.1 and removed in 2.0; use
# isocalendar().week instead (cast back to int to keep the old integer dtype).
# NOTE(review): the 'time' column keeps the raw epoch-millisecond values from
# df.ts rather than the converted datetimes in t — confirm this matches the
# schema of the `time` table before inserting.
time_data = (df.ts, t.dt.hour, t.dt.day, t.dt.isocalendar().week.astype(int), t.dt.month, t.dt.year, t.dt.weekday)
# + editable=true
column_labels = ('time', 'hour', 'day', 'week', 'month', 'year', 'weekday')
# + editable=true
import numpy as np
# Pair each label with its Series and materialise the time-dimension frame.
time_dict = dict(zip(column_labels, time_data))
print(len(time_dict))
# + editable=true
time_df = pd.DataFrame(time_dict)
time_df.head()
# + [markdown] editable=true
# #### Insert Records into Time Table
# Implement the `time_table_insert` query in `sql_queries.py` and run the cell below to insert records for the timestamps in this log file into the `time` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/reset the `time` table in the sparkify database.
# + editable=true
# Insert one row per event timestamp; a single commit after the loop keeps
# the whole file's time rows in one transaction.
for i, row in time_df.iterrows():
    cur.execute(time_table_insert, list(row))
conn.commit()
# + [markdown] editable=true
# Run `test.ipynb` to see if you've successfully added records to this table.
# + [markdown] editable=true
# ## #4: `users` Table
# #### Extract Data for Users Table
# - Select columns for user ID, first name, last name, gender and level and set to `user_df`
# + editable=true
# users dimension: identifying columns plus the current subscription level.
user_columns = ['userId', 'firstName', 'lastName', 'gender', 'level']
user_df = df.loc[:, user_columns]
user_df.head()
# + [markdown] editable=true
# #### Insert Records into Users Table
# Implement the `user_table_insert` query in `sql_queries.py` and run the cell below to insert records for the users in this log file into the `users` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/reset the `users` table in the sparkify database.
# + editable=true
# Insert one row per event's user.  user_table_insert should handle duplicate
# userIds (e.g. ON CONFLICT ... DO UPDATE) since the same user appears once
# per song play in user_df.
for i, row in user_df.iterrows():
    cur.execute(user_table_insert, row)
conn.commit()
# + [markdown] editable=true
# Run `test.ipynb` to see if you've successfully added records to this table.
# + [markdown] editable=true
# ## #5: `songplays` Table
# #### Extract Data and Songplays Table
# This one is a little more complicated since information from the songs table, artists table, and original log file are all needed for the `songplays` table. Since the log file does not specify an ID for either the song or the artist, you'll need to get the song ID and artist ID by querying the songs and artists tables to find matches based on song title, artist name, and song duration time.
# - Implement the `song_select` query in `sql_queries.py` to find the song ID and artist ID based on the title, artist name, and duration of a song.
# - Select the timestamp, user ID, level, song ID, artist ID, session ID, location, and user agent and set to `songplay_data`
#
# #### Insert Records into Songplays Table
# - Implement the `songplay_table_insert` query and run the cell below to insert records for the songplay actions in this log file into the `songplays` table. Remember to run `create_tables.py` before running the cell below to ensure you've created/reset the `songplays` table in the sparkify database.
# + editable=true
print(songplay_table_insert)
# + editable=true
print(songplay_table_create)
# + editable=true
# For each play event, look up matching song/artist ids by (title, artist
# name, duration).  Most log rows have no match in the song subset, so
# fetchone() returning None simply yields NULL foreign keys.
# NOTE(review): row.ts is still in epoch milliseconds here — confirm that
# songplays.start_time expects that rather than a converted datetime.
for index, row in df.iterrows():
    # get songid and artistid from song and artist tables
    cur.execute(song_select, (row.song, row.artist, row.length))
    results = cur.fetchone()
    if results:
        songid, artistid = results
    else:
        songid, artistid = None, None
    # insert songplay record
    songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)
    cur.execute(songplay_table_insert, songplay_data)
    conn.commit()
# + [markdown] editable=true
# Run `test.ipynb` to see if you've successfully added records to this table.
# + [markdown] editable=true
# # Close Connection to Sparkify Database
# + editable=true
# Release the database connection; all single-file ETL work is committed above.
conn.close()
# + [markdown] editable=true
# # Implement `etl.py`
# Use what you've completed in this notebook to implement `etl.py`.
# + editable=true
|
.ipynb_checkpoints/etl-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pprint import pprint
import pickle
from pymongo import MongoClient
import time
from datetime import date, timedelta
import os
import datetime
from dateutil import tz
import pendulum
import config
import function as func
from schema.fact_document import FactDocumentModel
from schema.fact_performance import FactPerformanceModel
from schema.fact_data_extraction import FactDataExtractionModel
from db_connect import EngineConnect as DatabaseConnect
# +
class EclaimsExecutor:
def __init__(
self,
*kwargs,
environment: str,
uri: str,
database_name: str,
docs_collection_name: str,
trans_collection_name: str,
performance_collection_name: str,
db: DatabaseConnect
):
self.environment = environment
self.uri = uri
self.database_name = database_name
self.docs_collection_name = docs_collection_name
self.trans_collection_name = trans_collection_name
self.performance_collection_name = performance_collection_name
self.db = db
self.start_run = time.time()
self.maxSevSelDelay = 20000
self.start = config.start
self.query = config.ECLAIMS_QUERY
self.performance_query = config.ECLAIMS_PERFORMANCE_QUERY
self.project_id = config.ECLAIMS_PROJECT_ID
self.project_name = config.ECLAIMS_PROJECT_NAME
self.backup_dir = config.BACKUP_DIR
self.project_backup_dir = config.ECLAIMS_BACKUP_DIR
self.project_docs_dir = config.ECLAIMS_DOCS_DIR
self.project_trans_dir = config.ECLAIMS_TRANS_DIR
self.project_performance_dir = config.ECLAIMS_PERFORMANCE_DIR
self.backup_file_type = config.BACKUP_FILE_TYPE
self.schema = config.DWH_ANALYTIC_SCHEMA
self.fact_document_table = config.DWH_FACT_DOCUMENT_TABLE
self.fact_performancec_table = config.DWH_FACT_PERFORMANCE_TABLE
self.fact_data_extraction = config.DWH_FACT_DATA_EXTRACTION_TABLE
def get_docs_and_trans(self):
if self.environment == 'development':
obj_docs = pickle.load(open('./backup/docs/' + self.project_id + '.pickle', 'rb'))
obj_trans = pickle.load(open('./backup/trans/' + self.project_id + '.pickle', 'rb'))
else:
obj_docs = pickle.load(open(self.backup_dir + self.project_backup_dir + self.project_docs_dir + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type, 'rb'))
obj_trans = pickle.load(open(self.backup_dir + self.project_backup_dir + self.project_trans_dir + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type, 'rb'))
data_docs = [item for item in obj_docs]
data_trans = [item for item in obj_trans]
return data_docs, data_trans
def get_performance(self):
if self.environment == 'development':
obj_performance = pickle.load(open('./backup/performance/' + self.project_id + '.pickle', 'rb'))
else:
obj_performance = pickle.load(open(self.backup_dir + self.project_backup_dir + self.project_performance_dir + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type, 'rb'))
data_performance = [item for item in obj_performance]
return data_performance
def fact_document(self):
datas = []
data_docs, data_trans = self.get_docs_and_trans()
list_created = [data['created_date'] for data in data_docs]
meta_datas = [data['project_meta_data'] for data in data_docs]
for data in data_trans:
if len(data['records']) == 0:
continue
records = data['records'][0]
created_date_utc_7 = func.check_index_data_docs(meta_datas, list_created, records['requestId'], records['caseId'], records['caseNumber']) \
+ datetime.timedelta(hours = 7)
last_modified_utc_7 = data['last_modified'] + datetime.timedelta(hours = 7)
import_date_key_utc_7, import_time_key_utc_7 = func.handle_date_to_date_and_time_id(created_date_utc_7)
export_date_key_utc_7, export_time_key_utc_7 = func.handle_date_to_date_and_time_id(last_modified_utc_7)
remark_code = None
if records['remarkCode'] != None and records['remarkCode'] != '':
remark_code = records['remarkCode']
_obj = FactDocumentModel(
project_id = self.project_id,
document_id = func.bson_object_to_string(data['doc_id']),
doc_set_id = func.bson_object_to_string(data['doc_set_id']),
remark_code = remark_code,
remark_description = None,
import_date_key = import_date_key_utc_7,
import_time_key = import_time_key_utc_7,
export_date_key = export_date_key_utc_7,
export_time_key = export_time_key_utc_7,
import_timestamp = created_date_utc_7,
export_timestamp = last_modified_utc_7,
)
datas.append(_obj)
if datas != []:
print(datas[0].__dict__)
self.db.create([item.__dict__ for item in datas], self.schema, self.fact_document_table)
def fact_performance(self):
datas = []
data_performance = self.get_performance()
for performance in data_performance:
captured_date_timestamp = datetime.datetime.strptime(performance['captured_date'], '%d/%m/%Y')
obj_ = FactPerformanceModel(
ori_id = func.bson_object_to_string(performance['_id']),
project_id = self.project_id,
group_id = performance['group_id'],
document_id = performance['documentId'],
reworked = performance['has_rework'],
work_type_id = func.get_working_type_id_by_name(performance['work_type']),
process_key = func.get_process_id_performance(performance['type']),
number_of_record = performance['records'],
user_name = performance['username'],
ip = None,
captured_date_timestamp = captured_date_timestamp,
captured_date_key = func.time_to_date_key(captured_date_timestamp),
captured_time_key = 0,
total_time_second = performance['total_time']/100
)
datas.append(obj_)
if datas != []:
print(datas[0].__dict__)
self.db.create([item.__dict__ for item in datas], self.schema, self.fact_performancec_table)
def check_connect(self):
if self.environment == 'development':
(status, content, time_run) = (True, "good!", time.time()- self.start_run)
else:
client = MongoClient(self.uri, serverSelectionTimeoutMS= self.maxSevSelDelay)
client.server_info()
client.close()
(status, content, time_run) = (True, "good!", time.time()-self.start_run)
print('check_connect done!')
return {"status": status, "content": content, "time": time_run}
def backup_performance(self):
if self.environment == 'development':
objects = pickle.load(open('./backup/performance/' + self.project_id + '.pickle', 'rb'))
data_objects = [item for item in objects]
handle = open('./backup_test/performance_' + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type, 'wb')
pickle.dump(data_objects, handle, protocol=pickle.HIGHEST_PROTOCOL)
handle.close()
else:
client = MongoClient(self.uri)
data_query = client[self.database_name][self.performance_collection_name].find(self.query)
data_objects = [item for item in data_query]
client.close()
handle = open(self.backup_dir + self.project_backup_dir + self.project_performance_dir + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type, 'wb')
pickle.dump(data_objects, handle, protocol=pickle.HIGHEST_PROTOCOL)
handle.close()
print('backup_performance done!')
def backup_docs(self):
if self.environment == 'development':
objects = pickle.load(open('./backup/docs/' + self.project_id + '.pickle', 'rb'))
data_objects = [item for item in objects]
handle = open('./backup_test/docs_' + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type, 'wb')
pickle.dump(data_objects, handle, protocol=pickle.HIGHEST_PROTOCOL)
handle.close()
else:
client = MongoClient(self.uri)
data_query = client[self.database_name][self.docs_collection_name].find(self.query)
data_objects = [item for item in data_query]
client.close()
handle = open(self.backup_dir + self.project_backup_dir + self.project_docs_dir + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type, 'wb')
pickle.dump(data_objects, handle, protocol=pickle.HIGHEST_PROTOCOL)
handle.close()
print('backup_docs done!')
def backup_trans(self):
if self.environment == 'development':
objects = pickle.load(open('./backup/trans/' + self.project_id + '.pickle', 'rb'))
data_objects = [item for item in objects]
handle = open('./backup_test/tran_' + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type, 'wb')
pickle.dump(data_objects, handle, protocol=pickle.HIGHEST_PROTOCOL)
handle.close()
else:
client = MongoClient(self.uri)
data_query = client[self.database_name][self.trans_collection_name].find(self.query)
data_objects = [item for item in data_query]
client.close()
handle = open(self.backup_dir + self.project_backup_dir + self.project_trans_dir + str(self.start.strftime("%Y-%m-%d")) + self.backup_file_type , 'wb')
pickle.dump(data_objects, handle, protocol=pickle.HIGHEST_PROTOCOL)
handle.close()
print('backup_trans done!')
def report(self):
print('report done!')
def clean(self):
if self.environment == 'development' or self.environment == 'production':
now = self.start - timedelta(days=1)
file_name = str(now.strftime("%Y-%m-%d"))
docs_file_path = self.backup_dir + self.project_backup_dir + self.project_docs_dir + file_name + self.backup_file_type
trans_file_path = self.backup_dir + self.project_backup_dir + self.project_trans_dir + file_name + self.backup_file_type
performance_file_path = self.backup_dir + self.project_backup_dir + self.project_performance_dir + file_name + self.backup_file_type
if os.path.exists(performance_file_path):
os.remove(performance_file_path)
else:
print("The performance_file_path does not exist")
if os.path.exists(docs_file_path):
os.remove(docs_file_path)
else:
print("The docs_file_path does not exist")
if os.path.exists(trans_file_path):
os.remove(trans_file_path)
else:
print("The trans_file_path does not exist")
print('clean done!')
def fact_data_extract(self):
datas = []
data_docs, data_trans = self.get_docs_and_trans()
key_ignore = ("requestId", "caseId", "caseNumber", "created_date", "last_modified", "documentId", 'attachmentId',
'system_processing', 'system_processing123', 'hos_image_type', 'remarkCode', 'remarkDescription')
key_ignore_trans = ("requestId", "caseId", "caseNumber", "created_date", "last_modified", "documentId", 'attachmentId',
'system_processing', 'system_processing123', 'hos_image_type', 'remarkCode', 'remarkDescription', 'Images', 'fileName')
# for data in data_trans:
# records = data['records'][0]
# last_modified_utc_7 = data['last_modified'] + datetime.timedelta(hours = 7)
# list_keys = list(records.keys())
# list_values = list(records.values())
# user_name = None
# step_type = None
# process_type = 'transform'
# module_type = 'transform_data'
# process_key = func.get_process_key(module_type, process_type, step_type)
# for i in range(len(list_keys)):
# last_modified_date_key_utc_7, last_modified_time_key_utc_7 = func.handle_date_to_date_and_time_id(last_modified_utc_7)
# field_name_temp = list_keys[i]
# if field_name_temp in key_ignore_trans or field_name_temp.startswith('classify'):
# continue
# elif (field_name_temp.startswith('cl') or field_name_temp.startswith('ocr_')) and field_name_temp != 'claimNature':
# continue
# field_name = func.lower_first_string(field_name_temp)
# field_value = list_values[i]
# _obj = FactDataExtractionModel(
# project_id = self.project_id,
# document_id = func.bson_object_to_string(data['doc_id']),
# doc_set_id = func.bson_object_to_string(data['doc_set_id']),
# last_modified_date_key = last_modified_date_key_utc_7,
# last_modified_time_key = last_modified_time_key_utc_7,
# last_modified_timestamp = last_modified_utc_7,
# user_name = user_name,
# process_key = process_key,
# field_name = field_name,
# field_value = field_value
# )
# datas.append(_obj)
for data in data_docs:
if len(data['records']) == 0:
continue
meta_data = data['project_meta_data']
last_modified_utc_7 = data['last_modified'] + datetime.timedelta(hours = 7)
last_modified_date_key_utc_7, last_modified_time_key_utc_7 = func.handle_date_to_date_and_time_id(last_modified_utc_7)
records = data['records'][0]
for key, value in records.items():
if key == 'keyed_data':
for keyed_data in value:
if (keyed_data['section'] == 'Auto_Extract' and keyed_data['source'] == 'queue_transform') or \
(keyed_data['section'] == 'Verify_Data' and keyed_data['source'] == 'queue_transform'):
process_type = func.fix_process_type_keyed_data(keyed_data['section'])
user_name = None
data_obj = keyed_data['data'][0]
list_keys = list(data_obj.keys())
list_values = list(data_obj.values())
for i in range(len(list_values)):
field_name_temp = list_keys[i]
step_type = func.fix_step_type_keyed_data(field_name_temp)
field_name = func.fix_field_name_keyed_data(field_name_temp)
if field_name in key_ignore:
continue
field_value = list_values[i]['text']
process_key = func.get_process_key(module_type, process_type, step_type)
_obj = FactDataExtractionModel(
project_id = self.project_id,
document_id = func.bson_object_to_string(meta_data['documents'][0]['documentId']),
doc_set_id = func.bson_object_to_string(data['doc_set_id']),
last_modified_date_key = last_modified_date_key_utc_7,
last_modified_time_key = last_modified_time_key_utc_7,
last_modified_timestamp = last_modified_utc_7,
user_name = user_name,
process_key = process_key,
field_name = field_name,
field_value = field_value
)
datas.append(_obj)
# elif key == 'system_data':
# module_type = 'system_data'
# system_data = value[0]
# data_obj = system_data['data'][0]
# auto_qc_output_data = data_obj['auto_qc_output_data']
# user_name = None
# process_type = 'automaticQualityControl'
# step_type = None
# process_key = func.get_process_key(module_type, process_type, step_type)
# if auto_qc_output_data != []:
# for item in auto_qc_output_data:
# field_name_temp = item['field_name']
# field_name = func.lower_first_string(field_name_temp)
# if field_name in key_ignore:
# continue
# _obj = FactDataExtractionModel(
# project_id = self.project_id,
# document_id = func.bson_object_to_string(meta_data['documents'][0]['documentId']),
# doc_set_id = func.bson_object_to_string(data['doc_set_id']),
# last_modified_date_key = last_modified_date_key_utc_7,
# last_modified_time_key = last_modified_time_key_utc_7,
# last_modified_timestamp = last_modified_utc_7,
# user_name = user_name,
# process_key = process_key,
# field_name = field_name,
# field_value = 1
# )
# datas.append(_obj)
# elif key == 'qc_ed_data':
# module_type = 'qc_ed_data'
# qc_ed_data = value[0][0]
# if 'qc_fields_err' not in qc_ed_data.keys():
# pass
# else:
# data_obj = qc_ed_data['qc_fields_err']
# user_name = qc_ed_data['qcer']
# step_type = None
# process_type = func.fix_process_type_keyed_data(qc_ed_data['section'])
# process_key = func.get_process_key(module_type, process_type, step_type)
# for item in data_obj:
# field_name_temp = item['field']
# if field_name_temp in key_ignore:
# continue
# field_name = func.lower_first_string(field_name_temp)
# field_value = item['value']['text']
# _obj = FactDataExtractionModel(
# project_id = self.project_id,
# document_id = func.bson_object_to_string(meta_data['documents'][0]['documentId']),
# doc_set_id = func.bson_object_to_string(data['doc_set_id']),
# last_modified_date_key = last_modified_date_key_utc_7,
# last_modified_time_key = last_modified_time_key_utc_7,
# last_modified_timestamp = last_modified_utc_7,
# user_name = user_name,
# process_key = process_key,
# field_name = field_name,
# field_value = field_value
# )
# datas.append(_obj)
# elif key == 'apr_ed_data':
# module_type = 'apr_ed_data'
# apr_ed_data = value[0][0]
# process_type = func.fix_process_type_keyed_data(apr_ed_data['section'])
# step_type = None
# process_key = func.get_process_key(module_type, process_type, step_type)
# data_obj = apr_ed_data['data']
# for item in data_obj:
# field_name_temp = item['field']
# if field_name_temp in key_ignore:
# continue
# field_name = func.lower_first_string(field_name_temp)
# field_value = item['value']['text']
# user_name = item['aper']
# _obj = FactDataExtractionModel(
# project_id = self.project_id,
# document_id = func.bson_object_to_string(meta_data['documents'][0]['documentId']),
# doc_set_id = func.bson_object_to_string(data['doc_set_id']),
# last_modified_date_key = last_modified_date_key_utc_7,
# last_modified_time_key = last_modified_time_key_utc_7,
# last_modified_timestamp = last_modified_utc_7,
# user_name = user_name,
# process_key = process_key,
# field_name = field_name,
# field_value = field_value
# )
# datas.append(_obj)
# elif key == 'final_data':
# module_type = 'final_data'
# final_data = value[0]
# user_name = None
# data_obj = final_data['data']
# process_type = 'finalize'
# for item in data_obj:
# field_name_temp = list(item.keys())[0]
# if field_name_temp in key_ignore or field_name_temp.startswith('cl') or field_name_temp.startswith('ocr_'):
# continue
# step_type = None
# field_name = func.fix_field_name_keyed_data(field_name_temp)
# field_value = list(item.values())[0]['text']
# process_key = func.get_process_key(module_type, process_type, step_type)
# _obj = FactDataExtractionModel(
# project_id = self.project_id,
# document_id = func.bson_object_to_string(meta_data['documents'][0]['documentId']),
# doc_set_id = func.bson_object_to_string(data['doc_set_id']),
# last_modified_date_key = last_modified_date_key_utc_7,
# last_modified_time_key = last_modified_time_key_utc_7,
# last_modified_timestamp = last_modified_utc_7,
# user_name = user_name,
# process_key = process_key,
# field_name = field_name,
# field_value = field_value
# )
# datas.append(_obj)
# if datas != []:
# print(datas[0].__dict__)
for data in datas:
print(data.field_name, data.field_value)
self.db.create([item.__dict__ for item in datas], self.schema, self.fact_data_extraction)
# -
# Wire the warehouse connector and the Eclaims executor from config, then run
# only the fact_data_extract step; the other steps are left commented for
# manual, one-at-a-time execution in this notebook.
db_connect = DatabaseConnect(uri = config.DWH_SQLALCHEMY_URI)
executor = EclaimsExecutor(
    environment=config.ENVIRONMENT,
    uri=config.ELROND_URI,
    database_name=config.ELROND_DATABASE,
    docs_collection_name= config.ECLAIMS_DOCS_COLLECTION,
    trans_collection_name= config.ECLAIMS_TRANS_COLLECTION,
    performance_collection_name = config.ECLAIMS_PERFORMANCE_COLLECTION,
    db = db_connect
)
# executor.clean()
# executor.backup_docs()
# executor.backup_trans()
# executor.backup_performance()
# executor.fact_document()
executor.fact_data_extract()
# executor.fact_performance()
# executor.report()
# executor.fact_performance()
# executor.report()
# +
# Airflow DAG wiring for the daily Eclaims warehouse load:
#   clean -> check_connect -> {backups} -> {fact builds} -> report
# NOTE(review): DAG, PythonOperator and TriggerRule are never imported in this
# file; this cell additionally needs:
#   from airflow import DAG
#   from airflow.operators.python import PythonOperator
#   from airflow.utils.trigger_rule import TriggerRule
dag_params = {
    'dag_id': "dwh_eclaims_project_daily_tmp",
    'start_date': datetime.datetime(2021, 1, 6, tzinfo=config.LOCAL_TIME_ZONE),
    'schedule_interval': '20 5 * * *'
}
dag = DAG(**dag_params)
clean = PythonOperator(task_id='clean', python_callable=executor.clean, dag=dag)
check_connect = PythonOperator(task_id='check_connect', python_callable=executor.check_connect, dag=dag)
# Fixed: EclaimsExecutor has no backup_docs_json/backup_trans_json methods —
# the callables must be backup_docs/backup_trans (task_ids kept for history).
backup_docs_json = PythonOperator(task_id='backup_docs_json', python_callable=executor.backup_docs, dag=dag, trigger_rule=TriggerRule.ALL_SUCCESS)
backup_trans_json = PythonOperator(task_id='backup_trans_json', python_callable=executor.backup_trans, dag=dag, trigger_rule=TriggerRule.ALL_SUCCESS)
backup_performance = PythonOperator(task_id='backup_performance', python_callable=executor.backup_performance, dag=dag, trigger_rule=TriggerRule.ALL_SUCCESS)
fact_performance = PythonOperator(task_id='fact_performance', python_callable=executor.fact_performance, dag=dag, trigger_rule=TriggerRule.ALL_SUCCESS)
fact_document = PythonOperator(task_id='fact_document', python_callable=executor.fact_document, dag=dag, trigger_rule=TriggerRule.ALL_SUCCESS)
report = PythonOperator(task_id='report', python_callable=executor.report, dag=dag, trigger_rule=TriggerRule.ALL_DONE)
clean >> check_connect >> [backup_trans_json, backup_docs_json, backup_performance]
fact_performance.set_upstream(backup_performance)
fact_document.set_upstream([backup_trans_json, backup_docs_json])
[fact_performance, fact_document] >> report
# +
# Ad-hoc exploration of the local pickle fixtures for one project id:
# inspect a single doc record and list the distinct OCR field names it carries.
trans = pickle.load(open('./backup/trans/' + '5db5c87345052400142992e9' + '.pickle', 'rb'))
docs = pickle.load(open('./backup/docs/' + '5db5c87345052400142992e9' + '.pickle', 'rb'))
performance = pickle.load(open('./backup/performance/' + '5db5c87345052400142992e9' + '.pickle', 'rb'))
# keyed_data
# qc
# pprint(performance)
x = []
# Print only the first doc; `data` keeps that record after the break.
for data in docs:
    pprint(data)
    break
ocr_results = data['records'][0]['system_data'][0]['ocr_data'][0]['ocr_results']
for ocr_result in ocr_results:
    field_name = ocr_result['field_name']
    if field_name not in x:
        x.append(field_name)
print(x)
# NOTE(review): the bare list below looks like pasted cell output, not code —
# it is evaluated and discarded; consider deleting it.
['address', 'birthday', 'expiry', 'home_town', 'id', 'issue_at', 'issue_date', 'name', 'sex']
|
dwh_analytic/dags/data_warehouse_prod/daily_task_eclaims.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import csv

# Down-sample trip_data_8.csv: keep one row out of every 1000 (including the
# header at row 0) and write the result to trip_data_subset.csv.
SOURCE_FILE = 'trip_data_8.csv'
SUBSET_FILE = 'trip_data_subset.csv'
SAMPLE_EVERY = 1000

# newline='' is the documented way to open files for the csv reader/writer
# (prevents blank-line artifacts on Windows).  Opening the output once in 'w'
# replaces the original truncate-then-append dance, and the context managers
# guarantee both handles are closed even if a row fails to parse.
with open(SOURCE_FILE, 'r', newline='') as src, \
        open(SUBSET_FILE, 'w', newline='') as dst:
    reader = csv.reader(src)
    writer = csv.writer(dst, delimiter=',', lineterminator='\n')
    for i, row in enumerate(reader):
        if i % SAMPLE_EVERY == 0:
            writer.writerow(row)
|
Question 10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id="title_ID"></a>
# # JWST Pipeline Validation Testing Notebook: Calwebb_Image3, Resample step
#
# <span style="color:red"> **Instruments Affected**</span>: FGS, MIRI, NIRCam, NIRISS, NIRSpec
#
# Tested on MIRI Simulated data
#
# ### Table of Contents
# <div style="text-align: left">
#
# <br> [Introduction](#intro_ID) <br> [Run JWST Pipelines](#pipeline_ID) <br> [Imports](#imports_ID) <br> [Create an association table for your cal files and run them through calwebb_image3](#runpipeline_ID) <br> [Find Stars in Image and Determine their Coordinates](#runscript_ID) <br> [Compare RA and Dec to expected Values](#residual_ID) <br> [About This Notebook](#about_ID) <br>
#
#
# </div>
# <a id="intro_ID"></a>
# # Introduction
#
#
# This test is designed to test the resample step in the calwebb_image3 pipeline. At the end of the calwebb_image3 pipeline, the set of files defined in an association table will be distortion corrected and combined. Resample is the step that applies the distortion correction using the drizzling algorithm (as defined in the DrizzlePac handbook) and combines the listed files. For more information on the pipeline step visit the links below.
#
# Step description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/resample/main.html
#
# Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/resample
#
# The data for this test were created with the MIRI Data Simulator, and the documentation for that code can be found here: http://miri.ster.kuleuven.be/bin/view/Public/MIRISim_Public
#
#
# ### Calibration WG Requested Algorithm:
#
# A short description and link to the page: https://outerspace.stsci.edu/display/JWSTCC/Vanilla+Image+Combination
#
#
# ### Defining Terms
# Definition of terms or acronymns.
#
# JWST: James Webb Space Telescope
#
# MIRI: Mid-Infrared Instrument
#
# MIRISim: MIRI Data Simulator
#
# ### Description of test
#
# This test is performed by creating a set of simulated data with multiple point sources located at specified coordinates. The simulator puts in the expected distortion, so the initial output data comes out of the simulator in distorted coordinates. When this data is then run through calwebb_detector1, calwebb_image2 and calwebb_image3, the combined, undistorted image should have the point sources registered at the expected locations. In flight, this test can be repeated with known stars that should be found at their expected coordinates.
#
# ### Create the data for testing
#
# The set of data used in this particular test were created with the MIRI Data Simulator (MIRISim). Referring to the MIRISim link, you can see how to set up and run the simulator to re-create the input files if you wish. The data was run with a scene.ini file that specified what the scene should look like, with coordinates for the stars given in units of arcsecond offsets from the center of the field of view. The scene.ini file as well as the setup files simuation.ini and simulator.ini are needed to run the simulation.
#
# Once in the mirisim conda environment, the simulation is run with the command line:
# > mirisim simulation.ini
#
# The simulator created four files, two exposures each at two different dither positions, using the specified filter. Make sure the WCSAXES header keyword in the SCI extension is set to 2 and not 4. If it is set to 4, change it to 2.
#
#
#
# [Top of Page](#title_ID)
# <a id="pipeline_ID"></a>
# ## Run JWST Pipelines
#
# The four files were then run individually through the calwebb_detector1 and calwebb_image2 pipelines. When running the calwebb_detector1 pipeline, increase the threshold for a detection in the jump step from 4 sigma to 10 sigma to avoid a current issue where the jump detection step flags a large percentage of pixels as jumps. This can be done on the command line. (commands to be typed start with $)
#
# The pipelines can be run on the command line with the following commands or put into a script while using the pipeline conda environment.
#
# $ strun calwebb_detector1.cfg filename --steps.jump.rejection_threshold 10.0
#
# The output of the calwebb_detector1 pipeline is a set of four *rate.fits files which will then be run through the calwebb_image2 pipeline.
#
# $ strun calwebb_image2.cfg filename
#
# The output of the calwebb_image2 pipeline was then a set of four *cal.fits files. An association table was created that included these four files as input, and then the files and the association table were run through the calwebb_image3 pipeline.
#
# The cal files are stored in artifactory, and this notebook is meant to pull those files for the test of resample. Step through the cells of this notebook to run calwebb_image3 and then check the alignment.
#
#
#
# [Top of Page](#title_ID)
#
# <a id="imports_ID"></a>
# # Imports
# The following packages will need to be imported for the scripts to work.
#
#
# * astropy.io for opening files
# * astropy.stats for sigma clipping routine
# * astropy.visualization for image plotting
# * ci_watson.artifactory_helpers to read in data from artifactory
# * jwst.datamodels for opening files as a JWST Datamodel
# * jwst.pipeline to run the pipeline step/module
# * jwst.associations to create association table
# * numpy for calculations
# * matplotlib.pyplot.plt to generate plot
# * os for path information
# * photutils for star finding and aperture photometry
# * regtest to retrieve data from artifactory needed to run notebook
#
#
# [Top of Page](#title_ID)
# + nbpresent={"id": "45177853-942e-4949-9e30-f544d70ef5f4"}
from astropy.io import ascii, fits
from astropy.stats import sigma_clipped_stats
from astropy.table import Column
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from ci_watson.artifactory_helpers import get_bigdata
from itertools import product
from jwst.datamodels import DrizProductModel, ImageModel
from jwst.pipeline import Image3Pipeline
from jwst import associations
from jwst.associations.lib.rules_level3_base import DMS_Level3_Base
from jwst.associations import asn_from_list
import matplotlib.pyplot as plt
import numpy as np
import os
from photutils import CircularAperture, DAOStarFinder, CircularAnnulus, aperture_photometry
from jwst.regtest.regtestdata import RegtestData
# -
# <a id="runpipeline_ID"></a>
# # Open an association table for your cal files and run them through calwebb_image3
#
# Load the association table to use the .cal files that were output from calwebb_image2. That will be the input for calwebb_image3 that uses the resample step to combine each of the individual images.
#
# [Top of Page](#title_ID)
# +
# Use regtest infrastructure to access all input files associated with the association file
# Pull the association file (and, through it, its member cal files) from the
# artifactory-backed validation-data store.
rtdata = RegtestData(inputs_root="jwst_validation_notebooks", env="validation_data")
rtdata.get_asn("resample/resample_miri_test/starfield_74_asnfile.json")
rtdata.input #this should be the list of files associated with the asn
# +
# Run Calwebb_image3 on the association table
# set any specific parameters
# tweakreg parameters to allow data to run
# Non-default tweakreg settings chosen so this simulated dataset aligns.
fwhm=2.5 # Gaussian kernel FWHM of objects expected, default=2.5
minobj=5 # minimum number of objects needed to match positions for a good fit, default=15
snr= 250 # signal to noise threshold, default=5
sigma= 3 # clipping limit, in sigma units, used when performing fit, default=3
fit_geom='shift' # type of affine transformation to be considered when fitting catalogs, default='general'
use2dhist=False # boolean indicating whether to use 2D histogram to find initial offset, default=True
pipe3=Image3Pipeline()
pipe3.tweakreg.kernel_fwhm = fwhm
pipe3.tweakreg.snr_threshold = snr
pipe3.tweakreg.minobj = minobj
pipe3.tweakreg.sigma = sigma
pipe3.tweakreg.fitgeometry = fit_geom
pipe3.tweakreg.use2dhist = use2dhist
#pipe3.skymatch.skip = True # test to see if this affects the final output
pipe3.source_catalog.save_results = True
pipe3.save_results = True
# run Image3; writes the combined *_i2d.fits product used in the next section
im = pipe3.run(rtdata.input)
# -
# <a id="runscript_ID"></a>
# # Find stars in image and determine their coordinates
#
# The output of the pipeline command in the previous step (given our association table) is an i2d.fits file. This file is in the format of a JWST Data model type of DrizProductModel and should be opened as such. It is this file that we will use for source finding and to determine whether the stars are found in the expected locations. The i2d file and the associated text file containing the input coordinates of the stars can be found in artifactory.
#
# [Top of Page](#title_ID)
# #### Read in combined i2d data file and list of coordinates
# +
# Read in the combined data file and list of coordinates
# Open the resampled/combined product written by Image3Pipeline above.
im = ImageModel('starfield_74_combined_i2d.fits')
# Download the text file holding the RA/Dec positions the simulation used.
coords = get_bigdata('jwst_validation_notebooks',
                     'validation_data',
                     'resample',
                     'resample_miri_test',
                     'radec_coords.txt')
# read in text file with RA and Dec input coordinates
RA_in, Dec_in = np.loadtxt( coords, dtype=str, unpack=True)
# put RA and Dec into floats
RA_sim = RA_in.astype(float)
Dec_sim = Dec_in.astype(float)
# pull out data portion of input file
data = im.data
# print stats on input image
# NOTE(review): sigma_clipped_stats is presumably astropy.stats.sigma_clipped_stats;
# it is not in the import block visible here -- confirm it is imported earlier.
# sigma=200 effectively disables clipping for these statistics.
mean, median, std = sigma_clipped_stats(data, sigma=200.0, maxiters=5) # default sigma=3
print(mean, median, std)
# -
# #### Run DAOStar finder to find sources in the image and examine the image and positions marked.
# The block of code below will find the sources in the image, create apertures for each source found, and output the table of x, y coordinates along with the peak pixel value. It will also show a scaled version of the image and mark in blue the positions of sources found.
#
# +
# Run DAOStarFinder to find sources in image
ap_radius = 4. # radius for aperture for centroiding and photometry
# Detect point sources; the high threshold (10*std) limits hits to the
# bright simulated stars and rejects noise/bad pixels.
daofind = DAOStarFinder(fwhm=3.0, threshold=10.*std) # default threshold=5*std, fwhm=3
sources = daofind(data)
print(sources['xcentroid','ycentroid','peak'])
# Create apertures for x,y positions
positions = tuple(zip(sources['xcentroid'], sources['ycentroid']))
#print(positions)
#positions = (sources['xcentroid'], sources['ycentroid'])
apertures = CircularAperture(positions, r=ap_radius)
# mark sources on image frame to see if the correct sources were found
# NOTE(review): ImageNormalize/SqrtStretch look like astropy.visualization
# names -- they are not in the import block visible here; confirm they are
# imported earlier.  Also, recent matplotlib rejects passing vmin/vmax
# together with norm= -- verify against the pinned matplotlib version.
norm = ImageNormalize(stretch=SqrtStretch())
# keep image stretch in mind for plotting. sky subtracted range ~ (-15, 10), single sample ~ (0, 20)
plt.imshow(data, cmap='Greys', origin='lower', vmin=-15,vmax=10, norm=norm)
apertures.plot(color='blue', lw=1.5, alpha=0.5)
plt.show()
# -
# #### Run photometry on apertures (with a specified annulus for background subtraction)
#
# Set a specified annulus (inner and outer radii for the annulus).
#
# Run photometry on aperture and annuli.
#
# Subtract background values in annulus from aperture photometry.
#
# Output should be a table of photometry values printed to the screen (full table has columns id, xcenter, ycenter, aperture_sum and the added columns annulus_median, aperture_bkg and aperture_sum_bkgsub). You can choose which columns you wish to see printed.
# +
# set values for inner and outer annuli to collect background counts
inner_annulus = 10.
outer_annulus = 15.
# set up annulus for background
background_aper = CircularAnnulus(positions, r_in=inner_annulus, r_out=outer_annulus)
# perform photometry on apertures for targets and background annuli
phot_table = aperture_photometry(im.data, apertures)
# Estimate a per-source background as the sigma-clipped median of the pixels
# inside that source's own annulus.
bkg_median = []
bkg_mask = background_aper.to_mask(method='center')
for mask in bkg_mask:
    # BUG FIX: the original code multiplied the image by the *first* mask
    # (bmask = bkg_mask[0]) on every iteration, so every source was assigned
    # the background of source 0.  Use the current source's mask instead.
    aper_data = mask.multiply(data)
    aper_data = aper_data[mask.data > 0]
    # perform sigma-clipped median
    _, median_sigclip, _ = sigma_clipped_stats(aper_data)
    bkg_median.append(median_sigclip)
bkg_median = np.array(bkg_median)
# do calculations on background regions found in annuli
# Get average background per pixel
phot_table['annulus_median'] = bkg_median
# Get total background in the science aperture (per pixel * area in aperture)
phot_table['aperture_bkg'] = bkg_median * apertures.area
# subtract background in aperture from flux in aperture
phot_table['aperture_sum_bkgsub'] = phot_table['aperture_sum'] - phot_table['aperture_bkg']
print(phot_table['aperture_sum','annulus_median','aperture_bkg','aperture_sum_bkgsub'])
# -
# #### Put x, y coordinates into RA and Dec using the wcs information from the files.
# The output of the next block of code should be a table showing the x and y centroid positions as well as the associated RA and Dec values.
# +
# using wcs info from images, put coordinates into RA, Dec
ra, dec = im.meta.wcs(sources['xcentroid'], sources['ycentroid'])
# add RA, Dec to sources table
# NOTE(review): Column is presumably astropy.table.Column; it is not in the
# import block visible here -- confirm it is imported earlier in the file.
ra_col = Column(name='RA', data=ra)
dec_col = Column(name='Dec', data=dec)
sources.add_column(ra_col)
sources.add_column(dec_col)
# print RA, Dec for each x, y position found
print(sources['xcentroid', 'ycentroid', 'RA', 'Dec'])
# add option to print out list of sources with flux values
outtable = 'sourcelist_phot_rate.txt'  # NOTE(review): assigned but never written out in this chunk
sources.add_column(phot_table['aperture_sum'])
sources.add_column(phot_table['aperture_sum_bkgsub'])
# -
# #### Compare the RA and Dec positions used to create the simulated data to the values found in the output image.
# Difference each set of RA and Dec coordinates in both the input list and the found coordinates, taking into account any angles close to 360/0 degrees. If the difference for both the RA and Dec are below a set tolerance, then the positions match. Take the matched positions and convert the differences from degrees to milli arcseconds, and output the RA and Dec positions as well as the differences.
# +
# Compare input RA, Dec to found RA, Dec.
# For every (input, detected) pair, compute the angular residual in each
# coordinate with wrap-around at 360/0 deg (the 180 - |...| trick).  Pairs
# within 1e-5 deg in both axes are treated as matches; a match "passes" if
# both residuals, converted to milliarcseconds, are below 30 mas
# (~1/10 of the F770W PSF FWHM).
print(' RA found Dec found RA_Diff (mas) Dec_diff (mas) Bkg sub flux pass/fail')
for i in np.arange(0,len(RA_sim)):
    for j in np.arange(0,len(ra)):
        ra_diff = 180 - abs(abs(RA_sim[i] - ra[j])-180)
        dec_diff = 180 - abs(abs(Dec_sim[i] - dec[j])-180)
        if ra_diff < 1e-5 and dec_diff < 1e-5:
            # put differences in milliarcseconds
            ra_diff = ra_diff * 3600000
            dec_diff = dec_diff * 3600000
            if ra_diff < 30 and dec_diff < 30:
                test = 'pass'
            else:
                test = 'fail'
            print('{:15.6f} {:15.6f} {:15.6f} {:15.6f} {:15.6f} {}'.format(ra[j], dec[j], ra_diff, dec_diff,
                                                                           phot_table['aperture_sum_bkgsub'][j], test))
# -
# <a id="residual_ID"></a>
# # Compare output RA and Dec to expected values
#
# The output RA and Dec coordinates should match the input RA and Dec coordinates to within 1/10 of a PSF FWHM (~0.03 arcsec for F770W).
#
# Output RA_Diff and Dec_diff above should be on order of 30 or fewer milliarcseconds.
#
# Check to see if your input flux is roughly what you expected based on the input data.
#
# [Top of Page](#title_ID)
# <a id="about_ID"></a>
# ## About this Notebook
# **Author:** <NAME>, Research and Instrument Scientist II, INS/MIRI
# <br>**Updated On:** 08/09/2019 to add in aperture photometry
# An extra optional test that can be done is to plot the flux values against x or y values. Previous testing has shown a spatial dependence of the flux with y values, so a quick plot can show whether this problem is fixed or not. Prior to the resample step, there is no pattern, after the step, a pattern is clear. Just do this as a last check. If the scatter is not random, there may be a problem that needs to be checked. (Of course, this only works if you give an equivalent if not equal input count level to each input star.)
# Optional sanity check: flux should show no trend with detector y position;
# a visible pattern here indicates a resample-step flux problem (only
# meaningful when all input stars share the same input count level).
plt.title('Surface brightness vs. y position on detector')
plt.ylim(35500,37500) # help weed out sources that were erroneously 'hits' (bad pixels, cosmic rays, etc)
plt.xlabel('y centroid position')
plt.ylabel('Surface brightness')
plt.plot(sources['ycentroid'], phot_table['aperture_sum_bkgsub'], marker='o',linestyle='') #ylim=(30000,40000))
plt.show()
# [Top of Page](#title_ID)
# <img style="float: right;" src="./stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="stsci_pri_combo_mark_horizonal_white_bkgd" width="200px"/>
|
jwst_validation_notebooks/resample/jwst_resample_miri_test/jwst_resample_miri_testing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="it1c0jCiNCIM" colab_type="code" outputId="e9cc62e6-5fe6-4912-f7d8-9646b354446e" colab={"base_uri": "https://localhost:8080/", "height": 391}
# !wget --no-check-certificate \
# https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps.zip \
# -O /tmp/rps.zip
# !wget --no-check-certificate \
# https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps-test-set.zip \
# -O /tmp/rps-test-set.zip
# + id="PnYP_HhYNVUK" colab_type="code" colab={}
import os
import zipfile

# Extract the training set and the held-out test set into /tmp/.
# A ``with`` block guarantees each archive handle is closed even if
# extraction raises, and the loop removes the copy-pasted
# open/extract/close sequence of the original cell.
for local_zip in ('/tmp/rps.zip', '/tmp/rps-test-set.zip'):
    with zipfile.ZipFile(local_zip, 'r') as zip_ref:
        zip_ref.extractall('/tmp/')
# + id="MrxdR83ANgjS" colab_type="code" outputId="15ad7acb-280c-4607-94b4-e77596d21246" colab={"base_uri": "https://localhost:8080/", "height": 139}
# Point at the three class directories and report how many training images
# each contains, plus a sample of file names for a quick sanity check.
rock_dir = os.path.join('/tmp/rps/rock')
paper_dir = os.path.join('/tmp/rps/paper')
scissors_dir = os.path.join('/tmp/rps/scissors')
print('total training rock images:', len(os.listdir(rock_dir)))
print('total training paper images:', len(os.listdir(paper_dir)))
print('total training scissors images:', len(os.listdir(scissors_dir)))
rock_files = os.listdir(rock_dir)
print(rock_files[:10])
paper_files = os.listdir(paper_dir)
print(paper_files[:10])
scissors_files = os.listdir(scissors_dir)
print(scissors_files[:10])
# + id="jp9dLel9N9DS" colab_type="code" outputId="4d7a6a52-7a00-41d8-9ddd-cd74d67afe81" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Display two sample images from each class (the slice below picks
# files [pic_index-2 : pic_index] from each listing).
pic_index = 2
next_rock = [os.path.join(rock_dir, fname)
             for fname in rock_files[pic_index-2:pic_index]]
next_paper = [os.path.join(paper_dir, fname)
              for fname in paper_files[pic_index-2:pic_index]]
next_scissors = [os.path.join(scissors_dir, fname)
                 for fname in scissors_files[pic_index-2:pic_index]]
for i, img_path in enumerate(next_rock+next_paper+next_scissors):
    #print(img_path)
    img = mpimg.imread(img_path)
    plt.imshow(img)
    plt.axis('Off')
    plt.show()
# + id="LWTisYLQM1aM" colab_type="code" outputId="93e07c51-8b61-46b3-9f62-83dba8da7ea0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import tensorflow as tf
import keras_preprocessing
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator
# Training images get heavy augmentation (rotation, shifts, shear, zoom,
# horizontal flip); the validation set is only rescaled to [0, 1].
TRAINING_DIR = "/tmp/rps/"
training_datagen = ImageDataGenerator(
    rescale = 1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
VALIDATION_DIR = "/tmp/rps-test-set/"
validation_datagen = ImageDataGenerator(rescale = 1./255)
train_generator = training_datagen.flow_from_directory(
    TRAINING_DIR,
    target_size=(150,150),
    class_mode='categorical'
)
validation_generator = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    target_size=(150,150),
    class_mode='categorical'
)
# Four Conv/MaxPool stages, then dropout + a dense head with a 3-way
# softmax (rock / paper / scissors).
model = tf.keras.models.Sequential([
    # Note the input shape is the desired size of the image 150x150 with 3 bytes color
    # This is the first convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # The second convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The third convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The fourth convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')
])
model.summary()
model.compile(loss = 'categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# NOTE(review): fit_generator is deprecated (removed in TF 2.x-era Keras in
# favor of model.fit) -- works only on the pinned older TF version; confirm.
history = model.fit_generator(train_generator, epochs=25, validation_data = validation_generator, verbose = 1)
model.save("rps.h5")
# + id="aeTRVCr6aosw" colab_type="code" outputId="7077d3e6-4ff1-4b52-ccaf-0dff000a1eae" colab={"base_uri": "https://localhost:8080/", "height": 298}
import matplotlib.pyplot as plt
# Plot training vs. validation accuracy per epoch.
# NOTE(review): the history keys 'acc'/'val_acc' are the pre-TF2 names; on
# newer Keras the metric is recorded as 'accuracy'/'val_accuracy' -- confirm
# against the pinned TF version.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
# + id="ZABJp7T3VLCU" colab_type="code" outputId="afb2f406-b35f-484f-97a5-152c6c508ee0" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 108}
import numpy as np
from google.colab import files
from keras.preprocessing import image
# Let the user upload images via the Colab widget, then run each through
# the trained model and print the 3-way class probabilities.
uploaded = files.upload()
for fn in uploaded.keys():
    # predicting images
    path = fn
    # Resize to the model's expected 150x150 input and add a batch axis.
    img = image.load_img(path, target_size=(150, 150))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)
    print(fn)
    print(classes)
|
cnn-in-tensorflow/rock_paper_scissors.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="Ueilyqz9VChj" colab_type="text"
# # Problem statement
#
# Good morning! Here's your coding interview problem for today.
#
# This problem was asked by Twitter.
#
# Implement an autocomplete system. That is, given a query string s and a set of all possible query strings, return all strings in the set that have s as a prefix.
#
# For example, given the query string ```de``` and the set of strings ```[dog, deer, deal]```, return ```[deer, deal]```.
#
# Hint: Try preprocessing the dictionary into a more efficient data structure to speed up queries.
# + [markdown] id="Z4RCguSuVChl" colab_type="text"
# # Solution one
# + id="2RYw-sRAVChp" colab_type="code" colab={}
def autocomplete(query, words):
    """Return all strings in *words* that have *query* as a prefix.

    BUG FIX: the original used ``query in word``, which is a *substring*
    test -- e.g. query 'og' would have matched 'dog' even though 'og' is
    not a prefix.  The problem statement asks for prefix matching, so use
    ``str.startswith`` instead.  Order of *words* is preserved.
    """
    assert (len(query) > 0), 'please provide query'
    assert (len(words) > 0), 'please provide words'
    return [word for word in words if word.startswith(query)]
# + [markdown] id="-QyTOCcwVCiD" colab_type="text"
# # Test
# + id="_alrC9lQVCiE" colab_type="code" colab={}
assert autocomplete('de', ['dog', 'deer', 'deal']) == ['deer', 'deal']
# + id="BjE9u3GQoJ7I" colab_type="code" colab={}
|
Problem11_LevelMedium.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R [conda env:xswap-analysis] *
# language: R
# name: conda-env-xswap-analysis-r
# ---
# +
library(tidyverse)
library(cowplot)
library(ggExtra)
library(ggbeeswarm)
# Import ggplot2's theme-merge operator so the custom theme below can
# override individual elements of a base theme.
`%+replace%` <- ggplot2::`%+replace%`
# Custom publication theme: classic ggplot2 theme with subtle facet strips,
# tight margins, and a thin panel border instead of axis lines.
theme_zietzm <- function(base_size = 11.5, base_family = "") {
    # Starts with theme_classic and then modify some parts
    # Theme options are documentated at http://docs.ggplot2.org/current/theme.html
    ggplot2::theme_classic(base_size = base_size, base_family = base_family) %+replace%
        ggplot2::theme(
            strip.background = ggplot2::element_rect(fill = NA, colour = 'grey90', size = 0),
            strip.text = element_text(vjust = 1, size = 10),
            plot.margin = ggplot2::margin(t=2, r=2, b=2, l=2, unit='pt'),
            legend.spacing = grid::unit(0.1, 'cm'),
            legend.key = ggplot2::element_blank(),
            panel.border=element_rect(fill = NA, color = 'black', size = 0.5),
            axis.line=element_line(size=0),
        )
}
# +
# Load the calibration metrics produced for the Hetionet task and build a
# violin+beeswarm plot of edge-prior AUROC per network type.
metrics <- read_csv('../../data/task1/calibration/hetionet_calibration_metrics.csv')
metrics %>% head
# +
options(repr.plot.width=4, repr.plot.height=3)
auroc_tasks <- metrics %>%
    # Keep only the xswap edge-prior feature; relabel network types to the
    # task names used in the paper (Task 1/2/3 labels added below).
    filter(feature == 'xswap_prior') %>%
    # mutate(network = network %>% factor(levels = c('full', 'sampled', 'other'))) %>%
    mutate(network = network %>% recode_factor(full = "unsampled", sampled = 'sampled', other = 'separate')) %>%
    ggplot(aes(x = network, y = auroc)) +
    # geom_boxplot(outlier.shape = NA) +
    geom_violin(fill = 'grey90') +
    geom_beeswarm(alpha = 0.5, color = 'darkblue', cex = 2.1) +
    theme_zietzm() +
    ylab('Edge prior AUROC') +
    xlab('Network type') +
    coord_cartesian(ylim = c(0.5, 1.1)) +
    scale_y_continuous(breaks = c(0.5, 0.6, 0.7, 0.8, 0.9, 1.0)) +
    # Task labels floated above each violin at y = 1.08.
    geom_label(data = data.frame(network = c('unsampled', 'sampled', 'separate'),
                                 label = c('Task 1', 'Task 2', 'Task 3'),
                                 auroc = rep(x = 1.08, times = 3)),
               aes(label = label)) +
    theme(axis.title.x = element_text(size = 12), axis.title.y = element_text(size = 12),
          axis.text.y = element_text(size = 10), axis.text.x = element_text(size = 11))
ggsave(filename = '../../img/auroc_dists.png', auroc_tasks, dpi = 300, width = 4, height = 3)
auroc_tasks
|
nb/5.fig3.auroc/plot_auroc.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import scipy as sp
import nmrglue as ng
from matplotlib import pyplot as plt
# Read a Varian (Agilent) FID: dic holds acquisition parameters, fid the
# complex time-domain signal.
dic, fid = ng.varian.read('HMDB00122-1H')
# -
plt.plot(fid)
plt.show()
# +
# NOTE(review): calling ``sp.fft(...)`` as a function only works on old
# SciPy where scipy.fft aliased numpy's FFT; in modern SciPy scipy.fft is a
# *module* and these calls raise TypeError -- confirm the pinned version.
spectra = np.abs(sp.fft(fid))
plt.plot(spectra)
plt.show()
# -
# Compare magnitude spectra of the imaginary part, real part, full complex
# signal, and the real part of the complex spectrum.
plt.plot(np.imag(fid))
plt.show()
plt.plot(np.abs(sp.fft(np.imag(fid))))
plt.show()
plt.plot(np.abs(sp.fft(np.real(fid))))
plt.show()
plt.plot(np.abs(sp.fft(fid)))
plt.show()
plt.plot(np.abs(np.real(sp.fft(fid))))
plt.show()
# +
import json
#json.dumps(np.real(fid).tolist())
#json.dumps(np.abs(np.real(sp.fft(fid))).tolist())
# -
# Build a ppm axis from the acquisition metadata and re-plot the spectrum.
# NOTE(review): the data were read with ng.varian.read but the unit
# conversion uses ng.bruker.guess_udic -- presumably intentional here, but
# verify the resulting ppm scale against a known reference peak.
udic = ng.bruker.guess_udic(dic, fid)
uc = ng.fileiobase.uc_from_udic(udic)
plt.plot(uc.ppm_scale(), np.abs(np.real(sp.fft(fid))))
plt.show()
|
glucose.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.6 64-bit ('bng')
# metadata:
# interpreter:
# hash: c74b756d0728a8af4513004bc68561ba9d13b4a385434128432f1a1d5c5d10b0
# name: Python 3.8.6 64-bit ('bng')
# ---
# +
import zipfile
from pathlib import Path
import os
from beamngpy import BeamNGpy, Scenario, Vehicle, setup_logging
# -
setup_logging()
# Connect to a local BeamNG instance and build a minimal scenario with a
# single AI vehicle on the empty 'smallgrid' map.
beamng = BeamNGpy('localhost', 64256)
scenario = Scenario('smallgrid',
                    "On how to use custom mods")
vehicle = Vehicle('ego_vehicle', model='etk800', licence='AI')
scenario.add_vehicle(vehicle, pos=(0,0,0), rot_quat=(0, 0, 1, 0))
scenario.make(beamng)
# setting up mod
# Package the local game-engine and vehicle-engine Lua files into a mod zip
# at the paths BeamNG expects (lua/ge/... and lua/vehicle/...).
myModPath = beamng.user / 'mods' / 'genericResearchMod.zip'
geCode = 'gameEngineCode.lua'
zipGEpath = str(Path('lua') / 'ge' / 'extensions' / 'util' / geCode)
veCode = 'vehicleEngineCode.lua'
zipVEpath = str(Path('lua') / 'vehicle' / 'extensions' / veCode)
localDir = Path(os.path.abspath('.'))
with zipfile.ZipFile(str(myModPath), 'w') as ziph:
    ziph.write(localDir / geCode, arcname=zipGEpath)
    ziph.write(localDir / veCode, arcname=zipVEpath)
# Launch the game with the packaged extension loaded, then start the scenario.
bng = beamng.open(extensions=["util/gameEngineCode"])
bng.load_scenario(scenario)
bng.start_scenario()
# data = dict(type='Foo', someName = 'YourName')
# bng.send(data)
# Send a custom message that the Lua extension handles.
data = dict(type='Bar', text = 'lorem ipsum...' )
bng.send(data)
vehicle.skt
bng.skt
|
examples/modInterface/usingmodsWithBNGpy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/m-triassi/ai-projects-472/blob/main/Project1_task2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-LcW5p1upN1d"
#
# Task
#
# This Task consists of using the provided Drug dataset that contains multiple features of various drugs. The goal is to classify a particular drug into 1 of 5 categories: DrugA, DrugB, DrugC, DrugX, or DrugY
# + id="jawP_PLcplFl"
# Import Dependencies
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import os, os.path
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
# + id="_3rR7Fb2_Mui"
# Mount Drive
## Only for development purposes
from google.colab import drive
# drive.mount('/content/drive', force_remount=True)
# + id="2A55UL4joxml" colab={"base_uri": "https://localhost:8080/"} outputId="12633406-7b3c-427d-d601-c4920374bce0"
# Load Dataset
headers = ['Age', 'Sex', 'BP', 'Cholesterol', 'Na_to_K', 'Drug']
# !gdown --id 1owPFp6_Ouaoc_j6BU5utJ07ZRStfqDsW
# [1:] drops the CSV's own header row, which was read as data because
# header=None was passed with explicit column names.
drugs = pd.read_csv("/content/drug200.csv", header=None, names=headers)[1:]
# + id="xOoZ3gMUpyQb" colab={"base_uri": "https://localhost:8080/", "height": 397} outputId="5b0d2361-5478-40f0-efb9-ed4a2e9fbe1a"
# Plot Distribution
drugs['Drug'].value_counts().plot(kind='bar')
drugs['Drug'].value_counts()
# + id="SIljpi0qp0OB" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="d2f4e86b-375c-46d6-c68a-d69dc8d82701"
# Convert all ordinal and nominal features in numerical format
value_map = {
    "Sex": {"M": 0, "F": 1},
    "BP": {"LOW": 0, "NORMAL": 1, "HIGH": 2},
    "Cholesterol": {"NORMAL": 0, "HIGH": 1},
    "Drug": {"drugA": 0, "drugB": 1, "drugC": 2, "drugX": 3, "drugY": 4}
}
drugs_numeric = drugs.replace(value_map)
drugs_numeric.head()
# + id="xrYAYjk5p4rp"
# Split Dataset (default 75/25 train/test split)
y = drugs_numeric["Drug"]
X_drug = drugs_numeric.loc[:, 'Age':'Na_to_K']
X_train, X_test, y_train, y_test = train_test_split(X_drug, y)
# + id="OfXz9Ga_rO3P"
# Define Performance text file function
## Should accept: description_string, confusion matrix, precision, recall, f1-measure, accuracym macro-average f1, weighted-average f1
def generateReport(description_string, confusion_matrix, class_report, accuracy, macro_average_f1, weighted_average_f1):
    """Append one classifier's evaluation summary to ``drugs_performance.txt``.

    Parameters:
        description_string: label for this attempt, e.g. "(a) Gaussian Naive-Bayes".
        confusion_matrix: confusion matrix (anything with a useful str()).
        class_report: sklearn classification report string (precision/recall/F1).
        accuracy / macro_average_f1 / weighted_average_f1: lists of scores
            from repeated runs; the first element is reported individually
            and the lists are averaged (accuracy also gets a std-dev).

    Notes: uses a ``with`` block so the file handle is closed even if a
    write raises (the original open/close pair leaked on error), and the
    repeated "title + value" writes are driven by a single list.  The
    "Confusion Martix" typo in the output header is also fixed.
    """
    with open("drugs_performance.txt", "a") as f:
        f.write("\n==========================================================")
        f.write("\nAttempt Description: " + description_string)
        f.write("\nConfusion Matrix:\n")
        f.write(str(confusion_matrix))
        f.write("\n")
        f.write("\nClassification Report (precision, recall, and F1-measure )\n")
        f.write(str(class_report))
        # (title, value) pairs in the same order the original wrote them.
        sections = [
            ("Accuracy", accuracy[0]),
            ("Macro Average F1", macro_average_f1[0]),
            ("Weighted Average F1", weighted_average_f1[0]),
            ("Average Accuracy", sum(accuracy) / len(accuracy)),
            ("Average Macro Average F1", sum(macro_average_f1) / len(macro_average_f1)),
            ("Average Weighted Average F1", sum(weighted_average_f1) / len(weighted_average_f1)),
            ("Accuracy Standard Deviation", np.std(accuracy)),
        ]
        for title, value in sections:
            f.write("\n" + title + ":\n")
            f.write(str(value))
            f.write("\n")
# + id="9n3wnIK5qBII"
# Classify using Gaussian Naive Bayes
from sklearn.naive_bayes import GaussianNB
nb_clf = GaussianNB()
nb_clf.fit(X_train, y_train)
nb_pred = nb_clf.predict(X_test)
# Append Performance
nb_cm = confusion_matrix(y_test, nb_pred)
nb_cr = classification_report(y_test, nb_pred)
nb_acc_score = [accuracy_score(y_test, nb_pred)]
nb_maf = [f1_score(y_test, nb_pred, average = 'macro')]
nb_waf = [f1_score(y_test, nb_pred, average = 'weighted')]
# Generate Averages: re-fit/re-score 9 more times (10 runs total).
# NOTE(review): GaussianNB is deterministic on a fixed split, so all 10
# runs are identical -- the averages only vary for stochastic models.
for i in range(0,9):
    nb_clf = GaussianNB()
    nb_clf.fit(X_train, y_train)
    nb_pred = nb_clf.predict(X_test)
    nb_acc_score.append(accuracy_score(y_test, nb_pred))
    nb_maf.append(f1_score(y_test, nb_pred, average = 'macro'))
    nb_waf.append(f1_score(y_test, nb_pred, average = 'weighted'))
# Report!
generateReport("(a) Gaussian Naive-Bayes",nb_cm,nb_cr,nb_acc_score, nb_maf,nb_waf)
# + id="5md4OcEfqNGv"
# Classify using Base Decision Tree (tree.DecisionTreeClassifier)
from sklearn.tree import DecisionTreeClassifier
bdt_clf = DecisionTreeClassifier()
bdt_clf.fit(X_train, y_train)
bdt_pred = bdt_clf.predict(X_test)
# Append Performance
bdt_cm = confusion_matrix(y_test, bdt_pred)
bdt_cr = classification_report(y_test, bdt_pred)
bdt_acc_score = [accuracy_score(y_test, bdt_pred)]
bdt_maf = [f1_score(y_test, bdt_pred, average = 'macro')]
bdt_waf = [f1_score(y_test, bdt_pred, average = 'weighted')]
# Generate Averages: re-fit/re-score 9 more times (10 runs total).
# BUG FIX: the original loop re-fitted a GaussianNB classifier (copy-paste
# from the previous cell) while appending the *unchanged* decision-tree
# scores nine times, so the averages and std-dev were meaningless.  Re-fit
# a fresh DecisionTreeClassifier and re-predict on each iteration.
for i in range(0,9):
    bdt_clf = DecisionTreeClassifier()
    bdt_clf.fit(X_train, y_train)
    bdt_pred = bdt_clf.predict(X_test)
    bdt_acc_score.append(accuracy_score(y_test, bdt_pred))
    bdt_maf.append(f1_score(y_test, bdt_pred, average = 'macro'))
    bdt_waf.append(f1_score(y_test, bdt_pred, average = 'weighted'))
# Report
generateReport("(b) Base Decision Tree",bdt_cm,bdt_cr,bdt_acc_score, bdt_maf,bdt_waf)
# + id="YeQvM-TYqRxb"
# Classify using Top Decision Tree (GridSearchCV)
# Grid-search over split criterion, depth and min-samples-split, using the
# base decision tree from the previous cell as the estimator.
from sklearn.model_selection import GridSearchCV
tdt_params = {'criterion':['gini','entropy'], 'max_depth':[3,7],'min_samples_split':[2,5,7] }
tdt_clf = GridSearchCV(bdt_clf,tdt_params)
tdt_clf.fit(X_train, y_train)
tdt_pred = tdt_clf.predict(X_test)
# Append Performance
tdt_cm = confusion_matrix(y_test, tdt_pred)
tdt_cr = classification_report(y_test, tdt_pred)
tdt_acc_score = [accuracy_score(y_test, tdt_pred)]
tdt_maf = [f1_score(y_test, tdt_pred, average = 'macro')]
tdt_waf = [f1_score(y_test, tdt_pred, average = 'weighted')]
# Generate Averages: re-run the grid search 9 more times (10 runs total).
for i in range(0,9):
    tdt_clf = GridSearchCV(bdt_clf,tdt_params)
    tdt_clf.fit(X_train, y_train)
    tdt_pred = tdt_clf.predict(X_test)
    tdt_acc_score.append(accuracy_score(y_test, tdt_pred))
    tdt_maf.append(f1_score(y_test, tdt_pred, average = 'macro'))
    tdt_waf.append(f1_score(y_test, tdt_pred, average = 'weighted'))
# Report
generateReport("(c) Top Decision Tree",tdt_cm,tdt_cr,tdt_acc_score, tdt_maf,tdt_waf)
# + id="SH2hUAbxqb5N" colab={"base_uri": "https://localhost:8080/"} outputId="3ffe019e-ee44-4cf8-8967-2dd49f025522"
# Classify using Perceptron (single-layer linear classifier, default params)
from sklearn.linear_model import Perceptron
pct_clf = Perceptron()
pct_clf.fit(X_train, y_train)
pct_pred = pct_clf.predict(X_test)
# Append Performance
pct_cm = confusion_matrix(y_test, pct_pred)
pct_cr = classification_report(y_test,pct_pred)
pct_acc_score = [accuracy_score(y_test, pct_pred)]
pct_maf = [f1_score(y_test, pct_pred, average = 'macro')]
pct_waf = [f1_score(y_test, pct_pred, average = 'weighted')]
# Generate Averages: re-fit/re-score 9 more times (10 runs total).
for i in range(0,9):
    pct_clf = Perceptron()
    pct_clf.fit(X_train, y_train)
    pct_pred = pct_clf.predict(X_test)
    pct_acc_score.append(accuracy_score(y_test, pct_pred))
    pct_maf.append(f1_score(y_test, pct_pred, average = 'macro'))
    pct_waf.append(f1_score(y_test, pct_pred, average = 'weighted'))
# Report
generateReport("(d) Perceptron",pct_cm,pct_cr,pct_acc_score, pct_maf,pct_waf)
# + id="cmqOUaKZqhb2" colab={"base_uri": "https://localhost:8080/"} outputId="e90c2b81-ff1d-44c0-d251-d0449cf7300d"
# Classify using Base Multi-layered Perceptron (neural network.MLPClassifier)
from sklearn.neural_network import MLPClassifier
bmlp_clf = MLPClassifier()
bmlp_clf.fit(X_train, y_train)
bmlp_pred = bmlp_clf.predict(X_test)
# Append Performance
bmlp_cm = confusion_matrix(y_test, bmlp_pred)
bmlp_cr = classification_report(y_test, bmlp_pred)
bmlp_acc_score = [accuracy_score(y_test, bmlp_pred)]
bmlp_maf = [f1_score(y_test, bmlp_pred, average = 'macro')]
bmlp_waf = [f1_score(y_test, bmlp_pred, average = 'weighted')]
# Generate Averages: re-fit/re-score 9 more times (10 runs total); MLP
# weight initialization is random, so runs genuinely differ here.
for i in range(0,9):
    bmlp_clf = MLPClassifier()
    bmlp_clf.fit(X_train, y_train)
    bmlp_pred = bmlp_clf.predict(X_test)
    bmlp_acc_score.append(accuracy_score(y_test, bmlp_pred))
    bmlp_maf.append(f1_score(y_test, bmlp_pred, average = 'macro'))
    bmlp_waf.append(f1_score(y_test, bmlp_pred, average = 'weighted'))
# Report
generateReport("(e) Base Multi-Layered Perceptron",bmlp_cm,bmlp_cr,bmlp_acc_score, bmlp_maf,bmlp_waf)
# + id="lwawrilKqme5" colab={"base_uri": "https://localhost:8080/"} outputId="07e4beea-ba3d-41a0-9256-9861bae2480a"
# Classify using Top Multi-layered Perceptron
# gridsearch over activation, solver and two hidden-layer architectures.
tmlp_params={'activation':['identity', 'logistic', 'tanh', 'relu'], 'solver':['sgd', 'adam'], 'hidden_layer_sizes':[(30,30,30), (100,50,25)]}
tmlp_clf = GridSearchCV(bmlp_clf,tmlp_params)
tmlp_clf.fit(X_train, y_train)
tmlp_pred = tmlp_clf.predict(X_test)
# Append Performance
tmlp_cm = confusion_matrix(y_test, tmlp_pred)
tmlp_cr = classification_report(y_test, tmlp_pred)
tmlp_acc_score = [accuracy_score(y_test, tmlp_pred)]
tmlp_maf = [f1_score(y_test, tmlp_pred, average = 'macro')]
tmlp_waf = [f1_score(y_test, tmlp_pred, average = 'weighted')]
# Generate Averages: re-run the grid search 9 more times (10 runs total).
for i in range(0,9):
    tmlp_clf = GridSearchCV(bmlp_clf,tmlp_params)
    tmlp_clf.fit(X_train, y_train)
    tmlp_pred = tmlp_clf.predict(X_test)
    tmlp_acc_score.append(accuracy_score(y_test, tmlp_pred))
    tmlp_maf.append(f1_score(y_test, tmlp_pred, average = 'macro'))
    tmlp_waf.append(f1_score(y_test, tmlp_pred, average = 'weighted'))
# Report
generateReport("(f) Top Multi-Layered Perceptron",tmlp_cm,tmlp_cr,tmlp_acc_score, tmlp_maf,tmlp_waf)
|
Project 1/Project1_task2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # RL and Advanced DL: Домашнее задание 1
#
# ## Части 2 и 3
#
# <NAME>, ML-32
# +
import gym
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import json
import numpy as np
import scipy as sp
import scipy.stats as st
import scipy.integrate as integrate
from collections import defaultdict
from scipy.stats import multivariate_normal
from sklearn import linear_model
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
import statsmodels.api as sm
from matplotlib.colors import LogNorm
from tqdm import tqdm
from gym import error, spaces, utils
from gym.utils import seeding
from gym.envs.registration import register, registry
from gym.envs.toy_text.blackjack import *
np.set_printoptions(precision=6, suppress=True)
sns.set_style("whitegrid")
sns.set_palette("colorblind")
palette = sns.color_palette()
figsize = (15,8)
legend_fontsize = 16
from matplotlib import rc
rc('font',**{'family':'sans-serif'})
rc('text', usetex=True)
rc('text.latex',preamble=r'\usepackage[utf8]{inputenc}')
rc('text.latex',preamble=r'\usepackage[russian]{babel}')
rc('figure', **{'dpi': 300})
# -
env = gym.make('Blackjack-v0', natural=True)
env
# # Часть вторая, удвоенная
#
# - 4. Реализуйте новый вариант блекджека на основе окружения Blackjack-v0 из OpenAI Gym, в котором разрешено удвоение ставки.
#
# - 5. Реализуйте метод обучения с подкреплением без модели для этого варианта, постройте графики, аналогичные п.2.
# +
class BlackjackDoublingEnv(BlackjackEnv):
    """Blackjack variant that adds a 'double down' action.

    Action space becomes Discrete(3): 0 = stand, 1 = hit (both inherited),
    2 = double down -- draw exactly one card, then stand, with the episode
    reward doubled.
    """

    def __init__(self, natural=True, sab=False):
        self.natural = natural
        # BUG FIX: the original called ``super().__init__(self)``, which
        # bound ``self`` (truthy) to the base class's first parameter
        # (``natural``) and silently dropped ``sab``.  Forward both
        # options explicitly instead.
        super().__init__(natural=natural, sab=sab)
        self.action_space = spaces.Discrete(3)

    def step(self, action):
        assert self.action_space.contains(action)
        if action == 2:
            # Double down: draw one card via the base env's "hit" ...
            observation, reward, done, info = super().step(1)
            if not done:
                # ... and if not busted, immediately stand.
                observation, reward, done, info = super().step(0)
            # Reward is doubled whether the hand busted or stood.
            return observation, reward*2, done, info
        else:
            # Plain stand/hit delegate to the base environment.
            return super().step(action)
# Expose the class on gym's blackjack module so the 'module:ClassName'
# entry-point string below can resolve it.
setattr(gym.envs.toy_text.blackjack, "BlackjackDoublingEnv", BlackjackDoublingEnv)
# Drop a stale registration (e.g. on notebook re-run) before re-registering.
if 'BlackjackDoubling-v0' in registry.env_specs:
    del registry.env_specs['BlackjackDoubling-v0']
gym.envs.register(
    id='BlackjackDoubling-v0',
    entry_point='gym.envs.toy_text.blackjack:BlackjackDoublingEnv',
)
# Sanity check: the class is visible on the module.
'BlackjackDoublingEnv' in dir(gym.envs.toy_text.blackjack)
# +
# Test environment: smoke-test the doubling env by always doubling.
env = gym.make('BlackjackDoubling-v0')
frame = env.reset()
reward = 0.0
is_done = False
# NOTE(review): is_done is never reset between iterations, so only the
# first pass actually plays an episode — presumably fine for a quick
# smoke test; confirm intent.
for i in range(3):
    display((f"episode{i}", frame, reward, is_done))
    while not is_done:
        frame, reward, is_done, _ = env.step(2)
        display((f"episode{i}", frame, reward, is_done))
env.close()
# +
EPISODES = 1_000_000
N_STATES = 32
N_ACTIONS = 3
DIALER = 17
SHORTFALL = 12


class BaselineStrategy:
    """Trivial strategy: always stand (action 0).

    Records the (state, action, reward) trajectory of each episode and
    supports first-visit Monte Carlo estimation of V(s) and Q(s, a).
    """

    def __init__(self, n_states=N_STATES, n_actions=N_ACTIONS):
        self.n_states = n_states
        self.n_actions = n_actions
        self.states = []
        self.actions = []
        self.rewards = []
        self.V = defaultdict(float)
        self.RetS = defaultdict(int)
        # Q is lazily initialised with uniform noise per newly seen state.
        self.Q = defaultdict(lambda: [np.random.uniform() for _ in range(n_actions)])
        self.RetSA = defaultdict(lambda: [0] * n_actions)

    def __str__(self):
        return "BaselineStrategy"

    def new_episode(self):
        """Clear the per-episode trajectory buffers."""
        self.states = []
        self.actions = []
        self.rewards = []

    def action(self, state):
        """Always stand."""
        return 0

    def collect(self, state, action, reward, state_next):
        """Record one (state, action, reward) transition."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)

    def first_visit_mc(self):
        """First-visit Monte Carlo update of V(s) and Q(s, a)."""
        ret = 0
        for step in reversed(range(len(self.states))):
            s = self.states[step]
            a = self.actions[step]
            ret += self.rewards[step]
            # Only the first visit to s in the episode contributes.
            if s not in self.states[:step]:
                self.RetS[s] += 1
                self.V[s] += (ret - self.V[s]) / self.RetS[s]
                self.RetSA[s][a] += 1
                self.Q[s][a] += (ret - self.Q[s][a]) / self.RetSA[s][a]
# -
def run_episode(strategy, env):
    """Play one episode of *env* under *strategy* and return the final reward.

    Resets the strategy's per-episode buffers, steps the environment until
    it reports the episode is done, and feeds every transition to
    ``strategy.collect``.
    """
    strategy.new_episode()
    env.reset()
    done, final_reward = False, 0.0
    while not done:
        obs_before = env._get_obs()
        chosen = strategy.action(obs_before)
        obs_after, final_reward, done, _ = env.step(chosen)
        strategy.collect(obs_before, chosen, final_reward, obs_after)
    env.close()
    return final_reward
def first_visit_mc_estimation(strategy,
                              env=None,
                              n_episodes=10000,
                              random_seed=None):
    """Run *n_episodes* episodes and return the running mean reward.

    Parameters
    ----------
    strategy : object
        Must expose ``new_episode``/``action``/``collect``/``first_visit_mc``.
    env : gym.Env, optional
        Environment to play; defaults to a fresh ``Blackjack-v0`` with
        ``natural=True``.  (FIX: the default env used to be created once
        at import time via a default argument and shared across calls.)
    n_episodes : int
        Number of episodes to play.
    random_seed : int, optional
        Seeds the env, its action space, and numpy for reproducibility.

    Returns
    -------
    numpy.ndarray
        Cumulative average reward after each episode.
    """
    if env is None:
        env = gym.make('Blackjack-v0', natural=True)
    if random_seed is not None:
        env.seed(random_seed)
        env.action_space.seed(random_seed)
        np.random.seed(random_seed)
    rewards = []
    for _ in range(n_episodes):
        rewards.append(run_episode(strategy, env))
        # Monte Carlo update after every finished episode.
        strategy.first_visit_mc()
    return np.true_divide(np.cumsum(rewards), np.arange(n_episodes) + 1)
# +
# Collection of estimation results so far, keyed by str(strategy).
strats = {}


def run_estimation(strategy,
                   env=None,
                   n_episodes=EPISODES,
                   strats=strats,
                   display_result=True,
                   min_rewards=-1.0):
    """Estimate *strategy* on *env*, record the result, optionally plot it.

    Parameters
    ----------
    strategy : object
        A BaselineStrategy-like strategy.
    env : gym.Env, optional
        Defaults to a fresh ``Blackjack-v0`` with ``natural=True``.
        (FIX: the default env used to be created once at import time via
        a default argument and shared by every call.)
    n_episodes : int
        Episodes per estimation run.
    strats : dict
        Results registry; intentionally defaults to the shared
        module-level dict so repeated calls accumulate into it.
    display_result : bool
        Show the final average reward.
    min_rewards : float
        Only plot curves whose final average reward reaches this value.

    Returns
    -------
    numpy.ndarray of running-average rewards.
    """
    if env is None:
        env = gym.make('Blackjack-v0', natural=True)
    # Run estimation.
    wins = first_visit_mc_estimation(strategy, env=env, n_episodes=n_episodes)
    # Record under the strategy's string form (includes hyperparameters).
    strats[str(strategy)] = {"strategy": strategy, "result": wins[-1], "wins": wins}
    # Display results.
    if display_result:
        display(f"{strategy}: result = {wins[-1]}")
    # Plot average rewards only for reasonably good strategies.
    if wins[-1] >= min_rewards:
        plt.plot(wins, label=strategy)
    return wins
# -
class DoublingStrategy(BaselineStrategy):
    """Naive strategy: double the bet on every decision (action 2)."""

    def __str__(self):
        return "DoublingStrategy"

    def action(self, state):
        """Always double."""
        return 2
# +
# Compare the two trivial strategies on the doubling environment.
strats2 = {}
plt.figure(figsize=(16,4))
# Baseline: always stand.
baseline_strategy2 = BaselineStrategy(n_actions=3)
baseline_strategy2_wins = run_estimation(baseline_strategy2,
                                         env=gym.make('BlackjackDoubling-v0'),
                                         strats=strats2)
# Always double.
doubling_strategy2 = DoublingStrategy(n_actions=3)
doubling_strategy2_wins = run_estimation(doubling_strategy2,
                                         env=gym.make('BlackjackDoubling-v0'),
                                         strats=strats2)
plt.axhline(y=0.0, color='black')
plt.xlim((-500, 50000))
#plt.ylim((-0.3, 0.1))
plt.legend(bbox_to_anchor=(1.0, 1.0))
plt.show()
# ## Стратегии обучения с подкреплением из первой части
#
class MCControlStrategy(BaselineStrategy):
    """Monte Carlo control with exploring starts: act greedily w.r.t. Q."""

    def __init__(self, seed=None, n_states=N_STATES, n_actions=N_ACTIONS):
        super().__init__(n_states=n_states, n_actions=n_actions)
        self.seed = seed
        np.random.seed(seed)
        # Re-create Q so its lazy random initialisation uses the seed above.
        self.Q = defaultdict(lambda: [np.random.uniform() for _ in range(n_actions)])

    def __str__(self):
        return f"MCControlStrategy(seed={self.seed})"

    def action(self, state):
        """Greedy action under the current Q estimates."""
        return np.argmax(self.Q[state])
class SoftMCControlStrategy(MCControlStrategy):
    """Monte Carlo control with an eps-soft behaviour policy."""

    def __init__(self, eps=0.95, seed=None, n_states=N_STATES, n_actions=N_ACTIONS):
        super().__init__(seed=seed, n_states=n_states, n_actions=n_actions)
        self.eps = eps

    def __str__(self):
        return f"SoftMCControlStrategy(eps={self.eps}, seed={self.seed})"

    def action(self, state):
        """Exploit with probability eps, otherwise explore uniformly."""
        if np.random.uniform() < self.eps:
            return np.argmax(self.Q[state])
        return np.random.randint(self.n_actions)
class SarsaStrategy:
    """On-policy SARSA control with an eps-greedy behaviour policy."""

    def __init__(self, eps=0.9, alpha=0.9, gamma=0.9, seed=None,
                 n_states=N_STATES, n_actions=N_ACTIONS):
        self.n_states = n_states
        self.n_actions = n_actions
        self.eps = eps
        self.alpha = alpha
        self.gamma = gamma
        self.seed = seed
        self.states = []
        self.actions = []
        self.rewards = []
        # The very first move of an episode is always "hit".
        self.next_action = 1
        np.random.seed(seed)
        self.V = defaultdict(float)
        self.RetS = defaultdict(int)
        self.Q = defaultdict(lambda: [np.random.uniform() for _ in range(n_actions)])
        self.RetSA = defaultdict(lambda: [0] * n_actions)

    def __str__(self):
        return (f"SarsaStrategy(eps={self.eps}, alpha={self.alpha}, "
                f"gamma={self.gamma}, seed={self.seed})")

    def new_episode(self):
        """Clear the per-episode buffers and reset the opening action."""
        self.states = []
        self.actions = []
        self.rewards = []
        self.next_action = 1

    def get_next_action(self, state):
        """eps-greedy: exploit with probability eps, else explore."""
        if np.random.uniform() < self.eps:
            return np.argmax(self.Q[state])
        return np.random.randint(self.n_actions)

    def action(self, state):
        # HACK: always stand at 19+ regardless of the learned policy.
        if state[0] < 19:
            return self.next_action
        return 0

    def collect(self, state, action, reward, next_state):
        """Record the transition and apply the online SARSA update."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.next_action = self.get_next_action(next_state)
        # SARSA: Q(s,a) += alpha * (r + gamma * Q(s',a') - Q(s,a)).
        td_target = reward + self.gamma * self.Q[next_state][self.next_action]
        self.Q[state][action] += self.alpha * (td_target - self.Q[state][action])

    def first_visit_mc(self):
        """First-visit Monte Carlo update of V only (Q is learned online)."""
        ret = 0
        for step in reversed(range(len(self.states))):
            s = self.states[step]
            ret += self.rewards[step]
            if s not in self.states[:step]:
                self.RetS[s] += 1
                self.V[s] += (ret - self.V[s]) / self.RetS[s]
class QlearningStrategy:
    """Off-policy Q-learning control with an eps-greedy behaviour policy."""

    def __init__(self, eps=0.9, alpha=0.9, gamma=0.9, seed=None,
                 n_states=N_STATES, n_actions=N_ACTIONS):
        self.n_states = n_states
        self.n_actions = n_actions
        self.eps = eps
        self.alpha = alpha
        self.gamma = gamma
        self.seed = seed
        self.states = []
        self.actions = []
        self.rewards = []
        np.random.seed(seed)
        self.V = defaultdict(float)
        self.RetS = defaultdict(int)
        self.Q = defaultdict(lambda: [np.random.uniform() for _ in range(n_actions)])
        self.RetSA = defaultdict(lambda: [0] * n_actions)

    def __str__(self):
        return (f"QlearningStrategy(eps={self.eps}, alpha={self.alpha}, "
                f"gamma={self.gamma}, seed={self.seed})")

    def new_episode(self):
        """Clear the per-episode buffers."""
        self.states = []
        self.actions = []
        self.rewards = []

    def action(self, state):
        """eps-greedy behaviour policy."""
        if np.random.uniform() < self.eps:
            return np.argmax(self.Q[state])
        return np.random.randint(self.n_actions)

    def collect(self, state, action, reward, next_state):
        """Record the transition and apply the online Q-learning update."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        # Q-learning bootstraps from the best next-state action.
        best_next = np.amax(self.Q[next_state])
        td_target = reward + self.gamma * best_next
        self.Q[state][action] += self.alpha * (td_target - self.Q[state][action])

    def first_visit_mc(self):
        """First-visit Monte Carlo update of V only (Q is learned online)."""
        ret = 0
        for step in reversed(range(len(self.states))):
            s = self.states[step]
            ret += self.rewards[step]
            if s not in self.states[:step]:
                self.RetS[s] += 1
                self.V[s] += (ret - self.V[s]) / self.RetS[s]
# +
MAX_STRATEGIES = 3
MIN_REWARDS = -0.1
plt.figure(figsize=(16,4))
# Re-plot the baseline and doubling curves from the earlier cell.
display(f"{baseline_strategy2}: result = {baseline_strategy2_wins[-1]}")
plt.plot(baseline_strategy2_wins, label=baseline_strategy2)
display(f"{doubling_strategy2}: result = {doubling_strategy2_wins[-1]}")
plt.plot(doubling_strategy2_wins, label=doubling_strategy2)
# Train each RL strategy MAX_STRATEGIES times with seeds 0..MAX_STRATEGIES-1.
for i in tqdm(range(MAX_STRATEGIES)):
    strategy = MCControlStrategy(seed=i, n_actions=3)
    run_estimation(strategy, env=gym.make('BlackjackDoubling-v0'), display_result=False, strats=strats2)
    strategy = SoftMCControlStrategy(eps=0.97, seed=i, n_actions=3)
    run_estimation(strategy, env=gym.make('BlackjackDoubling-v0'), display_result=False, strats=strats2)
    strategy = SarsaStrategy(eps=0.97, seed=i, alpha=0.05, gamma=0.005, n_actions=3)
    run_estimation(strategy, env=gym.make('BlackjackDoubling-v0'), display_result=False, strats=strats2)
    strategy = QlearningStrategy(eps=0.97, seed=i, alpha=0.05, gamma=0.005, n_actions=3)
    run_estimation(strategy, env=gym.make('BlackjackDoubling-v0'), display_result=False, strats=strats2)
# Summary table sorted by final average reward.
df_strats2 = pd.DataFrame(strats2).T \
    .sort_values(by="result",ascending=False)
display(df_strats2)
plt.axhline(y=0.0, color='black')
#plt.xlim((-500, 500000))
plt.ylim((-0.7, 0.1))
plt.legend(bbox_to_anchor=(1.0, 1.0))
plt.show()
# -
# ## Выводы:
#
# - обыграть казино не удалось
# - стратегии ведут себя аналогично тому, как в первой части задания
# - Q-learning похоже опять не до конца сошлась
# # Часть третья, в главной роли — <NAME>
#
# - 6. Реализуйте вариант окружения Blackjack-v0 из предыдущей части (с удвоением), в котором игрок имеет возможность “считать карты” в колоде. Это можно сделать разными способами; возможно, вам поможет статья википедии о блекджеке (а возможно, и нет).
#
# - 7. Реализуйте метод обучения с подкреплением без модели для этого варианта, постройте графики, аналогичные п.2.
# +
class BlackjackCountingEnv(BlackjackEnv):
    """Blackjack with doubling (action 2) and card counting.

    The deck is an explicit 52-card shoe; every drawn card updates a
    running count (the "Halves" counting system), which is exposed as the
    fourth component of the observation so an agent can learn
    count-dependent play.  The shoe is rebuilt (and the count reset) once
    fewer than 15 cards remain.
    """

    def __init__(self, natural=False, sab=False):
        # Card ranks 1..10 mapped to their "Halves" count contribution.
        self.points = dict(zip(range(1, 11),
                               [-1, 0.5, 1, 1, 1.5, 1, 0.5, 0, -0.5, -1]))
        self.deck = self._fresh_deck()
        self.counter = 0
        # BUG FIX: the original called ``super().__init__(self)``, which
        # passed the env instance itself as the ``natural`` argument; the
        # flags are set explicitly below instead.
        super().__init__()
        # Three actions: 0 = stand, 1 = hit, 2 = double.
        self.action_space = spaces.Discrete(3)
        self.natural = natural
        self.sab = sab

    @staticmethod
    def _fresh_deck():
        """Return a full 52-card deck (ace=1, all face cards count as 10)."""
        return [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10] * 4

    def reset(self):
        # Reshuffle: rebuild the shoe and reset the count when it runs low.
        if len(self.deck) < 15:
            self.deck = self._fresh_deck()
            self.counter = 0
        self.dealer = self.draw_hand(self.np_random)
        self.player = self.draw_hand(self.np_random)
        return self._get_obs()

    def draw_card(self, np_random):
        """Remove one random card from the shoe, updating the running count."""
        index = np_random.choice(range(len(self.deck)))
        self.counter += self.points[self.deck[index]]
        return int(self.deck.pop(index))

    def draw_hand(self, np_random):
        """Deal a two-card hand from the shoe."""
        return [self.draw_card(np_random), self.draw_card(np_random)]

    def hit(self):
        """Deal the player one card; the episode ends with -1 on bust."""
        self.player.append(self.draw_card(self.np_random))
        if is_bust(self.player):
            return -1.0, True
        return 0.0, False

    def stand(self):
        """Play out the dealer's hand and score the game."""
        done = True
        while sum_hand(self.dealer) < 17:
            self.dealer.append(self.draw_card(self.np_random))
        reward = cmp(score(self.player), score(self.dealer))
        if self.sab and is_natural(self.player) and not is_natural(self.dealer):
            # Player automatically wins. Rules consistent with S&B.
            reward = 1.0
        elif (
            not self.sab
            and self.natural
            and is_natural(self.player)
            and reward == 1.0
        ):
            # Natural gives extra points, but doesn't autowin (legacy rules).
            reward = 1.5
        return reward, done

    def step(self, action):
        assert self.action_space.contains(action)
        if action == 2:
            # Double: draw exactly one card, then stand; payout is doubled
            # (including a doubled loss when the extra card busts).
            reward, done = self.hit()
            if not done:
                reward, done = self.stand()
            reward *= 2
        elif action == 1:
            reward, done = self.hit()
        else:
            reward, done = self.stand()
        return self._get_obs(), reward, done, {}

    def _get_obs(self):
        # Base observation (player sum, dealer card, usable ace) plus the
        # current running count.
        obs = super()._get_obs()
        return (obs[0], obs[1], obs[2], self.counter)
# Expose the class on gym's blackjack module so the 'module:ClassName'
# entry-point string below can resolve it.
setattr(gym.envs.toy_text.blackjack, "BlackjackCountingEnv", BlackjackCountingEnv)
# Drop a stale registration (e.g. on notebook re-run) before re-registering.
if 'BlackjackCounting-v0' in registry.env_specs:
    del registry.env_specs['BlackjackCounting-v0']
gym.envs.register(
    id='BlackjackCounting-v0',
    entry_point='gym.envs.toy_text.blackjack:BlackjackCountingEnv',
)
# Sanity check: the class is visible on the module.
'BlackjackCountingEnv' in dir(gym.envs.toy_text.blackjack)
# +
# Test environment: play a few episodes with random actions.
env = gym.make('BlackjackCounting-v0', natural=True)
for i in range(3):
    frame = env.reset()
    reward = 0.0
    is_done = False
    # -1 marks "no action taken yet" in the display tuple.
    display((i, ":", frame, -1, reward, is_done))
    while not is_done:
        action = env.action_space.sample()
        frame, reward, is_done, _ = env.step(action)
        display((i, ":", frame, action, reward, is_done))
env.close()
# +
MAX_STRATEGIES = 3
MIN_REWARDS = -0.1
plt.figure(figsize=(16,4))
# Fresh entropy for this run (seed() with no args re-seeds from the OS).
np.random.seed()
# One independent seed per (round, strategy): 4 strategies per round.
seeds = np.random.randint(1000, size=MAX_STRATEGIES*4)
strats3 = {}
for i in tqdm(range(MAX_STRATEGIES)):
    # BUG FIX: the original indexed seeds[i]..seeds[i+3], so consecutive
    # rounds reused overlapping seeds; take a disjoint slice per round.
    s0, s1, s2, s3 = seeds[4*i:4*i + 4]
    strategy = MCControlStrategy(seed=s0, n_actions=3)
    run_estimation(strategy, env=gym.make('BlackjackCounting-v0'), display_result=False, strats=strats3)
    strategy = SoftMCControlStrategy(eps=0.97, seed=s1, n_actions=3)
    run_estimation(strategy, env=gym.make('BlackjackCounting-v0'), display_result=False, strats=strats3)
    strategy = SarsaStrategy(eps=0.97, alpha=0.05, gamma=0.005, seed=s2, n_actions=3)
    run_estimation(strategy, env=gym.make('BlackjackCounting-v0'), display_result=False, strats=strats3)
    strategy = QlearningStrategy(eps=0.97, alpha=0.05, gamma=0.005, seed=s3, n_actions=3)
    run_estimation(strategy, env=gym.make('BlackjackCounting-v0'), display_result=False, strats=strats3)
# Summary table sorted by final average reward.
df_strats3 = pd.DataFrame(strats3).T \
    .sort_values(by="result",ascending=False)
display(df_strats3)
plt.axhline(y=0.0, color='black')
# plt.xlim((-500, 100000))
plt.ylim((-0.7, 0.1))
plt.legend(bbox_to_anchor=(1.0, 1.0))
plt.show()
# -
# ## Выводы:
#
# - обыграть казино не удалось
# - результаты хуже, чем в первых двух частях из-за увеличившегося количества состояний (добавился счетчик)
# - Q-learning похоже опять не до конца сошлась
# - Q-learning долго запрягает, но после 750К быстро едет
|
RL_HW1/RL_HW1_part2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 6) Git for good programming practices
# Related material:
#
# - Main basis (with many thanks) for this ipynb: https://swcarpentry.github.io/git-novice/
# - Related nice reference: http://swcarpentry.github.io/git-novice/reference
# - Additional Git reference: https://git-scm.com/docs
# - https://git-scm.com/book/en/v2
# - https://gist.github.com/trey/2722934
# ## Version control with git
#
# [Git](https://git-scm.com/) is an extremely useful and broadly adopted version control system. Have you ever saved copies of files under different names as you worked on them, to enable going back to older versions?
#
# 
# "Piled Higher and Deeper" by <NAME>, http://www.phdcomics.com
# Git (and other version control software) avoids saving many almost-identical versions of files and then having to sort through them. It makes it very easy to store incremental changes and then compare them. When writing programs, this is especially useful as you might temporarily have to break some functionality while extending it; wouldn't it be nice to make a separate "branch" to work on that, and merge it back when the new functionality is complete?
#
# This is such a common problem that multiple version control tools have been created. Git is the most common. It becomes especially helpful when there are multiple people that want to work on the same project/file.
#
# Git is not to be confused with GitHub; [GitHub](https://github.com/) is an online host (website) for projects which interfaces with git. It is extremely helpful for sharing code, managing projects, and team project development. We will be using both.
# ## Some visuals of what git can do for you:
#
# Save changes sequentially:
# 
#
# Make independent changes to the same file:
# 
#
# Merge changes: If there are conflicts, you will have a chance to review them.
# 
#
# The entire history of saved states (**commits**) and the metadata about them make up a particular git **repository**. These repositories are saved on individual machines, but can easily be kept in sync across different computers, facilitating collaboration among different people. Repositories do not need a central server to host the **repo** (the common shorthand for repository). Thus, git repos are described as distributed.
# ## When first using Git on a machine
#
# Below are a few examples of configurations we will set as we get started with Git:
#
# - our name and email address,
# - what our preferred text editor is,
# - and that we want to use these settings globally (i.e. for every project).
# On a command line, Git commands are written as `git verb options`,
# where `verb` is what we actually want to do and `options` is additional optional information which may be needed for the `verb`. So here is how Dracula sets up his new laptop:
#
# ~~~
# $ git config --global user.name "<NAME>"
# $ git config --global user.email "<EMAIL>"
# ~~~
#
# The user name and email you set will be associated with your subsequent Git activity,
# which means that any changes pushed to [GitHub](https://github.com/),
# [BitBucket](https://bitbucket.org/), [GitLab](https://gitlab.com/) or another Git host server.
# ### Line Endings
#
# As with other keys, when you hit <kbd>Return</kbd> on your keyboard,
# your computer encodes this input as a character.
# Different operating systems use different character(s) to represent the end of a line.
# (You may also hear these referred to as newlines or line breaks.)
# Because Git uses these characters to compare files,
# it may cause unexpected issues when editing a file on different machines.
#
# Although it is beyond the scope of this lesson, you can read more about this issue on
# [on this GitHub page](https://help.github.com/articles/dealing-with-line-endings/).
#
# You can change the way Git recognizes and encodes line endings
# using the `core.autocrlf` command to `git config`. Thus, the following settings are recommended:
#
# On macOS and Linux:
#
# ~~~
# $ git config --global core.autocrlf input
# ~~~
#
# And on Windows:
# ~~~
# $ git config --global core.autocrlf true
# ~~~
# We will be interacting with [GitHub](https://github.com/) and so the email address used should be the same as the one used when setting up your GitHub account. If you are concerned about privacy, please review [GitHub's instructions for keeping your email address private](https://help.github.com/articles/keeping-your-email-address-private/).
#
# If you elect to use a private email address with GitHub, then use that same email address for the `user.email` value, e.g. `<EMAIL>` replacing `username` with your GitHub one. You can change the email address later on by using the `git config` command again.
#
# Dracula also has to set his favorite text editor, following this table:
#
# | Editor | Configuration command |
# |:-------------------|:-------------------------------------------------|
# | Atom | `$ git config --global core.editor "atom --wait"`|
# | nano | `$ git config --global core.editor "nano -w"` |
# | BBEdit (Mac, with command line tools) | `$ git config --global core.editor "bbedit -w"` |
# | Sublime Text (Mac) | `$ git config --global core.editor "/Applications/Sublime\ Text.app/Contents/SharedSupport/bin/subl -n -w"` |
# | Sublime Text (Win, 32-bit install) | `$ git config --global core.editor "'c:/program files (x86)/sublime text 3/sublime_text.exe' -w"` |
# | Sublime Text (Win, 64-bit install) | `$ git config --global core.editor "'c:/program files/sublime text 3/sublime_text.exe' -w"` |
# | Notepad++ (Win, 32-bit install) | `$ git config --global core.editor "'c:/program files (x86)/Notepad++/notepad++.exe' -multiInst -notabbar -nosession -noPlugin"`|
# | Notepad++ (Win, 64-bit install) | `$ git config --global core.editor "'c:/program files/Notepad++/notepad++.exe' -multiInst -notabbar -nosession -noPlugin"`|
# | Kate (Linux) | `$ git config --global core.editor "kate"` |
# | Gedit (Linux) | `$ git config --global core.editor "gedit --wait --new-window"` |
# | Scratch (Linux) | `$ git config --global core.editor "scratch-text-editor"` |
# | Emacs | `$ git config --global core.editor "emacs"` |
# | Vim | `$ git config --global core.editor "vim"` |
#
# It is possible to reconfigure the text editor for Git whenever you want to change it.
# The four commands we just ran above only need to be run once: the flag `--global` tells Git
# to use the settings for every project, in your user account, on this computer.
#
# You can check your settings at any time:
#
# ~~~
# $ git config --list
# ~~~
#
# You can change your configuration as many times as you want: just use the
# same commands to choose another editor or update your email address.
# ### Git Help and Manual
#
# If you forget a `git` command, you can access the list of commands by using `-h` and access the Git manual by using `--help` :
#
# ~~~
# $ git config -h
# $ git config --help
# ~~~
# ## Let's make a Git repo!
#
# We will make a repo for a project Wolfman and Dracula are working on, investigating if it is possible to send a planetary lander to Mars.
#
# 
# First, let's create a directory in `Desktop` folder for our work and then move into that directory:
#
# ~~~
# $ cd ~/Desktop
# $ mkdir planets
# $ cd planets
# ~~~
#
# Then we tell Git to make `planets` a repository (where Git can store versions of our files):
#
# ~~~
# $ git init
# ~~~
# Note: that `git init` will create a repository that
# includes subdirectories and their files---there is no need to create
# separate repositories nested within the `planets` repository, whether
# subdirectories are present from the beginning or added later. Also, note
# that the creation of the `planets` directory and its initialization as a
# repository are completely separate processes.
#
# If we use `ls` to show the directory's contents, it appears that nothing has changed:
#
# ~~~
# $ ls
# ~~~
#
# But if we add the `-a` flag to show everything, we can see that Git has created a hidden directory within `planets` called `.git`:
#
# ~~~
# $ ls -a
# ~~~
#
# ~~~
# . .. .git
# ~~~
# Git uses this special sub-directory to store all the information about the project,
# including all files and sub-directories located within the project's directory.
# If we ever delete the `.git` sub-directory,
# we will lose the project's history.
#
# We can check that everything is set up correctly
# by asking Git to tell us the status of our project:
#
# ~~~
# $ git status
# ~~~
#
# ~~~
# On branch master
#
# Initial commit
#
# nothing to commit (create/copy files and use "git add" to track)
# ~~~
#
# If you are using a different version of `git`, the exact wording of the output might be slightly different.
# ### Correcting `git init` Mistakes
# #### USE WITH CAUTION!
#
# To undo accidental creation of a git repo (e.g. did init when in the Desktop directory, instead of after changing directory to planets), you can just remove the `.git` within a current directory using the following command:
#
# ~~~
# $ rm -rf moons/.git
# ~~~
#
# But be careful! Running this command in the wrong directory, will remove the entire Git history of a project you might want to keep. Therefore, always check your current directory using the command `pwd`.
# ## Tracking Changes
#
# Let's create a file called `mars.txt` within the folder `planets` that contains some notes
# about the Red Planet's suitability as a base.
# We'll use `vim` to edit the file; this editor does not have to be the `core.editor` you set globally earlier.
#
# ~~~
# $ vim mars.txt
# ~~~
#
# Type the text below into the `mars.txt` file:
#
# ~~~
# Cold and dry, but everything is my favorite color
# ~~~
# `mars.txt` now contains a single line, which we can see by running:
#
# ~~~
# $ ls
# ~~~
#
# ~~~
# mars.txt
# ~~~
#
# ~~~
# $ cat mars.txt
# ~~~
#
# ~~~
# Cold and dry, but everything is my favorite color
# ~~~
# If we check the status of our project again, Git tells us that it's noticed the new file:
#
# ~~~
# $ git status
# ~~~
#
# ~~~
# On branch master
#
# Initial commit
#
# Untracked files:
# (use "git add <file>..." to include in what will be committed)
#
# mars.txt
# nothing added to commit but untracked files present (use "git add" to track)
# ~~~
# The "untracked files" message means that there's a file in the directory
# that Git isn't keeping track of. We can tell Git to track a file using `git add`:
#
# ~~~
# $ git add mars.txt
# ~~~
#
# and then check that the right thing happened:
#
# ~~~
# $ git status
# ~~~
#
# ~~~
# On branch master
#
# Initial commit
#
# Changes to be committed:
# (use "git rm --cached <file>..." to unstage)
#
# new file: mars.txt
#
# ~~~
#
# *Note*: You can add individual files by adding them by name, or any changes listed after typing `status` (new/modified/deleted files) by typing `git add .`
# Git now knows that it's supposed to keep track of `mars.txt`,
# but it hasn't recorded these changes as a commit yet.
# To get it to do that, we need to run one more command:
#
# ~~~
# $ git commit -m "Start notes on Mars as a base"
# ~~~
#
# ~~~
# [master (root-commit) f22b25e] Start notes on Mars as a base
# 1 file changed, 1 insertion(+)
# create mode 100644 mars.txt
# ~~~
# When we run `git commit`,
# Git takes everything we have told it to save by using `git add`
# and stores a copy permanently inside the special `.git` directory.
# This permanent copy is called a [commit](https://git-scm.com/docs/git-commit)
# (or [revision](https://git-scm.com/docs/gitrevisions)) and its short identifier is `f22b25e`.
# Your commit may have another identifier.
#
# We use the `-m` flag (for "message")
# to record a short, descriptive, and specific comment that will help us remember later on what we did and why.
# If we just run `git commit` without the `-m` option,
# Git will launch `vim` (or whatever other editor we configured as `core.editor`)
# so that we can write a longer message.
#
# [Good commit messages](https://chris.beams.io/posts/git-commit/) start with a brief (< 50 characters) statement about the changes made in the commit. Generally, the message should complete the sentence "If applied, this commit will ...".
#
# If you want to go into more detail, add a blank line between the summary line and your additional notes. Use this additional space to explain why you made changes and/or what their impact will be.
# If we run `git status` now:
#
# ~~~
# $ git status
# ~~~
#
# ~~~
# On branch master
# nothing to commit, working directory clean
# ~~~
#
# it tells us everything is up to date.
# If we want to know what we've done recently,
# we can ask Git to show us the project's history using `git log`:
#
# ~~~
# $ git log
# ~~~
#
# ~~~
# commit f22b25e3233b4645dabd0d81e651fe074bd8e73b
# Author: <NAME> <<EMAIL>>
# Date: Thu Aug 22 09:51:46 2013 -0400
#
# Start notes on Mars as a base
# ~~~
# `git log` lists all commits made to a repository in reverse chronological order.
# The listing for each commit includes
# the commit's full identifier
# (which starts with the same characters as
# the short identifier printed by the `git commit` command earlier),
# the commit's author,
# when it was created,
# and the log message Git was given when the commit was created.
# ### Where Are My Changes?
#
# If we run `ls` at this point, we will still see just one file called `mars.txt`.
# That's because Git saves information about files' history
# in the special `.git` directory mentioned earlier
# so that our filesystem doesn't become cluttered
# (and so that we can't accidentally edit or delete an old version).
#
# Now suppose Dracula adds more information to the file.
#
# ~~~
# $ vim mars.txt
# ~~~
#
# ~~~
# Cold and dry, but everything is my favorite color
# The two moons may be a problem for Wolfman
# ~~~
#
# When we run `git status` now,
# it tells us that a file it already knows about has been modified:
#
# ~~~
# $ git status
# ~~~
#
# ~~~
# On branch master
# Changes not staged for commit:
# (use "git add <file>..." to update what will be committed)
# (use "git checkout -- <file>..." to discard changes in working directory)
#
# modified: mars.txt
#
# no changes added to commit (use "git add" and/or "git commit -a")
# ~~~
# The last line is the key phrase: "no changes added to commit".
#
# We have changed this file, but we haven't told Git we will want to save those changes
# (which we do with `git add`) nor have we saved them (which we do with `git commit`).
#
# If you want to review your changes before saving them. We do this using `git diff`.
# This shows us the differences between the current state
# of the file and the most recently saved version:
#
# ~~~
# $ git diff
# ~~~
#
# ~~~
# diff --git a/mars.txt b/mars.txt
# index df0654a..315bf3a 100644
# --- a/mars.txt
# +++ b/mars.txt
# @@ -1 +1,2 @@
# Cold and dry, but everything is my favorite color
# +The two moons may be a problem for Wolfman
# ~~~
# The output is a series of commands for tools like editors and `patch`
# telling them how to reconstruct one file given the other:
#
# 1. The first line tells us that Git is producing output similar to the Unix `diff` command
# comparing the old and new versions of the file.
# 2. The second line tells exactly which versions of the file
# Git is comparing;
# `df0654a` and `315bf3a` are unique computer-generated labels for those versions.
# 3. The third and fourth lines once again show the name of the file being changed.
# 4. The remaining lines are the most interesting, they show us the actual differences
# and the lines on which they occur.
# The `+` marker in the first column shows where we added a line.
# Now to commit:
#
# ~~~
# $ git commit -m "Add concerns about effects of Mars' moons on Wolfman"
# $ git status
# ~~~
#
# ~~~
# On branch master
# Changes not staged for commit:
# (use "git add <file>..." to update what will be committed)
# (use "git checkout -- <file>..." to discard changes in working directory)
#
# modified: mars.txt
#
# no changes added to commit (use "git add" and/or "git commit -a")
# ~~~
# Why did we get that note?
#
# ~~~
# $ git add mars.txt
# $ git commit -m "Add concerns about effects of Mars' moons on Wolfman"
# ~~~
#
# ~~~
# [master 34961b1] Add concerns about effects of Mars' moons on Wolfman
# 1 file changed, 1 insertion(+)
# ~~~
#
# Git insists that we add files to the set we want to commit before actually committing anything. This allows us to commit our changes in stages and capture changes in logical portions rather than only large batches.
#
# For example, suppose we're adding a few citations to relevant research to our thesis.
# We might want to commit those additions,
# and the corresponding bibliography entries,
# but *not* commit some of our work drafting the conclusion
# (which we haven't finished yet).
#
# To allow for this, Git has a special *staging area* where it keeps track of things that have been added to
# the current [changeset](http://swcarpentry.github.io/git-novice/reference#changeset)
# but not yet committed.
# ### Staging Area
#
# If you think of Git as taking snapshots of changes over the life of a project,
# `git add` specifies *what* will go in a snapshot
# (putting things in the staging area),
# and `git commit` then *actually takes* the snapshot, and
# makes a permanent record of it (as a commit).
# If you don't have anything staged when you type `git commit`,
# Git will prompt you to use `git commit -a` or `git commit --all`.
#
# Only do this if you are certain you know what will go into the commit, at least by checking `git status` first!
#
# 
# Let's watch as our changes to a file move from our editor
# to the staging area and into long-term storage.
# First, we'll add another line to the file:
#
# ~~~
# Cold and dry, but everything is my favorite color
# The two moons may be a problem for Wolfman
# But the Mummy will appreciate the lack of humidity
# ~~~
#
# ~~~
# $ git diff
# ~~~
#
# ~~~
# diff --git a/mars.txt b/mars.txt
# index 315bf3a..b36abfd 100644
# --- a/mars.txt
# +++ b/mars.txt
# @@ -1,2 +1,3 @@
# Cold and dry, but everything is my favorite color
# The two moons may be a problem for Wolfman
# +But the Mummy will appreciate the lack of humidity
# ~~~
# Now let's put that change in the staging area
# and see what `git diff` reports:
#
# ~~~
# $ git add mars.txt
# $ git diff
# ~~~
#
# There is no output: as far as Git can tell,
# there's no difference between what it's been asked to save permanently
# and what's currently in the directory.
# However, if we do this:
#
# ~~~
# $ git diff --staged
# ~~~
#
# ~~~
# diff --git a/mars.txt b/mars.txt
# index 315bf3a..b36abfd 100644
# --- a/mars.txt
# +++ b/mars.txt
# @@ -1,2 +1,3 @@
# Cold and dry, but everything is my favorite color
# The two moons may be a problem for Wolfman
# +But the Mummy will appreciate the lack of humidity
# ~~~
#
# it shows us the difference between the last committed change
# and what's in the staging area.
# Let's save our changes:
#
# ~~~
# $ git commit -m "Discuss concerns about Mars' climate for Mummy"
# ~~~
#
# ~~~
# [master 005937f] Discuss concerns about Mars' climate for Mummy
# 1 file changed, 1 insertion(+)
# ~~~
#
# check our status:
#
# ~~~
# $ git status
# ~~~
#
# ~~~
# On branch master
# nothing to commit, working directory clean
# ~~~
# and look at the history of what we've done so far:
#
# ~~~
# $ git log
# ~~~
#
# ~~~
# commit 005937fbe2a98fb83f0ade869025dc2636b4dad5
# Author: <NAME> <<EMAIL>>
# Date: Thu Aug 22 10:14:07 2013 -0400
#
# Discuss concerns about Mars' climate for Mummy
#
# commit 34961b159c27df3b475cfe4415d94a6d1fcd064d
# Author: <NAME> <<EMAIL>>
# Date: Thu Aug 22 10:07:21 2013 -0400
#
# Add concerns about effects of Mars' moons on Wolfman
#
# commit f22b25e3233b4645dabd0d81e651fe074bd8e73b
# Author: <NAME> <<EMAIL>>
# Date: Thu Aug 22 09:51:46 2013 -0400
#
# Start notes on Mars as a base
# ~~~
# ### Paging the Log
#
# When the output of `git log` is too long to fit in your screen,
# `git` uses a program to split it into pages of the size of your screen.
# When this "pager" is called, you will notice that the last line in your
# screen is a `:`, instead of your usual prompt.
#
# - To get out of the pager, press <kbd>Q</kbd>.
# - To move to the next page, press <kbd>Spacebar</kbd>.
# - To search for `some_word` in all pages,
# press <kbd>/</kbd>
# and type `some_word`.
# Navigate through matches pressing <kbd>N</kbd>.
# ### Limit Log Size
#
# To avoid having `git log` cover your entire terminal screen, you can limit the
# number of commits that Git lists by using `-N`, where `N` is the number of
# commits that you want to view. For example, if you only want information from
# the last commit you can use:
#
# ~~~
# $ git log -1
# ~~~
#
# ~~~
# commit 005937fbe2a98fb83f0ade869025dc2636b4dad5
# Author: <NAME> <<EMAIL>>
# Date: Thu Aug 22 10:14:07 2013 -0400
#
# Discuss concerns about Mars' climate for Mummy
# ~~~
# You can also reduce the quantity of information using the
# `--oneline` option:
#
# ~~~
# $ git log --oneline
# ~~~
#
# ~~~
# - 005937f Discuss concerns about Mars' climate for Mummy
# - 34961b1 Add concerns about effects of Mars' moons on Wolfman
# - f22b25e Start notes on Mars as a base
# ~~~
# ### Directories
#
# Two important facts you should know about directories in Git.
#
# 1) Git does not track directories on their own, only files within them.
# Try it for yourself:
#
# ~~~
# $ mkdir directory
# $ git status
# $ git add directory
# $ git status
# ~~~
#
# Note, our newly created empty directory `directory` does not appear in
# the list of untracked files even if we explicitly add it (_via_ `git add`) to our
# repository. This is the reason why you will sometimes see `.gitkeep` files
# in otherwise empty directories. Unlike `.gitignore`, these files are not special
# and their sole purpose is to populate a directory so that Git adds it to
# the repository. In fact, you can name such files anything you like.
# 2) If you create a directory in your Git repository and populate it with files,
# you can add all files in the directory at once by:
#
# ~~~
# git add <directory-with-files>
# ~~~
# To recap, when we want to add changes to our repository,
# we first need to add the changed files to the staging area
# (`git add`) and then commit the staged changes to the
# repository (`git commit`):
#
# 
# ### Author and Committer
#
# For each of the commits you have done, Git stored your name twice.
# You are named as the author and as the committer. You can observe
# that by telling Git to show you more information about your last
# commits:
#
# ~~~
# $ git log --format=full
# ~~~
#
# When committing you can name someone else as the author:
#
# ~~~
# $ git commit --author="<NAME> <<EMAIL>>"
# ~~~
# ## Exploring History
#
# As we previously saw, we can refer to commits by their
# identifiers. You can refer to the _most recent commit_ of the working
# directory by using the identifier `HEAD`.
#
# We've been adding one line at a time to `mars.txt`, so it's easy to track our
# progress by looking, so let's do that using our `HEAD`s. Before we start,
# let's make a change to `mars.txt`:
#
# ~~~
# Cold and dry, but everything is my favorite color
# The two moons may be a problem for Wolfman
# But the Mummy will appreciate the lack of humidity
# An ill-considered change
# ~~~
# Now, let's see what we get.
#
# ~~~
# $ git diff HEAD mars.txt
# ~~~
#
# ~~~
# diff --git a/mars.txt b/mars.txt
# index b36abfd..0848c8d 100644
# --- a/mars.txt
# +++ b/mars.txt
# @@ -1,3 +1,4 @@
# Cold and dry, but everything is my favorite color
# The two moons may be a problem for Wolfman
# But the Mummy will appreciate the lack of humidity
# +An ill-considered change
# ~~~
#
# which is the same as what you would get if you leave out `HEAD` (try it). The
# real goodness in all this is when you can refer to previous commits. We do
# that by adding `~1`
# (where "~" is "tilde", pronounced [**til**-d*uh*])
# to refer to the commit one before `HEAD`.
#
# ~~~
# $ git diff HEAD~1 mars.txt
# ~~~
# If we want to see the differences between older commits we can use `git diff`
# again, but with the notation `HEAD~1`, `HEAD~2`, and so on, to refer to them:
#
# ~~~
# $ git diff HEAD~2 mars.txt
# ~~~
#
# ~~~
# diff --git a/mars.txt b/mars.txt
# index df0654a..b36abfd 100644
# --- a/mars.txt
# +++ b/mars.txt
# @@ -1 +1,4 @@
# Cold and dry, but everything is my favorite color
# +The two moons may be a problem for Wolfman
# +But the Mummy will appreciate the lack of humidity
# +An ill-considered change
# ~~~
# We could also use `git show` which shows us what changes we made at an older commit as well as the commit message, rather than the _differences_ between a commit and our working directory that we see by using `git diff`.
#
# ~~~
# $ git show HEAD~2 mars.txt
# ~~~
#
# ~~~
# commit f22b25e3233b4645dabd0d81e651fe074bd8e73b
# Author: <NAME> <<EMAIL>>
# Date: Thu Aug 22 09:51:46 2013 -0400
#
# Start notes on Mars as a base
#
# diff --git a/mars.txt b/mars.txt
# new file mode 100644
# index 0000000..df0654a
# --- /dev/null
# +++ b/mars.txt
# @@ -0,0 +1 @@
# +Cold and dry, but everything is my favorite color
# ~~~
# In this way, we can build up a chain of commits.
# The most recent end of the chain is referred to as `HEAD`;
# we can refer to previous commits using the `~` notation,
# so `HEAD~1` means "the previous commit",
# while `HEAD~123` goes back 123 commits from where we are now.
#
# We can also refer to commits using those long strings of digits and letters
# that `git log` displays. These are unique IDs for the changes,
# and "unique" really does mean unique: every change to any set of files on any computer
# has a unique 40-character identifier.
# Our first commit was given the ID `f22b25e3233b4645dabd0d81e651fe074bd8e73b`,
# so let's try this:
#
# ~~~
# $ git diff f22b25e3233b4645dabd0d81e651fe074bd8e73b mars.txt
# ~~~
#
# ~~~
# diff --git a/mars.txt b/mars.txt
# index df0654a..93a3e13 100644
# --- a/mars.txt
# +++ b/mars.txt
# @@ -1 +1,4 @@
# Cold and dry, but everything is my favorite color
# +The two moons may be a problem for Wolfman
# +But the Mummy will appreciate the lack of humidity
# +An ill-considered change
# ~~~
# That's the right answer, but typing out random 40-character strings is annoying,
# so Git lets us use just the first few characters:
#
# ~~~
# $ git diff f22b25e mars.txt
# ~~~
#
# ~~~
# diff --git a/mars.txt b/mars.txt
# index df0654a..93a3e13 100644
# --- a/mars.txt
# +++ b/mars.txt
# @@ -1 +1,4 @@
# Cold and dry, but everything is my favorite color
# +The two moons may be a problem for Wolfman
# +But the Mummy will appreciate the lack of humidity
# +An ill-considered change
# ~~~
# All right! So we can save changes to files and see what we've changed—now how
# can we restore older versions of things?
# Let's suppose we accidentally overwrite our file:
#
# ~~~
# We will need to manufacture our own oxygen
# ~~~
#
# `git status` now tells us that the file has been changed,
# but those changes haven't been staged:
#
# ~~~
# $ git status
# ~~~
#
# ~~~
# On branch master
# Changes not staged for commit:
# (use "git add <file>..." to update what will be committed)
# (use "git checkout -- <file>..." to discard changes in working directory)
#
# modified: mars.txt
#
# no changes added to commit (use "git add" and/or "git commit -a")
# ~~~
# We can put things back the way they were
# by using `git checkout`:
#
# ~~~
# $ git checkout HEAD mars.txt
# $ cat mars.txt
# ~~~
#
# ~~~
# Cold and dry, but everything is my favorite color
# The two moons may be a problem for Wolfman
# But the Mummy will appreciate the lack of humidity
# ~~~
#
# As you might guess from its name, `git checkout` checks out (i.e., restores) an old version of a file.
# In this case, we're telling Git that we want to recover the version of the file recorded in `HEAD`,
# which is the last saved commit.
# If we want to go back even further, we can use a commit identifier instead:
#
# ~~~
# $ git checkout f22b25e mars.txt
# ~~~
#
# ~~~
# $ cat mars.txt
# ~~~
#
# ~~~
# Cold and dry, but everything is my favorite color
# ~~~
#
# ~~~
# $ git status
# ~~~
#
# ~~~
# On branch master
# Changes to be committed:
# (use "git reset HEAD <file>..." to unstage)
# Changes not staged for commit:
# (use "git add <file>..." to update what will be committed)
# (use "git checkout -- <file>..." to discard changes in working directory)
#
# modified: mars.txt
#
# no changes added to commit (use "git add" and/or "git commit -a")
# ~~~
# Notice that the changes are on the staged area.
# Again, we can put things back the way they were
# by using `git checkout`:
#
# ~~~
# $ git checkout HEAD mars.txt
# ~~~
# ### Don't Lose Your HEAD
#
# Above we used
#
# ~~~
# $ git checkout f22b25e mars.txt
# ~~~
#
# to revert `mars.txt` to its state after the commit `f22b25e`. But be careful!
# The command `checkout` has other important functionalities and Git will misunderstand
# your intentions if you are not accurate with the typing. For example,
# if you forget `mars.txt` in the previous command.
#
# ~~~
# $ git checkout f22b25e
# ~~~
# ~~~
# Note: checking out 'f22b25e'.
#
# You are in 'detached HEAD' state. You can look around, make experimental
# changes and commit them, and you can discard any commits you make in this
# state without impacting any branches by performing another checkout.
#
# If you want to create a new branch to retain commits you create, you may
# do so (now or later) by using -b with the checkout command again. Example:
#
# git checkout -b <new-branch-name>
#
# HEAD is now at f22b25e Start notes on Mars as a base
# ~~~
# The "detached HEAD" is like "look, but don't touch" here,
# so you shouldn't make any changes in this state.
# After investigating your repo's past state, reattach your `HEAD` with `git checkout master`.
# It's important to remember that
# we must use the commit number that identifies the state of the repository
# *before* the change we're trying to undo.
# A common mistake is to use the number of
# the commit in which we made the change we're trying to get rid of.
# In the example below, we want to retrieve the state from before the most
# recent commit (`HEAD~1`), which is commit `f22b25e`:
#
# 
#
# So, to put it all together,
# here's how Git works in cartoon form:
#
# 
# ### Simplifying the Common Case
# If you read the output of `git status` carefully,
# you'll see that it includes this hint:
# ~~~
# (use "git checkout -- <file>..." to discard changes in working directory)
# ~~~
# As it says, `git checkout` without a version identifier restores files to the state saved in `HEAD`.
# The double dash `--` (optional) separates the names of the files being recovered
# from the command itself. Without it, Git might try to use the name of the file as the commit identifier (but I haven't had this problem).
# The fact that files can be reverted one by one
# tends to change the way people organize their work.
# If everything is in one large document,
# it's hard (but not impossible) to undo changes to the introduction
# without also undoing changes made later to the conclusion.
# If the introduction and conclusion are stored in separate files,
# on the other hand,
# moving backward and forward in time becomes much easier.
# ### Explore and Summarize Histories
#
# Exploring history is an important part of git, often it is a challenge to find
# the right commit ID, especially if the commit is from several months ago.
#
# Imagine the `planets` project has more than 50 files.
# You would like to find the commit in which specific text in `mars.txt` was modified.
# When you type `git log`, a very long list appears.
# How can you narrow down the search?
#
# Recall that the `git diff` command allows us to explore one specific file,
# e.g. `git diff mars.txt`. We can apply a similar idea here.
#
# ~~~
# $ git log mars.txt
# ~~~
#
# Unfortunately some of these commit messages are very ambiguous e.g. `update files`.
# How can you search through these files?
# Both `git diff` and `git log` are very useful and they summarize a different part of the history for you.
# Is it possible to combine both? Let's try the following:
#
# ~~~
# $ git log --oneline --patch mars.txt
# ~~~
#
# You should get a long list of output, and you should be able to see both commit messages and the difference between each commit.
# ### Tagging
#
# If there is a particularly important commit, e.g. a new version of a program, you can add an identifier to your commit (that is, just after committing). There are two types of tags. A "lightweight" tag is as simple as:
#
# ~~~
# $ git tag v0.1-lw
# ~~~
#
# This tag simply points to a particular commit and then can be used for checking out a branch instead of the alphanumeric string given to the branch.
#
# More useful is the annotated tag:
#
# ~~~
# $ git tag -a v1.0 -m "my version 1.0"
# ~~~
#
# Annotated tags are stored as full objects in the Git database. They’re checksummed; contain the tagger name, email, and date; have a tagging message; and can be signed and verified with GNU Privacy Guard (GPG) (cryptographically signed). It’s generally recommended that you create annotated tags so you can have all this information, and then it is a full bookmark of the state.
#
# If desired, read more about tagging [here](https://git-scm.com/book/en/v2/Git-Basics-Tagging).
# We'll continue our discussion of Git at the next class!
|
source/notebooks/lecture06_git_and_github.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:env_multilingual_class]
# language: python
# name: conda-env-env_multilingual_class-py
# ---
# + [markdown] Collapsed="false"
# # The IMDb Dataset
# The IMDb dataset consists of sentences from movie reviews and human annotations of their sentiment. The task is to predict the sentiment of a given sentence. We use the two-way (positive/negative) class split, and use only sentence-level labels.
# + Collapsed="false"
# Render the environment-setup instructions inline in the notebook.
from IPython.display import display, Markdown

# Path is relative to the notebook's own directory; raises FileNotFoundError
# if the doc moves -- TODO confirm the repo layout still matches.
with open('../../doc/env_variables_setup.md', 'r') as fh:
    content = fh.read()
display(Markdown(content))
# + [markdown] Collapsed="false"
# ## Import Packages
# + Collapsed="false"
import tensorflow as tf
import tensorflow_datasets
from tensorflow.keras.utils import to_categorical
from transformers import (
BertConfig,
BertTokenizer,
TFBertModel,
TFBertForSequenceClassification,
glue_convert_examples_to_features,
glue_processors
)
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import seaborn as sns
from google.cloud import storage
import math
import numpy as np
import os
import glob
import time
from datetime import timedelta
import shutil
from datetime import datetime
import pandas as pd
from scipy import stats
# + [markdown] Collapsed="false"
# ## Check configuration
# + Collapsed="false"
# TensorFlow build/version sanity checks.
print(tf.version.GIT_VERSION, tf.version.VERSION)
# + Collapsed="false"
print(tf.keras.__version__)
# + Collapsed="false"
# List GPUs visible to TensorFlow; training falls back to CPU if none.
gpus = tf.config.list_physical_devices('GPU')
if len(gpus)>0:
    for gpu in gpus:
        print('Name:', gpu.name, ' Type:', gpu.device_type)
else:
    print('No GPU available !!!!')
# + [markdown] Collapsed="false"
# ## Define Paths
# + Collapsed="false"
# Resolve working directories from environment variables. A missing variable
# is only reported -- the corresponding name stays undefined, so a later
# NameError points back to the missing setting.
try:
    data_dir=os.environ['PATH_DATASETS']
except KeyError:
    print('missing PATH_DATASETS')
try:
    tensorboard_dir=os.environ['PATH_TENSORBOARD']
except KeyError:
    print('missing PATH_TENSORBOARD')
try:
    savemodel_dir=os.environ['PATH_SAVE_MODEL']
except KeyError:
    print('missing PATH_SAVE_MODEL')
# + [markdown] Collapsed="false"
# ## Import local packages
# + Collapsed="false"
import preprocessing.preprocessing as pp
import utils.model_metrics as mm
# + Collapsed="false"
import importlib
importlib.reload(pp);
importlib.reload(mm);
# + [markdown] Collapsed="false"
# ## Loading a data from Tensorflow Datasets
# + Collapsed="false"
# Download (or reuse the cached copy of) the IMDb reviews dataset together
# with its metadata; as_supervised=True yields (text, label) pairs.
data, info = tensorflow_datasets.load(name="imdb_reviews",
                                      data_dir=data_dir,
                                      as_supervised=True,
                                      with_info=True)
# + [markdown] Collapsed="false"
# ### Checking basics info from the metadata
# + Collapsed="false"
info
# + Collapsed="false"
pp.print_info_dataset(info)
# + [markdown] Collapsed="false"
# ### Checking basic info from the metadata
# + Collapsed="false"
data
# + Collapsed="false"
data.keys()
# + Collapsed="false"
pp.print_info_data(data['train'])
# + [markdown] Collapsed="false"
# ## Define parameters of the model
# + Collapsed="false"
# extract parameters
size_train_dataset = info.splits['train'].num_examples
# The IMDb "test" split is used as the validation set throughout.
size_valid_dataset = info.splits['test'].num_examples
number_label = info.features["label"].num_classes
print('Dataset size: {:6}/{:6}'.format(size_train_dataset, size_valid_dataset))
# + [markdown] Collapsed="false"
# ## Data analysis
# + [markdown] Collapsed="false"
# ### Extraction of the data and creating dataframes
# + Collapsed="false"
def extraction(feature):
    """Pull the (sentence, label) pair out of a supervised TFDS example dict."""
    sentence, label = feature['sentence'], feature['label']
    return sentence, label
# + Collapsed="false"
# Materialize the TFDS splits as numpy arrays of (sentence, label) pairs.
np_train = np.array(list(data['train'].as_numpy_iterator()))
np_valid = np.array(list(data['test'].as_numpy_iterator()))
#size_valid_dataset = np.shape(np.array(list(data_valid.as_numpy_iterator())))[0]
# + Collapsed="false"
df_train = pd.DataFrame(data=np_train, columns=['sentence', 'label'])
df_valid = pd.DataFrame(data=np_valid, columns=['sentence', 'label'])
# + Collapsed="false"
# 'sentence' holds raw bytes; decode once into a str column.
df_train['text']=df_train['sentence'].apply(lambda x: x.decode("utf-8"))
df_valid['text']=df_valid['sentence'].apply(lambda x: x.decode("utf-8"))
# + Collapsed="false"
df_train['label']=df_train['label'].astype('int32')
df_valid['label']=df_valid['label'].astype('int32')
# + Collapsed="false"
# Drop the bytes column now that 'text' exists.
df_train.pop('sentence')
df_valid.pop('sentence');
# + Collapsed="false"
# Whitespace-split word count; an approximation of the true token count.
df_train['word_count'] = df_train['text'].apply(lambda txt: len(txt.split(' ')))
df_valid['word_count'] = df_valid['text'].apply(lambda txt: len(txt.split(' ')))
# + [markdown] Collapsed="false"
# ### Structure exploration
# + Collapsed="false"
df_train.columns
# + Collapsed="false"
df_train.dtypes
# + [markdown] Collapsed="false"
# ### Basics exploration
# + Collapsed="false"
len(df_train)
# + Collapsed="false"
len(df_valid)
# + Collapsed="false"
df_train.head()
# + Collapsed="false"
# number of characters
new_df = df_valid.copy()
# + Collapsed="false"
# Keep only the first 1000 reviews for the character-count estimate below.
new_df = new_df[0:1000]
# + Collapsed="false"
len(new_df)
# + Collapsed="false"
# Despite the name, 'token' is the character count of each review.
new_df['token'] = new_df['text'].map(lambda txt: len(txt))
# + Collapsed="false"
new_df.head(n=10)
# + Collapsed="false"
# Total characters doubled -- presumably a rough size/cost estimate;
# the factor 2 is undocumented -- TODO confirm its purpose.
new_df['token'].sum()*2
# + Collapsed="false"
# + Collapsed="false"
# Missing-value check: both frames should report zero nulls.
df_train.isnull().sum()
# + Collapsed="false"
df_valid.isnull().sum()
# + Collapsed="false"
# Class balance per split.
df_train['label'].value_counts(dropna=False)
# + Collapsed="false"
df_valid['label'].value_counts(dropna=False)
# + Collapsed="false"
# Word-count summary statistics per split.
df_train['word_count'].describe()
# + Collapsed="false"
df_valid['word_count'].describe()
# + [markdown] Collapsed="false"
# ### Visualization of the data
# + Collapsed="false"
# Plot styling shared by the figures below.
import matplotlib as mpl
from cycler import cycler
plt.style.use('seaborn')
#plt.style.use('dark_background')
# + Collapsed="false"
plt.rcParams['figure.figsize'] = [10, 8]
# + Collapsed="false"
# matplotlib histogram
plt.style.use('seaborn')
plt.hist(df_train['word_count'], color = 'blue', edgecolor = 'black',bins = int(50));
# Add labels
plt.title('Histogram of word count')
plt.xlabel('Number of words')
plt.ylabel('Document')
# + Collapsed="false"
# Overlaid, density-normalized histograms to compare train vs. validation.
plt.style.use('seaborn')
fig, ax1 = plt.subplots()
ax1.hist([df_train['word_count'],df_valid['word_count']],color=['b', 'y'], label=['train', 'valid'], density=True)
ax1.set_ylabel('Histogram of word count')
ax1.set_xlabel('Number of words')
plt.legend(loc='upper right')
plt.show()
# + Collapsed="false"
# Class-balance pie charts (left: train, right: validation); labels follow
# value_counts() order, i.e. majority class first.
plt.style.use('seaborn')
explode = (0, 0.1)
# Make figure and axes
fig, axs = plt.subplots(1, 2)
# A standard pie plot
labels = info.features["label"].names[list(df_train['label'].value_counts(dropna=False).index)[0]], info.features["label"].names[list(df_train['label'].value_counts(dropna=False).index)[1]]
fracs = list(df_train['label'].value_counts(dropna=False))
axs[0].pie(fracs, labels=labels, explode=explode, autopct='%1.1f%%', shadow=True);
# A standard pie plot
labels = info.features["label"].names[list(df_valid['label'].value_counts(dropna=False).index)[0]], info.features["label"].names[list(df_valid['label'].value_counts(dropna=False).index)[1]]
fracs = list(df_valid['label'].value_counts(dropna=False))
axs[1].pie(fracs, labels=labels, explode=explode, autopct='%1.1f%%', shadow=True);
# + Collapsed="false"
# Data
# Stacked-bar view of the class balance of both splits.
#plt.style.use('dark_background')
plt.style.use('ggplot')
# Map the majority/minority class indices back to their label names.
# (Computed for reference; the stacked bars below use the raw counts only.)
labels_train = info.features["label"].names[list(df_train['label'].value_counts(dropna=False).index)[0]], info.features["label"].names[list(df_train['label'].value_counts(dropna=False).index)[1]]
# Bug fix: the second element previously read from df_train, so the
# validation label pair could come out in the wrong order.
labels_valid = info.features["label"].names[list(df_valid['label'].value_counts(dropna=False).index)[0]], info.features["label"].names[list(df_valid['label'].value_counts(dropna=False).index)[1]]
fracs_train = list(df_train['label'].value_counts(dropna=False))
fracs_valid = list(df_valid['label'].value_counts(dropna=False))
# Bar x positions: 0 = train, 1 = validation.
r = [0,1]
raw_data = {'greenBars': [fracs_train[0],fracs_valid[0]], 'orangeBars': [fracs_train[1],fracs_valid[1]]}
df = pd.DataFrame(raw_data)
# From raw value to percentage
totals = [i+j for i,j in zip(df['greenBars'], df['orangeBars'])]
greenBars = [i / j * 100 for i,j in zip(df['greenBars'], totals)]
orangeBars = [i / j * 100 for i,j in zip(df['orangeBars'], totals)]
# plot
barWidth = 0.85
names = ('Train', 'Validation')
# NOTE(review): the "positive"/"negative" legend assumes value_counts()
# order matches those classes -- confirm against the label encoding.
# Create green Bars
ax1 = plt.bar(r, greenBars, color='#b5ffb9', edgecolor='white', width=barWidth, label="positive")
# Create orange Bars (stacked on top of the green ones)
ax2 = plt.bar(r, orangeBars, bottom=greenBars, color='#f9bc86', edgecolor='white', width=barWidth, label="negative")
# Write each segment's percentage at the segment's vertical centre.
for r1, r2 in zip(ax1, ax2):
    h1 = r1.get_height()
    h2 = r2.get_height()
    plt.text(r1.get_x() + r1.get_width() / 2., h1 / 2., "%d" % h1, ha="center", va="center", color="white", fontsize=16, fontweight="bold")
    plt.text(r2.get_x() + r2.get_width() / 2., h1 + h2 / 2., "%d" % h2, ha="center", va="center", color="white", fontsize=16, fontweight="bold")
# Custom x axis
plt.xticks(r, names)
plt.title("Balance of the Datasets")
plt.ylabel("Percentage")
plt.legend(loc='upper left', bbox_to_anchor=(1,1), ncol=1)
# Show graphic
plt.show()
# + [markdown] Collapsed="false"
# ### Selection
# + Collapsed="false"
# Coverage quantile of interest and the candidate maximum sequence length
# (512 matches BERT's maximum number of input tokens).
q=0.95
n=512
# + Collapsed="false"
x = df_train['word_count']
sns.distplot(x, hist=True, rug=True);
print('Minimum word count required to include all words in {}% of the reviews: {}'.format(q*100, x.quantile(q)))
# Bug fix: the message reports threshold `n`, but the percentile was computed
# at a hard-coded 500; evaluate at `n` so the number matches the message.
print('\nPercent of sequences included when choosing a maximum word count of {}: {}%'.format(n, stats.percentileofscore(x,n)))
# + Collapsed="false"
stats.percentileofscore(x,n)
# + Collapsed="false"
x = df_valid['word_count']
sns.distplot(x, hist=True, rug=True);
print('Minimum word count required to include all words in {}% of the reviews: {}'.format(q*100, x.quantile(q)))
# Same fix as above: percentile at `n`, not 500.
print('\nPercent of sequences included when choosing a maximum word count of {}: {}%'.format(n, stats.percentileofscore(x,n)))
# + Collapsed="false"
# Cumulative word-count distributions for a visual check of the cut-off.
sns.distplot(df_train['word_count'], hist_kws={'cumulative': True});
# + Collapsed="false"
sns.distplot(df_valid['word_count'], hist_kws={'cumulative': True});
# + Collapsed="false"
def fraction_data(df):
    """Report how much of *df* falls under each of several word-count thresholds.

    For every threshold, prints the number of rows whose 'word_count' is
    strictly below it, together with the corresponding percentage of *df*.
    """
    total = len(df)
    for threshold in (2, 5, 10, 20, 27):
        selected = len(df[df['word_count'] < threshold])
        share = selected / total * 100
        print('threshold: {:10d} amount of data selected: {:10d} percentage of the data selected: {:.2f} %'.format(threshold, selected, share))
# + Collapsed="false"
# Show how much of each split would survive various word-count thresholds.
fraction_data(df_train)
# + Collapsed="false"
fraction_data(df_valid)
# + Collapsed="false"
# + Collapsed="false"
# + Collapsed="false"
|
notebook/01-Analysis/01_IMDb_Huggingface_data_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="xnY85mGJjqZs"
#
# + id="40kP6K0J0Wct"
# Mount Google Drive so data and models persist across Colab sessions
# (prompts for OAuth authorization on first run).
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="xQZfbehi8LJ9"
# ### All needed imports
# + id="2Bm84Jy3gjVn"
# !pip install keras-tuner
# !pip install yfinance
import numpy as np
from numpy import concatenate
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM,Dense,Dropout, Activation
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from kerastuner import HyperModel
from kerastuner.tuners import RandomSearch
from tensorflow.keras.models import load_model
import yfinance as yf
from datetime import datetime, date, timedelta
# + [markdown] id="Vd97WtyPjjNb"
# ### Data
#
# Time series such as the S&P 500, DXY and GOLD had some missing values (especially on Sundays, as we expected), so I chose to fill those empty values with the nearest value available in the time series.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 487} id="Ag2uDDQ8gjjk" outputId="3badd015-132f-48bc-9f9a-e5e173f9d167"
# Build a common daily calendar from start_date through today so all four
# series align row-for-row.
today = datetime.today().strftime('%Y-%m-%d')
start_date = '2015-08-07'
idx = pd.date_range(start_date, today)
# Bitcoin daily close.
df_A = yf.download("BTC-USD", start=start_date, end=today)
df_A = df_A["Close"]
# method="nearest" fills non-trading days from the closest available date;
# NOTE(review): fill_value=0 looks redundant once method is given -- confirm.
df_A = df_A.reindex(idx, fill_value=0, method="nearest")
# The S&P 500 - stock market index that measures the stock performance of 500 large companies listed on stock exchanges in the US.
df_B = yf.download("%5EGSPC", start=start_date, end=today)
df_B = df_B["Close"]
df_B = df_B.reindex(idx, fill_value=0, method="nearest")
# US Dollar/USDX - Index - Cash
df_C = yf.download("DX-Y.NYB", start=start_date, end=today)
df_C = df_C["Close"]
df_C = df_C.reindex(idx, fill_value=0, method="nearest")
# Gold
df_D = yf.download("GC=F", start=start_date, end=today)
df_D = df_D["Close"]
df_D = df_D.reindex(idx, fill_value=0, method="nearest")
# One column per asset, one row per calendar day.
timeseries= pd.concat([df_A, df_B, df_C, df_D], axis=1)
timeseries.columns=(["Close-btc", "Close-sp500", "Close-dxy", "Close-gold"])
timeseries
# + [markdown] id="WiwMjWS68gqo"
# ### Scaling and making Y-column of future Bitcoin values shifted for one day
#
# **Y-column** is *Close-btc-output*
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="E1y3FduOkqBz" outputId="2af927b2-4640-4ab5-aab0-e46ec45642eb"
# Min-max scale all four price series into [0, 1].
# NOTE(review): the scaler is fitted on the full history, so future values
# leak into the scaling of earlier rows -- acceptable for exploration only.
scl = MinMaxScaler()
timeseries[["Close-btc", "Close-sp500", "Close-dxy", "Close-gold"]] = scl.fit_transform(timeseries[
    ["Close-btc", "Close-sp500", "Close-dxy", "Close-gold"]])
# Target column: the next day's (scaled) BTC close, i.e. the series shifted
# back one day; the last row becomes NaN and is patched in the next cell.
timeseries["Close-btc-output"] = timeseries["Close-btc"]
timeseries["Close-btc-output"] = timeseries["Close-btc-output"].shift(-1)
#timeseries.drop(timeseries.tail(1).index, inplace=True)
# No-op expression: only displays today's (NaN) target in the notebook.
timeseries.loc[today, 'Close-btc-output']
timeseries
# + [markdown] Collapsed="false"
# #### Filling today's **Close-btc-output** value with yesterday value. This is needed for later proper prediction
# + colab={"base_uri": "https://localhost:8080/"} id="vIraL6ByOPLW" outputId="11f18bf4-7dba-4093-f841-77b7e65da1d7"
yesterday = date.today() - timedelta(days=1)
yesterday = yesterday.strftime('%Y-%m-%d')
# Placeholder so the last row has a defined target when building the arrays.
timeseries.loc[today, 'Close-btc-output'] = timeseries.loc[yesterday, 'Close-btc-output']
timeseries
print(timeseries)
# Raw 2-D numpy view: columns are the 4 features plus the target (last).
array = timeseries.values
print(array)
# + [markdown] id="saDZYWQ88w-l"
# ### Model will be predicting a future Bitcoin values based on three days before
# + colab={"base_uri": "https://localhost:8080/"} id="vZM-NX8Bk8Kh" outputId="d6856e1c-4365-4aeb-a63a-0ec19c737a91"
#number of days to take in a row before Y
X_quantity = 3
# Rows left over when the history is split into X_quantity-day groups.
mod = len(array) % X_quantity
# how many predictions to make
times = 7
# deleting first-mod values to have /mod-zero array
for i_ in range(mod):
    array = np.delete(array, 0, 0)
# for splitting into train/test
division = X_quantity * times
split = len(array) - division
print('Division: ', division, 'Split is on: ', split)
# The last `times` groups are held out for prediction; the rest is training.
predict = array[split:]
train = array[:split]
predict, predict.shape
# + [markdown] id="rKxlZbNbLw7H"
# #### Checking if everything is alright and I didn't miss anything
# + colab={"base_uri": "https://localhost:8080/", "height": 700} id="dHb3Z_2rPqVO" outputId="dd4a8ac8-7b6c-47a1-ec8c-076a189e07fd"
timeseries.tail(21)
# + id="zoa3PtiDFPTC"
def get_X_values(values):
    """Concatenate every `X_quantity` consecutive rows of *values* into one
    flat feature vector.

    Args:
        values: 2-D array-like of per-day feature rows (predict/train values).

    Returns:
        numpy.ndarray with one row per complete group of `X_quantity` input
        rows; a trailing partial group is silently dropped, matching the
        caller's pre-trimming of the history.
    """
    window = []
    grouped = []
    for position, row in enumerate(values, start=1):
        window.extend(row)
        if position % X_quantity == 0:
            grouped.append(window)
            window = []
    return np.array(grouped)
def get_Y_targets(targets, x_quantity=None):
    """Pick the target of the last day of each `x_quantity`-day window.

    Mirrors :func:`get_X_values`: for each complete window of
    `x_quantity` rows, the target of the window's final row is kept.

    Args:
        targets: 1-D array-like of predict/train target values.
        x_quantity: window length in rows; falls back to the global
            ``X_quantity`` when None (backward-compatible default).

    Returns:
        numpy.ndarray with one target per complete window.
    """
    if x_quantity is None:
        x_quantity = X_quantity  # module-level window size
    # last element of each complete window: indices q-1, 2q-1, 3q-1, ...
    n_windows = int(len(targets) / x_quantity)
    return np.array([targets[(i + 1) * x_quantity - 1] for i in range(n_windows)])
# + colab={"base_uri": "https://localhost:8080/"} id="ErOUrPYQk8QS" outputId="d627668a-ef72-4643-95fc-5ed85a770e46"
# taking 4 features
predict_values = predict[:, :-1]
# taking target values
predict_target = predict[:, -1]
print('predict_values.shape: {} | predict_target.shape: {}'.format(predict_values.shape, predict_target.shape))
predict_X = get_X_values(predict_values)
print('\n predict_X type: {} \n predict_X.shape: {} \n predict_X: \n{}'.format(type(predict_X), predict_X.shape, predict_X))
predict_Y = get_Y_targets(predict_target)
print('\nPred Y: {}'.format(predict_Y))
# + colab={"base_uri": "https://localhost:8080/"} id="fu-cMehW9qly" outputId="78ea9b8e-8565-4b8d-821e-7fe3b7c731e3"
# taking 4 features
train_values = train[:, :-1]
# taking target values
train_target = train[:, -1]
print('train_values.shape: {} | train_target.shape : {}'.format(train_values.shape, train_target.shape))
train_X = get_X_values(train_values)
print('\n type of train_X: {} \n train_X.shape: {} \n train_X: \n {}'.format(type(train_X), train_X.shape, train_X))
train_Y = get_Y_targets(train_target)
print('\n len(train_Y) : {} '.format(len(train_Y)))
# + [markdown] id="63ekw5KqMp0x"
# #### Splitting into Train and Test
# + colab={"base_uri": "https://localhost:8080/"} id="VHBda7e7Lo4V" outputId="64a4d804-8d39-41f6-cbb6-0bdfbd65a534"
# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
predict_X = predict_X.reshape((predict_X.shape[0], 1, predict_X.shape[1]))
X_train, X_validate, Y_train, Y_validate = train_test_split(train_X, train_Y, test_size=0.20, random_state=42)
print('X_train.shape: {} | Y_train: {}'.format( X_train.shape, Y_train.shape))
print('predict_X.shape: {}'.format( predict_X.shape))
# + [markdown] id="t97C1Hng4BOR"
# #### Training The Model
#
# keras-tuner is used to get the best possible hyperparameter combination out of 60 different random ones using a random search
#
# *forward_days* is the number of days to predict
# + id="adBu4L63l1a4"
class MyHyperModel(HyperModel):
    """Keras Tuner hypermodel: a two-layer LSTM regressor for BTC prices.

    The search space covers the width of both LSTM layers, the dropout rate
    after each of them, and the Adam learning rate.
    """

    def __init__(self, num_classes):
        self.num_classes = num_classes
        # number of future days predicted by the output layer
        self.forward_days = 1

    def build(self, hp):
        """Build and compile one candidate model for hyperparameters `hp`."""
        model = Sequential()
        NUM_NEURONS_FirstLayer = hp.Choice('NUM_NEURONS_FirstLayer', values=[80, 90, 100, 120])
        model.add(LSTM(NUM_NEURONS_FirstLayer,
                       input_shape=(X_train.shape[1], X_train.shape[2]),
                       return_sequences=True))
        model.add(Dropout(hp.Float('dropout_1', 0, 0.4, step=0.1, default=0)))
        NUM_NEURONS_SecondLayer = hp.Choice('NUM_NEURONS_SecondLayer', values=[30, 40, 50, 60, 70])
        model.add(LSTM(NUM_NEURONS_SecondLayer, input_shape=(NUM_NEURONS_FirstLayer, 1)))
        model.add(Dropout(hp.Float('dropout_2', 0, 0.4, step=0.1, default=0)))
        # Output layer: linear activation, one unit per forecast day (regression)
        model.add(Dense(self.forward_days, activation='linear'))
        # Compile the constructed model and return it
        model.compile(
            optimizer=Adam(
                hp.Choice('learning_rate',
                          values=[0.01, 0.001, 0.0001])),
            loss='mean_squared_error')
        # NOTE(review): removed `print(loss)` from the original -- `loss` was
        # never defined in this scope and raised a NameError at build time.
        return model
# + colab={"base_uri": "https://localhost:8080/"} id="mzMo6wkWl1ez" outputId="1b757b03-8519-46e3-eda8-ba1f251dcae9"
hypermodel = MyHyperModel(num_classes=1)
# Construct the RandomSearch tuner
random_tuner = RandomSearch(
hypermodel,
objective='val_loss',
max_trials=100,
executions_per_trial = 1,
seed=10,
project_name='lstm-kerastuner-multi',
directory="/content")
# Search for the best parameters of the neural network using the constructed random search tuner
random_tuner.search(X_train, Y_train,
epochs=100,
validation_data=(X_validate, Y_validate))
#get the best model
random_params = random_tuner.get_best_hyperparameters()[0]
best_model = random_tuner.get_best_models(1)[0]
# + colab={"base_uri": "https://localhost:8080/"} id="bj47vaEWl1iQ" outputId="3b2a3387-3345-40d6-b2f4-490be32132f8"
best_model.summary()
# + [markdown] id="S1-s2Khtcnix"
# #### Saving the model
# + colab={"base_uri": "https://localhost:8080/"} id="nEsMycidlSxt" outputId="4304f64a-39e7-4f3c-9536-9d7d91acad75"
#Saving the model
file_name = 'LSTM_MULTI-3.h5'
best_model.save(file_name)
print("Saved model `{}` to disk".format(file_name))
# + [markdown] id="UlXJYmQjYp_I"
# #### Loading the model from **/content** or **gdrive** and predicting
# + colab={"base_uri": "https://localhost:8080/"} id="QXZfBqU0YGLZ" outputId="8f8dee97-c575-4bb9-b2a7-9289084d7ac3"
# Loading Model
#file_name = '/content/gdrive/MyDrive/ColabData/LSTM_MULTI-3.h5'
file_name = '/content/LSTM_MULTI-3.h5'
model = tf.keras.models.load_model(file_name)
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="kdM6oDYsdHnP" outputId="dadc22a1-3ae6-4531-d12c-fd946daac474"
#yhat = best_model.predict(predict_X)
yhat = model.predict(predict_X)
yhat, yhat.shape
# + [markdown] id="ZA37s_HVNtps"
# I have my predicted values and target values. Now I need only to reshape them and do inverse transform on them
# + id="PPKDSrvESikz"
def get_array(g):
    """Invert the feature scaling for a (None, 1) column of predictions.

    The scaler ``scl`` was fitted on 4 features, so the single predicted
    column is padded with three dummy columns (value 0.4) before calling
    ``scl.inverse_transform``; only the first (predicted) column of the
    rescaled result is returned, as a plain list.
    """
    padded = np.insert(g, [1], .4, axis=1)
    padded = np.insert(padded, [2], .4, axis=1)
    padded = np.insert(padded, [3], .4, axis=1)
    rescaled = scl.inverse_transform(padded)
    # keep only the first column -- the actual predicted values
    return [row[:1][0] for row in rescaled]
# + [markdown] id="OlGjSXp65AoW"
# ### Predicting on Test Set
# + colab={"base_uri": "https://localhost:8080/"} id="YPkuwSVyTLXH" outputId="0f6b0c7c-3dd4-4c65-b5cf-c723aa4c6c13"
pred = get_array(yhat)
print("PRED: ", pred)
true = get_array(predict_Y.reshape(-1,1))
print('TRUE: ', true)
# + [markdown] id="6UBnLK73OAE0"
# ### DataFrame of predicted values against their true ones. Note that the last value in the true column doesn't count: it was added to the target array purely to enable the final prediction — it is not a real observation.
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="jm_1SlgkD4dC" outputId="4a026656-1960-4ce8-fb96-eb8d01211d3a"
d = {"pred": pred, "true": true}
df = pd.DataFrame(d)
df
# + colab={"base_uri": "https://localhost:8080/", "height": 606} id="DN5EyYT92VU1" outputId="c3c2338f-a358-464d-e322-3063ad9dd719"
plt.figure(figsize = (15,10))
plt.plot(pred, color='r', label='Prediction')
plt.plot(true, label='Target')
plt.xticks(np.arange(0, 10, 1), np.arange(1, 21, 1))
plt.xlabel('Days')
plt.ylabel('USD')
plt.legend(loc='best')
plt.show()
# + [markdown] id="Ru6Ov_dg5M2q"
# ### Predicting on All Values
#
#
# 1. Retrieving all values
# 2. Reshaping them to match model.predict() conditions
# 3. Predicting
# 4. Plotting predictions
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="6wl2EwloUpsP" outputId="3fa016ef-6fc7-4cf8-b10c-86a52a083cfc"
predict_all = get_X_values(array[:,:-1])
predict_all.shape
# + colab={"base_uri": "https://localhost:8080/"} id="2hhKNgdPU903" outputId="4129d06e-6349-4137-839e-af4b1bbcd153"
predict_all = predict_all.reshape((predict_all.shape[0], 1, predict_all.shape[1]))
predict_all.shape
# + colab={"base_uri": "https://localhost:8080/"} id="fitD7VbyVJA0" outputId="ec96c076-dd17-45e2-8a09-5469cfdfcd99"
yhat = model.predict(predict_all)
pred = get_array(yhat)
yhat.shape , len(pred)
# + colab={"base_uri": "https://localhost:8080/", "height": 877} id="mi8rTFrTVRn5" outputId="dbf2706d-effa-4114-cdca-edd872e8ea05"
plt.figure(figsize = (20,15))
plt.plot(pred, color='r', label='Prediction')
#plt.xticks(np.arange(0, 10, 1), np.arange(1, 21, 1))
plt.xlabel('Days')
plt.ylabel('USD')
plt.legend(loc='best')
plt.show()
# + [markdown] id="I-1VsoIuJlCG"
# ### Retrieving timestamp of each prediction and concatenating it with prediction values.
#
# Reminder: each prediction in array *pred* is made on the basis of three days before. For instance, predicted value for timestep 2015-08-10 is based on 7th, 8th and 9th days
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="YGYbfSM86Jbe" outputId="406ae481-125e-4f3f-f35a-8e6b143baa3e"
t = timeseries.reset_index()
timestamp = pd.DataFrame()
timestamp['data'] = t['index'].copy()
timestamp
# + colab={"base_uri": "https://localhost:8080/"} id="UDmPiNCSIAGR" outputId="6946a140-3c2b-4561-df89-70003010f617"
# index into `timestamp`; advances 3 rows per prediction because each
# prediction is based on a 3-day window
DAY = 0
ds = []
# the final prediction has no historical timestamp -- it is for tomorrow
tomorrow = date.today() + timedelta(days=1)
tomorrow = tomorrow.strftime('%Y-%m-%d')
for i_ in range(len(pred)):
    if i_ == (len(pred) -1):
        ds.append(tomorrow)
    else:
        DAY = DAY + 3
        ds.append(timestamp.iloc[DAY][0].strftime('%Y-%m-%d'))
len(ds)
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="o9swRClyfKVp" outputId="eddf34d1-cbdc-4bc4-fd59-e6109bc20543"
d = {"ds": ds, "pred": pred}
df = pd.DataFrame(d)
df
# + id="O-E1oegrf1as"
|
models/lstm/Keras_tuner_multi_BTC_3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import sklearn as sk
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from sklearn.preprocessing import scale
import random
print('seaborn: {}'.format (seaborn.__version__))
# -
# ls
import seaborn as sns
df = pd.read_csv('creditcard.csv', low_memory=False)
df = df.sample(frac=1).reset_index(drop=True)
df.head()
fraud = df.loc[df['Class'] == 1]
non_frauds = df.loc[df['Class'] == 0]
print(len(fraud))
print(len(non_frauds))
ax = fraud.plot.scatter(x='Amount', y = 'Class', color= 'Orange', label='Fraud')
non_frauds.plot.scatter(x='Amount', y = 'Class', color= 'Blue', label='Normal', ax = ax)
plt.show()
print (df.shape)
data = df.sample(frac=0.1, random_state = 1)
print(data.shape)
#plot a histogram
data.hist(figsize=(20,20))
plt.show()
# +
#determine number of fraud transaction in databases
fraud = data[data['Class']==1]
valid = data[data['Class']==0]
outlier_fraction = len(fraud) / float(len(valid))
print(outlier_fraction)
print('fraud cases:{}'.format(len(fraud)))
print('valid cases:{}'.format(len(valid)))
# -
#correlation Matrix
cormat = data.corr()
fig = plt.figure(figsize = (12, 9))
sns.heatmap(cormat, vmax= .8, square = True)
plt.show()
# +
#time for machine learning
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# +
x = df.iloc[:, :-1]
y = df['Class']
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.35)
# -
logistic = linear_model.LogisticRegression(C=1e5)
logistic.fit(x_train, y_train)
print('Score:', logistic.score(x_test, y_test))
y_predicted = np.array(logistic.predict(x_test))
print(y_predicted)
|
frauddetection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Short Bursts Distributions
#
# We look at short bursts on PA and AR senate.
import matplotlib.pyplot as plt
from gerrychain import (GeographicPartition, Partition, Graph, MarkovChain,
proposals, updaters, constraints, accept, Election)
from gerrychain.proposals import recom, propose_random_flip
from gerrychain.tree import recursive_tree_part
from gerrychain.metrics import mean_median, efficiency_gap, polsby_popper, partisan_gini
from functools import (partial, reduce)
import pandas
import geopandas as gp
import numpy as np
import networkx as nx
import pickle
import seaborn as sns
import pprint
import operator
import scipy
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale, normalize
import random
from nltk.util import bigrams
from nltk.probability import FreqDist
from gingleator import Gingleator
from numpy.random import randn
from scipy.stats import norm, probplot
## This function takes a name of a shapefile and returns a tuple of the graph
## and its associated dataframe
def build_graph(filename):
    """Return (gerrychain Graph, geopandas GeoDataFrame) built from `filename`."""
    print("Pulling in Graph from Shapefile: " + filename)
    dual_graph = Graph.from_file(filename)
    shape_df = gp.read_file(filename)
    return (dual_graph, shape_df)
# +
# graph, df = build_graph("AR_shape/AR.shp")
# -
# +
# pickle.dump(graph, open("graph_AR.p", "wb"))
# pickle.dump(df, open("df_AR.p", "wb"))
# -
# +
## Set up PA enacted
graph_PA = pickle.load(open("PA_graph.p", "rb"))
df_PA = pickle.load(open("PA_df.p", "rb"))
PA_updaters = {"population": updaters.Tally("TOT_POP", alias="population"),
"bvap": updaters.Tally("BLACK_POP", alias="bvap"),
"vap": updaters.Tally("VAP", alias="vap"),
"bvap_prec": lambda part: {k: part["bvap"][k] / part["population"][k] for k in part["bvap"]}}
PA_enacted_senate = GeographicPartition(graph_PA, assignment="SSD",
updaters=PA_updaters)
# -
total_population_PA = sum(df_PA.TOT_POP.values)
ideal_population_PA = total_population_PA / 50
seed_part_senate = recursive_tree_part(graph_PA, range(50), pop_col="TOT_POP",
pop_target=ideal_population_PA,
epsilon=0.01, node_repeats=1)
PA_seed_seante = GeographicPartition(graph_PA, assignment=seed_part_senate,updaters=PA_updaters)
# +
## Set up AR
graph_AR = pickle.load(open("graph_AR.p", "rb"))
df_AR = pickle.load(open("df_AR.p", "rb"))
AR_updaters = {"population": updaters.Tally("TOTPOP", alias="population"),
"bvap": updaters.Tally("BVAP", alias="bvap"),
"vap": updaters.Tally("VAP", alias="vap"),
"bvap_prec": lambda part: {k: part["bvap"][k] / part["vap"][k]
for k in part["bvap"]}}
AR_enacted_senate = GeographicPartition(graph_AR, assignment="SSD", updaters=AR_updaters)
# -
AR_enacted_house = GeographicPartition(graph_AR, assignment="SHD", updaters=AR_updaters)
total_population_AR = sum(df_AR.TOTPOP.values)
ideal_population_AR = total_population_AR / 35
senate_seed = recursive_tree_part(graph_AR, range(35), pop_col="TOTPOP",
pop_target=ideal_population_AR,
epsilon=0.01, node_repeats=1)
AR_seed_senate = GeographicPartition(graph_AR, assignment=senate_seed,updaters=AR_updaters)
house_seed = recursive_tree_part(graph_AR, range(100),
pop_col="TOTPOP",
pop_target=total_population_AR / 100,
epsilon=0.05, node_repeats=1)
AR_seed_house = GeographicPartition(graph_AR, assignment=house_seed,
updaters=AR_updaters)
H_enact = Gingleator.num_opportunity_dists(AR_enacted_house, "bvap_prec", 0.4)
H_seed = Gingleator.num_opportunity_dists(AR_seed_house, "bvap_prec", 0.4)
Gingleator.num_opportunity_dists(AR_seed_senate, "bvap_prec", 0.4)
Gingleator.num_opportunity_dists(AR_enacted_senate, "bvap_prec", 0.4)
# ## Reprojections onto the line
def transition_frequencies(observations):
    """Estimate a row-stochastic transition matrix from observed walks.

    Each row of `observations` is one walk over 1-indexed integer states;
    consecutive pairs are counted and the count matrix is L1-normalized
    row-wise into transition probabilities.
    """
    observations = observations.astype(int)
    dim = observations.max()
    pair_counts = {}
    for row in observations:
        # consecutive (state, next_state) pairs within this walk
        for pair in zip(row[:-1], row[1:]):
            pair_counts[pair] = pair_counts.get(pair, 0) + 1
    probs = np.zeros((dim, dim))
    for (src, dst), freq in pair_counts.items():
        probs[src - 1][dst - 1] = freq  # states are 1-indexed
    return normalize(probs, norm="l1")
def rand_walk_graph(transition_frequencies):
    """Build a directed random-walk graph whose nodes are the 1-indexed states."""
    walk = nx.from_numpy_array(transition_frequencies, create_using=nx.DiGraph)
    # shift node labels from 0-indexed matrix positions to 1-indexed states
    relabeling = {node: node + 1 for node in walk.nodes}
    return nx.relabel_nodes(walk, relabeling)
def edge_weights(G, prec=None):
    """Return a {(u, v): weight} dict for the edges of `G`.

    Args:
        G: a networkx graph whose edges carry a 'weight' attribute.
        prec: if given, round each weight to `prec` decimal places.
    """
    # NOTE(review): the original tested `if not prec`, which silently ignored
    # prec=0; compare against None so rounding to zero decimals works too.
    if prec is None:
        return {(u, v): d['weight'] for u, v, d in G.edges(data=True)}
    return {(u, v): round(d['weight'], prec) for u, v, d in G.edges(data=True)}
PA_gingles = Gingleator(PA_seed_seante, pop_col="TOT_POP", minority_prec_col="bvap_prec",
epsilon=0.1)
AR_gingles = Gingleator(AR_seed_senate, pop_col="TOTPOP", minority_prec_col="bvap_prec",
epsilon=0.1)
# #### PA random walk graph
_, PA_observations = PA_gingles.short_burst_run(num_bursts=200, num_steps=25)
PA_trans = transition_frequencies(PA_observations)
PA_rand_walk = rand_walk_graph(PA_trans)
edge_weights(PA_rand_walk)
# #### AR random walk graph
_, AR_observations = AR_gingles.short_burst_run(num_bursts=200, num_steps=25)
AR_trans = transition_frequencies(AR_observations)
AR_rand_walk = rand_walk_graph(AR_trans)
edge_weights(AR_rand_walk)
# ## Distribution of Observations
def stationary_distribution(graph, nodes=None):
    """Approximate the stationary distribution of a birth-death random walk.

    Uses the detailed-balance relation pi_i / pi_{i-1} = P(i-1 -> i) / P(i -> i-1),
    accumulating the ratios along the chain and L1-normalizing at the end.

    Args:
        graph: directed random-walk graph whose edges carry transition
            probabilities (as produced by ``rand_walk_graph``).
        nodes: optional iterable of states to walk over; when omitted, states
            are inferred from the observed edges and the smallest state is
            dropped (it serves as the pi = 1 anchor of the recursion).

    Returns:
        1-D numpy array of stationary probabilities.
    """
    probs = edge_weights(graph)
    if not nodes:
        # collect every state that appears in some observed transition
        observed_nodes = reduce(lambda s, k: s | set(k), probs.keys(), set())
        # smallest state anchors the recursion with pi = 1, so drop it here
        observed_nodes.remove(min(observed_nodes))
    else: observed_nodes = nodes
    # pi_i = pi_{i-1} * P(i-1, i) / P(i, i-1); raises KeyError if a needed
    # transition was never observed -- assumes a connected chain where only
    # steps between adjacent states i-1 and i occur (TODO confirm for inputs)
    stationary = reduce(lambda pis, i: pis + [pis[-1]*probs[i-1, i] / probs[i, i-1]], observed_nodes, [1])
    stationary = normalize([stationary], norm="l1")
    return stationary[0]
# ## Distribution of Observations of various methods on AR state house
# We look at the distribution of times we see plans with some number of opportunity districts when we use an unbiased run, the short burst method to maximized and to minimize, and a tilted method with p=0.25 of accepting a worse plan.
# #### AR house with just count as score and 5000 iterations.
# Bursts are 25 steps each
AR_house_gingles = Gingleator(AR_seed_house, pop_col="TOTPOP", minority_prec_col="bvap_prec",
epsilon=0.1)
_, AR_observations_hub = AR_house_gingles.short_burst_run(num_bursts=1,
num_steps=5000)
_, AR_observations_hsb_max = AR_house_gingles.short_burst_run(num_bursts=200, num_steps=25)
_, AR_observations_hsb_min = AR_house_gingles.short_burst_run(num_bursts=200, num_steps=25,
maximize=False)
_, AR_observations_htilt = AR_house_gingles.biased_run(num_iters=5000)
_, AR_observations_htilt_8 = AR_house_gingles.biased_run(num_iters=5000, p=0.125)
_, AR_observations_htilt_16 = AR_house_gingles.biased_run(num_iters=5000, p=0.0625)
_, AR_observations_hsbtilt = AR_house_gingles.biased_short_burst_run(num_bursts=200,
num_steps=25)
_, AR_observations_hsbtilt_8 = AR_house_gingles.biased_short_burst_run(num_bursts=200,
num_steps=25, p=0.125)
_, AR_observations_hsb_max_5 = AR_house_gingles.short_burst_run(num_bursts=1000, num_steps=5)
_, AR_observations_hsb_max_10 = AR_house_gingles.short_burst_run(num_bursts=500, num_steps=10)
_, AR_observations_hsb_max_50 = AR_house_gingles.short_burst_run(num_bursts=100, num_steps=50)
AR_observations_hsb_tails = np.concatenate((AR_observations_hsb_max, AR_observations_hsb_min))
AR_trans_house = transition_frequencies(AR_observations_hsb_tails)
AR_house_rwgraph = rand_walk_graph(AR_trans_house)
edge_weights(AR_house_rwgraph)
AR_house_stat = stationary_distribution(AR_house_rwgraph)
AR_house_stat
AR_house_scale_stat = np.random.choice(range(6,16), 5000, p=AR_house_stat)
# +
plt.figure(figsize=(8,6))
plt.title("AR State House (100 seats)")
plt.xlabel("Number of Opportunity Districts")
plt.ylabel("Frequency")
sns.distplot(AR_observations_hub.flatten(), kde=False, label="Unbiased", bins=30)
# sns.distplot(AR_observations_hsb1.flatten(), kde=False, label="Short Bursts", color="purple")
# sns.distplot(AR_observations_hsb_min.flatten(), kde=False, label="Short Bursts Min", color="cyan")
sns.distplot(AR_house_scale_stat, kde=False, label="RW Stationary", color="g", bins=30)
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# plt.savefig("plots/AR_state_house_unbiased_stationary_distribution.png")
# -
plt.figure(figsize=(8,6))
plt.title("AR State House (100 seats)")
plt.xlabel("Number of Opportunity Districts")
plt.ylabel("Frequency")
sns.distplot(AR_observations_hub.flatten(), kde=False, label="Unbiased")
# sns.distplot(AR_observations_htilt, kde=False, label="Tilted Run (p=0.25)", color="g")
sns.distplot(AR_observations_hsb_max.flatten(), kde=False, label="Short Bursts Max", color="purple")
sns.distplot(AR_observations_hsb_min.flatten(), kde=False, label="Short Bursts Min", color="cyan")
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# plt.savefig("plots/AR_state_house_distribution_of_short_bursts.png")
plt.figure(figsize=(8,6))
plt.title("AR State House (100 seats)")
plt.xlabel("Number of Opportunity Districts")
plt.ylabel("Frequency")
sns.distplot(AR_observations_hub.flatten(), kde=False, label="Unbiased", color="green")
sns.distplot(AR_observations_htilt, kde=False, label="Tilted Run (p=0.25)", color="cyan")
sns.distplot(AR_observations_htilt_8.flatten(), kde=False, label="Tilted Run (p=0.125)")
# sns.distplot(AR_observations_htilt_16.flatten(), kde=False, label="Tilted Run (p=0.0625)",
# color="purple")
sns.distplot(AR_observations_hsb_max.flatten(), kde=False, label="Short Bursts", color="purple")
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# plt.savefig("plots/AR_state_house_short_bursts_vs_tilted_run.png")
plt.figure(figsize=(8,6))
plt.title("AR State House (100 seats)")
plt.xlabel("Number of Opportunity Districts")
plt.ylabel("Frequency")
sns.distplot(AR_observations_hub.flatten(), kde=False, label="Unbiased", color="green")
sns.distplot(AR_observations_htilt, kde=False, label="Tilted Run (p=0.25)", color="cyan")
sns.distplot(AR_observations_htilt_8.flatten(), kde=False, label="Tilted Run (p=0.125)")
sns.distplot(AR_observations_htilt_16.flatten(), kde=False, label="Tilted Run (p=0.0625)",
color="purple")
# sns.distplot(AR_observations_hsb_max.flatten(), kde=False, label="Short Bursts", color="purple")
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# plt.savefig("plots/AR_state_house_tilted_runs.png")
plt.figure(figsize=(8,6))
plt.title("AR State House (100 seats)")
plt.xlabel("Number of Opportunity Districts")
plt.ylabel("Frequency")
sns.distplot(AR_observations_hub.flatten(), kde=False, label="Unbiased", color="green",
bins=50)
sns.distplot(AR_observations_hsb_max.flatten(), kde=False, label="Short Bursts Max",
color="cyan", bins=50)
sns.distplot(AR_observations_hsbtilt.flatten(), kde=False,
label="Tilted Short Bursts (p=0.25)", bins=50)
sns.distplot(AR_observations_hsbtilt_8.flatten(), kde=False,
label="Tilted Short Bursts (p=0.125)", color="purple", bins=50)
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# plt.savefig("plots/AR_state_house_distribuition_of_tilted_short_bursts_runs.png")
plt.figure(figsize=(8,6))
plt.title("AR State House (100 seats)")
plt.xlabel("Number of Opportunity Districts")
plt.ylabel("Frequency")
# sns.distplot(AR_observations_hub.flatten(), kde=False, label="Unbiased", color="green",
# bins=50)
sns.distplot(AR_observations_hsb_max_5.flatten(), kde=False,
label="len 5", bins=50)
sns.distplot(AR_observations_hsb_max_10.flatten(), kde=False, label="len 10",
bins=50, color="green")
sns.distplot(AR_observations_hsb_max.flatten(), kde=False, label="len 25",
color="cyan", bins=50)
sns.distplot(AR_observations_hsb_max_50.flatten(), kde=False,
label="len 50", color="purple", bins=50)
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# +
plt.figure(figsize=(8,10))
plt.title("AR State House: Short Bursts Walks (200, 25)")
plt.xlim(7, 17)
plt.xlabel("Number of opportunity districts")
plt.ylabel("Steps")
for i in range(200):
plt.plot(AR_observations_hsb_max[i], range(25*i, 25*(i+1)))
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# plt.savefig("plots/AR_state_house_short_burst_over_time.png")
# -
plt.figure(figsize=(8,10))
plt.title("AR State House: Tilted Runs")
plt.xlim(4, 19)
plt.xlabel("Number of opportunity districts")
plt.ylabel("Steps")
plt.plot(AR_observations_hub.flatten(), range(5000), label="Unbiased")
plt.plot(AR_observations_htilt, range(5000), label="Tilted p=0.25")
plt.plot(AR_observations_htilt_8, range(5000), label="Tilted p=0.125")
plt.plot(AR_observations_htilt_16, range(5000), label="Tilted p=0.0625")
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# plt.savefig("plots/AR_state_house_tilted_runs_over_time.png")
plt.figure(figsize=(8,10))
plt.title("AR State House: Tilted Short Burst Runs")
plt.xlim(4, 18)
plt.xlabel("Number of opportunity districts")
plt.ylabel("Steps")
plt.plot(AR_observations_hub.flatten(), range(5000), label="Unbiased")
plt.plot(AR_observations_hsb_max.flatten(), range(5000), label="Short Burst Max")
plt.plot(AR_observations_hsbtilt.flatten(), range(5000), label="Tilted Short Burst (p=0.25)")
plt.plot(AR_observations_hsbtilt_8.flatten(), range(5000), label="Tilted Short Burst (p=0.125)")
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# plt.savefig("plots/AR_state_house_tilted_short_burst_runs_over_time.png")
plt.figure(figsize=(8,10))
plt.title("AR State House: Short Burst Runs")
plt.xlim(4, 17)
plt.xlabel("Number of opportunity districts")
plt.ylabel("Steps")
plt.plot(AR_observations_hub.flatten(), range(5000), label="Unbiased")
plt.plot(AR_observations_hsb_max.flatten(), range(5000), label="Short Burst Max")
plt.plot(AR_observations_hsb_min.flatten(), range(5000), label="Short Burst Min")
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# +
plt.figure(figsize=(8,10))
plt.title("AR State House: Short Burst Runs")
plt.xlim(4, 17)
plt.xlabel("Number of opportunity districts")
plt.ylabel("Steps")
plt.plot(AR_observations_hsb_max_5.flatten(), range(5000), label="len 5")
plt.plot(AR_observations_hsb_max_10.flatten(), range(5000), label="len 10")
plt.plot(AR_observations_hsb_max.flatten(), range(5000), label="len 25")
plt.plot(AR_observations_hsb_max_50.flatten(), range(5000), label="len 50")
plt.axvline(x=H_enact, color="k", linestyle="--", label="enacted")
plt.axvline(x=H_seed, color="grey", linestyle="--", label="seed")
plt.legend()
plt.show()
# -
plt.figure(figsize=(8,6))
plt.title("AR State House")
# NOTE(review): the original referenced undefined `AR_observations_hsb` and
# passed four labels for three datasets; plot the max-burst observations and
# the stationary-walk sample so the data and labels line up.
plt.hist([AR_observations_hub.flatten(), AR_observations_hsb_max.flatten(),
          AR_observations_hsb_min.flatten(), AR_house_scale_stat],
         label=["Unbiased", "Short Bursts Max", "Short Bursts Min", "Stationary RW"])
plt.legend()
plt.show()
_, PA_unbiased_run = PA_gingles.short_burst_run(num_bursts=1, num_steps=5000)
# _, PA_burst_run = PA_gingles.short_burst_run(num_bursts=100, num_steps=10)
stationary = stationary_distribution(PA_rand_walk)
stat = np.random.choice([3,4,5], 5000, p=stationary)
# +
# Fit a normal to the unbiased-run counts and overlay its density on the histogram
mu, std = norm.fit(PA_unbiased_run.flatten())
plt.figure(figsize=(10,8))
plt.title("Distributions on PA")
plt.hist([PA_unbiased_run.flatten(), PA_observations.flatten(),stat],
         label=["Unbiased","Short Burst","Random Walk"])
# NOTE(review): the original used `x` before any cell defined it (it is only
# created in a later cell); build the evaluation grid locally so this cell
# runs standalone.
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p*5000, 'k', linewidth=2)  # scale density to the 5000 samples
plt.legend()
plt.show()
# -
_, AR_unbiased_run = AR_gingles.short_burst_run(num_bursts=1, num_steps=5000)
AR_stationary = stationary_distribution(AR_rand_walk)
AR_stat = np.random.choice([1,2,3,4,5], 5000, p=AR_stationary)
# Fit a normal to the unbiased-run counts and overlay its density on the histogram
mu, std = norm.fit(AR_unbiased_run.flatten())
plt.figure(figsize=(10,8))
plt.title("Distributions on AR")
plt.hist([AR_unbiased_run.flatten(), AR_observations.flatten(), AR_stat],
         label=["Unbiased","Short Burst","Random Walk"])
# NOTE(review): the original used `x` before any cell defined it (it is only
# created in a later cell); build the evaluation grid locally so this cell
# runs standalone.
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p*5000, 'k', linewidth=2)  # scale density to the 5000 samples
plt.legend()
plt.show()
plt.figure(figsize=(10,8))
plt.title("Distributions on PA")
sns.distplot(PA_unbiased_run.flatten(), kde=False, label="Unbiased")
sns.distplot(PA_observations.flatten(), kde=False, label="Short Burst")
sns.distplot(stat, kde=False, label="Random Walk")
plt.legend()
plt.show()
plt.figure(figsize=(10,8))
plt.title("Distributions on AR")
sns.distplot(AR_unbiased_run.flatten(), kde=False, label="Unbiased Run")
sns.distplot(AR_observations.flatten(), kde=False, label="Short Burst")
sns.distplot(AR_stat, kde=False, label="Random Walk")
plt.legend()
plt.show()
plt.figure()
probplot(PA_unbiased_run.flatten(), plot=plt)
plt.show()
# +
mu, std = norm.fit(PA_unbiased_run.flatten())
plt.hist(PA_unbiased_run.flatten(), bins=3, density=True, alpha=0.6, color='g')
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100)
p = norm.pdf(x, mu, std)
plt.plot(x, p, 'k', linewidth=2)
title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
plt.title(title)
plt.show()
# -
PA_observations[100]
# NOTE(review): `enacted_senate` is not defined anywhere in this notebook --
# presumably this should be AR_enacted_senate or PA_enacted_senate; confirm
# before running.
dist_precs = enacted_senate["bvap_prec"].values()
# count districts at or above the 40% BVAP opportunity threshold
sum(list(map(lambda v: v >= 0.4, dist_precs)))
# highest BVAP share among the districts below the threshold
max(i for i in dist_precs if i < 0.4)
|
misc/projections_to_the_line_and_observed_distributions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Movies have form www.imdb.com/title/tt2406566/ #atomic blonde
#
# To get full credits cast, writers, etc:
#
# look for <div id="fullcredits_content" class="header">
#
# inside it will have alternating a tags linking back to the actor names and pages
#
# <a href="/name/nm0000234/?ref_=ttfc_fc_cl_t1"
# itemprop='url'> <span class="itemprop" itemprop="name"><NAME></span>
# </a>
# <a href="/name/nm0564215/?ref_=ttfc_fc_cl_t2"
# itemprop='url'> <span class="itemprop" itemprop="name"><NAME></span>
# </a>
#
#
# To download full source of website:
# view-source:http://www.imdb.com/title/tt2406566/fullcredits
#
from requests import Request, Session
from time import sleep
title_nums = ['tt0085244', 'tt2406566'] #The Big Chill, Atomic Blonde
'''
import requests
def print_url(r, *args, **kwargs):
print(r.url)
hooks = dict(response=print_url)
r = requests.get('http://httpbin.org', hooks=dict(response=print_url))
print(r.status_code)'''
def write_result(response, *args, **kwargs):
    """requests response hook: save the page body (plus its URL) to a numbered file."""
    print('writing file from..{} '.format({response.url}))
    out_name = "test" + str(next(numbers)) + ".html"
    with open(out_name, 'wb') as out_file:
        out_file.write(response.content)
        # append the source URL so the saved page records where it came from
        out_file.write(response.url.encode('utf-8'))
    print('saved file %s' % out_name)
def make_urls():
    """Build one IMDb title URL per id in the module-level `title_nums`."""
    base_url = "http://www.imdb.com/title/"
    return [base_url + title_num + '/' for title_num in title_nums]
# +
def my_count():
    """Yield 10110, 10111, 10112, ... forever (unique suffixes for saved pages)."""
    value = 10110
    while True:
        yield value
        value += 1


# shared counter consumed by write_result when naming output files
numbers = my_count()
# -
def start(my_session = None):
    """Fetch every IMDb title URL with `my_session`; pages are saved by the
    `write_result` response hook.

    Args:
        my_session: a requests.Session (or compatible) used for the GETs.

    Returns:
        (last_response, urls): the final response object (None when every
        request failed) and the list of URLs attempted.
    """
    urls = make_urls()
    print('Urls', urls)
    # NOTE(review): the original left `r` unbound when the very first request
    # raised, so the final `return r, urls` crashed with UnboundLocalError.
    r = None
    for url in urls:
        try:
            r = my_session.get(url, hooks=dict(response=write_result), timeout=10)
            print("accessing url", url)
            print("request headers",r.request.headers)
            print("response headers",r.headers)
        except Exception as e:
            print("accessing url", url)
            print('Exception encountered at position 1:', e)
    return r, urls
# +
with Session() as s:
try:
start_tuple3 = start(s)
except Exception as err:
print('Exception encountered at position 2:', err)
# -
make_urls()
# !ls
# !cat test1014.html
|
notebooks/IMDB-Downloader.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Notebook for testing the physics module
import numpy as np
import imp
import sys
import os
sys.path.append(os.path.expanduser('~/quantum-ml/nanowire_model'))
import physics
imp.reload(physics)
import potential_profile
E_scale = 'eV'
dx_scale = '1e-9'
kT = 400e-6
b1 = (-200e-3,-0.3,0.1,1)
d = (400e-3,0.0,0.1,1)
b2 = (-200e-3,0.3,0.1,1)
x = np.linspace(-1,1,100)
V = potential_profile.V_x_wire(x,[b1,d,b2])
K_onsite = 10e-3
sigma = x[1] - x[0]
x_0 = x[1] - x[0]
mu_l = (50.0e-3,50.1e-3)
battery_weight = 100
model_physics = (E_scale,dx_scale,kT,x,V,K_onsite,sigma,x_0,mu_l,battery_weight)
my_phys = physics.Physics(model_physics)
# -
my_phys.K
|
junk/test_notebooks/physics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] toc="true"
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#A-brief-tutorial-for-the-WormBase-Enrichment-Suite,-Python-interface" data-toc-modified-id="A-brief-tutorial-for-the-WormBase-Enrichment-Suite,-Python-interface-1"><span class="toc-item-num">1 </span>A brief tutorial for the WormBase Enrichment Suite, Python interface</a></div><div class="lev2 toc-item"><a href="#Loading-the-required-libraries" data-toc-modified-id="Loading-the-required-libraries-1.1"><span class="toc-item-num">1.1 </span>Loading the required libraries</a></div><div class="lev2 toc-item"><a href="#Loading-your-gene-list-and-fetching-the-dictionaries" data-toc-modified-id="Loading-your-gene-list-and-fetching-the-dictionaries-1.2"><span class="toc-item-num">1.2 </span>Loading your gene list and fetching the dictionaries</a></div><div class="lev2 toc-item"><a href="#Analyzing-your-gene-list" data-toc-modified-id="Analyzing-your-gene-list-1.3"><span class="toc-item-num">1.3 </span>Analyzing your gene list</a></div><div class="lev2 toc-item"><a href="#Plotting-the-results" data-toc-modified-id="Plotting-the-results-1.4"><span class="toc-item-num">1.4 </span>Plotting the results</a></div>
# -
# # A brief tutorial for the WormBase Enrichment Suite, Python interface
# ## Loading the required libraries
# +
# this first cell imports the libraries we typically use for data science in Python
import pandas as pd
import numpy as np
# this is the WormBase Enrichment Suite module (previously just TEA)
import tissue_enrichment_analysis as ea
# plotting libraries
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# This enables SVG graphics inline.
# %config InlineBackend.figure_formats = {'png', 'retina'}
# -
# ## Loading your gene list and fetching the dictionaries
# load your DE genes (in WBID format) to a pandas dataframe or to a list
df = pd.read_csv('EVN_wbids.csv')
# fetch the dictionaries using the fetch_dictionary function:
tissue = ea.fetch_dictionary('tissue')
phenotype = ea.fetch_dictionary('phenotype')
go = ea.fetch_dictionary('go')
# ## Analyzing your gene list
# +
# place the dictionaries into a hash
frames = {'tissue': tissue, 'phenotype': phenotype, 'go': go}
# test the list of genes against each dictionary and store the
# results in a hash called results
# NOTE: The enrichment_analysis function only returns Stat. Sig. Results!
result = {}
for analysis, dictionary in frames.items():
result[analysis] = ea.enrichment_analysis(df.gene_name, dictionary, show=False, alpha=10**-1)
# -
# ## Plotting the results
# +
# make the figure in the paper:
fig, ax = plt.subplots(nrows=3, figsize=(8, 10))
i= 0  # row index into the 3-panel figure (one panel per analysis type)
# go through the results hash:
for t, r in result.items():
    # calculate the negative log of the Q-values and store
    r['logQ'] = -r['Q value'].apply(np.log10)
    # remove np.infinites with np.nan
    r.logQ.replace([np.inf], np.nan, inplace=True)
    # remove np.nan with 70 (after 10**-64, the hypergeometric function crashes and returns 0)
    # NOTE(review): 70 is a sentinel cap standing in for "Q value underflowed
    # to 0" — confirm it exceeds every real logQ in the data.
    r.logQ.replace(np.nan, 70, inplace=True)
    # call the plotting function in the Enrichment Suite to plot the results
    ea.plot_enrichment_results(r, title=t, analysis=t, ax=ax[i], y='logQ', n_bars=10)
    # prettify axes
    ax[i].set_ylabel(t)
    if i != 2:
        # hide the x-label on all but the bottom panel
        ax[i].set_xlabel('')
    else:
        ax[i].set_xlabel(r'$-\log_{10}{Q}$')
    i += 1
# save figure
plt.savefig('Enrichment_Results.svg', bbox_inches='tight')
|
tutorial/Tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib nbagg
import matplotlib.pyplot as plt
import numpy as np
# Cross-Validation
# ----------------------------------------
# <img src="figures/cross_validation.svg" width=100%>
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
from sklearn.cross_validation import cross_val_score
from sklearn.svm import LinearSVC
cross_val_score(LinearSVC(), X, y, cv=5)
cross_val_score(LinearSVC(), X, y, cv=5, scoring="f1_macro")
# Let's go to a binary task for a moment
y % 2
cross_val_score(LinearSVC(), X, y % 2)
cross_val_score(LinearSVC(), X, y % 2, scoring="average_precision")
cross_val_score(LinearSVC(), X, y % 2, scoring="roc_auc")
from sklearn.metrics.scorer import SCORERS
print(SCORERS.keys())
# Implementing your own scoring metric:
# +
def my_accuracy_scoring(est, X, y):
    """Custom scorer: fraction of samples in X that est predicts as y."""
    predictions = est.predict(X)
    return np.mean(predictions == y)
cross_val_score(LinearSVC(), X, y, scoring=my_accuracy_scoring)
# -
def my_super_scoring(est, X, y):
    """Accuracy penalised by the fraction of non-zero model coefficients.

    Rewards sparse linear models: a perfect but dense estimator scores
    lower than an equally accurate sparse one.
    """
    accuracy = np.mean(est.predict(X) == y)
    density_penalty = np.mean(est.coef_ != 0)
    return accuracy - density_penalty
# +
from sklearn.grid_search import GridSearchCV
y = iris.target
grid = GridSearchCV(LinearSVC(C=.01, dual=False),
param_grid={'penalty' : ['l1', 'l2']},
scoring=my_super_scoring)
grid.fit(X, y)
print(grid.best_params_)
# -
# There are other ways to do cross-validation
# +
from sklearn.cross_validation import ShuffleSplit
shuffle_split = ShuffleSplit(len(X), 10, test_size=.4)
cross_val_score(LinearSVC(), X, y, cv=shuffle_split)
# +
from sklearn.cross_validation import StratifiedKFold, KFold, ShuffleSplit
def plot_cv(cv, n_samples):
    """Visualise a cross-validation iterator: one matrix row per split,
    with the test indices of that split marked True."""
    fold_masks = []
    for _, test_idx in cv:
        row = np.zeros(n_samples, dtype=bool)
        row[test_idx] = 1
        fold_masks.append(row)
    plt.matshow(fold_masks)
# -
plot_cv(StratifiedKFold(y, n_folds=5), len(y))
plot_cv(KFold(len(iris.target), n_folds=5), len(iris.target))
plot_cv(ShuffleSplit(len(iris.target), n_iter=20, test_size=.2),
len(iris.target))
# # Exercises
# Use KFold cross validation and StratifiedKFold cross validation (3 or 5 folds) for LinearSVC on the iris dataset.
# Why are the results so different? How could you get more similar results?
# +
# # %load solutions/cross_validation_iris.py
|
05 - Cross-validation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
Non-personalized
Content based
Attributes
Jaccard
Text
Cosine similarity
|
notebooks/recommendations/Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: adsi_ass_2
# language: python
# name: adsi_ass_2
# ---
# # Introduction
#
# This notebook predicts the `beer_style` using a neural network on the PyTorch
# framework. It is a modification of the 5_pytorch.ipynb notebook. After 20
# epochs, there seems to be still some room for improvement.
#
# The same model is trained again for 60 more epochs.
#
# ## Summary
# The increase of neurons has **not** improved the model performance. The
# [classification report](#Classification-report) shows that the validation
# accuracy increased to as high as 31.2%, and the test accuracy remains at 32%.
# + pycharm={"name": "#%%\n"}
artefact_prefix = '8_pytorch'
target = 'beer_style'
# + pycharm={"name": "#%%\n"}
# %load_ext autoreload
# %autoreload 2
# + pycharm={"name": "#%%\n"}
from dotenv import find_dotenv
from datetime import datetime
import pandas as pd
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from category_encoders.binary import BinaryEncoder
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from joblib import dump, load
from src.data.sets import merge_categories
from src.data.sets import save_sets
from src.data.sets import load_sets
from src.data.sets import split_sets_random
from src.data.sets import test_class_exclusion
from src.models.performance import convert_cr_to_dataframe
from src.models.pytorch import PytorchClassification_8
from src.models.pytorch import get_device
from src.models.pytorch import train_classification
from src.models.pytorch import test_classification
from src.models.pytorch import PytorchDataset
from src.models.pipes import create_preprocessing_pipe
from src.visualization.visualize import plot_confusion_matrix
# -
# # Set up directories
# + pycharm={"name": "#%% Set directory\n"}
project_dir = Path(find_dotenv()).parent
data_dir = project_dir / 'data'
raw_data_dir = data_dir / 'raw'
interim_data_dir = data_dir / 'interim'
processed_data_dir = data_dir / 'processed'
reports_dir = project_dir / 'reports'
models_dir = project_dir / 'models'
# -
# # Load data
# + pycharm={"name": "#%%\n"}
X_train, X_test, X_val, y_train, y_test, y_val = load_sets()
# -
# # Preprocess data
#
# 1. The `brewery_name` is a feature with a very high cardinality, ~5700. One hot
# encoding is not feasible as it will introduce 5700 very sparse columns.
# Another option is to use binary encoding, which would result in 14 new columns.
# 1. Standard scaling is used to ensure that the binary columns ([0, 1])and the
# review columns ([1, 5]) are on the same scale.
# + pycharm={"name": "#%%\n"}
pipe = Pipeline([
('bin_encoder', BinaryEncoder(cols=['brewery_name'])),
('scaler', StandardScaler())
])
# + pycharm={"name": "#%%\n"}
X_train_trans = pipe.fit_transform(X_train)
X_val_trans = pipe.transform(X_val)
X_test_trans = pipe.transform(X_test)
# + pycharm={"name": "#%%\n"}
X_train_trans.shape
# + pycharm={"name": "#%%\n"}
n_features = X_train_trans.shape[1]
n_features
# + pycharm={"name": "#%%\n"}
n_classes = y_train.nunique()
n_classes
# -
# ## Encoding
#
# PyTorch accepts only numerical labels.
# + pycharm={"name": "#%%\n"}
# Encode the string labels as integers. Fit the encoder on the training
# labels only, then reuse the fitted mapping for the validation and test
# splits. The previous code called fit_transform on the validation labels,
# which refits the encoder; if the validation split were missing a class,
# its integer codes would no longer match the training encoding.
le = LabelEncoder()
y_train_trans = le.fit_transform(y_train.to_frame())
y_val_trans = le.transform(y_val.to_frame())
y_test_trans = le.transform(y_test.to_frame())
# + pycharm={"name": "#%%\n"}
y_test_trans
# -
# ## Convert to Pytorch tensors
# + pycharm={"name": "#%%\n"}
device = get_device()
device
# + pycharm={"name": "#%%\n"}
train_dataset = PytorchDataset(X=X_train_trans, y=y_train_trans)
val_dataset = PytorchDataset(X=X_val_trans, y=y_val_trans)
test_dataset = PytorchDataset(X=X_test_trans, y=y_test_trans)
# -
# # Classification model
# + pycharm={"name": "#%%\n"}
model = PytorchClassification_8(n_features=n_features, n_classes=n_classes)
model.to(device)
# + pycharm={"name": "#%%\n"}
criterion = nn.CrossEntropyLoss()
# + pycharm={"name": "#%%\n"}
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# -
# # Train the model
# + pycharm={"name": "#%%\n"}
N_EPOCHS = 60
BATCH_SIZE = 4096
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9)
# + pycharm={"name": "#%%\n"}
start_time = datetime.now()
print(f'Started: {start_time}')
for epoch in range(N_EPOCHS):
train_loss, train_acc = train_classification(train_dataset,
model=model,
criterion=criterion,
optimizer=optimizer,
batch_size=BATCH_SIZE,
device=device,
scheduler=scheduler)
valid_loss, valid_acc = test_classification(val_dataset,
model=model,
criterion=criterion,
batch_size=BATCH_SIZE,
device=device)
print(f'Epoch: {epoch}')
print(f'\t(train)\tLoss: {train_loss:.4f}\t|\tAcc: {train_acc * 100:.1f}%')
print(f'\t(valid)\tLoss: {valid_loss:.4f}\t|\tAcc: {valid_acc * 100:.1f}%')
end_time = datetime.now()
runtime = end_time - start_time
print(f'Ended: {end_time}')
print(f'Runtime: {runtime}')
# + pycharm={"name": "#%%\n"}
N_EPOCHS = 20
BATCH_SIZE = 4096
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9)
# + pycharm={"name": "#%%\n"}
start_time = datetime.now()
print(f'Started: {start_time}')
for epoch in range(N_EPOCHS):
train_loss, train_acc = train_classification(train_dataset,
model=model,
criterion=criterion,
optimizer=optimizer,
batch_size=BATCH_SIZE,
device=device,
scheduler=scheduler)
valid_loss, valid_acc = test_classification(val_dataset,
model=model,
criterion=criterion,
batch_size=BATCH_SIZE,
device=device)
print(f'Epoch: {epoch}')
print(f'\t(train)\tLoss: {train_loss:.4f}\t|\tAcc: {train_acc * 100:.1f}%')
print(f'\t(valid)\tLoss: {valid_loss:.4f}\t|\tAcc: {valid_acc * 100:.1f}%')
end_time = datetime.now()
runtime = end_time - start_time
print(f'Ended: {end_time}')
print(f'Runtime: {runtime}')
# -
# # Prediction
# + pycharm={"name": "#%%\n"}
# Use the CPU version if the GPU runs out of memory.
# preds = model(test_dataset.X_tensor.to(device)).argmax(1)
model.to('cpu')
preds = model(test_dataset.X_tensor).argmax(1)
preds
model.to(device)
# -
# # Evaluation
# ## Classification report
# + pycharm={"name": "#%%\n"}
report = classification_report(y_test, le.inverse_transform(preds.cpu()))
print(report)
# + [markdown] pycharm={"name": "#%% md\n"}
# # Save objects for production
# -
# ## Save model
# + pycharm={"name": "#%%\n"}
path = models_dir / f'{artefact_prefix}_model'
torch.save(model, path.with_suffix('.torch'))
# -
# ## Create pipe object
#
# This is for transforming the input prior to prediction.
# + pycharm={"name": "#%%\n"}
X = pd.concat([X_train, X_val, X_test])
prod_pipe = create_preprocessing_pipe(X)
path = models_dir / f'{artefact_prefix}_pipe'
dump(prod_pipe, path.with_suffix('.sav'))
# -
# ## Save `LabelEncoder`
#
# This is required to get back the name of the `beer_style`.
# + pycharm={"name": "#%%\n"}
path = models_dir / f'{artefact_prefix}_label_encoder'
dump(le, path.with_suffix('.sav'))
# + pycharm={"name": "#%%\n"}
|
notebooks/8_pytorch.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Tutorial: fitting an FSRQ broad-band SED using agnpy and sherpa
#
# In this tutorial we will show how to use `sherpa` to wrap `agnpy` functions to perform the fit of the broad-band SED of PKS1510-089, measured during its gamma-ray flaring activity in 2015 [(Ahnen et al. 2017)](https://ui.adsabs.harvard.edu/abs/2017A%26A...603A..29A/abstract). We select the MWL SED corresponding to the period identified in the paper as "Period B" (MJD 57164-57166).
#
# [sherpa](https://sherpa.readthedocs.io/en/latest/index.html) is required to run this notebook.
# +
# import numpy, astropy and matplotlib for basic functionalities
import numpy as np
import astropy.units as u
from astropy.constants import c, G, k_B, m_e, M_sun
from astropy.coordinates import Distance
from pathlib import Path
from astropy.table import Table
import matplotlib.pyplot as plt
import pkg_resources
# import agnpy classes
import agnpy
from agnpy.emission_regions import Blob
from agnpy.spectra import BrokenPowerLaw
from agnpy.synchrotron import Synchrotron
from agnpy.compton import SynchrotronSelfCompton, ExternalCompton
from agnpy.targets import RingDustTorus, SSDisk
from agnpy.utils.plot import load_mpl_rc, sed_x_label, sed_y_label, plot_sed
load_mpl_rc()
# import sherpa classes
from sherpa.models import model
from sherpa import data
from sherpa.fit import Fit
from sherpa.stats import Chi2
from sherpa.optmethods import LevMar
# constants
mec2 = m_e.to("erg", equivalencies=u.mass_energy())
gamma_size = 400
gamma_to_integrate = np.logspace(0, 7, gamma_size)
# -
# ### sherpa wrapper of agnpy radiative processes
# Now let us [follow the sherpa documentation](https://sherpa.readthedocs.io/en/latest/model_classes/usermodel.html) and define a model wrapping agnpy's functions to compute the Synchrotron, Synchrotron Self-Compton and External Compton on Dust Torus SEDs. We will assume a broken power-law electron distribution. The thermal SEDs of the Disk and the DT are added to the total flux model.
#
# **NOTE:** for the parameters that vary over several orders of magnitude (i.e. normalisation and Lorentz factors of the electron distribution) it is better to provide to the fitting routine a "scaled" version of them (e.g. their log10), such that larger ranges might be covered with small parameters variation.
#
# **NOTE:** the size of the blob $R_{\rm b}$ is constrained through the variability time scale, $t_{\rm var}$, and the Doppler factor, $\delta_{\rm D}$, via: $R_{\rm b} = (c t_{\rm var} \delta_{\rm D}) / (1 + z)$.
class AgnpyEC(model.RegriddableModel1D):
    """Wrapper of agnpy's non synchrotron, SSC and EC classes. The flux model
    accounts for the Disk and DT's thermal SEDs.
    A broken power law is assumed for the electron spectrum.
    To limit the span of the parameters space, we fit the log10 of the parameters
    whose range is expected to cover several orders of magnitudes (normalisation,
    gammas, size and magnetic field of the blob).
    """

    def __init__(self, name="ec"):
        """Declare every fit parameter as a sherpa ``model.Parameter``.

        Parameters spanning many decades are declared as their log10
        (``log10_*``); ``calc`` undoes the scaling before evaluation.
        """
        # EED parameters
        self.log10_k_e = model.Parameter(name, "log10_k_e", -2.0, min=-20.0, max=10.0)
        self.p1 = model.Parameter(name, "p1", 2.1, min=-2.0, max=5.0)
        self.p2 = model.Parameter(name, "p2", 3.1, min=-2.0, max=5.0)
        self.log10_gamma_b = model.Parameter(name, "log10_gamma_b", 3, min=1, max=6)
        self.log10_gamma_min = model.Parameter(name, "log10_gamma_min", 1, min=0, max=4)
        self.log10_gamma_max = model.Parameter(name, "log10_gamma_max", 5, min=4, max=8)
        # source general parameters
        self.z = model.Parameter(name, "z", 0.1, min=0.01, max=1)
        self.d_L = model.Parameter(name, "d_L", 1e27, min=1e25, max=1e33, units="cm")
        # emission region parameters
        self.delta_D = model.Parameter(name, "delta_D", 10, min=0, max=40)
        self.log10_B = model.Parameter(name, "log10_B", -2, min=-4, max=2)
        self.t_var = model.Parameter(
            name, "t_var", 600, min=10, max=np.pi * 1e7, units="s"
        )
        self.mu_s = model.Parameter(name, "mu_s", 0.9, min=0.0, max=1.0)
        self.log10_r = model.Parameter(name, "log10_r", 17.0, min=16.0, max=20.0)
        # disk parameters
        self.log10_L_disk = model.Parameter(
            name, "log10_L_disk", 45.0, min=42.0, max=48.0
        )
        self.log10_M_BH = model.Parameter(name, "log10_M_BH", 42, min=32, max=45)
        self.m_dot = model.Parameter(
            name, "m_dot", 1e26, min=1e24, max=1e30, units="g s-1"
        )
        self.R_in = model.Parameter(name, "R_in", 1e14, min=1e12, max=1e16, units="cm")
        self.R_out = model.Parameter(
            name, "R_out", 1e17, min=1e12, max=1e19, units="cm"
        )
        # DT parameters
        self.xi_dt = model.Parameter(name, "xi_dt", 0.6, min=0.0, max=1.0)
        self.T_dt = model.Parameter(
            name, "T_dt", 1.0e3, min=1.0e2, max=1.0e4, units="K"
        )
        self.R_dt = model.Parameter(
            name, "R_dt", 2.5e18, min=1.0e17, max=1.0e19, units="cm"
        )
        # register the parameters with sherpa; the tuple order here must
        # match the unpacking order in calc() below
        model.RegriddableModel1D.__init__(
            self,
            name,
            (
                self.log10_k_e,
                self.p1,
                self.p2,
                self.log10_gamma_b,
                self.log10_gamma_min,
                self.log10_gamma_max,
                self.z,
                self.d_L,
                self.delta_D,
                self.log10_B,
                self.t_var,
                self.mu_s,
                self.log10_r,
                self.log10_L_disk,
                self.log10_M_BH,
                self.m_dot,
                self.R_in,
                self.R_out,
                self.xi_dt,
                self.T_dt,
                self.R_dt,
            ),
        )

    def calc(self, pars, x):
        """evaluate the model calling the agnpy functions

        ``pars`` is the flat sequence of parameter values supplied by
        sherpa (same order as registered in ``__init__``); ``x`` is the
        evaluation grid of frequencies (values interpreted as Hz).
        Returns the total SED: synchrotron + SSC + EC-on-DT plus the
        disk and DT thermal components.
        """
        (
            log10_k_e,
            p1,
            p2,
            log10_gamma_b,
            log10_gamma_min,
            log10_gamma_max,
            z,
            d_L,
            delta_D,
            log10_B,
            t_var,
            mu_s,
            log10_r,
            log10_L_disk,
            log10_M_BH,
            m_dot,
            R_in,
            R_out,
            xi_dt,
            T_dt,
            R_dt,
        ) = pars
        # add units, scale quantities
        # NOTE(review): `x *= u.Hz` attaches units in place, mutating the
        # array sherpa passed in — confirm callers do not reuse `x`.
        x *= u.Hz
        k_e = 10 ** log10_k_e * u.Unit("cm-3")
        gamma_b = 10 ** log10_gamma_b
        gamma_min = 10 ** log10_gamma_min
        gamma_max = 10 ** log10_gamma_max
        B = 10 ** log10_B * u.G
        # blob radius from the variability timescale:
        # R_b = c * t_var * delta_D / (1 + z)
        R_b = c.to_value("cm s-1") * t_var * delta_D / (1 + z) * u.cm
        r = 10 ** log10_r * u.cm
        d_L *= u.cm
        L_disk = 10 ** log10_L_disk * u.Unit("erg s-1")
        M_BH = 10 ** log10_M_BH * u.Unit("g")
        m_dot *= u.Unit("g s-1")
        R_in *= u.cm
        R_out *= u.cm
        R_dt *= u.cm
        T_dt *= u.K
        # dimensionless DT photon energy, 2.7 kT / (m_e c^2)
        eps_dt = 2.7 * (k_B * T_dt / mec2).to_value("")
        # non-thermal components
        sed_synch = Synchrotron.evaluate_sed_flux(
            x,
            z,
            d_L,
            delta_D,
            B,
            R_b,
            BrokenPowerLaw,
            k_e,
            p1,
            p2,
            gamma_b,
            gamma_min,
            gamma_max,
            ssa=True,
            gamma=gamma_to_integrate,
        )
        sed_ssc = SynchrotronSelfCompton.evaluate_sed_flux(
            x,
            z,
            d_L,
            delta_D,
            B,
            R_b,
            BrokenPowerLaw,
            k_e,
            p1,
            p2,
            gamma_b,
            gamma_min,
            gamma_max,
            ssa=True,
            gamma=gamma_to_integrate,
        )
        sed_ec_dt = ExternalCompton.evaluate_sed_flux_dt(
            x,
            z,
            d_L,
            delta_D,
            mu_s,
            R_b,
            L_disk,
            xi_dt,
            eps_dt,
            R_dt,
            r,
            BrokenPowerLaw,
            k_e,
            p1,
            p2,
            gamma_b,
            gamma_min,
            gamma_max,
            gamma=gamma_to_integrate,
        )
        # thermal components
        sed_bb_disk = SSDisk.evaluate_multi_T_bb_norm_sed(
            x, z, L_disk, M_BH, m_dot, R_in, R_out, d_L
        )
        sed_bb_dt = RingDustTorus.evaluate_bb_norm_sed(
            x, z, xi_dt * L_disk, T_dt, R_dt, d_L
        )
        return sed_synch + sed_ssc + sed_ec_dt + sed_bb_disk + sed_bb_dt
# ### Fitting with sherpa
# Let us start here the procedure to fit with sherpa, first we read the data and then we pass them in a `Data1D` object provided by sherpa.
# We add an educated guess on systematic errors on the flux measurements in the different energy bands.
# read the 1D data
sed_path = pkg_resources.resource_filename("agnpy", "data/mwl_seds/PKS1510-089_2015b.ecsv")
sed_table = Table.read(sed_path)
x = sed_table["e_ref"].to("Hz", equivalencies=u.spectral())
y = sed_table["e2dnde"]
y_err_stat = sed_table["e2dnde_errn"]
# array of systematic errors, will just be summed in quadrature to the statistical error
# we assume
# - 30% on VHE gamma-ray instruments
# - 10% on HE gamma-ray instruments
# - 10% on X-ray instruments
# - 5% on lower-energy instruments
y_err_syst = np.zeros(len(x))
# define energy ranges
nu_vhe = (100 * u.GeV).to("Hz", equivalencies=u.spectral())
nu_he = (0.1 * u.GeV).to("Hz", equivalencies=u.spectral())
nu_x_ray_max = (300 * u.keV).to("Hz", equivalencies=u.spectral())
nu_x_ray_min = (0.3 * u.keV).to("Hz", equivalencies=u.spectral())
vhe_gamma = x >= nu_vhe
he_gamma = (x >= nu_he) * (x < nu_vhe)
x_ray = (x >= nu_x_ray_min) * (x < nu_x_ray_max)
uv_to_radio = x < nu_x_ray_min
# declare systematics
y_err_syst[vhe_gamma] = 0.30
y_err_syst[he_gamma] = 0.10
y_err_syst[x_ray] = 0.10
y_err_syst[uv_to_radio] = 0.05
y_err_syst = y * y_err_syst
# define the data1D object containing it
sed = data.Data1D("sed", x, y, staterror=y_err_stat, syserror=y_err_syst)
# Now we create an instance of the model wrapping the non-thermal and thermal emissions.
# Let us leave free to vary the parameters describing the electron distribution and the magnetic field.
# declare a model
agnpy_ec = AgnpyEC()
# global parameters of the blob and the DT
z = 0.361
d_L = Distance(z=z).to("cm")
# blob
Gamma = 20
delta_D = 25
Beta = np.sqrt(1 - 1 / np.power(Gamma, 2)) # jet relativistic speed
mu_s = (1 - 1 / (Gamma * delta_D)) / Beta # viewing angle
B = 0.35 * u.G
# disk
L_disk = 6.7e45 * u.Unit("erg s-1") # disk luminosity
M_BH = 5.71 * 1e7 * M_sun
eta = 1 / 12
m_dot = (L_disk / (eta * c ** 2)).to("g s-1")
R_g = ((G * M_BH) / c ** 2).to("cm")
R_in = 6 * R_g
R_out = 10000 * R_g
# DT
xi_dt = 0.6 # fraction of disk luminosity reprocessed by the DT
T_dt = 1e3 * u.K
R_dt = 6.47 * 1e18 * u.cm
# size and location of the emission region
t_var = 0.5 * u.d
r = 6e17 * u.cm
# instance of the model wrapping angpy functionalities
# - AGN parameters
# -- distances
agnpy_ec.z = z
agnpy_ec.z.freeze()
agnpy_ec.d_L = d_L.cgs.value
agnpy_ec.d_L.freeze()
# -- SS disk
agnpy_ec.log10_L_disk = np.log10(L_disk.to_value("erg s-1"))
agnpy_ec.log10_L_disk.freeze()
agnpy_ec.log10_M_BH = np.log10(M_BH.to_value("g"))
agnpy_ec.log10_M_BH.freeze()
agnpy_ec.m_dot = m_dot.to_value("g s-1")
agnpy_ec.m_dot.freeze()
agnpy_ec.R_in = R_in.to_value("cm")
agnpy_ec.R_in.freeze()
agnpy_ec.R_out = R_out.to_value("cm")
agnpy_ec.R_out.freeze()
# -- dust torus (DT)
agnpy_ec.xi_dt = xi_dt
agnpy_ec.xi_dt.freeze()
agnpy_ec.T_dt = T_dt.to_value("K")
agnpy_ec.T_dt.freeze()
agnpy_ec.R_dt = R_dt.to_value("cm")
agnpy_ec.R_dt.freeze()
# - blob parameters
agnpy_ec.delta_D = delta_D
agnpy_ec.delta_D.freeze()
agnpy_ec.log10_B = np.log10(B.to_value("G"))
agnpy_ec.mu_s = mu_s
agnpy_ec.mu_s.freeze()
agnpy_ec.t_var = (t_var).to_value("s")
agnpy_ec.t_var.freeze()
agnpy_ec.log10_r = np.log10(r.to_value("cm"))
agnpy_ec.log10_r.freeze()
# - EED
agnpy_ec.log10_k_e = np.log10(0.05)
agnpy_ec.p1 = 1.8
agnpy_ec.p2 = 3.5
agnpy_ec.log10_gamma_b = np.log10(500)
agnpy_ec.log10_gamma_min = np.log10(1)
agnpy_ec.log10_gamma_min.freeze()
agnpy_ec.log10_gamma_max = np.log10(3e4)
agnpy_ec.log10_gamma_max.freeze()
# Now we define the Fit procedure choosing the statistics (chi2) and the minimisation method. We will fit only the data between $10^{11}\,{\rm Hz}$ and $10^{30}\,{\rm Hz}$, avoiding the lowest-energy radio data usually attributed to the extended jet emission.
# fit using the Levenberg-Marquardt optimiser
fitter = Fit(sed, agnpy_ec, stat=Chi2(), method=LevMar())
min_x = 1e11 * u.Hz
max_x = 1e30 * u.Hz
sed.notice(min_x, max_x)
# %%time
# perform the fit and time it!
results = fitter.fit()
print("-- fit succesful?", results.succeeded)
print(results.format())
# plot the results!
x = np.logspace(11, 27, 200)
plt.errorbar(sed.x, sed.y, yerr=sed.get_error(), marker=".", ls="", color="k")
plot_sed(x, agnpy_ec(x), ls="-", color="crimson")
plt.ylim([1e-14, 1e-8])
plt.show()
|
docs/tutorials/ec_dt_sherpa_fit.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# -
# ## Generate Cities List
# +
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
# If the city is unique, then add it to a our cities list
if city not in cities:
cities.append(city)
# Print the city count to confirm sufficient count
len(cities)
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# +
# Build the OpenWeather request URL template once; the city name is
# appended per request inside the loop.
baseurl = "http://api.openweathermap.org/data/2.5/weather?"
units = "imperial"
baseurl = f"{baseurl}appid={weather_api_key}&units={units}&q="
# Create empty lists to store the data for each field.
# BUG FIX: city_id_list was commented out while the loop still appended
# to it, so the resulting NameError sent every single city into the
# except branch ("City not found. Skipping...") and left all lists empty.
city_id_list = []
city_name_list = []
country_list = []
lng_list = []
lat_list = []
maxtemp_list = []
humidity_list = []
clouds_list = []
wind_speed_list = []
date_list = []
# counters for the progress log: 50 records per "set"
setCount = 1
recordCount = 1
print("Beginning Data Retrieval")
print("-----------------------------")
for city in cities:
    # roll over to a new set after 50 records
    if recordCount == 51:
        recordCount = 1
        setCount = setCount + 1
    # Get the JSON response from the OpenWeather API
    url = f"{baseurl}{city}"
    weather_response = requests.get(url)
    # only parse responses that returned HTTP 200
    if weather_response.status_code == 200:
        weather_json = weather_response.json()
        # Extract every field first, then append; this way a missing key
        # cannot leave the lists with unequal lengths (which would break
        # the DataFrame construction below). Catch only KeyError — a bare
        # except would hide genuine bugs.
        try:
            city_id = weather_json['id']
            date = weather_json['dt']
            city_name = weather_json['name']
            country_name = weather_json['sys']['country']
            lng = weather_json['coord']['lon']
            lat = weather_json['coord']['lat']
            temp = weather_json['main']['temp_max']
            humidity = weather_json['main']['humidity']
            clouds = weather_json['clouds']['all']
            wind_speed = weather_json['wind']['speed']
        except KeyError:
            print("City not found. Skipping...")
            continue
        city_id_list.append(city_id)
        date_list.append(date)
        city_name_list.append(city_name)
        country_list.append(country_name)
        lng_list.append(lng)
        lat_list.append(lat)
        maxtemp_list.append(temp)
        humidity_list.append(humidity)
        clouds_list.append(clouds)
        wind_speed_list.append(wind_speed)
        print(f"Processing Record {recordCount} of Set {setCount} | {city}")
        recordCount = recordCount + 1
# Convert the data into a dataframe
cities_df = pd.DataFrame({
    "City": city_name_list,
    "Lat": lat_list,
    "Lng": lng_list,
    "Max Temp": maxtemp_list,
    "Humidity": humidity_list,
    "Cloudiness": clouds_list,
    "Wind Speed": wind_speed_list,
    "Country": country_list,
    "Date": date_list})
# -
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
cities_df.to_csv('../output_data/cities.csv')
# +
cities_df.head()
# -
cities_df.describe()
# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.
cities_df.describe()
# +
# Get the indices of cities that have humidity over 100%.
# none of cities have humidity over 100%
# +
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
# none of cities have humidity over 100%
# -
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# ## Latitude vs. Temperature Plot
cities_df.plot.scatter('Lat', 'Max Temp', grid=True, title='City Latitude vs Max Temprature (%s)' % time.strftime('%x'))
plt.ylabel("Max Temprature (F)")
plt.xlabel("Latitude")
plt.show()
# ## Latitude vs. Humidity Plot
cities_df.plot.scatter('Lat', 'Humidity', grid=True, title='City Latitude vs Humidity (%s)' % time.strftime('%x'))
plt.ylabel("Humidity (%)")
plt.xlabel("Latitude")
plt.show()
# ## Latitude vs. Cloudiness Plot
cities_df.plot.scatter('Lat', 'Cloudiness', grid=True, title='City Latitude vs Cloudiness (%s)' % time.strftime('%x'))
plt.ylabel("Cloudiness (%)")
plt.xlabel("Latitude")
plt.show()
# ## Latitude vs. Wind Speed Plot
cities_df.plot.scatter('Lat', 'Wind Speed', grid=True, title='City Latitude vs Wind Speed (%s)' % time.strftime('%x'))
plt.ylabel("Wind Speed (mph)")
plt.xlabel("Latitude")
plt.show()
# ## Linear Regression
# +
# Create Northern and Southern Hemisphere DataFrames
nhemis= cities_df['Lat'] > 0
nhemis_df = cities_df[nhemis]
shemis_df = cities_df[~nhemis]
# shemis=cities_df['Lat']<0
# shemis_df = cities_df[shemis]
# print(shemis)
# print(str(len(nhemis_df)) + "," + str(len(shemis_df)))
nhemis_df.head()
shemis_df.head()
# -
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
x_values = nhemis_df["Lat"]
y_values = nhemis_df["Max Temp"]
# Run linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.title='North Hemis Latitude vs Max Temp'
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(10,20),fontsize=12,color="red")
plt.xlabel('Latitude')
plt.ylabel('Max Temp')
plt.ylim(-5, 100)
plt.xlim(0, 80)
plt.grid(which='major', linestyle='-')
plt.grid(which='minor', linestyle=':')
plt.tight_layout()
plt.savefig("../Images/nhemis_lat_maxtemp_regression.png")
plt.show()
# -
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
x_values = shemis_df["Lat"]
y_values = shemis_df["Max Temp"]
# Run linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.title='South Hemis Latitude vs Max Temp'
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(-35,80),fontsize=12,color="red")
plt.xlabel('Latitude')
plt.ylabel('Max Temp')
plt.ylim(40, 100)
plt.xlim(0, -60)
plt.minorticks_on()
plt.tight_layout()
plt.savefig("../Images/shemis_lat_maxtemp_regression.png")
plt.show()
# -
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
x_values = nhemis_df["Lat"]
y_values = nhemis_df["Humidity"]
# Run linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(10,20),fontsize=12,color="red")
plt.xlabel('Latitude')
plt.ylabel('Humidity')
plt.ylim(-5, 100)
plt.xlim(0, 80)
plt.grid(which='major', linestyle='-')
plt.grid(which='minor', linestyle=':')
plt.tight_layout()
plt.savefig("../Images/nhemis_lat_Humidity_regression.png")
plt.show()
# -
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
x_values = shemis_df["Lat"]
y_values = shemis_df["Humidity"]
# Run linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(-35,80),fontsize=12,color="red")
plt.xlabel('Latitude')
plt.ylabel('Humidity')
plt.ylim(40, 100)
plt.xlim(0, -60)
plt.minorticks_on()
plt.tight_layout()
plt.savefig("../Images/shemis_lat_humidity_regression.png")
plt.show()
# -
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
x_values = nhemis_df["Lat"]
y_values = nhemis_df["Cloudiness"]
# Run linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(10,20),fontsize=12,color="red")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
plt.ylim(0, 100)
plt.xlim(0, 80)
plt.grid(which='major', linestyle='-')
plt.grid(which='minor', linestyle=':')
plt.tight_layout()
plt.savefig("../Images/nhemis_lat_Cloudiness_regression.png")
plt.show()
# -
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
x_values = shemis_df["Lat"]
y_values = shemis_df["Cloudiness"]
# Run linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(-25,80),fontsize=12,color="red")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
plt.ylim(0, 110)
plt.xlim(0, -50)
plt.minorticks_on()
plt.tight_layout()
plt.savefig("../Images/shemis_lat_Cloudiness_regression.png")
plt.show()
# -
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
x_values = nhemis_df["Lat"]
y_values = nhemis_df["Wind Speed"]
# Run linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
# plt.title('Noth Hemisphere Lateral vs Wind Speed Regression')
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(30,20),fontsize=12,color="red")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed')
plt.ylim(0, 25)
plt.xlim(0, 80)
plt.minorticks_on()
plt.tight_layout()
plt.savefig("../Images/nhemis_lat_WindSpeed_regression.png")
plt.show()
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
x_values = shemis_df["Lat"]
y_values = shemis_df["Wind Speed"]
# Run linear regression
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
# plt.title('South Hemisphere Lateral vs Wind Speed Regression')
# plt.title("Name")
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(-20,18),fontsize=12,color="red")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed')
plt.ylim(0, 20)
plt.xlim(0, -50)
plt.minorticks_on()
plt.tight_layout()
plt.savefig("../Images/shemis_lat_WindSpeed_regression.png")
plt.show()
|
WeatherPy/WeatherPy.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
versioninfo()
g=6.0
ENV["JULIA_CUDA_MEMORY_LIMIT"] = convert(Int, round(g * 1024 * 1024 * 1024))
ENV["JULIA_DEBUG"]="all"
# # New version of main
#
# - [X] refactor the code of main1
# - [X] use seeding
# - [X] pre-gen graphs and splitting
# - [X] use cloud GPU for training
# - [X] I don't need correlation experiments anymore
1+2
include("../src/main.jl")
# + tags=[]
main_data()
# -
main_ngraph()
main_ersf124()
main_ch3()
# Train CNN variants on merged ER+SF (k=1) datasets for several graph sizes d.
# Each (prefix, model_fn, nsteps) tuple is one architecture configuration;
# commented entries are configurations that were tried and disabled.
function main_cnn()
    for d in [10,20,50],
        (prefix, model_fn,nsteps) in [
            # ("CNN", ()->cnn_model(2), 3e4),
            ("CNN2-$(now())", ()->cnn_model(2, 128, (5,5), (2,2)), 3e4),
            # ("CNN2", ()->cnn_model(2, 128, (3,3), (1,1)), 3e4)
        ]
        # One DataSpec per graph type (ER / scale-free); 10k graphs each.
        specs = []
        for gtype in [:ER, :SF],
            k in [1]
            push!(specs, DataSpec(d=d, k=k, gtype=gtype,
                                  noise=:Gaussian, mat=:CH3,
                                  ng=10000, N=1))
        end
        specs = Array{DataSpec}(specs)
        # print more frequently for CNN and FC to get more data to print
        test_throttle = if prefix == "EQ2" 10 else 1 end
        @info "training .." prefix d
        # Train one model on the merged specs; expID identifies the saved run.
        expID = exp_train(specs, model_fn,
                          # TODO I'll need to increase the training steps here
                          # CAUTION feed in the gtype in the model prefix
                          prefix="$prefix-ERSF-k1-d=$d", train_steps=nsteps,
                          test_throttle = test_throttle,
                          merge=true)
    end
end
main_cnn()
main_mat()
main_ensemble_d()
# # DEBUG
main()
# Ensemble
#
# - [ ] I actually probably want to merge the datasets with the same d.
# This is because, I can merge the data to be more evenly distributed to fit the model.
# - [ ] I probably want to adjust batch size to speed it up
# Instantiate an EQ model on the GPU for interactive inspection.
model = eq_model_fn() |> gpu
# Build train/test datasets for a small d=20 ER spec.
ds, test_ds = spec2ds(DataSpec(d=20, k=1, gtype=:ER,
                               noise=:Gaussian, mat=:COV))
# +
# Pull one batch and check its shape before running the model.
x, y = next_batch!(test_ds) |> gpu
@show size(x)
@show size(y)
# -
model(x)
# # model params
include("main.jl")
import Printf
# Print the parameter counts (in millions) of the FC / deep-FC / EQ models
# across representative input sizes d. EQ models are size-independent.
function test_size()
    # model size
    @info "FC model"
    for d in [10,20,50,100]
        Printf.@printf "%.2f\n" param_count(fc_model_fn(d)) / 1e6
    end
    @info "FC deep model"
    for d in [7, 10,15,20,25,30]
        Printf.@printf "%.2f\n" param_count(deep_fc_model_fn(d)) / 1e6
    end
    # EQ models is independent of input size
    @info "EQ model"
    Printf.@printf "%.2f\n" param_count(eq_model_fn(10)) / 1e6
    Printf.@printf "%.2f\n" param_count(deep_eq_model_fn(10)) / 1e6
end
# NOTE(review): the four tuple lines below look like an orphaned fragment of a
# model-configuration list (cf. main_cnn above); as written they form a dead
# top-level tuple expression whose value is discarded. Presumably scratch left
# over from editing — confirm and remove.
("EQ2", eq2_model_fn, 3e4),
("FC", ()->fc_model(d=d, ch=2, z=1024, nlayer=6), 3e4),
("FCreg", ()->fc_model(d=d, ch=2, z=1024, nlayer=6, reg=true), 3e4),
("CNN", ()->cnn_model(2), 3e4)
# Interactive parameter counts (millions) for the FC model at several d.
for d in [10,20,50,100]
    Printf.@printf "%.2f\n" param_count(fc_model(d=d, ch=2, z=1024, nlayer=6)) / 1e6
end
# Parameter counts for the EQ and CNN variants used in the paper tables.
Printf.@printf "%.2f\n" param_count(eq2_model_fn()) / 1e6
Printf.@printf "%.2f\n" param_count(eq_model_fn()) / 1e6
Printf.@printf "%.2f\n" param_count(cnn_model(2)) / 1e6
Printf.@printf "%.2f\n" param_count(cnn_model(1, 32)) / 1e6
Printf.@printf "%.2f\n" param_count(cnn_model(2, 32)) / 1e6
Printf.@printf "%.2f\n" param_count(cnn_model(2, 64)) / 1e6
Printf.@printf "%.2f\n" param_count(cnn_model(2, 128)) / 1e6
Printf.@printf "%.2f\n" param_count(cnn_model(2, 32, (5,5), (2,2))) / 1e6
Printf.@printf "%.2f\n" param_count(cnn_model(2, 64, (5,5), (2,2))) / 1e6
Printf.@printf "%.2f\n" param_count(cnn_model(2, 128, (5,5), (2,2))) / 1e6
# # Test ERSF124 on individual types
# Test the d-specific ERSF124 ensemble models on each graph-type pair
# (ER/SF, ER2/SF2, ER4/SF4) separately.
function test_ersf124()
    for d in [10, 20],
        types in [(:ER, :SF), (:ER2, :SF2), (:ER4, :SF4)]
        ID = "EQ2-ERSF124-d=$d-ensemble"
        specs = []
        for gtype in types,
            k in [1]
            push!(specs, DataSpec(d=d, k=k, gtype=gtype,
                                  noise=:Gaussian, mat=:CH3))
        end
        specs = Array{DataSpec}(specs)
        exp_test(ID, specs, "TEST-types=$types")
    end
end
test_ersf124()
# # Test d=100 model on d=100,200,300,400
include("../src/main.jl")
# Bare name — only displays the function object; not a call.
exp_test
# Evaluate the large-d ensemble models on graphs bigger than they were
# trained on; bsize caps the test batch size to fit GPU memory.
function test_large_d()
    for (ID,bsize) in [("EQ2-ERSF-k1-d=100-ensemble", 4),
                       ("EQ2-ERSF-k1-d=200-ng=1000-N=1-ensemble", 4),
                       ("EQ2-ERSF-k1-d=400-ng=1000-N=1-ensemble", 4)],
        d in [200, 400]
        specs = []
        for gtype in [:ER, :SF],
            k in [1]
            push!(specs, DataSpec(d=d, k=k, gtype=gtype,
                                  noise=:Gaussian, mat=:CH3,
                                  bsize=bsize))
        end
        specs = Array{DataSpec}(specs)
        exp_test(ID, specs, "TEST-d=$d")
    end
end
test_large_d()
# # Test the ensemble model on unseen d
# +
# can I just run the test without saving? No because loading the main.jl is slow
# -
include("main.jl")
# Evaluate the two ensemble models (trained on small-d mixtures) on a range of
# graph sizes, including sizes never seen during training.
function test_unseen_d()
    for model_id in ["EQ2-CH3-d=[10,15,20]-ensemble","EQ2-CH3-d=[20,30,40]-ensemble"]
        for dim in [10, 15, 20, 30, 40, 50, 80, 100]
            # One spec per graph type (ER / scale-free), k=1, CH3 input matrix.
            spec_list = []
            for graph_type in [:ER, :SF]
                for num_k in [1]
                    push!(spec_list, DataSpec(d=dim, k=num_k, gtype=graph_type,
                                              noise=:Gaussian, mat=:CH3))
                end
            end
            exp_test(model_id, Array{DataSpec}(spec_list), "TEST-d=$dim")
        end
    end
end
test_unseen_d()
# test the individual trained models
# d=10,15,20
# on larger d
# Evaluate each individually-trained model (d = 10 / 15 / 20) on graph sizes
# both seen and unseen during training.
function test_unseen_d_2()
    model_ids = ["EQ2-ERSF-k1-d=10-ensemble", "EQ2-ERSF-k1-d=15-ensemble", "EQ2-ERSF-k1-d=20-ensemble"]
    for model_id in model_ids, dim in [10, 15, 20, 30, 40, 50, 80, 100]
        # Build one k=1 CH3 spec per graph type (ER / scale-free).
        spec_list = []
        for graph_type in [:ER, :SF]
            push!(spec_list, DataSpec(d=dim, k=1, gtype=graph_type,
                                      noise=:Gaussian, mat=:CH3))
        end
        exp_test(model_id, Array{DataSpec}(spec_list), "TEST-d=$dim")
    end
end
test_unseen_d_2()
# # Test on different noise and the tables
# Evaluate the Gaussian-trained ensemble models under alternative noise
# distributions to measure robustness.
function test_different_noise()
    # FIXME OOM for d=100??
    for d in [10,20,50,100],
        noise in [:Gaussian, :Poisson, :Exp, :Gumbel]
        ID = "EQ2-ERSF-k1-d=$d-ensemble"
        specs = []
        for gtype in [:ER, :SF],
            k in [1]
            push!(specs, DataSpec(d=d, k=k, gtype=gtype,
                                  noise=noise, mat=:CH3))
        end
        specs = Array{DataSpec}(specs)
        @info "Testing on" ID d noise
        exp_test(ID, specs, "TEST-d=$d-noise=$noise")
    end
end
test_different_noise()
# # Sachs 2005 experiment
# ## Load and explore the data
import CSV
df = CSV.read("Sachs/1.cd3cd28.csv")
SachsX = convert(Matrix, df)
include("data_graph.jl")
# Known signalling network from the Sachs et al. 2005 paper.
SachsG = Sachs_ground_truth()
myplot(SachsG)
# Candidate input matrices: covariance normalized by median/max variance,
# correlation, and the project's CH2 statistic.
medcovX = cov(SachsX) ./ median(var(SachsX, dims=1))
maxcovX = cov(SachsX) ./ maximum(var(SachsX, dims=1))
corX = cor(SachsX)
ch2X = getch2(SachsX)
include("exp.jl")
# ## Load a model
# load the trained model
# Each @load rebinds `model`; only the last executed line takes effect.
@load "saved_models/EQ-d=20_k=1_gtype=SF_noise=Gaussian_mat=medCOV_mec=Linear/step-15000.bson" model
@load "saved_models/EQ-d=10_k=1_gtype=ER_noise=Gaussian_mat=medCOV_mec=Linear/step-15000.bson" model
@load "back/back-0907/CORCOV/EQ-d=10_k=1_gtype=SF_noise=Gaussian_mat=COR_mec=Linear/step-15000.bson" model
@load "saved_models/ensK-2020-09-08T10:58:41.247-ensemble/step-10000.bson" model
# The new ensemble model
@load "saved_models/ensemEQ-ICLR-1-ensemble/step-159443.bson" model
@load "saved_models/ensemEQ-CH2-1,2,4-2020-10-11T11:29:01.183-ensemble/step-100000.bson" model
# ## Evaluate the model
# Run inference with each candidate input matrix; `out` keeps the last one.
out = inf_one(model, medcovX)
out = inf_one(model, corX)
out = inf_one(model, maxcovX)
out = inf_one(model, ch2X)
# Binarize edge probabilities at 0.3 (sigmoid of raw logits).
Wout = threshold(σ.(out), 0.3, true)
myplot(DiGraph(Wout), names(df))
# FIXME NOW !!!! the names might not match at all!!!
myplot(SachsG, names(df))
# +
# predicted edge, true edge, SHD
predicted_edge = ne(DiGraph(Wout))
@show predicted_edge
correct_edge = sum(Wout[Wout .== 1] .== adjacency_matrix(SachsG)[Wout .== 1])
@show correct_edge
# metrics
ytrue = Matrix(gen_weights(SachsG))
sup_graph_metrics(Wout, ytrue)
# -
# TODO calculate #reverse direction edges
# Forward vs transposed comparison counts correctly-directed vs reversed edges.
sum(Wout[Wout .== 1] .== adjacency_matrix(SachsG)[Wout .== 1])
sum(Wout'[Wout' .== 1] .== adjacency_matrix(SachsG)[Wout' .== 1])
# TODO implement the recursive add procedure to remove cycles
# Or, just construct the graph, and keep removing until it is a DAG
is_cyclic(DiGraph(Wout))
Wout
# Or just implement the procedure
out
# ## Adding non-cyclic procedure
Wout2 = threshold(σ.(out), 0.3, false)
# find the order of the index, or, sort the indexes
sort(Wout2, dims=1)
# Edges sorted by descending confidence, to be added greedily.
edgeidx = findall((x)->x>0, Wout2)
sorted_idx = edgeidx[sortperm(Wout2[edgeidx], rev=true)]
Wout2
# add sorted idx
# Greedily add edges in confidence order; drop any edge that creates a cycle,
# guaranteeing the final graph is a DAG. 11 = number of Sachs variables.
g = MetaDiGraph(11)
for idx in sorted_idx
    add_edge!(g, idx[1], idx[2])
    if is_cyclic(g)
        rem_edge!(g, idx[1], idx[2])
    end
end
p1 = myplot(g, names(df))
draw(PNG("p1.png"), p1)
p0 = myplot(SachsG, names(df))
draw(PNG("p0.png"), p0)
# +
adj1 = adjacency_matrix(g)
adj0 = adjacency_matrix(SachsG)
# predicted edge, true edge, SHD
predicted_edge = ne(g)
@show predicted_edge
correct_edge = sum(adj1[adj1 .== 1] .== adj0[adj1 .== 1])
@show correct_edge
reversed_edge = sum(adj1'[adj1' .== 1] .== adj0[adj1' .== 1])
@show reversed_edge
# metrics
ytrue = Matrix(gen_weights(SachsG))
sup_graph_metrics(adj1, ytrue)
# -
# ## More data
#
# - [ ] goblinop's gaussian data (just for test)
# - [ ] bnlearn's continous data
# - [ ] discrete data?
# ### goblinop's (??) gaussian data
import CSV
df = CSV.read("data-back/gaussian.dat", delim=" ")
X = convert(Matrix, df)
include("data_graph.jl")
maxcovX = cov(X) ./ maximum(var(X, dims=1))
include("exp.jl")
# The new ensemble model
@load "saved_models/ensemEQ-ICLR-1-ensemble/step-159443.bson" model
out = inf_one(model, maxcovX)
Wout2 = threshold(σ.(out), 0.3, false)
# find the order of the index, or, sort the indexes
sort(Wout2, dims=1)
edgeidx = findall((x)->x>0, Wout2)
sorted_idx = edgeidx[sortperm(Wout2[edgeidx], rev=true)]
Wout2
size(X,2)
# add sorted idx
# Same greedy DAG construction as above, sized to this dataset's variables.
g = MetaDiGraph(size(X,2))
for idx in sorted_idx
    add_edge!(g, idx[1], idx[2])
    if is_cyclic(g)
        rem_edge!(g, idx[1], idx[2])
    end
end
bnp1 = myplot(g, ["A", "B", "C", "D", "E", "F", "G"])
# Ground-truth DAG of the 7-node bnlearn gaussian example (nodes A..G).
function bnlearn_ground_truth()
    g = named_graph([:A, :B, :C, :D, :E, :F, :G])
    # Edge list of the known generating structure, added in a single pass.
    for (src, dst) in [(:B, :C), (:A, :C), (:B, :D), (:D, :F),
                       (:A, :F), (:G, :F), (:E, :F)]
        named_graph_add_edge!(g, src, dst)
    end
    return g
end
groundG = bnlearn_ground_truth()
bnp0 = myplot(groundG, ["A", "B", "C", "D", "E", "F", "G"])
draw(PNG("bnp0.png"), bnp0)
draw(PNG("bnp1.png"), bnp1)
# +
# Compare predicted DAG (adj1) against ground truth (adj0).
adj1 = adjacency_matrix(g)
adj0 = adjacency_matrix(groundG)
# predicted edge, true edge, SHD
predicted_edge = ne(g)
@show predicted_edge
correct_edge = sum(adj1[adj1 .== 1] .== adj0[adj1 .== 1])
@show correct_edge
# Edges predicted in the reverse direction of a true edge.
reversed_edge = sum(adj1'[adj1' .== 1] .== adj0[adj1' .== 1])
@show reversed_edge
# metrics
ytrue = Matrix(gen_weights(groundG))
sup_graph_metrics(adj1, ytrue)
|
notebooks/main.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# CDs
# Each dict maps metric name -> (mean, error) per recommender variant;
# presumably (mean, standard error) over evaluation folds — confirm upstream.
# NOTE(review): the three "CDs" dicts below are reassigned by the "Beer"
# dicts further down BEFORE get_pic() is called, so only the Beer numbers
# are ever plotted. Presumably get_pic was meant to be called once per
# dataset — confirm and restructure.
user_item_res = {'MAP@10': (0.06967576301474467, 0.003166036638077451),
'MAP@15': (0.06299312349816566, 0.002642216310283345),
'MAP@20': (0.05815498052932873, 0.002313510028788075),
'MAP@5': (0.07996363035212431, 0.004219137398488782),
'MAP@50': (0.04287378323036822, 0.0014896505793858103),
'NDCG': (0.16056426673261637, 0.004679162839418328),
'Precision@10': (0.05435609191601918, 0.002179272606282965),
'Precision@15': (0.04690582465421282, 0.0017270471705763456),
'Precision@20': (0.04169284179203174, 0.00147050452077659),
'Precision@5': (0.06877169780128947, 0.0032115203466584427),
'Precision@50': (0.026963134402380563, 0.0008807019950014483),
'R-Precision': (0.06971269444272153, 0.0036516275234849916),
'Recall@10': (0.13016474382769494, 0.005599369026932372),
'Recall@15': (0.16674700005122858, 0.0061824760781168915),
'Recall@20': (0.19547171410919253, 0.006590533272274745),
'Recall@5': (0.08534234229083451, 0.004539139168287699),
'Recall@50': (0.30009099604718237, 0.007562927041383074)}
item_based_res = {'MAP@10': (0.07178887629333984, 0.0031642553731385755),
'MAP@15': (0.064992031749347, 0.002635286591977173),
'MAP@20': (0.06000381513721743, 0.002304365891757811),
'MAP@5': (0.08220697635972887, 0.0042622624420715505),
'MAP@50': (0.044192917573934835, 0.0014837377050769267),
'NDCG': (0.16778164007455423, 0.004908149428189497),
'Precision@10': (0.056670524053562564, 0.002183254014644424),
'Precision@15': (0.04838265278007384, 0.0017324994225048935),
'Precision@20': (0.04308976690362043, 0.001466437083771068),
'Precision@5': (0.07019342040006613, 0.003196544742899958),
'Precision@50': (0.027554967763266658, 0.0009091317645205956),
'R-Precision': (0.07243349225360872, 0.0037471162564621336),
'Recall@10': (0.1419863086006407, 0.005926964704051735),
'Recall@15': (0.17839619650584668, 0.006543351311776944),
'Recall@20': (0.20791309653313403, 0.006932247875513412),
'Recall@5': (0.09135255791336933, 0.004732662881220303),
'Recall@50': (0.30698083409364585, 0.007815814255038857)}
user_based_res = {'MAP@10': (0.027662961475988426, 0.0020350425531479774),
'MAP@15': (0.02533236979095799, 0.0017040315413899023),
'MAP@20': (0.02365930147648219, 0.001495541701848737),
'MAP@5': (0.030833746624786464, 0.0026988426807522743),
'MAP@50': (0.018425052652704506, 0.0009706651132499718),
'NDCG': (0.06930589317092652, 0.0031603934852285132),
'Precision@10': (0.022648371631674658, 0.001422552135699057),
'Precision@15': (0.01962858874745137, 0.0011206733198182316),
'Precision@20': (0.017912051578773354, 0.0009585532836581427),
'Precision@5': (0.028004628864275093, 0.0020950784481967824),
'Precision@50': (0.013066622582244999, 0.0005930316778871791),
'R-Precision': (0.026034915073542435, 0.0021865854175256334),
'Recall@10': (0.05141660514915298, 0.0035963108493951806),
'Recall@15': (0.06612453783626274, 0.004046048067636085),
'Recall@20': (0.07933286856577168, 0.004426999086434319),
'Recall@5': (0.03247410939701062, 0.002846364877967614),
'Recall@50': (0.1397135668983857, 0.005679354921733084)}
# Beer
# Same three recommender variants evaluated on the Beer dataset.
user_item_res = {'MAP@10': (0.055037055095075574, 0.00271760155112745),
'MAP@15': (0.051745667680821276, 0.00229974479769462),
'MAP@20': (0.04911610731258947, 0.0020397178310452542),
'MAP@5': (0.06045051194539249, 0.00358743056523425),
'MAP@50': (0.04058976109960154, 0.0014208353232993188),
'NDCG': (0.11061358430162982, 0.003160283681719818),
'Precision@10': (0.04776450511945392, 0.00197653604717534),
'Precision@15': (0.0432650739476678, 0.0016336711097877305),
'Precision@20': (0.040051194539249144, 0.0014294610458038988),
'Precision@5': (0.05430034129692834, 0.0027827016714734656),
'Precision@50': (0.031003412969283276, 0.0009986026431826965),
'R-Precision': (0.04616147106833989, 0.002303717415339684),
'Recall@10': (0.07037424997438727, 0.0039006945367076992),
'Recall@15': (0.09476025058335748, 0.004467342890288348),
'Recall@20': (0.11499280080949816, 0.004854710107875195),
'Recall@5': (0.039174533796154404, 0.0028903331435289827),
'Recall@50': (0.20961257540339695, 0.006110170090254078)}
item_based_res = {'MAP@10': (0.022312415352944363, 0.0016645119503467458),
'MAP@15': (0.020760076638062987, 0.0013902801832968463),
'MAP@20': (0.019681531591873785, 0.0012239746277176403),
'MAP@5': (0.024571103526734927, 0.0022296530333048496),
'MAP@50': (0.016267408710191472, 0.0008264043406741288),
'NDCG': (0.04288493444680673, 0.0020306935982967473),
'Precision@10': (0.01861774744027304, 0.0011760285178246187),
'Precision@15': (0.017064846416382253, 0.0009640787637895016),
'Precision@20': (0.015955631399317407, 0.0008507705336377911),
'Precision@5': (0.022969283276450512, 0.00172364733170156),
'Precision@50': (0.012580204778156997, 0.0005672840933801791),
'R-Precision': (0.017466114041808117, 0.0014085103984669777),
'Recall@10': (0.025981601839728127, 0.002409947202481572),
'Recall@15': (0.034509479591048456, 0.0027058987502942195),
'Recall@20': (0.04287631515663236, 0.0030315121081121673),
'Recall@5': (0.016478250732903512, 0.0019431723508946126),
'Recall@50': (0.08058585111864981, 0.004010568363559986)}
user_based_res = {'MAP@10': (0.037076520938295686, 0.0021836381227901313),
'MAP@15': (0.03519383378598396, 0.0018524455232287705),
'MAP@20': (0.033752377268455466, 0.0016467131664474927),
'MAP@5': (0.03970079635949943, 0.0028627113837744406),
'MAP@50': (0.02901247820595199, 0.0011484030056434567),
'NDCG': (0.0809328726602833, 0.0026708968864754845),
'Precision@10': (0.03290102389078498, 0.0016083906056846586),
'Precision@15': (0.030625711035267348, 0.001352170620200592),
'Precision@20': (0.02871160409556314, 0.0011796532304680235),
'Precision@5': (0.037440273037542667, 0.0022969449504500407),
'Precision@50': (0.023918088737201363, 0.000830700999192574),
'R-Precision': (0.03140600834368189, 0.0019051571701536505),
'Recall@10': (0.046410763095268745, 0.0032310746391458513),
'Recall@15': (0.06350003836979959, 0.003699626763247859),
'Recall@20': (0.07840436401688601, 0.00406143081888389),
'Recall@5': (0.02569159156512268, 0.0023676209099114825),
'Recall@50': (0.16196184143576622, 0.0055002529364572925)}
def get_pic(user_item_res, item_based_res, user_based_res):
    """Grouped bar chart comparing the three recommenders across 17 metrics.

    Each ``*_res`` argument maps metric name -> (mean, error).
    ``get_mean_err`` (defined elsewhere) is expected to return aligned
    mean/error sequences for the 17 metrics.

    NOTE(review): ``xlabel`` (the metric-name tick labels) is not defined
    anywhere in this file — it must be set in an earlier cell, otherwise
    ``plt.xticks`` raises NameError. TODO confirm.
    """
    # data to plot
    n_groups = 17
    means_user_item, errs_user_item = get_mean_err(user_item_res)
    means_item, errs_item = get_mean_err(item_based_res)
    means_user, errs_user = get_mean_err(user_based_res)
    # create plot
    fig, ax = plt.subplots()
    index = np.arange(n_groups)
    bar_width = 0.2
    opacity = 0.8
    # Three side-by-side bar series, offset by one bar width each.
    rects1 = plt.bar(index - bar_width, means_user_item, bar_width,
                     alpha=opacity,
                     color='y',
                     label='user_item',yerr = errs_user_item)
    rects2 = plt.bar(index, means_item, bar_width,
                     alpha=opacity,
                     color='g',
                     label='item', yerr = errs_item)
    rects3 = plt.bar(index + bar_width, means_user, bar_width,
                     alpha=opacity,
                     color='b',
                     label='user', yerr = errs_user)
    plt.xlabel('Algorithm')
    plt.ylabel('Scores')
    plt.title('Scores by algorithm')
    plt.xticks(index, xlabel)
    plt.legend()
    fig.set_size_inches(18, 10)
    plt.tight_layout()
    plt.show()
# NOTE(review): get_pic is called twice with identical arguments — the second
# call just renders the same figure again; presumably accidental duplication.
get_pic(user_item_res, item_based_res, user_based_res)
get_pic(user_item_res, item_based_res, user_based_res)
|
data exploration/Visualization of result.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Numpy
# Импортировать NumPy под именем np
import numpy as np
# Create a vector (1-D array) of size 10 filled with zeros
a0 = np.zeros(10)
print(a0)
# Create a vector of size 10 filled with ones
np.ones(10)
# Create a vector of size 10 filled with the number 2.5
a = np.array([2.5] * 10)
print(a)
# Create a vector of size 10 filled with zeros, but with the fifth element equal to 1
# FIX: the task asks for the fifth element to equal 1; the previous code assigned 5.
a0[4] = 1
print(a0)
# Reverse a vector (the first element becomes the last)
# +
b=np.random.random(15)
print (b)
print (np.flip(b,0))
# -
# Create a 3x3 matrix (2-D array) with values from 0 to 8
# +
print (np.array([[0,1,2], [3,4,5],[6,7,8]]))
# Idiomatic equivalent of the hand-written matrix above.
np.arange(0,9).reshape(3,3)
# -
# Find the indices of the non-zero elements of [1,2,0,0,4,0]
c=np.array([1,2,0,0,4,0])
c.nonzero()
# Create a 3x3 identity matrix
# +
# FIX: the task asks for the identity matrix ("единичная матрица"), but the
# previous code built all-ones matrices; `np.eye`/`np.identity` is correct.
cc = np.eye(3)
cc1 = np.identity(3)
print(cc)
print(cc1)
# -
# Create a 3x3x3 array with random values
# +
# Verbose manual construction; cc1 below is the idiomatic one-liner.
cc=np.array([[np.random.random(3),np.random.random(3),np.random.random(3)],[np.random.random(3),np.random.random(3),np.random.random(3)],[np.random.random(3),np.random.random(3),np.random.random(3)]])
cc1=np.random.random([3,3,3])
print (cc)
print ('Или так :)')
print (cc1)
# -
# Create a random vector of size 30 and find the mean of all its elements
a30=np.random.random(30)
print (a30.mean())
# Multiply a 5x3 matrix by a 3x2 matrix
# +
a1 = np.random.random([5, 3])
b1 = np.random.random([3, 2])
# FIX: the old code computed `a1.shape * b1` — broadcasting against the shape
# tuple, not matrix multiplication. Use the `@` operator (equivalent to np.dot).
c1 = a1 @ b1
print(a1)
print(b1)
print(c1)
# -
# Create a vector of size 10 with values between 0 and 1, excluding both endpoints
a=np.linspace(0.01,0.99,10)
print (a)
print (a.shape)
# Replace the maximum element with zero
# +
a[a.argmax()]=0
print (a)
# -
# Convert the array from float to int
a.astype(int)
# Given a (10, 2) array of coordinates, find the distance from every point to every other point
# +
a = np.arange(0, 20).reshape(10, 2)
def distance (a1):
    # Euclidean distance from point `a1` to each row of the global array `a`.
    return [np.sqrt(np.sum(sq_row)) for sq_row in (a1 - a) ** 2]
print(a)
[distance(point) for point in a]
# -
# Randomly place p elements into a 2-D array
n = 10
p = 10
Z = np.zeros((n,n))
# np.put writes the value 5 into p distinct random flat positions (in place;
# it returns None, so `r` is unused).
r=np.put(Z, np.random.choice(range(n*n), p, replace=False), 5)
print (Z)
# Given the vector [1, 2, 3, 4, 5], build a new vector with three zeros between each value
# +
a0=np.zeros(3,dtype=int)
a1=np.arange(1,6)
i=a1.size -1
# Walk backwards so earlier insertions do not shift the positions yet to be processed.
while i > 0:
    a1=np.insert(a1, i, a0, axis=0)
    i -= 1
print (a1)
# -
# Find the most frequent value in the array
(values,index,counts)=np.unique(a1,return_index=True, return_counts=True)
ind = counts.argmax()
print (values[ind])
# Find the n most frequent values in the array
# NOTE(review): the original task said "n largest values", but the code below
# selects the n *most frequent* values (it partitions on counts, not values).
# +
# modify the source array
a1=np.insert(a1,(4,6),3)
(values,index,counts)=np.unique(a1,return_index=True, return_counts=True)
n=2
print (values [np.argpartition(-counts,n)[:n]])
# originally done with a loop
#ind = np.argpartition(-counts,n)[:n]
#aa=np.array([])
#for x in ind:
#    print (x)
#    aa=np.append(aa, values[x])
#print (values, counts, a1, aa)
# -
# ## Pandas - изучение данных
# Перейти по ссылке [Kaggle]( https://www.kaggle.com/openfoodfacts/world-food-facts)
# Скачать датасет на свой компьютер и распаковать
# Read the csv file into the `food` dataframe
import pandas as pd
# FIX: `DataFrame.from_csv` was removed in pandas 1.0; `read_csv` with
# `index_col=0` reproduces its old behavior (first column as index).
food = pd.read_csv('/Users/gat/Documents/otis/en.openfoodfacts.org.products.tsv',
                   sep='\t', index_col=0)
# Show the first 5 records
food.head(5)
# How many observations are in the dataset?
#
food.index.size
# How many columns are in the dataset?
food.columns.size
# Print all the column names
food.columns.values
# What is the name of column 105?
food.columns.values[105]
# What is the dtype of column 105?
# FIX: positional label indexing on a Series (dtypes[105]) is deprecated; use .iloc.
food.dtypes.iloc[105]
# How is the dataset indexed?
# FIX: `food.reindex` only displayed the bound method object; `food.index`
# actually shows the index.
food.index
# What is the product name of the 19th observation?
food.product_name.iloc[18:19]
# ## Pandas - data filtering
# Read the data from the URL below
# and assign it to `chipo`
url='https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv'
# FIX: `DataFrame.from_csv` was removed in pandas 1.0; `read_csv` with
# `index_col=0` reproduces its old behavior (first column as index).
chipo = pd.read_csv(url, sep='\t', index_col=0)
# How many products cost more than $10.00?
# +
# Strip the leading '$' and compare numerically (idiomatic chained form of
# the old unbound pd.DataFrame.astype call).
# NOTE(review): this counts order *rows* with price > $10, not distinct
# products; use `.item_name.nunique()` if distinct products are wanted.
chipo[chipo.item_price.str.strip('$').astype(float) > 10].count()
# -
# What is the price of each item? Print the item_name and item_price columns
col=['item_name','item_price']
chipo[col]
# Sort by item name
chipo.sort_values('item_name')
# How many of the most expensive item were ordered?
chipo['price']=chipo.item_price.str.strip(to_strip='$')
chipo[(chipo.price.astype('float').values) == (chipo.price.astype('float').max())].count()
# How many times was "Veggie Salad Bowl" ordered?
chipo.item_name.value_counts()['Veggie Salad Bowl']
# How many times did people order more than 1 "Canned Soda"?
# +
chipo_caned_soda=chipo[((chipo.item_name == 'Canned Soda') & (chipo.quantity.astype('float')>1))]
chipo_caned_soda.item_name.count()
# -
|
HomeWork 1/exercises-19469-b3240b.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.6 64-bit ('opt-qiskit')
# metadata:
# interpreter:
# hash: 9c837c7ac14898b4157c3602a52e89b1bd57ef10d6bdaa28fc65fa6f9116fa6b
# name: python3
# ---
# # Option Pricing with qGANs
#
#
# ## Introduction
# In this notebook, we discuss how a Quantum Machine Learning Algorithm, namely a quantum Generative Adversarial Network (qGAN), can facilitate the pricing of a European call option. More specifically, a qGAN can be trained such that a quantum circuit models the spot price of an asset underlying a European call option. The resulting model can then be integrated into a Quantum Amplitude Estimation based algorithm to evaluate the expected payoff - see [European Call Option Pricing](03_european_call_option_pricing.ipynb). <br/>
# For further details on learning and loading random distributions by training a qGAN please refer to [Quantum Generative Adversarial Networks for Learning and Loading Random Distributions. Zoufal, Lucchi, Woerner. 2019.](https://www.nature.com/articles/s41534-019-0223-2)
# +
import matplotlib.pyplot as plt
import numpy as np
from qiskit import Aer, QuantumRegister, QuantumCircuit
from qiskit.circuit import ParameterVector
from qiskit.circuit.library import TwoLocal
from qiskit.quantum_info import Statevector
from qiskit.utils import QuantumInstance
from qiskit.algorithms import IterativeAmplitudeEstimation, EstimationProblem
from qiskit_finance.applications.estimation import EuropeanCallPricing
from qiskit_finance.circuit.library import NormalDistribution
# -
# ### Uncertainty Model
#
# The Black-Scholes model assumes that the spot price at maturity $S_T$ for a European call option is log-normally distributed. Thus, we can train a qGAN on samples from a log-normal distribution and use the result as an uncertainty model underlying the option.
# <br/>
# In the following, we construct a quantum circuit that loads the uncertainty model. The circuit output reads
#
# $$ \big| g_{\theta}\rangle = \sum_{j=0}^{2^n-1}\sqrt{p_{\theta}^{j}} \big| j \rangle , $$
#
# where the probabilities $p_{\theta}^{j}$, for $j\in \left\{0, \ldots, {2^n-1} \right\}$, represent a model of the target distribution.
# +
# Set upper and lower data values
bounds = np.array([0.0, 7.0])
# Set number of qubits used in the uncertainty model
num_qubits = 3
# Load the trained circuit parameters
# These six angles are the result of a previously-run qGAN training.
g_params = [0.29399714, 0.38853322, 0.9557694, 0.07245791, 6.02626428, 0.13537225]
# Set an initial state for the generator circuit
init_dist = NormalDistribution(num_qubits, mu=1.0, sigma=1.0, bounds=bounds)
# construct the variational form
var_form = TwoLocal(num_qubits, "ry", "cz", entanglement="circular", reps=1)
# keep a list of the parameters so we can associate them to the list of numerical values
# (otherwise we need a dictionary)
theta = var_form.ordered_parameters
# compose the generator circuit, this is the circuit loading the uncertainty model
g_circuit = init_dist.compose(var_form)
# -
# ### Evaluate Expected Payoff
# Now, the trained uncertainty model can be used to evaluate the expectation value of the option's payoff function with Quantum Amplitude Estimation.
# +
# set the strike price (should be within the low and the high value of the uncertainty)
strike_price = 2
# set the approximation scaling for the payoff function
c_approx = 0.25
# -
# ### Plot the probability distribution
# Next, we plot the trained probability distribution and, for reasons of comparison, also the target probability distribution.
# + tags=["nbsphinx-thumbnail"]
# Evaluate trained probability distribution
# Grid of spot prices represented by the 2**num_qubits basis states.
values = [
    bounds[0] + (bounds[1] - bounds[0]) * x / (2 ** num_qubits - 1) for x in range(2 ** num_qubits)
]
# Bind the trained angles and read off probabilities as squared amplitudes.
uncertainty_model = g_circuit.assign_parameters(dict(zip(theta, g_params)))
amplitudes = Statevector.from_instruction(uncertainty_model).data
x = np.array(values)
y = np.abs(amplitudes) ** 2
# Sample from target probability distribution
N = 100000
log_normal = np.random.lognormal(mean=1, sigma=1, size=N)
log_normal = np.round(log_normal)
log_normal = log_normal[log_normal <= 7]
log_normal_samples = []
for i in range(8):
    log_normal_samples += [np.sum(log_normal == i)]
log_normal_samples = np.array(log_normal_samples / sum(log_normal_samples))
# Plot distributions
plt.bar(x, y, width=0.2, label="trained distribution", color="royalblue")
plt.xticks(x, size=15, rotation=90)
plt.yticks(size=15)
plt.grid()
# FIX: raw strings for the TeX labels — "\$" and "\%" are invalid escape
# sequences in plain string literals (DeprecationWarning, SyntaxWarning on
# newer Pythons); the rendered text is unchanged.
plt.xlabel(r"Spot Price at Maturity $S_T$ (\$)", size=15)
plt.ylabel(r"Probability ($\%$)", size=15)
plt.plot(
    log_normal_samples,
    "-o",
    color="deepskyblue",
    label="target distribution",
    linewidth=4,
    markersize=12,
)
plt.legend(loc="best")
plt.show()
# -
# ### Evaluate Expected Payoff
# Now, the trained uncertainty model can be used to evaluate the expectation value of the option's payoff function analytically and with Quantum Amplitude Estimation.
# +
# Evaluate payoff for different distributions
# European call payoff max(0, S_T - strike) evaluated on the 8 grid points.
payoff = np.array([0, 0, 0, 1, 2, 3, 4, 5])
ep = np.dot(log_normal_samples, payoff)
print("Analytically calculated expected payoff w.r.t. the target distribution: %.4f" % ep)
ep_trained = np.dot(y, payoff)
print("Analytically calculated expected payoff w.r.t. the trained distribution: %.4f" % ep_trained)
# Plot exact payoff function (evaluated on the grid of the trained uncertainty model)
x = np.array(values)
y_strike = np.maximum(0, x - strike_price)
plt.plot(x, y_strike, "ro-")
plt.grid()
plt.title("Payoff Function", size=15)
plt.xlabel("Spot Price", size=15)
plt.ylabel("Payoff", size=15)
plt.xticks(x, size=15, rotation=90)
plt.yticks(size=15)
plt.show()
# -
# construct circuit for payoff function
european_call_pricing = EuropeanCallPricing(
    num_qubits,
    strike_price=strike_price,
    rescaling_factor=c_approx,
    bounds=bounds,
    uncertainty_model=uncertainty_model,
)
# +
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
qi = QuantumInstance(Aer.get_backend("aer_simulator"), shots=100)
problem = european_call_pricing.to_estimation_problem()
# construct amplitude estimation
ae = IterativeAmplitudeEstimation(epsilon, alpha=alpha, quantum_instance=qi)
# -
# Run amplitude estimation and compare against the analytic expectation.
result = ae.estimate(problem)
conf_int = np.array(result.confidence_interval_processed)
print("Exact value: \t%.4f" % ep_trained)
print("Estimated value: \t%.4f" % (result.estimation_processed))
print("Confidence interval:\t[%.4f, %.4f]" % tuple(conf_int))
# +
import qiskit.tools.jupyter
# %qiskit_version_table
# %qiskit_copyright
|
docs/tutorials/10_qgan_option_pricing.ipynb
|
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: collapsed,code_folding,heading_collapsed,hidden
# cell_metadata_json: true
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: econ-ark-3.8
# language: python
# name: econ-ark-3.8
# ---
# %% [markdown]
# # IndShockConsumerType Documentation
# ## Consumption-Saving model with Idiosyncratic Income Shocks
# %% {"code_folding": [0]}
# Initial imports and notebook setup, click arrow to show
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
from HARK.utilities import plot_funcs_der, plot_funcs
import matplotlib.pyplot as plt
import numpy as np
mystr = lambda number : "{:.4f}".format(number)
# %% [markdown]
# The module `HARK.ConsumptionSaving.ConsIndShockModel` concerns consumption-saving models with idiosyncratic shocks to (non-capital) income. All of the models assume CRRA utility with geometric discounting, no bequest motive, and income shocks are fully transitory or fully permanent.
#
# `ConsIndShockModel` includes:
# 1. A very basic "perfect foresight" model with no uncertainty.
# 2. A model with risk over transitory and permanent income shocks.
# 3. The model described in (2), with an interest rate for debt that differs from the interest rate for savings.
#
# This notebook provides documentation for the second of these models.
# $\newcommand{\CRRA}{\rho}$
# $\newcommand{\DiePrb}{\mathsf{D}}$
# $\newcommand{\PermGroFac}{\Gamma}$
# $\newcommand{\Rfree}{\mathsf{R}}$
# $\newcommand{\DiscFac}{\beta}$
# %% [markdown]
# ## Statement of idiosyncratic income shocks model
#
# Suppose we want to solve a model like the one analyzed in [BufferStockTheory](http://www.econ2.jhu.edu/people/ccarroll/papers/BufferStockTheory/), which has all the same features as the perfect foresight consumer, plus idiosyncratic shocks to income each period. Agents with this kind of model are represented by the class `IndShockConsumerType`.
#
# Specifically, this type of consumer receives two income shocks at the beginning of each period: a completely transitory shock $\newcommand{\tShkEmp}{\theta}{\tShkEmp_t}$ and a completely permanent shock $\newcommand{\pShk}{\psi}{\pShk_t}$. Moreover, the agent is subject to borrowing a borrowing limit: the ratio of end-of-period assets $A_t$ to permanent income $P_t$ must be greater than $\underline{a}$. As with the perfect foresight problem, this model is stated in terms of *normalized* variables, dividing all real variables by $P_t$:
#
# \begin{eqnarray*}
# v_t(m_t) &=& \max_{c_t} {~} u(c_t) + \DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ (\PermGroFac_{t+1}\psi_{t+1})^{1-\CRRA} v_{t+1}(m_{t+1}) \right], \\
# a_t &=& m_t - c_t, \\
# a_t &\geq& \text{$\underline{a}$}, \\
# m_{t+1} &=& \Rfree/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}, \\
# (\psi_{t+1},\theta_{t+1}) &\sim& F_{t+1}, \\
# \mathbb{E}[\psi]=\mathbb{E}[\theta] &=& 1, \\
# u(c) &=& \frac{c^{1-\rho}}{1-\rho}.
# \end{eqnarray*}
# %% [markdown]
# ## Solution method for IndShockConsumerType
#
# With the introduction of (non-trivial) risk, the idiosyncratic income shocks model has no closed form solution and must be solved numerically. The function `solveConsIndShock` solves the one period problem for the `IndShockConsumerType` class. To do so, HARK uses the original version of the endogenous grid method (EGM) first described [here](http://www.econ2.jhu.edu/people/ccarroll/EndogenousGridpoints.pdf) <cite data-cite="6202365/HQ6H9JEI"></cite>; see also the [SolvingMicroDSOPs](http://www.econ2.jhu.edu/people/ccarroll/SolvingMicroDSOPs/) lecture notes.
#
# Briefly, the transition equation for $m_{t+1}$ can be substituted into the problem definition; the second term of the reformulated maximand represents "end of period value of assets" $\mathfrak{v}_t(a_t)$ ("Gothic v"):
#
# \begin{eqnarray*}
# v_t(m_t) &=& \max_{c_t} {~} u(c_t) + \underbrace{\DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ (\PermGroFac_{t+1}\psi_{t+1})^{1-\CRRA} v_{t+1}(\Rfree/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}) \right]}_{\equiv \mathfrak{v}_t(a_t)}.
# \end{eqnarray*}
#
# The first order condition with respect to $c_t$ is thus simply:
#
# \begin{eqnarray*}
# u^{\prime}(c_t) - \mathfrak{v}'_t(a_t) = 0 \Longrightarrow c_t^{-\CRRA} = \mathfrak{v}'_t(a_t) \Longrightarrow c_t = \mathfrak{v}'_t(a_t)^{-1/\CRRA},
# \end{eqnarray*}
#
# and the marginal value of end-of-period assets can be computed as:
#
# \begin{eqnarray*}
# \mathfrak{v}'_t(a_t) = \DiscFac (1-\DiePrb_{t+1}) \mathbb{E}_{t} \left[ \Rfree (\PermGroFac_{t+1}\psi_{t+1})^{-\CRRA} v'_{t+1}(\Rfree/(\PermGroFac_{t+1} \psi_{t+1}) a_t + \theta_{t+1}) \right].
# \end{eqnarray*}
#
# To solve the model, we choose an exogenous grid of $a_t$ values that spans the range of values that could plausibly be achieved, compute $\mathfrak{v}'_t(a_t)$ at each of these points, calculate the value of consumption $c_t$ whose marginal utility is consistent with the marginal value of assets, then find the endogenous $m_t$ gridpoint as $m_t = a_t + c_t$. The set of $(m_t,c_t)$ gridpoints is then interpolated to construct the consumption function.
# %% [markdown]
# ## Example parameter values to construct an instance of IndShockConsumerType
#
# In order to create an instance of `IndShockConsumerType`, the user must specify parameters that characterize the (age-varying) distribution of income shocks $F_{t+1}$, the artificial borrowing constraint $\underline{a}$, and the exogenous grid of end-of-period assets-above-minimum for use by EGM, along with all of the parameters for the perfect foresight model. The table below presents the complete list of parameter values required to instantiate an `IndShockConsumerType`, along with example values.
#
# | Parameter | Description | Code | Example value | Time-varying? |
# | :---: | --- | --- | --- | --- |
# | $\DiscFac$ |Intertemporal discount factor | $\texttt{DiscFac}$ | $0.96$ | |
# | $\CRRA$|Coefficient of relative risk aversion | $\texttt{CRRA}$ | $2.0$ | |
# | $\Rfree$ | Risk free interest factor | $\texttt{Rfree}$ | $1.03$ | |
# | $1 - \DiePrb_{t+1}$ |Survival probability | $\texttt{LivPrb}$ | $[0.98]$ | $\surd$ |
# |$\PermGroFac_{t+1}$|Permanent income growth factor|$\texttt{PermGroFac}$| $[1.01]$ | $\surd$ |
# | $\sigma_\psi$| Standard deviation of log permanent income shocks | $\texttt{PermShkStd}$ | $[0.1]$ |$\surd$ |
# | $N_\psi$| Number of discrete permanent income shocks | $\texttt{PermShkCount}$ | $7$ | |
# | $\sigma_\theta$| Standard deviation of log transitory income shocks | $\texttt{TranShkStd}$ | $[0.2]$ | $\surd$ |
# | $N_\theta$| Number of discrete transitory income shocks | $\texttt{TranShkCount}$ | $7$ | |
# | $\mho$ | Probability of being unemployed and getting $\theta=\underline{\theta}$ | $\texttt{UnempPrb}$ | $0.05$ | |
# | $\underline{\theta}$| Transitory shock when unemployed | $\texttt{IncUnemp}$ | $0.3$ | |
# | $\mho^{Ret}$ | Probability of being "unemployed" when retired | $\texttt{UnempPrb}$ | $0.0005$ | |
# | $\underline{\theta}^{Ret}$| Transitory shock when "unemployed" and retired | $\texttt{IncUnemp}$ | $0.0$ | |
# | $(none)$ | Period of the lifecycle model when retirement begins | $\texttt{T_retire}$ | $0$ | |
# | $(none)$ | Minimum value in assets-above-minimum grid | $\texttt{aXtraMin}$ | $0.001$ | |
# | $(none)$ | Maximum value in assets-above-minimum grid | $\texttt{aXtraMax}$ | $20.0$ | |
# | $(none)$ | Number of points in base assets-above-minimum grid | $\texttt{aXtraCount}$ | $48$ | |
# | $(none)$ | Exponential nesting factor for base assets-above-minimum grid | $\texttt{aXtraNestFac}$ | $3$ | |
# | $(none)$ | Additional values to add to assets-above-minimum grid | $\texttt{aXtraExtra}$ | $None$ | |
# | $\underline{a}$| Artificial borrowing constraint (normalized) | $\texttt{BoroCnstArt}$ | $0.0$ | |
# | $(none)$|Indicator for whether $\texttt{vFunc}$ should be computed | $\texttt{vFuncBool}$ | $True$ | |
# | $(none)$ |Indicator for whether $\texttt{cFunc}$ should use cubic splines | $\texttt{CubicBool}$ | $False$ | |
# |$T$| Number of periods in this type's "cycle" |$\texttt{T_cycle}$| $1$ | |
# |(none)| Number of times the "cycle" occurs |$\texttt{cycles}$| $0$ | |
# %% {"code_folding": [0]}
# Parameter dictionary for an infinite-horizon idiosyncratic-shocks consumer;
# each entry is described in the parameter table above.
IdiosyncDict={
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0,                           # Coefficient of relative risk aversion
    "Rfree": 1.03,                         # Interest factor on assets
    "DiscFac": 0.96,                       # Intertemporal discount factor
    "LivPrb" : [0.98],                     # Survival probability
    "PermGroFac" :[1.01],                  # Permanent income growth factor
    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1],                  # Standard deviation of log permanent shocks to income
    "PermShkCount" : 7,                    # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.2],                  # Standard deviation of log transitory shocks to income
    "TranShkCount" : 7,                    # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05,                     # Probability of unemployment while working
    "IncUnemp" : 0.3,                      # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005,                # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0,                   # "Unemployment" benefits when retired
    "T_retire" : 0,                        # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0,                      # Flat income tax rate (legacy parameter, will be removed in future)
    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001,                    # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20,                       # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48,                     # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3,                    # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None],                 # Additional values to add to aXtraGrid
    # A few other parameters
    "BoroCnstArt" : 0.0,                   # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True,                    # Whether to calculate the value function during solution
    "CubicBool" : False,                   # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 1,                         # Number of periods in the cycle for this agent type
    # Parameters only used in simulation
    "AgentCount" : 10000,                  # Number of agents of this type
    "T_sim" : 120,                         # Number of periods to simulate
    "aNrmInitMean" : -6.0,                 # Mean of log initial assets
    "aNrmInitStd"  : 1.0,                  # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0,                  # Mean of log initial permanent income
    "pLvlInitStd"  : 0.0,                  # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0,                 # Aggregate permanent income growth factor
    "T_age" : None,                        # Age after which simulated agents are automatically killed
}
# %% [markdown]
# The distribution of permanent income shocks is specified as mean one lognormal, with an age-varying (underlying) standard deviation. The distribution of transitory income shocks is also mean one lognormal, but with an additional point mass representing unemployment; the transitory shocks are adjusted so that the distribution is still mean one. The continuous distributions are discretized with an equiprobable distribution.
#
# Optionally, the user can specify the period when the individual retires and escapes essentially all income risk as `T_retire`; this can be turned off by setting the parameter to $0$. In retirement, all permanent income shocks are turned off, and the only transitory shock is an "unemployment" shock, likely with small probability; this prevents the retired problem from degenerating into a perfect foresight model.
#
# The grid of assets above minimum $\texttt{aXtraGrid}$ is specified by its minimum and maximum level, the number of gridpoints, and the extent of exponential nesting. The greater the (integer) value of $\texttt{aXtraNestFac}$, the more dense the gridpoints will be at the bottom of the grid (and more sparse near the top); setting $\texttt{aXtraNestFac}$ to $0$ will generate an evenly spaced grid of $a_t$.
#
# The artificial borrowing constraint $\texttt{BoroCnstArt}$ can be set to `None` to turn it off.
#
# It is not necessary to compute the value function in this model, and it is not computationally free to do so. You can choose whether the value function should be calculated and returned as part of the solution of the model with $\texttt{vFuncBool}$. The consumption function will be constructed as a piecewise linear interpolation when $\texttt{CubicBool}$ is `False`, and will be a piecewise cubic spline interpolator if `True`.
# %% [markdown] {"heading_collapsed": true}
# ## Solving and examining the solution of the idiosyncratic income shocks model
#
# The cell below creates an infinite horizon instance of `IndShockConsumerType` and solves its model by calling its `solve` method.
# %% {"hidden": true}
# Instantiate the consumer type and solve its dynamic problem;
# cycles = 0 tells HARK the one-period cycle repeats forever.
IndShockExample = IndShockConsumerType(**IdiosyncDict)
IndShockExample.cycles = 0 # Make this type have an infinite horizon
IndShockExample.solve()
# %% [markdown] {"hidden": true}
# After solving the model, we can examine an element of this type's $\texttt{solution}$:
# %% {"hidden": true}
# Show all attributes of the (single) solution object.
print(vars(IndShockExample.solution[0]))
# %% [markdown] {"hidden": true}
# The single-period solution to an idiosyncratic shocks consumer's problem has all of the same attributes as in the perfect foresight model, with a couple additions. The solution can include the marginal marginal value of market resources function $\texttt{vPPfunc}$, but this is only constructed if $\texttt{CubicBool}$ is `True`, so that the MPC can be accurately computed; when it is `False`, then $\texttt{vPPfunc}$ merely returns `NaN` everywhere.
#
# The `solveConsIndShock` function calculates steady state market resources and stores it in the attribute $\texttt{mNrmSS}$. This represents the steady state level of $m_t$ if *this period* were to occur indefinitely, but with income shocks turned off. This is relevant in a "one period infinite horizon" model like we've specified here, but is less useful in a lifecycle model.
#
# Let's take a look at the consumption function by plotting it, along with its derivative (the MPC):
# %% {"hidden": true}
# Plot the solved consumption function c(m) and its derivative (the MPC)
# from the borrowing-constrained minimum of m up to m = 5.
print('Consumption function for an idiosyncratic shocks consumer type:')
plot_funcs(IndShockExample.solution[0].cFunc,IndShockExample.solution[0].mNrmMin,5)
print('Marginal propensity to consume for an idiosyncratic shocks consumer type:')
plot_funcs_der(IndShockExample.solution[0].cFunc,IndShockExample.solution[0].mNrmMin,5)
# %% [markdown] {"hidden": true}
# The lower part of the consumption function is linear with a slope of 1, representing the *constrained* part of the consumption function where the consumer *would like* to consume more by borrowing-- his marginal utility of consumption exceeds the marginal value of assets-- but he is prevented from doing so by the artificial borrowing constraint.
#
# The MPC is a step function, as the $\texttt{cFunc}$ itself is a piecewise linear function; note the large jump in the MPC where the borrowing constraint begins to bind.
#
# If you want to look at the interpolation nodes for the consumption function, these can be found by "digging into" attributes of $\texttt{cFunc}$:
# %% {"hidden": true}
# Interpolation nodes of the two branches of the piecewise-linear cFunc:
# functions[0] is the unconstrained branch, functions[1] the constrained one.
print('mNrmGrid for unconstrained cFunc is ',IndShockExample.solution[0].cFunc.functions[0].x_list)
print('cNrmGrid for unconstrained cFunc is ',IndShockExample.solution[0].cFunc.functions[0].y_list)
print('mNrmGrid for borrowing constrained cFunc is ',IndShockExample.solution[0].cFunc.functions[1].x_list)
print('cNrmGrid for borrowing constrained cFunc is ',IndShockExample.solution[0].cFunc.functions[1].y_list)
# %% [markdown] {"hidden": true}
# The consumption function in this model is an instance of `LowerEnvelope1D`, a class that takes an arbitrary number of 1D interpolants as arguments to its initialization method. When called, a `LowerEnvelope1D` evaluates each of its component functions and returns the lowest value. Here, the two component functions are the *unconstrained* consumption function-- how the agent would consume if the artificial borrowing constraint did not exist for *just this period*-- and the *borrowing constrained* consumption function-- how much he would consume if the artificial borrowing constraint is binding.
#
# The *actual* consumption function is the lower of these two functions, pointwise. We can see this by plotting the component functions on the same figure:
# %% {"hidden": true}
plot_funcs(IndShockExample.solution[0].cFunc.functions,-0.25,5.)
# %% [markdown]
# ## Simulating the idiosyncratic income shocks model
#
# In order to generate simulated data, an instance of `IndShockConsumerType` needs to know how many agents there are that share these particular parameters (and are thus *ex ante* homogeneous), the distribution of states for newly "born" agents, and how many periods to simulate. These simulation parameters are described in the table below, along with example values.
#
# | Description | Code | Example value |
# | :---: | --- | --- |
# | Number of consumers of this type | $\texttt{AgentCount}$ | $10000$ |
# | Number of periods to simulate | $\texttt{T_sim}$ | $120$ |
# | Mean of initial log (normalized) assets | $\texttt{aNrmInitMean}$ | $-6.0$ |
# | Stdev of initial log (normalized) assets | $\texttt{aNrmInitStd}$ | $1.0$ |
# | Mean of initial log permanent income | $\texttt{pLvlInitMean}$ | $0.0$ |
# | Stdev of initial log permanent income | $\texttt{pLvlInitStd}$ | $0.0$ |
# | Aggregrate productivity growth factor | $\texttt{PermGroFacAgg}$ | $1.0$ |
# | Age after which consumers are automatically killed | $\texttt{T_age}$ | $None$ |
#
# Here, we will simulate 10,000 consumers for 120 periods. All newly born agents will start with permanent income of exactly $P_t = 1.0 = \exp(\texttt{pLvlInitMean})$, as $\texttt{pLvlInitStd}$ has been set to zero; they will have essentially zero assets at birth, as $\texttt{aNrmInitMean}$ is $-6.0$; assets will be less than $1\%$ of permanent income at birth.
#
# These example parameter values were already passed as part of the parameter dictionary that we used to create `IndShockExample`, so it is ready to simulate. We need to set the `track_vars` attribute to indicate the variables for which we want to record a *history*.
# %%
# Record histories of normalized assets, market resources, consumption, and
# the permanent income level, then run the 120-period simulation.
IndShockExample.track_vars = ['aNrm','mNrm','cNrm','pLvl']
IndShockExample.initialize_sim()
IndShockExample.simulate()
# %% [markdown]
# We can now look at the simulated data in aggregate or at the individual consumer level. Like in the perfect foresight model, we can plot average (normalized) market resources over time, as well as average consumption:
# %%
# Cross-sectional mean of normalized market resources, period by period.
plt.plot(np.mean(IndShockExample.history['mNrm'],axis=1))
plt.xlabel('Time')
plt.ylabel('Mean market resources')
plt.show()
# Cross-sectional mean of normalized consumption.
plt.plot(np.mean(IndShockExample.history['cNrm'],axis=1))
plt.xlabel('Time')
plt.ylabel('Mean consumption')
plt.show()
# %% [markdown]
# We could also plot individual consumption paths for some of the consumers-- say, the first five:
# %%
# Consumption histories of the first five simulated agents.
plt.plot(IndShockExample.history['cNrm'][:,0:5])
plt.xlabel('Time')
plt.ylabel('Individual consumption paths')
plt.show()
# %% [markdown]
# ## Other example specifications of idiosyncratic income shocks consumers
#
# $\texttt{IndShockConsumerType}$-- and $\texttt{HARK}$ in general-- can also represent models that are not infinite horizon.
#
# ### Lifecycle example
#
# Suppose we wanted to represent consumers with a *lifecycle*-- parameter values that differ by age, with a finite end point beyond which the individual cannot survive. This can be done very easily by simply specifying the time-varying attributes $\texttt{PermGroFac}$, $\texttt{LivPrb}$, $\texttt{PermShkStd}$, and $\texttt{TranShkStd}$ as Python *lists* specifying the sequence of periods these agents will experience, from beginning to end.
#
# In the cell below, we define a parameter dictionary for a rather short ten period lifecycle, with arbitrarily chosen parameters. For a more realistically calibrated (and much longer) lifecycle model, see the [SolvingMicroDSOPs REMARK](https://github.com/econ-ark/REMARK/blob/master/REMARKs/SolvingMicroDSOPs.md).
# %% {"code_folding": [0]}
LifecycleDict={ # Click arrow to expand this fairly large parameter dictionary
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0,                           # Coefficient of relative risk aversion
    "Rfree": 1.03,                         # Interest factor on assets
    "DiscFac": 0.96,                       # Intertemporal discount factor
    "LivPrb" : [0.99,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1],  # Survival probability rises with age
    "PermGroFac" : [1.01,1.01,1.01,1.02,1.02,1.02,0.7,1.0,1.0,1.0],  # 30% income drop at retirement (t=7)
    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1,0.2,0.1,0.2,0.1,0.2,0.1,0,0,0],  # No permanent shocks after retirement
    "PermShkCount" : 7,                    # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.3,0.2,0.1,0.3,0.2,0.1,0.3,0,0,0],  # No transitory shocks after retirement
    "TranShkCount" : 7,                    # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05,                     # Probability of unemployment while working
    "IncUnemp" : 0.3,                      # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005,                # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0,                   # "Unemployment" benefits when retired
    "T_retire" : 7,                        # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0,                      # Flat income tax rate (legacy parameter, will be removed in future)
    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001,                    # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20,                       # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48,                     # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3,                    # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None],                 # Additional values to add to aXtraGrid
    # A few other parameters
    "BoroCnstArt" : 0.0,                   # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True,                    # Whether to calculate the value function during solution
    "CubicBool" : False,                   # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 10,                        # Number of periods in the cycle for this agent type
    # Parameters only used in simulation
    "AgentCount" : 10000,                  # Number of agents of this type
    "T_sim" : 120,                         # Number of periods to simulate
    "aNrmInitMean" : -6.0,                 # Mean of log initial assets
    "aNrmInitStd"  : 1.0,                  # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0,                  # Mean of log initial permanent income
    "pLvlInitStd"  : 0.0,                  # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0,                 # Aggregate permanent income growth factor
    "T_age" : 11,                          # Age after which simulated agents are automatically killed
}
# %% [markdown]
# In this case, we have specified a ten period model in which retirement happens in period $t=7$. Agents in this model are more likely to die as they age, and their permanent income drops by 30\% at retirement. Let's make and solve this lifecycle example, then look at the $\texttt{solution}$ attribute.
# %%
# Solve the ten-period lifecycle problem; cycles = 1 means the sequence of
# periods is lived through exactly once.
LifecycleExample = IndShockConsumerType(**LifecycleDict)
LifecycleExample.cycles = 1 # Make this consumer live a sequence of periods -- a lifetime -- exactly once
LifecycleExample.solve()
print('First element of solution is',LifecycleExample.solution[0])
print('Solution has', len(LifecycleExample.solution),'elements.')
# %% [markdown]
# This was supposed to be a *ten* period lifecycle model-- why does our consumer type have *eleven* elements in its $\texttt{solution}$? It would be more precise to say that this specification has ten *non-terminal* periods. The solution to the 11th and final period in the model would be the same for every set of parameters: consume $c_t = m_t$, because there is no future. In a lifecycle model, the terminal period is assumed to exist; the $\texttt{LivPrb}$ parameter does not need to end with a $0.0$ in order to guarantee that survivors die.
#
# We can quickly plot the consumption functions in each period of the model:
# %%
print('Consumption functions across the lifecycle:')
# Common lower bound of m across all non-terminal periods, for plotting.
mMin = np.min([LifecycleExample.solution[t].mNrmMin for t in range(LifecycleExample.T_cycle)])
LifecycleExample.unpack('cFunc') # This makes all of the cFuncs accessible in the attribute cFunc
plot_funcs(LifecycleExample.cFunc,mMin,5)
# %% [markdown]
# ### "Cyclical" example
#
# We can also model consumers who face an infinite horizon, but who do *not* face the same problem in every period. Consider someone who works as a ski instructor: they make most of their income for the year in the winter, and make very little money in the other three seasons.
#
# We can represent this type of individual as a four period, infinite horizon model in which expected "permanent" income growth varies greatly across seasons.
# %% {"code_folding": [0]}
CyclicalDict = { # Click the arrow to expand this parameter dictionary
    # Parameters shared with the perfect foresight model
    "CRRA": 2.0,                           # Coefficient of relative risk aversion
    "Rfree": 1.03,                         # Interest factor on assets
    "DiscFac": 0.96,                       # Intertemporal discount factor
    "LivPrb" : 4*[0.98],                   # Survival probability
    "PermGroFac" : [1.082251, 2.8, 0.3, 1.1],  # Seasonal growth; product is ~1.0 (no long-run growth)
    # Parameters that specify the income distribution over the lifecycle
    "PermShkStd" : [0.1,0.1,0.1,0.1],      # Standard deviation of log permanent shocks to income
    "PermShkCount" : 7,                    # Number of points in discrete approximation to permanent income shocks
    "TranShkStd" : [0.2,0.2,0.2,0.2],      # Standard deviation of log transitory shocks to income
    "TranShkCount" : 7,                    # Number of points in discrete approximation to transitory income shocks
    "UnempPrb" : 0.05,                     # Probability of unemployment while working
    "IncUnemp" : 0.3,                      # Unemployment benefits replacement rate
    "UnempPrbRet" : 0.0005,                # Probability of "unemployment" while retired
    "IncUnempRet" : 0.0,                   # "Unemployment" benefits when retired
    "T_retire" : 0,                        # Period of retirement (0 --> no retirement)
    "tax_rate" : 0.0,                      # Flat income tax rate (legacy parameter, will be removed in future)
    # Parameters for constructing the "assets above minimum" grid
    "aXtraMin" : 0.001,                    # Minimum end-of-period "assets above minimum" value
    "aXtraMax" : 20,                       # Maximum end-of-period "assets above minimum" value
    "aXtraCount" : 48,                     # Number of points in the base grid of "assets above minimum"
    "aXtraNestFac" : 3,                    # Exponential nesting factor when constructing "assets above minimum" grid
    "aXtraExtra" : [None],                 # Additional values to add to aXtraGrid
    # A few other parameters
    "BoroCnstArt" : 0.0,                   # Artificial borrowing constraint; imposed minimum level of end-of period assets
    "vFuncBool" : True,                    # Whether to calculate the value function during solution
    "CubicBool" : False,                   # Preference shocks currently only compatible with linear cFunc
    "T_cycle" : 4,                         # Number of periods in the cycle for this agent type
    # Parameters only used in simulation
    "AgentCount" : 10000,                  # Number of agents of this type
    "T_sim" : 120,                         # Number of periods to simulate
    "aNrmInitMean" : -6.0,                 # Mean of log initial assets
    "aNrmInitStd"  : 1.0,                  # Standard deviation of log initial assets
    "pLvlInitMean" : 0.0,                  # Mean of log initial permanent income
    "pLvlInitStd"  : 0.0,                  # Standard deviation of log initial permanent income
    "PermGroFacAgg" : 1.0,                 # Aggregate permanent income growth factor
    "T_age" : None,                        # Age after which simulated agents are automatically killed
}
# %% [markdown]
# This consumer type's parameter dictionary is nearly identical to the original infinite horizon type we made, except that each of the time-varying parameters now have *four* values, rather than just one. Most of these have the same value in each period *except* for $\texttt{PermGroFac}$, which varies greatly over the four seasons. Note that the product of the four "permanent" income growth factors is almost exactly 1.0-- this type's income does not grow on average in the long run!
#
# Let's make and solve this consumer type, then plot his quarterly consumption functions:
# %%
# Solve the four-season "ski instructor" problem and plot each quarter's
# consumption function on a common domain.
CyclicalExample = IndShockConsumerType(**CyclicalDict)
CyclicalExample.cycles = 0 # Make this consumer type have an infinite horizon
CyclicalExample.solve()
CyclicalExample.unpack('cFunc')
print('Quarterly consumption functions:')
mMin = min([X.mNrmMin for X in CyclicalExample.solution])
plot_funcs(CyclicalExample.cFunc,mMin,5)
# %% [markdown]
# The very low green consumption function corresponds to the quarter in which the ski instructors make most of their income. They know that they are about to experience a 70% drop in "permanent" income, so they do not consume much *relative to their income this quarter*. In the other three quarters, *normalized* consumption is much higher, as current "permanent" income is low relative to future expectations. In *level*, the consumption chosen in each quarter is much more similar.
# %%
|
examples/ConsIndShockModel/IndShockConsumerType.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Fetch the quotes page and parse it with BeautifulSoup (lxml parser).
import requests
from bs4 import BeautifulSoup
url = 'http://quotes.toscrape.com/'
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
# Isolate the three kinds of elements we care about.
quotes = soup.find_all('span', class_='text')
authors = soup.find_all('small', class_='author')
tags = soup.find_all('div', class_='tags')
# Raw list of quote elements, then just their text.
print(quotes)
for quote in quotes:
    print(quote.text)
# Pair each quote with its author; zip is the idiomatic replacement for
# indexing with range(0, len(quotes)) and avoids an IndexError if the
# lists ever differ in length.
for quote, author in zip(quotes, authors):
    print(quote.text)
    print(author.text)
# Finally also print the tags attached to each quote.
for quote, author, tag_div in zip(quotes, authors, tags):
    print(quote.text)
    print(author.text)
    for quoteTag in tag_div.find_all('a', class_='tag'):
        print(quoteTag.text)
|
Jupyter Notebooks/How to isolate data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Face Detection
#
# Hello! In this task you will create your own deep face detector.
#
# First of all, we need import some useful stuff.
# %load_ext autoreload
# %autoreload 2
# %pylab inline
# Do you have modern Nvidia [GPU](https://en.wikipedia.org/wiki/Graphics_processing_unit)? There is your video-card model in [list](https://developer.nvidia.com/cuda-gpus) and CUDA capability >= 3.0?
#
# - Yes. You can use it for fast deep learning! In this work we recommend you use tensorflow backend with GPU. Read [installation notes](https://www.tensorflow.org/install/) with attention to gpu section, install all requirements and then install GPU version `tensorflow-gpu`.
# - No. CPU is enough for this task, but you have to use only simple model. Read [installation notes](https://www.tensorflow.org/install/) and install CPU version `tensorflow`.
#
# Of course, also you should install `keras`, `matplotlib`, `numpy` and `scikit-image`.
from keras import backend as K
from matplotlib import pyplot as plt
import numpy as np
from skimage import transform
from get_data import load_dataset, unpack
# In this task we use processed [FDDB dataset](http://vis-www.cs.umass.edu/fddb/). Processing defined in file [./prepare_data.ipynb](prepare_data.ipynb) and consists of:
#
# 1. Extract bboxes from dataset. In base dataset face defined by [ellipsis](http://vis-www.cs.umass.edu/fddb/samples/) that not very useful for basic neural network learning.
# 2. Remove images with big and small faces on one shoot.
# 3. Re-size images so that the bounding boxes (bboxes) all have the same size, 32 +/- 8 pixels.
#
# Each image in train, validation and test datasets have shape (176, 176, 3), but part of this image is black background. Interesting image aligned at top left corner.
#
# Bounding boxes define face in image and consist of 5 integer numbers: [image_index, min_row, min_col, max_row, max_col]. Bounding box width and height are 32 +/- 8 pixels wide.
#
# `train_bboxes` and `val_bboxes` is a list of bboxes.
#
# `train_shapes` and `val_shapes` is a list of interesting image shapes.
# +
# First run will download 30 MB data from github
# Load the pre-processed FDDB train/validation splits: images, bounding
# boxes ([image_index, min_row, min_col, max_row, max_col]), and the shape
# of the interesting (non-background) region of each image.
train_images, train_bboxes, train_shapes = load_dataset("data", "train")
val_images, val_bboxes, val_shapes = load_dataset("data", "val")
# -
# ## Prepare data (1 point)
#
# For learning we should extract positive and negative samples from images.
# Positive and negative sample counts should be similar.
# Every sample should have the same size.
from graph import visualize_bboxes
# Show a few training images with their ground-truth face boxes overlaid.
visualize_bboxes(images=train_images,
                 true_bboxes=train_bboxes
                )
# Every image can represent multiple faces, so we should extract all faces from every images and crop them to `SAMPLE_SHAPE`. This set of extracted images are named `positive`.
#
# Then we should extract the `negative` set. These images should have `SAMPLE_SHAPE` size. Pseudocode for extracting:
#
# negative_collection := []
#
# for i in range(negative_bbox_count):
# Select random image.
# image_shape := image_shapes[image_index]
# image_true_bboxes := true_bboxes[true_bboxes[:, 0] == image_index, 1:]
#
# for j in TRY_COUNT: # TRY_COUNT is a magic constant, for example, 100
# Generate new_bbox within image_shape.
#
# if new_bbox is negative bbox for image_true_bboxes:
# Extract from image, new_bbox and resize to SAMPLE_SIZE negative_sample.
# Add negative sample to negative_collection.
# Break # for j in TRY_COUNT
SAMPLE_SHAPE = (32, 32, 3)
# +
from scores import iou_score # https://en.wikipedia.org/wiki/Jaccard_index
def is_negative_bbox(new_bbox, true_bboxes, eps=1e-1):
    """Return True when *new_bbox* overlaps no ground-truth bbox.

    A candidate is negative if its IoU with every true bbox stays below
    *eps*.  Bboxes are 4 ints [min_row, min_col, max_row, max_col]
    (no image index).
    """
    return all(iou_score(new_bbox, gt_bbox) < eps for gt_bbox in true_bboxes)
# +
# Write this function
def gen_negative_bbox(image_shape, true_bboxes):
    """Sample a random background bbox inside *image_shape*.

    Draws candidate boxes until one is at least SAMPLE_SHAPE in both
    dimensions and does not overlap any ground-truth bbox
    (per ``is_negative_bbox``).
    """
    while True:
        # Pick the bottom-right corner first; degenerate corners are rejected.
        max_row = random.randint(0, image_shape[0])
        max_col = random.randint(0, image_shape[1])
        if max_row == 0 or max_col == 0:
            continue
        # Then a top-left corner somewhere above/left of it.
        min_row = random.randint(0, max_row)
        min_col = random.randint(0, max_col)
        # The crop must be at least as large as a training sample.
        if max_row - min_row < SAMPLE_SHAPE[0] or max_col - min_col < SAMPLE_SHAPE[1]:
            continue
        candidate = [min_row, min_col, max_row, max_col]
        if is_negative_bbox(candidate, true_bboxes, eps=1e-1):
            return candidate
def get_positive_negative(images, true_bboxes, image_shapes, negative_bbox_count=None):
    """Collect positive (face) and negative (background) crops for training.

    Every ground-truth bbox in every image is cropped and resized to
    SAMPLE_SHAPE (positives); `negative_bbox_count` random non-face crops
    (default: as many as there are true bboxes) are sampled the same way.
    Returns (positives, negatives) as lists of SAMPLE_SHAPE arrays.
    """
    # Nested helper: crops either all true bboxes (positive=True) or one
    # random background bbox (positive=False) from image `image_index`,
    # appending the results to the enclosing `positives`/`negatives` lists.
    def make_bbox(image_index, positive = True):
        image_shape = image_shapes[image_index]
        image_true_bboxes = true_bboxes[true_bboxes[:, 0] == image_index, 1:]
        # Only the top-left [0:h, 0:w] region is meaningful; the rest of the
        # padded image is black background.
        image = images[image_index][0:image_shape[0], 0:image_shape[1], :]
        # Skip images too small to comfortably yield a background crop.
        # NOTE(review): `image_shape` is compared against the 2-element
        # SAMPLE_SHAPE[0:2]; assumes image_shape holds (rows, cols) — confirm.
        if(all(image_shape<3*np.array(SAMPLE_SHAPE[0:2])) and not positive):
            return False
        if positive:
            for bbox in image_true_bboxes:
                image_crop_bbox = image[bbox[0]:bbox[2], bbox[1]:bbox[3],:]
                # resize
                image_resized_bbox = transform.resize(image_crop_bbox, SAMPLE_SHAPE)
                # add to positives
                positives.append(image_resized_bbox)
        else:
            bbox = gen_negative_bbox(image_shape,image_true_bboxes)
            image_crop_bbox = image[bbox[0]:bbox[2], bbox[1]:bbox[3],:]
            # resize
            image_resized_bbox = transform.resize(image_crop_bbox, SAMPLE_SHAPE)
            # add to negatives
            negatives.append(image_resized_bbox)
        return True
    # NOTE(review): the string below is a no-op statement, not the function's
    # docstring (it is not the first statement in the body).
    """Retrieve positive and negative samples from image."""
    positives = []
    negatives = []
    image_count = image_shapes.shape[0]
    if negative_bbox_count is None:
        negative_bbox_count = len(true_bboxes)
    # Positives: one pass over every image.
    for i in range(image_count):
        make_bbox(i, positive=True)
    # Negatives: keep drawing random images until enough crops succeed.
    negs = negative_bbox_count
    while(negs>0):
        image_index = random.randint(0, image_count-1)
        if(make_bbox(image_index, positive = False)):
            negs-=1
    print(len(positives))
    print(len(negatives))
    # Pay attention to the fact that most part of image may be black -
    # extract negative samples only from part [0:image_shape[0], 0:image_shape[1]]
    # Write code here
    # ...
    return positives, negatives
# -
def get_samples(images, true_bboxes, image_shapes):
    """Build the (X, Y) training arrays from face and background crops.

    X stacks the positive samples first, then the negatives.
    Y is one-hot encoded: [0, 1] marks a face, [1, 0] marks background.
    """
    positive, negative = get_positive_negative(
        images=images, true_bboxes=true_bboxes, image_shapes=image_shapes
    )
    samples = positive + negative
    labels = [[0, 1]] * len(positive) + [[1, 0]] * len(negative)
    return np.array(samples), np.array(labels)
# Now we can extract samples from images.
X_train, Y_train = get_samples(train_images, train_bboxes, train_shapes)
X_val, Y_val = get_samples(val_images, val_bboxes, val_shapes)
# There we should see faces
from graph import visualize_samples
visualize_samples(X_train[Y_train[:, 1] == 1])
# There we shouldn't see faces
visualize_samples(X_train[Y_train[:, 1] == 0])
# ## Classifier training (3 points)
#
# First of all, we should train face classifier that checks if face represented on sample.
BATCH_SIZE = 64
# ### Image augmentation
#
# Important thing in deep learning is augmentation. Sometimes, if your model are complex and cool, you can increase quality by using good augmentation.
#
# Keras provide good [images preprocessing and augmentation](https://keras.io/preprocessing/image/). This preprocessing executes online (on the fly) while learning.
#
# Of course, if you want using samplewise and featurewise center and std normalization you should run this transformation on predict stage. But you will use this classifier to fully convolution detector, in this case such transformation quite complicated, and we don't recommend use them in classifier.
#
# For heavy augmentation you can use library [imgaug](https://github.com/aleju/imgaug). If you need, you can use this library in offline manner (simple way) and online manner (hard way). However, hard way is not so hard: you only have to write [python generator](https://wiki.python.org/moin/Generators), which returns image batches, and pass it to [fit_generator](https://keras.io/models/model/#fit_generator)
# +
from keras.preprocessing.image import ImageDataGenerator # Usefull thing. Read the doc.
datagen = ImageDataGenerator(horizontal_flip=True,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=0.1,
)
datagen.fit(X_train)
# -
# ### Fitting classifier
#
# For fitting you can use one of Keras optimizer algorithms. [Good overview](http://ruder.io/optimizing-gradient-descent/)
#
# To choose best learning rate strategy you should read about EarlyStopping and ReduceLROnPlateau or LearningRateScheduler on [callbacks](https://keras.io/callbacks/) page of keras documentation, it's very useful in deep learning.
#
# If you repeat architecture from some paper, you can find information about good optimizer algorithm and learning rate strategy in this paper. For example, every [keras application](https://keras.io/applications/) has link to paper, that describes suitable learning procedure for this specific architecture.
# +
import os.path
from keras.optimizers import Adam
# Very usefull, pay attention
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler
from graph import plot_history
def fit(model, datagen, X_train, Y_train, X_val, Y_val, model_name=None, output_dir="data/checkpoints", class_weight=None, epochs=50, lr=0.001, verbose=False):
    """Compile and train *model* on augmented batches, then plot history.

    Args:
        model: uncompiled Keras classification network.
        datagen: fitted ImageDataGenerator; streams augmented training
            batches, and its ``standardize`` is applied to X_val so both
            sets get the same preprocessing.
        X_train, Y_train, X_val, Y_val: samples and one-hot labels.
        model_name: when given, best-val_loss checkpoints named
            "{model_name}-{epoch}-{val_loss}.hdf5" are written to
            *output_dir*; None disables checkpointing.
        output_dir: folder for checkpoint files.
        class_weight: optional per-class loss weighting dict.
        epochs: number of training epochs.
        lr: Adam learning rate.
        verbose: print the model summary before training.
    """
    if verbose:
        model.summary()
    model.compile(optimizer=Adam(lr=lr), # You can use another optimizer
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # fit_generator streams augmented batches; validation data is only
    # standardized (no random augmentation) for a stable metric.
    history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=BATCH_SIZE),
                        validation_data=(datagen.standardize(X_val), Y_val),
                        epochs=epochs, steps_per_epoch=len(X_train) // BATCH_SIZE,
                        callbacks=[ModelCheckpoint(os.path.join(output_dir, "{model_name}").format(model_name=model_name) + "-{epoch:02d}-{val_loss:.2f}.hdf5", save_best_only=True),
                                  ] if model_name is not None else [],
                        class_weight=class_weight,
                       ) # starts training
    plot_history(history)
# -
# #### (first point out of three)
#
# 
# <NAME>., <NAME>., <NAME>. and <NAME>., 1998. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11), pp.2278-2324.
#
# Of course, you can use any another architecture, if want. Main thing is classification quality of your model.
#
# Acceptable validation accuracy for this task is 0.92.
# +
from keras.models import Model, Sequential
from keras.layers import Flatten, Dense, Activation, Input, Dropout, Activation, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
def generate_model(sample_shape):
    """Build a LeNet-style face/background classifier for SAMPLE_SHAPE crops.

    Two conv+pool stages followed by two dense layers and a two-way
    softmax head ([background, face] probabilities).
    """
    inputs = Input(shape=sample_shape)
    net = Conv2D(20, (5, 5), activation='relu')(inputs)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Conv2D(50, (5, 5), activation='relu')(net)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Flatten()(net)
    net = Dense(120, activation='relu')(net)
    net = Dense(84, activation='relu')(net)
    outputs = Dense(2, activation='softmax')(net)
    return Model(inputs=inputs, outputs=outputs)
model = generate_model(SAMPLE_SHAPE)
# -
# #### Fit the model (second point out of three)
#
# If you don't have a fast video card suitable for deep learning, you can first check neural network modifications with a small value of the parameter `epochs`, for example, 10, and then increase this parameter after selecting the best model.
# Fitting on CPU can be long, we suggest do it at bedtime.
#
# Don't forget change model name.
# Attention: Windows implementation may cause an error here. In that case use model_name=None.
fit(model_name="lenet", model=model, datagen=datagen, X_train=X_train, X_val=X_val, Y_train=Y_train, Y_val=Y_val)
# #### (third point out of three)
#
# After learning model weights saves in folder `data/checkpoints/`.
# Use `model.load_weights(fname)` to load best weights.
#
# If you use Windows and Model Checkpoint doesn't work on your configuration, you should implement [your own Callback](https://keras.io/callbacks/#create-a-callback) to save best weights in memory and then load it back.
# +
def get_checkpoint():
    """Path of the manually selected best LeNet checkpoint."""
    return "data/checkpoints/lenet-44-0.04.hdf5"
model.load_weights(get_checkpoint())
# -
# ## Detection
#
# If you have prepared classification architecture with high validation score, you can use this architecture for detection.
#
# Convert classification architecture to fully convolution neural network (FCNN), that returns heatmap of activation.
#
# ### Detector model or sliding window (1 point)
#
# Now you should replace fully-connected layers with $1 \times 1$ convolution layers.
#
# Every fully connected layer perform operation $f(Wx + b)$, where $f(\cdot)$ is nonlinear activation function, $x$ is layer input, $W$ and $b$ is layer weights. This operation can be emulated with $1 \times 1$ convolution with activation function $f(\cdot)$, that perform exactly same operation $f(Wx + b)$.
#
# If there is `Flatten` layer with $n \times k$ input size before fully connected layers, convolution should have same $n \times k$ input size.
# Multiple fully connected layers can be replaced with convolution layers sequence.
#
# After replace all fully connected layers with convolution layers, we get fully convolution network. If input shape is equal to input size of previous network, output will have size $1 \times 1$. But if we increase input shape, output shape automatically will be increased. For example, if convolution step of previous network strides 4 pixels, increase input size with 100 pixels along all axis makes increase outputsize with 25 values along all axis. We got activation map of classifier without necessary extract samples from image and multiple calculate low-level features.
#
# In total:
# 1. $1 \times 1$ convolution layer is equivalent of fully connected layer.
# 2. $1 \times 1$ convolution layers can be used to get activation map of classification network in "sliding window" manner.
#
# We propose replacing the last fully connected layer (softmax activation) with a convolution layer with linear activation. It will be useful for finding a good threshold. Of course, you can still use a softmax activation.
#
# #### Example of replace cnn head:
#
# ##### Head before convert
#
# 
#
# ##### Head after convert
#
# 
#
# On this images displayed only head. `InputLayer` should be replaced with convolution part exit.
# Before convert network head takes fifty $8 \times 8$ feature maps and returns two values: probability of negative and positive classes. This output can be considered as activation map with size $1 \times 1$.
#
# If input have size $8 \times 8$, output after convert would have $1 \times 1$ size, but input size is $44 \times 44$.
# After convert network head returns one $37 \times 37$ activation map.
# +
# FCNN
IMAGE_SHAPE = (176, 176, 3)
def generate_fcnn_model(image_shape):
    """Build the fully-convolutional detector for a fixed input shape.

    A compiled model cannot change its input size afterwards, so this
    factory is re-invoked whenever a different shape is needed.  The
    classifier's dense head is mirrored as 1x1 convolutions; the final
    layer keeps a linear activation so its scores can be thresholded
    directly.
    """
    inputs = Input(image_shape)
    net = Conv2D(20, (5, 5), activation='relu')(inputs)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Conv2D(50, (5, 5), activation='relu')(net)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Conv2D(120, (5, 5), activation='relu')(net)
    net = Conv2D(84, (1, 1), activation='relu')(net)
    # Two activation maps: [negative, positive] linear scores.
    heatmap = Conv2D(2, (1, 1), activation='linear')(net)
    return Model(inputs=inputs, outputs=heatmap)
fcnn_model = generate_fcnn_model(IMAGE_SHAPE)
fcnn_model.summary()
# -
# #### (1 point)
#
# Then you should write function that copy weights from classification model to fully convolution model.
# Convolution weights may be copied without modification, fully-connected layer weights should be reshaped before copy.
#
# Pay attention to last layer.
model.summary()
fcnn_model.summary()
# +
def copy_weights(base_model, fcnn_model):
    """Load classifier weights into the fully-convolutional model.

    Convolutional weights transfer as-is; dense-layer kernels are
    reshaped into the matching 1x1-convolution weight shapes taken
    from the FCNN's current weight list.
    """
    target_shapes = [w.shape for w in fcnn_model.get_weights()]
    # Reshape each source tensor to its positional counterpart's shape.
    converted = [
        source.reshape(target_shapes[index])
        for index, source in enumerate(base_model.get_weights())
    ]
    fcnn_model.set_weights(converted)
copy_weights(base_model=model, fcnn_model=fcnn_model)
# -
fcnn_model.summary()
# ### Model visualization
from graph import visualize_heatmap
predictions = fcnn_model.predict(np.array(val_images))
visualize_heatmap(val_images, predictions[:, :, :, 1])
# ### Detector (1 point)
#
# First detector part is getting bboxes and decision function.
# Greater decision function indicates better detector confidence.
#
# This function should return pred_bboxes and decision_function:
#
# - `pred bboxes` is list of 5 int tuples like `true bboxes`: `[image_index, min_row, min_col, max_row, max_col]`.
# - `decision function` is confidence of detector for every pred bbox: list of float values, `len(decision function) == len(pred bboxes)`
#
# We propose resize image to `IMAGE_SHAPE` size, find faces on resized image with `SAMPLE_SHAPE` size and then resize them back.
# +
# Detection
from skimage.feature import peak_local_max
def get_bboxes_and_decision_function(fcnn_model, images, image_shapes):
    """Predict face bboxes and confidence scores for a batch of images.

    Returns:
        pred_bboxes: list of (image_index, min_row, min_col, max_row, max_col).
        decision_function: detector confidence for each predicted bbox.
    """
    # Bring every image to the fixed FCNN input shape before predicting.
    cropped_images = np.array([transform.resize(image, IMAGE_SHAPE, mode="reflect") if image.shape != IMAGE_SHAPE else image for image in images])
    pred_bboxes, decision_function = [], []
    # Predict
    predictions = fcnn_model.predict(cropped_images)
    # Map heatmap coordinates back to image pixels.
    # NOTE(review): assumes all images share the height of images[0] and a
    # 32-px detection window (SAMPLE_SHAPE) — confirm for variable sizes.
    scale = float(images[0].shape[0] - 32)/float(predictions[0].shape[0] - 1)
    # Channel 1 of the activation map holds the "face" score.
    for image_idx, pred in enumerate(predictions[:, :, :, 1]):
        # Local maxima above 1.5 (linear activations) are face candidates.
        peaks = peak_local_max(pred, threshold_abs=1.5)
        for peak in peaks:
            prow = int(scale*peak[0])
            pcol = int(scale*peak[1])
            pred_bboxes.append((image_idx, prow, pcol, prow + 32, pcol + 32))
            decision_function.append(pred[peak[0], peak[1]])
    return pred_bboxes, decision_function
# -
# #### Detector visualization
# +
pred_bboxes, decision_function = get_bboxes_and_decision_function(fcnn_model=fcnn_model, images=val_images, image_shapes=val_shapes)
visualize_bboxes(images=val_images,
pred_bboxes=pred_bboxes,
true_bboxes=val_bboxes,
decision_function=decision_function
)
# -
# ## Detector score (1 point)
#
# Write [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall) graph.
#
# You can use function `best_match` to extract matching between prediction and ground truth, false positive and false negative samples. Pseudo-code for calculation precision and recall graph:
#
# # Initialization for first step threshold := -inf
# tn := 0 # We haven't any positive sample
# fn := |false_negative| # But some faces wasn't been detected
# tp := |true_bboxes| # All true bboxes have been caught
# fp := |false_positive| # But also some false positive samples have been caught
#
# Sort decision_function and pred_bboxes with order defined by decision_function
# y_true := List of answers for "Is the bbox have matching in y_true?" for every bbox in pred_bboxes
#
# for y_on_this_step in y_true:
# # Now we increase threshold, so some predicted bboxes makes positive.
# # If y_t is True then the bbox is true positive else bbox is false positive
# # So we should
# Update tp, tn, fp, fn with attention to y_on_this_step
#
# Add precision and recall point calculated by formula through tp, tn, fp, fn on this step
# Threshold for this point is decision function on this step.
# +
from scores import best_match
from graph import plot_precision_recall
def precision_recall_curve(pred_bboxes, true_bboxes, decision_function):
    """Compute precision/recall points for every decision-function threshold.

    Predictions are sorted by ascending confidence; step i treats
    decision value i as the threshold and evaluates only the predictions
    at or above it via ``best_match``.
    """
    precision, recall, thresholds = [], [], []
    # Write code here
    # Sort predictions by ascending decision value so that dropping the
    # first i elements simulates raising the threshold.
    pred_dec_sorted = sorted(zip(pred_bboxes, decision_function), key=lambda x: x[1])
    pred_sorted = [pred for pred, dec in pred_dec_sorted]
    dec_sorted = [dec for pred, dec in pred_dec_sorted]
    # NOTE(review): best_match is re-run for every threshold, making this
    # O(n^2); acceptable for notebook-sized data.
    for i in range(len(pred_dec_sorted)):
        thresh = dec_sorted[i]
        matched, false_negative, false_positive = best_match(pred_sorted[i:], true_bboxes, dec_sorted[i:])
        tp = float(len(matched))
        fp = float(len(false_positive))
        fn = float(len(false_negative))
        # NOTE(review): raises ZeroDivisionError when tp+fp or tp+fn is 0
        # (e.g. nothing matched at the highest threshold) — confirm
        # best_match's guarantees before relying on this.
        precision.append(tp/(tp + fp))
        recall.append(tp/(tp + fn))
        thresholds.append(thresh)
    return precision, recall, thresholds
# -
precision, recall, thresholds = precision_recall_curve(pred_bboxes=pred_bboxes, true_bboxes=val_bboxes, decision_function=decision_function)
plot_precision_recall(precision=precision, recall=recall)
# ### Threshold (1 point)
#
# Next step in detector creating is select threshold for decision_function.
# Every possible threshold presents point on recall-precision graph.
#
# Select threshold for `recall=0.85`.
def get_threshold(thresholds, recall, target_recall=0.85):
    """Return the threshold of the lowest-recall point reaching *target_recall*.

    Generalized from a hard-coded 0.85: the target recall is now a
    parameter (default keeps the original behaviour).

    Args:
        thresholds: decision-function values, aligned index-wise with *recall*.
        recall: recall achieved at each threshold.
        target_recall: minimum recall the chosen operating point must reach.

    Returns:
        The threshold whose recall is the smallest value >= *target_recall*.

    Raises:
        IndexError: if no point reaches *target_recall*.
    """
    # Sort points by ascending recall so the first qualifying index is the
    # lowest-recall point that still satisfies the target.
    pairs = sorted(zip(thresholds, recall), key=lambda pair: pair[1])
    recalls = np.array([rec for _, rec in pairs])
    idx = np.where(recalls >= target_recall)[0][0]
    return pairs[idx][0]
THRESHOLD = get_threshold(thresholds, recall)
print (THRESHOLD)
def detect(fcnn_model, images, image_shapes, threshold, return_decision=True):
    """Detect faces, keeping bboxes scoring strictly above *threshold*.

    (The previous docstring said "not less than", but the filter has
    always been strict ``>``; behaviour is unchanged, the text is fixed.)

    Args:
        fcnn_model: fully-convolutional detector.
        images, image_shapes: forwarded to get_bboxes_and_decision_function.
        threshold: exclusive lower bound on the decision-function value.
        return_decision: when True (default) also return the kept bboxes'
            decision values.

    Returns:
        numpy array of kept bboxes, plus their decision values if
        *return_decision* is True.
    """
    pred_bboxes, decision_function = get_bboxes_and_decision_function(fcnn_model, images, image_shapes)
    decisions = np.array(decision_function)
    # Index-array filtering keeps bboxes and decisions aligned.
    keep = np.where(decisions > threshold)
    result = np.array(pred_bboxes)[keep]
    result_decision = decisions[keep]
    if return_decision:
        return result, result_decision
    return result
# +
pred_bboxes, decision_function = detect(fcnn_model=fcnn_model, images=val_images, image_shapes=val_shapes, threshold=THRESHOLD, return_decision=True)
visualize_bboxes(images=val_images,
pred_bboxes=pred_bboxes,
true_bboxes=val_bboxes,
decision_function=decision_function
)
precision, recall, thresholds = precision_recall_curve(pred_bboxes=pred_bboxes, true_bboxes=val_bboxes, decision_function=decision_function)
plot_precision_recall(precision=precision, recall=recall)
# -
# ## Test dataset (1 point)
#
# Last detector preparation step is testing.
#
# Attention: to avoid over-fitting, after testing algorithm you should run [./prepare_data.ipynb](prepare_data.ipynb), and start all fitting from beginning.
#
# Detection score (in graph header) should be 0.85 or greater.
# +
test_images, test_bboxes, test_shapes = load_dataset("data", "test")
# We test get_bboxes_and_decision_function because we want to pay attention to all recall values
pred_bboxes, decision_function = get_bboxes_and_decision_function(fcnn_model=fcnn_model, images=test_images, image_shapes=test_shapes)
visualize_bboxes(images=test_images,
pred_bboxes=pred_bboxes,
true_bboxes=test_bboxes,
decision_function=decision_function
)
precision, recall, threshold = precision_recall_curve(pred_bboxes=pred_bboxes, true_bboxes=test_bboxes, decision_function=decision_function)
plot_precision_recall(precision=precision, recall=recall)
# -
# ## Optional tasks
#
# ### Real image dataset
#
# Test your algorithm on original (not scaled) data.
# Visualize bboxes and plot precision-recall curve.
# +
# First run will download 523 MB data from github
original_images, original_bboxes, original_shapes = load_dataset("data", "original")
# -
# Write code here
# ...
# ## Hard negative mining
#
# Upgrade the score with [hard negative mining](https://www.reddit.com/r/computervision/comments/2ggc5l/what_is_hard_negative_mining_and_how_is_it/).
#
# A hard negative is when you take that falsely detected patch, and explicitly create a negative example out of that patch, and add that negative to your training set. When you retrain your classifier, it should perform better with this extra knowledge, and not make as many false positives.
# Write this function
def hard_negative(train_images, image_shapes, train_bboxes, X_val, Y_val, base_model, fcnn_model):
    """Hard-negative-mining retraining step (optional task; not implemented).

    Intended flow: run the current detector over the training images,
    crop the false-positive regions, add them to the negative training
    set, and re-fit ``base_model`` before re-copying its weights into
    ``fcnn_model``.
    """
    pass
hard_negative(train_images=train_images, image_shapes=train_shapes, train_bboxes=train_bboxes, X_val=X_val, Y_val=Y_val, base_model=model, fcnn_model=fcnn_model)
model.load_weights("data/checkpoints/...")
# +
copy_weights(base_model=model, fcnn_model=fcnn_model)
pred_bboxes, decision_function = get_bboxes_and_decision_function(fcnn_model=fcnn_model, images=val_images, image_shapes=val_shapes)
visualize_bboxes(images=val_images,
pred_bboxes=pred_bboxes,
true_bboxes=val_bboxes,
decision_function=decision_function
)
precision, recall, thresholds = precision_recall_curve(pred_bboxes=pred_bboxes, true_bboxes=val_bboxes, decision_function=decision_function)
plot_precision_recall(precision=precision, recall=recall)
# -
# ### Multi scale detector
#
# Write and test detector with [pyramid representation][pyramid].
# [pyramid]: https://en.wikipedia.org/wiki/Pyramid_(image_processing)
#
# 1. Resize images to predefined scales.
# 2. Run detector with different scales.
# 3. Apply non-maximum supression to detection on different scales.
#
# References:
# 1. [<NAME>,<NAME>, <NAME>, <NAME>, <NAME>: Pyramid methods in image processing](http://persci.mit.edu/pub_pdfs/RCA84.pdf)
# 2. [<NAME>, <NAME>: The Laplacian Pyramid as a Compact Image Code](http://persci.mit.edu/pub_pdfs/pyramid83.pdf)
def multiscale_detector(fcnn_model, images, image_shapes):
    """Pyramid-based multi-scale detection (optional task; not implemented).

    Intended flow: resize the images to several predefined scales, run
    the FCNN detector at each scale, and merge the detections with
    non-maximum suppression.  Currently returns no detections.
    """
    return []
# ### Next step
#
# Next steps in deep learning detection are R-CNN, Faster R-CNN and SSD architectures.
# This architecture realization is quite complex.
# For this reason the task doesn't cover them, but you can find the articles in the internet.
|
CNN/Face_Detection-(v2).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
import networkx as nx
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer
# %matplotlib inline
# # Summarizing Text
#
# Let's try out extractive summarization using the first four paragraphs of [The Great Gatsby](http://gutenberg.net.au/ebooks02/0200041h.html).
#
# First, we'll try to extract the most representative sentence. Then, we'll extract keywords.
#
# ## I. Sentence extraction
#
# The steps of our sentence extraction process:
#
# 1. Parse and tokenize the text using spaCy, and divide into sentences.
# 2. Calculate the tf-idf matrix.
# 3. Calculate similarity scores.
# 4. Calculate TextRank: We're going to use the `networkx` package to run the TextRank algorithm.
#
# Let's get started!
#
# +
# Importing the text the lazy way.
gatsby="In my younger and more vulnerable years my father gave me some advice that I've been turning over in my mind ever since. \"Whenever you feel like criticizing any one,\" he told me, \"just remember that all the people in this world haven't had the advantages that you've had.\" He didn't say any more but we've always been unusually communicative in a reserved way, and I understood that he meant a great deal more than that. In consequence I'm inclined to reserve all judgments, a habit that has opened up many curious natures to me and also made me the victim of not a few veteran bores. The abnormal mind is quick to detect and attach itself to this quality when it appears in a normal person, and so it came about that in college I was unjustly accused of being a politician, because I was privy to the secret griefs of wild, unknown men. Most of the confidences were unsought--frequently I have feigned sleep, preoccupation, or a hostile levity when I realized by some unmistakable sign that an intimate revelation was quivering on the horizon--for the intimate revelations of young men or at least the terms in which they express them are usually plagiaristic and marred by obvious suppressions. Reserving judgments is a matter of infinite hope. I am still a little afraid of missing something if I forget that, as my father snobbishly suggested, and I snobbishly repeat a sense of the fundamental decencies is parcelled out unequally at birth. And, after boasting this way of my tolerance, I come to the admission that it has a limit. Conduct may be founded on the hard rock or the wet marshes but after a certain point I don't care what it's founded on. When I came back from the East last autumn I felt that I wanted the world to be in uniform and at a sort of moral attention forever; I wanted no more riotous excursions with privileged glimpses into the human heart. 
Only Gatsby, the man who gives his name to this book, was exempt from my reaction--Gatsby who represented everything for which I have an unaffected scorn. If personality is an unbroken series of successful gestures, then there was something gorgeous about him, some heightened sensitivity to the promises of life, as if he were related to one of those intricate machines that register earthquakes ten thousand miles away. This responsiveness had nothing to do with that flabby impressionability which is dignified under the name of the \"creative temperament\"--it was an extraordinary gift for hope, a romantic readiness such as I have never found in any other person and which it is not likely I shall ever find again. No--Gatsby turned out all right at the end; it is what preyed on Gatsby, what foul dust floated in the wake of his dreams that temporarily closed out my interest in the abortive sorrows and short-winded elations of men."
# no.1
# We want to use the standard english-language parser.
parser = spacy.load('en')
# Parsing Gatsby.
gatsby = parser(gatsby)
# Dividing the text into sentences and storing them as a list of strings.
sentences=[]
for span in gatsby.sents:
# go from the start to the end of each span, returning each token in the sentence
# combine each token using join()
sent = ''.join(gatsby[i].string for i in range(span.start, span.end)).strip()
sentences.append(sent)
# no. 2
# Creating the tf-idf matrix.
counter = TfidfVectorizer(lowercase=False,
stop_words=None,
ngram_range=(1, 1),
analyzer=u'word',
max_df=.5,
min_df=1,
max_features=None,
vocabulary=None,
binary=False)
#Applying the vectorizer
data_counts=counter.fit_transform(sentences)
# -
# # Similarity
#
# So far, this is all (hopefully) familiar: We've done text parsing and the tf-idf calculation before. We should now have sentences represented as vectors, with each word having a score based on how often it occurs in the sentence divided by how often it occurs in the whole text.
#
# Now let's calculate the similarity scores for the sentences and apply the TextRank algorithm. Because TextRank is based on Google's PageRank algorithm, the function is called 'pagerank'. The hyperparameters are the damping parameter ´alpha´ and the convergence parameter ´tol´.
# +
# no. 3
# Calculating similarity
similarity = data_counts * data_counts.T
# no. 4
# Identifying the sentence with the highest rank.
nx_graph = nx.from_scipy_sparse_matrix(similarity)
ranks=nx.pagerank(nx_graph, alpha=.85, tol=.00000001)
ranked = sorted(((ranks[i],s) for i,s in enumerate(sentences)),
reverse=True)
print(ranked[0])
# -
# Since a lot of Gatsby is about the narrator acting as the observer of other peoples' sordid secrets, this seems pretty good. Now, let's extract some keywords.
#
# # II. Keyword summarization
#
# 1) Parse and tokenize text (already done).
# 2) Filter out stopwords, choose only nouns and adjectives.
# 3) Calculate the neighbors of words (we'll use a window of 4).
# 4) Run TextRank on the neighbor matrix.
#
# +
# no. 2
# Removing stop words and punctuation, then getting a list of all unique words in the text
gatsby_filt = [word for word in gatsby if word.is_stop==False and (word.pos_=='NOUN')]
# NOTE(review): spaCy tokens are distinct objects even when their text is
# identical, so this set may hold several entries per word — confirm whether
# deduplication by token text was intended.
words=set(gatsby_filt)
#Creating a grid indicating whether words are within 4 places of the target word
adjacency=pd.DataFrame(columns=words,index=words,data=0)
#Iterating through each word in the text and indicating which of the unique words are its neighbors
for i,word in enumerate(gatsby):
    # Checking if any of the word's next four neighbors are in the word list
    if any([word == item for item in gatsby_filt]):
        # Making sure to stop at the end of the string, even if there are less than four words left after the target.
        # NOTE(review): this expression reduces to max(0, i+5) and does not
        # clamp to len(gatsby); the slice below tolerates the overshoot anyway.
        end=max(0,len(gatsby)-(len(gatsby)-(i+5)))
        # The potential neighbors.
        nextwords=gatsby[i+1:end]
        # Filtering the neighbors to select only those in the word list
        inset=[x in gatsby_filt for x in nextwords] # boolean True or False
        # The comprehension variable `i` shadows the outer loop index but is
        # scoped to the comprehension in Python 3, so the outer loop is safe.
        neighbors=[nextwords[i] for i in range(len(nextwords)) if inset[i]] # if returns True
        # Adding 1 to the adjacency matrix for neighbors of the target word
        if neighbors:
            adjacency.loc[word,neighbors]=adjacency.loc[word,neighbors]+1
print('done!')
# +
# no. 4
# Running TextRank
nx_words = nx.from_numpy_matrix(adjacency.as_matrix())
ranks=nx.pagerank(nx_words, alpha=.85, tol=.00000001)
# Identifying the five most highly ranked keywords
ranked = sorted(((ranks[i],s) for i,s in enumerate(words)),
reverse=True) # descending order
print(ranked[:5])
# -
# These results are less impressive. 'Hope', 'promises', and 'glimpses' certainly fit the elegiac, on-the-outside-looking-in tone of the book. TextRank may perform better on a larger text sample.
#
# # Drill
#
# It is also possible that keyword phrases will work better. Modify the keyword extraction code to extract two-word phrases (digrams) rather than single words. Then try it with trigrams. You will probably want to broaden the window that defines 'neighbors.' Try a few different modifications, and write up your observations in your notebook. Discuss with your mentor.
# ## III. Digrams
gatsby[:20]
# +
# creation of digrams
new_gats = []
for i in range(len(gatsby)):
digram = gatsby[i:i+2]
new_gats.append(digram)
# -
# remove punctuation from our digrams
second_gats = [digram for digram in new_gats[:-2] if digram[0].is_punct==False and digram[1].is_punct==False]
# +
# remove digrams that contain at least one stop word, this proved to improve the results of summarization
shorter_gats = [digram for digram in second_gats if digram[0].is_stop==False and digram[1].is_stop==False]
# +
new_gats_filt = [digram for digram in shorter_gats if digram[0].pos_=='ADJ'
and digram[1].pos_=='NOUN']
new_gats_words=set(new_gats_filt)
# -
shorter_gats[1:5]
# +
adjacency_digr=pd.DataFrame(columns=[str(i) for i in new_gats_words] ,index=[str(i) for i in new_gats_words],data=0)
#Iterating through each word in the text and indicating which of the unique words are its neighbors
for i,digr in enumerate(shorter_gats):
if any([digr == item for item in new_gats_filt]):
end=max(0,len(shorter_gats)-(len(shorter_gats)-(i+15)))
nextwords=shorter_gats[i+1:end]
inset=[x in new_gats_filt for x in nextwords] # boolean True or False
neighbors=[nextwords[i] for i in range(len(nextwords)) if inset[i]] # if returns True
if neighbors:
for i in neighbors:
adjacency_digr.loc[str(digr),str(i)]=adjacency_digr.loc[str(digr),str(i)]+1
print('done!')
# -
adjacency_digr.head()
# +
# Running TextRank
nx_words_digr = nx.from_numpy_matrix(adjacency_digr.as_matrix())
ranks_digr=nx.pagerank(nx_words_digr, alpha=.85, tol=.00000001)
# Identifying the five most highly ranked keywords
ranked_digr = sorted(((ranks_digr[i],s) for i,s in enumerate(new_gats_words)),
reverse=True) # descending order
print(ranked_digr[:5])
# -
# ### IV. Trigrams
# +
# creation of trigrams
trig_gats = []
for i in range(len(gatsby)):
trigram = gatsby[i:i+3]
trig_gats.append(trigram)
# -
#indeed three words
trig_gats[0]
# remove punctuation from our trigrams and stop words
trig_gats = [trigram for trigram in trig_gats[:-2]
if trigram[0].is_punct==False
and trigram[1].is_punct==False
and trigram[2].is_punct==False]
# +
trig_gats_filt = [trigram for trigram in trig_gats
if trigram[2].is_stop==False
and (trigram[0].pos_=='NOUN'
or trigram[1].pos_=='NOUN'
or trigram[2].pos_=='NOUN')]
trig_gats_words=set(trig_gats_filt)
# +
# Creating a grid indicating whether candidate trigrams occur within 4
# places of the target trigram.
adjacency_trigr = pd.DataFrame(columns=[str(w) for w in trig_gats_words],
                               index=[str(w) for w in trig_gats_words], data=0)
# Iterating through each trigram in the text and recording which of the
# candidate trigrams are its neighbors.
# BUG FIX: the original enumerated shorter_gats (the bigram list), so no
# trigram ever matched and the matrix stayed all zeros.
for i, trigr in enumerate(trig_gats):
    if trigr in trig_gats_words:
        # Window of the next 4 trigrams; slicing clips at the end of the list.
        window = trig_gats[i + 1:i + 5]
        for nxt in window:
            if nxt in trig_gats_words:
                adjacency_trigr.loc[str(trigr), str(nxt)] += 1
print('done!')
# +
# Running TextRank over the trigram co-occurrence graph.
# nx.from_numpy_matrix and DataFrame.as_matrix were removed in networkx 3.0
# and pandas 1.0 respectively; use from_numpy_array / to_numpy instead.
nx_words_trigr = nx.from_numpy_array(adjacency_trigr.to_numpy())
ranks_trigr = nx.pagerank(nx_words_trigr, alpha=.85, tol=.00000001)
# Identifying the five most highly ranked keyword trigrams.
ranked_trigr = sorted(((ranks_trigr[i], s) for i, s in enumerate(trig_gats_words)),
                      reverse=True)  # descending order
print(ranked_trigr[:5])
# -
|
6.4.3+Summarizing+Text-+Guided+Example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.9 64-bit
# language: python
# name: python3
# ---
# # Analysis of Neural Networks by computing saliency maps (see e.g. https://www.sciencedirect.com/topics/engineering/saliency-map)
# BUG FIX: `os` was used here without ever being imported in this notebook.
import os

os.chdir("..")  # You should now be in the 'drought_impact_forecasting' folder
os.getcwd()
import numpy as np
import torch
from load_model_data import load_data_point, load_model
from draw_forecast import visualize_rgb
from time_ndvi_plot import plot_ndvi
# Load the two trained models from their checkpoints.
SGConvLSTM = load_model("trained_models/SGConvLSTM.ckpt")
SGEDConvLSTM = load_model("trained_models/SGEDConvLSTM.ckpt")
# Load an example from the extreme data set.
# Load a single example (index 1) from the extreme-events subset.
truth, context, target, npf = load_data_point(test_context_dataset = "Data/small_data/extreme_context_data_paths.pkl",
                                              test_target_dataset = "Data/small_data/extreme_target_data_paths.pkl",
                                              index = 1)
# These lines are important for autograd to know that we want the gradient with respect to the context!
# Each model gets its own leaf copy so the two gradient computations do not interfere.
context_SG = context.clone().requires_grad_()
context_SGED = context.clone().requires_grad_()
# We can now run inference. This may take a while.
# Run inference (can be slow)
# prediction_count asks for two thirds of truth's trailing dimension
# (presumably the time axis -- TODO confirm against load_data_point).
prediction_SG, _, _ = SGConvLSTM(x = context_SG, prediction_count = int((2/3)*truth.shape[-1]), non_pred_feat = npf)
prediction_SGED, _, _ = SGEDConvLSTM(x = context_SGED, prediction_count = int((2/3)*truth.shape[-1]), non_pred_feat = npf)
# We want the response to be the mean NDVI of e.g. the last image. For this, we can define a function.
def mean_NDVI(prediction):
    """Mean NDVI over the prediction[0, :, :, 0] slice.

    NDVI = (c3 - c2) / (c3 + c2 + eps), computed between channels 3 and 2
    of axis 1, with a small epsilon to avoid division by zero. The mean is
    taken over the first batch element at index 0 of the trailing axis.
    """
    band_a = prediction[:, 3, ...]
    band_b = prediction[:, 2, ...]
    ndvi = (band_a - band_b) / (band_a + band_b + 1e-6)
    return ndvi[0, :, :, 0].mean()
# Scalar response per model: mean NDVI of a fixed slice of the prediction.
prediction_mean_ndvi_SG = mean_NDVI(prediction_SG)
prediction_mean_ndvi_SGED = mean_NDVI(prediction_SGED)
# Time to backpropagate. Again, this can take a while.
# torch.autograd.grad returns a 1-tuple holding d(mean NDVI)/d(context).
g_SG = torch.autograd.grad(prediction_mean_ndvi_SG, context_SG)
g_SGED = torch.autograd.grad(prediction_mean_ndvi_SGED, context_SGED)
# Finally, we can visualize the desired maps.
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [4, 4]
plt.rcParams['figure.dpi'] = 90
# NOTE(review): plt.plot and plt.imshow below draw on the same axes, so the
# image overlays the line plot -- confirm whether two figures were intended.
plt.plot(torch.abs(torch.mean(g_SG[0][0, 6, :, :, :], dim=(0, 1))))
plt.imshow((g_SG[0][0, 6, :, :, 19]).permute(1, 0))
plt.colorbar()
|
demos/saliency_map.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="8510ac1d600e8b4c1e27a34a84fb4cc703970d37" _cell_guid="8c580981-fa5a-43a9-ae99-0251cda5b7a9"
# *This tutorial is part Level 2 in the [Learn Machine Learning](https://www.kaggle.com/learn/machine-learning) curriculum. This tutorial picks up where Level 1 finished, so you will get the most out of it if you've done the exercise from Level 1.*
#
# In this step, you will learn three approaches to dealing with missing values. You will then learn to compare the effectiveness of these approaches on any given dataset.
#
# # Introduction
#
# There are many ways data can end up with missing values. For example
# - A 2 bedroom house wouldn't include an answer for _How large is the third bedroom_
# - Someone being surveyed may choose not to share their income
#
# Python libraries represent missing numbers as **nan** which is short for "not a number". You can detect which cells have missing values, and then count how many there are in each column with the command:
# ```
# missing_val_count_by_column = (data.isnull().sum())
# print(missing_val_count_by_column[missing_val_count_by_column > 0])
# ```
#
# Most libraries (including scikit-learn) will give you an error if you try to build a model using data with missing values. So you'll need to choose one of the strategies below.
#
# ---
# ## Solutions
#
#
# ## 1) A Simple Option: Drop Columns with Missing Values
# If your data is in a DataFrame called `original_data`, you can drop columns with missing values. One way to do that is
# ```
# data_without_missing_values = original_data.dropna(axis=1)
# ```
#
# In many cases, you'll have both a training dataset and a test dataset. You will want to drop the same columns in both DataFrames. In that case, you would write
#
# ```
# cols_with_missing = [col for col in original_data.columns
# if original_data[col].isnull().any()]
# reduced_original_data = original_data.drop(cols_with_missing, axis=1)
# reduced_test_data = test_data.drop(cols_with_missing, axis=1)
# ```
# If those columns had useful information (in the places that were not missing), your model loses access to this information when the column is dropped. Also, if your test data has missing values in places where your training data did not, this will result in an error.
#
# So, it's usually not the best solution. However, it can be useful when most values in a column are missing.
#
#
#
# ## 2) A Better Option: Imputation
# Imputation fills in the missing value with some number. The imputed value won't be exactly right in most cases, but it usually gives more accurate models than dropping the column entirely.
#
# This is done with
# ```
# from sklearn.impute import SimpleImputer
# my_imputer = SimpleImputer()
# data_with_imputed_values = my_imputer.fit_transform(original_data)
# ```
# The default behavior fills in the mean value for imputation. Statisticians have researched more complex strategies, but those complex strategies typically give no benefit once you plug the results into sophisticated machine learning models.
#
# One (of many) nice things about Imputation is that it can be included in a scikit-learn Pipeline. Pipelines simplify model building, model validation and model deployment.
#
# ## 3) An Extension To Imputation
# Imputation is the standard approach, and it usually works well. However, imputed values may be systematically above or below their actual values (which weren't collected in the dataset). Or rows with missing values may be unique in some other way. In that case, your model would make better predictions by considering which values were originally missing. Here's how it might look:
# ```
# # make copy to avoid changing original data (when Imputing)
# new_data = original_data.copy()
#
# # make new columns indicating what will be imputed
# cols_with_missing = (col for col in new_data.columns
# if new_data[col].isnull().any())
# for col in cols_with_missing:
# new_data[col + '_was_missing'] = new_data[col].isnull()
#
# # Imputation
# my_imputer = SimpleImputer()
# new_data = pd.DataFrame(my_imputer.fit_transform(new_data))
# new_data.columns = original_data.columns
# ```
#
# In some cases this approach will meaningfully improve results. In other cases, it doesn't help at all.
#
# ---
# # Example (Comparing All Solutions)
#
# We will see an example predicting housing prices from the Melbourne Housing data. To master missing value handling, fork this notebook and repeat the same steps with the Iowa Housing data. Find information about both in the **Data** section of the header menu.
#
#
# ### Basic Problem Set-up
# + _uuid="44b399828f0b07fe63abbdcdf74bbf3b22bb8067" _cell_guid="a5e604e7-3c20-409b-ad29-5c2a4fe40738"
import pandas as pd
# Load data
# NOTE(review): melb_data is loaded here but never used below in this notebook.
melb_data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
# Target is the sale price; predictors are the numeric columns only
# (object/string dtypes are excluded).
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
y = train.SalePrice
features_all = train.drop(['SalePrice'], axis = 1)
X = features_all.select_dtypes(exclude = ['object'])
# + _uuid="63a47765b5f1414bf6c6d44e32c5cf8c84f61ffc"
# + [markdown] _uuid="de7bdb4f005022ea45742b5d25f47cba7a6d698d" _cell_guid="88d1c1c9-91ba-4bef-aee5-f4a19a68e61c"
# ### Create Function to Measure Quality of An Approach
# We divide our data into **training** and **test**. If the reason for this is unfamiliar, review [Welcome to Data Science](https://www.kaggle.com/dansbecker/welcome-to-data-science-1).
#
# We've loaded a function `score_dataset(X_train, X_test, y_train, y_test)` to compare the quality of different approaches to missing values. This function reports the out-of-sample MAE score from a RandomForest.
# + _uuid="6088bfdac20ece9c040e83beb28ff169d17f0666" _cell_guid="986dbfe4-9386-4a03-b2a4-9e99bf1b08f5" _kg_hide-input=true
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
# 70/30 train/test split with a fixed seed for reproducible scores.
train_X, test_X, train_y, test_y = train_test_split(X, y, train_size = 0.7, test_size = 0.3,random_state = 5)
def mae_score(train_X, test_X, train_y, test_y):
    """Fit a default RandomForestRegressor on the training split and
    return its mean absolute error on the test split."""
    forest = RandomForestRegressor()
    forest.fit(train_X, train_y)
    return mean_absolute_error(test_y, forest.predict(test_X))
# + [markdown] _uuid="df0103056e52ffb3c500f5fc1437bd175a41adad" _cell_guid="7184fc99-e266-4bd7-af0d-0da9c96887f0"
# ### Get Model Score from Dropping Columns with Missing Values
# + _uuid="2957c4a7c4e6ed990b1406d5c88a9aa4c738b28f" _cell_guid="64ae7a0a-95aa-47a7-aa29-a1589ebcbd18"
# Columns of X that contain at least one missing value.
col_with_missed_val = [col for col in X.columns
                       if X[col].isnull().any()]
# Drop those columns from BOTH splits so train and test stay aligned.
train_X_no_missed = train_X.drop(col_with_missed_val, axis = 1)
test_X_no_missed = test_X.drop(col_with_missed_val, axis = 1)
mae_score(train_X_no_missed, test_X_no_missed, train_y, test_y)
# + [markdown] _uuid="e78f8751db60278737a388a4b85b72bed4d3f45b" _cell_guid="b446d3e1-0a5c-4552-8718-e07ab5f4496a"
# ### Get Model Score from Imputation
# + _uuid="7f1030b598d08cd586a56d4ab33d1f99f6535784" _cell_guid="8cb756dc-9623-43b7-92c7-dfd87c70f450"
from sklearn.impute import SimpleImputer
# Work on copies so the original splits stay untouched.
train_X_copy = train_X.copy()
test_X_copy = test_X.copy()
impute = SimpleImputer()
# Fit column means on the training split only, then apply them to both
# splits (fitting on test data would leak information).
train_X_imputed = impute.fit_transform(train_X_copy)
test_X_imputed = impute.transform(test_X_copy)
mae_score(train_X_imputed, test_X_imputed, train_y, test_y)
# + [markdown] _uuid="5540a5c3c2dd2fd0427820a4af57baf97729c462" _cell_guid="2713edc7-a4cc-42ef-8619-bf775fa85481"
# ### Get Score from Imputation with Extra Columns Showing What Was Imputed
# + _uuid="914b9e57b99d7964013f007537c300fe57e0bf91" _cell_guid="39ba8166-9b03-41cb-9403-d728b342d5e7"
from sklearn.impute import SimpleImputer
# Work on copies so the original splits stay untouched.
train_X_copy = train_X.copy()
test_X_copy = test_X.copy()
col_with_missed_val = [col for col in X.columns
                       if X[col].isnull().any()]
# Add a boolean indicator column per imputed feature so the model can
# learn from the missingness pattern itself.
for col in col_with_missed_val:
    train_X_copy[col + '_was_missed'] = train_X_copy[col].isnull()
    test_X_copy[col + '_was_missed'] = test_X_copy[col].isnull()
impute = SimpleImputer()
train_X_imputed = impute.fit_transform(train_X_copy)
test_X_imputed = impute.transform(test_X_copy)
mae_score(train_X_imputed, test_X_imputed, train_y, test_y)
# + [markdown] _uuid="2336d82df8f643c42ecb1354c034f214e87d7aa4" _cell_guid="bc678d8b-f4c9-464f-8fdf-0d69afaefcaa"
# # Conclusion
# As is common, imputing missing values allowed us to improve our model compared to dropping those columns. We got an additional boost by tracking what values had been imputed.
# + [markdown] _uuid="ede373b4ec290324b175149afca77f2634a95277" _cell_guid="a8969353-a3af-4ac3-998b-b0fb884036cb"
# # Your Turn
# 1) Find some columns with missing values in your dataset.
#
# 2) Use the Imputer class so you can impute missing values
#
# 3) Add columns with missing values to your predictors.
#
# If you find the right columns, you may see an improvement in model scores. That said, the Iowa data doesn't have a lot of columns with missing values. So, whether you see an improvement at this point depends on some other details of your model.
#
# Once you've added the Imputer, keep using those columns for future steps. In the end, it will improve your model (and in most other datasets, it is a big improvement).
#
# # Keep Going
# Once you've added the Imputer and included columns with missing values, you are ready to [add categorical variables](https://www.kaggle.com/dansbecker/using-categorical-data-with-one-hot-encoding), which is non-numeric data representing categories (like the name of the neighborhood a house is in).
#
# ---
#
# Part of the **[Learn Machine Learning](https://www.kaggle.com/learn/machine-learning)** track.
|
machine learning/kernel-missing-vars.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# # Tutorial #2: Deploy an image classification model in Azure Container Instance (ACI)
#
# This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud.
#
# Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself.
#
# In this part of the tutorial, you use Azure Machine Learning service (Preview) to:
#
# > * Set up your testing environment
# > * Retrieve the model from your workspace
# > * Test the model locally
# > * Deploy the model to ACI
# > * Test the deployed model
#
# ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where).
#
#
# ## Prerequisites
#
# Complete the model training in the [Tutorial #1: Train an image classification model with Azure Machine Learning](train-models.ipynb) notebook.
#
#
# + tags=["register model from file"]
# If you did NOT complete the tutorial, you can instead run this cell
# This will register a model and download the data needed for this tutorial
# These prerequisites are created in the training tutorial
# Feel free to skip this cell if you completed the training tutorial
# register a model
from azureml.core import Workspace
# Load workspace settings from the local config.json.
ws = Workspace.from_config()
from azureml.core.model import Model
model_name = "sklearn_mnist"
# Register the pre-trained model file with the workspace.
model = Model.register(model_path="sklearn_mnist_model.pkl",
                       model_name=model_name,
                       tags={"data": "mnist", "model": "classification"},
                       description="Mnist handwriting recognition",
                       workspace=ws)
# download test data
import os
import urllib.request
data_folder = os.path.join(os.getcwd(), 'data')
os.makedirs(data_folder, exist_ok = True)
# Fetch the MNIST test images and labels (gzipped IDX format).
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', filename=os.path.join(data_folder, 'test-images.gz'))
urllib.request.urlretrieve('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', filename=os.path.join(data_folder, 'test-labels.gz'))
# -
# ## Set up the environment
#
# Start by setting up a testing environment.
#
# ### Import packages
#
# Import the Python packages needed for this tutorial.
# + tags=["check version"]
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number
print("Azure ML SDK Version: ", azureml.core.VERSION)
# -
# ### Retrieve the model
#
# You registered a model in your workspace in the previous tutorial. Now, load this workspace and download the model to your local directory.
# + tags=["load workspace", "download model"]
from azureml.core import Workspace
from azureml.core.model import Model
import os
# Reconnect to the workspace and fetch the model registered earlier.
ws = Workspace.from_config()
model=Model(ws, 'sklearn_mnist')
model.download(target_dir=os.getcwd(), exist_ok=True)
# verify the downloaded model file
file_path = os.path.join(os.getcwd(), "sklearn_mnist_model.pkl")
os.stat(file_path)
# -
# ## Test model locally
#
# Before deploying, make sure your model is working locally by:
# * Loading test data
# * Predicting test data
# * Examining the confusion matrix
#
# ### Load test data
#
# Load the test data from the **./data/** directory created during the training tutorial.
# +
from utils import load_data
import os
# Load the MNIST test set downloaded into ./data earlier.
data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0
y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)
# -
# ### Predict test data
#
# Feed the test dataset to the model to get predictions.
# +
import pickle
# sklearn.externals.joblib was removed in scikit-learn 0.23; prefer the
# standalone joblib package (installed alongside scikit-learn).
try:
    import joblib
except ImportError:  # fallback for very old scikit-learn environments
    from sklearn.externals import joblib
# Deserialize the trained classifier and score the full test set.
clf = joblib.load(os.path.join(os.getcwd(), 'sklearn_mnist_model.pkl'))
y_hat = clf.predict(X_test)
# -
# ### Examine the confusion matrix
#
# Generate a confusion matrix to see how many samples from the test set are classified correctly. Notice the mis-classified value for the incorrect predictions.
# +
from sklearn.metrics import confusion_matrix
# Rows are true labels, columns are predicted labels.
conf_mx = confusion_matrix(y_test, y_hat)
print(conf_mx)
# Accuracy = fraction of exact matches between predictions and labels.
print('Overall accuracy:', np.average(y_hat == y_test))
# -
# Use `matplotlib` to display the confusion matrix as a graph. In this graph, the X axis represents the actual values, and the Y axis represents the predicted values. The color in each grid represents the error rate. The lighter the color, the higher the error rate is. For example, many 5's are mis-classified as 3's. Hence you see a bright grid at (5,3).
# +
# normalize the diagonal cells so that they don't overpower the rest of the cells when visualized
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
# zero the diagonal so only the error cells carry visual signal
np.fill_diagonal(norm_conf_mx, 0)
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
# lighter cells = higher error rate for that (true, predicted) pair
cax = ax.matshow(norm_conf_mx, cmap=plt.cm.bone)
ticks = np.arange(0, 10, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticklabels(ticks)
fig.colorbar(cax)
plt.ylabel('true labels', fontsize=14)
plt.xlabel('predicted values', fontsize=14)
plt.savefig('conf.png')
plt.show()
# -
# ## Deploy as web service
#
# Once you've tested the model and are satisfied with the results, deploy the model as a web service hosted in ACI.
#
# To build the correct environment for ACI, provide the following:
# * A scoring script to show how to use the model
# * An environment file to show what packages need to be installed
# * A configuration file to build the ACI
# * The model you trained before
#
# ### Create scoring script
#
# Create the scoring script, called score.py, used by the web service call to show how to use the model.
#
# You must include two required functions into the scoring script:
# * The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started.
#
# * The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported.
#
# +
# %%writefile score.py
import json
import numpy as np
import os
import pickle
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# newer environments need `import joblib` instead -- confirm target sklearn version.
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from azureml.core.model import Model
def init():
    """Load the registered model into a module-level global; runs once at container start."""
    global model
    # retrieve the path to the model file using the model name
    model_path = Model.get_model_path('sklearn_mnist')
    model = joblib.load(model_path)
def run(raw_data):
    """Score a JSON payload of the form {"data": [...]} and return predictions as a list."""
    data = np.array(json.loads(raw_data)['data'])
    # make prediction
    y_hat = model.predict(data)
    # you can return any data type as long as it is JSON-serializable
    return y_hat.tolist()
# -
# ### Create environment file
#
# Next, create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs `scikit-learn` and `azureml-sdk`.
# + tags=["set conda dependencies"]
from azureml.core.conda_dependencies import CondaDependencies
# Declare the scoring script's conda dependencies and write them to myenv.yml.
myenv = CondaDependencies()
myenv.add_conda_package("scikit-learn")
with open("myenv.yml","w") as f:
    f.write(myenv.serialize_to_string())
# -
# Review the content of the `myenv.yml` file.
with open("myenv.yml","r") as f:
    print(f.read())
# ### Create configuration file
#
# Create a deployment configuration file and specify the number of CPUs and gigabyte of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you feel you need more later, you would have to recreate the image and redeploy the service.
# + tags=["configure web service", "aci"]
from azureml.core.webservice import AciWebservice
# 1 CPU core / 1 GB RAM is the default sizing; recreate and redeploy to resize.
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               tags={"data": "MNIST", "method" : "sklearn"},
                                               description='Predict MNIST with sklearn')
# -
# ### Deploy in ACI
# Estimated time to complete: **about 7-8 minutes**
#
# Configure the image and deploy. The following code goes through these steps:
#
# 1. Build an image using:
# * The scoring file (`score.py`)
# * The environment file (`myenv.yml`)
# * The model file
# 1. Register that image under the workspace.
# 1. Send the image to the ACI container.
# 1. Start up a container in ACI using the image.
# 1. Get the web service HTTP endpoint.
# + tags=["configure image", "create image", "deploy web service", "aci"]
# %%time
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig
# Bundle the scoring script and conda environment into the inference config.
inference_config = InferenceConfig(runtime= "python",
                                   entry_script="score.py",
                                   conda_file="myenv.yml")
# Build the image, push it to ACI and start the container (several minutes).
service = Model.deploy(workspace=ws,
                       name='sklearn-mnist-svc',
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aciconfig)
service.wait_for_deployment(show_output=True)
# -
# Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
# + tags=["get scoring uri"]
# REST scoring endpoint; share this URI with clients of the service.
print(service.scoring_uri)
# -
# ## Test deployed service
#
# Earlier you scored all the test data with the local version of the model. Now, you can test the deployed model with a random sample of 30 images from the test data.
#
# The following code goes through these steps:
# 1. Send the data as a JSON array to the web service hosted in ACI.
#
# 1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
#
# 1. Print the returned predictions and plot them along with the input images. Red font and inverse image (white on black) is used to highlight the misclassified samples.
#
# Since the model accuracy is high, you might have to run the following code a few times before you can see a misclassified sample.
# + tags=["score web service"]
import json
# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]
# Serialize the sampled images as a JSON payload for the web service.
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')
# predict using the deployed model
result = service.run(input_data=test_samples)
# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
    plt.subplot(1, n, i + 1)
    plt.axhline('')
    plt.axvline('')
    # use different color for misclassified sample
    font_color = 'red' if y_test[s] != result[i] else 'black'
    clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
    plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
    plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
    i = i + 1
plt.show()
# -
# You can also send raw HTTP request to test the web service.
# + tags=["score web service"]
import requests
# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
# Build the JSON body by hand to mirror what a raw REST client would send.
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"
headers = {'Content-Type':'application/json'}
# for AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}
resp = requests.post(service.scoring_uri, input_data, headers=headers)
print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
# -
# ## Clean up resources
#
# To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
# + tags=["delete web service"]
# Delete only the ACI web service; the workspace and resource group are kept.
service.delete()
# -
#
# If you're not going to use what you've created here, delete the resources you just created with this quickstart so you don't incur any charges. In the Azure portal, select and delete your resource group. You can also keep the resource group, but delete a single workspace by displaying the workspace properties and selecting the Delete button.
#
#
# ## Next steps
#
# In this Azure Machine Learning tutorial, you used Python to:
#
# > * Set up your testing environment
# > * Retrieve the model from your workspace
# > * Test the model locally
# > * Deploy the model to ACI
# > * Test the deployed model
#
# You can also try out the [regression tutorial](regression-part1-data-prep.ipynb).
# 
|
tutorials/img-classification-part2-deploy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
import json
import pandas as pd

# Thought Industries course-completion events endpoint.
ENDPOINT = "https://sample.thoughtindustries.com/incoming/v2/events/courseCompletion"
# BUG FIX: the original rebuilt this header for the data-fetch loop WITHOUT the
# "Bearer " prefix, which would make every data request fail authentication.
# Define it once with the correct scheme and reuse it everywhere.
headers = {"Authorization": "Bearer #Enter your API Key from thought industries here"}

# Create an empty list for the pagination cursors.
response_list = []
# First request starts the cursor chain.
response_dict = dict(requests.get(ENDPOINT, headers=headers).json())
# Determine if hasMore is equal to True
has_more = response_dict.get('pageInfo', {}).get('hasMore')
# Use the .get method on the dictionary to pull out the cursor value
cursor_value = str(response_dict.get('pageInfo', {}).get('cursor'))
response_list.append(cursor_value)

# Walk the remaining pages and record each page's cursor. Adjust the range
# to control how many pages of data you would like.
for _ in range(1, 20):
    next_response_dict = dict(requests.get(ENDPOINT + "?cursor=" + cursor_value,
                                           headers=headers).json())
    has_more = next_response_dict.get('pageInfo', {}).get('hasMore')
    cursor_value = next_response_dict.get('pageInfo', {}).get('cursor')
    response_list.append(cursor_value)

# Re-fetch each cursor's page and accumulate its 'events' records.
# DataFrame.append was removed in pandas 2.0; collect per-page frames in a
# list and concatenate once at the end instead.
frames = []
for cursor in response_list:
    response_dict = dict(requests.get(ENDPOINT + "?cursor=" + cursor,
                                      headers=headers).json())
    events_unnested = response_dict.get('events', {})
    frames.append(pd.DataFrame.from_dict(events_unnested))
course_data_df = pd.concat(frames)
print(course_data_df)
|
TI Web API.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Copyright 2021 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# -
# <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
#
# # Getting Started MovieLens: Download and Convert
#
# ## MovieLens25M
#
# The [MovieLens25M](https://grouplens.org/datasets/movielens/25m/) is a popular dataset for recommender systems and is used in academic publications. The dataset contains 25M movie ratings for 62,000 movies given by 162,000 users. Many projects use only the user/item/rating information of MovieLens, but the original dataset provides metadata for the movies, as well. For example, which genres a movie has. Although we may not improve state-of-the-art results with our neural network architecture in this example, we will use the metadata to show how to multi-hot encode the categorical features.
# ## Download the dataset
# +
# External dependencies
import os
from sklearn.model_selection import train_test_split
from nvtabular.utils import download_file
# Get dataframe library - cudf or pandas
from nvtabular.dispatch import get_lib
df_lib = get_lib()
# -
# We define our base input directory, containing the data.
INPUT_DATA_DIR = os.environ.get(
"INPUT_DATA_DIR", os.path.expanduser("~/nvt-examples/movielens/data/")
)
# We will download and unzip the data.
download_file(
"http://files.grouplens.org/datasets/movielens/ml-25m.zip",
os.path.join(INPUT_DATA_DIR, "ml-25m.zip"),
)
# ## Convert the dataset
# First, we take a look at the movie metadata.
movies = df_lib.read_csv(os.path.join(INPUT_DATA_DIR, "ml-25m/movies.csv"))
movies.head()
# `genres` is a multi-hot categorical feature with a varying number of genres per
# movie. It arrives as a single pipe-delimited string, so we split it into a list
# of strings. The `title` column is not needed downstream, so we drop it.
movies["genres"] = movies["genres"].str.split("|")
movies = movies.drop("title", axis=1)
movies.head()
# Save the movie genres in Parquet format so that NVTabular can consume them in
# the next notebook.
movies.to_parquet(os.path.join(INPUT_DATA_DIR, "movies_converted.parquet"))
# ## Splitting into train and validation dataset
# Load the raw user/movie ratings.
ratings = df_lib.read_csv(os.path.join(INPUT_DATA_DIR, "ml-25m", "ratings.csv"))
ratings.head()
# Drop the timestamp column (unused by the model) and split the ratings into
# train/validation sets with a simple random 80/20 split (seeded for
# reproducibility).
ratings = ratings.drop("timestamp", axis=1)
train, valid = train_test_split(ratings, test_size=0.2, random_state=42)
# Persist both splits to disk.
train.to_parquet(os.path.join(INPUT_DATA_DIR, "train.parquet"))
valid.to_parquet(os.path.join(INPUT_DATA_DIR, "valid.parquet"))
|
examples/getting-started-movielens/01-Download-Convert.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: py36
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display
# +
df = pd.read_csv('./datasets/nyc-rolling-sales.csv')
display(df.head(3))
display(df.columns)
display(df.info())
display(df.describe())
# -
# ## 1. Change values of `BOROUGH` to categories
df['BOROUGH'].unique()
# +
# Mapping from the dataset's raw integer borough codes to borough names.
_BOROUGH_NAMES = {
    1: 'Manhattan',
    2: 'Bronx',
    3: 'Brooklyn',
    4: 'Queens',
    5: 'Staten Island',
}


def change_values(x):
    """Map a numeric NYC borough code (1-5) to its borough name.

    Returns None for unrecognized codes — the same implicit fall-through
    behavior as the original if/elif chain.
    """
    return _BOROUGH_NAMES.get(x)
df['BOROUGH'] = df['BOROUGH'].apply(change_values)
# -
df['BOROUGH'].unique()
# ## 2. Missing values
# - Remove rows that have missing values in `SALE PRICE` column.
df = df[df['SALE PRICE'].notnull()]
# ## 3. Duplicate values
# - Remove rows that have duplicates in all columns
df = df.drop_duplicates(subset=df.columns)
# ## 4. Filter Outliers
# - quartile range should be between 0.10 and 0.90
# #### Convert values
# +
def str_to_float(x):
    """Coerce *x* to float; return NaN when conversion is impossible.

    The handler is narrowed from a bare ``except`` (which would also
    swallow KeyboardInterrupt/SystemExit) to the two exceptions that
    ``float()`` actually raises for bad input.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        return float('nan')
df['LAND SQUARE FEET'] = df['LAND SQUARE FEET'].apply(str_to_float)
df['GROSS SQUARE FEET'] = df['GROSS SQUARE FEET'].apply(str_to_float)
# +
def int_to_str(x):
    """Return ``str(x)``; fall back to ``'No Value'`` if conversion fails.

    ``str`` only raises for objects with a broken ``__str__``, so the
    bare ``except`` is narrowed to ``Exception`` (keeping the original
    fallback value unchanged).
    """
    try:
        return str(x)
    except Exception:
        return 'No Value'
df['Unnamed: 0'] = df['Unnamed: 0'].apply(int_to_str)
df['BLOCK'] = df['BLOCK'].apply(int_to_str)
df['LOT'] = df['LOT'].apply(int_to_str)
df['ZIP CODE'] = df['ZIP CODE'].apply(int_to_str)
# -
df = df.dropna()
# #### Detect outliers
df.plot(
kind = 'box',
subplots = True,
layout = (3,3),
sharex = False, sharey=False,
figsize=(20,10))
plt.show()
# Columns affected by outliers:
# + `RESIDENTIAL UNITS`
# + `COMMERCIAL UNITS`
# + `TOTAL UNITS`
# + `LAND SQUARE FEET`
# + `GROSS SQUARE FEET`
# + `YEAR BUILT`
# #### Filter
# +
# Retrieve only the columns identified above as containing outliers.
outlier_df = df[[
    'RESIDENTIAL UNITS',
    'COMMERCIAL UNITS',
    'TOTAL UNITS',
    'LAND SQUARE FEET',
    'GROSS SQUARE FEET',
    'YEAR BUILT']]
# Compute Tukey-style fences, but from the 0.10/0.90 quantiles (per the task
# statement) instead of the conventional 0.25/0.75 quartiles — so the fences
# are wider and fewer rows are discarded.
Q1 = outlier_df.quantile(0.10)
Q3 = outlier_df.quantile(0.90)
IQR = Q3-Q1
minimum = Q1 - 1.5*IQR
maximum = Q3 + 1.5*IQR
# Keep only rows where ALL six columns fall inside [minimum, maximum];
# `.all(axis=1)` collapses the per-column boolean frame to one flag per row.
condition = ((outlier_df <= maximum) & (outlier_df >= minimum)) \
    .all(axis=1)
df = df[condition]
# -
df.plot(
kind = 'box',
subplots = True,
layout = (3,3),
sharex = False, sharey=False,
figsize=(20,10))
plt.show()
|
Problems/Clean_NYC_Property_Sales/Clean_NYC_Property_Sales.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# run installed version of flopy or add local path
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
# -
# create the model packages (MODFLOW-NWT model with DIS, BAS, UPW, NWT, OC)
m = flopy.modflow.Modflow("drt_test", model_ws="temp", exe_name="mfnwt", version="mfnwt")
d = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10, nper=1, perlen=1, top=10, botm=0, steady=True)
b = flopy.modflow.ModflowBas(m, strt=10, ibound=1)
u = flopy.modflow.ModflowUpw(m, hk=10)
n = flopy.modflow.ModflowNwt(m)
o = flopy.modflow.ModflowOc(m)
# create the drt package
spd = []
for i in range(m.nrow):
spd.append([0, i, m.ncol-1, 5.0, 50.0, 1, 1, 1, 1.0])
d = flopy.modflow.ModflowDrt(m, stress_period_data={0:spd})
# run the drt model
m.write_input()
m.run_model()
# plot heads for the drt model
hds = flopy.utils.HeadFile(os.path.join(m.model_ws, m.name+".hds"))
hds.plot(colorbar=True)
# remove the drt package and create a standard drain file
spd = []
for i in range(m.nrow):
spd.append([0, i, m.ncol-1, 5.0, 1.0])
m.remove_package("DRT")
d = flopy.modflow.ModflowDrn(m, stress_period_data={0:spd})
# run the drain model
m.write_input()
m.run_model()
# plot the heads for the model with the drain
hds = flopy.utils.HeadFile(os.path.join(m.model_ws, m.name+".hds"))
hds.plot(colorbar=True)
|
examples/Notebooks/flopy3_drain_return.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark 2.3 (Python 3)
# language: python
# name: pyspark3
# ---
# # Spark Accumulators
#
# Spark accumulators are *distributed counters* which allow you to increment a global counter in Python UDFs. This is useful for counting certain events or cases, which are not directly part of your data processing. Best example would be to count broken records.
#
# ## Weather Data Example
# We will use the weather measurement data again as an example. Instead of using the Spark functions to extract the measurement information, we will write a Python UDF instead. Although this would not be required in our example, this approach might actually be useful in different scenarios. Even the weather data set contains more information which is at non-fixed locations and could not be extracted using simple Spark/SQL string functions.
#
# This example will show how to use accumulators to count records. For example this might be useful to count broken records in other examples (weather data does not have broken records, though).
# # 1 Load Data
#
# As we will not use the previous extraction, we simply load a single year as text data. In the next section we will apply a Python UDF to extract the desired information.
storageLocation = "s3://dimajix-training/data/weather"
# +
from pyspark.sql.functions import *
raw_weather = spark.read.text(storageLocation + "/2003").withColumn("year", lit(2003))
raw_weather.limit(5).toPandas()
# -
# # 2 Extract Weather
#
# We will now create and test a simple Python UDF for extracting the weather data. In the next section we will improve that function for counting invalid USAF and WBAN codes. But step by step...
# ## 2.1 Define Python UDF
# +
from pyspark.sql.types import *

# Return type of the UDF below: a struct with the extracted fields.
schema = StructType([
    StructField("usaf", StringType()),
    StructField("wban", StringType()),
    StructField("air_temperature", FloatType()),
    StructField("air_temperature_qual", IntegerType()),
])


@udf(schema)
def extract_weather(row):
    """Parse one fixed-width weather record (a raw text line) into a struct.

    The slice offsets appear to follow the NOAA ISD fixed-width layout —
    TODO confirm against the dataset documentation.
    """
    usaf = row[4:10]     # station identifier (USAF)
    wban = row[10:15]    # station identifier (WBAN)
    # raw value is stored in tenths of a degree, hence the /10 scaling
    air_temperature = float(row[87:92]) / 10
    air_temperature_qual = int(row[92])  # single-digit quality flag
    return (usaf, wban, air_temperature, air_temperature_qual)
# -
# ## 2.2 Use Python UDF
#
# Now we can apply the Python UDF `extract_weather` to process our data.
result = # YOUR CODE HERE
result.limit(5).toPandas()
# ### Inspect Schema
#
# Since the UDF returned multiple columns, we now have a nested schema.
result.printSchema()
# # 3 Use Accumulators
#
# As we just said, we want to improve the Python UDF to count certain important events. For example you might be interested at how many records are broken (none in our data set). We chose a different example: We want to count the number of invalid USAF and WBAN.
# ## 3.1 Create Accumulator
records_processed = # YOUR CODE HERE
invalid_usaf = # YOUR CODE HERE
invalid_wban = # YOUR CODE HERE
# ## 3.2 Increment accumulators
#
# Now we need to adopt our Python UDF to increment accumulators on specific events. We want to increment each of the accumulators whenever we process an invalid usaf and/or wban.
# +
from pyspark.sql.types import *
schema = StructType([
StructField("usaf", StringType()),
StructField("wban", StringType()),
StructField("air_temperature", FloatType()),
StructField("air_temperature_qual", IntegerType()),
])
@udf(schema)
def extract_weather(row):
usaf = row[4:10]
wban = row[10:15]
air_temperature = float(row[87:92])/10
air_temperature_qual = int(row[92])
# Increment accumulators
# YOUR CODE HERE
return (usaf, wban, air_temperature, air_temperature_qual)
# -
# ## 3.3 Execute Query
#
# Now we can use the modified UDF and check if the accumulators are used.
result = # YOUR CODE HERE
result.limit(5).toPandas()
# ### Inspect Counters
print("records_processed=" + str(records_processed.value))
print("invalid_usaf=" + str(invalid_usaf.value))
print("invalid_wban=" + str(invalid_wban.value))
# Surprisingly the counters are not increased. We will try `count()` instead.
# ### First run
#
# Now let's try to execute the UDF for every record. The method `count()` should do the job
# +
# YOUR CODE HERE
# -
print("records_processed=" + str(records_processed.value))
print("invalid_usaf=" + str(invalid_usaf.value))
print("invalid_wban=" + str(invalid_wban.value))
# ### Second Run
#
# Since that didn't work either, because Spark is too clever, let's force the execution by adding a filter condition which requires the UDF to be executed.
# +
# YOUR CODE HERE
# -
print("records_processed=" + str(records_processed.value))
print("invalid_usaf=" + str(invalid_usaf.value))
print("invalid_wban=" + str(invalid_wban.value))
# ### Third Run
#
# Accumulators won't be reset automatically between query executions.
# +
# YOUR CODE HERE
print("records_processed=" + str(records_processed.value))
print("invalid_usaf=" + str(invalid_usaf.value))
print("invalid_wban=" + str(invalid_wban.value))
# -
# ### Reset Counter
#
# You can also reset counters by simply assign them a value.
# +
# YOUR CODE HERE
# +
result.filter(result["measurement.wban"] != '123').count()
print("records_processed=" + str(records_processed.value))
print("invalid_usaf=" + str(invalid_usaf.value))
print("invalid_wban=" + str(invalid_wban.value))
# -
# ## 4 Afterthought
#
# It is important to understand that Spark accumulators actually count how often a specific event was triggered in our Python UDF. Since Spark might evaluate certain code paths multiple times (for example in cases of node failures or in cases when the execution plan executes a certain step multiple times). Therefore accumulators cannot and therefore should not be used for generating statistics over the data itself. But they can be used to understand which code paths have been used more often than others.
|
spark-training/spark-python/jupyter-advanced/07 - Accumulators - Skeleton.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="bwPlsCuR8DZG" colab_type="text"
# A **Double Deep Q Network (DDQN)** implementation in tensorflow with random experience replay.
# The code is tested with Gym's discrete action space environment, CartPole-v0 on Colab.
#
# ## Notations:
#
# Model network = $Q_{\theta}$
#
# Model parameter = $\theta$
#
# Model network Q value =
# $Q_{\theta}$
# (s, a)
#
# Target network = $Q_{\phi}$
#
# Target parameter = $\phi$
#
# Target network Q value =
# $Q_{\phi}$
# ($s^{'}$,
# $a^{'}$)
#
# ---
#
# ## Equations:
#
# TD target =
# r (s, a)
# $+$
# $\gamma$
# $Q_{\phi}$
# ($s^{\prime}$,
# $argmax_{a^{\prime}}$
# $Q_{\theta}$
# (s$^{\prime}$,
# a$^{\prime}$))
#
# TD error =
# (TD target)
# $-$
# (Model network Q value)
# =
# [r (s, a)
# $+$
# $\gamma$
# $Q_{\phi}$
# ($s^{\prime}$,
# $argmax_{a^{\prime}}$
# $Q_{\theta}$
# (s$^{\prime}$,
# a$^{\prime}$))]
# $-$
# $Q_{\theta}$
# (s, a)
#
# ---
#
# ## Key implementation details:
#
# Create a placeholder to feed Q values from model network:
#
# ```
# self.model_s_next_Q_val = tf.placeholder(tf.float32, [None,self.num_actions], name='model_s_next_Q_val')
# ```
#
# Select Q values from model network using $s^{'}$ as features & feed them to the training session:
#
# ```
# # select actions from model network
# model_s_next_Q_val = self.sess.run(self.model_Q_val, feed_dict={self.s: s_next})
#
# # training
# _, loss = self.sess.run([self.optimizer, self.loss],
# feed_dict = {self.s: s,
# self.a: a,
# self.r: r,
# self.s_next: s_next,
# self.done: done,
# self.model_s_next_Q_val: model_s_next_Q_val})
# ```
#
# Select minibatch actions with largest Q values from model network,
# create indices & select corresponding minibatch actions from target network:
#
# ```
# def td_target(self, s_next, r, done, model_s_next_Q_val, target_Q_val):
# # select action with largest Q value from model network
# model_max_a = tf.argmax(model_s_next_Q_val, axis=1, output_type=tf.dtypes.int32)
#
# arr = tf.range(tf.shape(model_max_a)[0], dtype=tf.int32) # create row indices
# indices = tf.stack([arr, model_max_a], axis=1) # create 2D indices
# max_target_Q_val = tf.gather_nd(target_Q_val, indices) # select minibatch actions from target network
# max_target_Q_val = tf.reshape(max_target_Q_val, (self.minibatch_size,1))
# ```
#
# ---
#
# ## References:
#
# [Deep Reinforcement Learning with Double Q-learning
# (<NAME>, 2016)](https://arxiv.org/pdf/1509.06461.pdf)
#
# ---
#
# <br>
#
# + id="WKUyXimN61sC" colab_type="code" colab={}
import tensorflow as tf
import gym
import numpy as np
from matplotlib import pyplot as plt
# + id="m98LF3tqEzMl" colab_type="code" colab={}
# random sampling for learning from experience replay
class Exp():
    """Fixed-size ring buffer of (s, a, r, done) transitions for experience replay."""

    def __init__(self, obs_size, max_size):
        # obs_size: number of state features per observation
        # max_size: buffer capacity before old entries are overwritten
        self.obs_size = obs_size
        self.num_obs = 0          # write cursor / count of stored observations
        self.max_size = max_size
        self.mem_full = False     # becomes True once the buffer has wrapped around
        # memory structure that stores samples from observations
        # (pre-allocated 2-D arrays, one row per transition)
        self.mem = {'s' : np.zeros(self.max_size * self.obs_size, dtype=np.float32).reshape(self.max_size,self.obs_size),
                    'a' : np.zeros(self.max_size * 1, dtype=np.int32).reshape(self.max_size,1),
                    'r' : np.zeros(self.max_size * 1).reshape(self.max_size,1),
                    'done' : np.zeros(self.max_size * 1, dtype=np.int32).reshape(self.max_size,1)}

    # stores one sample observation at each time step in experience memory
    def store(self, s, a, r, done):
        """Write one transition at the cursor, wrapping when the buffer fills."""
        i = self.num_obs % self.max_size
        self.mem['s'][i,:] = s
        self.mem['a'][i,:] = a
        self.mem['r'][i,:] = r
        self.mem['done'][i,:] = done
        self.num_obs += 1
        if self.num_obs == self.max_size:
            self.num_obs = 0  # reset cursor; subsequent writes overwrite oldest entries
            self.mem_full = True

    # returns a minibatch of experience
    def minibatch(self, minibatch_size):
        """Sample `minibatch_size` transitions uniformly (with replacement).

        The successor state s_next is taken from row i+1, which assumes rows
        are stored in time order; max_i is capped one below the last valid
        row so i+1 never runs off the end. NOTE(review): requires at least
        two stored observations, or np.random.randint(0) would raise.
        """
        if self.mem_full == False:
            max_i = min(self.num_obs, self.max_size) - 1
        else:
            max_i = self.max_size - 1
        # randomly sample a minibatch of indexes in [0, max_i)
        sampled_i = np.random.randint(max_i, size=minibatch_size)
        s = self.mem['s'][sampled_i,:].reshape(minibatch_size, self.obs_size)
        a = self.mem['a'][sampled_i].reshape(minibatch_size)
        r = self.mem['r'][sampled_i].reshape((minibatch_size,1))
        s_next = self.mem['s'][sampled_i + 1,:].reshape(minibatch_size, self.obs_size)
        done = self.mem['done'][sampled_i].reshape((minibatch_size,1))
        return (s, a, r, s_next, done)
# + id="rRZCt-FnrKLr" colab_type="code" colab={}
# Evaluates behavior policy while improving target policy
class DDQN_agent():
    """Double DQN agent (TensorFlow 1.x graph mode).

    Uses an epsilon-greedy behavior policy, uniform experience replay, and
    either Polyak ("soft") or periodic ("hard") target-network updates.

    BUG FIX versus the original: `replace_params` referenced the undefined
    attribute `self.replace_step`, raising AttributeError whenever hard
    replacement was selected; the hard-replacement interval is stored in
    `self.tau_step`.
    """

    def __init__(self, num_actions, obs_size, nhidden,
                 epoch,
                 epsilon, gamma, learning_rate,
                 replace, polyak, tau_step,
                 mem_size, minibatch_size):
        super(DDQN_agent, self).__init__()
        self.actions = range(num_actions)
        self.num_actions = num_actions
        self.obs_size = obs_size  # number of state features
        self.nhidden = nhidden  # hidden nodes
        self.epoch = epoch  # for epsilon decay & to decide when to start training
        self.epsilon = epsilon  # for exploration
        self.gamma = gamma  # discount factor
        self.learning_rate = learning_rate  # learning rate alpha
        # for params replacement
        self.replace = replace  # 'soft' for Polyak averaging, anything else = hard copy
        self.polyak = polyak  # soft-replacement mixing coefficient
        self.tau_step = tau_step  # interval (in learn steps) between hard replacements
        self.learn_step = 0  # number of training (replay) steps performed
        # for Experience replay
        self.mem = Exp(self.obs_size, mem_size)  # memory that holds experiences
        self.minibatch_size = minibatch_size
        self.step = 0  # environment steps taken across all episodes
        # for tensorflow ops
        self.built_graph()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        # start with target network identical to model network
        self.sess.run(self.target_replace_hard)
        self.cum_loss_per_episode = 0  # for charting display

    # decay epsilon after each epoch
    def epsilon_decay(self):
        """Multiplicatively decay epsilon every `epoch` steps, floored at 0.01."""
        if self.step % self.epoch == 0:
            self.epsilon = max(.01, self.epsilon * .95)

    # epsilon-greedy behaviour policy for action selection
    def act(self, s):
        """Return an action for state `s`: random w.p. epsilon, else argmax Q."""
        if np.random.random() < self.epsilon:
            i = np.random.randint(0, len(self.actions))
        else:
            # get Q(s,a) from model network
            Q_val = self.sess.run(self.model_Q_val,
                                  feed_dict={self.s: np.reshape(s, (1, s.shape[0]))})
            # get index of largest Q(s,a)
            i = np.argmax(Q_val)
        action = self.actions[i]
        self.step += 1
        self.epsilon_decay()
        return action

    def learn(self, s, a, r, done):
        """Store the transition; start minibatch training after the first epoch."""
        # stores observation in memory as experience at each time step
        self.mem.store(s, a, r, done)
        # starts training a minibatch from experience after 1st epoch
        if self.step > self.epoch:
            self.replay()  # train with experience replay

    def td_target(self, s_next, r, done, model_s_next_Q_val, target_Q_val):
        """Double-DQN target: r + gamma * Q_target(s', argmax_a' Q_model(s', a'))."""
        # select action with largest Q value from model network
        model_max_a = tf.argmax(model_s_next_Q_val, axis=1, output_type=tf.dtypes.int32)
        arr = tf.range(tf.shape(model_max_a)[0], dtype=tf.int32)  # row indices
        indices = tf.stack([arr, model_max_a], axis=1)  # 2D gather indices
        # evaluate the model-selected actions on the target network
        max_target_Q_val = tf.gather_nd(target_Q_val, indices)
        max_target_Q_val = tf.reshape(max_target_Q_val, (self.minibatch_size, 1))
        # if state is terminal (done), td_target reduces to r
        td_target = (1.0 - tf.cast(done, tf.float32)) * tf.math.multiply(self.gamma, max_target_Q_val) + r
        # exclude td_target from gradient computation
        td_target = tf.stop_gradient(td_target)
        return td_target

    # select Q(s,a) of the actions actually taken (behaviour policy) from model network
    def predicted_Q_val(self, a, model_Q_val):
        """Gather Q(s,a) for the minibatch's taken actions from the model network."""
        # 1D tensor of length = number of rows in a
        arr = tf.range(tf.shape(a)[0], dtype=tf.int32)
        # stack by column to create (row, action) indices
        indices = tf.stack([arr, a], axis=1)
        # select Q(s,a) using indices from model_Q_val
        Q_val = tf.gather_nd(model_Q_val, indices)
        Q_val = tf.reshape(Q_val, (self.minibatch_size, 1))
        return Q_val

    # construct neural network
    def built_net(self, var_scope, w_init, b_init, features, num_hidden, num_output):
        """Two-layer fully-connected net: ReLU hidden layer, linear Q-value output."""
        with tf.variable_scope(var_scope):
            feature_layer = tf.contrib.layers.fully_connected(features, num_hidden,
                                                              activation_fn=tf.nn.relu,
                                                              weights_initializer=w_init,
                                                              biases_initializer=b_init)
            Q_val = tf.contrib.layers.fully_connected(feature_layer, num_output,
                                                      activation_fn=None,
                                                      weights_initializer=w_init,
                                                      biases_initializer=b_init)
        return Q_val

    # construct tensorflow graph
    def built_graph(self):
        """Define placeholders, both networks, loss, optimizer, and replacement ops."""
        tf.reset_default_graph()
        self.s = tf.placeholder(tf.float32, [None, self.obs_size], name='s')
        self.a = tf.placeholder(tf.int32, [None, ], name='a')
        self.r = tf.placeholder(tf.float32, [None, 1], name='r')
        self.s_next = tf.placeholder(tf.float32, [None, self.obs_size], name='s_next')
        self.done = tf.placeholder(tf.int32, [None, 1], name='done')
        # Q(s', .) from the model network is fed back in as a constant so the
        # target computation does not backprop through the action selection.
        self.model_s_next_Q_val = tf.placeholder(tf.float32, [None, self.num_actions], name='model_s_next_Q_val')
        # weight, bias initialization
        w_init = tf.initializers.lecun_uniform()
        b_init = tf.initializers.he_uniform(1e-4)
        self.model_Q_val = self.built_net('model_net', w_init, b_init, self.s, self.nhidden, self.num_actions)
        self.target_Q_val = self.built_net('target_net', w_init, b_init, self.s_next, self.nhidden, self.num_actions)
        with tf.variable_scope('td_target'):
            td_target = self.td_target(self.s_next, self.r, self.done, self.model_s_next_Q_val, self.target_Q_val)
        with tf.variable_scope('predicted_Q_val'):
            predicted_Q_val = self.predicted_Q_val(self.a, self.model_Q_val)
        with tf.variable_scope('loss'):
            self.loss = tf.losses.huber_loss(td_target, predicted_Q_val)
        with tf.variable_scope('optimizer'):
            self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss)
        # get network params
        with tf.variable_scope('params'):
            self.target_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')
            self.model_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model_net')
        # ops that replace target-net params with model-net params
        with tf.variable_scope('hard_replace'):
            self.target_replace_hard = [t.assign(m) for t, m in zip(self.target_net_params, self.model_net_params)]
        with tf.variable_scope('soft_replace'):
            self.target_replace_soft = [t.assign(self.polyak * m + (1 - self.polyak) * t)
                                        for t, m in zip(self.target_net_params, self.model_net_params)]

    # decide soft or hard params replacement
    def replace_params(self):
        """Update target-network parameters (Polyak soft update, or periodic hard copy)."""
        if self.replace == 'soft':
            # soft params replacement
            self.sess.run(self.target_replace_soft)
        else:
            # hard params replacement every tau_step training steps.
            # FIX: original used the undefined `self.replace_step` here.
            if self.learn_step % self.tau_step == 0:
                self.sess.run(self.target_replace_hard)
            self.learn_step += 1

    def replay(self):
        """Train on one random minibatch and then update the target network."""
        # select minibatch of experiences from memory for training
        (s, a, r, s_next, done) = self.mem.minibatch(self.minibatch_size)
        # evaluate Q(s', .) on the model network (for Double-DQN action selection)
        model_s_next_Q_val = self.sess.run(self.model_Q_val, feed_dict={self.s: s_next})
        # training
        _, loss = self.sess.run([self.optimizer, self.loss],
                                feed_dict={self.s: s,
                                           self.a: a,
                                           self.r: r,
                                           self.s_next: s_next,
                                           self.done: done,
                                           self.model_s_next_Q_val: model_s_next_Q_val})
        self.cum_loss_per_episode += loss
        self.replace_params()
# + id="rzYSxqh71_j_" colab_type="code" colab={}
# compute stats
def stats(r_per_episode, R, cum_R, cum_R_episodes,
          cum_loss_per_episode, cum_loss, cum_loss_episodes):
    """Fold one episode's reward and loss into the running statistics.

    Returns a 5-tuple: (per-episode reward array, updated cumulative
    reward, cumulative-reward array, updated cumulative loss,
    cumulative-loss array).
    """
    # update the running totals first
    new_cum_R = cum_R_episodes + R
    new_cum_loss = cum_loss_episodes + cum_loss_per_episode
    # then append this episode's values to the history arrays
    rewards = np.append(r_per_episode, R)
    cum_rewards = np.append(cum_R, new_cum_R)
    cum_losses = np.append(cum_loss, new_cum_loss)
    return (rewards, new_cum_R, cum_rewards, new_cum_loss, cum_losses)
# plot performance
def plot_charts(values, y_label):
    """Draw a simple line chart of *values* (one point per episode)."""
    figure = plt.figure(figsize=(10, 5))
    plt.title("DQN performance")
    plt.xlabel("Episode")
    plt.ylabel(y_label)
    plt.plot(values)
    plt.show(figure)
def display(r_per_episode, cum_R, cum_loss, num_episodes=None):
    """Plot reward/loss curves and print the average reward and loss.

    ``num_episodes`` generalizes the original implicit dependency on the
    notebook-level global ``max_episodes``; the default ``None`` falls back
    to that global, so existing callers are unaffected.
    """
    plot_charts(r_per_episode, "Reward")
    plot_charts(cum_R, "cumulative_reward")
    plot_charts(cum_loss, "cumulative_loss")
    if num_episodes is None:
        num_episodes = max_episodes  # backward-compatible global fallback
    avg_r = np.sum(r_per_episode) / num_episodes
    print("avg_r", avg_r)
    avg_loss = np.sum(cum_loss) / num_episodes
    print("avg_loss", avg_loss)
# + id="w2pKtkEa2EVL" colab_type="code" colab={}
def run_episodes(env, agent, max_episodes):
    """Run `max_episodes` episodes of `env` with `agent`, then plot statistics."""
    # seed the history arrays with a single 0 so np.append has a base
    r_per_episode = np.array([0])
    cum_R = np.array([0])
    cum_loss = np.array([0])
    cum_R_episodes = 0
    cum_loss_episodes = 0
    # repeat each episode
    for episode_number in range(max_episodes):
        s = env.reset()  # reset new episode
        done = False
        R = 0  # undiscounted return accumulated over this episode
        # repeat each step
        while not done:
            # select action using behaviour policy (epsilon-greedy) from model network
            a = agent.act(s)
            # take action in environment
            next_s, r, done, _ = env.step(a)
            # agent stores the transition and (after warm-up) trains
            agent.learn(s, a, r, done)
            s = next_s
            R += r
        # NOTE(review): agent.cum_loss_per_episode is never reset between
        # episodes, so it is a running total, not a per-episode loss — confirm.
        (r_per_episode, cum_R_episodes, cum_R, cum_loss_episodes, cum_loss) = stats(r_per_episode, R, cum_R, cum_R_episodes,
                                                                                    agent.cum_loss_per_episode, cum_loss, cum_loss_episodes)
    display(r_per_episode, cum_R, cum_loss)
    env.close()
# + id="fOoF9WtG7Wyx" colab_type="code" colab={}
env = gym.make('CartPole-v0') # openai gym environment
max_episodes = 500
epoch = 100
num_actions = env.action_space.n # number of possible actions
obs_size = env.observation_space.shape[0] # dimension of state space
nhidden = 128 # number of hidden nodes
epsilon = .9
gamma = .9
learning_rate = .3
replace = 'soft' # params replacement type, 'soft' for soft replacement or empty string '' for hard replacement
polyak = .001
tau_step = 300
mem_size = 30000
minibatch_size = 64
# %matplotlib inline
# + id="hKctG0Y4kwK2" colab_type="code" outputId="3860401d-04dd-427c-a866-837dca7a14d9" colab={"base_uri": "https://localhost:8080/", "height": 1047}
agent = DDQN_agent(num_actions, obs_size, nhidden,
epoch,
epsilon, gamma, learning_rate,
replace, polyak, tau_step,
mem_size, minibatch_size)
run_episodes(env, agent, max_episodes)
|
DQN_variants/DDQN/double_DQN_cartpole.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/CC-MNNIT/2018-19-Classes/blob/master/MachineLearning/2019_04_11_ML3_content/Pandas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="73lc-L-5RK26" colab_type="text"
# #### Copyright 2019 MNNIT Computer Club.
# + id="xiAG2ZFERMll" colab_type="code" colab={}
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="sM4wzLQcDM2a" colab_type="text"
# # Pandas (Current Release : 0.23)
# + [markdown] id="VJdSpMaCDQ0O" colab_type="text"
# **Learning Objectives : **
#
# - What is Pandas, why do we need it ?
# - Basics of Pandas
# + [markdown] id="bqnnaiG7DdcG" colab_type="text"
# # What is Pandas ?
#
# - It is a python library/package which provides expressive **data structures** designed to make working with relational(tabular), labeled data easier.
# - It is built on top of NumPy
#
# Think of pandas as MS Excel for python programmers.
#
#
# ## It offers two main datastructures :
# 1. **Series** : 1D labeled homogeneously-typed array
# 2. **DataFrame** : General 2D labeled, size-mutable tabular structure with potentially heterogeneously-typed column
#
# Think of pandas DS as containers
# df is a container made of series containers.
# Series is a container made of scalars.
# + [markdown] id="F9ScdHStGjo2" colab_type="text"
# # Why do we need pandas ?
#
# Actually you can do without pandas, but as the saying goes
#
# > "Don't reinvent the wheel"
#
# 1. In a lot of problems the data is in a csv/tsv (a.k.a tabular) format or is brought to one if not.
# 2. Numpy arrays must be homogeneous; Pandas columns need to be homogeneous, but not the whole dataframe.
# 3. Has a lot of inbuilt functionalities which one might need to implement everytime they encounter a fairly common workflow
# + [markdown] id="EbY9u04ZKj3Z" colab_type="text"
# # When and When not to use pandas ?
#
# - Pandas is **good for large data(<100GB)**.
# - **Not for BIG data.**
# - Pandas is extremely efficient on small datasets(<1GB) and performance is rarely a concern.
#
# For big data there can be performance issues as pandas was not made for BIG data and the internal implementations might fail.
#
# + [markdown] id="D1mm-rH9LJ4k" colab_type="text"
# # Motivation to use
#
# An experience : large dataset required to reduce the memory usage to fit into local memory for analysis (even before reading the data set to a dataframe).
#
# - Pandas allows the csv file to be read in chunks, (eg. 1000 lines at a time)
# - then later on these chunks can be concatenated and the resulting dataframe fits the memory
#
# + [markdown] id="18_TSMoTL38m" colab_type="text"
# # Enough talk, show me the code
# + id="BzCyOEmdDPFP" colab_type="code" colab={}
import pandas as pd
import numpy as np
# + [markdown] id="l9RZl_zpOKEN" colab_type="text"
# # Pandas Series
#
#
#
# ```python
# class pandas.Series(data=None, index=None, dtype=None, name=None, copy=False, fastpath=False)
# ```
#
# > data can be :
# 1. Python dictionary
# 2. Python List
# 3. Numpy array
# 4. Scalar
#
# > ["fastpath" is an internal parameter, will probably be fixed in next release](https://github.com/pandas-dev/pandas/issues/6903)
#
#
# + [markdown] id="P69Q-HqFOR71" colab_type="text"
# ## Creating a Series
# + id="SOWXVyIMOHyx" colab_type="code" colab={}
# Using python list
pd.Series(['a','b','c'])
# + id="2ZX7y1a7PXdd" colab_type="code" colab={}
# series can accomodate heterogeneous datatypes
pd.Series(['a','b',4,1.414])
# + id="XJKbbQq_NQxf" colab_type="code" colab={}
# Using numpy array
pd.Series(np.array([1,2,3,4]))
# + [markdown] id="66Bv1azgQ9xs" colab_type="text"
# ### Indexing a Series
#
# - By default indices 0....len(data)-1 are assigned, as can be seen above
# - To set your own indices, use the parameter **index**
# - **Indices are also called labels**
# - Labels/indices may repeat (they need not be unique), but each label must be hashable
# + id="D6FqPYIQPjxu" colab_type="code" colab={}
pd.Series([180,72,"DG"],index=['Height','Weight','Name'])
# + id="Nu1SGkM9Rgne" colab_type="code" colab={}
human = pd.Series({'Height' : 180, 'Weight' : 72, 'Name' : 'DG'})
# + [markdown] id="0pC9jafvTQa8" colab_type="text"
# # Pandas DataFrame
#
#
#
# ```python
# class pandas.DataFrame(data=None, index=None, columns=None, dtype=None, copy=False)
# ```
#
# > **data** can be :
# - numpy ndarray (structured or homogeneous)
# - dict (values can be pandas Series, arrays constants, list like objects)
# - another pandas DataFrame
#
# > **index** : row labels
#
# > **columns** : column labels
#
# > A few points about DataFrame
# - Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns).
# - **Arithmetic operations align on both row and column labels.**
# - Can be thought of as a dict-like container for Series objects.
# - Most used pandas datastructure
#
# + [markdown] id="i0Vr746pZzqL" colab_type="text"
# ## Creating pandas DataFrame
# + id="d5ZBp9zZTUYQ" colab_type="code" colab={}
# from numpy array
pd.DataFrame(np.array([[1,2],[3,4]]))
# + id="pVbgc18rZsmJ" colab_type="code" colab={}
# from list
pd.DataFrame([1,2,'hello','world'])
# + id="F_sQp9PYY23w" colab_type="code" colab={}
# from dictionary
df = pd.DataFrame({'Height':[180,178],'Weight':[72,70],'Name':['DG','AR']})
df
# + id="vf7xmJ6gZNqH" colab_type="code" colab={}
# pandas automatically handles datatypes, for each column
# unlike numpy which would have forced all columns to be string objects in this case
df.dtypes
# + id="kfo6Up9sZWAy" colab_type="code" colab={}
# from a series
pd.DataFrame(human)
# + id="TbAghnPwGECu" colab_type="code" colab={}
# creates a replica of dataframe df
duplicate_df = df.copy()
# + [markdown] id="1mWnFtuJckaU" colab_type="text"
# ## Creating a dataframe from dataset
# + id="JnKM8WxdgdKR" colab_type="code" colab={}
# downloading data
# !rm -rf datasets
# !mkdir -p datasets
# !wget -O datasets/data.csv https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv
# # !gunzip datasets/data.tsv.gz
# + id="o0vLbZCHc3Ex" colab_type="code" colab={}
# see documentation for full set of parameters
iris_data = pd.read_csv('datasets/data.csv',sep=',')
# + id="g9mhMYXjeHwW" colab_type="code" colab={}
iris_data.head()
# + id="m00TzJN-h4JK" colab_type="code" colab={}
iris_data['species'].describe()
# + [markdown] id="gQNyffTwtB15" colab_type="text"
# ## Selecting and Indexing a DataFrame
#
# Three main methods :
#
# 1. .loc : location (primarily label based)
# 2. .iloc : integer location
# 3. [ ] :
#
#
# ### **General Selection Syntax**
#
# ```python
# df.loc[row-selector,column-selector]
# ```
#
# Selectors can be :
# 1. A label
# 2. List/Numpy-Array of labels
# 3. Label slice
#
# [pandas documentation about indexing](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html)
# + [markdown] id="otSs0CiSBf8m" colab_type="text"
# ### .loc
# + id="Aqw_xjo6tF9Z" colab_type="code" colab={}
# rows with labels 0,1,2.....10, and the species column
# 0:10 -> are row labels not the indices in a numerical sense
iris_data.loc[0:10,'species']
# + id="GjI8GF8Oz6jq" colab_type="code" colab={}
# rows with labels 10,90
# columns with labels sepal_length, species
iris_data.loc[np.array([10,90]),['sepal_length','species'] ]
# + id="dWsk_kUjESm6" colab_type="code" colab={}
# label slices
X_data = iris_data.loc[:,'sepal_length':'petal_width']
y_data = iris_data.loc[:,'species']
##### IMPORTANT #######
# While using .loc and label slices(start:stop), both the start and stop label are inclusive
# unlike regular python where stop is not included
# + id="Zo7kUSw7FAQi" colab_type="code" colab={}
X_data.head()
# + id="QcqWDOMRFA4-" colab_type="code" colab={}
y_data.head()
# + [markdown] id="6nt8FcJFBjfl" colab_type="text"
# ### .iloc
# + id="9Zf-2LGL1eio" colab_type="code" colab={}
iris_data.iloc[0:10]
# + id="4kQIRblSBoWh" colab_type="code" colab={}
iris_data.iloc[0:10,0:1]
# integer based selectors/slices.. .iloc won't take row,column label names
# + [markdown] id="WepUSwFTC8J1" colab_type="text"
# ### [ ]
# + id="0bSE0yJQBy2h" colab_type="code" colab={}
# [ ] selects columns
iris_data['species']
# + id="uzHeBvn4C_OT" colab_type="code" colab={}
# multiple columns are selected by passing a list containing
my_columns = ['species','sepal_length']
iris_data[my_columns]
# the above is equivalent to the following:
# iris_data[ ['species','sepal_length'] ]
# + [markdown] id="A57TnsnrDq7R" colab_type="text"
# ### Important point about selectors:
#
# ```python
# iris_data[['species','sepal_length']]
# ```
# is not the same as
# ```python
# iris_data[['sepal_length','species']]
# ```
#
# **ORDER MATTERS!!**
# + [markdown] id="tTruV_2OHjkI" colab_type="text"
# ### TODO
#
# 1. at()
# 2. iat()
# + [markdown] id="mHGUkvvqjo0b" colab_type="text"
# ## Modifying a Dataframe
# + id="KPn3Yhm2jvL2" colab_type="code" colab={}
# most common way is to use apply method and use a lambda function to modify the column
iris_data['petal_width'].apply(lambda x : x*10)
# + id="vSpd5eiSnk69" colab_type="code" colab={}
# we can also use replace method for a consistent replacement mapping
mapping = {flower:idx for idx,flower in enumerate(iris_data.species.unique())}
iris_data['species'].replace(mapping)
# + id="jKmoNZoJlRHu" colab_type="code" colab={}
# changing datatype of a column
iris_data['species'].astype('category')
# + id="Tmb7EiJgqEkR" colab_type="code" colab={}
iris_data
# + [markdown] id="jIDQJaDimVKS" colab_type="text"
# **Pandas also has a very powerful text manipulation tools under the *str* attribute of the dataframe. Do check it for text data/column**
#
# [Working with text Data in Pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/text.html)
# + [markdown] id="c9C5DhpgH7o3" colab_type="text"
# # Data Exploration, Statistics tools
# + id="m7_F7hj-qLg2" colab_type="code" colab={}
iris_data.head(25)
# + id="NL8BVCrPqRAh" colab_type="code" colab={}
iris_data.tail(7)
# + id="R9WKceCTIBjR" colab_type="code" colab={}
iris_data.describe()
# + id="8-c8_G2XILIo" colab_type="code" colab={}
iris_data.info()
# + id="jAk7_vtsIMzD" colab_type="code" colab={}
# can also be applied on sub part of a dataframe
# `numeric_only=True` restricts the aggregation to numeric columns:
# without it, pandas >= 2.0 raises a TypeError because the string
# column `species` cannot be averaged.
iris_data.mean(numeric_only=True)
# iris_data.median(numeric_only=True)
# iris_data.std(numeric_only=True)
# iris_data.mode()  # mode works on any dtype, no flag needed
# + id="eSmtq9qZIPLD" colab_type="code" colab={}
# gives mean of sepal_length of first 11 entries
iris_data.loc[0:10,'sepal_length'].mean()
# + id="dzsSDYXFIaQU" colab_type="code" colab={}
# `numeric_only=True` keeps the median restricted to numeric columns;
# pandas >= 2.0 raises a TypeError on the string `species` column otherwise.
iris_data.median(numeric_only=True)
# + [markdown] id="fEB9qLnwJe94" colab_type="text"
# # Exporting a pandas dataframe/series
# + id="1e65HL40Ju7A" colab_type="code" colab={}
# returns a numpy array
# the following is going to be deprecated in 0.24
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.values.html#pandas.DataFrame.values
# will be replaced by
# iris_data.to_numpy()
iris_data.values
# + id="pCuMh212JiQq" colab_type="code" colab={}
# index = False ensures that indices are not written to the csv file
iris_data.to_csv('datasets/fromDataFrame.csv',sep=',',encoding='utf-8', index=False)
# + [markdown] id="HYMBeuhsqj7D" colab_type="text"
# # Try this yourself (Home Assignment ;-) )
#
# 1. Concatenating two dataframes along axis=0, axis=1
# 2. Appending a row to dataframe
# + [markdown] id="Q5Bo0Sahq7N0" colab_type="text"
# ---
#
# Authored By [<NAME>](https://github.com/dipunj) | Report errors/typos as github issues.
#
# ---
|
MachineLearning/2019_04_11_ML3_content/Pandas.ipynb
|
# # Bagging
#
# This notebook introduces a very natural strategy to build ensembles of
# machine learning models named "bagging".
#
# "Bagging" stands for Bootstrap AGGregatING. It uses bootstrap resampling
# (random sampling with replacement) to learn several models on random
# variations of the training set. At predict time, the predictions of each
# learner are aggregated to give the final predictions.
#
# First, we will generate a simple synthetic dataset to get insights regarding
# bootstrapping.
# +
import pandas as pd
import numpy as np

# Seeded generator shared by the whole notebook so results are reproducible.
rng = np.random.RandomState(1)


def generate_data(n_samples=30):
    """Build a noisy cubic regression problem.

    Returns a tuple ``(data_train, data_test, target_train)`` where the
    train inputs are sampled uniformly on [-3, 3], the test inputs form a
    regular 300-point grid over the same interval, and the target is a
    cubic signal plus Gaussian noise, rescaled to unit standard deviation.
    """
    lower, upper = -3, 3
    # Draw order matters for reproducibility: uniform inputs first,
    # then the Gaussian noise, exactly as in the seeded stream.
    inputs = rng.uniform(lower, upper, size=n_samples)
    eps = 4.0 * rng.randn(n_samples)
    outputs = inputs ** 3 - 0.5 * (inputs + 1) ** 2 + eps
    outputs /= outputs.std()

    grid = np.linspace(upper, lower, num=300)
    data_train = pd.DataFrame(inputs, columns=["Feature"])
    data_test = pd.DataFrame(grid, columns=["Feature"])
    target_train = pd.Series(outputs, name="Target")
    return data_train, data_test, target_train
# +
import matplotlib.pyplot as plt
import seaborn as sns
data_train, data_test, target_train = generate_data(n_samples=30)
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
_ = plt.title("Synthetic regression dataset")
# -
#
# The relationship between our feature and the target to predict is non-linear.
# However, a decision tree is capable of approximating such a non-linear
# dependency:
# +
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=3, random_state=0)
tree.fit(data_train, target_train)
y_pred = tree.predict(data_test)
# -
# Remember that the term "test" here refers to data that was not used for
# training and computing an evaluation metric on such a synthetic test set
# would be meaningless.
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
plt.plot(data_test, y_pred, label="Fitted tree")
plt.legend()
_ = plt.title("Predictions by a single decision tree")
# Let's see how we can use bootstrapping to learn several trees.
#
# ## Bootstrap resampling
#
# A bootstrap sample corresponds to a resampling with replacement, of the
# original dataset, a sample that is the same size as the original dataset.
# Thus, the bootstrap sample will contain some data points several times while
# some of the original data points will not be present.
#
# We will create a function that given `data` and `target` will return a
# resampled variation `data_bootstrap` and `target_bootstrap`.
def bootstrap_sample(data, target):
    """Return one bootstrap resample of ``(data, target)``.

    Rows are drawn with replacement, keeping the sample size equal to the
    original: some rows appear several times, others not at all.
    """
    n_rows = target.shape[0]
    # Same draw as a plain resample-with-replacement: uniform positions in
    # [0, n_rows), using the notebook-level seeded `rng`.
    drawn_positions = rng.choice(np.arange(n_rows), size=n_rows, replace=True)
    # `.iloc` selects by integer position (labels may repeat after resampling).
    return data.iloc[drawn_positions], target.iloc[drawn_positions]
#
# We will generate 3 bootstrap samples and qualitatively check the difference
# with the original dataset.
n_bootstraps = 3
for bootstrap_idx in range(n_bootstraps):
    # Draw one bootstrap variation of the training set.
    # (Renames the original's misspelled local `target_booststrap`.)
    data_bootstrap, target_bootstrap = bootstrap_sample(data_train, target_train)

    # Overlay the resampled points (large hollow blue markers) on the
    # original data (solid black) so repeated draws show up as darker rings.
    plt.figure()
    plt.scatter(data_bootstrap["Feature"], target_bootstrap,
                color="tab:blue", facecolors="none",
                alpha=0.5, label="Resampled data", s=180, linewidth=5)
    plt.scatter(data_train["Feature"], target_train,
                color="black", s=60,
                alpha=1, label="Original data")
    plt.title(f"Resampled data #{bootstrap_idx}")
    plt.legend()
#
# Observe that the 3 variations all share common points with the original
# dataset. Some of the points are randomly resampled several times and appear
# as darker blue circles.
#
# The 3 generated bootstrap samples are all different from the original dataset
# and from each other. To confirm this intuition, we can check the number of
# unique samples in the bootstrap samples.
# +
data_train_huge, data_test_huge, target_train_huge = generate_data(
n_samples=100_000)
data_bootstrap_sample, target_bootstrap_sample = bootstrap_sample(
data_train_huge, target_train_huge)
ratio_unique_sample = (np.unique(data_bootstrap_sample).size /
data_bootstrap_sample.size)
print(
f"Percentage of samples present in the original dataset: "
f"{ratio_unique_sample * 100:.1f}%"
)
# -
#
# On average, ~63.2% of the original data points of the original dataset will
# be present in a given bootstrap sample. The other ~36.8% are repeated
# samples.
#
# We are able to generate many datasets, all slightly different.
#
# Now, we can fit a decision tree for each of these datasets and they all shall
# be slightly different as well.
bag_of_trees = []
for bootstrap_idx in range(n_bootstraps):
tree = DecisionTreeRegressor(max_depth=3, random_state=0)
data_bootstrap_sample, target_bootstrap_sample = bootstrap_sample(
data_train, target_train)
tree.fit(data_bootstrap_sample, target_bootstrap_sample)
bag_of_trees.append(tree)
#
# Now that we created a bag of different trees, we can use each of the trees to
# predict the samples within the range of data. They shall give slightly
# different predictions.
# +
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
for tree_idx, tree in enumerate(bag_of_trees):
tree_predictions = tree.predict(data_test)
plt.plot(data_test, tree_predictions, linestyle="--", alpha=0.8,
label=f"Tree #{tree_idx} predictions")
plt.legend()
_ = plt.title("Predictions of trees trained on different bootstraps")
# -
# ## Aggregating
#
# Once our trees are fitted and we are able to get predictions for each of
# them. In regression, the most straightforward way to combine those
# predictions is just to average them: for a given test data point, we feed the
# input feature values to each of the `n` trained models in the ensemble and as
# a result compute `n` predicted values for the target variable. The final
# prediction of the ensemble for the test data point is the average of those
# `n` values.
#
# We can plot the averaged predictions from the previous example.
# +
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
bag_predictions = []
for tree_idx, tree in enumerate(bag_of_trees):
tree_predictions = tree.predict(data_test)
plt.plot(data_test, tree_predictions, linestyle="--", alpha=0.8,
label=f"Tree #{tree_idx} predictions")
bag_predictions.append(tree_predictions)
bag_predictions = np.mean(bag_predictions, axis=0)
plt.plot(data_test, bag_predictions, label="Averaged predictions",
linestyle="-")
plt.legend()
_ = plt.title("Predictions of bagged trees")
# -
#
# The unbroken red line shows the averaged predictions, which would be the
# final predictions given by our 'bag' of decision tree regressors. Note that
# the predictions of the ensemble is more stable because of the averaging
# operation. As a result, the bag of trees as a whole is less likely to overfit
# than the individual trees.
#
# ## Bagging in scikit-learn
#
# Scikit-learn implements the bagging procedure as a "meta-estimator", that is
# an estimator that wraps another estimator: it takes a base model that is
# cloned several times and trained independently on each bootstrap sample.
#
# The following code snippet shows how to build a bagging ensemble of decision
# trees. We set `n_estimators=100` instead of 3 in our manual implementation
# above to get a stronger smoothing effect.
# +
from sklearn.ensemble import BaggingRegressor

# Meta-estimator: clones the base tree and fits each clone on its own
# bootstrap sample of (data_train, target_train).
# NOTE(review): `base_estimator` was renamed to `estimator` in
# scikit-learn 1.2 and removed in 1.4 — rename when upgrading.
bagged_trees = BaggingRegressor(
    base_estimator=DecisionTreeRegressor(max_depth=3),
    n_estimators=100,
)
_ = bagged_trees.fit(data_train, target_train)
# -
#
# Let us visualize the predictions of the ensemble on the same interval of data:
# +
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
bagged_trees_predictions = bagged_trees.predict(data_test)
plt.plot(data_test, bagged_trees_predictions)
_ = plt.title("Predictions from a bagging classifier")
# -
#
# Because we use 100 trees in the ensemble, the average prediction is indeed
# slightly smoother but very similar to our previous average plot.
#
# It is possible to access the internal models of the ensemble stored as a
# Python list in the `bagged_trees.estimators_` attribute after fitting.
#
# Let us compare the based model predictions with their average:
# +
import warnings
with warnings.catch_warnings():
# ignore scikit-learn warning when accessing bagged estimators
warnings.filterwarnings(
"ignore",
message="X has feature names, but DecisionTreeRegressor was fitted without feature names",
)
for tree_idx, tree in enumerate(bagged_trees.estimators_):
label = "Predictions of individual trees" if tree_idx == 0 else None
tree_predictions = tree.predict(data_test)
plt.plot(data_test, tree_predictions, linestyle="--", alpha=0.1,
color="tab:blue", label=label)
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
bagged_trees_predictions = bagged_trees.predict(data_test)
plt.plot(data_test, bagged_trees_predictions,
color="tab:orange", label="Predictions of ensemble")
_ = plt.legend()
# -
#
# We used a low value of the opacity parameter `alpha` to better appreciate the
# overlap in the prediction functions of the individual trees.
#
# This visualization gives some insights on the uncertainty in the predictions
# in different areas of the feature space.
#
# ## Bagging complex pipelines
#
# While we used a decision tree as a base model, nothing prevents us of using
# any other type of model.
#
# As we know that the original data generating function is a noisy polynomial
# transformation of the input variable, let us try to fit a bagged polynomial
# regression pipeline on this dataset:
# +
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import make_pipeline
polynomial_regressor = make_pipeline(
MinMaxScaler(),
PolynomialFeatures(degree=4),
Ridge(alpha=1e-10),
)
# -
#
# This pipeline first scales the data to the 0-1 range with `MinMaxScaler`.
# Then it extracts degree-4 polynomial features. The resulting features will
# all stay in the 0-1 range by construction: if `x` lies in the 0-1 range then
# `x ** n` also lies in the 0-1 range for any value of `n`.
#
# Then the pipeline feeds the resulting non-linear features to a regularized
# linear regression model for the final prediction of the target variable.
#
# Note that we intentionally use a small value for the regularization parameter
# `alpha` as we expect the bagging ensemble to work well with slightly overfit
# base models.
#
# The ensemble itself is simply built by passing the resulting pipeline as the
# `base_estimator` parameter of the `BaggingRegressor` class:
# The ensemble clones the polynomial pipeline and fits each clone on a
# bootstrap sample; `random_state=0` makes the resampling reproducible.
# NOTE(review): `base_estimator` was renamed to `estimator` in
# scikit-learn 1.2 and removed in 1.4 — rename when upgrading.
bagging = BaggingRegressor(
    base_estimator=polynomial_regressor,
    n_estimators=100,
    random_state=0,
)
_ = bagging.fit(data_train, target_train)
# +
for i, regressor in enumerate(bagging.estimators_):
regressor_predictions = regressor.predict(data_test)
base_model_line = plt.plot(
data_test, regressor_predictions, linestyle="--", alpha=0.2,
label="Predictions of base models" if i == 0 else None,
color="tab:blue"
)
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
bagging_predictions = bagging.predict(data_test)
plt.plot(data_test, bagging_predictions,
color="tab:orange", label="Predictions of ensemble")
plt.ylim(target_train.min(), target_train.max())
plt.legend()
_ = plt.title("Bagged polynomial regression")
# -
#
# The predictions of this bagged polynomial regression model looks
# qualitatively better than the bagged trees. This is somewhat expected since
# the base model better reflects our knowledge of the true data generating
# process.
#
# Again the different shades induced by the overlapping blue lines let us
# appreciate the uncertainty in the prediction of the bagged ensemble.
#
# To conclude this notebook, we note that the bootstrapping procedure is a
# generic tool of statistics and is not limited to build ensemble of machine
# learning models. The interested reader can learn more on the [Wikipedia
# article on
# bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)).
|
notebooks/ensemble_bagging.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ejercicio 1: Peticiones HTTP usando Sockets
# Aquí un ejemplo de petición via sockets
# +
import ssl
import socket

# Plain TCP connection to Google's HTTPS port, then a manual GET request.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('google.com', 443))
# ssl.wrap_socket() was deprecated in Python 3.7 and removed in 3.12;
# use an SSLContext instead. `server_hostname` enables SNI and
# certificate hostname verification.
context = ssl.create_default_context()
s = context.wrap_socket(s, server_hostname='google.com')
s.sendall(b"GET / HTTP/1.1\r\nHost:www.google.com\r\n\r\n")
data = s.recv(1024)
print(data)
# -
# Crear un socket que haga una petición a la web [ifconfig.io](http://ifconfig.io) para obtener nuestra IP pública
# > Punto extra! formatea la salida para mostrar SOLO LA IP, sin el resto del texto
# +
import socket

# Open a TCP connection to ifconfig.io (plain HTTP, port 80) and ask for
# our public IP. The `User-Agent: curl` header makes the service return a
# plain-text body instead of an HTML page.
with socket.socket() as s:
    s.connect(('ifconfig.io', 80))
    s.sendall(b'GET / HTTP/1.1\r\nHost: ifconfig.io\r\nUser-Agent: curl\r\n\r\n')
    data = s.recv(1024)
    data_str = data.decode('ascii')
    # HTTP headers and body are separated by a blank line (\r\n\r\n);
    # the body is the IP address followed by a trailing newline.
    ip = data_str.split('\r\n\r\n')[1].rstrip()
    print(ip)
# -
# # Ejercicio 2: HTTP protocol client con Python
# Establecemos una conexión con la web `www.python.org`
# +
import http.client
conn = http.client.HTTPSConnection("www.python.org")
print(conn)
# -
# ## Peticiones GET
# Lanzamos una petición GET para obtener la información de esa página
conn.request("GET", "/")
# A continuación vamos a guardar la respuesta en un objeto e inspeccionar el resultado
r1 = conn.getresponse()
print(r1)
# Los campos disponibles en el objeto `HTTPResponse` puedes encontrarlos en la [documentación oficial de Python](https://docs.python.org/3/library/http.client.html#httpresponse-objects).
#
# Vamos a inspeccionar los principales:
print(r1.status, r1.reason)
data1 = r1.read() # This will return entire content.
print(data1)
# Ahora vamos a hacer lo mismo con una respuesta inválida, por ejemplo, `docs.python.org/parrot.spam`
conn = http.client.HTTPSConnection("docs.python.org")
conn.request("GET", "/parrot.spam")
r2 = conn.getresponse()
print(r2.status, r2.reason)
data2 = r2.read()
print(data2)
conn.close()
# ## Peticiones `POST`
# En la web de [http://bugs.python.org/](http://bugs.python.org/) tienen un issue abierto (el [12524](http://bugs.python.org/issue12524)) para que los desarrolladores python puedan hacer pruebas de peticiones POST sobre una web. Vamos a hacer una prueba nosotros.
#
# En este caso tenemos que configurar más parámetros que con el get, ya que no vamos a obtener información sino que vamos a modificar un servicio externo. Por ello, vamos a importar la librería `urllib`, para gestionar mejor la construcción de la URL
import http.client, urllib.parse
# A continuación vamos a construir la url y las headers de nuestra petición:
params = urllib.parse.urlencode({'@number': 12524, '@type': 'issue', '@action': 'show'})
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
# Construimos la conexión y lanzamos la petición:
conn = http.client.HTTPConnection("bugs.python.org")
conn.request("POST", "", params, headers)
response = conn.getresponse()
print(response.status, response.reason)
# Parece que algo no ha ido bien... prueba ahora con una conexión https:
conn = http.client.HTTPSConnection("bugs.python.org")
conn.request("POST", "", params, headers)
response = conn.getresponse()
print(response.status, response.reason)
# Muestra el contenido de la respuesta:
data = response.read()
data
# Y por último, cierra la conexión:
conn.close()
# # Ejercicio 3: Librería `requests`
# La librería más usada por los desarrolladores Python para hacer requests a una API es **[requests: http para humanos](https://docs.python-requests.org/es/latest/)**
#
# ! pip install requests
import requests
# Uno de los métodos HTTP más comunes es GET. El método GET indica que está intentando obtener o recuperar datos de un recurso específico.
requests.get('https://api.github.com')
# Una respuesta (response) es un objeto poderoso para inspeccionar los resultados de la petición. Haz la misma petición nuevamente, pero esta vez almacena el valor de retorno en una variable para que puedas ver más de cerca sus atributos y comportamientos:
response = requests.get('https://api.github.com')
response.status_code
# A veces, puedes usar el campo `status_code` para tomar decisiones en el código
if response.status_code == 200:
print('Success!')
elif response.status_code == 404:
print('Not Found.')
# Prueba ahora con una URL inválida
response = requests.get('https://api.github.com/invalid')
response.status_code
# La respuesta de un GET a menudo tiene información valiosa, conocida como carga útil, en el cuerpo del mensaje. Usando los atributos y métodos de `response`, puede ver la carga útil en una variedad de formatos diferentes.
response = requests.get('https://api.github.com')
response.content
response.text
response.json()
# ## Headers
# Los encabezados de respuesta pueden darte información útil, como el tipo de contenido de la carga útil de respuesta y un límite de tiempo sobre cuánto tiempo almacenar en caché la respuesta. Para ver estos encabezados, accede al campo `headers`:
response.headers
# Una forma común de personalizar una solicitud GET es pasar valores a través de parámetros de cadena de consulta en la URL. Para hacer usa la función get utilizando el parámetro `params`.
#
# Por ejemplo, puede usar la API de búsqueda de GitHub para buscar la biblioteca de solicitudes:
# +
import requests
# Search GitHub's repositories for requests
response = requests.get(
'https://api.github.com/search/repositories',
params={'q': 'requests+language:python'},
)
# Inspect some attributes of the `requests` repository
json_response = response.json()
repository = json_response['items'][0]
print(f'Repository name: {repository["name"]}') # Python 3.6+
print(f'Repository description: {repository["description"]}') # Python 3.6+
# -
# ## Otros métodos HTTP
response = requests.post('https://httpbin.org/post', data={'key':'value'})
response.json()
requests.post('https://httpbin.org/post', data={'key':'value'})
requests.put('https://httpbin.org/put', data={'key':'value'})
requests.delete('https://httpbin.org/delete')
requests.head('https://httpbin.org/get')
requests.patch('https://httpbin.org/patch', data={'key':'value'})
requests.options('https://httpbin.org/get')
# ## Ejercicio 4: Explora otras librerías
response = requests.get(
'https://cat-fact.herokuapp.com/facts',
params={'animal_type': 'horse'},
)
response.json()
response = requests.get('https://api.fbi.gov/wanted/v1/list')
data = response.json()
print(data['total'])
print(data['items'][:10])
response = requests.get('https://www.fruityvice.com/api/fruit/carbohydrates/', params={"min": "0"})
response.json()
response = requests.get('https://api.imgflip.com/get_memes')
response.json()
# Elige una api pública (a poder ser sin token de autorización) y explora sus endpoints con alguna librería de protocolo http
#
# https://github.com/public-apis/public-apis
|
ejercicios/protocolo-http.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import theme
theme.load_style()
# # Lesson 20: The Advection Diffusion Equation
#
# <img src="Images/advec-diffus.d/intro.png"/>
#
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a>
#
# This lecture by <NAME> is licensed under the
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. All code examples are also licensed under the [MIT license](http://opensource.org/licenses/MIT).
# # Topics
#
# - [Introduction](#intro)
# - [Advection-Diffusion, Strong Form](#strong)
# - [Advection-Diffusion, Weak Form](#weak)
# # <a id='intro'></a>Introduction
#
# The advection–diffusion equation, drift–diffusion equation, Smoluchowski equation , or generic scalar transport equation describe systems where some species is transported through a potential driven flux diffusion within a moving fluid. The time‐dependent response assuming an incompressible fluid, constant diffusion coefficient, and no sources/sinks is governed by:
#
# $$
# \frac{\partial c}{\partial t} = D\nabla^2 c -
# \boldsymbol{v}\nabla c
# $$
#
# Where $c$ is the concentration, $D$ is the diffusion coefficient, and $\boldsymbol{v}$ is the flow velocity. We can visualize this for a steady flow with some species added at $t=0$, $x=0$. This could be poison added to the water supply or medicine injected in the blood stream, depending on your line of work.
#
# <img src='./Images/advec-diffus.d/concentration.png' style='width:80%'/>
#
# <div class='msg'>
# In his 1905 publication on Brownian motion, <NAME> showed that the diffusion coefficient could be derived from a distribution of molecular velocities as a function of the Boltzmann constant and absolute temperature. This has led to an understanding of physical phenomena in diverse fields ranging from semi‐conductors to fish migration.
# </div>
# # <a id='strong'></a> Advection-Diffusion, Strong Form
#
# <img src='./Images/advec-diffus.d/strong.png' style='width:35%'/>
#
# For the diffusion process shown, $u(x)=c(x)$, the concentration $\left(\frac{\text{moles}}{\text{volume}}\right)$ of some solute. The steady state mass balance on the solute is
#
# $$
# \left(Avu\right)_x-\left(Avu\right)_{x+\Delta x} - \left(Aq\right)_{x+\Delta x} + \Delta x s_{x+\Delta x/2}=0
# $$
#
# Dividing by $\Delta x$ and taking the limit as $\Delta x\rightarrow 0$ and with a change in sign
#
# $$
# \frac{d}{dx}\left(Avu\right) + \frac{d}{dx}\left(Aq\right) - s = 0
# $$
#
# If the fluid is incompressible, $\left(Av\right)$ is constant (i.e. the volumetric flow rate is the same at every point)
#
# $$
# \frac{d}{dx}\left(Avu\right)=\left(Av\frac{du}{dx} + u\frac{d(Av)}{dx}\right) = Av\frac{du}{dx}
# $$
#
# Substituting gives
#
# $$
# Av\frac{du}{dx}+\frac{d}{dx}(Aq) - s = 0
# $$
#
# From Fick's law of diffusion, $q=-k\frac{du}{dx}$, giving
#
# $$
# Av\frac{du}{dx}-\frac{d}{dx}\left(Ak\frac{du}{dx}\right)-s=0
# $$
# # <a id='weak'></a> Advection-Diffusion, Weak Form
#
# Starting with the strong form and a generalized expression for the boundary conditions, apply the three step process
#
# 1. Multiply by a weight function and integrate over the problem domain
#
# $$
# \int_{\Omega}w\left(
# Av\frac{du}{dx}-\frac{d}{dx}\left(Ak\frac{du}{dx}\right)-s
# \right)dx=0, \quad \forall w \\
# Aw\left(kn\frac{du}{dx}+\overline{q}\right)\Bigg|_{\Gamma_q}=0, \quad \forall w
# $$
#
# As
|
Lessons/Lesson13_AdvectionDiffusion.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + raw_mimetype="text/restructuredtext" active=""
# Bayesian estimation of prevalence models
# ###########################################################################
# This section describes how to use the prevalence models for Bayesian
# estimation in Episuite.
#
# .. seealso::
#
# `Bayesian modelling for COVID-19 seroprevalence studies <https://blog.christianperone.com/2020/06/talk-bayesian-modelling-for-covid-19-seroprevalence-studies/>`_
# This is a talk that uses the same models implemented in Episuite.
#
# `Estimating SARS-CoV-2 seroprevalence and epidemiological parameters with uncertainty from serological surveys <https://www.medrxiv.org/content/10.1101/2020.04.15.20067066v2>`_
# Excellent recent articule by :cite:t:`Larremore2020` on estimation for seroprevalence studies.
#
# Episuite models are based on Numpyro, with uses Jax.
# +
# Core dependencies: NumPyro/JAX for MCMC, ArviZ for diagnostics and plots,
# and Episuite, which provides the prevalence models used below.
import numpyro
import arviz as az
from numpyro.infer import MCMC, NUTS
from numpyro.infer import init_to_value, init_to_feasible
from matplotlib import pyplot as plt
from jax import random
from episuite import prevalence
# Expose 2 host devices (CPU cores) to NumPyro so chains could run in parallel.
numpyro.set_host_device_count(2)
# -
# ### True prevalence model
# In this section we will estimate a true prevalence model, a model that assumes that you're observing true prevalences (i.e. on a seroprevalence study with perfect testing validation properties). Later we will improve on it by assuming imperfect testing.
# MCMC budget: 500 warmup (burn-in) draws, 2000 retained posterior samples.
num_warmup, num_samples = 500, 2000
# Random generator needed by jax: JAX uses explicit PRNG keys, split per use.
rng_key = random.PRNGKey(42)
rng_key, rng_key_ = random.split(rng_key)
# Scenario: collected 4000 samples and 20 were found positive
total_observations = 4000
positive_observations = 20
# Configure MCMC with the true_prevalence_model from Episuite
kernel = NUTS(prevalence.true_prevalence_model, init_strategy=init_to_feasible())
# NOTE: num_warmup/num_samples are keyword-only arguments of numpyro.infer.MCMC;
# passing them positionally raises a TypeError on current NumPyro releases.
mcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples, num_chains=1)
# Run MCMC
mcmc.run(rng_key_,
         obs_positive=positive_observations,
         obs_total=total_observations)
samples = mcmc.get_samples()
mcmc.print_summary()
# Convert to ArviZ InferenceData for the diagnostic plots below.
inference_data = az.from_numpyro(mcmc)
az.plot_forest(inference_data)
plt.show()
az.plot_trace(inference_data)
plt.show()
az.plot_posterior(inference_data, round_to=3, point_estimate="mode")
plt.show()
# ### Apparent prevalence model
# In this section we will estimate an apparent prevalence model, a model that incorporates the sensitivity and specificity properties of the test validation results. We will use here a scenario where we collected samples and tested for SARS-CoV-2 and assume properties from a real test from the brand Wondfo (used in Brazil on different seroprevalence surveys).
# +
# Wondfo test parameters (taken from their product description from tests they made with a PCR gold standard)
#
# From a total of 42 confirmed COVID-19 positive patients: the test detected 42 positive and 0 negative.
# From a total of 172 COVID 19 negative patients: the test detected 2 positive and 170 negative.
# Specificity parameters: x_sp true negatives out of n_sp known-negative patients.
n_sp = 172
x_sp = 170
# Sensitivity parameters: x_se true positives out of n_se known-positive patients.
n_se = 42
x_se = 42
# These are results from a seroprevalence study in Brazil
observed_total = 4189
observed_positive = 2
# -
kernel = NUTS(prevalence.apparent_prevalence_model,
              init_strategy=init_to_feasible())
# NOTE: num_warmup/num_samples are keyword-only arguments of numpyro.infer.MCMC;
# passing them positionally raises a TypeError on current NumPyro releases.
mcmc = MCMC(kernel, num_warmup=num_warmup, num_samples=num_samples, num_chains=1)
mcmc.run(rng_key_,
         x_se=x_se, n_se=n_se,  # Sensitivity parameters of the test used
         x_sp=x_sp, n_sp=n_sp,  # Specificity parameters of the test used
         obs_positive=observed_positive,  # Positive results
         obs_total=observed_total)  # Total samples
mcmc.print_summary(exclude_deterministic=False)
samples_1 = mcmc.get_samples()
# Convert to ArviZ InferenceData for the diagnostic plots below.
inference_data = az.from_numpyro(mcmc)
az.plot_forest(inference_data)
plt.show()
az.plot_trace(inference_data)
plt.show()
az.plot_posterior(inference_data, round_to=3,
                  point_estimate="mode", var_names=["true_p", "apparent_p"])
plt.show()
az.plot_pair(inference_data, var_names=["se_p", "sp_p"], kind="kde",
             colorbar=True, figsize=(10, 8), kde_kwargs={"fill_last": True})
plt.show()
# + raw_mimetype="text/restructuredtext" active=""
# .. note:: Please note that in this example we used only 1 MCMC chain and a few samples, on a real scenario you
# are advised to use multiple chains to have better diagnostics and much more samples.
|
docs/source/prevalence_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="XL7J2ffWNoPQ"
# # This colab notebook must be run on a **P100** GPU instance otherwise it will crash. Use the Cell-1 to ensure that it has a **P100** GPU instance
# + [markdown] id="LpU4e-qkNttp"
# Cell-1: Ensure the required gpu instance (P100)
# + id="SPfTSIZeNgFz"
#no.of sockets i.e available slots for physical processors
# !lscpu | grep 'Socket(s):'
#no.of cores each processor is having
# !lscpu | grep 'Core(s) per socket:'
#no.of threads each core is having
# !lscpu | grep 'Thread(s) per core'
#GPU count and name
# !nvidia-smi -L
#use this command to see GPU activity while doing Deep Learning tasks, for this command 'nvidia-smi' and for above one to work, go to 'Runtime > change runtime type > Hardware Accelerator > GPU'
# !nvidia-smi
# + [markdown] id="cgEg3UdIOBFw"
# Cell-2: Add Google Drive
# + id="5WEHfOvOOBxW"
# Mount Google Drive at /content/gdrive so data and checkpoints persist
# across Colab sessions (prompts for authorization on first run).
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="HmESAHB6ODmO"
# Cell-3: Install Required Dependencies
# + id="qd9kcFgyOGAP"
# !pip install efficientnet_pytorch==0.7.0
# !pip install albumentations==0.4.5
# !pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch\_stable.html -q\
# + [markdown] id="axobq88eOJNg"
# Cell-4: Run this cell to generate current fold weight ( Estimated Time for training this fold is around 2 hours 22 minutes )
# + id="V-qSEALoOLvk"
import sys
# Make the project sources stored on Google Drive importable.
sys.path.insert(0, "/content/gdrive/My Drive/zindi_cgiar_wheat_growth_stage_challenge/src_lq2")
from dataset import *
from model import *
from trainer import *
from utils import *
import numpy as np
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import DataLoader

# Training configuration for this notebook (it trains exactly one CV fold).
config = {
    'n_folds': 5,                       # number of stratified CV folds
    'random_seed': 42,                  # seed for reproducible splits and init
    'run_fold': 0,                      # the single fold this notebook trains
    'model_name': 'efficientnet-b3',    # backbone passed to CNN_Model
    'global_dim': 1536,                 # feature dimension passed to CNN_Model
    'batch_size': 66,
    'n_core': 0,                        # DataLoader workers (0 = main process)
    'weight_saving_path': '/content/gdrive/My Drive/zindi_cgiar_wheat_growth_stage_challenge/train_lq2_only_effnet_b3_step1/weights/',
    'resume_checkpoint_path': None,     # set to a checkpoint path to resume
    'lr': 0.01,
    'total_epochs': 100,
}
if __name__ == '__main__':
    # Seed RNGs for reproducible splits and training.
    set_random_state(config['random_seed'])

    # Pre-extracted arrays stored on Drive: images, labels, and a per-image
    # label-quality flag.
    imgs = np.load('/content/gdrive/My Drive/zindi_cgiar_wheat_growth_stage_challenge/zindi_npy_data/train_imgs.npy')
    labels = np.load('/content/gdrive/My Drive/zindi_cgiar_wheat_growth_stage_challenge/zindi_npy_data/train_labels.npy')
    labels_quality = np.load('/content/gdrive/My Drive/zindi_cgiar_wheat_growth_stage_challenge/zindi_npy_data/train_labels_quality.npy')

    # Keep only samples whose quality flag equals 2.
    imgs = imgs[labels_quality == 2]
    labels = labels[labels_quality == 2]
    # Shift labels from 1-based to 0-based class indices.
    labels = labels - 1

    skf = StratifiedKFold(n_splits=config['n_folds'], shuffle=True, random_state=config['random_seed'])
    for fold_number, (train_index, val_index) in enumerate(skf.split(X=imgs, y=labels)):
        # Train only the configured fold; skip all others.
        if fold_number != config['run_fold']:
            continue
        train_dataset = ZCDataset(
            imgs[train_index],
            labels[train_index],
            transform=get_train_transforms(),
            test=False,
        )
        train_loader = DataLoader(
            train_dataset,
            batch_size=config['batch_size'],
            shuffle=True,
            num_workers=config['n_core'],
            drop_last=True,  # keep batch size constant for BatchNorm stability
            pin_memory=True,
        )
        val_dataset = ZCDataset(
            imgs[val_index],
            labels[val_index],
            transform=get_val_transforms(),
            test=True,
        )
        val_loader = DataLoader(
            val_dataset,
            batch_size=config['batch_size'],
            shuffle=False,
            num_workers=config['n_core'],
            pin_memory=True,
        )
        # Free the full arrays once the datasets hold their fold subsets.
        # NOTE(review): this would break `imgs[train_index]` on any later fold;
        # safe here only because exactly one fold runs per execution.
        del imgs, labels
        model = CNN_Model(config['model_name'], config['global_dim'])
        args = {
            'model': model,
            'Loaders': [train_loader,val_loader],  # [train, validation]
            'metrics': {'Loss':AverageMeter, 'f1_score':PrintMeter, 'rmse':PrintMeter},
            'checkpoint_saving_path': config['weight_saving_path'],
            'resume_train_from_checkpoint': False,
            'resume_checkpoint_path': config['resume_checkpoint_path'],
            'lr': config['lr'],
            'fold': fold_number,
            'epochsTorun': config['total_epochs'],
            'batch_size': config['batch_size'],
            'test_run_for_error': False,  # presumably a dry-run flag — confirm in trainer
            'problem_name': 'zindi_cigar',
        }
        Trainer = ModelTrainer(**args)
        Trainer.fit()
|
Image Classification/CGIAR Wheat Growth Stage Challenge/neurofitting/zindi_cgiar_wheat_growth_stage_challenge/train_lq2_only_effnet_b3_step1/train_lq2_only_effnet_b3_step1_fold0.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Train S-CNN baseline
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import os
import librosa
import glob
import sys
# Make the project package importable from the experiment folder.
sys.path.insert(0,'../..')
from sed_endtoend.cnn.model import build_custom_cnn
from keras.optimizers import Adam
from sed_endtoend.callbacks import MetricsCallback
from sed_endtoend.data_generator import DataGenerator, Scaler
from keras.callbacks import CSVLogger
# Pin training to GPU 1.
os.environ["CUDA_VISIBLE_DEVICES"]="1"
# Experiment hyper-parameters (sequence_time, sr, mel_bands, ...) come from params.py.
from params import *
# files parameters
Nfiles = None      # cap on the number of files to load (None = all)
resume = False     # resume training from the best saved weights
load_subset = Nfiles
# +
# Keyword arguments shared by the train and validation DataGenerators.
params = {'sequence_time': sequence_time, 'sequence_hop_time':sequence_hop_time,
          'label_list':label_list,'audio_hop':audio_hop, 'audio_win':audio_win,
          'n_fft':n_fft,'sr':sr,'mel_bands':mel_bands,'normalize':normalize_data,
          'frames':frames,'get_annotations':get_annotations, 'dataset': dataset}
# Number of STFT frames spanned by one input sequence.
sequence_frames = int(np.ceil(sequence_time*sr/audio_hop))

# Map each wav file to its annotation (.txt) path, for both splits.
# (Renamed the loop variable from `id`, which shadowed the builtin, and
# removed four empty dicts that were never used.)
labels = {}
train_files = sorted(glob.glob(os.path.join(audio_folder,'train', '*.wav')))
val_files = sorted(glob.glob(os.path.join(audio_folder,'validate', '*.wav')))
if load_subset is not None:
    train_files = train_files[:load_subset]
    val_files = val_files[:load_subset]
for wav_path in train_files:
    labels[wav_path] = os.path.join(label_folder, 'train',os.path.basename(wav_path).replace('.wav','.txt'))
for wav_path in val_files:
    labels[wav_path] = os.path.join(label_folder, 'validate',os.path.basename(wav_path).replace('.wav','.txt'))

# Generators
print('Making training generator')
training_generator = DataGenerator(train_files, labels, **params)
params['sequence_hop_time'] = sequence_time # To calculate F1_1s
print('Making validation generator')
validation_generator = DataGenerator(val_files, labels, **params)

# Materialize mel spectrograms and targets for both splits.
print('Getting validation data')
_,_,mel_val,y_val = validation_generator.return_all()
print('Getting training data')
_,_,mel_train,y_train = training_generator.return_all()

# Fit a standard scaler on the training mels; its mean/scale are later used
# to initialize the model's input BatchNorm layer.
print('Founding standard scaler')
standard_scaler = Scaler(normalizer='standard')
standard_scaler.fit(mel_train)
standard_scaler_sklearn = standard_scaler.get_scaler()
mean= standard_scaler_sklearn.mean_
scale = standard_scaler_sklearn.scale_
# +
print('\nBuilding model...')
sequence_samples = int(sequence_time*sr)  # NOTE(review): appears unused in this cell
model = build_custom_cnn(n_freq_cnn=mel_bands, n_frames_cnn=sequence_frames,large_cnn=large_cnn)
# Init Batchnorm with the dataset statistics computed above.
# NOTE(review): Keras BatchNorm weights are [gamma, beta, moving_mean,
# moving_variance]; `scale` is the scaler's std, not variance — confirm intended.
model.layers[1].set_weights([np.ones_like(mean),np.zeros_like(mean),mean,scale])
model.summary()
opt = Adam(lr=learning_rate)
if resume:
    print('Loading best weights and resuming...')
    weights_best_file = os.path.join(expfolder, 'weights_best.hdf5')
    model.load_weights(weights_best_file)
# Fit model
print('\nFitting model...')
if resume:
    f1s_best = resume_f1_best  # NOTE(review): not referenced later in this cell — confirm
# Callback evaluates F1 on validation data and saves the best weights.
metrics_callback = MetricsCallback(mel_val, y_val, 0, 0, os.path.join(expfolder, 'weights_best.hdf5'))
csv_logger = CSVLogger(os.path.join(expfolder, 'training.log'))
model.compile(loss='binary_crossentropy',optimizer=opt)
history = model.fit(x=mel_train, y=y_train, batch_size=2*batch_size,
                    epochs=epochs, verbose=fit_verbose,
                    validation_split=0.0,  # validation handled by metrics_callback
                    shuffle=True,
                    callbacks=[metrics_callback,csv_logger])
|
exps/04_CNN_baseline/train.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Creating a Real-Time Inferencing Service
#
# You've spent a lot of time in this course training and registering machine learning models. Now it's time to deploy a model as a real-time service that clients can use to get predictions from new data.
#
# ## Before You Start
#
# Before you start this lab, ensure that you have completed the *Create an Azure Machine Learning Workspace* and *Create a Compute Instance* tasks in [Lab 1: Getting Started with Azure Machine Learning](./labdocs/Lab01.md). Then open this notebook in Jupyter on your Compute Instance.
#
# ## Connect to Your Workspace
#
# The first thing you need to do is to connect to your workspace using the Azure ML SDK.
#
# > **Note**: If you do not have a current authenticated session with your Azure subscription, you'll be prompted to authenticate. Follow the instructions to authenticate using the code provided.
# +
import azureml.core
from azureml.core import Workspace

# Load the workspace from the saved config file (config.json created when the
# workspace was set up); may prompt for interactive authentication.
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
# -
# ## Train and Register a Model
#
# You'll need a trained model to deploy. Run the cell below to train and register a model that predicts the likelihood of a clinic patient being diabetic.
# +
from azureml.core import Experiment
from azureml.core import Model
import pandas as pd
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve

# Create an Azure ML experiment in your workspace
experiment = Experiment(workspace = ws, name = "diabetes-training")
run = experiment.start_logging()
print("Starting experiment:", experiment.name)

# load the diabetes dataset
print("Loading Data...")
diabetes = pd.read_csv('data/diabetes.csv')

# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values

# Split data into training set and test set (70/30, fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)

# Train a decision tree model
print('Training a decision tree model')
model = DecisionTreeClassifier().fit(X_train, y_train)

# calculate accuracy (fraction of correct predictions on the test split)
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
# Use the builtin float: np.float was deprecated in NumPy 1.20 and removed in
# NumPy 1.24, so np.float(...) raises AttributeError on current NumPy.
run.log('Accuracy', float(acc))

# calculate AUC from the positive-class probabilities
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', float(auc))

# Save the trained model and attach it to the run outputs
model_file = 'diabetes_model.pkl'
joblib.dump(value=model, filename=model_file)
run.upload_file(name = 'outputs/' + model_file, path_or_stream = './' + model_file)

# Complete the run
run.complete()

# Register the model, recording the logged metrics as model properties
run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model',
                   tags={'Training context':'Inline Training'},
                   properties={'AUC': run.get_metrics()['AUC'], 'Accuracy': run.get_metrics()['Accuracy']})

print('Model trained and registered.')
# -
# ## Deploy a Model as a Web Service
#
# Now you have trained and registered a machine learning model that classifies patients based on the likelihood of them having diabetes. This model could be used in a production environment such as a doctor's surgery where only patients deemed to be at risk need to be subjected to a clinical test for diabetes. To support this scenario, you will deploy the model as a web service.
#
# First, let's determine what models you have registered in the workspace.
# +
from azureml.core import Model

# Print every registered model in the workspace, together with its version,
# tags, and properties.
for registered in Model.list(ws):
    print(registered.name, 'version:', registered.version)
    for tag_name, tag in registered.tags.items():
        print ('\t',tag_name, ':', tag)
    for prop_name, prop in registered.properties.items():
        print ('\t',prop_name, ':', prop)
    print('\n')
# -
# Right, now let's get the model that we want to deploy. By default, if we specify a model name, the latest version will be returned.
# Fetch the registered model by name; with no version given, the latest is returned.
model = ws.models['diabetes_model']
print(model.name, 'version', model.version)
# We're going to create a web service to host this model, and this will require some code and configuration files; so let's create a folder for those.
# +
import os

# Name of the folder that will hold the scoring script and environment file.
folder_name = 'diabetes_service'

# Create a folder for the web service files (no error if it already exists).
# An unused `experiment_folder = './' + folder_name` alias was removed.
os.makedirs(folder_name, exist_ok=True)

print(folder_name, 'folder created.')
# -
# The web service where we deploy the model will need some Python code to load the input data, get the model from the workspace, and generate and return predictions. We'll save this code in an *entry script* that will be deployed to the web service:
# +
# %%writefile $folder_name/score_diabetes.py
import json
import joblib
import numpy as np
from azureml.core.model import Model


def init():
    """Called once when the service is loaded: fetch and load the registered model."""
    global model
    model = joblib.load(Model.get_model_path('diabetes_model'))


def run(raw_data):
    """Called per request: score a JSON payload {'data': [[...feature rows...]]}.

    Returns the predicted class names ('not-diabetic'/'diabetic') as a JSON string.
    """
    features = np.array(json.loads(raw_data)['data'])
    class_labels = ['not-diabetic', 'diabetic']
    predicted_classes = [class_labels[outcome] for outcome in model.predict(features)]
    return json.dumps(predicted_classes)
# -
# The web service will be hosted in a container, and the container will need to install any required Python dependencies when it gets initialized. In this case, our scoring code requires **scikit-learn**, so we'll create a .yml file that tells the container host to install this into the environment.
# +
from azureml.core.conda_dependencies import CondaDependencies

# Add the dependencies for our model (AzureML defaults are already included)
myenv = CondaDependencies()
myenv.add_conda_package("scikit-learn")

# Save the environment config as a .yml file next to the scoring script
env_file = folder_name + "/diabetes_env.yml"
with open(env_file,"w") as f:
    f.write(myenv.serialize_to_string())
print("Saved dependency info in", env_file)

# Print the .yml file to verify its contents
with open(env_file,"r") as f:
    print(f.read())
# -
# Now you're ready to deploy. We'll deploy the container a service named **diabetes-service**. The deployment process includes the following steps:
#
# 1. Define an inference configuration, which includes the scoring and environment files required to load and use the model.
# 2. Define a deployment configuration that defines the execution environment in which the service will be hosted. In this case, an Azure Container Instance.
# 3. Deploy the model as a web service.
# 4. Verify the status of the deployed service.
#
# > **More Information**: For more details about model deployment, and options for target execution environments, see the [documentation](https://docs.microsoft.com/en-gb/azure/machine-learning/service/how-to-deploy-and-where).
#
# Deployment will take some time as it first runs a process to create a container image, and then runs a process to create a web service based on the image. When deployment has completed successfully, you'll see a status of **Healthy**.
# +
from azureml.core.webservice import AciWebservice
from azureml.core.model import InferenceConfig

# Configure the scoring environment: entry script + conda env from folder_name
inference_config = InferenceConfig(runtime= "python",
                                   source_directory = folder_name,
                                   entry_script="score_diabetes.py",
                                   conda_file="diabetes_env.yml")
# Host on an Azure Container Instance with 1 CPU core and 1 GB of memory
deployment_config = AciWebservice.deploy_configuration(cpu_cores = 1, memory_gb = 1)
service_name = "diabetes-service"
service = Model.deploy(ws, service_name, [model], inference_config, deployment_config)
# Block until deployment finishes, showing progress output
service.wait_for_deployment(True)
print(service.state)
# -
# Hopefully, the deployment has been successful and you can see a status of **Healthy**. If not, you can use the following code to check the status and get the service logs to help you troubleshoot.
# +
# Check the service health and fetch container logs for troubleshooting
print(service.state)
print(service.get_logs())
# If you need to make a change and redeploy, you may need to delete unhealthy service using the following code:
#service.delete()
# -
# Take a look at your workspace in [Azure Machine learning studio](https://ml.azure.com) and view the **Endpoints** page, which shows the deployed services in your workspace.
#
# You can also retrieve the names of web services in your workspace by running the following code:
# List the names of all web services deployed in this workspace
for webservice_name in ws.webservices:
    print(webservice_name)
# ## Use the Web Service
#
# With the service deployed, now you can consume it from a client application.
# +
import json

# One observation: the 8 features expected by the model (see training cell)
x_new = [[2,180,74,24,21,23.9091702,1.488172308,22]]
print ('Patient: {}'.format(x_new[0]))

# Convert the array to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})

# Call the web service, passing the input data (the web service will also accept the data in binary format)
predictions = service.run(input_data = input_json)

# Get the predicted class - it'll be the first (and only) one.
predicted_classes = json.loads(predictions)
print(predicted_classes[0])
# -
# You can also send multiple patient observations to the service, and get back a prediction for each one.
# +
import json

# This time our input is an array of two feature arrays (two patients)
x_new = [[2,180,74,24,21,23.9091702,1.488172308,22],
         [0,148,58,11,179,39.19207553,0.160829008,45]]

# Convert the array or arrays to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})

# Call the web service, passing the input data
predictions = service.run(input_data = input_json)

# Get the predicted classes, one per input observation
predicted_classes = json.loads(predictions)

for i in range(len(x_new)):
    print ("Patient {}".format(x_new[i]), predicted_classes[i] )
# -
# The code above uses the Azure ML SDK to connect to the containerized web service and use it to generate predictions from your diabetes classification model. In production, a model is likely to be consumed by business applications that do not use the Azure ML SDK, but simply make HTTP requests to the web service.
#
# Let's determine the URL to which these applications must submit their requests:
# The scoring URI is the HTTP endpoint client applications POST to.
endpoint = service.scoring_uri
print(endpoint)
# Now that you know the endpoint URI, an application can simply make an HTTP request, sending the patient data in JSON (or binary) format, and receive back the predicted class(es).
# +
import requests
import json

# Two observations with the 8 features expected by the model
x_new = [[2,180,74,24,21,23.9091702,1.488172308,22],
         [0,148,58,11,179,39.19207553,0.160829008,45]]

# Convert the array to a serializable list in a JSON document
input_json = json.dumps({"data": x_new})

# Set the content type
headers = { 'Content-Type':'application/json' }

# Plain HTTP POST — no Azure ML SDK required on the client side
predictions = requests.post(endpoint, input_json, headers = headers)
# The service returns a JSON string, so decode the response body once more
predicted_classes = json.loads(predictions.json())

for i in range(len(x_new)):
    print ("Patient {}".format(x_new[i]), predicted_classes[i] )
# -
# You've deployed your web service as an Azure Container Instance (ACI) service that requires no authentication. This is fine for development and testing, but for production you should consider deploying to an Azure Kubernetes Service (AKS) cluster and enabling authentication. This would require REST requests to include an **Authorization** header.
#
# ### More Information
#
# For more information about publishing a model as a service, see the [documentation](https://docs.microsoft.com/azure/machine-learning/how-to-deploy-and-where)
#
# ## Clean Up
#
# If you've finished exploring, you can delete your service by running the cell below. Then close this notebook and shut down your Compute Instance.
# Tear down the ACI web service to stop incurring charges.
service.delete()
print("Service deleted.")
|
06-Deploying_a_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Health/CALM/CALM-moving-out-6.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# # CALM - Moving Out 6
# ## Part 6 - Food and Supplies
#
# 📙In this section we will consider food and household supplies that you will need. You will be using a [dataframes from a Python library called pandas](https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html#dataframe). These dataframes are like spreadsheets, and the code will look a little complicated, but it shouldn't be too bad.
#
# ### Meal Plan
#
# Before we get into dataframes, though, you need to create a meal plan. With the [Canadian Food Guide](https://food-guide.canada.ca/en/food-guide-snapshot/) in mind, complete a 7-day meal plan considering nutritionally balanced choices at each meal. You can choose to eat out only twice on this menu.
#
# You will then use this to decide your grocery needs for one week.
#
# Replace the words "meal" in the cell below with the meals you plan to eat, then run the cell to store your plan.
# +
# %%writefile moving_out_8.txt
✏️
|Day|Breakfast|Lunch|Dinner|
|-|-|-|-|
|Monday| meal | meal | meal |
|Tuesday| meal | meal | meal |
|Wednesday| meal | meal | meal |
|Thursday| meal | meal | meal |
|Friday| meal | meal | meal |
|Saturday| meal | meal | meal |
|Sunday| meal | meal | meal |
# -
# ### Food Shopping
#
# 📙From your meal plan make a shopping list of food needed to prepare three meals a day for one week. Research the price of these food items by going to grocery store websites, using grocery fliers, going to the grocery store, or reviewing receipts or bills with your family. Buying items in bulk is usually more economical in the long run, but for this exercise you only require food for one week so choose the smallest quantities possible.
#
# `Run` the following cell to generate a data table that you can then edit.
#
# Double-click on the "nan" values to put in your information. Use the "Add Row" and "Remove Row" buttons if necessary.
import pandas as pd
import qgrid
# Starting list of grocery categories; rows can be added/removed in the widget.
foodItemList = ['Vegetables','Fruit','Protein','Whole Grains','Snacks','Restaurant Meal 1','Restaurant Meal 2']
foodColumns = ['Size','Quantity','Price']
foodIndex = range(1,len(foodItemList)+1)
# Build an editable table: one row per item with size/quantity/price columns.
dfFood = pd.DataFrame(index=pd.Series(foodIndex), columns=pd.Series(foodColumns))
dfFood.insert(0,'Item(s)',foodItemList,True)
dfFood['Quantity'] = 1
dfFood['Price'] = 1
# qgrid wraps the DataFrame in an interactive, editable grid widget.
dfFoodWidget = qgrid.QgridWidget(df=dfFood, show_toolbar=True)
dfFoodWidget
# 📙After you have added data to the table above, `Run` the next cell to calculate your food costs for the month. It adds up weekly food costs and multiplies by 4.3 weeks per month.
# Read back the user-edited grid and total the Price column.
foodShoppingList = dfFoodWidget.get_changed_df()
foodPrices = pd.to_numeric(foodShoppingList['Price'])
weeklyFoodCost = foodPrices.sum()
monthlyFoodCost = weeklyFoodCost * 4.3  # average weeks per month
# %store monthlyFoodCost
print('That is about $' + str(weeklyFoodCost) + ' per week for food.')
print('Your food for the month will cost about $' + str('{:.2f}'.format(monthlyFoodCost)) + '.')
# ### Household Supplies and Personal Items
#
# 📙The following is a typical list of household and personal items. Add any additional items you feel you need and delete items you don’t need. Look for smaller quantities with a **one-month** budget in mind, or adjust pricing if buying in bulk.
#
# `Run` the next cell to generate a data table that you can then edit.
# Typical household/personal items; rows can be edited in the widget below.
householdItemList = ['Toilet Paper','Tissues','Paper Towel',
                     'Dish Soap','Laundry Detergent','Cleaners',
                     'Plastic Wrap','Foil','Garbage/Recycling Bags',
                     'Condiments','Coffee/Tea','Flour','Sugar',
                     'Shampoo','Conditioner','Soap','Deodorant',
                     'Toothpaste','Mouthwash','Hair Products','Toothbrush',
                     'Makeup','Cotton Balls','Shaving Gel','Razors',
                     ]
householdColumns = ['Size','Quantity','Price']
householdIndex = range(1,len(householdItemList)+1)
# Editable table: one row per item with size/quantity/price columns.
dfHousehold = pd.DataFrame(index=pd.Series(householdIndex), columns=pd.Series(householdColumns))
dfHousehold.insert(0,'Item(s)',householdItemList,True)
dfHousehold['Quantity'] = 1
dfHousehold['Price'] = 1
dfHouseholdWidget = qgrid.QgridWidget(df=dfHousehold, show_toolbar=True)
dfHouseholdWidget
# 📙After you have added data to the above data table, `Run` the next cell to calculate your monthly household item costs.
# Read back the user-edited grid and total the Price column (monthly total).
householdShoppingList = dfHouseholdWidget.get_changed_df()
householdPrices = pd.to_numeric(householdShoppingList['Price'])
monthlyHouseholdCost = householdPrices.sum()
# %store monthlyHouseholdCost
print('That is about $' + str(monthlyHouseholdCost) + ' per month for household items.')
# ### Furniture and Equipment
#
# 📙Think about items you need for your place. How comfortable do you want to be? Are there items you have already been collecting or that your family is saving for you? Discuss which items they may be willing to give you, decide which items you can do without, which items a roommate may have, and which items you will need to purchase. Although it is nice to have new things, remember household items are often a bargain at garage sales, dollar stores, and thrift stores.
#
# `Run` the next cell to generate a data table that you can edit.
# One-time furniture and equipment purchases; rows can be edited in the widget.
fneItemList = ['Pots and Pans','Glasses','Plates','Bowls',
               'Cutlery','Knives','Oven Mitts','Towels','Cloths',
               'Toaster','Garbage Cans','Kettle','Table','Kitchen Chairs',
               'Broom and Dustpan','Vacuum Cleaner','Clock',
               'Bath Towels','Hand Towels','Bath Mat',
               'Toilet Brush','Plunger',
               'Bed','Dresser','Night Stand','Sheets','Blankets','Pillows',
               'Lamps','TV','Electronics','Coffee Table','Couch','Chairs',
               ]
fneColumns = ['Room','Quantity','Price']
fneIndex = range(1,len(fneItemList)+1)
# Editable table: one row per item with room/quantity/price columns.
dfFne = pd.DataFrame(index=pd.Series(fneIndex), columns=pd.Series(fneColumns))
dfFne.insert(0,'Item(s)',fneItemList,True)
dfFne['Quantity'] = 1
dfFne['Price'] = 1
dfFneWidget = qgrid.QgridWidget(df=dfFne, show_toolbar=True)
dfFneWidget
# 📙Next `Run` the following cell to add up your furniture and equipment costs.
# Read back the user-edited grid and total the Price column (one-time cost).
fneList = dfFneWidget.get_changed_df()
fnePrices = pd.to_numeric(fneList['Price'])
fneCost = fnePrices.sum()
# %store fneCost
print('That is about $' + str(fneCost) + ' for furniture and equipment items.')
# ### Clothing
#
# 📙When calculating the cost of clothing for yourself, consider the type of work you plan to be doing and how important clothing is to you. Consider how many of each item of clothing you will purchase in a year, and multiply this by the cost per item. Be realistic.
#
# `Run` the next cell to generate an editable data table.
# Clothing categories for the yearly budget table.
# NOTE: a missing comma after 'Suits/Jackets/Dresses' previously caused Python's
# implicit string concatenation to fuse it with 'T-Shirts/Tops' into one item.
clothingItemList = ['Dress Pants','Skirts','Shirts','Suits/Jackets/Dresses',
                    'T-Shirts/Tops','Jeans/Pants','Shorts',
                    'Dress Shoes','Casual Shoes','Running Shoes',
                    'Outdoor Coats','Boots','Sports Clothing',
                    'Pajamas','Underwear','Socks','Swimsuits'
                    ]
clothingColumns = ['Quantity Required','Cost per Item']
clothingIndex = range(1,len(clothingItemList)+1)
# Editable table: one row per clothing category with yearly quantity and unit cost.
dfClothing = pd.DataFrame(index=pd.Series(clothingIndex), columns=pd.Series(clothingColumns))
dfClothing.insert(0,'Item(s)',clothingItemList,True)
dfClothing['Quantity Required'] = 1
dfClothing['Cost per Item'] = 1
dfClothingWidget = qgrid.QgridWidget(df=dfClothing, show_toolbar=True)
dfClothingWidget
# 📙Once you have added data to the above table, `Run` the next cell to add up your clothing costs.
# Read back the user-edited grid; total cost = quantity * unit cost per row.
clothingList = dfClothingWidget.get_changed_df()
clothingQuantities = pd.to_numeric(clothingList['Quantity Required'])
clothingPrices = pd.to_numeric(clothingList['Cost per Item'])
clothingList['Total Cost'] = clothingQuantities * clothingPrices
clothingCost = clothingList['Total Cost'].sum()
# Spread the yearly clothing cost over 12 months.
monthlyClothingCost = clothingCost / 12
# %store monthlyClothingCost
print('That is $' + str('{:.2f}'.format(clothingCost)) + ' per year, or about $' + str('{:.2f}'.format(monthlyClothingCost)) + ' per month for clothing.')
clothingList # this displays the table with total cost calculations
# ### Health Care
#
# 📙Most people living and working in Alberta have access to hospital and medical services under the [Alberta Health Care Insurance Plan (AHCIP)](https://www.alberta.ca/ahcip.aspx) paid for by the government. Depending on where you work, your employer may offer additional benefit packages such as Extended Health Care that cover a portion of medical and dental expenses.
#
# If you do not have health benefits from your employer you will have to pay for medications, dental visits, and vision care.
#
# Allow money in your budget for prescriptions and over-the-counter medications.
#
# Budget for the dentist and optometrist. One visit to the dentist including a check-up, x-rays, and teeth cleaning is approximately $330. You should see your dentist yearly.
#
# A visit to the optometrist is approximately $120. You should normally see your optometrist once every 2 years, or once a year if you’re wearing contact lenses.
#
# `Run` the next cell to display a data table that you can edit with your expected health costs.
# Typical yearly health-care items for someone without employer benefits.
healthItems = [
    'Pain Relievers','Bandages','Cough Medicine',
    'Prescriptions','Dental Checkup',
    'Optometrist','Glasses','Contacts','Contact Solution',
    'Physiotherapy','Massage'
]
healthColumns = ['Cost Per Year']
healthIndex = range(1, len(healthItems) + 1)
# Build the table in one step: the item names plus an editable yearly-cost
# column pre-filled with 1, with rows labelled 1..N.
dfHealth = pd.DataFrame(
    {'Item or Service': healthItems, 'Cost Per Year': 1},
    index=pd.Series(healthIndex),
)
# Editable qgrid widget for the health-cost table; display it.
dfHealthWidget = qgrid.QgridWidget(df=dfHealth, show_toolbar=True)
dfHealthWidget
# 📙`Run` the next cell to add up your health care costs.
# Pull the edited health-cost table back out of the widget.
healthList = dfHealthWidget.get_changed_df()
# Coerce the edited cells to numbers, then total the yearly costs.
healthCost = pd.to_numeric(healthList['Cost Per Year']).sum()
monthlyHealthCost = healthCost / 12
# %store monthlyHealthCost
print('That is $' + str('{:.2f}'.format(healthCost)) + ' per year, or about $' + str('{:.2f}'.format(monthlyHealthCost)) + ' per month for health care.')
# 📙Once again, `Run` the next cell to check that your answers have been stored.
# NOTE(review): the monthly*Cost/fneCost names below are presumably restored
# via %store from earlier notebooks in this series -- they are not defined
# in this notebook; confirm against the preceding sections.
print('Monthly food cost:', monthlyFoodCost)
print('Monthly household items cost:', monthlyHouseholdCost)
print('Furniture and equipment cost:', fneCost)
print('Monthly clothing cost:', monthlyClothingCost)
print('Monthly health cost', monthlyHealthCost)
# Show the notes file that accompanies this section.
with open('moving_out_8.txt', 'r') as file8:
    print(file8.read())
# 📙You have now completed this section. Proceed to [section 7](./CALM-moving-out-7.ipynb)
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
|
_build/html/_sources/curriculum-notebooks/Health/CALM/CALM-moving-out-6.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ISM Lecture 2 Part 2
#
# This content is authored by <NAME> for use in the University of Edinburgh Business School Investment and Securities Markets course in Autumn 2020.
#
# Make sure to have watched the videos preceding this Notebook and have covered the slides. Detailed explanations are in the assigned textbook chapters.
#
# This lesson covers:
#
# * Holding period returns over multiple periods
#
# The first computational cell below (with In [ ] in front) contains the solution. Go over the command lines, make sure they make sense to you, click inside the cell, it should become surrounded by a green rectangle, press Esc - the rectangle will become blue, now press Shift+Enter - this will execute the cell and produce the results beneath it.
#
# To remove all output in the notebook and start again, go to the Kernel tab above, select Restart and Clear Output.
# ## Solved Problem 1: Compute holding period returns using pandas
#
# Stock A was \\$50 per share at the end of February. Using the information below, calculate the holding period return at the end of May.
#
# t 0 1 2 3
# month Feb Mar Apr May
# price 50 60 45 51.75
#
# Recall that in the previous notebook we declared separate variables/containers for each price at different points in time. Now we shall do things slightly more efficiently by declaring a list of all prices in the same container.
#
# What is especially convenient in Python is that we count the elements in a list starting at 0. This means that our series of prices at time 0, 1, 2 and 3 will correspond exactly to the numbering of elements in a list.
#
# In this example there are no dividends, so the holding period return formula is simply P1/P0 - 1. Make sure you know why (ask if not clear).
#
# To do this we need a python library called pandas that allows us to move back and forth along the elements of a list with the function shift().
#
# Note that NaN stands for "not a number" and denotes a missing value.
#
# End-of-month prices for stock A: t = 0..3 correspond to Feb, Mar, Apr, May.
lstA = [50, 60, 45, 51.75]
# pandas gives us easy, vectorised time-series operations.
import pandas as pd
# Put the prices into a DataFrame -- the object type pandas operates on.
pricesA = pd.DataFrame(lstA)
# Monthly holding period return: P_t / P_{t-1} - 1.  shift(1) lines each
# price up with the previous month's price; the first month has no
# predecessor, hence the NaN ("not a number") in row 0.
returnsA = pricesA.div(pricesA.shift(1)) - 1
returnsA
# ## Practice Problem 1: Compute holding period returns using pandas
#
# Solve the same problem for another stock B with the following prices over the period Feb - May:
# end of month prices: 55, 57, 68, 43
#
# Make sure to define your variables specifically to stock B.
#
# Enter your solution below:
# ## Solved Problem 2: Compute a holding period return from multiple shorter holding period returns
#
# Use the monthly holding period returns we computed in solved problem 1 to compute the 3-month holding period return between end of February and end of May.
#
# What we need here is to add 1 to each monthly holding period return, multiply them all together and subtract 1 at the end. See slides.
#
# First let us compute the series of gross returns and print them to make sure they make sense.
# pandas (and lstA) were already brought into scope by earlier cells; once a
# cell has run, its names stay available to the rest of the notebook.
pricesA = pd.DataFrame(lstA)
returnsA = pricesA.div(pricesA.shift(1)) - 1
# Gross return = 1 + net return, ready to be compounded across months.
gross_returnsA = 1 + returnsA
gross_returnsA
# + active=""
# Next let us print the label of what we are computing and compute the 3-month holding period return between end of February and end of May:
# -
# Fixed typo in the printed label: "storck" -> "stock".
print("3-month Holding Period Return end of May of stock A:")
# iloc[] pulls single elements of the dataframe by integer location.
# Remember: positions count from 0, matching t = 0..3 exactly.
gross_returnsA.iloc[1]*gross_returnsA.iloc[2]*gross_returnsA.iloc[3]-1
# ## Practice Problem 2: Compute a holding period return from multiple shorter holding period returns
#
# Use the monthly holding period returns you computed in practice problem 1 to compute the 3-month holding period return between end of February and end of May for stock B. Make sure to use the dataframe returnsB you defined in practice problem 1.
#
# Enter your solution below:
# ## Solved Problem 3: Compute holding period returns from prices
#
# Let us compare the return we get when we use beginning and ending price for stock A to what we got using the three monthly holding period returns.
#
# Remember that pricesA is a pandas dataframe and to pick out its elements we need the pandas iloc() suffix.
print("3-month Holding Period Return end of May of stock A using prices:")
# (P_3 / P_0) - 1: the same 3-month holding period return, computed directly
# from the first and last prices instead of compounding monthly returns.
pricesA.iloc[3]/pricesA.iloc[0]-1
# ## Practice Problem 3: Compute holding period returns from prices
#
# Now let us compare the return we get when we use beginning and ending price for stock B to what we got using the three monthly holding period returns.
|
week02/w01_part2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Linear-Algebra" data-toc-modified-id="Linear-Algebra-1"><span class="toc-item-num">1 </span>Linear Algebra</a></span><ul class="toc-item"><li><span><a href="#Dot-Products" data-toc-modified-id="Dot-Products-1.1"><span class="toc-item-num">1.1 </span>Dot Products</a></span><ul class="toc-item"><li><span><a href="#What-does-a-dot-product-conceptually-mean?" data-toc-modified-id="What-does-a-dot-product-conceptually-mean?-1.1.1"><span class="toc-item-num">1.1.1 </span>What does a dot product conceptually mean?</a></span></li></ul></li><li><span><a href="#Exercises" data-toc-modified-id="Exercises-1.2"><span class="toc-item-num">1.2 </span>Exercises</a></span></li><li><span><a href="#Using-Scikit-Learn" data-toc-modified-id="Using-Scikit-Learn-1.3"><span class="toc-item-num">1.3 </span>Using Scikit-Learn</a></span></li><li><span><a href="#Bag-of-Words-Models" data-toc-modified-id="Bag-of-Words-Models-1.4"><span class="toc-item-num">1.4 </span>Bag of Words Models</a></span></li></ul></li><li><span><a href="#Distance-Measures" data-toc-modified-id="Distance-Measures-2"><span class="toc-item-num">2 </span>Distance Measures</a></span><ul class="toc-item"><li><span><a href="#Euclidean-Distance" data-toc-modified-id="Euclidean-Distance-2.1"><span class="toc-item-num">2.1 </span>Euclidean Distance</a></span><ul class="toc-item"><li><span><a href="#Scikit-Learn" data-toc-modified-id="Scikit-Learn-2.1.1"><span class="toc-item-num">2.1.1 </span>Scikit Learn</a></span></li></ul></li></ul></li><li><span><a href="#Similarity-Measures" data-toc-modified-id="Similarity-Measures-3"><span class="toc-item-num">3 </span>Similarity Measures</a></span></li><li><span><a href="#Linear-Relationships" data-toc-modified-id="Linear-Relationships-4"><span class="toc-item-num">4 </span>Linear Relationships</a></span><ul class="toc-item"><li><span><a href="#Pearson-Correlation-Coefficient" 
data-toc-modified-id="Pearson-Correlation-Coefficient-4.1"><span class="toc-item-num">4.1 </span>Pearson Correlation Coefficient</a></span><ul class="toc-item"><li><span><a href="#Intuition-Behind-Pearson-Correlation-Coefficient" data-toc-modified-id="Intuition-Behind-Pearson-Correlation-Coefficient-4.1.1"><span class="toc-item-num">4.1.1 </span>Intuition Behind Pearson Correlation Coefficient</a></span><ul class="toc-item"><li><span><a href="#When-$ρ_{Χ_Υ}-=-1$-or--$ρ_{Χ_Υ}-=--1$" data-toc-modified-id="When-$ρ_{Χ_Υ}-=-1$-or--$ρ_{Χ_Υ}-=--1$-4.1.1.1"><span class="toc-item-num">4.1.1.1 </span>When $ρ_{Χ_Υ} = 1$ or $ρ_{Χ_Υ} = -1$</a></span></li></ul></li></ul></li><li><span><a href="#Cosine-Similarity" data-toc-modified-id="Cosine-Similarity-4.2"><span class="toc-item-num">4.2 </span>Cosine Similarity</a></span><ul class="toc-item"><li><span><a href="#Shift-Invariance" data-toc-modified-id="Shift-Invariance-4.2.1"><span class="toc-item-num">4.2.1 </span>Shift Invariance</a></span></li></ul></li></ul></li><li><span><a href="#Exercise-(20-minutes):" data-toc-modified-id="Exercise-(20-minutes):-5"><span class="toc-item-num">5 </span><span style="background-color: #ffff00">Exercise (20 minutes):</span></a></span><ul class="toc-item"><li><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#3.-Define-your-cosine-similarity-functions" data-toc-modified-id="3.-Define-your-cosine-similarity-functions-5.0.0.1"><span class="toc-item-num">5.0.0.1 </span>3. Define your cosine similarity functions</a></span></li><li><span><a href="#4.-Get-the-two-documents-from-the-BoW-feature-space-and-calculate-cosine-similarity" data-toc-modified-id="4.-Get-the-two-documents-from-the-BoW-feature-space-and-calculate-cosine-similarity-5.0.0.2"><span class="toc-item-num">5.0.0.2 </span>4. 
Get the two documents from the BoW feature space and calculate cosine similarity</a></span></li></ul></li></ul></li></ul></li><li><span><a href="#Challenge:-Use-the-Example-Below-to-Create-Your-Own-Cosine-Similarity-Function" data-toc-modified-id="Challenge:-Use-the-Example-Below-to-Create-Your-Own-Cosine-Similarity-Function-6"><span class="toc-item-num">6 </span>Challenge: Use the Example Below to Create Your Own Cosine Similarity Function</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Create-a-list-of-all-the-vocabulary-$V$" data-toc-modified-id="Create-a-list-of-all-the-vocabulary-$V$-6.0.1"><span class="toc-item-num">6.0.1 </span>Create a list of all the <strong>vocabulary $V$</strong></a></span><ul class="toc-item"><li><span><a href="#Native-Implementation:" data-toc-modified-id="Native-Implementation:-6.0.1.1"><span class="toc-item-num">6.0.1.1 </span>Native Implementation:</a></span></li></ul></li><li><span><a href="#Create-your-Bag-of-Words-model" data-toc-modified-id="Create-your-Bag-of-Words-model-6.0.2"><span class="toc-item-num">6.0.2 </span>Create your Bag of Words model</a></span></li></ul></li></ul></li></ul></div>
# -
# # Linear Algebra
#
# In the natural language processing, each document is a vector of numbers.
#
#
# ## Dot Products
#
# A dot product is defined as
#
# $ a \cdot b = \sum_{i}^{n} a_{i}b_{i} = a_{1}b_{1} + a_{2}b_{2} + a_{3}b_{3} + \dots + a_{n}b_{n}$
#
# The geometric definition of a dot product is
#
# $ a \cdot b = \|a\| \, \|b\| \cos\theta $
#
# ### What does a dot product conceptually mean?
#
# A dot product is a representation of the **similarity between two components**, because it is calculated based upon shared elements. It tells you how much one vector goes in the direction of another vector.
#
# The actual value of a dot product reflects the direction of change:
#
# * **Zero**: we don't have any growth in the original direction
# * **Positive** number: we have some growth in the original direction
# * **Negative** number: we have negative (reverse) growth in the original direction
# +
A = [0,2]
B = [0,1]

def dot_product(x, y):
    """Return the dot product: the sum of elementwise products of x and y."""
    total = 0
    for left, right in zip(x, y):
        total += left * right
    return total

dot_product(A,B)
# What will the dot product of A and B be?
# -
# 
# ## Exercises
# What will the dot product of `A` and `B` be?
A = [1,2]
B = [2,4]
# B is exactly 2*A, so the vectors point in the same direction.
dot_product(A,B)
# What will the dot product of `document_1` and `document_2` be?
# Term-count vectors for two tiny "documents" over a 3-word vocabulary.
document_1 = [0, 0, 1]
document_2 = [1, 0, 2]
# ## Using Scikit-Learn
# +
from sklearn.feature_extraction.text import CountVectorizer
# -
# ## Bag of Words Models
# +
corpus = [
    "Some analysts think demand could drop this year because a large number of homeowners take on remodeling projectsafter buying a new property. With fewer homes selling, home values easing, and mortgage rates rising, they predict home renovations could fall to their lowest levels in three years.",
    "Most home improvement stocks are expected to report fourth-quarter earnings next month.",
    "The conversation boils down to how much leverage management can get out of its wide-ranging efforts to re-energize operations, branding, digital capabilities, and the menu–and, for investors, how much to pay for that.",
    "RMD’s software acquisitions, efficiency, and mix overcame pricing and its gross margin improved by 90 bps Y/Y while its operating margin (including amortization) improved by 80 bps Y/Y. Since RMD expects the slower international flow generator growth to continue for the next few quarters, we have lowered our organic growth estimates to the mid-single digits. "
]
# Bug fix: `vectorizer` was used below without ever being created in this
# notebook (NameError) -- instantiate the CountVectorizer imported above.
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(corpus).toarray()
import numpy as np
from sys import getsizeof
# Count zero entries to measure how sparse the bag-of-words matrix is.
zeroes = np.where(X.flatten() == 0)[0].size
percent_sparse = zeroes / X.size
print(f"The bag of words feature space is {round(percent_sparse * 100,2)}% sparse. \n\
That's approximately {round(getsizeof(X) * percent_sparse,2)} bytes of wasted memory. This is why sklearn uses CSR (compressed sparse rows) instead of normal matrices!")
# -
# # Distance Measures
#
#
# ## Euclidean Distance
#
# Euclidean distances can range from 0 (completely identically) to $\infty$ (extremely dissimilar).
#
# The distance between two points, $x$ and $y$, can be defined as $d(x,y)$:
#
# $$
# d(x,y) = \sqrt{\sum_{i=1}^{n}(x_{i}-y_{i})^2}
# $$
#
# Compared to the other dominant distance measure (cosine similarity), **magnitude** plays an extremely important role.
# +
from math import sqrt

def euclidean_distance_1(x, y):
    """Euclidean distance between points x and y (equal-length sequences)."""
    squared_diffs = [(p - q) ** 2 for p, q in zip(x, y)]
    return sqrt(sum(squared_diffs))
# -
# There's typically an easier way to write this function that takes advantage of Numpy's vectorization capabilities:
import numpy as np

def euclidean_distance_2(x, y):
    """Euclidean distance computed as the L2 norm of the difference vector."""
    difference = np.asarray(x) - np.asarray(y)
    return np.linalg.norm(difference)
# ### Scikit Learn
# sklearn computes pairwise Euclidean distances between the rows of X.
from sklearn.metrics.pairwise import euclidean_distances
# Stack the two document vectors (defined earlier) as rows of a matrix.
X = [document_1, document_2]
# Produces a symmetric 2x2 matrix; the off-diagonal entry is d(doc1, doc2).
euclidean_distances(X)
# # Similarity Measures
#
# Similarity measures will always range between -1 and 1. A similarity of -1 means the two objects are complete opposites, while a similarity of 1 indicates the objects are identical.
#
#
# # Linear Relationships
#
# ## Pearson Correlation Coefficient
# * We use **ρ** when the correlation is being measured from the population, and **r** when it is being generated from a sample.
# * An r value of 1 represents a **perfect linear** relationship, and a value of -1 represents a perfect inverse linear relationship.
#
# The equation for Pearson's correlation coefficient is
# $$
# ρ_{Χ_Υ} = \frac{cov(X,Y)}{σ_Xσ_Y}
# $$
#
# ### Intuition Behind Pearson Correlation Coefficient
#
# #### When $ρ_{Χ_Υ} = 1$ or $ρ_{Χ_Υ} = -1$
#
# This requires **$cov(X,Y) = σ_Xσ_Y$** or **$-1 * cov(X,Y) = σ_Xσ_Y$** (in the case of $ρ = -1$) . This corresponds with all the data points lying perfectly on the same line.
# 
#
#
# ## Cosine Similarity
#
# The cosine similarity of two vectors (each vector will usually represent one document) is a measure that calculates $ cos(\theta)$, where $\theta$ is the angle between the two vectors.
#
# Therefore, if the vectors are **orthogonal** to each other (90 degrees), $cos(90) = 0$. If the vectors are in exactly the same direction, $\theta = 0$ and $cos(0) = 1$.
#
# Cosine similarity **does not care about the magnitude of the vector, only the direction** in which it points. This can help normalize when comparing across documents that are different in terms of word count.
#
# 
#
# ### Shift Invariance
#
# * The Pearson correlation coefficient between X and Y does not change with you transform $X \rightarrow a + bX$ and $Y \rightarrow c + dY$, assuming $a$, $b$, $c$, and $d$ are constants and $b$ and $d$ are positive.
# * Cosine similarity does, however, change when transformed in this way.
#
#
# <h1><span style="background-color: #FFFF00">Exercise (20 minutes):</span></h1>
#
# >In Python, find the **cosine similarity** and the **Pearson correlation coefficient** of the two following sentences, assuming a **one-hot encoded binary bag of words** model. You may use a library to create the BoW feature space, but do not use libraries other than `numpy` or `scipy` to compute Pearson and cosine similarity:
#
# >`A = "John likes to watch movies. Mary likes movies too"`
#
# >`B = "John also likes to watch football games, but he likes to watch movies on occasion as well"`
# #### 3. Define your cosine similarity functions
#
# ```python
# from scipy.spatial.distance import cosine # we are importing this library to check that our own cosine similarity func works
# from numpy import dot # to calculate dot product
# from numpy.linalg import norm # to calculate the norm
#
# def cosine_similarity(A, B):
# numerator = dot(A, B)
# denominator = norm(A) * norm(B)
# return numerator / denominator
#
# def cosine_distance(A,B):
# return 1 - cosine_similarity
#
# A = [0,2,3,4,1,2]
# B = [1,3,4,0,0,2]
#
# # check that your native implementation and 3rd party library function produce the same values
# assert round(cosine_similarity(A,B),4) == round(cosine(A,B),4)
# ```
#
# #### 4. Get the two documents from the BoW feature space and calculate cosine similarity
#
# ```python
# cosine_similarity(X[0], X[1])
# ```
# >0.5241424183609592
# +
from scipy.spatial.distance import cosine
from numpy import dot
import numpy as np
from numpy.linalg import norm

def cosine_similarity(A, B):
    """Cosine similarity of vectors A and B: dot(A, B) / (||A|| * ||B||)."""
    numerator = dot(A, B)
    denominator = norm(A) * norm(B)
    return numerator / denominator

def cosine_distance(A, B):
    """Cosine distance: 1 minus the cosine similarity.

    Bug fix: the original returned ``1 - cosine_similarity`` -- subtracting
    the *function object* itself, which raises a TypeError.  The function
    must be called with the two vectors.
    """
    return 1 - cosine_similarity(A, B)

A = [0,2,3,4,1,2]
B = [1,3,4,0,0,2]

# check that your native implementation and 3rd party library function produce the same values
assert round(cosine_similarity(A,B),4) == round(1 - cosine(A,B),4)
# +
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
# take two very similar sentences, should have high similarity
# edit these sentences to become less similar, and the similarity score should decrease
data_corpus = ["John likes to watch movies. Mary likes movies too.",
               "John also likes to watch football games"]
# fit_transform returns a sparse matrix; densify it so rows can be indexed.
X = vectorizer.fit_transform(data_corpus)
X = X.toarray()
# NOTE(review): get_feature_names() is removed in scikit-learn >= 1.2;
# newer versions require get_feature_names_out() -- confirm installed version.
print(vectorizer.get_feature_names())
# Cosine similarity between the two documents' bag-of-words rows.
cosine_similarity(X[0], X[1])
# -
# # Challenge: Use the Example Below to Create Your Own Cosine Similarity Function
#
# ### Create a list of all the **vocabulary $V$**
#
# Using **`sklearn`**'s **`CountVectorizer`**:
# ```python
# from sklearn.feature_extraction.text import CountVectorizer
# vectorizer = CountVectorizer()
# data_corpus = ["John likes to watch movies. Mary likes movies too",
# "John also likes to watch football games, but he likes to watch movies on occasion as well"]
# X = vectorizer.fit_transform(data_corpus)
# V = vectorizer.get_feature_names()
# ```
#
# #### Native Implementation:
# ```python
# def get_vocabulary(sentences):
# vocabulary = {} # create an empty set - question: Why not a list?
# for sentence in sentences:
# # this is a very crude form of "tokenization", would not actually use in production
# for word in sentence.split(" "):
# if word not in vocabulary:
# vocabulary.add(word)
# return vocabulary
# ```
#
# ### Create your Bag of Words model
# ```python
# X = X.toarray()
# print(X)
# ```
# Your console output:
# ```python
# [[0 0 0 1 2 1 2 1 1 1]
# [1 1 1 1 1 0 0 1 0 1]]
# ```
# Bag-of-words count vectors for the two documents (copied from the cell above).
vectors = [[0,0,0,1,2,1,2,1,1,1],
           [1,1,1,1,1,0,0,1,0,1]]
import math

def find_norm(vector):
    """Euclidean (L2) norm computed by hand: sqrt of the sum of squares."""
    return math.sqrt(sum(component ** 2 for component in vector))
# Compare NumPy's norm with the hand-rolled version -- they should agree.
norm(vectors[0]) # Numpy
find_norm(vectors[0]) # your own
# Cosine similarity from first principles: dot(u, v) / (||u|| * ||v||).
dot_product(vectors[0], vectors[1]) / (find_norm(vectors[0]) * find_norm(vectors[1]))
# NOTE: this import shadows the local cosine_similarity defined earlier;
# sklearn's version takes a matrix of rows and returns the full pairwise matrix.
from sklearn.metrics.pairwise import cosine_distances, cosine_similarity
cosine_similarity(vectors)
|
week2/Linear Algebra, Distance and Similarity.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="LohRBS18e9hB"
# # Imports
# + id="ijJQU6FJe9hG"
import os.path as osp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
# + id="58JBysxlPRmE"
def linear(X, a, b):
    """Linear model a*X + b (X may be a scalar or an array-like of inputs)."""
    return b + a * X
# + id="Q15wTQ_QPWHR"
def rational(X, a, b):
    """Rational model a / (1 + b*X)."""
    denominator = 1 + b * X
    return a / denominator
# + id="gBQuzsUAPY9I"
def loss(func, X, a, b, y_true, reduction=True):
    """Sum-of-squares loss of an approximation against y_true.

    With reduction=True returns the scalar sum of squared residuals;
    otherwise returns the raw residuals func(X, a, b) - y_true.
    """
    residual = func(X, a, b) - y_true
    if not reduction:
        return residual
    return np.sum(residual ** 2)
# + [markdown] id="wAz3oYTCe9hL"
# # Load all data
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="Xl_Jx-She9hL" outputId="bf350f54-5adb-4271-a759-032a70d6d895"
# Ground-truth parameters of the generating line (a = alpha, b = beta).
alpha = 0.6121701756176187
beta = 0.16906975434563642
# Data from previous task: the noisy 2-D points and the direct-search results.
opt_data = pd.read_csv('task2_data_2d.csv')
approx_direct = pd.read_csv('task2_2d.csv')
# NOTE: sort_values returns a sorted copy; here it only affects the cell's
# displayed output -- approx_direct itself keeps its file order.
approx_direct.sort_values(['approx_func', 'method'])
# Data from this task (gradient-based methods).
approx_grad = pd.read_csv('task3.csv')
approx_grad.sort_values(['approx_func', 'method'])
# + [markdown] id="egTFPrkie9hN"
# # Charts
# + id="EbZRmRPUe9hN"
# Default figure size for all charts below.
plt.rcParams["figure.figsize"] = (10, 8)
# Map the method ids used in the CSV files to human-readable legend labels.
METHODS = {'brute_force_opt': 'Brute Force',
           'gauss_opt': 'Gauss',
           'nelder_mead_opt': 'Nelder-Mead',
           'gd': 'Gradient Descent',
           'conj_gd': 'Conjugate Gradient Descent',
           'newton': 'Newton\'s',
           'lm': 'Levenberg-Marquardt'}
# + colab={"base_uri": "https://localhost:8080/", "height": 544} id="Qh76604ve9hO" outputId="90b6825e-c3f6-4073-a3e7-0024d2d23f77"
# Scatter the noisy observations, then overlay every fitted line on top.
plt.scatter(opt_data['X'], opt_data['y'])
# Loss of the true generating parameters, shown as a reference in the legend.
true_loss = loss(linear, opt_data['X'], alpha, beta, opt_data['y'])
plt.plot(opt_data['X'], opt_data['y_clean'], c='blue', label=f'Generating line, a {alpha:.2f}, b {beta:.2f}, loss {true_loss:.1f}')
# Direct-search methods (results loaded from the previous task).
for method, color in zip(['brute_force_opt', 'gauss_opt', 'nelder_mead_opt'],
                         ['green', 'red', 'purple']):
    # Select this method's fitted (a, b) for the linear approximation.
    row = approx_direct[(approx_direct['method'] == method) & (approx_direct['approx_func'] == 'linear_approx')]
    a, b = row['a'].values[0], row['b'].values[0]
    y_pred = linear(opt_data['X'], a, b)
    loss_value = row['loss'].values[0]
    iters = row['iterations'].values[0]
    plt.plot(opt_data['X'], y_pred, c=color, label=f'{METHODS[method]}: a {a:.2f}, b {b:.2f}, loss {loss_value:.1f}, iters {iters}')
# Gradient-based methods (results produced in this task).
for method, color in zip(['gd', 'conj_gd', 'newton', 'lm'],
                         ['peru', 'olive', 'lime', 'magenta']):
    row = approx_grad[(approx_grad['method'] == method) & (approx_grad['approx_func'] == 'linear')]
    a, b = row['a'].values[0], row['b'].values[0]
    y_pred = linear(opt_data['X'], a, b)
    loss_value = row['loss'].values[0]
    iters = row['iterations'].values[0]
    plt.plot(opt_data['X'], y_pred, c=color, label=f'{METHODS[method]}: a {a:.2f}, b {b:.2f}, loss {loss_value:.1f}, iters {iters}')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend()
plt.suptitle('Linear approximates', fontsize=20)
plt.show();
# + colab={"base_uri": "https://localhost:8080/", "height": 544} id="cpDytbyXe9hP" outputId="176c215a-d6f4-4c68-eb65-7325df819252"
# Same chart as above, but for the rational approximation a / (1 + b*X).
plt.scatter(opt_data['X'], opt_data['y'])
true_loss = loss(linear, opt_data['X'], alpha, beta, opt_data['y'])
plt.plot(opt_data['X'], opt_data['y_clean'], c='blue', label=f'Generating line, loss {true_loss:.1f}')
# Direct-search methods (previous task's results).
for method, color in zip(['brute_force_opt', 'gauss_opt', 'nelder_mead_opt'],
                         ['green', 'red', 'purple']):
    row = approx_direct[(approx_direct['method'] == method) & (approx_direct['approx_func'] == 'rational_approx')]
    a, b = row['a'].values[0], row['b'].values[0]
    y_pred = rational(opt_data['X'], a, b)
    loss_value = row['loss'].values[0]
    iters = row['iterations'].values[0]
    plt.plot(opt_data['X'], y_pred, c=color, label=f'{METHODS[method]}: a {a:.2f}, b {b:.2f}, loss {loss_value:.1f}, iters {iters}')
# Gradient-based methods (this task's results).
for method, color in zip(['gd', 'conj_gd', 'newton', 'lm'],
                         ['peru', 'olive', 'lime', 'magenta']):
    row = approx_grad[(approx_grad['method'] == method) & (approx_grad['approx_func'] == 'rational')]
    a, b = row['a'].values[0], row['b'].values[0]
    y_pred = rational(opt_data['X'], a, b)
    loss_value = row['loss'].values[0]
    iters = row['iterations'].values[0]
    plt.plot(opt_data['X'], y_pred, c=color, label=f'{METHODS[method]}: a {a:.2f}, b {b:.2f}, loss {loss_value:.1f}, iters {iters}')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend()
plt.suptitle('Rational approximates', fontsize=20)
plt.show();
|
task3/charts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="AOpGoE2T-YXS"
# ##### Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License").
#
# # Neural Machine Translation with Attention
#
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/sequences/_nmt.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
# Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/sequences/_nmt.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
# View source on GitHub</a>
# </td>
# </table>
#
# + [markdown] colab_type="text" id="CiwtNgENbx2g"
# # This notebook is still under construction! Please come back later.
#
#
# This notebook trains a sequence to sequence (seq2seq) model for Spanish to English translation using TF 2.0 APIs. This is an advanced example that assumes some knowledge of sequence to sequence models.
#
# After training the model in this notebook, you will be able to input a Spanish sentence, such as *"¿todavia estan en casa?"*, and return the English translation: *"are you still at home?"*
#
# The translation quality is reasonable for a toy example, but the generated attention plot is perhaps more interesting. This shows which parts of the input sentence has the model's attention while translating:
#
# <img src="https://tensorflow.org/images/spanish-english.png" alt="spanish-english attention plot">
#
# Note: This example takes approximately 10 minutes to run on a single P100 GPU.
# + colab={} colab_type="code" id="tnxXKDjq3jEL"
from __future__ import absolute_import, division, print_function
import collections
import io
import itertools
import os
import random
import re
import time
import unicodedata
import numpy as np
import tensorflow as tf # TF2
assert tf.__version__.startswith('2')
import matplotlib.pyplot as plt
print(tf.__version__)
# + [markdown] colab_type="text" id="wfodePkj3jEa"
# ## Download and prepare the dataset
#
# We'll use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the format:
#
# ```
# May I borrow this book? ¿Puedo tomar prestado este libro?
# ```
#
# There are a variety of languages available, but we'll use the English-Spanish dataset. For convenience, we've hosted a copy of this dataset on Google Cloud, but you can also download your own copy. After downloading the dataset, here are the steps we'll take to prepare the data:
#
# 1. Clean the sentences by removing special characters.
# 1. Add a *start* and *end* token to each sentence.
# 1. Create a word index and reverse word index (dictionaries mapping from word → id and id → word).
# 1. Pad each sentence to a maximum length.
# + colab={} colab_type="code" id="kRVATYOgJs1b"
# TODO(brianklee): This preprocessing should ideally be implemented in TF
# because preprocessing should be exported as part of the SavedModel.

# Converts the unicode file to ascii
# https://stackoverflow.com/a/518232/2809427
def unicode_to_ascii(s):
    """Strip accents: NFD-decompose s, then drop combining marks ('Mn')."""
    return ''.join(c for c in unicodedata.normalize('NFD', s)
                   if unicodedata.category(c) != 'Mn')


START_TOKEN = u'<start>'
END_TOKEN = u'<end>'


def preprocess_sentence(w):
    """Lowercase/ASCII-fold a sentence, space out punctuation, and wrap it
    in start/end tokens so the model knows when to start and stop predicting."""
    # remove accents; lowercase everything
    w = unicode_to_ascii(w.strip()).lower()
    # creating a space between a word and the punctuation following it
    # eg: "he is a boy." => "he is a boy ."
    # https://stackoverflow.com/a/3645931/3645946
    w = re.sub(r'([?.!,¿])', r' \1 ', w)
    # replacing everything with space except (a-z, '.', '?', '!', ',')
    w = re.sub(r'[^a-z?.!,¿]+', ' ', w)
    # Consistency fix: use the START_TOKEN/END_TOKEN constants defined above
    # (they were declared but never used) instead of repeating the literals.
    w = START_TOKEN + ' ' + w + ' ' + END_TOKEN
    return w
# + colab={} colab_type="code" id="PbX9r8blNIUu"
en_sentence = u"May I borrow this book?"
sp_sentence = u"¿Puedo tomar prestado este libro?"
print(preprocess_sentence(en_sentence))
print(preprocess_sentence(sp_sentence))
# + [markdown] colab_type="text" id="RNWJCJIwPSZp"
# Training on the complete dataset of >100,000 sentences will take a long time. To train faster, we can limit the size of the dataset (of course, translation quality degrades with less data).
#
# + colab={} colab_type="code" id="OHn4Dct23jEm"
def load_anki_data(num_examples=None):
    """Download the Anki English/Spanish corpus and fit word-level tokenizers.

    Args:
      num_examples: if given, keep only the first `num_examples` sentence
        pairs (the file is ordered shortest-to-longest, so this keeps the
        shortest examples).

    Returns:
      (eng_spa_pairs, eng_tokenizer, spa_tokenizer): a list of
      [english, spanish] preprocessed sentence strings and a fitted Keras
      Tokenizer per language.
    """
    # Download the file
    path_to_zip = tf.keras.utils.get_file(
        'spa-eng.zip', origin='http://download.tensorflow.org/data/spa-eng.zip',
        extract=True)
    path_to_file = os.path.dirname(path_to_zip) + '/spa-eng/spa.txt'
    with io.open(path_to_file, 'rb') as f:
        lines = f.read().decode('utf8').strip().split('\n')
    # Data comes as tab-separated strings; one per line.
    eng_spa_pairs = [[preprocess_sentence(w) for w in line.split('\t')] for line in lines]
    # The translations file is ordered from shortest to longest, so slicing from
    # the front will select the shorter examples. This also speeds up training.
    if num_examples is not None:
        eng_spa_pairs = eng_spa_pairs[:num_examples]
    eng_sentences, spa_sentences = zip(*eng_spa_pairs)
    # filters='' keeps punctuation tokens and the <start>/<end> markers intact.
    eng_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
    spa_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
    eng_tokenizer.fit_on_texts(eng_sentences)
    spa_tokenizer.fit_on_texts(spa_sentences)
    return (eng_spa_pairs, eng_tokenizer, spa_tokenizer)
# + colab={} colab_type="code" id="mfI6wprNPSZs"
# Cap the corpus at 30k pairs so training finishes in a reasonable time.
NUM_EXAMPLES = 30000
sentence_pairs, english_tokenizer, spanish_tokenizer = load_anki_data(NUM_EXAMPLES)
# + colab={} colab_type="code" id="eAY9k49G3jE_"
# Turn our english/spanish pairs into TF Datasets by mapping words -> integers.
def make_dataset(eng_spa_pairs, eng_tokenizer, spa_tokenizer):
    """Build a tf.data.Dataset of (english_ids, spanish_ids) padded sequences.

    Each sentence becomes a sequence of integer token ids, padded at the end
    ('post') to the length of the longest sentence in its language.
    """
    eng_sentences, spa_sentences = zip(*eng_spa_pairs)
    eng_ints = eng_tokenizer.texts_to_sequences(eng_sentences)
    spa_ints = spa_tokenizer.texts_to_sequences(spa_sentences)
    # Pad with zeros at the end; Keras' Tokenizer reserves id 0, so 0 doubles
    # as the padding id.
    padded_eng_ints = tf.keras.preprocessing.sequence.pad_sequences(
        eng_ints, padding='post')
    padded_spa_ints = tf.keras.preprocessing.sequence.pad_sequences(
        spa_ints, padding='post')
    dataset = tf.data.Dataset.from_tensor_slices((padded_eng_ints, padded_spa_ints))
    return dataset
# + colab={} colab_type="code" id="sEyV3Vd4PSZy"
# Train/test split: shuffle the pairs in place, then hold out 20% for evaluation.
train_size = int(len(sentence_pairs) * 0.8)
random.shuffle(sentence_pairs)
train_sentence_pairs, test_sentence_pairs = sentence_pairs[:train_size], sentence_pairs[train_size:]
# Show length
len(train_sentence_pairs), len(test_sentence_pairs)
# + colab={} colab_type="code" id="_vdlLeT9PSZ4"
# Show the word -> id mapping for the first training pair in both languages.
_english, _spanish = train_sentence_pairs[0]
_eng_ints, _spa_ints = english_tokenizer.texts_to_sequences([_english])[0], spanish_tokenizer.texts_to_sequences([_spanish])[0]
print("Source language: ")
print('\n'.join('{:4d} ----> {}'.format(i, word) for i, word in zip(_eng_ints, _english.split())))
print("Target language: ")
print('\n'.join('{:4d} ----> {}'.format(i, word) for i, word in zip(_spa_ints, _spanish.split())))
# + colab={} colab_type="code" id="4QILQkOs3jFG"
# Set up datasets
BATCH_SIZE = 64
train_ds = make_dataset(train_sentence_pairs, english_tokenizer, spanish_tokenizer)
test_ds = make_dataset(test_sentence_pairs, english_tokenizer, spanish_tokenizer)
# drop_remainder=True keeps every batch exactly BATCH_SIZE, so the model can
# assume a fixed batch dimension.
train_ds = train_ds.shuffle(len(train_sentence_pairs)).batch(BATCH_SIZE, drop_remainder=True)
test_ds = test_ds.batch(BATCH_SIZE, drop_remainder=True)
# + colab={} colab_type="code" id="YM69ROrxPSZ7"
# NOTE(review): `Dataset.output_shapes` was deprecated in TF 2.x in favor of
# `ds.element_spec` / `tf.compat.v1.data.get_output_shapes(ds)` — confirm the
# TF version this notebook targets.
print("Dataset outputs elements with shape ({}, {})".format(
    *train_ds.output_shapes))
# + [markdown] colab_type="text" id="TNfHIF71ulLu"
# ## Write the encoder and decoder model
#
# Here, we'll implement an encoder-decoder model with attention. The following diagram shows that each input word is assigned a weight by the attention mechanism which is then used by the decoder to predict the next word in the sentence.
#
# <img src="https://www.tensorflow.org/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism">
#
# The input is put through an encoder model which gives us the encoder output of shape *(batch_size, max_length, hidden_size)* and the encoder hidden state of shape *(batch_size, hidden_size)*.
#
# + colab={} colab_type="code" id="Pm0KGxEbPSaB"
# Hidden-state width of the encoder/decoder GRUs and the embedding size.
ENCODER_SIZE = DECODER_SIZE = 1024
EMBEDDING_DIM = 256
# Length of the (padded) Spanish sequences; used as the decoding length cap.
MAX_OUTPUT_LENGTH = train_ds.output_shapes[1][1]
def gru(units):
    """Return a GRU layer that emits both the full sequence and the final state."""
    return tf.keras.layers.GRU(units,
                               return_sequences=True,
                               return_state=True,
                               recurrent_activation='sigmoid',
                               recurrent_initializer='glorot_uniform')
# + colab={} colab_type="code" id="nZ2rI24i3jFg"
class Encoder(tf.keras.Model):
    """Embeds source-token ids and runs them through a GRU.

    call() returns (output, state): the per-timestep GRU outputs of shape
    (batch, max_length, encoder_size) and the final hidden state of shape
    (batch, encoder_size).
    """

    def __init__(self, vocab_size, embedding_dim, encoder_size):
        super(Encoder, self).__init__()
        self.embedding_dim = embedding_dim
        self.encoder_size = encoder_size
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = gru(encoder_size)

    def call(self, x, hidden):
        # x: (batch, max_length) int ids -> (batch, max_length, embedding_dim)
        x = self.embedding(x)
        output, state = self.gru(x, initial_state=hidden)
        return output, state

    def initial_hidden_state(self, batch_size):
        # All-zero state used at the start of every sequence.
        return tf.zeros((batch_size, self.encoder_size))
# + [markdown] colab_type="text" id="kvzxABcg91RS"
#
# For the decoder, we're using *Bahdanau attention*. Here are the equations that are implemented:
#
# <img src="https://www.tensorflow.org/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800">
# <img src="https://www.tensorflow.org/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800">
#
# Lets decide on notation before writing the simplified form:
#
# * FC = Fully connected (dense) layer
# * EO = Encoder output
# * H = hidden state
# * X = input to the decoder
#
# And the pseudo-code:
#
# * `score = FC(tanh(FC(EO) + FC(H)))`
# * `attention weights = softmax(score, axis = 1)`. Softmax by default is applied on the last axis but here we want to apply it on the *1st axis*, since the shape of score is *(batch_size, max_length, hidden_size)*. `Max_length` is the length of our input. Since we are trying to assign a weight to each input, softmax should be applied on that axis.
# * `context vector = sum(attention weights * EO, axis = 1)`. Same reason as above for choosing axis as 1.
# * `embedding output` = The input to the decoder X is passed through an embedding layer.
# * `merged vector = concat(embedding output, context vector)`
# * This merged vector is then given to the GRU
#
# The shapes of all the vectors at each step have been specified in the comments in the code:
# + colab={} colab_type="code" id="yJ_B3mhW3jFk"
class BahdanauAttention(tf.keras.Model):
    """Additive (Bahdanau) attention: score = V(tanh(W1(EO) + W2(H)))."""

    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def call(self, hidden_state, enc_output):
        # enc_output shape = (batch_size, max_length, hidden_size)
        # (batch_size, hidden_size) -> (batch_size, 1, hidden_size) so the
        # decoder state broadcasts against every encoder timestep.
        hidden_with_time = tf.expand_dims(hidden_state, 1)
        # score shape == (batch_size, max_length, 1)
        score = self.V(tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time)))
        # attention_weights shape == (batch_size, max_length, 1); softmax over
        # axis=1 (the time axis) so the weights over input positions sum to 1.
        attention_weights = tf.nn.softmax(score, axis=1)
        # context_vector shape after sum = (batch_size, hidden_size): the
        # attention-weighted average of the encoder outputs.
        context_vector = attention_weights * enc_output
        context_vector = tf.reduce_sum(context_vector, axis=1)
        return context_vector, attention_weights
class Decoder(tf.keras.Model):
    """Single-step decoder: attends over the encoder output, then runs one GRU
    step. call() consumes one target token per invocation and returns
    (vocab logits, new hidden state, attention weights).
    """

    def __init__(self, vocab_size, embedding_dim, decoder_size):
        super(Decoder, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.decoder_size = decoder_size
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = gru(decoder_size)
        self.fc = tf.keras.layers.Dense(vocab_size)
        self.attention = BahdanauAttention(decoder_size)

    def call(self, x, hidden, enc_output):
        context_vector, attention_weights = self.attention(hidden, enc_output)
        # x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)
        # x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
        # passing the concatenated vector to the GRU
        output, state = self.gru(x)
        # output shape == (batch_size, hidden_size) after dropping the
        # length-1 time dimension
        output = tf.reshape(output, (-1, output.shape[2]))
        # x shape == (batch_size, vocab): unnormalized logits
        x = self.fc(output)
        return x, state, attention_weights
# + [markdown] colab_type="text" id="ErMEwyflPSaJ"
# ## Define a translate function
#
# Now, let's put the encoder and decoder halves together. The encoder step is fairly straightforward; we'll just reuse Keras's dynamic unroll. For the decoder, we have to make some choices about how to feed the decoder RNN. Overall the process goes as follows:
#
# 1. Pass the *input* through the *encoder* which return *encoder output* and the *encoder hidden state*.
# 2. The encoder output, encoder hidden state and the <START> token is passed to the decoder.
# 3. The decoder returns the *predictions* and the *decoder hidden state*.
# 4. The encoder output, hidden state and next token is then fed back into the decoder repeatedly. This has two different behaviors under training and inference:
# - during training, we use *teacher forcing*, where the correct next token is fed into the decoder, regardless of what the decoder emitted.
# - during inference, we use `tf.argmax(predictions)` to select the most likely continuation and feed it back into the decoder. Another strategy that yields more robust results is called *beam search*.
# 5. Repeat step 4 until either the decoder emits an <END> token, indicating that it's done translating, or we run into a hardcoded length limit.
#
# + colab={} colab_type="code" id="PqfjRVuRPSaK"
class NmtTranslator(tf.keras.Model):
    """End-to-end seq2seq translator: encoder + attention decoder.

    Decodes with teacher forcing when a target is supplied (training) and
    greedily (argmax) otherwise (inference).
    """

    def __init__(self, encoder, decoder, start_token_id, end_token_id):
        super(NmtTranslator, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        # (The token_id should match the decoder's language.)
        # Uses start_token_id to initialize the decoder.
        self.start_token_id = tf.constant(start_token_id)
        # Check for sequence completion using this token_id
        self.end_token_id = tf.constant(end_token_id)

    @tf.function
    def call(self, inp, target=None, max_output_length=MAX_OUTPUT_LENGTH):
        '''Translate an input.

        If target is provided, teacher forcing is used to generate the translation.

        Returns (predictions, attention): logits of shape
        (batch, time, vocab) and attention weights of shape
        (batch, time, input_length, 1).
        '''
        batch_size = inp.shape[0]
        hidden = self.encoder.initial_hidden_state(batch_size)
        enc_output, enc_hidden = self.encoder(inp, hidden)
        dec_hidden = enc_hidden
        # With a target we emit exactly target-length steps; otherwise decode
        # up to the hard cap.
        if target is not None:
            output_length = target.shape[1]
        else:
            output_length = max_output_length
        # TensorArrays accumulate per-step outputs inside the tf.function loop.
        predictions_array = tf.TensorArray(tf.float32, size=output_length - 1)
        attention_array = tf.TensorArray(tf.float32, size=output_length - 1)
        # Feed <START> token to start decoder.
        dec_input = tf.cast([self.start_token_id] * batch_size, tf.int32)
        # Keep track of which sequences have emitted an <END> token
        is_done = tf.zeros([batch_size], dtype=tf.bool)
        for i in tf.range(output_length - 1):
            dec_input = tf.expand_dims(dec_input, 1)
            predictions, dec_hidden, attention_weights = self.decoder(dec_input, dec_hidden, enc_output)
            # Zero out predictions for sequences that already finished so they
            # contribute nothing downstream (e.g. to the loss).
            predictions = tf.where(is_done, tf.zeros_like(predictions), predictions)
            # Write predictions/attention for later visualization.
            predictions_array = predictions_array.write(i, predictions)
            attention_array = attention_array.write(i, attention_weights)
            # Decide what to pass into the next iteration of the decoder.
            if target is not None:
                # if target is known, use teacher forcing
                dec_input = target[:, i + 1]
            else:
                # Otherwise, pick the most likely continuation
                dec_input = tf.argmax(predictions, axis=1, output_type=tf.int32)
            # Figure out which sentences just completed.
            is_done = tf.logical_or(is_done, tf.equal(dec_input, self.end_token_id))
            # Exit early if all our sentences are done.
            if tf.reduce_all(is_done):
                break
        # [time, batch, predictions] -> [batch, time, predictions]
        return tf.transpose(predictions_array.stack(), [1, 0, 2]), tf.transpose(attention_array.stack(), [1, 0, 2, 3])
# + [markdown] colab_type="text" id="_ch_71VbIRfK"
# ## Define the loss function
#
# Our loss function is a word-for-word comparison between true answer and model prediction.
#
# real = [<start>, 'This', 'is', 'the', 'correct', 'answer', '.', '<end>', '<oov>']
# pred = ['This', 'is', 'what', 'the', 'model', 'emitted', '.', '<end>']
#
# results in comparing
#
# This/This, is/is, the/what, correct/the, answer/model, ./emitted, <end>/.
# and ignoring the rest of the prediction.
#
# + colab={} colab_type="code" id="WmTHr5iV3jFr"
def loss_fn(real, pred):
    """Masked sequence cross-entropy: sum over time, mean over the batch.

    Args:
      real: (batch, time) int token ids including the leading <start> token.
      pred: (batch, time', vocab) logits emitted by the model.
    """
    # The prediction doesn't include the <start> token.
    real = real[:, 1:]
    # Cut down the prediction to the correct shape (we ignore extra words).
    pred = pred[:, :real.shape[1]]
    loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred)
    # Mask out padding/<OOV> positions (id 0) so they contribute no loss.
    # Use TF ops instead of np.equal so this also works on symbolic tensors
    # (e.g. inside a tf.function), and cast the mask to the loss dtype so the
    # multiplication does not fail on an int/float mismatch.
    mask = tf.cast(tf.not_equal(real, 0), loss_.dtype)
    loss_ = loss_ * mask
    # Sum loss over the time dimension, but average it over the batch dimension.
    return tf.reduce_mean(tf.reduce_sum(loss_, axis=1))
# + [markdown] colab_type="text" id="DMVWzzsfNl4e"
# ## Configure model directory
#
# We'll use one directory to save all of our relevant artifacts (summary logs, checkpoints, SavedModel exports, etc.)
# + colab={} colab_type="code" id="Zj8bXQTgNwrF"
# Where to save checkpoints, tensorboard summaries, etc.
MODEL_DIR = '/tmp/tensorflow/nmt_attention'

def apply_clean():
    """Delete MODEL_DIR (checkpoints, summaries, exports) if it exists."""
    if tf.io.gfile.exists(MODEL_DIR):
        print('Removing existing model dir: {}'.format(MODEL_DIR))
        tf.io.gfile.rmtree(MODEL_DIR)
# + colab={} colab_type="code" id="mO2d7e6gTlRA"
# Optional: remove existing data from previous runs
apply_clean()
# + colab={} colab_type="code" id="rlR-g2hR5Hl0"
# Summary writers for TensorBoard; buffered output is flushed every 10 seconds.
train_summary_writer = tf.summary.create_file_writer(
    os.path.join(MODEL_DIR, 'summaries', 'train'), flush_millis=10000)
test_summary_writer = tf.summary.create_file_writer(
    os.path.join(MODEL_DIR, 'summaries', 'eval'), flush_millis=10000, name='test')
# + colab={} colab_type="code" id="LttA5h8C8yOU"
# Set up all stateful objects.
# Vocab sizes are +1 because Keras' Tokenizer assigns ids starting at 1
# (id 0 is reserved for padding).
encoder = Encoder(len(english_tokenizer.word_index) + 1, EMBEDDING_DIM, ENCODER_SIZE)
decoder = Decoder(len(spanish_tokenizer.word_index) + 1, EMBEDDING_DIM, DECODER_SIZE)
# Start/end ids come from the *target* (Spanish) vocabulary.
start_token_id = spanish_tokenizer.word_index[START_TOKEN]
end_token_id = spanish_tokenizer.word_index[END_TOKEN]
model = NmtTranslator(encoder, decoder, start_token_id, end_token_id)

# TODO(brianklee): Investigate whether Adam defaults have changed and whether it affects training.
optimizer = tf.keras.optimizers.Adam(epsilon=1e-8)
# + colab={} colab_type="code" id="JH1TiGS5PSak"
# Checkpoints: capture the encoder, decoder and optimizer state so training
# can resume where it left off.
checkpoint_dir = os.path.join(MODEL_DIR, 'checkpoints')
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
checkpoint = tf.train.Checkpoint(
    encoder=encoder, decoder=decoder, optimizer=optimizer)
# Restore variables on creation if a checkpoint exists.
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
# + colab={} colab_type="code" id="ssRahnCqAVe6"
# SavedModel exports
export_path = os.path.join(MODEL_DIR, 'export')
# + [markdown] colab_type="text" id="qtdJxgsFZdHi"
# # Visualize the model's output
#
# Let's visualize our model's output. (It hasn't been trained yet, so it will output gibberish.)
#
# We'll use this visualization to check on the model's progress.
# + colab={} colab_type="code" id="3fGS9Jd3Zsai"
def plot_attention(attention, sentence, predicted_sentence):
    """Show `attention` as a heatmap: source words on x, predicted words on y."""
    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(1, 1, 1)
    ax.matshow(attention, cmap='viridis')
    fontdict = {'fontsize': 14}
    # The leading '' pads the label list so the word labels line up with the
    # matrix cells drawn by matshow.
    ax.set_xticklabels([''] + sentence.split(), fontdict=fontdict, rotation=90)
    ax.set_yticklabels([''] + predicted_sentence.split(), fontdict=fontdict)
    # One tick per word.
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
def ints_to_words(tokenizer, ints):
    """Map a sequence of token ids back to a space-separated string.

    Id 0 is the padding/out-of-vocabulary id and renders as '<OOV>'.
    """
    words = []
    for raw_id in ints:
        token_id = int(raw_id)
        words.append('<OOV>' if token_id == 0 else tokenizer.index_word[token_id])
    return ' '.join(words)
def sentence_to_ints(tokenizer, sentence):
    """Preprocess `sentence` and convert it to a 1-D int tensor of token ids."""
    sentence = preprocess_sentence(sentence)
    return tf.constant(tokenizer.texts_to_sequences([sentence])[0])
def translate_and_plot_ints(model, english_tokenizer, spanish_tokenizer, ints, target_ints=None):
    """Run translation on a sentence and plot an attention matrix.

    Sentence should be passed in as list of integers. If `target_ints` is
    given, the reference translation is printed alongside the prediction.
    """
    # Add a batch dimension of 1.
    ints = tf.expand_dims(ints, 0)
    predictions, attention = model(ints)
    # Greedy decode: take the most likely token at each step.
    prediction_ids = tf.squeeze(tf.argmax(predictions, axis=-1))
    attention = tf.squeeze(attention)
    sentence = ints_to_words(english_tokenizer, ints[0])
    predicted_sentence = ints_to_words(spanish_tokenizer, prediction_ids)
    print(u'Input: {}'.format(sentence))
    print(u'Predicted translation: {}'.format(predicted_sentence))
    if target_ints is not None:
        print(u'Correct translation: {}'.format(ints_to_words(spanish_tokenizer, target_ints)))
    plot_attention(attention, sentence, predicted_sentence)
def translate_and_plot_words(model, english_tokenizer, spanish_tokenizer, sentence, target_sentence=None):
    """Same as translate_and_plot_ints, but pass in a sentence as a string."""
    english_ints = sentence_to_ints(english_tokenizer, sentence)
    # Only tokenize the reference translation if one was provided.
    spanish_ints = sentence_to_ints(spanish_tokenizer, target_sentence) if target_sentence is not None else None
    translate_and_plot_ints(model, english_tokenizer, spanish_tokenizer, english_ints, target_ints=spanish_ints)
# + colab={} colab_type="code" id="sozn-RRBZzoa"
# Sanity-check the (still untrained) model end to end; output will be gibberish.
translate_and_plot_words(model, english_tokenizer, spanish_tokenizer, u"it's really cold here", u'hace mucho frio aqui')
# + [markdown] colab_type="text" id="tNdYJ8igFHTt"
# # Train the model
#
# + colab={} colab_type="code" id="z5wVVaWCY8nf"
def train(model, optimizer, dataset):
    """Trains model on `dataset` using `optimizer`.

    Runs one pass over the dataset with teacher forcing (passes `target` to
    the model), logging a loss summary every 10 steps and plotting a sample
    translation every 100 steps.
    """
    start = time.time()
    avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
    for inp, target in dataset:
        with tf.GradientTape() as tape:
            predictions, _ = model(inp, target=target)
            loss = loss_fn(target, predictions)
        avg_loss(loss)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        if tf.equal(optimizer.iterations % 10, 0):
            tf.summary.scalar('loss', avg_loss.result(), step=optimizer.iterations)
            avg_loss.reset_states()
            # 10 steps have elapsed since the last report.
            rate = 10 / (time.time() - start)
            print('Step #%d\tLoss: %.6f (%.2f steps/sec)' % (optimizer.iterations, loss, rate))
            start = time.time()
        if tf.equal(optimizer.iterations % 100, 0):
            # translate_and_plot_words(model, english_index, spanish_index, u"it's really cold here.", u'hace mucho frio aqui.')
            translate_and_plot_ints(model, english_tokenizer, spanish_tokenizer, inp[0], target[0])
def test(model, dataset, step_num):
    """Perform an evaluation of `model` on the examples from `dataset`.

    No teacher forcing here: the model decodes from its own predictions.
    Logs the average loss as a scalar summary tagged with `step_num`.
    """
    avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
    for inp, target in dataset:
        predictions, _ = model(inp)
        loss = loss_fn(target, predictions)
        avg_loss(loss)
    print('Model test set loss: {:0.4f}'.format(avg_loss.result()))
    tf.summary.scalar('loss', avg_loss.result(), step=step_num)
# + colab={} colab_type="code" id="ddefjBMa3jF0"
# Train for NUM_TRAIN_EPOCHS epochs, evaluating and checkpointing after each.
NUM_TRAIN_EPOCHS = 10
for i in range(NUM_TRAIN_EPOCHS):
    start = time.time()
    with train_summary_writer.as_default():
        train(model, optimizer, train_ds)
    end = time.time()
    print('\nTrain time for epoch #{} ({} total steps): {}'.format(
        i + 1, optimizer.iterations, end - start))
    with test_summary_writer.as_default():
        test(model, test_ds, optimizer.iterations)
    checkpoint.save(checkpoint_prefix)
# + colab={} colab_type="code" id="yvqeRHw2PSaq"
# TODO(brianklee): This seems to be complaining about input shapes not being set?
# tf.saved_model.save(model, export_path)
# + [markdown] colab_type="text" id="RTe5P5ioMJwN"
# ## Next steps
#
# * [Download a different dataset](http://www.manythings.org/anki/) to experiment with translations, for example, English to German, or English to French.
# * Experiment with training on a larger dataset, or using more epochs
#
# + colab={} colab_type="code" id="5k_dwb31ZmMX"
|
community/en/nmt.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# 
# # Use Azure Machine Learning Pipelines for batch prediction
# In this tutorial, you use Azure Machine Learning service pipelines to run a batch scoring image classification job. The example job uses the pre-trained [Inception-V3](https://arxiv.org/abs/1512.00567) CNN (convolutional neural network) Tensorflow model to classify unlabeled images. Machine learning pipelines optimize your workflow with speed, portability, and reuse so you can focus on your expertise, machine learning, rather than on infrastructure and automation. After building and publishing a pipeline, you can configure a REST endpoint to enable triggering the pipeline from any HTTP library on any platform.
#
# In this tutorial, you learn the following tasks:
#
# > * Configure workspace and download sample data
# > * Create data objects to fetch and output data
# > * Download, prepare, and register the model to your workspace
# > * Provision compute targets and create a scoring script
# > * Use ParallelRunStep to do batch scoring
# > * Build, run, and publish a pipeline
# > * Enable a REST endpoint for the pipeline
#
# If you don't have an Azure subscription, create a free account before you begin. Try the [free or paid version of Azure Machine Learning service](https://aka.ms/AMLFree) today.
# ## Prerequisites
#
# * Complete the [setup tutorial](https://docs.microsoft.com/azure/machine-learning/service/tutorial-1st-experiment-sdk-setup) if you don't already have an Azure Machine Learning service workspace or notebook virtual machine.
# * After you complete the setup tutorial, open the **tutorials/tutorial-pipeline-batch-scoring-classification.ipynb** notebook using the same notebook server.
#
# This tutorial is also available on [GitHub](https://github.com/Azure/MachineLearningNotebooks/tree/master/tutorials) if you wish to run it in your own [local environment](how-to-configure-environment.md#local). Run `pip install azureml-sdk[notebooks] azureml-pipeline-core azureml-pipeline-steps pandas requests` to get the required packages.
# ## Configure workspace and create datastore
# Create a workspace object from the existing workspace. A [Workspace](https://docs.microsoft.com/python/api/azureml-core/azureml.core.workspace.workspace?view=azure-ml-py) is a class that accepts your Azure subscription and resource information. It also creates a cloud resource to monitor and track your model runs. `Workspace.from_config()` reads the file **config.json** and loads the authentication details into an object named `ws`. `ws` is used throughout the rest of the code in this tutorial.
from azureml.core import Workspace

# Load authentication/subscription details from the local config.json.
ws = Workspace.from_config()
# ### Create a datastore for sample images
#
# Get the ImageNet evaluation public data sample from the public blob container `sampledata` on the account `pipelinedata`. Calling `register_azure_blob_container()` makes the data available to the workspace under the name `images_datastore`. Then specify the workspace default datastore as the output datastore, which you use for scoring output in the pipeline.
# +
from azureml.core.datastore import Datastore

# Register the public `sampledata` blob container (on the `pipelinedata`
# account) as a datastore named "images_datastore" in this workspace.
batchscore_blob = Datastore.register_azure_blob_container(ws,
                      datastore_name="images_datastore",
                      container_name="sampledata",
                      account_name="pipelinedata",
                      overwrite=True)

# The workspace default datastore will receive the scoring output.
def_data_store = ws.get_default_datastore()
# -
# ## Create data objects
#
# When building pipelines, `Dataset` objects are used for reading data from workspace datastores, and `PipelineData` objects are used for transferring intermediate data between pipeline steps.
#
# This batch scoring example only uses one pipeline step, but in use-cases with multiple steps, the typical flow will include:
#
# 1. Using `Dataset` objects as **inputs** to fetch raw data, performing some transformations, then **outputting** a `PipelineData` object.
# 1. Use the previous step's `PipelineData` **output object** as an *input object*, repeated for subsequent steps.
#
# For this scenario you create `Dataset` objects corresponding to the datastore directories for both the input images and the classification labels (y-test values). You also create a `PipelineData` object for the batch scoring output data.
# +
from azureml.core.dataset import Dataset
from azureml.pipeline.core import PipelineData

# File datasets over the input images and the ground-truth labels.
input_images = Dataset.File.from_files((batchscore_blob, "batchscoring/images/"))
label_ds = Dataset.File.from_files((batchscore_blob, "batchscoring/labels/"))
# Output data object for the batch-scoring results, written to the default
# datastore.
output_dir = PipelineData(name="scores",
                          datastore=def_data_store,
                          output_path_on_compute="batchscoring/results")
# -
# Next, we need to register the datasets with the workspace.
# Register the datasets so pipeline steps can reference them by name.
input_images = input_images.register(workspace = ws, name = "input_images")
label_ds = label_ds.register(workspace = ws, name = "label_ds", create_new_version=True)
# ## Download and register the model
# Download the pre-trained Tensorflow model to use it for batch scoring in the pipeline. First create a local directory where you store the model, then download and extract it.
# +
import os
import tarfile
import urllib.request

# Download the pre-trained Inception-V3 checkpoint and unpack it into ./models.
if not os.path.isdir("models"):
    os.mkdir("models")

response = urllib.request.urlretrieve("http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz", "model.tar.gz")
# Use a context manager so the archive handle is always closed (the original
# leaked it), and reject any member that would extract outside ./models
# ("tar slip" path traversal) before calling extractall on downloaded data.
with tarfile.open("model.tar.gz", "r:gz") as tar:
    models_root = os.path.realpath("models")
    for member in tar.getmembers():
        member_path = os.path.realpath(os.path.join("models", member.name))
        if member_path != models_root and not member_path.startswith(models_root + os.sep):
            raise RuntimeError("Unsafe path in archive: {}".format(member.name))
    tar.extractall("models")
# -
# Now you register the model to your workspace, which allows you to easily retrieve it in the pipeline process. In the `register()` static function, the `model_name` parameter is the key you use to locate your model throughout the SDK.
# +
import shutil
from azureml.core.model import Model

# Register the downloaded checkpoint under the key "inception"; the scoring
# script retrieves it later via Model.get_model_path("inception").
model = Model.register(model_path="models/inception_v3.ckpt",
                       model_name="inception",
                       tags={"pretrained": "inception"},
                       description="Imagenet trained tensorflow inception",
                       workspace=ws)
# remove the downloaded dir after registration if you wish
shutil.rmtree("models")
# -
# ## Create and attach remote compute target
#
# Azure Machine Learning service pipelines cannot be run locally, and only run on cloud resources. Remote compute targets are reusable virtual compute environments where you run experiments and work-flows. Run the following code to create a GPU-enabled [`AmlCompute`](https://docs.microsoft.com/python/api/azureml-core/azureml.core.compute.amlcompute.amlcompute?view=azure-ml-py) target, and attach it to your workspace. See the [conceptual article](https://docs.microsoft.com/azure/machine-learning/service/concept-compute-target) for more information on compute targets.
# +
from azureml.core.compute import AmlCompute, ComputeTarget
from azureml.exceptions import ComputeTargetException

compute_name = "gpu-cluster"

# checks to see if compute target already exists in workspace, else create it
try:
    compute_target = ComputeTarget(workspace=ws, name=compute_name)
except ComputeTargetException:
    # GPU VM, low-priority (cheaper but preemptible), autoscaling 0-1 nodes.
    config = AmlCompute.provisioning_configuration(vm_size="STANDARD_NC6",
                                                   vm_priority="lowpriority",
                                                   min_nodes=0,
                                                   max_nodes=1)
    compute_target = ComputeTarget.create(workspace=ws, name=compute_name, provisioning_configuration=config)
    compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
# -
# ## Write a scoring script
# To do the scoring, you create a batch scoring script `batch_scoring.py`, and write it to the current directory. The script takes a minibatch of input images, applies the classification model, and outputs the predictions to a results file.
#
# The script `batch_scoring.py` takes the following parameters, which get passed from the `ParallelRunStep` that you create later:
#
# - `--model_name`: the name of the model being used
# - `--labels_dir` : the directory path having the `labels.txt` file
#
# The pipelines infrastructure uses the `ArgumentParser` class to pass parameters into pipeline steps. For example, in the code below the first argument `--model_name` is given the property identifier `model_name`. In the `main()` function, this property is accessed using `Model.get_model_path(args.model_name)`.
# The pipeline in this tutorial only has one step and writes the output to a file, but for multi-step pipelines, you also use `ArgumentParser` to define a directory to write output data for input to subsequent steps. See the [notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/nyc-taxi-data-regression-model-building/nyc-taxi-data-regression-model-building.ipynb) for an example of passing data between multiple pipeline steps using the `ArgumentParser` design pattern.
# ## Build and run the pipeline
# Before running the pipeline, you create an object that defines the python environment and dependencies needed by your script `batch_scoring.py`. The main dependency required is Tensorflow, but you also install `azureml-defaults` for background processes from the SDK. Create a `RunConfiguration` object using the dependencies, and also specify Docker and Docker-GPU support.
# +
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.runconfig import DEFAULT_GPU_IMAGE

# Python environment for the scoring step: GPU TensorFlow plus the AzureML
# packages needed to mount the datasets on the compute target.
cd = CondaDependencies.create(pip_packages=["tensorflow-gpu==1.15.2",
                                            "azureml-core", "azureml-dataprep[fuse]"])
env = Environment(name="parallelenv")
env.python.conda_dependencies=cd
# GPU-enabled Docker base image to match the GPU compute target.
env.docker.base_image = DEFAULT_GPU_IMAGE
# -
# ### Create the configuration to wrap the inference script
# Create the pipeline step using the script, environment configuration, and parameters. Specify the compute target you already attached to your workspace as the target of execution of the script. We will use `ParallelRunConfig` to wrap the inference script, and later build the pipeline step from it with `ParallelRunStep`.
# +
from azureml.pipeline.steps import ParallelRunConfig

# Wrap scripts/batch_scoring.py: each of the 2 worker processes on the single
# node scores mini-batches of 20 files, and all results are appended to one
# output file ("append_row").
parallel_run_config = ParallelRunConfig(
    environment=env,
    entry_script="batch_scoring.py",
    source_directory="scripts",
    output_action="append_row",
    mini_batch_size="20",
    error_threshold=1,  # failure tolerance before aborting; see ParallelRunConfig docs
    compute_target=compute_target,
    process_count_per_node=2,
    node_count=1
)
# -
# ### Create the pipeline step
#
# A pipeline step is an object that encapsulates everything you need for running a pipeline including:
#
# * environment and dependency settings
# * the compute resource to run the pipeline on
# * input and output data, and any custom parameters
# * reference to a script or SDK-logic to run during the step
#
# There are multiple classes that inherit from the parent class [`PipelineStep`](https://docs.microsoft.com/python/api/azureml-pipeline-core/azureml.pipeline.core.builder.pipelinestep?view=azure-ml-py) to assist with building a step using certain frameworks and stacks. In this example, you use the [`ParallelRunStep`](https://docs.microsoft.com/en-us/python/api/azureml-contrib-pipeline-steps/azureml.contrib.pipeline.steps.parallelrunstep?view=azure-ml-py) class to define your step logic using a scoring script.
#
# An object reference in the `outputs` array becomes available as an **input** for a subsequent pipeline step, for scenarios where there is more than one step.
# +
from azureml.pipeline.steps import ParallelRunStep
from datetime import datetime

# Timestamped step name keeps individual runs distinguishable in the portal.
parallel_step_name = "batchscoring-" + datetime.now().strftime("%Y%m%d%H%M")

# Expose the labels dataset to the script under the name "labels_input".
label_config = label_ds.as_named_input("labels_input")

batch_score_step = ParallelRunStep(
    name=parallel_step_name,
    inputs=[input_images.as_named_input("input_images")],
    output=output_dir,
    arguments=["--model_name", "inception",
               "--labels_dir", label_config],
    side_inputs=[label_config],
    parallel_run_config=parallel_run_config,
    allow_reuse=False  # always rerun scoring even if inputs are unchanged
)
# -
# For a list of all classes for different step types, see the [steps package](https://docs.microsoft.com/python/api/azureml-pipeline-steps/azureml.pipeline.steps?view=azure-ml-py).
# ### Run the pipeline
#
# Now you run the pipeline. First create a `Pipeline` object with your workspace reference and the pipeline step you created. The `steps` parameter is an array of steps, and in this case there is only one step for batch scoring. To build pipelines with multiple steps, you place the steps in order in this array.
#
# Next use the `Experiment.submit()` function to submit the pipeline for execution. You also specify the custom parameter `param_batch_size`. The `wait_for_completion` function will output logs during the pipeline build process, which allows you to see current progress.
#
# Note: The first pipeline run takes roughly **15 minutes**, as all dependencies must be downloaded, a Docker image is created, and the Python environment is provisioned/created. Running it again takes significantly less time as those resources are reused. However, total run time depends on the workload of your scripts and processes running in each pipeline step.
# +
from azureml.core import Experiment
from azureml.pipeline.core import Pipeline
# A pipeline is an ordered array of steps; here there is a single
# batch-scoring step.
pipeline = Pipeline(workspace=ws, steps=[batch_score_step])
# Submit returns a run object; wait_for_completion streams build/run logs.
pipeline_run = Experiment(ws, "batch_scoring").submit(pipeline)
pipeline_run.wait_for_completion(show_output=True)
# -
# ### Download and review output
# Run the following code to download the output file created from the `batch_scoring.py` script, then explore the scoring results.
# +
# The batch-scoring step is the pipeline's only child run.
batch_run = next(pipeline_run.get_children())
batch_output = batch_run.get_output_data("scores")
batch_output.download(local_path="inception_results")
import pandas as pd
# Walk the downloaded directory to locate the file ParallelRunStep writes.
# NOTE(review): assumes `os` was imported in an earlier cell — confirm.
for root, dirs, files in os.walk("inception_results"):
    for file in files:
        if file.endswith("parallel_run_step.txt"):
            result_file = os.path.join(root,file)
# Each line has the form "<filename>: <prediction>".
df = pd.read_csv(result_file, delimiter=":", header=None)
df.columns = ["Filename", "Prediction"]
print("Prediction has ", df.shape[0], " rows")
df.head(10)
# -
# ## Publish and run from REST endpoint
# Run the following code to publish the pipeline to your workspace. In your workspace in the portal, you can see metadata for the pipeline including run history and durations. You can also run the pipeline manually from the portal.
#
# Additionally, publishing the pipeline enables a REST endpoint to rerun the pipeline from any HTTP library on any platform.
# +
# Publishing creates a versioned, reusable pipeline with a REST endpoint.
published_pipeline = pipeline_run.publish_pipeline(
    name="Inception_v3_scoring", description="Batch scoring using Inception v3 model", version="1.0")
published_pipeline
# -
# To run the pipeline from the REST endpoint, you first need an OAuth2 Bearer-type authentication header. This example uses interactive authentication for illustration purposes, but for most production scenarios requiring automated or headless authentication, use service principal authentication as [described in this notebook](https://aka.ms/pl-restep-auth).
#
# Service principal authentication involves creating an **App Registration** in **Azure Active Directory**, generating a client secret, and then granting your service principal **role access** to your machine learning workspace. You then use the [`ServicePrincipalAuthentication`](https://docs.microsoft.com/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py) class to manage your auth flow.
#
# Both `InteractiveLoginAuthentication` and `ServicePrincipalAuthentication` inherit from `AbstractAuthentication`, and in both cases you use the `get_authentication_header()` function in the same way to fetch the header.
# +
from azureml.core.authentication import InteractiveLoginAuthentication
# Interactive login suits notebooks; use service principal auth for
# automated/headless scenarios.
interactive_auth = InteractiveLoginAuthentication()
auth_header = interactive_auth.get_authentication_header()
# -
# Get the REST url from the `endpoint` property of the published pipeline object. You can also find the REST url in your workspace in the portal. Build an HTTP POST request to the endpoint, specifying your authentication header. Additionally, add a JSON payload object with the experiment name and the batch size parameter. As a reminder, `process_count_per_node` is passed through to `ParallelRunStep` because you defined it as a `PipelineParameter` object in the step configuration.
#
# Make the request to trigger the run. Access the `Id` key from the response dict to get the value of the run id.
# +
import requests
rest_endpoint = published_pipeline.endpoint
# Trigger a pipeline run over HTTP; ParameterAssignments overrides the
# PipelineParameter defined in the step configuration.
response = requests.post(rest_endpoint,
                         headers=auth_header,
                         json={"ExperimentName": "batch_scoring",
                               "ParameterAssignments": {"process_count_per_node": 6}})
# +
try:
    response.raise_for_status()
except Exception:
    # Surface the endpoint plus full response details for debugging.
    raise Exception("Received bad response from the endpoint: {}\n"
                    "Response Code: {}\n"
                    "Headers: {}\n"
                    "Content: {}".format(rest_endpoint, response.status_code, response.headers, response.content))
run_id = response.json().get('Id')
print('Submitted pipeline run: ', run_id)
# -
# Use the run id to monitor the status of the new run. This will take another 10-15 min to run and will look similar to the previous pipeline run, so if you don't need to see another pipeline run, you can skip watching the full output.
# +
from azureml.pipeline.core.run import PipelineRun
from azureml.widgets import RunDetails
# Reconstruct a run object from its id and monitor it in the widget.
published_pipeline_run = PipelineRun(ws.experiments["batch_scoring"], run_id)
RunDetails(published_pipeline_run).show()
# -
# ## Clean up resources
#
# Do not complete this section if you plan on running other Azure Machine Learning service tutorials.
#
# ### Stop the notebook VM
#
# If you used a cloud notebook server, stop the VM when you are not using it to reduce cost.
#
# 1. In your workspace, select **Compute**.
# 1. Select the **Notebook VMs** tab in the compute page.
# 1. From the list, select the VM.
# 1. Select **Stop**.
# 1. When you're ready to use the server again, select **Start**.
#
# ### Delete everything
#
# If you don't plan to use the resources you created, delete them, so you don't incur any charges.
#
# 1. In the Azure portal, select **Resource groups** on the far left.
# 1. From the list, select the resource group you created.
# 1. Select **Delete resource group**.
# 1. Enter the resource group name. Then select **Delete**.
#
# You can also keep the resource group but delete a single workspace. Display the workspace properties and select **Delete**.
# ## Next steps
#
# In this machine learning pipelines tutorial, you did the following tasks:
#
# > * Built a pipeline with environment dependencies to run on a remote GPU compute resource
# > * Created a scoring script to run batch predictions with a pre-trained Tensorflow model
# > * Published a pipeline and enabled it to be run from a REST endpoint
#
# See the [how-to](https://docs.microsoft.com/azure/machine-learning/service/how-to-create-your-first-pipeline?view=azure-devops) for additional detail on building pipelines with the machine learning SDK.
|
projects/azure-ml-tutorials/machine-learning-pipelines-advanced/tutorial-pipeline-batch-scoring-classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/cdixson-ds/DS-Unit-2-Kaggle-Challenge/blob/master/LS_DS_223_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="BTV4OvfvOHem" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 2, Module 3*
#
# ---
# + [markdown] id="V_Pi7EaYOHe1" colab_type="text"
# # Cross-Validation
#
#
# ## Assignment
# - [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
# - [ ] Continue to participate in our Kaggle challenge.
# - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.
# - [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
# - [ ] Commit your notebook to your fork of the GitHub repo.
#
#
# You won't be able to just copy from the lesson notebook to this assignment.
#
# - Because the lesson was ***regression***, but the assignment is ***classification.***
# - Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.
#
# So you will have to adapt the example, which is good real-world practice.
#
# 1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
# 2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`
# 3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values)
# 4. If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html))
#
#
#
# ## Stretch Goals
#
# ### Reading
# - <NAME>, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation
# - <NAME>, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)
# - <NAME>, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation
# - <NAME>, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)
# - <NAME>, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85)
#
# ### Doing
# - Add your own stretch goals!
# - Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.
# - In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). Experiment with these alternatives.
# - _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:
#
# > You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...
#
# The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?
#
# + [markdown] id="JyM2Ds2cOHe6" colab_type="text"
# ### BONUS: Stacking!
#
# Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:
#
# ```python
# import pandas as pd
#
# # Filenames of your submissions you want to ensemble
# files = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']
#
# target = 'status_group'
# submissions = (pd.read_csv(file)[[target]] for file in files)
# ensemble = pd.concat(submissions, axis='columns')
# majority_vote = ensemble.mode(axis='columns')[0]
#
# sample_submission = pd.read_csv('sample_submission.csv')
# submission = sample_submission.copy()
# submission[target] = majority_vote
# submission.to_csv('my-ultimate-ensemble-submission.csv', index=False)
# ```
# + id="1kFx3_VOOHe_" colab_type="code" colab={}
# %%capture
import sys
# On Colab: read data from GitHub and install category_encoders.
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
    # !pip install category_encoders==2.*
# Locally: read data from the repo's data directory.
else:
    DATA_PATH = '../data/'
# + id="wqP2JICPOHfK" colab_type="code" colab={}
import pandas as pd
# Merge train_features.csv & train_labels.csv on their shared id column.
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# + id="fZF03TaVOHfU" colab_type="code" outputId="cac1daaf-f48b-4df1-a5b7-163eb40c429a" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Sanity check: row/column counts of the merged train set and test set.
train.shape, test.shape
# + id="b2UuYLoF0LET" colab_type="code" colab={}
#wrangle train and test sets
import numpy as np
def wrangle(X):
    """Clean a Tanzania waterpump features frame.

    Returns a new DataFrame with sentinel zeros replaced by NaN (plus
    *_MISSING indicator columns), duplicate/unusable columns dropped,
    date_recorded split into year/month/day components, and a pump-age
    feature (`years`) engineered.
    """
    # Work on a copy to prevent SettingWithCopyWarning on the caller's frame.
    X = X.copy()
    # About 3% of the time, latitude has small values near zero (outside
    # Tanzania), so treat these sentinel values as zero.
    X['latitude'] = X['latitude'].replace(-2e-08, 0)
    # Replace zeros with nulls (to impute later) and add missing indicators.
    cols_with_zeros = ['longitude', 'latitude', 'construction_year',
                       'gps_height', 'population']
    for col in cols_with_zeros:
        X[col] = X[col].replace(0, np.nan)
        X[col+ '_MISSING'] = X[col].isnull()
    # Drop duplicate columns.
    duplicates = ['quantity_group', 'payment_type']
    X = X.drop(columns=duplicates)
    # Drop recorded_by (never varies) and id (always varies, random).
    # BUG FIX: the list was built but the drop call was missing.
    unusable_variance = ['recorded_by', 'id']
    X = X.drop(columns=unusable_variance)
    # Convert date_recorded to datetime.
    X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
    # Extract components from date_recorded, then drop the original column.
    X['year_recorded'] = X['date_recorded'].dt.year
    X['month_recorded'] = X['date_recorded'].dt.month
    X['day_recorded'] = X['date_recorded'].dt.day
    X = X.drop(columns='date_recorded')
    # Engineer feature: years from construction_year to date_recorded.
    # BUG FIX: the original used `=` (chained assignment), which overwrote
    # year_recorded with construction_year instead of subtracting.
    X['years'] = X['year_recorded'] - X['construction_year']
    X['years_MISSING'] = X['years'].isnull()
    # Return the wrangled dataframe.
    return X
# + id="2LgwV-Oz1Zco" colab_type="code" colab={}
# Apply the same cleaning to both splits.
train = wrangle(train)
test = wrangle(test)
# + id="SlW_Q-gT1v3z" colab_type="code" colab={}
#The status_group column is the target
target = 'status_group'
# + id="RuUMiOg-2B0-" colab_type="code" colab={}
#Get a dataframe with all train columns except the target
train_features = train.drop(columns=[target])
# + id="FQNHYE842Mjn" colab_type="code" colab={}
#Get a list of numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
# + id="GvU--dH72b5n" colab_type="code" colab={}
#Get a series with the cardinality of the nonnumeric features
cardinality = train_features.select_dtypes(exclude='number').nunique()
# + id="NnVBwtyV2wDP" colab_type="code" colab={}
#Get a list of all categorical features with cardinality <= 50
#(high-cardinality categoricals are excluded from the model)
categorical_features = cardinality[cardinality<=50].index.tolist()
# + id="echyPQ_329tN" colab_type="code" colab={}
#Combine the numeric and low-cardinality categorical feature lists
features = numeric_features + categorical_features
# + id="xxhG9q8J77th" colab_type="code" colab={}
#Arrange data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_test = test[features]
# + [markdown] id="_SRavgqo_-tS" colab_type="text"
# Random Forests
# + id="e68JBvtJABMd" colab_type="code" outputId="824904fb-7df7-4f98-a8ce-ef81e5d0bf1c" colab={"base_uri": "https://localhost:8080/", "height": 54}
# %%time
from sklearn.preprocessing import StandardScaler
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
# Encode categoricals as ordinals, impute missing values with the median,
# scale, then fit a random forest.  (Scaling is harmless but unnecessary
# for tree-based models.)
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    StandardScaler(),
    RandomForestClassifier(n_jobs=-1, random_state=0)
)
#Fit on train (skipped here; the randomized search below refits anyway)
#pipeline.fit(X_train, y_train)
# + id="9J6zLP48CTCo" colab_type="code" colab={}
#ordinal encoding does not increase # of columns like onehotencoder
#print('X_train shape before encoding', X_train.shape)
#encoder = pipeline.named_steps['ordinalencoder']
#encoded = encoder.transform(X_train)
#print('X_train shape after encoding', encoded.shape)
# + id="IwaWsZDXS3QA" colab_type="code" colab={}
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from scipy.stats import randint, uniform
# Hyperparameter search space; keys follow make_pipeline's
# "<stepname>__<parameter>" naming convention.
param_distributions = {
    #'simpleimputer__stategy': ['mean', 'median'],
    #do range instead of randint
    'randomforestclassifier__n_estimators': randint(50,500),
    'randomforestclassifier__max_depth': [None],
    'randomforestclassifier__max_features': [None],
    #'randomforestclassifier__min_samples_split': [2],
    #'randomforestclassifier__min_samples_leaf': [4],
    # NOTE(review): min_impurity_split is deprecated and removed in newer
    # scikit-learn releases; delete this entry if fitting raises an error.
    'randomforestclassifier__min_impurity_split': [None]
}
#param_distributions = {
#    'classifier__simpleimputer__stategy': ['mean', 'median'],
#    'classifier__randomforestclassifier__n_estimators': randint(50,500),
#    'classifier__randomforestclassifier__max_depth': [5,10,15,20,None],
#    'classifier__randomforestclassifier__max_features': uniform(0,1)
#}
# + id="Y3ASmp_rUYAX" colab_type="code" colab={}
# 10 random draws, 3-fold cross-validation, accuracy scoring;
# n_jobs=-1 uses all available cores.
search = RandomizedSearchCV(
    pipeline,
    param_distributions = param_distributions,
    n_iter= 10,
    cv = 3,
    scoring='accuracy',
    verbose=10,
    return_train_score=True,
    n_jobs = -1
)
# + id="6o47citrVw4T" colab_type="code" outputId="23ce421f-de77-4241-f0a6-939843f3ac73" colab={"base_uri": "https://localhost:8080/", "height": 637}
search.fit(X_train, y_train)
# + id="w2EgPbZhrjiO" colab_type="code" outputId="7e138d0e-04cc-4cd8-f956-0a17eba2e035" colab={"base_uri": "https://localhost:8080/", "height": 35}
#best cross-validated accuracy score found by the search
search.best_score_
# + id="yRlpzoDrsBoZ" colab_type="code" outputId="50cc8f57-6739-42f6-c74d-0bb1fd8e51d9" colab={"base_uri": "https://localhost:8080/", "height": 90}
search.best_params_
# + id="LXQeQ67isFw_" colab_type="code" colab={}
# best_estimator_ is refit on the full training data by RandomizedSearchCV.
best_est = search.best_estimator_
# + id="E-DjHuPcsPU6" colab_type="code" colab={}
y_pred = best_est.predict(X_test)
# + id="LfJxHzVvuyeU" colab_type="code" colab={}
# Build the Kaggle submission from the sample file's structure.
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('cdixson_rand_search.csv', index=False)
|
LS_DS_223_assignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Intertemporal consumer problem
# Team: M&M
#
# Members: <NAME> (hbk716) & <NAME> (pkt593)
#
# Imports and set magics:
# +
import numpy as np
from scipy import optimize
from types import SimpleNamespace
import sympy as sm
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
plt.style.use('seaborn-whitegrid')
# autoreload modules when code is run
# %load_ext autoreload
# %autoreload 2
# local modules
import modelproject as mp
# -
# # Model description
# We consider a standard **intertemporal consumer utility function in two periods** known from Macro II where:
#
# * $C_t$ is consumption in period t
# * $Y_t^L$ is labour income in period t
# * $V_1$ is the intial wealth
# * $T_t$ is lump sum tax payment in period t
# * $\phi$ is the degree of impatience
# * $r$ is the real rental rate
#
# all for $t\in\{1,2\}$
# We also have $C_t > 0$ and $\phi > 0$
#
# $$ U = u(C_1) + \frac{u(C_2)}{1+\phi}, \qquad u^{''} < 0 $$
#
# Given **saving** in period 1 we can find **wealth in period 2**:
#
# $$ V_2 = (1+r)\bigg(V_1+Y_1^L-T_1-C_1\bigg) = (1+r)S_1 $$
#
# And **comsumption in period 2** becomes:
#
# $$ C_2 = V_2 + Y_2^L - T_2 $$
#
# Consolidating the two we get the **intertemporal budget constraint**:
#
# $$C_1 + \frac{C_2}{1+r} = V_1+Y_1^L-T_1+\frac{Y_2^L-T_2}{1+r} $$
#
# Inserting the expression for $C_2$ into the utility function we get:
#
# $$ U = u(C_1) + \frac{u((1+r)(V_1+Y_1^L-T_1-C_1)+Y_2^L-T_2)}{1+\phi} $$
#
# Assuming our consumer is risk averse we can use the following **utility function**:
#
# $$ u(C_t) = lnC_t $$
#
# Which gives us the following **maximisation problem** to solve:
#
# \begin{equation}
# C_1^* = \text{arg}\max_{C_1} \, lnC_1 + \frac{ln((1+r)(V_1+Y_1^L-T_1-C_1)+Y_2^L-T_2)}{1+\phi}
# \end{equation}
#
#
# Further more we know from solving the model analytical that the solution must satisfy:
#
# $$ u'(C_1) = \frac{1+r}{1+\phi}u'(C_2) $$
# # Algorithm
# Given the model above we can now describe our algorithm:
#
# **Problem:** Solve the maximisation problem above
# **Inputs:** The variables mentioned above
# **Outputs:** $C_1, \, C_2, \, U$
# **Algorithm:** `U_optimize()`
# * Use scipy to numerically optimize U given parameters
#
# # Solving the model
# For simplicity, we assume that income is the same in both periods. We set the level of impatience and rental rate equal to each other and close to zero, as the rental rate is close to zero right now. The initial endowment is also picked arbitrarily. Our above stated theory says that consumption must be the same in both periods with the following parameters.
# +
# a. Create simplenamespace and set parameter values
par = SimpleNamespace()
par.r = 0.02       # real rental rate
par.V_1 = 5        # initial wealth
par.Y_L1 = 2       # labour income, period 1
par.Y_L2 = 2       # labour income, period 2
par.T_1 = 0.5      # lump-sum tax, period 1
par.T_2 = 0.5      # lump-sum tax, period 2
par.phi = 0.02     # degree of impatience
# b. Compute optimal consumption in both periods and the implied utility
c_1star, c_2star, u_star = mp.u_optimise(par)
# c. Print solution
print(f'The agent will choose optimal consumption in period 1 = {c_1star:.2f}, which implies optimal consumption in period 2 = {c_2star:.2f} and utility = {u_star:.2f}')
# -
# Our computation is what we could expect, as the consumer has no preference for consumption in one of the two periods, as the rental rate equals the level of impatience. We infer that the result is correct. Now we will get to the visualising of the results.
# # Visualising consumption in both periods
# For visualising the results we will show how consumption in the two periods varies with income levels. We will also take a look at utility.
#
# Our method for visualising in 2d will be the following:
#
# 1. Create array with relevant variable we want to change
# 2. Create empty containers for $C_1$, $C_2$ and U
# 3. Maximise U looping over the array and save values of $C_1$, $C_2$ and U in containers
# 4. Plot the findings
#
# We have created a function that does the first 2 out of 4 steps. We will start with income:
# +
# a. Set number of grid points and create arrays of Y_L1's/Y_L2's plus
#    SEPARATE result containers for each experiment.
#    BUG FIX: the original reused one set of containers (c1_vec, c2_vec,
#    u_vec) for both loops, so the second loop overwrote the Y_L1 results
#    before they were plotted — the Y_L1 figures actually showed Y_L2 data.
par.N = 1000
Y_L1_vec, c1_vec_y1, c2_vec_y1, u_vec_y1 = mp.array(0.5,5, par.N)
Y_L2_vec, c1_vec_y2, c2_vec_y2, u_vec_y2 = mp.array(0.5,5, par.N)
# b. Loop the optimise function over each income grid, resetting the
#    parameter to its baseline value afterwards.
for i in range(par.N):
    par.Y_L1 = Y_L1_vec[i]
    c1_vec_y1[i], c2_vec_y1[i], u_vec_y1[i] = mp.u_optimise(par)
par.Y_L1 = 2
for i in range(par.N):
    par.Y_L2 = Y_L2_vec[i]
    c1_vec_y2[i], c2_vec_y2[i], u_vec_y2[i] = mp.u_optimise(par)
par.Y_L2 = 2
# c. Create graphs and plot each experiment against its own results
mp.two_figures(Y_L1_vec, c1_vec_y1, "Consumption in period 1", "$Y^L_1$", "$C_1$", Y_L1_vec, c2_vec_y1, "Consumption in period 2", "$Y^L_1$", "$C_2$")
mp.one_figure(Y_L1_vec, u_vec_y1, "Utility", "$Y^L_1$", "$U$")
mp.two_figures(Y_L2_vec, c1_vec_y2, "Consumption in period 1", "$Y^L_2$", "$C_1$", Y_L2_vec, c2_vec_y2, "Consumption in period 2", "$Y^L_2$", "$C_2$")
mp.one_figure(Y_L2_vec, u_vec_y2, "Utility", "$Y^L_2$", "$U$")
# -
# Above we see that both utility curves are concave. This is due to the specification of the utility function. We also see, not surprisingly, that when labour income increases, consumption in both periods goes up. We know that an increase in the labour income in period 1 increases overall income slightly more due to the rental rate. But as we see above it doesn't have a noticeable effect on utility or consumption.
# # How the rental rate and impatience affect allocation
# Now we would like to show how varying degrees of impatience influence consumption in both periods. Likewise we would like to illustrate how different levels of rental rates affect consumption in both periods. Our method is going to be the same as described above.
# Let's start with patience:
# +
# a. Create array of phi's and container for C_1*, C_2* and U*
phi_vec, c1_vec, c2_vec, u_vec = mp.array(0.001, 0.5, par.N)
# b. Loop the optimise function over the phi_vec array
for i in range(par.N):
    par.phi = phi_vec[i]
    c1_vec[i], c2_vec[i], u_vec[i] = mp.u_optimise(par)
# c. Create graph and plot
mp.two_figures(phi_vec, c1_vec, "Consumption in period 1", "$Impatience$", "$C_1$", phi_vec, c2_vec, "Consumption in period 2", "$Impatience$", "$C_2$")
mp.one_figure(phi_vec, u_vec, "Utility", "$Impatience$", "$Utility$")
# d. Reset phi to its baseline value so later cells are unaffected
par.phi = 0.02
# -
# Hence we find that when the level of impatience increases, consumption in period 1 increases while it decreases in period 2(not suprising), while the overall utility falls. This is a consequense of the concave utility functions, why the increased consumption in period 1 can't fully compensate for the drop in consumption in period 2.
# Next we turn to the rental rate:
# +
# a. Create array of r's and container for C_1*, C_2* and U*
r_vec, c1_vec, c2_vec, u_vec = mp.array(0.001,0.2, par.N)
# b. Loop the optimise function over the r_vec array
for i in range(par.N):
    par.r = r_vec[i]
    c1_vec[i], c2_vec[i], u_vec[i] = mp.u_optimise(par)
# c. Create graph and plot
mp.two_figures(r_vec, c1_vec, "Consumption in period 1", "$Rental \: rate$", "$C_1$", r_vec, c2_vec, "Consumption in period 2", "$Rental \: rate$", "$C_2$")
mp.one_figure(r_vec, u_vec, "Utility", "$Rental \: rate$", "$U$")
# d. Reset r to its baseline value so later cells are unaffected
par.r = 0.02
# -
# Above we see that consumption in period 1 is decreasing in r while consumption in period 2 is increasing, why we have that the substitution effect dominates the income effect. We also see that utility is increasing in the rental rate which is because of direct postive income effect a higher rental rate has.
#
# An interesting point to visualize is the combined effects of the rental rate and impatience on utility. We therefore set up a 3D function, where the combinations $r$ and $\phi$ are visualized.
# +
#To create the 3D graph, we first create our variables to input.
#We need three for the 3-dimensional space, who all need to be 2-dimensional
N = 100
shape_tuple = (N,N)
r_values = np.empty(shape_tuple)
phi_values = np.empty(shape_tuple)
u_values = np.empty(shape_tuple)
#After creating our empty arrays, we can now begin to fill them.
#We run the optimisation for every (r, phi) combination on a 0.00-0.99
#grid in steps of 0.01 (i/100 and j/100 for i, j in 0..N-1).
for i in range(N):
    for j in range(N):
        par.r = i/100
        par.phi = j/100
        r_values[i,j] = par.r
        phi_values[i,j] = par.phi
        c1, c2, u = mp.u_optimise(par)
        u_values[i,j] = u
#We now have our three filled arrays, so we can plot them together.
#We use a widget here for better visualization.
#NOTE: Rerun all cells to see the widget.
# %matplotlib widget
fig = plt.figure(figsize=(10,7))
ax = plt.axes(projection='3d')
ax.plot_surface(phi_values,r_values, u_values, rstride=1, cstride=1,
                cmap='viridis', edgecolor='none');
ax.set_xlabel('$\phi$')
ax.set_ylabel('$r$')
ax.set_zlabel('$U$')
ax.set_title('Utility for values of $\phi$ and $r$');
# -
# As we can see in the above graph, we come to the same conclusion as with our 2D graphs. Impatience decreases the overall utility for any given value of the rental rate, while the rental rate increases utility for any given rate of the impatience. We see that the maximum utility given that impatience and the rental rate would be non-fixed is the point, where the consumer has "limitless patience" and the rental rate is as high as it can get.
# # Extensions to the baseline model
# As an extension to our model we will give the consumer the option of a risk-bearing asset, while still having the option to invest in the risk-free asset, which is the equivalent of the rental rate in the baseline model.
# The **risky asset**, $R$, give an expected return of:
#
# $$ E[R] = \beta*(1+r_{rb}) + (1-\beta)*0 $$
#
# While the expected return of the **risk-free assest**, $RF$, is:
#
# $$ E[RF] = 1 + r_{rf} $$
#
# We also set the percentage of the savings used on the risk free asset equal to $\alpha$
#
# With this new addition to our model the **new intertemporal budget constraint** becomes:
#
# $$ C_1 + \frac{C_2}{\alpha(1+r_{rf})+\beta(1-\alpha)(1+r_{rb})} = V_1 + Y_1^L - T_1 + \frac{1}{\alpha(1+r_{rf})+\beta(1-\alpha)(1+r_{rb})}(Y_2^L-T_2) $$
#
# And the **new maximisation problem** becomes:
#
# $$ C_1^* = \text{arg}\max_{C_1, \, \alpha} \, lnC_1 + \frac{ln(\alpha(1+r_{rf})(V_1+Y_1^L-T_1-C_1)+\beta(1-\alpha)(1+r_{rb})(V_1+Y_1^L-T_1-C_1)+Y_2^L-T_2)}{1+\phi} \quad st. 0 \leq \alpha \leq 1$$
#
# Now we can begin to solve the model using the same method as before:
# +
# a. Create simplenamespace and set parameter values
par = SimpleNamespace()
par.r_rf = 0.02    # risk-free return
par.r_rb = 1.5     # return of the risky asset in the good state
par.V_1 = 5        # initial wealth
par.Y_L1 = 2       # labour income, period 1
par.Y_L2 = 2       # labour income, period 2
par.T_1 = 0.5      # lump-sum tax, period 1
par.T_2 = 0.5      # lump-sum tax, period 2
par.phi = 0.02     # degree of impatience
par.alpha = 0.5    # share of savings in the risk-free asset
par.beta = 0.5     # probability of the risky asset paying off
# b. Compute optimal consumption, portfolio share and utility
c_1star, c_2star, alpha_star, u_star = mp.u_optimise_ext(par)
# c. Print solution (disabled; values are printed in the next cell)
#print(f'The agent will choose optimal consumption in period 1 = {c_1star:.2f}, which implies optimal consumption in period 2 = {c_2star:.2f} and utility = {u_star:.2f}')
# -
# We were not able to figure out how to optimise over two variables (both alpha and C_1 in our case) but would like to implement that before the exam, as that's what the extension to the model actually suggests. Regardless, the extension shows that given the new risk-bearing asset and an $\alpha = 0.5$, consumption goes up in period 1 and down in period 2.
print(c_1star, c_2star, alpha_star, u_star )
# ## An implementation incorporating risk aversion
# $$ E[R] = \beta*(1+r_{rb}) + (1-\beta)*0 $$
#
# While the expected return of the **risk-free assest**, $RF$, is:
#
# $$ E[RF] = 1 + r_{rf} $$
# + [markdown] tags=[]
#
# $$ E(U) = u(C_1) + \frac{E\left(u(C_2)\right)}{1+\phi}, \qquad u^{''} < 0 $$
#
# Given **saving** in period 1 we can find **wealth in period 2**:
#
# $$S_{1} = V_1+Y_1^L-T_1-C_1 $$
#
#
# $$
# \begin{aligned}
# V_2 &= \begin{cases}
# \alpha\cdot(1+r_{rf})S_{1} + \left(1-\alpha\right)\left(1+r_{rb}\right)\cdot S_{1} & \text{with prob. } \beta \\
# \alpha\cdot(1+r_{rf})S_{1} & \text{with prob. } 1-\beta
# \end{cases}
# \end{aligned}
# $$
#
#
# And **comsumption in period 2** becomes:
#
# $$ C_2 = V_2 + Y_2^L - T_2 $$
#
# -
# Thus, utilty in period 2 is:
# $$
# \begin{aligned}
# C_{2} &= \begin{cases}
# \ln \left( \alpha\cdot(1+r_{rf})S_{1} + \left(1-\alpha\right)\left(1+r_{rb}\right)\cdot S_{1} + Y_{2}^{L}-T_{2} \right) & \text{with prob. } \beta \\
# \ln \left( \alpha\cdot(1+r_{rf})S_{1} + Y_{2}^{L}-T_{2} \right) & \text{with prob. } 1-\beta
# \end{cases}
# \end{aligned}
# $$
#
#
# And the expectation in period 1 is:
#
# $$
# E\left(u\left(C_{2}\right)\vert S_{1}, \alpha \right) = \beta\cdot\ln \left( \alpha\cdot(1+r_{rf})S_{1} + \left(1-\alpha\right)\left(1+r_{rb}\right)\cdot S_{1} + Y_{2}^{L}-T_{2} \right) + (1-\beta)\cdot\ln \left( \alpha\cdot(1+r_{rf})S_{1} + Y_{2}^{L}-T_{2} \right)
# $$
# With $S_{1} = V_1+Y_1^L-T_1-C_1$
#
# So the optimization problem becomes:
#
#
# $$ C_1^*,\alpha^* = \text{args}\max_{C_1, \, \alpha} \, ln( C_1) + \frac{\beta\cdot \left(\ln \left( \alpha\cdot(1+r_{rf})\left(V_1+Y_1^L-T_1-C_1\right) + \left(1-\alpha\right)\left(1+r_{rb}\right)\cdot\left(V_1+Y_1^L-T_1-C_1\right) + Y_{2}^{L}-T_{2} \right) \right) +(1-\beta) \cdot \left( \ln \left( \alpha\cdot(1+r_{rf})\left(V_1+Y_1^L-T_1-C_1\right) + Y_{2}^{L}-T_{2} \right) \right )}{1+\phi} \quad st. 0 \leq \alpha \leq 1$$
#
#
# An upper bound on $C_1$ is also imposed, in either possible states the agent must be able to pay all taxes in period 2 and also pay back any debt. (the possible $C_{2}$ must both be positive):
#
# $$
# \alpha\cdot(1+r_{rf})\left(V_1+Y_1^L-T_1-C_1\right) + \left(1-\alpha\right)\left(1+r_{rb}\right)\cdot\left(V_1+Y_1^L-T_1-C_1\right) + Y_{2}^{L}-T_{2}>0
# $$
# $$
# \land
# $$
# $$
# \alpha\cdot(1+r_{rf})\left(V_1+Y_1^L-T_1-C_1\right) + Y_{2}^{L}-T_{2} >0
# $$
#
#
#
# This could possible be implemented as a bound, if one derived C1 in both cases and the upper bound would be the minimum of those two
# +
# Parameters for the risk-aversion extension solved by u_optimise_ext2.
par = SimpleNamespace()
par.r_rf = 0.02    # risk-free return
par.r_rb = 1.2     # return of the risky asset in the good state
par.V_1 = 5        # initial wealth
par.Y_L1 = 2       # labour income, period 1
par.Y_L2 = 2       # labour income, period 2
par.T_1 = 0.5      # lump-sum tax, period 1
par.T_2 = 0.5      # lump-sum tax, period 2
par.phi = 0.02     # degree of impatience
par.alpha = 0.2    # initial share of savings in the risk-free asset
par.beta = 0.5     # probability of the risky asset paying off
# Solve for consumption in period 1, consumption in both period-2 states,
# the optimal portfolio share, and expected utility.
c_1star, c_2star_good,c_2star_bad, alpha_star, u_star = mp.u_optimise_ext2(par)
# -
print(c_1star, c_2star_good,c_2star_bad, alpha_star, u_star)
# # Conclusion
# In this assignment we have started with an two-period intertemporal consumer utility function and attempted to solve the maximisation problem presented using u_optimize in the scipy package. We have then attempted to chart the deeper mechanics of the model by visualizing how the model responds to changes in income levels across the two periods. We found that Consumption is very obviously improved by increased income, with a slighty bigger increase when the income rises in period 1. We have then visualized how the rental rate and impatience affects consumption and utility, finding that impatience decreases overall utility by consuming more in period 1, thereby removing some benefit from the rental rate. The rental rate increases the overall utility by consuming more in period 2. We also showed how different combinations of impatience and the rental rate affect utility, giving us the same conclusions as before. Lastly, we extent our model to encompass risk through risk-free and risky assets that also changes the consumer's behavior. While our implementation is limited, we do find that with an $\alpha$ = 0.5, we achieve increased consumption in period 1 compared to before.
#
# Further extensions to the model could be changing the utility function to be able to vary the degree of risk aversion. We could also have modelled labour, making the consumer have a choice between labour and freetime. To make the model more realistic the tax could also be calculated from a constant tax rate based on the labour income, instead of being lump sum. Also, simulating our extension model could reveal if the agent would get the same utility as in expectation.
|
modelproject/modelproject.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import cv2
from matplotlib import pyplot as plt
import matplotlib
import numpy as np
import time as t
print("OpenCV Version : {}".format(cv2.__version__))
# Default figure size for all plots in this notebook.
matplotlib.rcParams['figure.figsize'] = [6,4]
# +
import os
# Crop the art region out of every raw scan in ./imgs and write each crop
# to cropped_dragon_imgs/<i>.jpg.
img_list = list()  # NOTE(review): never appended to — see note before np.save below
for i, path in enumerate(os.listdir("imgs")):
    im = cv2.imread("imgs/" + path)
    # Drop the card border: rows 50-239, trim 30 px off each side.
    im_cropped = im[50:240, 30:-30]
    cv2.imwrite("cropped_dragon_imgs/{}.jpg".format(i), im_cropped)
    #plt.imshow(im_cropped)
    #plt.show()
# -
# NOTE(review): img_list is still empty here, so this saves an empty array.
# Presumably the crops were meant to be appended to img_list inside the loop —
# confirm intent before relying on dragon_imgs.npy.
np.save("dragon_imgs.npy", img_list)
# Sanity-check crop bounds on a single card scan and display the result.
im = cv2.imread('imgs/httpsmagiccards.infoscansenogw119.jpg')
im_cropped = im[30:280, 10:-10]
#print(im[50:220, 31:-31].shape)
#plt.imshow(im[45:220, 30:-30])
print(im_cropped.shape)
plt.imshow(im_cropped)
import os

# Crop the art region (rows 50-219, 31 px trimmed off each side) out of every
# scan in ./imgs, stack the crops into one array, preview one, and save it.
img_list = os.listdir("imgs")
img_buffer = [cv2.imread("imgs/" + name)[50:220, 31:-31] for name in img_list]
img_buffer_np = np.array(img_buffer)
plt.imshow(img_buffer_np[200])
np.save("imgs.npy", img_buffer_np)
def show_card_img(path):
    """Load one card scan from ./imgs, crop its art region, and display it."""
    card = cv2.imread("imgs/" + path)
    cropped = card[50:220, 31:-31]
    print(cropped.shape)
    plt.imshow(cropped)
    plt.show()
# Show the first ten card images.
# Fixed: `img_list[10]` is a single filename string, so the original loop
# iterated over its *characters*; a slice iterates over the filenames.
for path in img_list[:10]:
    show_card_img(path)
# Edge-detect one card with Canny. The contour-extraction experiment below is
# commented out, which leaves `contours` and `test` undefined (see notes).
im = cv2.imread('imgs/httpsmagiccards.infoscansen4e175.jpg')
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,100,200)
#plt.imshow(edges)
#plt.show()
#test = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
#_, contours,hierarchy = cv2.findContours(gray,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
#idx =0
#for cnt in contours:
#idx += 1
#x,y,w,h = cv2.boundingRect(cnt)
#roi=im[y:y+h,x:x+w]
#plt.imshow(roi)
#cv2.imwrite(str(idx) + '.jpg', roi)
#cv2.rectangle(im,(x,y),(x+w,y+h),(200,0,0),2)
plt.imshow(im[40:220, 30:-30])
# NOTE(review): `contours` is only assigned by the commented-out
# cv2.findContours call above, so the next line raises NameError as written.
x,y,w,h = cv2.boundingRect(contours[6])
plt.imshow(im[y:y+h,x:x+w])
# NOTE(review): `test` is likewise undefined (its assignment is commented out).
test[2]
|
extract image.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Basics of Signal Processing
# **Authors**: <NAME>, <NAME>, <NAME>
import numpy as np
import scipy
import matplotlib.pyplot as plt
from scipy.io import wavfile
import IPython.display as ipd
import scipy.signal as signal
import time
# Throughout this notebook, we will be working with a clip from [S<NAME>'s song, "Tom's Diner"](https://www.youtube.com/watch?v=FLP6QluMlrg). We will use `scipy` to read the audio data from the `.wav` file. It will return the sampling frequency `fs` as well as the audio samples.
# Load the clip: `fs` is the sampling rate in Hz, `audio` the raw samples.
fs, audio = wavfile.read("toms-diner.wav")
print(f"Loaded {audio.size} samples at a sampling rate of {fs}Hz")
ipd.Audio(audio, rate=fs)
# # Table of Contents
#
# I. Time Domain Filtering
#
# II. DFT
#
# III. Frequency Domain Filtering
#
# IV. Sampling Theory
#
# V. Spectral Analysis
# # I. Time Domain Filtering
#
# A discrete signal can be thought of as a function mapping integers to real values (i.e a function $f: \mathbb{N}\to\mathbb{R})$). This is the so-called "time-domain" representation of the signal because the integers often represent time in some sense.
#
# A system is a process which takes a signal as an input and returns a signal as an output. Digital systems which we use to process signals are called filters. Systems can have several different properties. Two important ones are linearity and time-invariance.
#
# **Linearity**: A system $H$ is linear if given input signal $x$, input signal $y$, and scalars $\alpha$ and $\beta$, $H(\alpha x + \beta y) = \alpha H(x) + \beta H(y)$
#
# **Time-Invariance**: A system is time-invariant when shifting the input signal in time results in an equal shift in time for the output signal (i.e if $H$ transforms $x[n]$ into $y[n]$, then $H$ transforms $x[n-N]$ into $y[n-N]$).
# ## I.a Linear Filtering
#
# When a system is Linear and Time-Invariant, we can characterize systems by their impulse response. The impulse response of a system $H$ is given by $h[n] = H(\delta[n])$ where $$\delta[n] = \begin{cases} 1 & \text{ if, } n=0\\ 0 & \text{ else.} \end{cases}$$
#
# This is useful because it means we can compute the response of the system by doing a **convolution** of the input with the impulse response.
#
# $$(x * y)[n] = \sum_{k=-\infty}^{\infty}x[k]y[n-k] $$
#
# For example, we can take a moving average by using the filter
# $$ h_{avg}[n] = \begin{cases} \frac{1}{5} & \text{ if } 0 \leq n \leq 4\\ 0 & \text{ else.}\end{cases} $$
#
# We can also define a so-called "edge detector" filter in order to detect edges in the audio.
# $$ h_{edge}[n] = \begin{cases} (-1)^n & \text{ if } 0 \leq n \leq 1\\ 0 & \text{ else.}\end{cases} $$
# +
# Plot the first 100 audio samples, then the impulse responses of the
# edge-detector (high-pass) and moving-average (low-pass) filters.
time_axis_ms = np.linspace(0, 100/44.1, 100)
plt.stem(time_axis_ms, audio[:100])
plt.xlabel("time (ms)")
plt.show()

# h_edge[n] = (-1)^n / 2 for n in {0, 1};  h_avg[n] = 1/5 for 0 <= n <= 4.
hi_pass = np.array([1.0, -1.0]) / 2.0
lo_pass = np.ones(5) / 5.0

for taps in (hi_pass, lo_pass):
    plt.stem(taps)
    plt.xlabel("samples")
    plt.show()
# +
# Convolve the first 100 samples with each filter; mode "same" keeps length 100.
# High-pass emphasizes rapid changes, low-pass smooths the waveform.
plt.stem(np.linspace(0, 100/44.1, 100), np.convolve(audio[:100], hi_pass, "same"))
plt.xlabel("time (ms)")
plt.show()
plt.stem(np.linspace(0, 100/44.1, 100), np.convolve(audio[:100], lo_pass, "same"))
plt.xlabel("time (ms)")
plt.show()
# -
# High-pass filtered song: keeps sharp transitions / high frequencies.
hi_pass_song = np.convolve(audio, hi_pass)
ipd.Audio(hi_pass_song, rate=fs)
# Low-pass filtered song.
# Fixed: this result was previously assigned to `hi_pass_song` as well — a
# copy-paste naming bug that silently overwrote the high-pass result.
lo_pass_song = np.convolve(audio, lo_pass)
ipd.Audio(lo_pass_song, rate=fs)
# ## I.b Autocorrelation
# Cross-correlation, in signal processing terms, is the process of convolving one signal with a flipped version of another. Cross-correlation produces a graph of correlation versus time, where correlation is the dot-product of the two signals at that particular point.
# - If you are looking for the timestamps of a particular noise within a longer signal with other sounds present, you may want to cross-correlate the two.
# - Cross-correlation is used in sonar to detect when the initial pulse (a known signal) is returned.
#
# Autocorrelation is the practice of cross-correlating a signal with itself. It is helpful for eliminating noise, as true frequencies will be preserved due to being periodic, while noise tends to be reduced.
#
# $$ r_x(n) = x[n] * x[-n] = \sum_{k=-\infty}^{\infty} x[k] x[n-k] $$
# +
#TODO: Change this value and see how the noise amplitude affects the signal before and after autocorrelation!
# Fixed: `noise_amplitude` was previously defined but never used — the noise
# was hard-coded to amplitude 1, so editing the value above had no effect.
# Default of 1 preserves the original behavior.
noise_amplitude = 1
# A 50-sample sine with a period of 16 samples.
sample = np.sin(2 * np.pi * np.arange(50)/16)
# Uniform noise on [-noise_amplitude, noise_amplitude].
noise = noise_amplitude * (2*np.random.random(50) - 1)
noisy_sample = sample+noise
# Autocorrelation = convolution of the signal with its time-reversed self.
autocorr = np.convolve(noisy_sample, np.flip(noisy_sample))
plt.stem(np.linspace(0,49/44.1, 50), noisy_sample)
plt.xlabel("time (ms)")
plt.show()
plt.stem(np.linspace(-49/44.1,50/44.1, 99), autocorr)
plt.xlabel("time (ms)")
plt.show()
# ipd.Audio(audio, rate=fs)
# -
# Autocorrelation will always have a peak in the middle, which will grow larger relative to the rest of the signal the more noisy your signal is. This peak has a strength equal to the overall power of the signal, since it occurs at an offset of zero (meaning the signal is completely overlapping with itself, and the magnitude is $\sum^{N}_{n=1} X[n]^2$.
#
# **Comprehension Question:**
#
# However, notice even when the signal is highly corrupted, you can still make out the base frequency in the autocorrelated signal. Why is this?
#
# **Answer:**
#
# Random noise tends to cancel when there is any offset, but pure frequencies still make it through. This is related to how convolution in the time domain equals multiplication in the frequency domain, and so pure frequencies will stand out above the noise when they are squared. We'll go over more about this later. Thus autocorrelation is often used to denoise signals.
#
# **Comprehension question:**
#
# Why is the signal contained within a triangular envelope?
#
# **Answer:**
#
# The length of the autocorrelation goes from -N to N, where N is the number of samples in the original signal. When some points are not overlapping, the non-overlapping points cannot contribute to the signal at all. The window this creates is effectively equivalent to convolving 2 boxes, which makes a triangular envelope.
# ## I.c Nonlinear Filtering
# Sometimes you end up with a signal that has salt and pepper noise (random bits set to 0 or 1) due to corruption or problems with a sensor. Nonlinear filtering, such as median filtering, applies a non-linear filter so that extremely high peaks made by these errors can be filtered out without disproportionately affecting the surrounding signal.
# Corrupt ~1% of samples with +32000 impulses and another ~1% with -32000
# (full-scale for 16-bit audio) to simulate salt-and-pepper noise.
salt_and_pepper = np.random.binomial(1, 0.01, size=audio.shape) * 32000 + np.random.binomial(1, 0.01, size=audio.shape) * -32000
audio_corrupted = audio+salt_and_pepper
plt.stem(np.linspace(0,999/44.1, 1000), audio_corrupted[:1000])
plt.xlabel("time (ms)")
plt.show()
ipd.Audio(audio_corrupted, rate=fs)
# +
# median filter docs: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.medfilt.html
# try changing the next cell to see how different mean and median filtering sound!
# Median filter (nonlinear): removes salt-and-pepper spikes without
# smearing them into neighbouring samples.
audio_medfilt = signal.medfilt(audio_corrupted, kernel_size=5)
# Length-5 mean filter (linear), for comparison with the median filter.
meanfilt = np.full(5, 0.2)
audio_meanfilt = np.convolve(audio_corrupted, meanfilt)
# -
ipd.Audio(audio_medfilt, rate=fs)
# # II. DFT
#
# Typically, when we look at signals, we look at them in the so-called time-domain. Each sample $x[k]$ represents the amplitude of the signal at time-step $k$. This tells us what the signal looks like. One question we might want to ask ourselves is _"How fast is the signal changing?"_
#
# For sinusoidal signals like $x[n] = \cos(\omega n)$ and $x[n] = \sin(\omega n)$, answering this question is easy because a larger $\omega$ means the signal is changing faster ($\omega$ is known as the angular frequency). For example, consider the plots below which each consist of 100 samples.
# +
# Two 100-sample cosines: 1 cycle vs. 5 cycles over the window, plotted
# side by side to illustrate slow vs. fast variation.
n = np.linspace(0, 100, 100)
slow_cos = np.cos(2 * np.pi * n / 100)
fast_cos = np.cos(2 * np.pi * 5 * n / 100)
plt.figure(figsize=(15, 7))
plt.subplot(1, 2, 1)
plt.stem(n, slow_cos, use_line_collection=True)
plt.title("$\cos\\left(\\frac{2\pi}{100} n\\right)$")
plt.subplot(1, 2, 2)
plt.title("$\cos\\left(\\frac{10\pi}{100} n\\right)$")
plt.stem(n, fast_cos, use_line_collection=True)
plt.show()
# -
# $\cos\left(\frac{10\pi}{100} t\right)$ is clearly changing a lot faster. If we allow ourselves to consider complex signals, then we can generalize sinusoids using the complex exponential $e^{j\omega}$. Just like real sinusoids, the $\omega$ in the signal $x[n] = e^{j\omega n}$ determines how fast the signal changes (i.e rotates around the unit circle). If we can somehow "project" our time-domain signal $x[n]$ onto a "basis" of complex exponential signals, then the coefficients $X[k]$ should tell us how much the signal changes.
#
# The Discrete Fourier Transform is the change of basis which we use for a finite, length-$N$ signal to understand how fast it is changing. The basis used in the DFT are the $N$th roots of unity (i.e the complex solutions to $z^N=1$). More specifically, the $k$th basis vector is given by $\phi_k[n] = e^{j\frac{2\pi}{N}kn}$. Using the complex inner product $\langle \vec{x}, \vec{y} \rangle = \vec{y}^*\vec{x}$, the DFT coefficients are given by
#
# $$X[k] = \langle x, \phi_k \rangle = \sum_{n=0}^{N-1}x[n]e^{-j\frac{2\pi}{N}kn}.$$
#
# From the DFT coefficients, we can recover the time-domain coefficients using the inverse DFT.
#
# $$x[n] = \frac{1}{N} \sum_{k=0}^{N-1}X[k]e^{j\frac{2\pi}{N}kn}.$$
#
# There are many ways to compute the DFT. The fastest method is the Fast Fourier Transform (FFT), which is an algorithm which computes the DFT. It is built into `numpy` as part of the `fft` submodule.
#
# If we look at the DFT coefficients of the two cosines we saw earlier, we can see that it is indeed doing exactly what we wanted it to: characterizing the frequency of the signal.
# +
# DFT of each cosine: magnitude (top row) and phase (bottom row).
# Expect magnitude peaks at k = 1/99 (slow) and k = 5/95 (fast).
slow_cos_fft = np.fft.fft(slow_cos)
fast_cos_fft = np.fft.fft(fast_cos)
plt.figure(figsize=(15, 7))
plt.subplot(2, 2, 1)
plt.stem(n, np.abs(slow_cos_fft), use_line_collection=True)
plt.title("$|DFT\{\cos\\left(\\frac{2\pi}{100} n\\right)\}|$")
plt.subplot(2, 2, 2)
plt.title("$|DFT\{\cos\\left(\\frac{10\pi}{100} n\\right)\}|$")
plt.stem(n, np.abs(fast_cos_fft), use_line_collection=True)
plt.subplot(2, 2, 3)
plt.stem(n, np.angle(slow_cos_fft), use_line_collection=True)
plt.title("$\\arg \\left(DFT\{\cos\\left(\\frac{2\pi}{100} n\\right)\}\\right)$")
plt.subplot(2, 2, 4)
plt.title("$\\arg \\left(DFT\{\cos\\left(\\frac{10\pi}{100} n\\right)\}\\right)$")
plt.stem(n, np.angle(fast_cos_fft), use_line_collection=True)
plt.show()
# -
# Since $\cos\left(\frac{2\pi}{100}n\right) = \frac{1}{2}\left(e^{j\frac{2\pi}{100}n} + e^{-j\frac{2\pi}{100}n}\right)$, we should expect peaks at $k = 1$ and $k =-1$ (note that because the roots of unity are periodic, $k=-1$ is the same basis vector as $k=99$). Likewise, since $\cos\left(\frac{10\pi}{100}n\right) = \frac{1}{2}\left(e^{j\frac{10\pi}{100}n} + e^{-j\frac{10\pi}{100}n}\right)$, we should expect peaks at $k=5$ and $k=-5$.
#
# There are a few things to note:
# 1. The DFT coefficients are complex numbers, so we need both magnitude (top plots) and phase (bottom plots) to characterize the signal information
# 2. For both $\cos\left(\frac{2\pi}{100}n\right)$ and $\cos\left(\frac{10\pi}{100}n\right)$, we should only expect 2 non-zero coefficients. However, we have apparently many non-zero coefficients. These are due to numerical instability in the FFT algorithm (if you print them out, these coefficients are on the order of $10^{-3}$ in magnitude and so are insignificant).
# 3. The DFT basis is **not** orthonormal. This is why we must scale by $\frac{1}{N}$ when applying the inverse DFT (`np.fft.ifft` in numpy). This is also why the peak magnitudes of the example signals above are 50 and not $\frac{1}{2}$.
# 4. DFT basis vectors are complex conjugates of each other (i.e $\phi_k[n] = \phi_{N-k}[n]^*$). This means for real signals, $X[k] = X^*[N-k]$.
#
# ### Exercise
# To get a better feel for the DFT, compute and plot the magnitude of the DFT coefficients of our clip from <NAME> in decibels ($dB = 20\log_{10}(\cdot)$). Since our song is a real signal, do not plot the complex conjugate coefficients since they are redundant information.
# +
plt.figure(figsize=(15, 7))
# ** YOUR CODE HERE ** #
# DFT magnitude of the song in decibels (20*log10 of the magnitude).
song_dft = 20 * np.log10(np.abs(np.fft.fft(audio)))
plt.plot(song_dft[:audio.size // 2]) # Coefficients N/2 to N are complex conjugate coefficients
plt.show()
# -
# **Comprehension Question**: Do you notice anything interesting about the chart above?
#
# **Answer**: Around index 150,000, there is a sharp decline in the magnitude of the DFT coefficients. It turns out that this DFT coefficient represents approximately 12.5 kHz (we'll see how to compute this later), which is close to the human hearing limit of about 20kHz.
# **Comprehension Question**: What does the first coefficient $X[0]$ of the DFT represent in simple terms?
#
# **Answer**: It is the sum of the signal (we can see this from the formula by letting $k=0$).
# ## II.a PSD
#
# In signal processing, due to noise, numerical stability, and other issues, we often care about the dominant frequencies in the signal (e.g when we are looking for formants in a vowel). This means we want to look at the magnitude of the DFT coefficients. However, sometimes peaks in the DFT are difficult to distinguish when looking at a magnitude plot. To better distinguish peaks, we can instead look at $|X[k]|^2$, the so-called **Power Spectral Density (PSD)**.
#
# The Power Spectral Density is essentially the magnitude of the DFT of the auto-correlation of the signal $x$. This is because when $x[n]$ has DFT coefficients $X[k]$, then $x[-n]$ has DFT coefficients $X^*[k]$ and since auto-correlation is the convolution of $x[n] * x[-n]$, and convolution in the time-domain is multiplication in the frequency domain, $PSD = X[k] X^*[k] = |X[k]|^2$.
# ### Exercise
#
# Remember that formants are the dominant frequencies in vowels. That means we can use the PSD to roughly find formants and distinguish vowels from each other.
#
# We have two mystery recordings taken from [this source](https://linguistics.ucla.edu/people/hayes/103/Charts/VChart/). They were sampled at 16000Hz. To try and distinguish them by their dominant frequencies, we will go through the following procedure.
#
# 1. Split the recording into 25ms sections
# 2. Find the PSD of each section
# 3. Let the "PSD" of the recording be the maximum value of the PSD of each section at each particular point. This will help pick out the frequencies that are dominant in any section of the recording
# 4. Try and guess where the formants are. Can you tell by their relative positions which vowel each recording is?
#
# When plotting, you can use the `FREQS` variable for the x-axis of the plot, and remember, the complex conjugate coefficients of the DFT give you no extra information, so do not plot them.
# +
# 200 frequency bins spanning 0-8 kHz (half of the 16 kHz sampling rate,
# matching the 400-sample / 25 ms analysis windows used below).
FREQS = np.linspace(0, 8000, 200)
_, vowel_1 = wavfile.read("mystery_vowel_1.wav")
_, vowel_2 = wavfile.read("mystery_vowel_2.wav")
# Cut each recording to an appropriate length
# (presumably so each length is a multiple of 400 samples — TODO confirm)
vowel_1 = vowel_1[13:]
vowel_2 = vowel_2[114:]
# +
# YOUR CODE HERE #
def compute_max_psd(recording):
    """Return the per-bin maximum log-PSD across 25 ms sections.

    The recording (16 kHz) is split into length-400 sections (25 ms each),
    the log power spectral density of every section is computed, and the
    maximum over sections is taken at each frequency bin. Only the first
    200 bins (the non-conjugate half of the spectrum) are returned.
    """
    sections = recording.reshape((-1, 400))
    psd = np.log10(np.abs(np.fft.fft(sections)) ** 2)
    # Fixed: np.mean contradicted both the function name and step 3 of the
    # stated procedure ("maximum value of the PSD of each section").
    max_psd = np.max(psd, axis=0)
    return max_psd[:200]
# Plot both vowels' PSD summaries against frequency to compare formants.
vowel_1_psd = compute_max_psd(vowel_1)
vowel_2_psd = compute_max_psd(vowel_2)
plt.figure(figsize=(15, 7))
# plt.subplot(2, 1, 1)
plt.title("PSD")
plt.plot(FREQS, vowel_1_psd)
plt.plot(FREQS, vowel_2_psd)
plt.legend(["Vowel 1", "Vowel 2"])
plt.xlabel("Hz")
# **Answer**: There is a lot of energy in the lower end of the spectrum for Vowel 2. We can predict that there are two formants in that region since there are no other prominent peaks. That means F1 and F2 are very close together. In contrast, for Vowel 1, we see one peak region below 500Hz and another peak region after 2000Hz. This means Vowel 1 has a relatively low F1 and high F2, making it an "i", and Vowel 2 has a relatively high F1 and low F2, making it an "a".
# Listen to the audio and see if you were right!
ipd.Audio(vowel_1, rate=16000)
ipd.Audio(vowel_2, rate=16000)
# # III. Frequency Domain Filtering
# One really nice property of the DFT is that convolution in the time domain is equivalent to multiplication in the frequency domain, and convolution in frequency is equivalent to multiplication in time.
# - Important implications to sampling theory, which will be covered in the next section
# - Makes convolution much more efficient: convolution in time on long signals of length n is $O(n^2)$, while converting to frequency domain, multiplying, and converting back is $O(n \log{n})$
# - Makes it easy to control what frequencies you filter. If you want a high-pass filter with a specific cutoff for example, or a band-pass filter to only capture F0, you can design the filter in the frequency domain and then convert it back to the time domain!
#
#
# #### The Frequency Response
#
# Just like any signal in the time domain can be transformed into the frequency domain, so can every filter. For every impulse response $h[n]$ in the time domain, we can calculate $H[k]$ in the frequency domain by performing the DFT. Since multiplication in the frequency domain is equivalent to convolution in time, we can actually uniquely characterize any LTI filter $H$ by its impulse response or frequency response. Oftentimes, it is easier to design filters in the frequency domain. For example, in speech recognition where the signal is easily separable by formant in the frequency domain, and we want to design a filter that captures speech while ignoring other frequencies. We don't much care what the shape of the filter is in the time domain, so we can design a filter in the frequency domain and impose restrictions on it to create the filter we need.
#
# #### Types of Filters
#
# Highpass filters let high frequencies through, while blocking low frequencies. This kind of filter is used to filter out low frequency background interference from power lines or machinery.
#
# Lowpass filters let low frequencies through, while blocking high frequencies. This kind of filter is often used to reduce high frequency noise, or eliminate aliasing from downsampling.
#
# Bandpass/bandstop filters pass or block only a specific range of frequencies. This might be useful in audio processing for focusing solely on F1 or F2, for example.
# +
audio_fft = np.fft.fft(audio)
len_clip = audio.shape[0]
# You can try adjusting the cutoff frequency or changing the list comprehension
# under freq_filter to create different frequency responses!
# Convert the 500 Hz cutoff to a DFT bin index.
cutoff_frequency = 500 * len_clip/fs
# For a real signal the DFT is conjugate-symmetric: bin n and bin
# len_clip - n describe the same physical frequency. A low-pass mask must
# therefore keep BOTH ends of the spectrum. Fixed: the previous mask kept
# only n < cutoff, dropping the "negative frequency" half, which halves
# and distorts the passband after taking the real part of the inverse DFT.
freq_filter = np.array(
    [1 if (n < cutoff_frequency or n > len_clip - cutoff_frequency) else 0
     for n in range(len_clip)]
)
# Equivalent time-domain impulse response of the mask (for reference).
time_filter = np.real(np.fft.ifft(freq_filter))
audio_filtered = np.real(np.fft.ifft(audio_fft*freq_filter))
plt.plot(np.linspace(0, 999/44.1, 1000), audio[:1000])
plt.xlabel("time (ms)")
plt.show()
plt.plot(freq_filter[:44100], color='r')
plt.xlabel("frequency (Hz)**")
plt.show()
plt.plot(np.linspace(0, 999/44.1, 1000),audio_filtered[:1000])
plt.xlabel("time (ms)")
plt.show()
# -
# We can also design filters in the frequency domain to optimize for specific conditions, rather than simply using a box filter in the frequency domain (which has drawbacks, as we'll see later). One example of such a filter is the Butterworth Filter, which is designed to minimize variation in the frequency response in the passband, and thus avoid distortions in the output signal.
#
# The Butterworth filter has 3 main parameters:
#
# - **N**, the order of the filter: how sharp the cutoff is
#
# - $\boldsymbol{\omega_h}$, the cutoff frequency: the frequency at which the frequency response drops to $1/\sqrt{2}$ of the passband response
#
# - **btype**, the type of filter (ie, lowpass, highpass, bandpass, bandstop)
#
# Documentation can be found at: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html#scipy.signal.butter
# +
# TODO: Try adjusting the parameters of the Butterworth filter to see how it affects
# the frequency response and sound!
# Design a 2nd-order digital high-pass Butterworth at 500 Hz (second-order
# sections for numerical stability) and apply it to the song.
sos = signal.butter(2, 500, 'hp', fs=fs, output='sos')
audio_filtered = signal.sosfilt(sos, audio)
# NOTE(review): the plot below shows a *low-pass* analog prototype
# ('low', analog=True) while the audio above was filtered with a
# *high-pass* digital filter — confirm whether this mismatch is intended.
b, a = signal.butter(2, 500, 'low', analog=True)
w, h = signal.freqs(b, a)
plt.semilogx(w, 20 * np.log10(abs(h)), color='r')
plt.title('Butterworth filter frequency response')
plt.xlabel('Frequency [radians / second]')
plt.ylabel('Amplitude [dB]')
plt.margins(0, 0.1)
plt.grid(which='both', axis='both')
plt.axvline(500, color='green') # cutoff frequency
plt.show()
# +
# TODO: Listen to the song filtered with different Butter and box filters
ipd.Audio(audio_filtered, rate=fs)
# -
# # IV. Sampling Theory
#
# In the real world, most signals are continuous (i.e they are functions from $\mathbb{R}\to\mathbb{R}$). Meanwhile, computers operate in the discrete space (i.e they are functions from $\mathbb{N}\to\mathbb{R}$). This means that in order to analyze any continuous signal, we need to somehow discretize it so it can be stored in finite memory.
#
# Given a continuous signal $x_c(t)$, we can obtain a discrete signal by letting $x_d[n] = x_c(f(n))$ where $f: \mathbb{N}\to\mathbb{R}$ describes our sampling scheme.
#
# A **uniform, non-adaptive sampling** scheme is where we pick some sampling frequency $\omega_s$ and let $f(n) = \frac{n}{\omega_s}$. We can think of it as "saving" the value of the continuous time signal every $\frac{1}{\omega_s}$ seconds. _Uniform_ means that $\omega_s$ is constant (i.e it does not depend on $n$), and _non-adaptive_ means $\omega_s$ is independent of the samples we have seen so far. Uniform, non-adaptive sampling schemes are what we most frequently use for sampling because of their simplicity and well-known theoretical guarantees. For the rest of the notebook, we will assume all sampling is uniform and non-adaptive.
#
# Because sampling has the potential to destroy information, we need to understand how it impacts the frequency domain. In continuous time, frequencies exist on the range $[0, \infty)$. However, in discrete time, the fastest that a signal can change is $\pi$ radians / sample (i.e alternating from 1 to -1 like $\cos(\pi n)$). When we take the DFT of a signal that we sampled, we want to know how our angular frequencies relate to the continuous frequencies.
#
# The easiest way to think of how continuous frequencies relate to discrete frequencies is by mapping the interval $\left[0, \frac{f_s}{2}\right]$ (continuous frequencies) to the interval $[0, \pi]$ (angular frequencies). Given an angular frequency $\omega_d\in[0, \pi]$, the continuous frequency that it represents is $\omega_c = \frac{f_s}{2\pi}\omega_d$.
# ### Exercise
# Plot the magnitude of DFT coefficients (in decibels) of our clip from Tom's Diner and label the x-axis with the continuous time frequencies. Ignore the complex conjugate coefficients.
# +
plt.figure(figsize=(15, 7))
# ** YOUR CODE HERE ** #
# Map the first half of the DFT bins to continuous frequencies 0..fs/2.
freqs = np.linspace(0, fs / 2, audio.size // 2)
song_dft = 20 * np.log10(np.abs(np.fft.fft(audio)))
plt.plot(freqs, song_dft[:audio.size // 2]) # Coefficients N/2 to N are complex conjugate coefficients
plt.xlabel("Hz")
plt.show()
# -
# ## IV.a Aliasing
#
# How frequently we sample matters a lot. If we sample too slowly, then we lose information. If we sample too fast, then we are wasting memory. The three plots below are all samples of a 10 second long sine wave $x(t) = \sin(2\pi t)$.
# +
# Sample a 10-second, 1 Hz sine at (approximately) 100, 10, and 1 Hz.
# At 1 Hz the sampling is too slow and the sine aliases to a lower frequency.
hundred_hz = np.linspace(0, 10, 1000)
ten_hz = np.linspace(0, 10, 100)
one_hz = np.linspace(0, 10, 10)
plt.figure(figsize=(15, 7))
plt.subplot(1, 3, 1)
plt.plot(one_hz, np.sin(2 * np.pi * one_hz))
plt.title("$f_s$ = 1Hz")
plt.subplot(1, 3, 2)
plt.plot(ten_hz, np.sin(2 * np.pi * ten_hz))
plt.title("$f_s$ = 10Hz")
plt.subplot(1, 3, 3)
plt.plot(hundred_hz, np.sin(2 * np.pi * hundred_hz))
plt.title("$f_s$ = 100Hz")
plt.show()
# -
# Notice how the faster sampling frequencies 10Hz and 100Hz look virtually identical and cycle 10 times in 10 seconds as we expect a 1Hz sine wave to do. However, when we sample at 1Hz, our samples look like they came from a 0.1Hz sine wave, not a 1Hz sine wave. When higher frequencies "masquerade" as lower frequencies, this is known as **aliasing**. The effects of aliasing are very clear in the frequency domain through the following example where we sample the signal $x_c(t) = \sin(2\pi t) + \sin(2\pi * 10t)$ with a sampling frequency of 11Hz vs a sampling frequency of 50Hz vs a sampling frequency of 1000Hz over the course of 1 second.
#
# +
def x_c(t):
    """Continuous-time test signal: the sum of a 1 Hz and a 10 Hz sine."""
    one_hz_component = np.sin(2 * np.pi * t)
    ten_hz_component = np.sin(2 * np.pi * 10 * t)
    return one_hz_component + ten_hz_component
# Sample x_c over 1 second at 11, 50, and 1000 Hz; plot the time-domain
# samples (top row) and DFT magnitudes (bottom row) to show where the
# 10 Hz component aliases at the slowest rate.
eleven_hz = np.linspace(0, 1, 11)
fifty_hz = np.linspace(0, 1, 50)
thousand_hz = np.linspace(0, 1, 1000)
plt.figure(figsize=(15, 15))
plt.subplot(3, 3, 1)
plt.plot(eleven_hz, x_c(eleven_hz))
plt.title("$f_s$ = 11Hz (Time Domain)")
plt.subplot(3, 3, 2)
plt.plot(fifty_hz, x_c(fifty_hz))
plt.title("$f_s$ = 50Hz (Time Domain)")
plt.subplot(3, 3, 3)
plt.plot(thousand_hz, x_c(thousand_hz))
plt.title("$f_s$ = 1000Hz (Time Domain)")
plt.subplot(3, 3, 4)
plt.plot(np.linspace(0, 11, eleven_hz.size), np.abs(np.fft.fft(x_c(eleven_hz))))
plt.title("$f_s$ = 11Hz (Frequency Domain)")
plt.xlabel("Hz")
plt.subplot(3, 3, 5)
plt.plot(np.linspace(0, 50, fifty_hz.size), np.abs(np.fft.fft(x_c(fifty_hz))))
plt.title("$f_s$ = 50Hz (Frequency Domain)")
plt.xlabel("Hz")
plt.subplot(3, 3, 6)
plt.plot(np.linspace(0, 1000, thousand_hz.size), np.abs(np.fft.fft(x_c(thousand_hz))))
plt.title("$f_s$ = 1000Hz (Frequency Domain)")
plt.xlabel("Hz")
plt.show()
# -
# When we sampled at 50Hz, we had 2 very clear frequencies in our spectrum. However, at 11Hz, the second peak disappeared entirely! We can think of it as "hiding" in the 1Hz peak in the spectrum. At 1000Hz, we can measure a much larger range of frequencies, and so all of our peaks remain in the plot (they look squished together due to the scale of the axis).
#
# The **Nyquist Theorem** tells us how fast we need to sample in order to prevent aliasing. It states that in order to avoid aliasing, our sampling frequency $f_s$ must be at least twice the highest frequency present in the signal ($f_s > 2 * f_{max}$). In practice, due to noise, there is no maximum frequency of the signal, so we always have some aliasing. This can be minimized by using an analog anti-aliasing filter before we sample. Note that the Nyquist theorem holds in discrete time as well. Namely, if we want to downsample a recording, then the most we can downsample is by a factor of $M$ (i.e take every Mth sample) such that $\omega_{max} < \frac{\pi}{M}$, i.e. the highest angular frequency present must stay below the new Nyquist limit.
#
# ### Exercise
# How much can we downsample our audio clip before aliasing starts to degrade our audio quality? Which parts of the audio degrade first (hint, think about which frequencies are masked).
# Take every Mth sample (M = 2, 4, 8, 16) and play back at the matching
# reduced rate; aliasing becomes audible once fs/M drops below twice the
# highest frequencies present.
two_downsampled = audio[::2]
ipd.Audio(two_downsampled, rate=fs // 2)
four_downsampled = audio[::4]
ipd.Audio(four_downsampled, rate=fs // 4)
eight_downsampled = audio[::8]
ipd.Audio(eight_downsampled, rate=fs // 8)
sixteen_downsampled = audio[::16]
ipd.Audio(sixteen_downsampled, rate=fs // 16)
# ## IV.b Quantization
#
# Earlier, we allowed our discrete signals to be functions from $\mathbb{N}\to\mathbb{R}$. In words, we discretized time, but our signal took values over a continuous range. This is not entirely accurate since computers use bits to represent numbers, so if we use $B$ bits to represent the values our signal takes on, we can only represent $2^B$ possible values.
#
# ### Exercise
# See how changing the number of bits we use to represent audio impacts the quality of the audio (currently using 16bits)
# Integer-dividing by 4096 (2^12) discards the low 12 of the 16 bits,
# leaving ~4-bit audio.
ipd.Audio(audio // 4096, rate=fs)
# # V. Spectral Analysis - Hoang
#
# ## V.a Windowing
#
# **Why?**
# * We can only capture a finite length of a signal
# * Impossible to capture an infinitely long signal (x[n] from $n = -\infty$ to $n = \infty$)
#
# **How?**
# * Time domain: Multiply the signal x[n] with a window: $x[n] \cdot w[n]$
# * Frequency domain: Convolution between the spectrum and the DTFT of the window, thus blurring the spectrum
#
# $$x[n] w[n] \stackrel{\mathcal{FT}}{\Leftrightarrow} \frac{1}{2\pi} \int_{-\pi}^{\pi} X(e^{ju}) W(e^{j(\omega-u)}) \,du$$
#
# **Important note**
# * Rectangular window has the best resolution but the most leakage
#     * Never use this: the excessive sidelobe leakage outweighs the resolution gain
# * Popular window: Hann, Hamming, Tukey, Blackman, etc.
# * Not applying one of these windows == rectangular window by default
# <img src="https://upload.wikimedia.org/wikipedia/commons/f/f2/Window_functions_in_the_frequency_domain.png" alt="Spectrum of different window functions" width="600"/>
#
# # V.b Spectral Estimation Methods
#
# **Periodogram**
# * Has excessive variance
#
# <img src="./Images/periodogram.png" alt="Natural spectral estimator: periodogram" width="500"/>
#
# **Blackman-Tukey**
# * Reduce variance by smoothing the periodogram
# * Window the autocorrelation before taking the PSD
#
# <img src="./Images/blackman_tukey.png" alt="Blackman-Tukey estimator" width="500"/>
#
# * Important tradeoff between PSD bias and variance. Control using autocorrelation window duration.
#
# <img src="./Images/autocorrelation_window_duration_tradeoff.png" alt="Autocorrelation window duration tradeoff" width="200"/>
#
# **Welch-Bartlett**
# * Reduce variance by averaging the periodogram
#
# <img src="./Images/welch_bartlett.png" alt="Welch-Bartlett estimator" width="525"/>
#
# ## V.c STFT/Spectrogram
#
# **Overview**
#
# <img src="./Images/STFT_steps.png" alt="Step-by-step to perform STFT" width="700"/>
# +
# Pre-process signal
clipped_audio = audio[:len(audio)//2] # clip the audio in half for better visibility
N = clipped_audio.size # number of samples in the clipped audio
Tmax = N/fs # total duration (seconds) of the clipped audio
# Print out signal attributes
print(f"Sampling rate: {fs} Hz")
print(f"Number of samples: {N} samples")
print(f"Duration of recording: {Tmax} seconds")
# -
# Function to plot the spectrogram
def plot_spectrogram(t, f, spt):
    """Display a spectrogram on a log10 color scale.

    t (numpy.ndarray): time axis (seconds)
    f (numpy.ndarray): frequency axis (Hz)
    spt (numpy.ndarray): 2D spectrogram matrix (frequency x time)
    """
    plt.figure(figsize=(15, 7))
    # log10 compresses the large dynamic range of the PSD values
    plt.pcolormesh(t, f, np.log10(spt))
    plt.xlabel('Time (sec)')
    plt.ylabel('Frequency (Hz)')
    plt.ylim(0, 15000)
    plt.show()
# +
# Spectrogram parameters
Tw_s = 0.01 # data window duration (seconds)
Tw = int(np.round(Tw_s * fs)) # data window duration (samples)
# ~99% segment overlap. int() is needed because int // float yields a float
# (e.g. 441 // 1.01 == 436.0): scipy expects an integer noverlap and the
# printout below should show a whole sample count, not "436.0".
no = int(Tw // 1.01) # number of overlaps
print(f'Window duration (seconds): {Tw_s} seconds out of {Tmax} seconds')
print(f'Window duration (samples): {Tw} samples out of {clipped_audio.size} samples')
print(f'Number of overlaps: Overlapping {no} samples out of {Tw} samples')
print('\n')
# Compute the spectrogram, window each segment of the signal by blackman window of length Tw
start_time = time.time()
f, t, Spectrogram = scipy.signal.spectrogram(x=clipped_audio, fs=fs, window='blackman', nperseg=Tw, noverlap=no, nfft=Tw)
print(f"Spectrogram computation duration: {time.time() - start_time} seconds")
# Plot the spectrogram
plot_spectrogram(t=t, f=f, spt=Spectrogram)
# -
# ## V.d Important Spectral Analysis Tradeoffs
#
# | Spectrogram Design Parameters | Tradeoffs |
# |:--------------------------------------: |:------------------------------------------------------------------:|
# | Window types | Frequency resolution (mainlobe width) vs. Sidelobe leakage |
# | Data window duration | Frequency resolution vs. Time resolution; Bias vs. Variance |
# | Step size/Overlap | Computation power vs. Resolution |
# | Autocorrelation window duration (BT) | Bias vs. Variance |
#
# **Data window duration tradeoff**
# * Most important tradeoff
#
# | Data Window Duration | Frequency Resolution | Time Resolution | PSD Bias | PSD Variance |
# |:---------------------------:|:---------------------------:|:----------------------:|:---------------:|:-------------------:|
# | Long | High | Low | Low | High |
# | Short | Low | High | High | Low |
#
# **Exercise 1: Adjust data window duration to observe Frequency resolution vs. Time resolution tradeoff**
# +
# Case 1: Long window --> High frequency resolution, Low time resolution
Tw_s = 1 # data window duration (seconds) # ADJUSTED
Tw = int(np.round(Tw_s * fs)) # data window duration (samples)
# int() fix: int // float yields a float, but noverlap should be an integer
no = int(Tw // 1.01) # number of overlaps # FIXED
print(f'Window duration (seconds): {Tw_s} seconds out of {Tmax} seconds')
print(f'Window duration (samples): {Tw} samples out of {clipped_audio.size} samples')
print(f'Number of overlaps: Overlapping {no} samples out of {Tw} samples')
print('\n')
# Compute the spectrogram
start_time = time.time()
f, t, Spectrogram = scipy.signal.spectrogram(x=clipped_audio, fs=fs, window='blackman', nperseg=Tw, noverlap=no, nfft=Tw)
print(f"Spectrogram computation duration: {time.time() - start_time} seconds")
# Plot the spectrogram
plot_spectrogram(t=t, f=f, spt=Spectrogram)
# +
# Case 2: Short window --> Low frequency resolution, High time resolution
Tw_s = 0.0001 # data window duration (seconds) # ADJUSTED
Tw = int(np.round(Tw_s * fs)) # data window duration (samples)
# int() fix: int // float yields a float, but noverlap should be an integer
no = int(Tw // 1.01) # number of overlaps # FIXED
print(f'Window duration (seconds): {Tw_s} seconds out of {Tmax} seconds')
print(f'Window duration (samples): {Tw} samples out of {clipped_audio.size} samples')
print(f'Number of overlaps: Overlapping {no} samples out of {Tw} samples')
print('\n')
# Compute the spectrogram
start_time = time.time()
f, t, Spectrogram = scipy.signal.spectrogram(x=clipped_audio, fs=fs, window='blackman', nperseg=Tw, noverlap=no, nfft=Tw)
print(f"Spectrogram computation duration: {time.time() - start_time} seconds")
# Plot the spectrogram
plot_spectrogram(t=t, f=f, spt=Spectrogram)
# -
# **Step size/Overlap tradeoff**
# * Small step size (more overlaps between segments of the signal) yields better resolution but consumes more computation power
#
# **Exercise 2: Adjust number of overlaps for observing Computation power vs. Resolution tradeoff**
# +
# Case 1: 1/2 overlap --> Compute faster but lower resolution
Tw_s = 0.01 # data window duration (seconds) # FIXED
Tw = int(np.round(Tw_s * fs)) # data window duration (samples)
no = Tw//2 # number of overlaps (50% of the window length) # ADJUSTED
print(f'Window duration (seconds): {Tw_s} seconds out of {Tmax} seconds')
print(f'Window duration (samples): {Tw} samples out of {clipped_audio.size} samples')
print(f'Number of overlaps: Overlapping {no} samples out of {Tw} samples')
print('\n')
# Compute the spectrogram (blackman-windowed segments of length Tw)
start_time = time.time()
f, t, Spectrogram = scipy.signal.spectrogram(x=clipped_audio, fs=fs, window='blackman', nperseg=Tw, noverlap=no, nfft=Tw)
print(f"Spectrogram computation duration: {time.time() - start_time} seconds")
# Plot the spectrogram
plot_spectrogram(t=t, f=f, spt=Spectrogram)
# +
# Case 2: 0 overlap --> Computes fastest but lowest resolution
Tw_s = 0.01 # data window duration (seconds) # FIXED
Tw = int(np.round(Tw_s * fs)) # data window duration (samples)
no = 0 # number of overlaps (adjacent segments do not overlap at all) # ADJUSTED
print(f'Window duration (seconds): {Tw_s} seconds out of {Tmax} seconds')
print(f'Window duration (samples): {Tw} samples out of {clipped_audio.size} samples')
print(f'Number of overlaps: Overlapping {no} samples out of {Tw} samples')
print('\n')
# Compute the spectrogram (blackman-windowed segments of length Tw)
start_time = time.time()
f, t, Spectrogram = scipy.signal.spectrogram(x=clipped_audio, fs=fs, window='blackman', nperseg=Tw, noverlap=no, nfft=Tw)
print(f"Spectrogram computation duration: {time.time() - start_time} seconds")
# Plot the spectrogram
plot_spectrogram(t=t, f=f, spt=Spectrogram)
# -
# # Resources
#
# 1. [Anmol's Course Notes from EE120 (Signals and Systems)](https://aparande.gitbook.io/berkeley-notes/ee120-0)
# 2. [Anmol's Course Notes from EE123 (Digital Signal Processing)](https://aparande.gitbook.io/berkeley-notes/ee123-0)
# 3. [Discrete Time Signal Formula Sheet](https://anmolparande.com/resources/berkeley/discrete-formula-sheet.pdf)
# 4. [EE 525 Course (Statistical Signal Processing) at Portland State University](http://pdx.smartcatalogiq.com/2020-2021/Bulletin/Courses/EE-Electrical-Engineering/500/EE-525)
# 5. [Windowing on Wikipedia](https://en.wikipedia.org/wiki/Window_function)
# 6. [Scipy's Spectrogram Function Documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html)
|
notebook_sol.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Discrete Choice Models
# ## Fair's Affair data
# A survey of women only was conducted in 1974 by *Redbook* asking about extramarital affairs.
# %matplotlib inline
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import logit
# Inspect the dataset's provenance and variable descriptions
print(sm.datasets.fair.SOURCE)
print( sm.datasets.fair.NOTE)
dta = sm.datasets.fair.load_pandas().data
# Binarize the outcome: 1.0 if the respondent reported any affair, else 0.0
dta['affair'] = (dta['affairs'] > 0).astype(float)
print(dta.head(10))
print(dta.describe())
# Logit of having an affair on demographics and marriage characteristics
affair_mod = logit("affair ~ occupation + educ + occupation_husb"
                   "+ rate_marriage + age + yrs_married + children"
                   " + religious", dta).fit()
print(affair_mod.summary())
# How well are we predicting?
affair_mod.pred_table()
# The coefficients of the discrete choice model do not tell us much. What we're after is marginal effects.
mfx = affair_mod.get_margeff()  # average marginal effects across the sample
print(mfx.summary())
respondent1000 = dta.iloc[1000]
print(respondent1000)
# Build {exog column index -> value} for this one respondent;
# index 0 is the intercept column, added below
resp = dict(zip(range(1,9), respondent1000[["occupation", "educ",
                                            "occupation_husb", "rate_marriage",
                                            "age", "yrs_married", "children",
                                            "religious"]].tolist()))
resp.update({0 : 1})
print(resp)
# Marginal effects evaluated at this specific respondent's covariates
mfx = affair_mod.get_margeff(atexog=resp)
print(mfx.summary())
# `predict` expects a `DataFrame` since `patsy` is used to select columns.
respondent1000 = dta.iloc[[1000]]
affair_mod.predict(respondent1000)
affair_mod.fittedvalues[1000]
# Logistic CDF of the linear predictor -> predicted probability of an affair
affair_mod.model.cdf(affair_mod.fittedvalues[1000])
# The "correct" model here is likely the Tobit model. We have a work-in-progress branch "tobit-model" on github, if anyone is interested in censored regression models.
# ### Exercise: Logit vs Probit
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
support = np.linspace(-6, 6, 1000)
# Compare the two link CDFs: the logistic has heavier tails than the normal
ax.plot(support, stats.logistic.cdf(support), 'r-', label='Logistic')
ax.plot(support, stats.norm.cdf(support), label='Probit')
ax.legend();
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
support = np.linspace(-6, 6, 1000)
# Corresponding densities (PDFs) on the same support
ax.plot(support, stats.logistic.pdf(support), 'r-', label='Logistic')
ax.plot(support, stats.norm.pdf(support), label='Probit')
ax.legend();
# Compare the estimates of the Logit Fair model above to a Probit model. Does the prediction table look better? Much difference in marginal effects?
# ### Generalized Linear Model Example
print(sm.datasets.star98.SOURCE)
print(sm.datasets.star98.DESCRLONG)
print(sm.datasets.star98.NOTE)
dta = sm.datasets.star98.load_pandas().data
print(dta.columns)
print(dta[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP', 'PERMINTE']].head(10))
print(dta[['AVYRSEXP', 'AVSALK', 'PERSPENK', 'PTRATIO', 'PCTAF', 'PCTCHRT', 'PCTYRRND']].head(10))
formula = 'NABOVE + NBELOW ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT '
formula += '+ PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
# #### Aside: Binomial distribution
# Toss a six-sided die 5 times, what's the probability of exactly 2 fours?
stats.binom(5, 1./6).pmf(2)
from scipy.special import comb
comb(5,2) * (1/6.)**2 * (5/6.)**3
from statsmodels.formula.api import glm
glm_mod = glm(formula, dta, family=sm.families.Binomial()).fit()
print(glm_mod.summary())
# The number of trials
glm_mod.model.data.orig_endog.sum(1)
glm_mod.fittedvalues * glm_mod.model.data.orig_endog.sum(1)
# First differences: We hold all explanatory variables constant at their means and manipulate the percentage of low income households to assess its impact
# on the response variables:
exog = glm_mod.model.data.orig_exog # get the dataframe
# Counterfactual 1: every covariate at its mean, LOWINC at its 25th percentile
means25 = exog.mean()
print(means25)
means25['LOWINC'] = exog['LOWINC'].quantile(.25)
print(means25)
# Counterfactual 2: same means, but LOWINC at its 75th percentile
means75 = exog.mean()
means75['LOWINC'] = exog['LOWINC'].quantile(.75)
print(means75)
# Again, `predict` expects a `DataFrame` since `patsy` is used to select columns.
resp25 = glm_mod.predict(pd.DataFrame(means25).T)
resp75 = glm_mod.predict(pd.DataFrame(means75).T)
diff = resp75 - resp25
# The interquartile first difference for the percentage of low income households in a school district is:
print("%2.4f%%" % (diff[0]*100))
nobs = glm_mod.nobs
y = glm_mod.model.endog
yhat = glm_mod.mu
from statsmodels.graphics.api import abline_plot
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, ylabel='Observed Values', xlabel='Fitted Values')
ax.scatter(yhat, y)
y_vs_yhat = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()
fig = abline_plot(model_results=y_vs_yhat, ax=ax)
# #### Plot fitted values vs Pearson residuals
# Pearson residuals are defined to be
#
# $$\frac{(y - \mu)}{\sqrt{(var(\mu))}}$$
#
# where var is typically determined by the family. E.g., binomial variance is $np(1 - p)$
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, title='Residual Dependence Plot', xlabel='Fitted Values',
ylabel='Pearson Residuals')
ax.scatter(yhat, stats.zscore(glm_mod.resid_pearson))
ax.axis('tight')
ax.plot([0.0, 1.0],[0.0, 0.0], 'k-');
# #### Histogram of standardized deviance residuals with Kernel Density Estimate overlaid
# The definition of the deviance residuals depends on the family. For the Binomial distribution this is
#
# $$r_{dev} = sign\left(Y-\mu\right)*\sqrt{2n\left(Y\log\frac{Y}{\mu}+(1-Y)\log\frac{(1-Y)}{(1-\mu)}\right)}$$
#
# They can be used to detect ill-fitting covariates
resid = glm_mod.resid_deviance
resid_std = stats.zscore(resid)  # standardize the deviance residuals
# Kernel density estimate of the standardized residuals, overlaid on a histogram
kde_resid = sm.nonparametric.KDEUnivariate(resid_std)
kde_resid.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, title="Standardized Deviance Residuals")
ax.hist(resid_std, bins=25, density=True);
ax.plot(kde_resid.support, kde_resid.density, 'r');
# #### QQ-plot of deviance residuals
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
fig = sm.graphics.qqplot(resid, line='r', ax=ax)
|
examples/notebooks/discrete_choice_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="T3KiRqhj_jaT" colab_type="text"
# ## Week 1
#
# Use convolution to identify features in an image regardless of their location
# + id="mxjO1mRRAD_3" colab_type="code" colab={}
# Generators stream batches of images from disk, rescaled to [0, 1]
train_datagen = ImageDataGenerator(rescale = 1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size = (150,150), #image will all resize to 150 x 150
    batch_size = 20, #we have 2000 images, 100 batches, each size of 20
    class_mode = 'binary'
)
# renamed from `test_datasgen` (typo)
test_datagen = ImageDataGenerator(rescale = 1. / 255)
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size = (150,150), #image will all resize to 150 x 150
    batch_size = 20, #we have 2000 images, 100 batches, each size of 20
    class_mode = 'binary'
)
# FIXED: the original had stray closing parentheses after each Conv2D call,
# which was a SyntaxError; `input_shape` belongs inside the first layer's
# argument list.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(16, (3,3), activation = 'relu', input_shape = (150,150,3)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(32, (3,3), activation = 'relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(64, (3,3), activation = 'relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation = 'relu'),
    tf.keras.layers.Dense(1, activation = 'sigmoid'),
])
from tensorflow.keras.optimizers import RMSprop
model.compile(loss = 'binary_crossentropy', optimizer = RMSprop(lr = 0.001), metrics = ['acc'] )
history = model.fit_generator(
    train_generator,
    steps_per_epoch = 100,  # 2000 training images / batch size 20
    epochs = 15,
    validation_data = validation_generator,
    validation_steps = 50,  # 1000 validation images / batch size 20
    verbose = 2
)
|
Convolutional Neural Networks in TensorFlow/week1 Exploring a Larger Dataset/CNN in TensorFlow Week1 Note.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import time
from cricsheet.fow_analysis.collapses.extract_collapses import return_collapses
# ## Load and Format Single Match
filepath = '../data/raw/csv/howstat/fall_of_wickets/'
file = 'fow_1999.csv'
df = pd.read_csv(filepath+file, index_col=0, parse_dates=[2], infer_datetime_format=True)
df.groupby(['MatchId','MatchInnings', 'Team']).apply(return_collapses).reset_index()
# Questions to answer:
# - Number of collapses by Team, by year (unique collapses, innings with a collapse)
# - Positions most often involved
# - Batters most often involved
# ## Load and Format All Matches
# ### Experiments - Concatenating Dfs
#
# ~2500 matches.
# 1) What is the most efficient way to load all dataframes?
#
# 2) Is it more efficient to load and format one-by-one, or concatenate into a single df and groupby the whole df
#
# +
# Try with 100 dataframes initially
n = 100
filepath = '../data/raw/csv/howstat/fall_of_wickets/'
# -
# #### Method 1: Load all using glob generator + concat
import glob
import os
# +
start = time.time()
all_files = glob.glob(os.path.join(filepath, "*.csv"))
all_files_to_load = all_files[:]
df_from_each_file = (pd.read_csv(f, index_col=0, parse_dates=[2], infer_datetime_format=True) for f in all_files_to_load)
concatenated_df = pd.concat(df_from_each_file, ignore_index=True)
end = time.time()
time_taken = end - start
print(f'{time_taken} seconds')
# -
"""
Attempts:
13 seconds
6.19 seconds
"""
# #### Method 2: os.listdir + concat
# +
from os import listdir
start = time.time()
df = pd.concat([pd.read_csv(f'{filepath}/{f}', index_col=0, parse_dates=[2], infer_datetime_format=True) for f in os.listdir(filepath) if f.endswith('.csv')])
end = time.time()
time_taken = end - start
print(f'{time_taken} seconds')
# -
"""
Attempts:
6.37 seconds
6.012
"""
# #### Method 3: Dask
# +
import dask.dataframe as dd
start = time.time()
df = dd.read_csv(f'{filepath}*.csv')
end = time.time()
time_taken = end - start
print(f'{time_taken} seconds')
# +
start = time.time()
df = df.compute()
end = time.time()
time_taken = end - start
print(f'{time_taken} seconds')
# -
"""
Attempts:
33 seconds
"""
df.info()
# I prefer Method 1
# ### Apply Chosen Method
# +
import glob
import os
filepath_fow = '../data/raw/csv/howstat/fall_of_wickets/'
all_fow = glob.glob(os.path.join(filepath_fow, "*.csv"))
all_fow_to_load = all_fow[:]
df_fow_from_each_file = (pd.read_csv(f, index_col=0, parse_dates=[2], infer_datetime_format=True) for f in all_fow_to_load)
concatenated_df_fow = pd.concat(df_fow_from_each_file, ignore_index=True)
# -
concatenated_df_fow.info()
#concatenated_df_fow.to_csv('../data/interim/howstat/fall_of_wickets/fow_all.csv')
concatenated_df_fow = pd.read_csv('../data/interim/howstat/fall_of_wickets/fow_all.csv')
# +
filepath_scores = '../data/raw/csv/howstat/scorecards/'
all_scores = glob.glob(os.path.join(filepath_scores, "*.csv"))
all_scores_to_load = all_scores[:]
df_scores_from_each_file = (pd.read_csv(f, index_col=0, parse_dates=[2], infer_datetime_format=True) for f in all_scores_to_load)
concatenated_df_scores = pd.concat(df_scores_from_each_file, ignore_index=True)
# -
concatenated_df_scores = concatenated_df_scores[['MatchId', 'MatchInnings', 'Team', 'TeamInnings', 'Player', 'R', 'BF']]
concatenated_df_scores['BattingPosition'] = concatenated_df_scores.groupby(['MatchId','MatchInnings', 'Team']).cumcount() + 1
concatenated_df_scores.info()
#concatenated_df_scores.to_csv('../data/interim/howstat/scorecards/scorecards_all.csv')
concatenated_df_scores = pd.read_csv('../data/interim/howstat/scorecards/scorecards_all.csv')
# ### Experiments - Fuzzy Matching
#
#
# We want to get some information from the scorecards into the Fall of Wickets objects. Unfortunately the batter names don't match exactly (scorecards have initials as well). We need to do some fuzzy matching before joining info from the scorecards.
df_fow_to_merge = concatenated_df_fow[concatenated_df_fow.MatchId<=10]
df_scores_to_merge = concatenated_df_scores[concatenated_df_scores.MatchId<=10]
# #### Method 1: fuzzy wuzzy
# +
from cricsheet.utils import fuzzy_merge
start = time.time()
df_merged = fuzzy_merge(df_fow_to_merge, df_scores_to_merge, 'Player', 'Player', 80)
end = time.time()
time_taken = end - start
print(f'{time_taken} seconds')
# -
# 2 seconds for 10 matches.
#
# 183 seconds for 100 matches.
#
# We have 2400 so this won't scale. Estimate: >2hrs to match all.
# #### Method 2: fuzzy-pandas
# +
import fuzzy_pandas as fpd
start = time.time()
matches = fpd.fuzzy_merge(df_fow_to_merge, df_scores_to_merge,
left_on=['Player'],
right_on=['Player'],
method='levenshtein',
ignore_case=True,
keep='match',
join='left-outer',
threshold=0.3)
end = time.time()
time_taken = end - start
print(f'{time_taken} seconds')
# -
# 15 seconds for 10 matches.
#
# 51 seconds for 20 matches.
#
# We have 2400 so this won't scale. Estimate: >2hrs to match all.
# #### Method 3: difflib
# +
import difflib
from functools import partial
start = time.time()
f = partial(
difflib.get_close_matches, possibilities=df_scores_to_merge['Player'].tolist(), n=1, cutoff=0.3)
matches = df_fow_to_merge['Player'].map(f).str[0].fillna('')
scores = [
difflib.SequenceMatcher(None, x, y).ratio()
for x, y in zip(matches, df_fow_to_merge['Player'])
]
df_fuzzy_matched = df_fow_to_merge.assign(best=matches, score=scores)
end = time.time()
time_taken = end - start
print(f'{time_taken} seconds')
# -
# 0.85 seconds for 10 matches.
#
# 64 seconds for 100 matches.
#
# 548 seconds for 300 matches.
df_fuzzy_matched['score'].describe()
# All of the above approaches match all of df_fow with all of df_scorecard.
# That isn't necessary. We can do it for each Match, since the batters will match for each match.
# ### Load and Fuzzy-match a Match at a time
# +
import glob
import os
filepath_scores = '../data/raw/csv/howstat/scorecards/'
all_scores = glob.glob(os.path.join(filepath_scores, "*.csv"))
all_scores_to_load = all_scores[:100]
filepath_fow = '../data/raw/csv/howstat/fall_of_wickets/'
all_fow = glob.glob(os.path.join(filepath_fow, "*.csv"))
all_fow_to_load = all_fow[:100]
# +
import difflib
from functools import partial
def fuzzy_match(df1, df2, left_on, right_on):
    """Fuzzy-match ``df1[left_on]`` against the values of ``df2[right_on]``.

    Returns a copy of ``df1`` with two extra columns:
    ``best``  -- the closest candidate from ``df2`` ('' when nothing clears
                 the 0.3 similarity cutoff), and
    ``score`` -- the SequenceMatcher ratio between the match and the original.
    """
    candidates = df2[right_on].tolist()

    def closest(name):
        # up to 2 candidates above the cutoff, best first; keep only the best
        hits = difflib.get_close_matches(name, candidates, n=2, cutoff=0.3)
        return hits[0] if hits else ''

    best = df1[left_on].map(closest)
    score = [
        difflib.SequenceMatcher(None, match, original).ratio()
        for match, original in zip(best, df1[left_on])
    ]
    return df1.assign(best=best, score=score)
# +
# Merge scorecard info into each fall-of-wickets record, one match at a time.
# Fuzzy matching per innings keeps the candidate pool small, which is what
# makes this scale compared to matching across all matches at once.
import re  # FIX: `re` was used below but never imported in this notebook

l_merged_df = []
# iterate the fow/scorecard file lists in lockstep (they are index-aligned)
for fow_path, scores_path in zip(all_fow_to_load, all_scores_to_load):
    # read fow, read scorecard
    df_fow = pd.read_csv(fow_path, index_col=0, parse_dates=[2], infer_datetime_format=True)
    df_scores = pd.read_csv(scores_path, index_col=0, parse_dates=[2], infer_datetime_format=True)
    # keep only the scorecard columns we need and derive batting position
    df_scores = df_scores[['MatchId', 'MatchInnings', 'Team', 'TeamInnings', 'Player', 'R', 'BF']]
    df_scores['BattingPosition'] = df_scores.groupby(['MatchId', 'MatchInnings', 'Team']).cumcount() + 1
    l_innings = []
    for innings in df_fow.MatchInnings.unique():
        df_fow_innings = df_fow[df_fow.MatchInnings == innings]
        df_scores_innings = df_scores[df_scores.MatchInnings == innings]
        # fuzzy match on Player within this innings only
        df_matched_innings = fuzzy_match(df_fow_innings, df_scores_innings, 'Player', 'Player')
        # merge cols from scores on the matched name
        df_merged_innings = df_matched_innings.merge(
            df_scores_innings,
            how='left',
            left_on=['MatchId', 'MatchInnings', 'Team', 'TeamInnings', 'best'],
            right_on=['MatchId', 'MatchInnings', 'Team', 'TeamInnings', 'Player'],
        )
        # reformat: keep the matched name as 'Player', strip stray punctuation
        df_merged_innings.drop(['Player_x', 'Player_y'], axis=1, inplace=True)
        df_merged_innings = df_merged_innings.rename({'best': 'Player'}, axis=1)
        df_merged_innings['Player'] = df_merged_innings['Player'].apply(lambda x: re.sub('[!,*)@#%(&$_?.^†]', '', x))
        l_innings.append(df_merged_innings)
    df_merged_match = pd.concat(l_innings)
    l_merged_df.append(df_merged_match)
# -
df = pd.concat(l_merged_df)
from cricsheet.fow_analysis.collapses.preprocess_data import load_all_and_process
df = load_all_and_process()
df
# +
start = time.time()
df_grouped = df.groupby(['MatchId','MatchInnings', 'Team']).apply(return_collapses).reset_index()
end = time.time()
time_taken = end - start
print(f'{time_taken} seconds')
# -
df_grouped.to_csv('../data/processed/collapses/all_collapses.csv')
|
notebooks/ag-format-collapses-1.3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# +
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = '/Users/SyedAli/Desktop/CPics'
validation_data_dir = '/Users/SyedAli/Desktop/CPics'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16
# +
# Keras backends differ in whether the channel axis comes first or last
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = Sequential()
# Three conv -> relu -> maxpool stages of increasing depth (32, 32, 64 filters)
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Classifier head: flatten -> dense -> dropout -> single sigmoid unit (binary)
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# +
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
model.save_weights('first_try.h5')
# -
|
3D+Print+Convolutional+Neural+Network (2).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="A-AWSHU1NdiY"
# %%capture
# !pip install flair
# + id="22ZrZO2Qyewo"
import torch, flair
# dataset, model and embedding imports
from flair.datasets import UniversalDependenciesCorpus
from flair.embeddings import TransformerWordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
from torch.optim.lr_scheduler import OneCycleLR
from flair.data import Sentence
from flair.models import MultiTagger
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 16, "status": "ok", "timestamp": 1654546649013, "user": {"displayName": "<NAME>\u0131ld\u0131z", "userId": "17719157978648562192"}, "user_tz": -180} id="eTPe_N2dy1xA" outputId="c55bd939-acb5-4f47-a4cc-73d85afb07c4"
print(torch.cuda.is_available())
flair.device = 'cuda:0'
# + id="SXcfkIEfy5gZ"
flair.device = 'cuda:0'
# + id="zru2TbhUzRmq"
hf_model = "dbmdz/bert-base-turkish-cased"
tag_type = "dependency"
dataset = "boun"
path = f"./drive/MyDrive/Colab Notebooks/{dataset}-treebank"
output_folder = f"./drive/MyDrive/Colab Notebooks/{dataset}-{hf_model}-{tag_type}"
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 11484, "status": "ok", "timestamp": 1654546781652, "user": {"displayName": "<NAME>\u0131ld\u0131z", "userId": "17719157978648562192"}, "user_tz": -180} id="I6wDqJmyz9FR" outputId="a58c038c-6a41-4def-be25-2a2e2ceef205"
corpus = UniversalDependenciesCorpus(data_folder=path,
train_file=f"tr_{dataset}-ud-train.conllu",
dev_file=f"tr_{dataset}-ud-dev.conllu",
test_file=f"tr_{dataset}-ud-test.conllu")
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 378, "status": "ok", "timestamp": 1654546782024, "user": {"displayName": "<NAME>\u0131ld\u0131z", "userId": "17719157978648562192"}, "user_tz": -180} id="ayB_7wZq0MMB" outputId="4db2cc18-844e-4171-d762-f96b979650fa"
tag_dictionary = corpus.make_label_dictionary(tag_type)
# + colab={"base_uri": "https://localhost:8080/", "height": 162, "referenced_widgets": ["396faf68d88f46ed9d83233126961b7a", "86c585e394394c49b1c0bb712c03724a", "d1f8c1e7d60f402294056af56fe48263", "ed4a3194c0ee4510b6a2ab0215851249", "a636f1fa76da408c82192dbd1833816f", "<KEY>", "<KEY>", "51fbbdd3e6074a7899fc55d97dedf306", "72d76b14ba7442c685f75e41261d3dd9", "30a29c0d90264db186a39f73a846e746", "d8bb38d1e2ea49c290d87e908c46f9e4", "<KEY>", "eac1698413c041c09554a28f72d4a9c2", "<KEY>", "<KEY>", "0af0caed5a934f30ae9ca2318312d58d", "70448f140deb4166abf9e1d352453fa3", "18b055283577457892652c779e50a325", "21680a7feaee40c0821ab7d41b5c8739", "<KEY>", "f82fcd40d2714b40adeb5effdfae047e", "d3d9702dcac94f4abdd256e13a5f908b", "<KEY>", "1e09c2ea53f043fabc90f83c34c628be", "d85e8643f1914373b3694ef3cbe7fe30", "4311baf490634452a063f613ff2eaa9a", "<KEY>", "<KEY>", "270a9eb5ab7d46e5915a39adaadc9247", "<KEY>", "<KEY>", "<KEY>", "6477321adee64e4795e511980c3d07a5", "2aaec4471eca4204b8d8414d561571de", "<KEY>", "13bae24c709f4e19956284dc84a16b1d", "af6e142201ef448382a9c099a3bd13b9", "a8652d67ec6a45f78489f9762776812f", "b1737b2a23484d849e7069023f8c85cc", "127beb9e722a431e9baac2595e5c0e4f", "ce67c1174e8548f394e60f1c5545ff8b", "2234a9d75c294a1a810e7a2512d6bcdd", "fe17b1d2ea0045d7b7147c8e75aab5e1", "56c7afa110d74d9eb0b7f9860bf5adef"]} executionInfo={"elapsed": 35863, "status": "ok", "timestamp": 1654546706702, "user": {"displayName": "<NAME>\u0131ld\u0131z", "userId": "17719157978648562192"}, "user_tz": -180} id="UDpN_Hgq0pVK" outputId="34007186-74e5-4045-d76b-d809242c8d48"
embeddings = TransformerWordEmbeddings(model=hf_model,
layers="-1",
subtoken_pooling="first",
fine_tune=True,
use_context=True,
)
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type,
use_crf=False,
use_rnn=False,
reproject_embeddings=False,
)
# + colab={"base_uri": "https://localhost:8080/"} id="1Vmh4YPjbZjj" outputId="2b3d12b9-f73a-4c1d-a0d3-46988a15657c" executionInfo={"status": "ok", "timestamp": 1651179231986, "user_tz": -180, "elapsed": 5960859, "user": {"displayName": "<NAME>\u0131ld\u0131z", "userId": "17719157978648562192"}}
trainer = ModelTrainer(tagger, corpus)
trainer.fine_tune(output_folder,
learning_rate=5.0e-6,
mini_batch_size=4,
)
# + colab={"base_uri": "https://localhost:8080/"} id="PJUno9P5tUFe" executionInfo={"status": "ok", "timestamp": 1649967353252, "user_tz": -180, "elapsed": 3901, "user": {"displayName": "<NAME>\u0131ld\u0131z", "userId": "17719157978648562192"}} outputId="3aa75ae7-d8bf-4e32-a997-4ab87d494786"
tagger = SequenceTagger.load(output_folder+'/final-model.pt')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 218, "status": "ok", "timestamp": 1649967679015, "user": {"displayName": "<NAME>\u0131ld\u0131z", "userId": "17719157978648562192"}, "user_tz": -180} id="BdFxIQm_b0aP" outputId="65d88b7d-acec-47d0-87b4-a83a9e985cc1"
sentence = Sentence('Kullanıcı kayıt sistemine belirli bir kimlik numarası ve şifresi kendi isim ve soyismine kayıtlı olacak şekilde erişimi olan kişidir. ')
#sentence = Sentence("<NAME>ye'nin başkentidir. ")
#sentence = Sentence("<NAME>'nin bir şehridir. ")
sentence = Sentence("E-dilekçe öğrenciler tarafından danışmanlarına gönderilen dilekçelerdir.")
tagger.predict(sentence)
for i in sentence:
print(i)
|
dependency-tagger-for-turkish-with-BERT.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="_uYS6xIIpmWc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 910} executionInfo={"status": "ok", "timestamp": 1599869489706, "user_tz": -180, "elapsed": 22106, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GizA91R89ZnXiXnqI6dqFyGlJxLco8xoCOp48_2Zg=s64", "userId": "06174996150946640565"}} outputId="d5d91b40-6514-458a-f43a-3114e029a805"
# %pip install newspaper3k
# + id="OLx6vWlNr5FT" colab_type="code" colab={}
# %pip install wikipedia
# + id="svcz7sHMnoaK" colab_type="code" colab={}
import random
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
import string
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import newspaper
from newspaper import Article
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from keras.models import Model
from keras.layers import Dense
import tensorflow as tf
import pickle
import wikipedia
import webbrowser
# + id="O3fnL1jwtkbC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1599894866599, "user_tz": -180, "elapsed": 985, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GizA91R89ZnXiXnqI6dqFyGlJxLco8xoCOp48_2Zg=s64", "userId": "06174996150946640565"}} outputId="70ba6010-1b35-418a-b83e-3fad2013be18"
nltk.download('punkt', quiet=True)
# + id="1M8LNDQZwtiY" colab_type="code" colab={}
#mental_health = newspaper.build('https://www.medicalnewstoday.com/articles/154543')
#for article in mental_health.articles:
#print(article.url)
#for category in mental_health.category_urls():
#print(category)
#for feed_url in mental_health.feed_urls():
# print(feed_url)
# + id="wsWaUaw6uJvn" colab_type="code" colab={}
# Download and parse one Medical News Today article; its text becomes the
# chatbot's answer corpus.
article = Article('https://www.medicalnewstoday.com/articles/154543')
article.download()
#print(article.html)
article.parse()
article.nlp()  # newspaper's keyword/summary extraction (requires punkt)
corpus = article.text
#print(corpus)
# + id="vLJ3kw1j8ObU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} executionInfo={"status": "ok", "timestamp": 1599894902379, "user_tz": -180, "elapsed": 1080, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GizA91R89ZnXiXnqI6dqFyGlJxLco8xoCOp48_2Zg=s64", "userId": "06174996150946640565"}} outputId="96a796ba-e642-4ec0-f826-e5e9e0011450"
# Split the corpus into sentences; user queries are matched against this list.
sentence_list = nltk.sent_tokenize(corpus)
print(sentence_list)
# + id="nhoFfupfCP09" colab_type="code" colab={}
def greetings():
    """Greet the user with a random salutation and ask for their name.

    Reads the name from stdin and returns it so callers can reuse it
    (the original discarded it, which made later references fail).
    """
    bot_greetings = ['Howdy', 'Hi', 'Hello', 'Hola', 'Namaste', 'Wassup', 'Greetings']
    print('Hey there! Welcome onboard! What is your name')
    user = input()
    # input() returns '' (never None) when the user just presses enter, so
    # test for an empty string instead of the original dead `user == None`.
    if not user:
        print(f'{random.choice(bot_greetings)} user. My name is <NAME> and I am here to answer any questions you have about mental health. If you wish to exit, please type "quit" ')
    else:
        print(f'{random.choice(bot_greetings)} {user}. My name is <NAME> and I am here to answer any questions you have about mental health. If you wish to exit, please type "quit" ')
    return user
# + id="BBPhawMLXH_5" colab_type="code" colab={}
def user_gratitude(user_input):
    """Return a random acknowledgement when *user_input* expresses thanks.

    Returns None when no gratitude is detected. The whole (lower-cased,
    stripped) message is compared against the known phrases in addition to
    the word-by-word scan: the original per-word check could never match the
    multi-word entries such as "thank you" or "thanks alot".
    """
    user_input = user_input.lower()
    user_thanks = ["thanks", "thanks alot", "thank you", "that's helpful", "awesome doctor bot"]
    bot_reply = ["glad I could help!", "any time!", "my pleasure", "happy to help"]
    if user_input.strip() in user_thanks:
        return random.choice(bot_reply)
    for word in user_input.split():
        if word in user_thanks:
            return random.choice(bot_reply)
    return None
# + id="qywkPx7q-OKV" colab_type="code" colab={}
def index_sort(list_variable):
    """Return the element indices of *list_variable* ordered by value, descending.

    Equal values keep their original relative order (Python's sort is stable,
    also with reverse=True), matching the original O(n^2) swap-based scan but
    in O(n log n).
    """
    return sorted(range(len(list_variable)),
                  key=lambda idx: list_variable[idx],
                  reverse=True)
# + id="Rvhq2_Gp__dZ" colab_type="code" colab={}
def bot_response(user_input):
    """Return up to six corpus sentences most similar to *user_input*.

    Temporarily appends the lower-cased input to the global sentence_list,
    vectorises everything with CountVectorizer, ranks corpus sentences by
    cosine similarity to the input, and concatenates the top matches.
    Falls back to an apology when nothing is similar at all.
    """
    user_input = user_input.lower()
    sentence_list.append(user_input)
    bot_response = ' '
    cm = CountVectorizer().fit_transform(sentence_list)
    similarity_scores = cosine_similarity(cm[-1], cm)
    similarity_scores_list = similarity_scores.flatten()
    index = index_sort(similarity_scores_list)
    # Drop the first (highest) entry: it is the user input matched to itself.
    index = index[1:]
    response_flag = 0
    j = 0
    for i in range(len(index)):
        if similarity_scores_list[index[i]] > 0.0:
            bot_response = bot_response + ' ' + sentence_list[index[i]]
            response_flag = 1
            j = j + 1
        if j > 5:
            break
    if response_flag == 0:
        # Fixed typo ("bot_reponse"): the apology was assigned to a throwaway
        # variable, so the fallback message was never actually returned.
        bot_response = bot_response + ' ' + "My apologies, I have no response for you."
    sentence_list.remove(user_input)
    return bot_response
# + id="RZJw4vwSDeLE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 258} executionInfo={"status": "ok", "timestamp": 1599895486655, "user_tz": -180, "elapsed": 86175, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GizA91R89ZnXiXnqI6dqFyGlJxLco8xoCOp48_2Zg=s64", "userId": "06174996150946640565"}} outputId="c029851d-0246-4127-d8c8-a8ce09a3c8fc"
# Main chat loop: greet once, then answer questions until an exit word is
# typed; on exit, offer Wikipedia links about mental health.
greetings()
exit_list = ['exit', 'later', 'bye', 'quit', 'clear', 'break']
while True:
    user_input = input()
    if user_input.lower() in exit_list:
        while True:
            print('Before you go, here is a list of wikipedia links relating to mental health that might be of interest to you')
            mental_health = wikipedia.page('mental_health')
            links = mental_health.links
            print(links)
            print("If you'd wish to proceed to a link type the link below, if not, type 'no thanks' to proceed with exit ")
            user_link = input()
            if user_link.lower() == 'no thanks':
                # The original farewell referenced `user`, a local variable of
                # greetings(), and raised NameError here; the message no longer
                # needs the name.
                print('Was a pleasure serving you. Hope to see you back again')
                break
            else:
                data = wikipedia.page(user_link)
                print(data.content)
        break
    else:
        # Call user_gratitude once (the original called it twice, drawing two
        # different random replies) and fall back to the similarity answer.
        reply = user_gratitude(user_input)
        if reply is not None:
            print(f'Doc Bot: {reply}')
        else:
            print(f'Doc Bot: {bot_response(user_input)}')
|
mental_health_chatbot.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Exercise: Creating Cells, Executing Code
#
# 1. Create a new code cell below where you define variables containing your name, your age in years, and your major.
# 2. Create another cell that uses these variables and prints a concatenated string stating your name, major, and your age in years, months, and days (assuming today is your birthday ;)). The output should look like that:
#
# ```
# Name: Science Cat, Major: Computer Science, Age: 94 years, or 1128 months, or 34310 days.
# ```
name = "<NAME>"
major = "Computer Science"
age = 36
# Derived ages: 12 months per year and 365 days per year (leap days ignored).
# The original computed days as age * 12 * 365, i.e. it multiplied the month
# count by 365 instead of the year count; the expected sample output
# ("94 years ... 34310 days") corresponds to years * 365.
months = age * 12
days = age * 365
print("Name:", name + ",", "Major:", major + ",", "Age:", age, "years, or", months, "months, or", days, "days")
# ### Exercise: Functions
# Write a function that
# * takes two numerical variables
# * multiplies them with each other
# * divides them by a numerical variable defined in the scope outside the function
# * and returns the result.
#
# Print the result of the function for three different sets of input variables.
# Module-level divisor deliberately read from inside the function below
# (this is the point of the exercise: a scope side effect).
side_effect_variable = 3

def multiply_with_side_effect(x, y):
    """Multiply *x* and *y*, then divide by the module-level divisor."""
    product = x * y
    return product / side_effect_variable

multiply_with_side_effect(2, 4)
multiply_with_side_effect(3, 3)
multiply_with_side_effect(9, 9)
|
02-basic-python/lecture-02-exercise-solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import numpy as np
from pymongo import MongoClient
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# Connect to the local MongoDB instance holding the experiment results.
client = MongoClient('localhost', 27017)
db = client.TFE
collection = db.results2
# This is the analysis of attention model + word2vec on liar-liar dataset.
# Materialise all finished runs of this model that recorded their params.
res = [res for res in collection.find({'model' : 'Attention LSTM 1.2', 'finish' : True, 'params' : {'$exists' : True}})]
def getBestRecall(epochs):
    """Return the epoch record with the highest validation recall.

    Ties keep the earliest epoch, matching the original strict-greater scan
    (max returns the first maximal element). Raises on an empty list, as the
    original did.
    """
    return max(epochs, key=lambda epoch: epoch['valid']['recall'])
def bestPrecision(epochs):
    """Return the epoch with the highest validation accuracy (first on ties)."""
    by_accuracy = lambda epoch: epoch['valid']['clr']['accuracy']
    ranked = sorted(epochs, key=by_accuracy, reverse=True)
    return ranked[0]
def bestf1(epochs):
    """Return the epoch with the highest weighted-average F1 on validation."""
    by_f1 = lambda epoch: epoch['valid']['clr']['weighted avg']['f1-score']
    ranked = sorted(epochs, key=by_f1, reverse=True)
    return ranked[0]
def bestRecall(epochs):
    """Return the epoch with the highest recall on the 'fake' class."""
    by_fake_recall = lambda epoch: epoch['valid']['clr']['fake']['recall']
    ranked = sorted(epochs, key=by_fake_recall, reverse=True)
    return ranked[0]
def lowerMissClass(epochs):
    """Return the epoch with the fewest validation misclassifications (FP + FN).

    The flat confusion matrix is laid out (tn, fp, fn, tp), so indices 1 and 2
    are the two error cells.
    """
    def miss_count(epoch):
        matrix = epoch['valid']['Confusion Matrix']
        return matrix[1] + matrix[2]
    return sorted(epochs, key=miss_count)[0]
def expoAverage(array, alpha):
    """Exponentially-weighted moving average of *array* with smoothing *alpha*.

    The first element is kept as-is; each subsequent element mixes the raw
    value (weight alpha) with the previous smoothed value (weight 1 - alpha).
    """
    smoothed = [array[0]]
    for value in array[1:]:
        smoothed.append(alpha * value + (1 - alpha) * smoothed[-1])
    return smoothed
# +
def precision(confMat):
    """TP / (TP + FP) from a flattened (tn, fp, fn, tp) confusion matrix."""
    _, false_pos, _, true_pos = confMat
    return true_pos / (true_pos + false_pos)
def recall(confMat):
    """TP / (TP + FN) from a flattened (tn, fp, fn, tp) confusion matrix."""
    _, _, false_neg, true_pos = confMat
    return true_pos / (true_pos + false_neg)
def f1(confMat):
    """Harmonic mean (F1) of precision and recall for the same flat matrix."""
    p = precision(confMat)
    r = recall(confMat)
    return 2 * (p * r) / (p + r)
# -
# For every finished run, keep the epoch with the best weighted precision and
# record it together with the run's hyper-parameters.
df = pd.DataFrame(columns = ['precision', 'epoch', 'SEQ_LENGTH', 'EMBEDDING_DIM', 'HIDDEN', 'LAYERS', 'DROPOUT'])
cursor = collection.find({'model' : 'Attention LSTM 1.2', 'finish' : True, 'params' : {'$exists' : True}})
for res in cursor:
    best = bestPrecision(res['result'])
    # NOTE(review): DataFrame.append is deprecated in pandas >= 1.4; collect
    # rows in a list and build the frame once instead.
    df = df.append({'precision' : best['valid']['clr']['weighted avg']['precision'], 'epoch' : best['epoch'],
                    'SEQ_LENGTH' : res['params'][0]['SEQ_LENGTH'],
                    'EMBEDDING_DIM' : res['params'][0]['EMBEDDING_DIM'],
                    'HIDDEN' : res['params'][0]['HIDDEN'],
                    'LAYERS' : res['params'][0]['LAYERS'],
                    'DROPOUT' : res['params'][0]['DROPOUT']}, ignore_index=True)
cursor.close()
df.sort_values(by = 'precision', ascending = False).iloc[0]
# Per-hyper-parameter effect on the best precision (point estimates with
# bootstrap confidence intervals), plus the precision distribution.
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(10,10))
sns.pointplot(x = 'SEQ_LENGTH', y = 'precision', data = df, ax = axes[0][0], capsize=.2)
sns.pointplot(x = 'HIDDEN', y = 'precision', data = df, ax = axes[0][1], capsize=.2)
sns.pointplot(x = 'LAYERS', y = 'precision', data = df, ax = axes[1][0], capsize=.2)
sns.pointplot(x = 'DROPOUT', y = 'precision', data = df, ax = axes[1][1], capsize=.2)
plt.savefig('confInt_precision_liar_attention_word2vec.pdf')
sns.distplot(df['precision'], bins = 25, kde = False)
plt.savefig('distplot_precision_liar_attention_word2vec.pdf')
# +
# Learning curves for one selected configuration (SEQ_LENGTH=20, HIDDEN=5,
# DROPOUT=0.75, LAYERS=1): loss plus recall / F1 / precision on train vs
# validation for every epoch.
res = collection.find_one({'model' : 'Attention LSTM 1.2', 'params.SEQ_LENGTH' : 20, 'params.HIDDEN' : 5, 'params.DROPOUT' : 0.75, 'params.LAYERS' : 1})
train = [r['train']['clr']['weighted avg']['recall'] for r in res['result']]
valid = [r['valid']['clr']['weighted avg']['recall'] for r in res['result']]
loss = [r['train']['loss'] for r in res['result']]
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(15,15))
axes.flat[1].plot(train, label = 'train')
axes.flat[1].plot(valid, label = 'valid')
axes.flat[1].set_xlabel('epoch')
axes.flat[1].set_title('Recall metrics for train and validation set with respect to the epoch')
axes.flat[1].legend()
axes.flat[0].plot(loss, label = 'loss')
axes.flat[0].set_xlabel('epoch')
axes.flat[0].set_ylabel('Loss')
axes.flat[0].set_title('Loss with respect to the epoch')
axes.flat[0].legend()
train = [r['train']['clr']['weighted avg']['f1-score'] for r in res['result']]
valid = [r['valid']['clr']['weighted avg']['f1-score'] for r in res['result']]
axes.flat[2].plot(train, label = 'train')
axes.flat[2].plot(valid, label = 'valid')
axes.flat[2].set_xlabel('epoch')
axes.flat[2].set_title('F1-score metrics for train and validation set with respect to the epoch')
axes.flat[2].legend()
train = [r['train']['clr']['weighted avg']['precision'] for r in res['result']]
valid = [r['valid']['clr']['weighted avg']['precision'] for r in res['result']]
axes.flat[3].plot(train, label = 'train')
axes.flat[3].plot(valid, label = 'valid')
axes.flat[3].set_xlabel('epoch')
axes.flat[3].set_title('Precision metrics for train and validation set with respect to the epoch')
axes.flat[3].legend()
plt.savefig('output/attention1.pdf')
# -
# Same sweep summary as above, but keeping each run's best weighted-F1 epoch.
df = pd.DataFrame(columns = ['f1-score', 'epoch', 'SEQ_LENGTH', 'EMBEDDING_DIM', 'HIDDEN', 'LAYERS', 'DROPOUT'])
cursor = collection.find({'model' : 'Attention LSTM 1.2', 'finish' : True, 'params' : {'$exists' : True}})
for res in cursor:
    best = bestf1(res['result'])
    df = df.append({'f1-score' : best['valid']['clr']['weighted avg']['f1-score'], 'epoch' : best['epoch'],
                    'SEQ_LENGTH' : res['params'][0]['SEQ_LENGTH'],
                    'EMBEDDING_DIM' : res['params'][0]['EMBEDDING_DIM'],
                    'HIDDEN' : res['params'][0]['HIDDEN'],
                    'LAYERS' : res['params'][0]['LAYERS'],
                    'DROPOUT' : res['params'][0]['DROPOUT']}, ignore_index=True)
cursor.close()
df.sort_values(by = 'f1-score', ascending = False).iloc[0]
# Hyper-parameter effect on the best F1, plus its distribution.
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(10,10))
sns.pointplot(x = 'SEQ_LENGTH', y = 'f1-score', data = df, ax = axes[0][0])
sns.pointplot(x = 'HIDDEN', y = 'f1-score', data = df, ax = axes[0][1])
sns.pointplot(x = 'LAYERS', y = 'f1-score', data = df, ax = axes[1][0])
sns.pointplot(x = 'DROPOUT', y = 'f1-score', data = df, ax = axes[1][1])
sns.distplot(df['f1-score'], bins = 50)
# +
# Learning curves for the same configuration as the earlier cell.
# NOTE(review): the two recall lines read r['train']['recall'] /
# r['valid']['recall'] (flat keys) whereas the analogous cell above used
# ['clr']['weighted avg']['recall'] -- confirm both key layouts exist in the
# stored results.
res = collection.find_one({'model' : 'Attention LSTM 1.2', 'params.SEQ_LENGTH' : 20, 'params.HIDDEN' : 5, 'params.DROPOUT' : 0.75, 'params.LAYERS' : 1})
train = [r['train']['recall'] for r in res['result']]
valid = [r['valid']['recall'] for r in res['result']]
loss = [r['train']['loss'] for r in res['result']]
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(15,15))
axes.flat[1].plot(train, label = 'train')
axes.flat[1].plot(valid, label = 'valid')
axes.flat[1].set_xlabel('epoch')
axes.flat[1].set_title('Recall metrics for train and validation set with respect to the epoch')
axes.flat[1].legend()
axes.flat[0].plot(loss, label = 'loss')
axes.flat[0].set_xlabel('epoch')
axes.flat[0].set_ylabel('Loss')
axes.flat[0].set_title('Loss with respect to the epoch')
axes.flat[0].legend()
train = [r['train']['clr']['weighted avg']['f1-score'] for r in res['result']]
valid = [r['valid']['clr']['weighted avg']['f1-score'] for r in res['result']]
axes.flat[2].plot(train, label = 'train')
axes.flat[2].plot(valid, label = 'valid')
axes.flat[2].set_xlabel('epoch')
axes.flat[2].set_title('F1-score metrics for train and validation set with respect to the epoch')
axes.flat[2].legend()
train = [r['train']['clr']['weighted avg']['precision'] for r in res['result']]
valid = [r['valid']['clr']['weighted avg']['precision'] for r in res['result']]
axes.flat[3].plot(train, label = 'train')
axes.flat[3].plot(valid, label = 'valid')
axes.flat[3].set_xlabel('epoch')
axes.flat[3].set_title('Precision metrics for train and validation set with respect to the epoch')
axes.flat[3].legend()
#plt.savefig('output/attention1.pdf')
# -
# Sweep summary optimising the recall of the 'fake' class.
df = pd.DataFrame(columns = ['recall', 'epoch', 'SEQ_LENGTH', 'EMBEDDING_DIM', 'HIDDEN', 'LAYERS', 'DROPOUT'])
cursor = collection.find({'model' : 'Attention LSTM 1.2', 'finish' : True, 'params' : {'$exists' : True}})
for res in cursor:
    best = bestRecall(res['result'])
    df = df.append({'recall' : best['valid']['clr']['fake']['recall'], 'epoch' : best['epoch'],
                    'SEQ_LENGTH' : res['params'][0]['SEQ_LENGTH'],
                    'EMBEDDING_DIM' : res['params'][0]['EMBEDDING_DIM'],
                    'HIDDEN' : res['params'][0]['HIDDEN'],
                    'LAYERS' : res['params'][0]['LAYERS'],
                    'DROPOUT' : res['params'][0]['DROPOUT']}, ignore_index=True)
cursor.close()
# Hyper-parameter effect on the best 'fake' recall, plus its distribution.
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(10,10))
sns.pointplot(x = 'SEQ_LENGTH', y = 'recall', data = df, ax = axes[0][0])
sns.pointplot(x = 'HIDDEN', y = 'recall', data = df, ax = axes[0][1])
sns.pointplot(x = 'LAYERS', y = 'recall', data = df, ax = axes[1][0])
sns.pointplot(x = 'DROPOUT', y = 'recall', data = df, ax = axes[1][1])
sns.distplot(df['recall'], bins = 50)
df.sort_values(by = 'recall', ascending = False).iloc[0]
# +
# Learning curves for the best-recall configuration (SEQ_LENGTH=10, HIDDEN=10,
# DROPOUT=0.25, LAYERS=1).
# NOTE(review): same flat-vs-nested recall key inconsistency as the cell above.
res = collection.find_one({'model' : 'Attention LSTM 1.2', 'params.SEQ_LENGTH' : 10, 'params.HIDDEN' : 10, 'params.DROPOUT' : 0.25, 'params.LAYERS' : 1})
train = [r['train']['recall'] for r in res['result']]
valid = [r['valid']['recall'] for r in res['result']]
loss = [r['train']['loss'] for r in res['result']]
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize=(15,15))
axes.flat[1].plot(train, label = 'train')
axes.flat[1].plot(valid, label = 'valid')
axes.flat[1].set_xlabel('epoch')
axes.flat[1].set_title('Recall metrics for train and validation set with respect to the epoch')
axes.flat[1].legend()
axes.flat[0].plot(loss, label = 'loss')
axes.flat[0].set_xlabel('epoch')
axes.flat[0].set_ylabel('Loss')
axes.flat[0].set_title('Loss with respect to the epoch')
axes.flat[0].legend()
train = [r['train']['clr']['weighted avg']['f1-score'] for r in res['result']]
valid = [r['valid']['clr']['weighted avg']['f1-score'] for r in res['result']]
axes.flat[2].plot(train, label = 'train')
axes.flat[2].plot(valid, label = 'valid')
axes.flat[2].set_xlabel('epoch')
axes.flat[2].set_title('F1-score metrics for train and validation set with respect to the epoch')
axes.flat[2].legend()
train = [r['train']['clr']['weighted avg']['precision'] for r in res['result']]
valid = [r['valid']['clr']['weighted avg']['precision'] for r in res['result']]
axes.flat[3].plot(train, label = 'train')
axes.flat[3].plot(valid, label = 'valid')
axes.flat[3].set_xlabel('epoch')
axes.flat[3].set_title('Precision metrics for train and validation set with respect to the epoch')
axes.flat[3].legend()
#plt.savefig('output/attention1.pdf')
# -
|
Code/Analysis/models/Attention/attention_lstm1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# MAP RUNNING TIME
#
# https://github.com/Make-School-Courses/DS-2.3-Data-Science-in-production/blob/master/Lessons/Pyspark_Notebooks/Pyspark_array_manipulation_1.ipynb
#
# The first line defines a base RDD from an external file. This dataset is not loaded in memory or otherwise acted on
# # .... UHHH MORE STUFF! (beyond what's in the linked ntbk)
#
# +
import math
from pyspark import SparkContext
# sc = SparkContext()
# Distribute 100k integers over 100 partitions, double them, keep multiples
# of 4, sum them with reduce, and print the square root (the pure-Python
# equivalent follows in the next cell).
nums = sc.parallelize(range(100000), numSlices=100)
doubled = nums.map(lambda n: n*2)
total = doubled.filter(lambda n: n%4==0).reduce(lambda a,b: a+b)
print(math.sqrt(total))
# +
import math
def f(ls):
    """Sum the doubles of ls's elements whose double is divisible by 4,
    and return the square root of that sum (plain-Python twin of the
    Spark map/filter/reduce pipeline above)."""
    total = sum(2 * n for n in ls if (2 * n) % 4 == 0)
    return math.sqrt(total)

print(f(range(100000)))
# -
# No need for .collect! Everything will be distributed, computation will happen, yay.
#
# (I have no clue what any of that means. That's just what Milad said)
#
# So the only difference between using spark and python native is here, nums c=will come after function, or doubled will be AFTER the condition (lambda n: n%4==0) -- _Cherish says: So the ordering of things is different_
#
# <b>Map reduce filter is the ONLY way when you use Pyspark. I think? God I am so lost someone help pls</b>
# # Difference between map and flatmap
# +
import keras
import pyspark
from pyspark import SparkContext
sc = SparkContext()
values = sc.parallelize([1, 2, 3, 4], 2)
print(values.map(range).collect())
print(values.flatMap(range).collect())
# -
|
Classwork/09.25.19..../Pyspark array manip 1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dapnn
# language: python
# name: dapnn
# ---
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#PDC-2020-and-2021" data-toc-modified-id="PDC-2020-and-2021-1"><span class="toc-item-num">1 </span>PDC 2020 and 2021</a></span></li><li><span><a href="#BINET-Logs" data-toc-modified-id="BINET-Logs-2"><span class="toc-item-num">2 </span>BINET Logs</a></span></li></ul></div>
# # Dataset Descriptions:
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# ## PDC 2020 and 2021
# +
#export
from dapnn.imports import *
from dapnn.data_processing import *
from dapnn.anomaly import *
# -
def get_ds_stats(path):
    """Collect per-log summary statistics for every event log matching *path*.

    Parameters
    ----------
    path : str
        glob pattern of log files (e.g. 'data/csv/PDC2020_training/*')

    Returns
    -------
    pd.DataFrame with one row per log: name, # cases, # activities, # events
    and # anomalies ('unknown' when the log has no 'normal' column).
    """
    res=[]
    for log in progress_bar(glob.glob(path)):
        # The log name is the last underscore-separated token of the file stem.
        splits = log.split('.')[0].split('_')
        log_name = splits[-1]
        log = import_log(log)
        number_cases = len(log.index.unique())
        number_activities= len(log.activity.unique())
        number_events = len(log)
        number_attr = 1  # NOTE(review): computed but never reported -- confirm intent
        # Anomalous traces are those flagged normal == False, when labels exist.
        number_anomalies= len(log[log['normal']==False]['trace_id'].unique()) if 'normal' in log.columns else 'unknown'
        res.append([log_name,number_cases,number_activities,number_events
                    ,number_anomalies])
    res= pd.DataFrame(res,columns=['Name','# Cases','# Activities','# Events','# Anomalies'])
    return res
pdc20_train= get_ds_stats('data/csv/PDC2020_training/*')
pdc20_train
pdc20_gt= get_ds_stats('data/csv/PDC2020_ground_truth/*')
pdc20_gt
# +
def format_ds(x):
    """Collapse a stats column to a single value, or a 'lo-hi' range string
    when its values differ."""
    lo, hi = x.min(), x.max()
    return lo if lo == hi else f'{lo}-{hi}'
pdc20_train_stats = pdc20_train.apply(format_ds).to_frame().T
pdc20_gt_stats = pdc20_gt.apply(format_ds).to_frame().T
# -
pdc21_train= get_ds_stats('data/csv/PDC2021_training/*')
pdc21_train
pdc21_gt= get_ds_stats('data/csv/PDC2021_ground_truth/*')
pdc21_gt
pdc21_train_stats = pdc21_train.apply(format_ds).to_frame().T
pdc21_gt_stats = pdc21_gt.apply(format_ds).to_frame().T
res =pd.concat([pdc20_train_stats,pdc20_gt_stats,pdc21_train_stats,pdc21_gt_stats])
res =res[res.columns[1:]]
res.index = ['PDC 2020 Train', 'PDC 2020 Test','PDC 2021 Train','PDC 2021 Test']
res['# Logs'] = [len(pdc20_train),len(pdc20_gt),len(pdc21_train),len(pdc21_gt)]
res =res[['# Logs','# Cases','# Activities','# Events','# Anomalies']]
res
print(res.to_latex())
# ## BINET Logs
# Build the same per-log statistics table for the BINET logs; these logs carry
# extra event attributes and an explicit 'anomaly' label column.
res=[]
for fn in progress_bar(glob.glob('data/csv/binet_logs/*')):
    # Strip the directory and the trailing 7 characters of the file name
    # (presumably the file extension -- TODO confirm suffix length).
    log_name = fn.split('/')[-1][:-7]
    log = import_log(fn)
    number_cases = len(log.index.unique())
    number_activities= len(log.activity.unique())
    number_events = len(log)
    # Attribute count excluding one column -- presumably the activity itself;
    # verify against get_attr's contract.
    number_attr =len(get_attr(attr_dict,fn))-1
    number_anomalies= len(log[log['anomaly']!='normal']['trace_id'].unique())
    res.append([log_name,number_cases,number_activities,number_events,number_attr,number_anomalies])
binet_logs= pd.DataFrame(res,columns=['Name','# Cases','# Activities','# Events','# Attributes','# Anomalies'])
binet_logs
# Aggregate the per-log table by dataset family (name prefix before the first
# '-'), collapsing each statistic to a value or range via format_ds.
groups = np.unique(binet_logs['Name'].apply(lambda x: x.split('-')[0]).values)
dfs =[]
for group in groups:
    df = binet_logs[binet_logs.Name.str.contains(group)]
    len_df=len(df)
    df = df.apply(format_ds).to_frame().T
    df['# Logs'] = [len_df]
    dfs.append(df)
res =pd.concat(dfs)
res =res[res.columns[1:]]  # drop the collapsed 'Name' column
# Upper-case BPI dataset names, capitalise the rest, for the paper table.
res.index=[i.upper() if 'bpi' in i else i.capitalize() for i in groups ]
res = res[['# Logs','# Cases','# Activities','# Events','# Attributes','# Anomalies']].sort_index()
res
print(res.to_latex())
|
07_dataset_statistics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import re
import os
import random
import pprint
from collections import defaultdict
def remove_nan(df: pd.DataFrame) -> dict:
    """Convert *df* to a dict of column lists with all missing values removed.

    The original implementation removed only entries identical (or equal) to
    the ``np.nan`` singleton; pandas float columns hold distinct NaN objects
    and NaN != NaN, so those entries were never removed. Filtering with
    ``pd.notna`` handles every missing-value representation.

    Parameters:
    -----------
    df : pd.DataFrame

    Returns:
    -----------
    dict mapping each column name to its list of non-null values
    """
    return {column: [value for value in values if pd.notna(value)]
            for column, values in df.to_dict('list').items()}
df_entities = pd.read_excel("dataset.xlsx", sheet_name='entities_slots')
lookups = remove_nan(df_entities)
lookups
# +
# Read the 'user' sheet: column 0 is the intent label (forward-filled across
# merged/blank cells), column 1 the example sentence. Group the examples per
# intent into the `sentences` dict.
df_user = pd.read_excel('dataset.xlsx',sheet_name='user', header=None)
df_user.columns = ['user','sentences']
df_user['user'] = df_user['user'].fillna(method='ffill', axis=0)
sentences = defaultdict(list)
df_grouped = df_user.groupby('user')
for group in df_grouped.groups:
    sentences[group] = df_grouped.get_group(group)['sentences'].tolist()
sentences
# +
df_bot = pd.read_excel('dataset.xlsx',sheet_name='bot', header=None)
df_bot.columns = ['bot','sentences']
df_bot['bot'] = df_bot['bot'].fillna(method='ffill', axis=0)
bot_sentences = defaultdict(list)
df_grouped = df_bot.groupby('bot')
for group in df_grouped.groups:
bot_sentences[group] = df_grouped.get_group(group)['sentences'].tolist()
bot_sentences
# -
df_dialogs = pd.read_excel('dataset.xlsx',sheet_name='dialogs')
dialogs = remove_nan(df_dialogs)
dialogs
# +
sentences_file = "sentences_origin.txt"
# Write every example as "<sentence>|<intent>" on its own line. The file is
# opened once in write mode (the original reopened it for every single line,
# in 'w+' for the first and 'a+' for the rest, producing the same file with
# far more system calls) and with an explicit utf-8 encoding to match the
# reader further down, which opens it with encoding='utf-8'.
with open(sentences_file, 'w', encoding='utf-8') as f:
    for intent, intent_sentences in sentences.items():
        for sentence in intent_sentences:
            f.write(f'{sentence}|{intent}\n')
# +
# Reload the written file and split each "sentence|category" line back into
# two parallel lists (sentences upper-cased for training).
sentences, categories = [], []
with open(sentences_file, encoding='utf-8') as f:
    dataset = f.read()
dataset = dataset.split("\n")
for data in dataset:
    sentence = data.split("|")
    # Skip lines without a '|' separator (e.g. the trailing empty line).
    if len(sentence) > 1:
        sentences.append(sentence[0].upper())
        categories.append(sentence[1])
assert len(sentences) == len(categories)
# -
sentences[0]
categories[0]
# +
sentences_file_generated = "sentences_generated.txt"
n_sentences = 1000
# +
slots = list(lookups.keys())
slots
# -
# Generate n_sentences synthetic examples: draw a random template sentence
# and replace each "[SLOT](value)" span with a randomly chosen lookup value
# for that slot, then append "sentence|category" to the output file.
for i in range(n_sentences):
    index = random.randint(0, len(sentences)-1)
    sentence = sentences[index]
    category = categories[index]
    for key in slots:
        """
        Ogni volta che regex individua lo slot nella frase
        sostituisce il valore con uno estratto in modo casuale
        """
        # Match "[key](value)" where value is lower-case words, and swap the
        # value for a random one from this slot's lookup list.
        regex_str = fr'\[{key}]\((?P<value>[a-z ]+)\)+'
        slot_match = re.compile(regex_str)
        repl = f"[{key}]({random.choice(lookups[key])})"
        sentence = slot_match.sub(repl, sentence)
    """
    Poi riassocia la categoria di partenza
    """
    # First iteration truncates the file; later iterations append.
    if i == 0:
        with open(sentences_file_generated, 'w+') as f:
            f.writelines(f'{sentence}|{category}\n')
    else:
        with open(sentences_file_generated, 'a+') as f:
            f.writelines(f'{sentence}|{category}\n')
def rimuovi_punteggiatura(text):
    """Replace punctuation with spaces and collapse whitespace runs.

    The training data contains square and round brackets that mark slots,
    so brackets are deliberately left untouched here.
    """
    without_punct = re.sub(r'[\.,;:!?]', " ", text)
    return re.sub(r'\s+', " ", without_punct)
with open(sentences_file_generated, encoding='utf-8') as f:
sentences = f.read()
sentences = sentences.split("\n")
# +
example = random.choice(sentences)
example, _ = example.split("|")
print("Before:",example)
example = re.sub(r'\[(?P<name>[a-zA-Z_]+)\]|\(|\)+', "", example)
print("After:",example)
# +
sentence = sentences[10]
print(f"Fase iniziale: {sentence} \n")
sentence, categ = sentence.split("|")
sentence = rimuovi_punteggiatura(sentence)
print(f"Fase finale: {sentence}")
# -
regex_str = r'\[(?P<name>[a-zA-Z_]+)\]\((?P<value>[a-zA-Z\' ]+)\)+'
slot_match = re.compile(regex_str)
# +
splits = slot_match.split(sentence)
splits
# +
matches = slot_match.findall(sentence)
dct = {k:v for k,v in matches}
dct
# -
for split in splits:
if split in list(dct.values()):
for value in split.split():
index = list(dct.values()).index(split)
key = list(dct.keys())[index]
print(value, "->", key)
elif split in list(dct.keys()):
pass
else:
for splt in split.split():
print(splt, "->", '0')
# +
# Turn every generated sentence into (sentence-index, word, tag) triples:
# words inside a matched "[SLOT](value)" span get the slot name as tag,
# everything else gets 'O'.
arr_sentences = list()
arr_categories = list()
for n, sentence in enumerate(sentences):
    try:
        sentence, categ = sentence.split("|")
        sentence = rimuovi_punteggiatura(sentence)
        arr_categories.append(categ)
        splits = slot_match.split(sentence)
        # match = slot_match.search(frase)
        matches = slot_match.findall(sentence)
        dct = {k:v for k,v in matches}
        # NOTE(review): findall always returns a list (possibly empty), never
        # None, so the else-branch below is unreachable.
        if matches is not None:
            for split in splits:
                if split in list(dct.values()):
                    # Every word of a slot value is tagged with the slot name.
                    for value in split.split():
                        index = list(dct.values()).index(split)
                        key = list(dct.keys())[index]
                        arr_sentences.append([n, value, key])
                elif split in list(dct.keys()):
                    pass
                else:
                    for value in split.split():
                        arr_sentences.append([n, value, 'O'])
        else:
            """
            Serve per verificare se in qualche frase non avviene il match
            """
            # NOTE(review): `frase` is undefined here (would raise NameError,
            # silently swallowed by the except below).
            print(n, frase)
    except Exception as err:
        # NOTE(review): this broad except hides all per-sentence failures.
        pass
arr_sentences[:10]
# -
df = pd.DataFrame(arr_sentences, columns=['n_frase','word','tag'])
df.head(15)
df_target = df[['n_frase','tag']]
df_target.head(10)
def prepare_target_crf(df: pd.DataFrame) -> list:
    """Final step preparing the targets used to train the CRF.

    Parameters:
    -----------
    df : pd.DataFrame
        must contain two columns: one indexing the sentence ('n_frase') and
        one holding the slot tag of each token ('tag')

    Returns:
    -----------
    y : list
        a nested list: one list of tags per sentence
    """
    return [group['tag'].tolist() for _, group in df.groupby('n_frase')]
y = prepare_target_crf(df_target)
y[:5]
df_sample = df.head(100).copy()
# +
import spacy
nlp = spacy.load("it_core_news_lg")
# -
def spacy_entities_extractor(txt):
    """Return the spaCy entity type of the (capitalised) first token of *txt*,
    or 'O' when spaCy assigns no entity type."""
    doc = nlp(txt.capitalize())
    token = doc[0]
    if token.ent_type_ != '':
        return token.ent_type_ # pos_
    else:
        return 'O'
# +
df_sample['shift-3'] = df_sample.groupby('n_frase')['word'].shift(1).str.slice(-3)
df_sample['shift+3'] = df_sample.groupby('n_frase')['word'].shift(-1).str.slice(0,3)
df_sample['shift-3'].fillna('BOF', inplace=True)
df_sample['shift+3'].fillna('EOF', inplace=True)
df_sample['spacy'] = df_sample['word'].apply(spacy_entities_extractor)
df_sample['bias'] = 1
df_sample
# -
def extend_data(df: pd.DataFrame, spacy: bool = False) -> pd.DataFrame:
    """Add CRF context features to *df* in place and return it.

    For every token, attaches the last 3/10 characters of the previous word
    and the first 3/10 characters of the next word within the same sentence
    ('BOF'/'EOF' at sentence boundaries), a constant bias feature, and
    optionally a spaCy entity feature. Drops the target column 'tag' when
    present so only feature columns remain.

    Parameters:
    -----------
    df : pd.DataFrame
        must contain 'n_frase' (sentence index) and 'word' columns
    spacy : bool
        when True, adds a 'spacy' column via spacy_entities_extractor
        (requires the spaCy model to be loaded)

    Returns:
    -----------
    df : pd.DataFrame
        the same DataFrame, extended with the feature columns
    """
    words = df.groupby('n_frase')['word']
    df['shift-3'] = words.shift(1).str.slice(-3)
    df['shift+3'] = words.shift(-1).str.slice(0, 3)
    df['shift-10'] = words.shift(1).str.slice(-10)
    df['shift+10'] = words.shift(-1).str.slice(0, 10)
    # Assign instead of fillna(..., inplace=True) on a column view, which is
    # unreliable under pandas copy-on-write.
    df['shift-3'] = df['shift-3'].fillna('BOF')
    df['shift+3'] = df['shift+3'].fillna('EOF')
    df['shift-10'] = df['shift-10'].fillna('BOF')
    df['shift+10'] = df['shift+10'].fillna('EOF')
    if spacy:
        df['spacy'] = df['word'].apply(spacy_entities_extractor)
    df['bias'] = 1
    # Replaces the original bare try/except around the drop: only a missing
    # column is expected here, and errors='ignore' handles exactly that case.
    df.drop(columns=['tag'], inplace=True, errors='ignore')
    return df
df = extend_data(df, spacy=False)
df.head()
# +
X = list()
for k, v in df.groupby('n_frase'):
v.drop(columns='n_frase', inplace=True)
X.append(v.to_dict('records'))
# -
X[0]
y[0]
def prepare_data_crf(df: pd.DataFrame) -> list:
    """Final step preparing the feature data used to train the CRF.

    Parameters:
    -----------
    df : pd.DataFrame
        the number of columns depends on how the data augmentation was set
        up; the only requirement is the 'n_frase' column used as the
        sentence index

    Returns:
    -----------
    X : list
        one list of per-token feature dicts per sentence
    """
    X = []
    for _, group in df.groupby('n_frase'):
        features = group.drop(columns='n_frase')
        X.append(features.to_dict('records'))
    return X
X = prepare_data_crf(df)
import sklearn_crfsuite
# Linear-chain CRF trained with L-BFGS and elastic-net regularisation.
crf = sklearn_crfsuite.CRF(
    algorithm='lbfgs',
    c1=0.1,  # L1 regularisation weight
    c2=0.1,  # L2 regularisation weight
    max_iterations=100,
    all_possible_states=False, # Default
    all_possible_transitions=False # Default
)
crf.fit(X, y)
sentences[5].split("|")[0]
for n, word in enumerate(X[5]):
print(f"{n} --> {word['word']}")
# +
y_pred = crf.predict_single(X[5])
for n, pred in enumerate(y_pred):
print(f"{n} --> {pred}")
# -
def prepare_sentence(sentence:str) -> [pd.DataFrame, list]:
    """Prepare a raw sentence for CRF prediction.

    Parameters:
    -----------
    sentence : str
        the sentence to process

    Returns:
    -----------
    df : DataFrame
        one row per token, with the context feature columns
    X_arr[0] : array
        per-token feature dicts in the format CRF expects for predict
    """
    sentence = rimuovi_punteggiatura(sentence)
    X_arr = list()
    df = pd.DataFrame(data = [i for i in sentence.split()], columns=['word'])
    # Single-sentence frame: a constant grouping key is required by extend_data.
    df['n_frase'] = 1
    df = extend_data(df)
    for k, v in df.groupby('n_frase'):
        v.drop(columns='n_frase', inplace=True)
        X_arr.append(v.to_dict('records'))
    return df, X_arr[0]
def extend_sentence(sentence:str, model:sklearn_crfsuite.estimator.CRF) -> pd.DataFrame:
    """Extract slots and add them to the DataFrame as a column.

    Parameters:
    -----------
    sentence : str
        the string containing the sentence
    model : sklearn_crfsuite.estimator.CRF
        the trained ConditionalRandomField model

    Returns:
    -----------
    df : DataFrame
        the token DataFrame extended with a 'slots' column holding the slot
        type predicted for each token
    """
    df, X_arr = prepare_sentence(sentence)
    df['slots'] = model.predict_single(X_arr)
    return df
nuova_frase = 'CI SONO FATTORIE DIDATTICHE AD AFRAGOLA'
extend_sentence(nuova_frase, crf)
# ## Duckling
# [datetime.datetime(2021, 11, 21, 8, 0, tzinfo=<StaticTzInfo 'UTC\-08:00'>)]}
def slots_extractor(sentence:str, model:sklearn_crfsuite.estimator.CRF) -> defaultdict:
    """Return a dictionary of the slots detected in *sentence*.

    Parameters:
    -----------
    sentence : str
        the string containing the sentence
    model : sklearn_crfsuite.estimator.CRF
        the trained ConditionalRandomField model

    Returns:
    -----------
    dd : dictionary
        slot name -> space-joined words tagged with that slot; plus a
        'DATETIMES' entry when Duckling finds time expressions
    """
    df = extend_sentence(sentence, model)
    dd = defaultdict(list)
    # Collect the words of each non-'O' slot, joined back into a phrase.
    for k, v in df.query("slots != 'O'").groupby('slots'):
        dd[k] = " ".join(v['word'])
    """
    Estrazione date ed orari tramite duckling
    """
    # Dates and times are extracted separately via the Duckling server.
    date_time = extract_datetime(sentence)
    if date_time is not None and len(date_time) > 0:
        dd['DATETIMES'] = date_time['datetime']
    # else:
    #     sentence_dict['datetimes'] = False
    return dd
# +
df = pd.read_csv(sentences_file_generated, sep="|", header=None)
df.columns = ["sentences", "intents"]
df.head()
# -
def conserva_solo_slot_name(text: str) -> str:
    """
    Keep only the slot names, dropping the slot values, to shrink the
    feature space of the sentences.

    Parameters
    ----------
    text : str
        The annotated sentence, with slots written as "[NAME(value)]".

    Returns
    -------
    str
        The sentence with the "(value)" parts and the square brackets
        removed, leaving just the slot names in place.
    """
    cleaned = rimuovi_punteggiatura(text)
    # Strip parenthesized values and the surrounding square brackets.
    return re.sub(r"(\([A-Za-z0-9 ]+\)|\[|\])", "", cleaned)
df["sentences"][5]
# Fixed typo: the function defined above is `conserva_solo_slot_name`;
# the original called `conserva_solob_slot_name`, which raises NameError.
conserva_solo_slot_name(df["sentences"][5])
# +
df["sentences"] = df["sentences"].apply(conserva_solo_slot_name)
df.sentences[0]
# -
def replace_slot_values(sentence_dict: dict) -> dict:
    """
    Dictionary enrichment - part 1 of 2.

    To reduce the feature space, replace each slot value found in the
    sentence with the name of its slot.

    Parameters
    ----------
    sentence_dict : dict
        Must contain 'sentence' (str) and 'slots' (mapping slot name -> value).

    Returns
    -------
    dict
        The same dict, with a new 'replaced_sentence' key where every
        textual slot value has been substituted by its slot name.
    """
    sentence_dict["replaced_sentence"] = sentence_dict["sentence"]
    for slot_name, slot_value in sentence_dict["slots"].items():
        # 'DATETIMES' holds parsed datetime objects, not substrings of the
        # sentence, so it cannot be substituted textually.
        if slot_name != "DATETIMES":
            # re.escape prevents regex metacharacters in the slot value
            # (e.g. '(' or '?') from breaking or corrupting the match;
            # the original passed the raw value as a pattern.
            sentence_dict["replaced_sentence"] = re.sub(
                re.escape(slot_value), slot_name, sentence_dict["replaced_sentence"]
            )
    return sentence_dict
# +
import requests
import json
import datetime
import dateparser
def extract_datetime(text:str, url = "http://0.0.0.0:8000/parse"):
    """
    Query a duckling server to extract datetime entities from text.

    Parameters
    ----------
    text : str
        Sentence to analyse.
    url : str
        Endpoint of the duckling "parse" service.

    Returns
    -------
    dict or None
        {'datetime': [...]} holding either a [min, max] pair (when more
        than one datetime is found) or a single-element list; None when
        the service is unreachable, responds with an error status, or
        returns a malformed payload.
    """
    result = None
    datetimes = list()
    try:
        response = requests.post(url, data={"locale": "it_IT", "text": text})
        if response.status_code == 200:
            for entity in response.json():
                if entity['dim'] == "time":
                    datetimes.append(dateparser.parse(entity['value']['value']))
            result = dict()
            if len(datetimes) > 1:
                # Collapse multiple mentions into a [start, end] interval.
                result['datetime'] = list([min(datetimes), max(datetimes)])
            else:
                result['datetime'] = list(datetimes)
    except Exception:
        # Best-effort by design: a missing or failing duckling service must
        # not break the caller, which treats None as "no datetime found".
        # (Narrowed from the original bare `except:` clauses.)
        pass
    return result
# -
def add_slots(sentence: str, model: sklearn_crfsuite.estimator.CRF) -> dict:
    """
    Dictionary enrichment - part 2 of 2.

    Build the full sentence dictionary: cleaned text, the slots extracted
    by the CRF model, and the sentence with slot values replaced by their
    slot names.
    """
    cleaned = rimuovi_punteggiatura(sentence)
    result = {
        "sentence": cleaned,
        "slots": slots_extractor(cleaned, model),
    }
    return replace_slot_values(result)
add_slots('CI SONO <NAME>, AD AFRAGOLA CHE SONO APERTE DOMANI ALLE 8 DI SERA ?', crf)
# +
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
# -
cv.fit(df.sentences)
# +
print(f"Il vocabolario contiene {len(cv.vocabulary_)} parole")
cv.vocabulary_
# +
fake_list = list()
for i in range(30):
fake_list.append(" ".join(random.choices(list(cv.vocabulary_.keys()), k=10)))
# -
fake_list[:10]
df_fake = pd.DataFrame({"sentences": fake_list, "intents": "fake"})
df_fake.head()
df = pd.concat([df, df_fake], axis=0, ignore_index=True)
df.tail()
# +
import sklearn
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# -
df.intents.unique()
# +
le.fit(df.intents.unique())
for n, cls in enumerate(le.classes_):
print(f"{cls} --> {n}")
# -
labels_categories = le.transform(df.intents.values)
le.inverse_transform([1])[0]
# +
print(df.sentences[0])
np.max(cv.transform(df.sentences[0].split()).toarray(), axis=0)
# +
from sklearn.linear_model import SGDClassifier
classifier = SGDClassifier(fit_intercept=False, loss="log", random_state=200)
# -
classifier.fit(X=cv.transform(df["sentences"].values).toarray(), y=labels_categories)
df["intents"].values[0]
df["sentences"].values[0]
y_pred = classifier.predict(
[np.max(cv.transform(df["sentences"].values[0].split()).toarray(), axis=0)]
)
le.inverse_transform(y_pred)[0]
def get_intents_and_slots(
    sentence: str,
    model,
    cv: sklearn.feature_extraction.text.CountVectorizer,
    le: sklearn.preprocessing._label.LabelEncoder,
    threasold=0.25,
) -> dict:
    """
    Extract the intents and the slots from a sentence.

    Parameters
    ----------
    sentence : str
        The user sentence.
    model : estimator with `predict_proba` (e.g. SGDClassifier)
        The intent classifier. (The original annotation said CRF, but the
        object actually passed in is the classifier.)
    cv : CountVectorizer
        Fitted on the training sentences.
    le : LabelEncoder
        Fitted on the intent labels.
    threasold : float
        Minimum probability for an intent to be reported (parameter name
        kept as-is for backward compatibility; it means "threshold").

    Returns
    -------
    dict
        The sentence dict from `add_slots`, extended with 'intents'
        (classes above the threshold, sorted by probability, as records)
        and 'max_intent' (the single most likely class).
    """
    # NOTE(review): `crf` is a module-level global here, mirroring the
    # original behaviour; consider passing it in explicitly.
    sentence_dict = add_slots(sentence, crf)
    # Reuse the already-computed replaced sentence instead of running the
    # whole slot-extraction pipeline a second time, as the original did.
    replaced = sentence_dict["replaced_sentence"]
    arr = np.max(cv.transform(replaced.split()).toarray(), axis=0)
    probs = model.predict_proba([arr])[0]
    ranking = pd.DataFrame({"classes": le.classes_, "probs": probs})
    ranking.sort_values("probs", ascending=False, inplace=True)
    sentence_dict["intents"] = ranking[ranking["probs"] > threasold].to_dict("records")
    sentence_dict["max_intent"] = le.classes_[np.argmax(probs)]
    return sentence_dict
# +
nuova_frase = "QUANTE FATTORIE DIDATTICHE CI SONO IN PROVINCIA DI CASERTA CON ALLEVAMENTO"
data = get_intents_and_slots(nuova_frase, classifier, cv, le)
# -
data
# +
dialogs = {v[0]: v[1] for v in list(dialogs.values())}
pprint.pprint(dialogs)
# -
bot_sentences
# +
"""
Estrazione intent
"""
if data["intents"][0]["probs"] > 0.75:
intent = data["max_intent"]
else:
intent = "fake"
intent
# +
"""
Estrazione risposta
"""
try:
reply = dialogs[intent]
except:
reply = "fake"
reply
# -
# ### Il file "db_esempio" è stato ottenuto rielaborando un dataset scaricato sul portale degli OpenData disponibile al seguente [https://dati.regione.campania.it/catalogo/resources/Fattorie-didattiche.csv](https://dati.regione.campania.it/catalogo/resources/Fattorie-didattiche.csv)
df_query = pd.read_csv("db_esempio.csv")
df_query.head()
# +
"""
Estrazione frase con parametri
"""
if reply != "fake":
    reply_str = random.choice(bot_sentences[reply])
    print(reply_str, "\n")
    """
    Estrazione dei dati per la sostituzione dei parametri
    """
    query_list = list()
    for k, v in data["slots"].items():
        query_list.append(f"{k} == '{v}'")
        # Fill the "[SLOT]" placeholder in the template with its value.
        # The original performed a single re.sub after the loop with a
        # *list* as the replacement argument, which raises TypeError;
        # substitute per slot instead (as `bot_reply` below does).
        reply_str = reply_str.replace(f"[{k}]", v)
    query = " and ".join(query_list)
    print(query)
# -
query
# +
n = df_query.query(query)["NOME"].drop_duplicates().count()
print(reply_str % (n))
# -
def bot_reply(
    sentence: str,
    model: sklearn_crfsuite.estimator.CRF,
    cv: sklearn.feature_extraction.text.CountVectorizer,
    le: sklearn.preprocessing._label.LabelEncoder,
    dialogs: dict,
    bot_sentences: dict,
    db: str = "db_esempio.csv",
    threasold=0.25,
) -> str:
    """
    Generate the bot's reply for a user sentence.

    Parameters
    ----------
    sentence : str
        The user's question (upper-cased internally).
    model : intent classifier with `predict_proba`.
    cv : CountVectorizer fitted on the training sentences.
    le : LabelEncoder fitted on the intent labels.
    dialogs : dict
        Maps intent names to reply keys.
    bot_sentences : dict
        Maps reply keys to lists of reply templates (with "[SLOT]"
        placeholders and a final %s-style count placeholder).
    db : str
        CSV file queried to count the matching records.
    threasold : float
        Probability threshold forwarded to `get_intents_and_slots`
        (name kept for backward compatibility).

    Returns
    -------
    str
        The filled-in reply template, or a fallback message when the
        intent is unclear. (Return annotation fixed: this returns str,
        not dict.)
    """
    sentence = sentence.upper()
    data = get_intents_and_slots(sentence, model, cv, le, threasold)
    pprint.pprint(data)
    print("---------------------------------------------")
    # Guard against an empty intent list (previously an IndexError).
    if data["intents"] and data["intents"][0]["probs"] > 0.75 \
            and data["intents"][0]["classes"] != "fake":
        intent = data["max_intent"]
        reply = dialogs[intent]
        reply_str = random.choice(bot_sentences[reply])
        query_list = list()
        for slot_name, slot_value in data["slots"].items():
            # Build the DataFrame.query condition. NOTE(review): a value
            # containing a single quote would still break the expression;
            # kept as in the original.
            query_list.append(f"{slot_name} == '{slot_value}'")
            # Fill the "[SLOT]" placeholder with the extracted value.
            reply_str = re.sub(fr"\[{slot_name}\]", slot_value, reply_str)
        query = " and ".join(query_list)
        df_query = pd.read_csv(db)
        n = df_query.query(query)["NOME"].drop_duplicates().count()
        return reply_str % (n)
    else:
        return "Non ho capito, riformula meglio la tua domanda"
# +
nuova_frase = "Quante fattorie didattiche ci sono a Aversa" # con allevamento ?"
bot_reply(nuova_frase, classifier, cv, le, dialogs, bot_sentences)
|
notebook/Presentazione.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.6 64-bit (''dl-ic-cpu'': conda)'
# language: python
# name: python3
# ---
import sys
import json
sys.path.append("../")
from src.data_utils import load_captions, make_train_test_images
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow_hub as hub
# Loading data
CAPTIONS_PATH = "../data/Flickr_Data/Flickr_TextData/Flickr8k.token.txt"
IMAGES_PATH = "../data/Flickr_Data/Images/"
captions = load_captions(CAPTIONS_PATH)
train, test, train_images, test_images = make_train_test_images(CAPTIONS_PATH, IMAGES_PATH)
# +
module_handle = "https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1"
detector = hub.load(module_handle).signatures['default']
# -
def load_img(path):
    """Read an image file from disk and decode it as a 3-channel tensor."""
    raw = tf.io.read_file(path)
    return tf.image.decode_jpeg(raw, channels=3)
img = load_img(train_images[0])
converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]
out = detector(converted_img)
plt.imshow(img)
out['detection_boxes']
def extract_regions(img, bounding_boxes):
    """
    Crop the regions described by normalized bounding boxes out of an image.

    Parameters
    ----------
    img : array-like of shape (height, width, channels)
        The source image.
    bounding_boxes : iterable of (ymin, xmin, ymax, xmax)
        Normalized ([0, 1]) box corners, as returned by TF-Hub detectors.

    Returns
    -------
    list
        One crop (same array type as `img`) per non-degenerate box.

    Notes
    -----
    Bug fix: the original unpacked ``img.shape`` as ``width, height, _``
    although the first axis is the height, and indexed image rows with the
    x coordinates — so crops came from transposed, wrong locations. Rows
    are now indexed with y and columns with x.
    """
    height, width, _ = img.shape
    regions = []
    for y1, x1, y2, x2 in bounding_boxes:
        # Scale normalized coordinates to pixel indices.
        top, bottom = int(height * y1), int(height * y2)
        left, right = int(width * x1), int(width * x2)
        # Skip degenerate (zero-area) boxes.
        if (top != bottom) and (left != right):
            regions.append(img[top:bottom + 1, left:right + 1, :])
    return regions
features = extract_regions(converted_img[0], out['detection_boxes'])
len(features)
plt.imshow(features[9])
|
notebooks/explo_bottom_up.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# default_exp data_man
# %reload_ext autoreload
# %autoreload 2
# # Data management
# > Create, from FT1 and FT2, a compact data set with photon and livetime info.
# ### Overview
#
# Fermi-LAT weekly data files are extracted from the [GSFC FTP server ](https://heasarc.gsfc.nasa.gov/FTP/fermi/data/lat/weekly),
# with subfolders for the photon data, `photon` and spacecraft data, `spacecraft`. It is [described here](https://fermi.gsfc.nasa.gov/ssc/data/access/)
#
# The class `FermiData` downloads these to temporary files and constructs a dict for each week with
# contents
#
# * photons: a table, entry per selected photon with columns, converted with `get_ft1_data`
#
# * run number (uint32, stored as a category by pandas)
# * time since the run start, in 2 $\mu$s intervals (uint32)
# * energy and event type (uint8)
# * position as HEALPix index (uint32)
#
# * sc_data: a table, an entry per 30-s interval, with columns, all float32, converted with `get_ft2_info`
# * start/stop time
# * S/C direction
# * zenith direction
# * gti_times: an array of interleaved start/stop intervals
# * file_date: modification date for the FT1 file at GSFC.
#
# These dict objects, one per week, are saved in a folder
#
# #### A note about timing
# A run is typically an orbit or less, at most 6 ks. Integerizing the offset from this using 32 bits, one has 5e5 intervals/s, so
# we choose 2$\mu$s.
from nbdev.showdoc import *
# +
# export
import os, sys
import dateutil, datetime
from astropy.io import fits
from ftplib import FTP_TLS as FTP
import healpy
from pathlib import Path
import numpy as np
import pandas as pd
import pickle
from wtlike.config import Config, Timer, UTC, MJD
# -
# export
def get_ft1_data( config, ft1_file):
    """
    Read in a photon data (FT1) file, bin in energy and position to convert to a compact DataFrame

    - `ft1_file` -- A monthly file generated by <NAME>, or a weekly file from GSFC

    Depends on config items

    - `theta_cut, z_cut` -- selection criteria
    - `ebins, etypes` -- define band index
    - `nside, nest` -- define HEALPix binning

    Returns a tuple with

    - `tstart`, the start MET time
    - DataFrame with columns
       - `band` (uint8): energy band index*2 + 0,1 for Front/Back
       - `nest_index` if nest else `ring_index` (uint32): HEALPIx index for the nside
       - `run_id` (uint32) The run number, stored as a categorical uint32 array
       - `trun` (uint32): time since the run id in 2 $\mu s$ units
    - gti times as an interleaved start, stop array.

    For the selected events above 100 MeV, this represents 9 bytes per photon, vs. 27.
    """
    # Selection and binning parameters, all taken from the config object.
    delta_t = config.offset_size
    ebins = config.energy_edges
    etypes = config.etypes
    nside = config.nside
    nest = config.nest
    z_cut =config.z_max
    theta_cut = np.degrees(np.arccos(config.cos_theta_max))
    verbose = config.verbose

    with fits.open(ft1_file) as ft1:
        tstart = ft1[1].header['TSTART']

        ## GTI - setup raveled array function to make cut
        gti_data= ft1['GTI'].data
        # extract arrays for values of interest
        data =ft1['EVENTS'].data

        # Interleave sorted GTI starts/stops into a single monotonic array.
        a,b = sorted(gti_data.START), sorted(gti_data.STOP)
        gti_times = np.ravel(np.column_stack((a,b)))
        if np.any(np.diff(gti_times)<0):
            raise Exception(f'Non-monatonic GTI found')

        # A time falls inside a GTI iff its insertion index is odd.
        # NOTE(review): defined here but currently unused -- the GTI cut
        # is commented out below.
        def apply_gti(time):
            x = np.digitize(time, gti_times)
            return np.bitwise_and(x,1).astype(bool)

        # apply selections: minimum energy, zenith and theta cuts
        sel = ((data['ENERGY'] > ebins[0]) &
               (data['ZENITH_ANGLE'] < z_cut) &
               (data['THETA'] < theta_cut))
        dsel = data[sel]

        # get the columns for output (z, theta, ec are kept for possible
        # diagnostics; only glon/glat/energy/et/time are used below)
        glon, glat, energy, et, z, theta, time, ec =\
             [dsel[x] for x in 'L B ENERGY EVENT_TYPE ZENITH_ANGLE THETA TIME EVENT_CLASS'.split()]

        # generate event_type masks (bit -1-ie of the EVENT_TYPE bit array)
        et_mask={}
        for ie in etypes:
            et_mask[ie]= et[:,-1-ie]

        if verbose>1:
            # summarize how much the selection removed
            total = sum(b)-sum(a)
            fraction = total/(b[-1]-a[0])
            print( f'FT1: {ft1_file.name}, GTI range {a[0]:.1f}-{b[-1]:.1f}: {len(data):,} photons'\
                f'\n\tSelection E > {ebins[0]:.0f} MeV. theta<{theta_cut:.1f} and z<{z_cut} remove:'\
                f' {100.- 100*len(dsel)/float(len(data)):.2f}%'
              #  f', GTI cut removes {sum(~gti_cut)}'
            )

        # event class -- turn into single int for later mask
        #     bits = np.array([1<<n for n in range(20)])
        #     def to_bin(x):
        #         return np.sum(bits[x[:20]])
        #     ec = [to_bin(row[20]) for row in ec

        # pixelate direction
        hpindex = healpy.ang2pix(nside, glon, glat, nest=nest, lonlat=True).astype(np.uint32)
        hpname = 'nest_index' if nest else 'ring_index'

        # digitize energy and create band index including (front/back)
        band_index = (2*(np.digitize(energy, ebins, )-1) + et_mask[1]).astype(np.uint8)

        # run number; time offsets below assume RUN_ID equals the run's
        # start MET -- presumably the Fermi convention, TODO confirm
        run_id = dsel['RUN_ID'].astype(np.uint32)

        df = pd.DataFrame(
            { 'band' : band_index,
              hpname : hpindex,
              #'time' : (time-tstart).astype(np.float32), # the old time
              'run_id': pd.Categorical(run_id),
              'trun' : ((time-run_id)/delta_t).astype(np.uint32),
            } )
        if verbose>1:
            print(f'\tReturning tstart={tstart:.0f}, {len(dsel):,} photons.')

        return tstart, df, gti_times
show_doc(get_ft1_data, title_level=2)
# export
def get_ft2_info(config, filename,
                 gti=lambda t: True):
    """Process a FT2 file, with S/C history data, and return a summary DataFrame

    Parameters:

    * config -- verbose, cos_theta_max, z_max
    * filename -- spacecraft (FT2) file
    * gti -- GTI object that checkes for allowed intervals, in MJD units

    Returns: A DataFrame with fields consistent with GTI if specified

    * start, stop -- interval in MJD units
    * livetime -- sec
    * ra_scz, dec_scz --spaceraft direction
    * ra_zenith, dec_zenith -- local zenith
    """
    # combine the files into a DataFrame with following fields besides START and STOP (lower case for column)
    fields = ['LIVETIME','RA_SCZ','DEC_SCZ', 'RA_ZENITH','DEC_ZENITH']
    with fits.open(filename) as hdu:
        scdata = hdu['SC_DATA'].data
        tstart, tstop = [float(hdu[0].header[field]) for field in ('TSTART','TSTOP') ]

        if config.verbose>1:
            print(f'FT2: {filename.name}, MET range {tstart:.1f}-{tstop:.1f},', end='')# {"not" if gti is None else ""} applying GTI')

        # get times to check against MJD limits and GTI
        start, stop = [MJD(np.array(scdata.START, float)),
                       MJD(np.array(scdata.STOP, float))]

        # apply GTI to bin center (avoid edge effects?)
        in_gti = gti(0.5*(start+stop))
        if config.verbose>1:
            s = sum(in_gti)
            print(f' {len(start)} entries, {s} ({100*s/len(start):.1f}%) in GTI')

        # keep only entries inside a GTI; lower-case the column names
        t = [('start', start[in_gti]), ('stop',stop[in_gti])]+\
            [(field.lower(), np.array(scdata[field][in_gti],np.float32)) for field in fields ]

        sc_data = pd.DataFrame(dict(t) )
    return sc_data
show_doc(get_ft2_info, title_level=2)
# export
def filepaths(week):
    """Return the remote/local file names for one data week.

    Returns a list of two triplets (spacecraft/FT2 first, then photon/FT1),
    each containing: ftp folder, ftp filename, local simple filename.
    """
    result = []
    for folder, alias in (('spacecraft', 'ft2'), ('photon', 'ft1')):
        # Photon files carry processing version p305, spacecraft p310.
        version = '305' if folder == 'photon' else '310'
        result.append((
            folder,
            f'lat_{folder}_weekly_w{week:03d}_p{version}_v001.fits',
            f'week{week:03d}_{alias}.fits',
        ))
    return result
# export
class FermiData(dict):
    """ Manage the full data set in weekly chunks
    * Checking the current set of files at GSFC
    * downloading a week at a time to a local tmp
    * Converting to condensed format and saving to pickled dicts in wtlike_data

    The dict maps week number -> GSFC modification-date string.
    """
    ftp_site = 'heasarc.gsfc.nasa.gov'
    ftp_path = 'fermi/data/lat/weekly'
    local_path = '/tmp/from_gsfc'

    def __init__(self, config=None):
        """ Obtain list of the weekly FT1 and FT2 files at GSFC, Set up as a dict, with
            keys= week numbers, values=modification date strings
        """
        self.config = config or Config()
        self.wtlike_data_file_path = Path(self.config.datapath/'data_files')
        assert self.wtlike_data_file_path.is_dir(), 'Data path invalid'
        os.makedirs(self.local_path, exist_ok=True)
        try:
            with FTP(self.ftp_site) as ftp:
                ftp.login()
                ftp.prot_p()
                ftp.cwd(self.ftp_path+'/photon') # or spacecraft
                # get modification time and type for all files in folder
                parse_week = lambda fn: int(fn.split('_')[3][1:])
                flist = ftp.mlsd(facts=['modify', 'type'])
                self.fileinfo = sorted([(parse_week(name), fact['modify']) for name,fact in flist
                                if fact['type']=='file' and name.startswith('lat') ])
        except Exception as msg:
            raise Exception(f'FTP login to or download from {self.ftp_site} failed: {msg}')
        self.update(self.fileinfo)

    @property
    def local_filedate(self):
        """ the datetime object representing the last file date in local storage"""
        from dateutil.parser import parse
        weekly_folder = self.config.datapath/'data_files'
        ff = sorted(list(weekly_folder.glob('*.pkl')))
        if len(ff)==0:
            print(f'No .pkl files found in {weekly_folder}', file=sys.stderr)
            return None
        wk = list(map(lambda f: int(os.path.splitext(f)[0][-3:]), ff))
        lastweek = pickle.load(open(ff[-1],'rb'))
        return dateutil.parser.parse(lastweek['file_date'])

    @property
    def gsfc_filedate(self):
        """datetime of the most recent file at GSFC"""
        return dateutil.parser.parse(list(self.values())[-1])

    def download(self, week):
        """ Download the given week to the tmp folder
        """
        assert week in self, f'week {week} not found at FTP site'
        with FTP(self.ftp_site) as ftp:
            ftp.login()
            ftp.prot_p()
            for ftp_folder, ftp_filename, local_filename in filepaths(week):
                ftp.cwd('/'+self.ftp_path+'/'+ftp_folder)
                if self.config.verbose>0:
                    print(f'FermiData: {ftp_folder}/{ftp_filename} --> {local_filename}')
                with open(f'{self.local_path}/{local_filename}', 'wb') as localfile:
                    ftp.retrbinary('RETR ' + ftp_filename, localfile.write)

    def __str__(self):
        return f'FermiData: {len(self.fileinfo)} week files at GSFC, from {self.fileinfo[0]} to {self.fileinfo[-1]}'

    def in_temp(self):
        """return list of GSFC copied files in the local_path folder"""
        names = [f.name for f in Path(self.local_path).glob('*')]
        return names

    def __call__(self, week, test=False, tries_left=3):
        """ Process the given week:
        * download from GSFC (retried up to `tries_left` times)
        * convert each
        * save pickled dict summary
        """
        assert week in self, f'week {week} not found at FTP site'
        ff = filepaths(week)
        ft1_file = Path(self.local_path)/ff[1][2]
        ft2_file = Path(self.local_path)/ff[0][2]

        if self.config.verbose>1:
            print(f'FermiData: converting week {week}')

        while tries_left>0:
            try:
                if not (ft1_file.exists() and ft2_file.exists()):
                    self.download(week)
                tstart, photon_data, gti_times = get_ft1_data(self.config, ft1_file)
                break
            except Exception as e:
                print(f'*** ERROR *** Failed to convert {ft1_file}: {e} download it again)')
                os.unlink(ft1_file)
                tries_left -=1
        else:
            # Previously fell through to a NameError on `gti_times`; fail
            # with a clear message instead.
            raise Exception(f'FermiData: unable to download and convert week {week}')

        def apply_gti(time): # note MJD
            x = np.digitize(time, MJD(gti_times))
            return np.bitwise_and(x,1).astype(bool)

        sc_data = get_ft2_info(self.config, ft2_file, apply_gti)

        # finished with copies of FT1 and FT2 files: delete them
        for file in (ft1_file,ft2_file):
            os.unlink(file)

        # package info into a dict for pickle
        d = dict(tstart = tstart,
                 photons = photon_data,
                 sc_data = sc_data,
                 gti_times = gti_times,
                 file_date = self[week])
        filename = self.wtlike_data_file_path/f'week_{week:03d}.pkl'
        if filename.exists():
            # Fixed: these messages previously printed the literal
            # "(unknown)" instead of the file path.
            print(f'FermiData: replacing existing {filename}')
        if not test:
            with open(filename, 'wb') as out:
                pickle.dump(d, out)
        if self.config.verbose>0:
            print(f'FermiData: Saved to {filename}')
        if self.config.verbose>1:
            print(photon_data.info())

    def load_week(self, week):
        """Load a pickled week summary """
        filename = self.wtlike_data_file_path/f'week_{week:03d}.pkl'
        assert filename.exists(), f'File {filename} does not exist'
        with open(filename, 'rb') as imp:
            ret = pickle.load(imp)
        return ret

    def check_week(self, week):
        """Compare the locally stored week with the GSFC file date.

        Returns True when the stored summary's 'file_date' matches the
        GSFC modification date, i.e. the local copy is current.
        NOTE(review): a summary *without* a 'file_date' also returns True,
        which `needs_update` interprets as "current" -- this looks
        inverted, but is kept as-is to preserve behaviour.
        """
        data = self.load_week(week)
        if 'file_date' not in data:
            return True
        return data['file_date'] == self[week]

    def needs_update(self, threshold=0):
        """ Compare files on disk with the GSFC list and compile list that need to be downloaded

        Check the file date of the last one on disk and include it if:
        * it is short and there is one or more GSFC weeks following it,
        * It is the most recent week and is short by more than *threshold* days
        """
        gg =self.wtlike_data_file_path.glob('*.pkl')
        file_weeks= map(lambda n: int(n.name[5:8]), gg)
        ondisk = np.array(list(file_weeks))

        missing = list(set(self.keys()).difference(set(ondisk)))
        # Fixed: glob order is not guaranteed, so take the max week rather
        # than the last element of the (possibly unsorted) array.
        last = ondisk.max()
        if last not in missing and not self.check_week( last):
            # Fixed: `timedelta.seconds` is only the seconds *component*
            # (always < 1 day); use total_seconds() for the elapsed days.
            delta = (self.gsfc_filedate - self.local_filedate).total_seconds()/(24*3600)
            if delta> threshold:
                missing.append(last)
        return missing

    def process(self, days=1):
        """ Download and process all weeks missing or needing an update, if within `days`
        from the present

        Return status: True if anything changed
        """
        # will use multprocessing if len(todo)>1 and pool_size>1
        todo = self.needs_update(days)
        if len(todo)==0: return False

        if self.config.pool_size >1 and len(todo)>1:
            print('multitasking not applied yet', file=sys.stderr)
            pass
        list(map(self, todo))
        return True
# +
# fd = FermiData(); print(fd)
# fd.local_filedate, fd.gsfc_filedate
# filename = fd.local_files()[-1]
# with open(filename, 'rb') as imp:
# fs= pickle.load(imp)
# fs.keys()
# ff = fd.local_files()
# wk = list(map(lambda f: int(os.path.splitext(f)[0][-3:]), ff))
# # get gti for weeks
# tt = []
# for f in ff:
# with open(f, 'rb') as imp:
# fs = pickle.load(imp)
# gti = fs['gti_times']
# tt += [gti[0], gti[-1]]
# uu = MJD(np.array(tt)); uu
# import matplotlib.pyplot as plt
# from wtlike.config import first_data, day
# plt.plot( wk, (uu[0::2]-first_data)/7 -wk+9.7, '.')
# ft = lambda t: (t-first_data) / 7 + 9.7
# ft(54685.15), ft(59464.84)
# def find_week(self, mjd):
# """ find the week number that contains the MJD """
# # estimate that could be off on boundary -- assume not more than a week
# week= int((mjd-first_data) / 7 + 9.7 )
# x = self.load_week(week)
# # check with actual GTI info
# gti = x['gti_times']
# first, last = MJD(gti[0]), MJD(gti[-1])
# if mjd< first: return week-1
# elif mjd>last: return week+1
# return week
# -
# ### FermiData methods
show_doc(FermiData.download)
show_doc(FermiData.needs_update)
self = FermiData()
print(self, self.needs_update())
show_doc(FermiData.process)
show_doc(FermiData.check_week)
config = Config()
if config.valid:
self = FermiData(Config(verbose=1))
check =self.needs_update(0.5)
print(self, f'\n\tweek(s) needing update: {check}' )
# +
#hide
# with Timer() as t:
# self.process(0.5)
# print(t)
# -
# ## check the weekly files
# export
def check_data(config=None):
    """
    Inspect the weekly summary files on disk.

    Returns
    -------
    tuple or None
        (sorted list of .pkl file paths, last week number, number of days
        covered by the last week) -- or None when the configuration is
        invalid or no files are found.
    """
    if config is None:
        config = Config()
    # Guard clauses: invalid config or empty folder end early.
    if not config.valid:
        print(f'Config not valid, {config.errors}', file=sys.stderr)
        return None
    weekly_folder = config.datapath/'data_files'
    files = sorted(list(weekly_folder.glob('*.pkl')))
    if len(files) == 0:
        print(f'No .pkl files found in {weekly_folder}', file=sys.stderr)
        return
    week_numbers = [int(os.path.splitext(f)[0][-3:]) for f in files]
    with open(files[-1], 'rb') as inp:
        lastweek = pickle.load(inp)
    file_date = lastweek['file_date']
    gti = lastweek['gti_times']
    days = (gti[-1]-gti[0])/(24*3600)
    if config.verbose>0:
        print(f'Weekly folder "{weekly_folder}" contains {len(week_numbers)} weeks.'\
            f'\n\t Last week, # {week_numbers[-1]}, has {days:.3f} days, ends at UTC {UTC(MJD(gti[-1]))}, filedate {file_date}' )
    return files, week_numbers[-1], days
# +
#ret = check_data()
# -
# export
def update_data(update_threshold=1, config=None):
    """Bring all of the local week data summaries up to date, downloading the missing ones from GSFC.

    If the last local summary is the current week, it is refreshed when its
    file date lags the GSFC one by more than `update_threshold` days.

    Returns True when anything was changed.
    """
    return FermiData(config).process(update_threshold)
#export
def get_week_files(config, week_range=None):
    """Return list of week files

    - week_range [None] -- tuple with inclusive range (or a single week
      number). If None, fall back to `config.week_range`; when that is
      also None, return all weeks.
    """
    data_folder = config.datapath/'data_files'
    data_files = sorted(list(data_folder.glob('*.pkl')))

    # Bug fix: the original computed this fallback but then tested the raw
    # `week_range` argument, so `config.week_range` was silently ignored.
    weeks = week_range or config.week_range
    if weeks is not None:
        # pandas label-based .loc slicing is inclusive at both ends.
        slc = slice(*weeks) if type(weeks)==tuple else slice(weeks,weeks)
        wk_table = pd.Series(data=[df for df in data_files],
                     index= [ int(df.name[-7:-4]) for df in data_files],
                    )
        data_files = wk_table.loc[slc].values

        if config.verbose>0:
            q = lambda x: x if x is not None else ""
            print(f'LoadData: Loading weeks[{q(slc.start)}:{q(slc.stop)}:{q(slc.step)}]', end='' if config.verbose<2 else '\n')
    else:
        if config.verbose>0: print(f'LoadData: loading all {len(data_files)} weekly files')

    if len(data_files)==0:
        msg = f'Specified week_range {week_range} produced no output. Note that week numbers are 9-'
        raise Exception(msg)
    return data_files
# hide
from nbdev.export import notebook2script
notebook2script()
# !date
|
nbs/01_data_man.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (fastai_dev)
# language: python
# name: fastai_dev
# ---
# # A tutorial on image classification using fastaiv2
# > Fastai v2 is a deep learning library which makes training deep learning models simple. Here in this tutorial we will understand how to train a image classifier effectively. We will also deep dive into midlevel apis and see how they will help us is in customizing different aspects of deep learning modelling.
#
# - toc: true
# - badges: true
# - comments: true
# - hide: false
# - search_exclude: false
# - categories: [fastai, python, deep-learning]
# - image: images/favicon.ico
# ## Introduction
# Classifying objects is one of the primary tasks of deep learning in computer vision. When this happens at the image level it is called image classification, and at the pixel level it is called segmentation. Image (object) classification is the core of most analytics work done on images. In this blog post, let's see how image classification is done effectively using fastai v2.
# ## Dataset and DataBlock
# The fundamental thing which we need for any task is dataset. In fastaiv2, dataset is represented in many formats.
from fastai2.vision.all import *
# > Note: Fastaiv2 has several [datasets](https://course.fast.ai/datasets) apis which we can download simply using the below command
path = untar_data(URLs.PETS)
files = get_image_files(path) # get all the image files
# There are several ways in which we can create a dataloader using Fastaiv2 and using DataBlock we can define in the following way.
# - create a func (label_func) which extracts class name from the dataset. dsets.vocab shows the classes of the dataset
# - get_image_files extracts images from path provided at dblocks.datasets. We can also use **FileGetter(extensions=".jpg")** as an input to get_items if u want to specify the params.
# - There are several [splitters](https://github.com/fastai/fastai2/blob/master/nbs/05_data.transforms.ipynb) available in the fastaiv2 including
# - RandomSplitter: takes fraction of validation data as input
# - TrainTestSplitter: A wrapper to sklearn train_test_split function
# - IndexSplitter: takes the index of valid_idx as input
# - GrandparentSplitter: used when your train and val datasets are present in different folders
# - FuncSplitter: When you write your own function to divide the dataset into train and validation
# - FileSplitter: image names present in a file.txt as valid
# - RandomSubsetSplitter: give fractions to both train and val
# - ColSplitter: split the dataset based on a column
# - blocks: Kind of outputs we need, here since we are using images and need category as output we are using ImageBlock and CategoyBlock.
# - Use ImageBlock(cls=PILImageBW) when u need a Black & white image as input to network
# - Use MultiCategory block when u are training a multi-class classification.
# - TODO
def label_func(fname):
    """Label a Pets image: cat file names are capitalized, dog names are not."""
    first_char = fname.name[0]
    return "cat" if first_char.isupper() else "dog"
dblock = DataBlock(blocks = (ImageBlock, CategoryBlock),
get_items = get_image_files,
get_y = label_func,
splitter = RandomSplitter(),
item_tfms = Resize(224))
dsets = dblock.datasets(path)
# > Tip: dsets is similar to dataset in Pytorch. We can write our own dsets if fastai DataBlock doesn't work for us, please make sure we implement show method to visualize an input.
#
# > Important: if you have multiple input blocks, we need to include **n_inp** input and accordingly adjust our blocks
#
# dsets is a dataset. if we need a dataloader, we need to call dataloader method. dataloader method is very similar to dataloader in Pytorch. It has arguments like batch_size(bs), shuffle, pin_memory, drop_last.
#
# > Tip: Fastai uses delegates for auto-completion and to see what arguments are present, do **shift+tab** to check all the arguments on a function.
#
# > Important: Fastai dataloader comes with a show batch functionality, which will help you visualize a batch of images.
dls = dblock.dataloaders(path/"images", bs=32)
# > Warning: show_batch doesn't work if all the images are not of the same size. we need to define item_tfms to resize all the images to the same size.
dls.show_batch()
# > Important: In vision we also have ImageDataLoaders where we can initialize as a dataloader in a single line depending on the type of input
#
# ## ImageDataLoaders
# - There are several different [image data loaders](http://dev.fast.ai/vision.data) depending on the type of input data. Here since our data is coming from paths
fnames = get_image_files(path/"images")
dls = ImageDataLoaders.from_path_func(path/"images",
fnames,
label_func,
valid_pct=0.2,
item_tfms=Resize(224))
# ## Transformations and Agumentations.
#
# Deep learning architecutures contain large number of parameters (in millions) and it is very hard to achieve generalization with limited datasets. even the large scale imagenet dataset also contain 1.3 million images. Thanks to the image agumentations, we can randomly flip, rotate, resize, pad, crop and do many other image [agumentations](https://github.com/albumentations-team/albumentations) and create an artificially large dataset which will help us in training deep learning models.
#
# In fastaiv2, we have two different types transformations
# - item transforms
# - batch transforms
#
# Traditionally everything is performed as item transform but fastai cleverly divided transformations/agumentations into two different types. one which is applied on images and one which is applied on tensors. one which are applied on images are called item transforms and these are performed on cpu irrespective of GPU availability. batch transforms are applied on tensors and can be done GPU like normalization and standardization. we can have a look at different transforms implementation [here](http://dev.fast.ai/vision.augment)
dls = ImageDataLoaders.from_path_func(path/"images",
fnames,
label_func,
valid_pct=0.2,
item_tfms=[FlipItem(0.5), Resize(224, method="pad")],
batch_tfms=[Normalize.from_stats(*imagenet_stats)])
# > Tip: use dls.one_batch() gives one batch of imgtensor and labels. use this to test/check loss functions, network and dataloader ouputs.
#
# TODO:
# - writing your own item_tfms or batch_tfms
# ## Networks
# We will specifically talk about fastaiv2 computer vision architectures. fastaiv2.vision.learner has create_head, create_body and create_cnn_model. specifically a network is divided into two parts: body and head. later we will see in learner how this will be useful.
#
encoder = create_body(resnet18, n_in=3, pretrained=False)
head = create_head(nf=512, n_out=2, concat_pool=False)
model = nn.Sequential(encoder, head)
# > Tip: We can directly call create_cnn_learner and define the network in one single line. The nf feature is directly computed in this case.
model = create_cnn_model(resnet18, n_out=2, pretrained=True, n_in=3, custom_head=None)
# > Important: resnet18 is a function which takes pretrained as an argument, if you are defining your own architecture, make sure that it takes this as an argument.
from segmentation_models_pytorch.encoders import get_encoder
def SimpleArch(encoder: str= "resnet18"):
    """Return a factory that builds a segmentation-models-pytorch encoder.

    The inner ``model`` callable accepts ``pretrained`` (a weight-set name,
    or None for random init), matching the ``pretrained``-argument contract
    that fastai's ``create_body`` expects from an architecture function.
    """
    def model(pretrained: Union[str, None]=None):
        # depth=5 keeps all encoder stages; weights=None means random init
        return get_encoder(encoder, in_channels=3, depth=5, weights=pretrained)
    return model
# > Note: we can define custom heads to the create_cnn_model.
@delegates(create_head, but="nf, n_out")
class FastaiDecoder(Module):
    """Named wrapper around fastai's `create_head`.

    The head is stored under the attribute `f"{name}_decoder"` so that
    several decoders can coexist on one parent module without clashing.
    Inherits fastai's `Module`, so no explicit `super().__init__()` is
    needed (as noted in the surrounding notebook text).
    """
    def __init__(self, name, nf, n_out, **kwargs):
        # NOTE(review): `store_attr(self, "...")` and `but="nf, n_out"` match
        # older fastai2 signatures; newer fastai expects `store_attr("...")`
        # and `but` as a list of names — confirm against the pinned version.
        store_attr(self, "name, nf, n_out")
        setattr(self, f"{self.name}_decoder", create_head(nf, n_out, **kwargs))
    def forward(self, x):
        # Dispatch through the dynamically named head created in __init__.
        x = getattr(self, f"{self.name}_decoder")(x)
        return x
encoder = create_body(SimpleArch("resnet18"), n_in=3, pretrained=None)
head = FastaiDecoder("decoder", 1024, 2)
model = nn.Sequential(encoder, head)
# > Tip: FastaiDecoder is inherited from fastaiv2 Module and not torch nn module. when using fastaiv2 Module we need not use super for inheritance.
#
# > Tip: If we want to train a cnn arch and head, we can directly define them inside cnn_learner and no need to use create_head and create_body functions. Use create_head and create_body when we are using Learner API.
#
# As defined in fastai docs, a learner is a Trainer for model using data to minimize `loss_func` with optimizer `opt_func`. We can use `cnn_learner` or `Learner`, A cnn_learner is a wrapper on top of learner for training vision models much more efficiently as described above. We will be using `Learner` here.
# ## Learner
#
# Learner takes various params. Lets look at each and everything individually
# - dls: dataloader
# - model: neural network model
# - loss_func: loss function as input, for example CrossEntropyLossFlat() defined in fastai2
# - opt_func: optimizer function, Example: Adam
#
# ### Splitter and lr
#
# How do we set different learning rates to different layer aka discriminative learning? To solve this Learner has a parameter called `splitter` which takes a function (Defined as Groups below). This function takes model as input and outputs a list of lists which contain model parameters. The `lr` tuple length is equivalent to `Groups` output length.
#
# Below is a groups function defined, it takes model as input and gives encoder params and decoder params in a list of 2 values.
def Groups(model):
    """Parameter splitter for discriminative learning rates.

    Returns two parameter groups — the body (model[0]) and the head
    (model[1]) — so `Learner` can assign a separate lr to each group.
    """
    body, head = model[0], model[1]
    return [list(body.parameters()), list(head.parameters())]
# ### Callbacks
# fastai's training loop is highly extensible, with a rich callback system. There are several [callbacks](https://docs.fast.ai/callbacks.html#callbacks) defined and we can define our own using [callback](https://docs.fast.ai/callback.html#callback). Some of them include,
#
# ### scheduler
# A scheduler is passed as a callback in the fastai framework. There are already-defined learner methods like `fit_one_cycle`, `fit_sgdr`, `fit_flat_cos`. If we want to define our own scheduler, we can do it in the following way. The scheduler we define below using `combine_scheds` has two params:
# - pcts: the list [0.1, 0.9] means: for the first 10% of training time use SchedLin, and for the next 90% use SchedCos
# - scheds: list of schedulers to [use](https://github.com/fastai/fastai2/blob/master/fastai2/callback/schedule.py)
lr = 0.1
sched = {"lr": combine_scheds([0.1, 0.9], [SchedLin(0, lr), SchedCos(lr, 0.01)])}
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), opt_func=Adam, splitter=Groups, train_bn=False, lr=(0.001, 0.1))
# > Note: lr defined when initalizing `Learner` module is overwritten by ParamScheduler.
learn.fit(1, cbs=ParamScheduler(sched))
# ### train_bn, unfreeze, freeze and summary
# - `unfreeze` will set all params (except batch norm) requires_grad to `True`.
# - `.freeze` will set requires_grad (except batch norm) to `False` up to the last param group. Internally it calls freeze_to(-1).
# - freeze_to can be used to customize up to which group you want to freeze.
# - If train_bn is set, batchnorm-layer learnable params are trained even for frozen layer groups. Set it to `False` to disable this.
learn.freeze()
learn.summary()
# ### Using SGDR to training
learn.fit_sgdr(n_cycles=2, cycle_len=1, lr_max=0.1, cycle_mult=2)
plt.plot(learn.recorder.lrs); plt.show()
# ### Training using mix precision
learn.to_fp16()
learn.fit_sgdr(n_cycles=2, cycle_len=1, lr_max=0.1, cycle_mult=2)
# ### Predict, save and show_results
# Once the model is trained, we want to check performance on test data or deploy (save) the model to validate on new data. Also we need to do an error analysis to understand where the model is making mistakes. The learner has all these methods
learn.predict("/home/fractaluser/.fastai/data/oxford-iiit-pet/images/Abyssinian_10.jpg")
learn.save("model.pkl", with_opt=False)
learn.show_results()
# # work in progress
# - training on a TPU
# ## Resources
# - https://www.kaggle.com/jhoward/fastai-v2-pipeline-tutorial
# - https://docs.fast.ai/basic_train.html#Discriminative-layer-training
# <script src="https://utteranc.es/client.js"
# repo="carbon2silicon"
# issue-term="pathname"
# theme="github-light"
# crossorigin="anonymous"
# async>
# </script>
|
_notebooks/2020-07-04-fastaiv2_image_classification_101.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import warnings
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import math
dataset = pd.read_csv("./datasets/MSFT.csv", index_col='Date', parse_dates=['Date'])
dataset.head()
training_set = dataset['2021-01-01':'2021-03-12'].iloc[:,4:5].values
training_set
test_set = dataset['2021-03-15':].iloc[:,4:5].values
test_set
dataset["Close"]['2021-01-01':'2021-03-12'].plot(figsize=(25,10),legend=True)
dataset["Close"]['2021-03-15':].plot(figsize=(16,4),legend=True)
plt.legend(['Training set','Test set'])
plt.title("NIFTY"+' stock price')
plt.show()
# ### MinMax Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0,1))
training_set_scaled = sc.fit_transform(training_set)
# +
# we create a data structure with `previous` (=30) timesteps and 1 output:
# each training example holds the `previous` prior scaled closes, and the
# target is the very next scaled close
X_train = []
y_train = []
previous = 30
for i in range(previous,len(training_set_scaled)):
    X_train.append(training_set_scaled[i-previous:i,0])
    y_train.append(training_set_scaled[i,0])
X_train, y_train = np.array(X_train), np.array(y_train)
# -
# Reshape to (samples, timesteps, features=1) as expected by Keras GRU layers
X_train = np.reshape(X_train, (X_train.shape[0],X_train.shape[1],1))
X_train.shape
# ### The GRU Architecture
from keras.models import Sequential
from keras.layers import GRU
#Dense, LSTM, Dropout, Bidirectional
from keras.layers import Dropout
from keras.layers import Dense
# +
# The GRU architecture
regressorGRU = Sequential()
# First GRU layer with Dropout regularisation
regressorGRU.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1],1)))
regressorGRU.add(Dropout(0.2))
# Second GRU layer
regressorGRU.add(GRU(units=50, return_sequences=True))
regressorGRU.add(Dropout(0.2))
# Third GRU layer
regressorGRU.add(GRU(units=50, return_sequences=True))
regressorGRU.add(Dropout(0.2))
# Fourth GRU layer
regressorGRU.add(GRU(units=50))
regressorGRU.add(Dropout(0.2))
# The output layer
regressorGRU.add(Dense(units=1))
# Compiling the RNN
regressorGRU.compile(optimizer='adam',loss='mean_squared_error')
# Fitting to the training set
regressorGRU.fit(X_train,y_train,epochs=100,batch_size=32)
# -
# testing the model: take the last `previous` closes before the test window
# plus the test window itself, so each test example has its 30-day history
dataset_total = pd.concat((dataset["Close"][:'2021-02'],dataset["Close"]['2021-03':]),axis=0)
inputs = dataset_total[len(dataset_total)-len(test_set) - previous:].values
inputs = inputs.reshape(-1,1)
# Use transform, NOT fit_transform: the scaler must keep the min/max it
# learned on the training set. Refitting on the test window scales inputs
# on a different range than the one the model was trained with.
inputs = sc.transform(inputs)
len(inputs)
dataset_total=dataset_total[len(dataset_total)-len(test_set) - previous:]
dataset_total=pd.DataFrame(dataset_total)
dataset_total[:5]
# Preparing X_test and predicting the prices
X_test = []
for i in range(previous,len(inputs)):
X_test.append(inputs[i-previous:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
predicted_stock_price = regressorGRU.predict(X_test)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)
# +
#Checking
print(test_set[-1],predicted_stock_price[-1])
# Visualizing the results
plt.figure(figsize=(18,5))
plt.plot(test_set[:], color='red',label='Real Stock Price')
plt.plot(predicted_stock_price[:], color='blue',label='Predicted Stock Price')
plt.title("Stock Price Prediction(GRU)")
plt.xlabel('Time')
plt.ylabel(" Stock Price")
plt.legend()
plt.show();
# -
# Evaluating our model: RMSE of predictions against the held-out test set
import math
from sklearn.metrics import mean_squared_error
rmse = math.sqrt(mean_squared_error(test_set, predicted_stock_price))
print("The root mean squared error is {}.".format(rmse))
# NOTE: the original also computed an RMSE between `training_set` and
# `predicted_stock_price`, but those arrays have different lengths (the
# training window vs. the test-window predictions), so mean_squared_error
# raises a ValueError. A training RMSE would require predictions on X_train.
# +
print(len(test_set))
print(len(predicted_stock_price))
#Prediction Diffrence from prev day prediction to current day prediction
prevday_pred = predicted_stock_price[25]
currentday_pred= predicted_stock_price[26]
diff = currentday_pred - prevday_pred
#Actual Diffrence from prev day Actual price to current day Actual price
_prev = test_set[25]
_current = test_set[26]
_diff = _current - _prev
#printing the results
print("Previous Day Pred",prevday_pred)
print("Next day Pred",currentday_pred)
print("Prediction Diffrence",diff)
print("Actual prev Day Price",_prev)
print("Actual next day price",_current)
print("Actual Diffrence",_diff)
#plt/between original price and prediction price
plt.figure(figsize=(18,5))
plt.plot(test_set[-190:], color='red',label='RealStock Price')
plt.plot(predicted_stock_price[-189:], color='blue',label='Predicted Stock Price')
plt.legend()
plt.show()
# -
# .copy() so we own the slice — assigning new columns on a bare .tail() view
# triggers pandas' SettingWithCopyWarning
results = dataset_total.tail(35).copy()
results["Predicted_Price"] = predicted_stock_price
results.columns = ["Actual Price", "Predicted_Price"]  # fix label typo: was "Actaul Price"
# day-over-day percentage change of the predicted price
results["%_Change"] = results.Predicted_Price.pct_change(axis=0)
# "up" when the next day's prediction exceeds today's, otherwise "Down"
results["Class"] = np.where(results["Predicted_Price"].shift(-1) > results["Predicted_Price"], "up", "Down")
results.tail()
from sklearn.metrics import r2_score
# r2_score expects (y_true, y_pred): ground truth first, predictions second.
# The original call had them swapped, which yields a different (wrong) score.
r2_score(test_set, predicted_stock_price)
|
GRU_MSFT_DATA.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Lab 02 - Simple Linear Regression
#
# Regressions are any learning problem that aim to describe the relation between a set of explanatory
# variables (i.e. features) and a continuous response (or a set of responses). Therefore our dataset is of the form:
#
# $$S=\left\{\left(\mathbf{x}_i, y_i\right)\right\}^m_{i=1} \quad s.t. \quad \mathbf{x}_i\in\mathbb{R^d},\,\,y_i\in\mathbb{R}$$
#
# In the case of Linear Regression the relation learned is a linear one. That is, we search for a linear function to map
# $\mathcal{X}$ to $\mathcal{Y}$. So the hypothesis class of linear regression is:
#
# $$ \mathcal{H}_{reg} = \left\{h:h\left(x_1,\ldots,x_d\right)=w_0 + \sum w_i x_i\right\} $$
#
# Note that the linear function is linear in the parameters $w_0,w_1,\ldots,w_d$. Let us simulate a dataset fitting the case of a simple linear regression:
#
# $$ y_i = w_1 x_i + w_0 \quad i=1,\ldots,m $$
#
# So each hypothesis in the class $\mathcal{H}_{reg}$ is defined by two parameters $w_0,w_1$ - the intercept and slope of
# the line. Suppose the data is generated from the following line: $Y=2X+1$. So $w_0=1$ and $w_1=2$. Let us draw and plot
# samples from this function.
# -
import sys
sys.path.append("../")
from utils import *
# ## Linear Regression
# + pycharm={"name": "#%%\n"}
w0, w1 = 1, 2
x = np.linspace(0, 100, 10)
y = w1*x + w0
# + pycharm={"name": "#%%\n"}
fig = go.Figure([go.Scatter(x=x, y=y, name="Real Model", showlegend=True,
marker=dict(color="orange", opacity=.7),
line=dict(color="black", dash="dash", width=1))],
layout=go.Layout(title=r"$\text{(1) Simulated Data}$",
xaxis={"title": "x - Explanatory Variable"},
yaxis={"title": "y - Response"},
height=400))
fig.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# Using this sample as a **training set**, let us compute the Ordinary Least Squares (OLS) estimators $\hat{w_0},\widehat{w_1}$ of the model. Then, if we are given new samples $x_j$ we can predict its response $\hat{y}_j$:
#
# $$ \hat{y}_j = \hat{w_1} x_j + \hat{w}_0 $$
#
# Over the dataset above, try and think what would you expect the output to be?
# + pycharm={"is_executing": false, "name": "#%%\n"}
from sklearn.linear_model import LinearRegression
noiseless_model = LinearRegression()
noiseless_model.fit(x.reshape((-1,1)), y)
print("Estimated intercept:", noiseless_model.intercept_)
print("Estimated coefficient:", noiseless_model.coef_[0])
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Linear Regression With Noise
# As the dataset used to fit the model lays exactly on a straight line, the estimated coefficients are the correct
# ones (up to floating point precision). Next, let us add some Gaussian noise to the data and see how it influences our
# estimation. So:
#
# $$\forall i \in \left[ m \right]\quad y_i=w_1\cdot x_i + w_0 + \varepsilon_i \quad s.t.\quad
# \varepsilon\sim\mathcal{N}\left(0,\sigma^2I_m\right)$$
#
# Namely, the noise of each sample distributes by a Gaussian with zero mean and $\sigma^2$ variance, and is uncorrelated between samples.
#
# *Notice that from now on we mark the $y$'s generated by the noise-less model with `y_`. This is so it is clear that the "real"
# $y$'s observed in a given sample are noisy.*
# + pycharm={"is_executing": false, "name": "#%%\n"}
if "y_" not in locals(): y_ = y
epsilon = np.random.normal(loc=0, scale=40, size=len(x))
y = y_ + epsilon
fig.add_trace(go.Scatter(x=x, y=y, name="Observed Points", mode="markers", line=dict(width=1)))
fig.update_layout(title=r"$\text{(2) Simulated Data - With Noise}$")
fig.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# Try and execute the block above several times. See how each time the "Observed Points" look different. These datasets,
# though all come from the same model, look very different. Try to think:
#
# * What would happen if we attempt fitting a model to these observations (i.e. the ones with the noise)?
# * How would it influence our estimation of the coefficients $w_0, w_1$?
# * Where will the regression line be?
#
# + pycharm={"is_executing": false, "name": "#%%\n"}
# fix: the original `from pandas import x` raises ImportError (pandas has no
# `x`); `DataFrame` is the name actually used below.
from pandas import DataFrame
model = LinearRegression().fit(x.reshape((-1,1)), y)
# Compare the coefficients estimated on the noise-less vs. noisy samples.
DataFrame({"Model":["Noise-less","Noisy"],
           "Intercept": [noiseless_model.intercept_, model.intercept_],
           "Slope": [noiseless_model.coef_[0], model.coef_[0]]})
# + pycharm={"is_executing": false, "name": "#%%\n"}
y_hat = model.predict(x.reshape(-1,1))
fig.data = [fig.data[0], fig.data[1]]
fig.update_layout(title=r"$\text{(3) Fitted Model Over Noisy Data}$")
fig.add_traces([go.Scatter(x=x, y=y_hat, mode="markers", name="Predicted Responses", marker=dict(color="blue")),
go.Scatter(x=x, y=y_hat, mode="lines", name="Fitted Model", line=dict(color="blue", width=1))])
fig.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# Let us better understand what took place. Schematically, we started with some model
# $$ Y=w_1X+w_0 \quad s.t. w_1=2,w_0=1 $$
#
# and obtained a dataset from this model
# $$ Y=w_1X + w_0 + \mathcal{N}\left(0,\sigma^2\right) $$
#
# Then, using the dataset we estimated the model parameters to obtain $\widehat{w_1},\widehat{w_0}$. However, we should look
# at these steps from two different points of view: the "observer" and the "all-knowing".
# - The "observer" is us whenever we work with data. We somehow obtained samples/observations that we assume to be generated
# from some "true" function/model $f$. As in reality data is noisy, when we assume something about the "true" function we
# also make assumptions about the noise. Then, as we do not know $f$ we try to learn it based on the observations.
# - The "all-knowing", unlike the "observer", knows exactly how $f$ looks and for each sample what is the noise.
#
# In the graph above the <span style="color:Black">**Real Model**</span> is only known to the "all-knowing". We, as the
# "observer" only witness the <span style="color:red">**Observed Points**</span>. We **assumed** the data came from a linear
# model with Gaussian Noise and therefore fitted the OLS estimators $\widehat{w}_1, \widehat{w}_0$. These estimators give
# us the <span style="color:blue">**Fitted Model**</span> and a <span style="color:blue">**Predicted Response**</span> to
# each observation.
# + [markdown] pycharm={"name": "#%% md\n"}
# Using these estimators of the model coefficients we can do two things:
# - **Inference**: We can study the estimated model. What are the statistical properties of our estimators? How confident are
# we in the estimation? Are the features associated with the helpful/relevant for predicting/explaining the response? Etc.
# - **Prediction**: We can use this estimated model to predict the responses of new data-points. How accurate are our predictions? How does the training set (and its size) influence this accuracy?
#
# In the scope of this course we are mainly interested in using the fitted model for prediction, with only slightly
# investigating the properties of our fitted model.
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Multivariate Linear Regression
# Lastly, using a more complicated model, we fit a model and answer some inference and prediction questions.
# To gain a better understanding, please look at the graph below and answer the question before reading the code.
#
# + pycharm={"is_executing": false, "name": "#%%\n"}
response = lambda x1, x2: 5*x1 + .1*x2 + 3
min_x1, min_x2, max_x1, max_x2 = -10, -10, 10, 10
xv1, xv2 = np.meshgrid(np.linspace(min_x1, max_x1, 10), np.linspace(min_x2, max_x2, 10))
surface = response(xv1, xv2)
x = np.random.uniform((min_x1, min_x2), (max_x1, max_x2), (10, 2))
y_ = response(x[:,0], x[:,1])
y = y_ + np.random.normal(0, 30, len(x))
model = LinearRegression().fit(x, y)
y_hat = model.predict(x)
DataFrame({"Coefficient": [rf"$w_{{0}}$".format(i) for i in range(len(model.coef_)+1)],
"Estimated Value": np.concatenate([[model.intercept_], model.coef_])})
# + pycharm={"is_executing": false, "name": "#%%\n"}
go.Figure([go.Surface(x=xv1, y=xv2, z=surface, opacity=.5, showscale=False),
go.Scatter3d(name="Real (noise-less) Points", x=x[:,0], y=x[:,1], z=y_, mode="markers", marker=dict(color="black", size=2)),
go.Scatter3d(name="Observed Points", x=x[:,0], y=x[:,1], z=y, mode="markers", marker=dict(color="red", size=2)),
go.Scatter3d(name="Predicted Points", x=x[:,0], y=x[:,1], z=y_hat, mode="markers", marker=dict(color="blue", size=2))],
layout=go.Layout(
title=r"$\text{(4) Bivariate Linear Regression}$",
scene=dict(xaxis=dict(title="Feature 1"),
yaxis=dict(title="Feature 2"),
zaxis=dict(title="Response"),
camera=dict(eye=dict(x=-1, y=-2, z=.5)))
)).show()
# + [markdown] pycharm={"name": "#%% md\n"}
# # Time To Think...
# In the scenario above we performed a linear regression over observations with more than two features (i.e multi-variate
# linear regression). In gradient color we see the subspace from which our data-points are drawn. As we have 2 features, the subspace is a 2D plane.
#
# Try rotating the figure above and look at the plane from its different axes (such that it looks like a line rather than a plane). This view allows you to see the fit between the one specific feature and the response, similar to the case of fitting a simple linear regression using that feature.
#
# Run the code generating the data and graph with more/less samples and high/lower noise levels. How do these changes influence the quality of the fit?
|
lab/Lab 02 - Simple Linear Regression.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# ---
# R Notebook
# Parts of code taken from Tim Churches blog on "Analysing COVID-19 (2019-nCoV) outbreak data with R"
# https://timchurches.github.io/blog/posts/2020-02-18-analysing-covid-19-2019-ncov-outbreak-data-with-r-part-1/
# Data taken from: https://github.com/covid19india/api
#
# In this notebook, I will try to fit the SEIR(Susceptible, Exposed, Infected, Recovered) model to Maharashtra Covid-19 data.
# In a closed population (that is, assuming no births or deaths), the SEIR model is:
#
# \begin{aligned}
# \frac{dS}{dt} & = -\frac{\beta IS}{N}\\
# \\
# \frac{dE}{dt} & = \frac{\beta IS}{N} - \kappa E\\
# \\
# \frac{dI}{dt} & = \kappa E - \gamma I\\
# \\
# \frac{dR}{dt} & = \gamma I
# \end{aligned}
#
#
# Load the needed packages
library(ggplot2)
library(jsonlite)
library(deSolve)
library(tidyverse)
require(lubridate)
# Get the states data from covid19india.org
states_daily = fromJSON("https://api.covid19india.org/states_daily.json")
dat = states_daily$states_daily
# Have a look at the data.
dat
# Convert the date which is in string to date format, add a column for short representation of date
dat$date = dmy(dat$date)
dat$short_date = substr(dat$date, start = 6, stop = 10)
# The dat DataFrame has all states data. Filter Maharashtra Data and get the daily Confirmed, Recovered, Deceased cases of Maharashtra along with dates. Put these data in new DataFrame mh_dat.
# +
Confirmed <- dat %>% filter(status == "Confirmed") %>% pull(mh)
Confirmed <- as.numeric(Confirmed)
Recovered <- dat %>% filter(status == "Recovered") %>% pull(mh)
Recovered <- as.numeric(Recovered)
Deceased <- dat %>% filter(status == "Deceased") %>% pull(mh)
Deceased <- as.numeric(Deceased)
Date <- dat %>% filter(status == "Confirmed") %>% pull(date)
Short_date <- dat %>% filter(status == "Confirmed") %>% pull(short_date)
mh_dat <- data.frame("Confirmed" = Confirmed, "Recovered" = Recovered, "Deceased" = Deceased,
"Date" = Date, "Short_date"=Short_date)
# -
# For SEIR model we need cumulative daily confirmed cases and not daily confirmed cases. Get the cumulative confirmed, Recovered and, Deceased cases and then subtract cumulative recovered and cumulative deceased cases from cumulative confirmed cases to get actual cumulative confirmed cases for each day.
# +
mh_dat <- within(mh_dat, Cum_Confirmed <- cumsum(Confirmed))
mh_dat <- within(mh_dat, Cum_Recovered <- cumsum(Recovered))
mh_dat <- within(mh_dat, Cum_Deceased <- cumsum(Deceased))
mh_dat$Cum_Confirmed = mh_dat$Cum_Confirmed - (mh_dat$Cum_Recovered + mh_dat$Cum_Deceased)
# -
# Let's look at newly created mh_dat DataFrame containing Maharashtra Data
mh_dat
# Before going ahead, let's plot and view cumulative confirmed, recovered, and deceased cases per day starting from 14th March.
ggplot()+labs(title="Confirmed, recovered and deceased cases in Maharashtra", x="Dates", y="Cases")+
geom_point(mh_dat, mapping=aes(x = Short_date, y = Cum_Confirmed, group=1, color="Total confirmed")) +
geom_line(mh_dat, mapping=aes(x = Short_date, y = Cum_Confirmed, group=1, color="Total confirmed")) +
geom_point(mh_dat, mapping=aes(x = Short_date, y = Cum_Recovered, group=1, color="Total Recovered")) +
geom_line(mh_dat, mapping=aes(x = Short_date, y = Cum_Recovered, group=1, color="Total Recovered")) +
geom_point(mh_dat, mapping=aes(x = Short_date, y = Cum_Deceased, group=1, color="Total Deceased")) +
geom_line(mh_dat, mapping=aes(x = Short_date, y = Cum_Deceased, group=1, color="Total Deceased"))
# Express the differential equations of SEIR model as an R function
# Express the differential equations of SEIR model as an R function.
# deSolve-compatible derivative function: for state c(S, E, I, R) and
# parameters c(beta, kappa, gamma) it returns list(dS, dE, dI, dR).
# Note: the population size N is read from the global environment.
SEIR <- function(time, state, parameters) {
    par <- as.list(c(state, parameters))
    with(par, {
        dS <- -beta * I * S/N              # susceptibles infected at rate beta*I*S/N
        dE <- beta * I * S/N - kappa * E   # exposed become infectious at rate kappa
        dI <- kappa * E - gamma * I        # infectious recover at rate gamma
        dR <- gamma * I
        list(c(dS, dE, dI, dR))            # deSolve::ode expects a list of derivatives
    })
}
# Get the cumulative infected/confirmed cases from 14th March 2020 till 5th April 2020.
Infected <- mh_dat %>% filter(Date >= dmy("14-Mar-20"), Date <= dmy("05-Apr-20")) %>%
pull(Cum_Confirmed)
# Create an incrementing Day vector the same length as Infected. Initialize N with a population of Maharashtra. Initially, everyone is in susceptible class, so initialize Susceptible with S = N - cumulative confirmed cases on 14th march(First day of data), Infected (I) with cumulative Infected cases on 14th March, Exposed (E) with 30, No one has recovered on 14th March so R = 0.
Day <- 1:(length(Infected))
N <- 114200000
init <- c(S = N - Infected[1], E=30, I = Infected[1], R = 0)
# Define a function RSS(Residual Sum of Squares) which needs to be minimized for Infected and predicted Infected cases to get optimum value for parameters β, γ, κ of differential equations.
# Residual sum of squares between observed and model-predicted cumulative
# infected counts. `init`, `Day`, `Infected` and `SEIR` are read from the
# global environment; `optim` minimizes this over (beta, kappa, gamma).
RSS <- function(parameters) {
    names(parameters) <- c("beta", "kappa", "gamma")
    out <- ode(y = init, times = Day, func = SEIR, parms = parameters)
    fit <- out[, 4]  # ode() columns are (time, S, E, I, R): column 4 is I
    sum((Infected - fit)^2)
}
# Fit the SEIR model to our data by finding the values for β, γ, κ that minimize the residual sum of squares between the observed cumulative infected cases and the predicted cumulative infected cases.
Opt <- optim(c(0.5, 0.5, 0.5), RSS, method = "L-BFGS-B", lower = c(0, 0, 0), upper = c(1, 1, 1))
Opt$message
# Examine the fitted values for β, γ, κ.
Opt_par <- setNames(Opt$par, c("beta", "kappa", "gamma"))
Opt_par
# Let's use this model to predict the cumulative infected cases until 25th April.
# +
t <- 1:as.integer(dmy("25-Apr-20") - dmy("14-Mar-20"))
fitted_cumulative_incidence <- data.frame(ode(y = init, times = t, func = SEIR, parms = Opt_par))
fitted_cumulative_incidence <- fitted_cumulative_incidence %>%
mutate(Date = dmy("14-Mar-20") + days(t - 1)) %>%
left_join(mh_dat %>% ungroup() %>% select(Date, Cum_Confirmed))
fitted_cumulative_incidence %>% filter(Date <= dmy("25-Apr-2020")) %>%
ggplot(aes(x = Date)) + geom_line(aes(y = I), colour = "red") +
geom_point(aes(y = Cum_Confirmed), colour = "orange") +
labs(y = "Cumulative Infected",
title = "COVID-19 fitted vs actual cumulative infected, Maharashtra",
subtitle = "(red=fitted infected from SEIR model, orange=actual cumulative infected)")
# -
# We can see that the actual infected is much lower than that predicted by the SEIR model. The reason is that model is not taking into account the measures by the government like lockdown, quarantine. So, using this model we can say that measures taken by the government are in the right direction and working.
# Let's see what will happen if the outbreak is left to run without any intervention.
# +
t <- 1:150
fitted_cumulative_incidence <- data.frame(ode(y = init, times = t,
func = SEIR, parms = Opt_par))
fitted_cumulative_incidence <- fitted_cumulative_incidence %>%
mutate(Date = dmy("14-Mar-20") + days(t - 1)) %>%
left_join(mh_dat %>% ungroup() %>% select(Date, Cum_Confirmed))
fitted_cumulative_incidence %>% ggplot(aes(x = Date)) +
geom_line(aes(y = I), colour = "red") +
geom_line(aes(y = S), colour = "black") +
geom_line(aes(y = R), colour = "green") +
geom_line(aes(y = E), colour = "brown") +
geom_point(aes(y = Cum_Confirmed), colour = "orange") +
scale_y_continuous(labels = scales::comma) +
labs(y = "Persons", title = "COVID-19 fitted vs actual cumulative infected, Maharashtra",
subtitle = "(black=susceptible cases, brown=exposed cases, red=infected cases,
green=recovered cases, orange points = actual cumulative infected cases till date.)")
# -
# Clearly this is a disaster with about 5-6 million infected cases reaching a peak somewhere around mid June.
|
SEIR-MH.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Derivada - Definição
# * Iremos calcular a derivada pela definição
# * Importando biblitecas
import numpy as np
import matplotlib.pyplot as plt
# * Criando função para calcular derivada
def derivada(f, x, metodo, h):
    """Aproxima f'(x) por diferenças finitas.

    metodo: 'central' (erro O(h^2)), 'forward' ou 'backward' (erro O(h)).
    h: passo da diferença finita.
    Levanta ValueError para um método desconhecido.
    """
    if metodo == 'forward':
        return (f(x + h) - f(x)) / h
    if metodo == 'backward':
        return (f(x) - f(x - h)) / h
    if metodo == 'central':
        return (f(x + h) - f(x - h)) / (2 * h)
    raise ValueError("Método deve ser: 'central', 'forward' or 'backward'.")
# * Criando vetor x
x = np.linspace(0,1,101)
# Criando função f(x)
f = lambda x: x**2
# * Aplicando função f(x) em x
fx=f(x)
# * Calculando derivada para diferentes valor de h
derivada_h0001 = derivada(f,x,"forward",0.001)
derivada_h001 = derivada(f,x,"forward",0.01)
derivada_h01 = derivada(f,x,"forward",0.1)
derivada_h1 = derivada(f,x,"forward",1.0)
# * Derivada oficial
f_prime = lambda x: 2*x
fprime_x = f_prime(x)
# * Graficando para comparação
plt.plot(x,fx,color='black',label='f(x)')
plt.plot(x,derivada_h0001,color='red',label="f'(x), h=0.001")
plt.plot(x,derivada_h001,color='blue',label="f'(x), h=0.01")
plt.plot(x,derivada_h01,color='green',label="f'(x), h=0.1")
# fix: the original plotted derivada_h01 twice; the cyan trace labelled
# "h=1.0" must plot derivada_h1
plt.plot(x,derivada_h1,color='cyan',label="f'(x), h=1.0")
plt.plot(x,fprime_x,color='orange',label='Original')
plt.legend()
# * Quanto menor o valor de h, melhor o resultado da derivada
|
Data_Science/Calculo/Derivada - definicao.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="cCSFCpnXvx7P"
# #Setup
# + [markdown] id="Rng0aHCyv2IR"
# ##Imports
# + colab={"base_uri": "https://localhost:8080/"} id="mhGqUOCLv38g" outputId="5e207922-bcd8-4831-811f-0a9121802930"
from google.colab import drive
drive.mount('/content/gdrive')
import sys
sys.path.append('/content/gdrive/MyDrive/TFE_crowd_counting/CSRNet-pytorch')
import os
from image import *
from model import CSRNet
import glob
import torch
# + [markdown] id="S5ynhsRrv9ap"
# ##Variables
# Paramaters to change according to the wanted output
# + id="cDWNfWZsw6wS"
#parameters to change
INPUT_MODEL_NAME = 'A10_SH_C.tar' #Name of the saved CSRNet model weight
OUTPUT_MODEL_NAME = 'A10_SH_512x384' # Will be directory with the json and the bins, usable with tensorflowJS
Input_img_size = (512,384) #Size of the model predictions
use_gpu = False
# + [markdown] id="pDBj2WefxErH"
# #paths
# + id="0SL2m_SawKAo"
root = '/content/gdrive/MyDrive/TFE_crowd_counting'
scripts_path = os.path.join(root,'CSRNet-pytorch')
models_path = os.path.join(scripts_path,'models')
onnx_path = os.path.join(scripts_path,'models/ONNX')
input_model_path = os.path.join(models_path,INPUT_MODEL_NAME)
onnx_model_path = os.path.join(onnx_path, OUTPUT_MODEL_NAME+'.onnx')
tf_model_path = os.path.join(models_path, OUTPUT_MODEL_NAME+'.pb')
output_path = os.path.join(models_path, OUTPUT_MODEL_NAME)
# + [markdown] id="fNCvGnDeYpL9"
# #Utils
# + id="iaJs12xVxmeJ"
#return the loaded model located at model_path (or the basic shangai partAmodel if no path is given )
#using gpu or using cpu (if the use_gpu parameter is set to False)
#return the loaded CSRNet model located at model_path,
#using gpu or using cpu (if the use_gpu parameter is set to False)
def load_model(model_path, use_gpu = use_gpu):
    """Load a CSRNet model from a checkpoint.

    Parameters
    ----------
    model_path : str
        Path to a torch checkpoint dict containing a 'state_dict' entry.
    use_gpu : bool
        Move the model to CUDA when True, otherwise keep it on CPU.

    Returns
    -------
    CSRNet
        The model with weights loaded, on the requested device.
    """
    model = CSRNet()
    # map_location='cpu' lets a checkpoint saved on a GPU machine load on a
    # CPU-only host (the original torch.load would raise in that case);
    # the unused `transform` pipeline from the original was dead code.
    checkpoint = torch.load(model_path, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    return model.cuda() if use_gpu else model.cpu()
# + [markdown] id="9nY3Xsb1yARQ"
# #Conversion
# + colab={"base_uri": "https://localhost:8080/"} id="HIa0HscpyEBx" outputId="0b6b255f-88bf-4257-dd63-38f79866d4e4"
#Step 1: convert to onnx
use_gpu = False
model = load_model(input_model_path)
input_names = [ "image" ]
output_names = [ "output1" ]
# Dummy input is (batch, channels, H, W); Input_img_size is (W, H), hence
# the [1], [0] index order below.
if use_gpu:
    # fix: the original used Input_img_size[0], Input_img_size[1] here,
    # i.e. (W, H), disagreeing with the CPU branch and exporting the model
    # with transposed spatial dims when run on GPU
    dummy_input = torch.randn(1, 3, Input_img_size[1], Input_img_size[0], device='cuda')
else:
    dummy_input = torch.randn(1, 3, Input_img_size[1], Input_img_size[0])
torch.onnx.export(model, dummy_input, onnx_model_path, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
# + colab={"base_uri": "https://localhost:8080/"} id="JNRV1m30y6Q8" outputId="fdcd1425-8449-4121-8e1f-cdd7c1db0695"
#Step 2: export to tensorflow
# #!pip install git+https://github.com/onnx/onnx-tensorflow.git #May be commented if the package is already installed
# !onnx-tf convert -i {onnx_model_path} -o {tf_model_path}
# + id="CUuE6BC7zMKI"
#Step 3: export to tensorflow.js
# #!pip install tensorflowjs #May be commented if the package is already installed
# + colab={"base_uri": "https://localhost:8080/"} id="RtOPKDwdVH3H" outputId="4725cf8a-3713-4760-d451-3a21af35ee3e"
# Print the two paths so they can be copy-pasted as arguments for the
# tensorflowjs_converter call below.
print(tf_model_path)
print(output_path)
# + colab={"base_uri": "https://localhost:8080/"} id="hLt07r9oOxyI" outputId="ae35cc8d-e9de-4b3e-860c-cc86aa26ca02"
# !tensorflowjs_converter --input_format='tf_saved_model' '/content/gdrive/MyDrive/TFE_crowd_counting/CSRNet-pytorch/models/A10_SH_512x384.pb' '/content/gdrive/MyDrive/TFE_crowd_counting/CSRNet-pytorch/models/A10_SH_512x384'
|
exemples/Conversion_to_JS.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import os
os.chdir("..")  # move one directory up so the project modules imported below resolve
import matplotlib.pyplot as plt
import sys
import numpy as np
import os
from math import log
import random
import time
import pickle
from numpy.linalg import pinv
from arm_class import ArmGaussian
from LinUCB_class import PolicyLinUCB
from dLinUCB_class import DynamicLinUCB
from D_LinUCB_class import DLinUCB
from SW_LinUCB_class import SWLinUCB
from environment_class import Environment
from simulator_class import Simulator
from utils import plot_regret, scatter_abrupt, scatter_smooth, action_check, get_B_T_smooth, detection_sorted
# General parameters for all the experiments
delta = 0.01 # Probability of being outside the confidence interval
s = 1 # Bound on the theta_star parameter
lambda_ = 0.1 # Regularisation parameter
q = 5 # Displaying the quantile (in %)
# +
def experiment_smooth(n_mc, option):
    """Run the smoothly-changing-environment bandit experiment.

    Relies on module-level configuration: d, theta, sigma_noise, verbose,
    policies, k, steps, step_1, R, angle_init, angle_end, q, t_saved,
    n_scat, n_scat_true and (for the parallel mode) n_process.

    Parameters
    ----------
    n_mc : int
        Number of Monte-Carlo repetitions.
    option : str
        "par" to run the multiprocessing simulator; anything else runs the
        sequential simulator, which additionally returns timing information
        and the true/estimated parameter trajectories.

    Returns
    -------
    tuple
        (avgRegret, qRegret, QRegret) in parallel mode, or
        (avgRegret, qRegret, QRegret, timedic, theta_true, theta_hat)
        in sequential mode.
    """
    mab = Environment(d, theta, sigma_noise, verbose)
    simulator = Simulator(mab, theta, policies, k, d, steps, {}, verbose)
    if option == "par":
        # Echo the configuration so parallel runs are reproducible from logs.
        print('n_process:', n_process)
        print('Step1:', step_1)
        print("steps:", steps)
        print("n_mc:", n_mc)
        print("Angle Init:", angle_init)
        print("Angle End:", angle_end)
        print("q:", q)
        avgRegret, qRegret, QRegret = simulator.run_multiprocessing_smooth(n_process, step_1, steps,
                                                                           n_mc, R, angle_init, angle_end,
                                                                           q, t_saved)
        return avgRegret, qRegret, QRegret
    else:
        avgRegret, qRegret, QRegret, timedic, theta_true, theta_hat = simulator.run_smooth_environment(step_1, steps,
                                                                                                       n_mc, q, R, angle_init, angle_end,
                                                                                                       n_scat, n_scat_true,
                                                                                                       t_saved)
        return avgRegret, qRegret, QRegret, timedic, theta_true, theta_hat
def data_from_experiment_smooth(n_mc, option):
    """Run the smooth experiment and bundle per-policy regret statistics.

    Returns the regret rows alone in parallel mode, or the rows plus the
    estimated/true trajectories and timing dict in sequential mode.
    """
    results = experiment_smooth(n_mc, option)
    # The first three entries are always (avgRegret, qRegret, QRegret).
    avg_regret, q_regret, big_q_regret = results[0], results[1], results[2]
    rows = [[policy, avg_regret[policy], q_regret[policy], big_q_regret[policy]]
            for policy in avg_regret]
    if option == "par":
        return rows
    timedic, theta_true, theta_hat = results[3], results[4], results[5]
    return rows, theta_hat, theta_true, timedic
# +
# The saved files should be located in the saved/ folder
# The output images would be located in the out/ folder
# If the folders are not created they must be created to run the code without bugs
# Create the three run folders next to the notebook if they are missing.
# (The original repeated the same if/else three times; one loop replaces it.)
path = os.getcwd()
out_dir = 'out_SLOW_NEW'
saved_data_path = 'saved_SLOW_NEW'
detection_folder = 'detection_out_SLOW_NEW'
for _folder in (out_dir, saved_data_path, detection_folder):
    _target = os.path.join(path, _folder)
    if not os.path.exists(_target):
        os.mkdir(_target)
        print('Creating the folder %s' % _folder)
    else:
        print("%s already exists" % _folder)
# +
def save_file_from_folder(folder, filename, var):
    """Pickle ``var`` to ``<folder>/<filename>.pkl``."""
    with open(os.path.join(str(folder), str(filename) + '.pkl'), 'wb') as f:
        pickle.dump(var, f)

def load_file_from_folder(folder, filename):
    """Load and return the pickled object at ``<folder>/<filename>.pkl``."""
    with open(os.path.join(str(folder), str(filename) + '.pkl'), 'rb') as f:
        return pickle.load(f)

def save_file(filename, var):
    """Pickle ``var`` into the default ``saved_data_path`` folder."""
    # Delegates to the folder-aware helper instead of duplicating its body.
    save_file_from_folder(saved_data_path, filename, var)

def load_file(filename):
    """Load a pickled object from the default ``saved_data_path`` folder."""
    return load_file_from_folder(saved_data_path, filename)
# -
# ## Smoothly changing environment
# +
# PARAMETERS for the smoothly-changing environment experiment
d = 2 # Dimension of the problem
k = 50 # Number of arms available at each step
# Steps_part
step_1 = 3000 # number of steps for the smooth modification
steps = 6000 # for the scatter plot only -> total number of steps
steps_calibration = 6000
n_scat = 400       # number of estimated points shown on the scatter plot
n_scat_true = 300  # number of true-trajectory points shown on the scatter plot
t_saved = None # Saving the entire trajectory
# The following commented lines allow to save only some points on the trajectory
# number_t_saved = steps//10
# t_saved = np.int_(np.linspace(0, steps - 1, number_t_saved))
alpha = 1
sigma_noise = 1
verbose = False
q = 5 # 5 percent quantiles used
R = 1 # True parameter evolving on the unit circle
angle_init = 0
angle_end = np.pi/2
B_T = get_B_T_smooth(step_1, R, angle_init, angle_end, d)
print('B_T value:', B_T)
# Bug fix: the printed message contained the typo "experimenxt".
print('Sigma value for the experiment:', sigma_noise)
theta = np.array([1, 0]) # Starting point of the unknown parameter
bp = {} # No breakpoints but continuous changes
gamma = 1 - (B_T/(d*steps_calibration))**(2/3) # Optimal Value to minimize the asymptotical regret
tau = (d*steps_calibration/B_T)**(2/3) # Optimal Value to minimize the asymptotical regret
# Policies compared in this experiment (all share the same confidence/bound
# parameters; tau=200 for dLinUCB matches the original configuration).
policies = [DLinUCB(d, delta, alpha, lambda_, s, gamma, '', sm = False, sigma_noise = sigma_noise, verbose=verbose),
            SWLinUCB(d, delta, alpha, lambda_, s, tau, '', sm=False, sigma_noise=sigma_noise, verbose=verbose),
            DynamicLinUCB(d, delta, alpha, lambda_, s, tau=200, name='',filename = 'smooth_exp_final',
                          sm=True, sigma_noise = sigma_noise, delta_2=0.01, tilde_delta=0.002, verbose=False,
                          omniscient=False),
            PolicyLinUCB(d, delta, alpha, lambda_, s, '', sm = True, sigma_noise = sigma_noise, verbose=verbose)
            ]
# -
# Small experiment with 2 repetitions
data_2, hat_2, true_2, time_2 = data_from_experiment_smooth(n_mc=2, option = '')
plot_regret(data_2, t_saved, filename = None, log=False, qtl=True, loc=2, font=13, bp = bp, bp_2 = {})
scatter_smooth(hat_2, None, true_2, loc=0, font=10, circle=True)
# Larger experiment with 100 repetitions.
# The timing result is named `timing` so it no longer shadows the imported
# `time` module.
data, hat, true, timing = data_from_experiment_smooth(n_mc=100, option = '')
plot_regret(data, t_saved, filename = None, log=False, qtl=True, loc=2, font=13, bp = bp, bp_2 = {})
# Bug fix: the 100-repetition estimates were plotted against `true_2` (the
# 2-repetition true trajectory); pair them with their own `true` trajectory.
scatter_smooth(hat, None, true, loc=0, font=10, circle=True)
scatter_smooth(hat, None, true, loc=1, font=13, circle=True)
plot_regret(data, t_saved, filename = None, log=False, qtl=True, loc=2, font=13, bp = bp, bp_2 = {})
|
Experiments/exp_SLOW_ENV_NEW.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# load data from PostgreSQL to csv
import pandas
import pickle
import numpy
import time
import psycopg2

# Connection settings (password redacted in this copy).
t_host = "localhost"
t_port = "5432"
t_dbname = "postgres"
t_user = "postgres"
t_pw = "<PASSWORD>"
# Bug fix: the password keyword must reference the t_pw variable; the bare
# <PASSWORD> placeholder was a SyntaxError.
db_conn = psycopg2.connect(host=t_host, port=t_port, dbname=t_dbname, user=t_user, password=t_pw)
db_cursor = db_conn.cursor()
time_start = time.time()
# create a query to specify which values we want from the database.
s = "SELECT * FROM higgs"
# Use the COPY function on the SQL we created above.
SQL_for_file_output = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(s)
# Set up a variable to store our file path and name.
t_path_n_file = "higgs.csv"
with open(t_path_n_file, 'w') as f_output:
    db_cursor.copy_expert(SQL_for_file_output, f_output)
time_end=time.time()
total_time = time_end-time_start
print('loading data out of PostgreSQL to csv time cost',total_time*1000,'ms')
# +
# do the inference from postgres
import pandas
import pickle
import joblib
import torch
import torchvision
from hummingbird.ml import convert
import psycopg2
import numpy
import time

# Load the pickled sklearn random forest and compile it to PyTorch.
filename = 'rf-10-8-6.pkl'
loaded_model = joblib.load(filename)
model = convert(loaded_model,'pytorch')

t_host = "localhost"
t_port = "5432"
t_dbname = "postgres"
t_user = "postgres"
t_pw = "<PASSWORD>"
db_conn = psycopg2.connect(host=t_host, port=t_port, dbname=t_dbname, user=t_user, password=t_pw)
db_cursor = db_conn.cursor()
input_size = 110000
batch_size = 10000
exe_total_time = 0
time_start = time.time()
try:
    # Bug fix: the SELECT used to be re-executed inside the loop, which reset
    # the cursor so every fetchmany() returned the same first batch. Execute
    # once and stream batches from the single result set.
    db_cursor.execute("SELECT leptonpT,leptoneta,leptonphi,missingenergymagnitude,missingenergyphi,"+
                      "jet1pt,jet1eta,jet1phi,jet1btag,jet2pt,jet2eta,jet2phi,jet2btag,jet3pt,jet3eta,"+
                      "jet3phi,jet3btag,jet4pt,jet4eta,jet4phi,jet4btag,mjj,mjjj,mlv,mjlv,mbb,mwbb,mwwbb from higgs;")
    for i in range(int(input_size/batch_size)):
        some_tuple = db_cursor.fetchmany(batch_size)
        exe_time_start = time.time()
        pred = model.predict(some_tuple)
        exe_total_time = exe_total_time + time.time() - exe_time_start
except psycopg2.Error as e:
    # Bug fix: concatenating the exception object to a str raised a
    # TypeError, and "/n" was a typo for the newline escape.
    t_message = "Postgres Database error: " + str(e) + "\n"
time_end=time.time()
print('exe time cost',exe_total_time*1000,'ms')
print('total time cost',(time_end-time_start)*1000,'ms')
# +
# do the inference from csv
import pandas
import pickle
import joblib
import torch
import torchvision
from hummingbird.ml import convert
# Load the pickled sklearn random forest and compile it to PyTorch.
filename = 'rf-10-8-6.pkl'
loaded_model = joblib.load(filename)
model = convert(loaded_model,'pytorch')
import numpy as np
import time
total_start_time = time.time()
input_size = 110000
batch_size = 10000
total_time = 0
# NOTE(review): the export cell above writes "higgs.csv" while this reads
# "HIGGS.csv" -- confirm the filename on a case-sensitive filesystem.
# usecols skips column 0 (presumably the label) and keeps 28 feature columns.
data = pandas.read_csv("HIGGS.csv",usecols=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28])
# Predict batch by batch, timing only the model inference.
for i in range(int(input_size/batch_size)):
    thisdata = data[i*batch_size:(i+1)*batch_size]
    time_start = time.time()
    pred = model.predict(thisdata)
    time_end=time.time()
    total_time = total_time + (time_end-time_start)
total_end_time = time.time()
print('exe time cost',total_time*1000,'ms')
print('total time cost',(total_end_time-total_start_time)*1000,'ms')
# +
# do the inference via Connector-X from PostgreSQL
import connectorx as cx
from sklearn.ensemble import RandomForestClassifier
import pandas
import pickle
import joblib
import psycopg2
import torch
import torchvision
from hummingbird.ml import convert

# Load the pickled sklearn random forest and compile it to PyTorch.
filename = 'rf-10-8-6.pkl'
loaded_model = joblib.load(filename)
model = convert(loaded_model,'pytorch')
import numpy as np
import time
input_size = 11000
batch_size = 1000
exe_total_time = 0
total_time_start = time.time()
try:
    query = "SELECT leptonpT,leptoneta,leptonphi,missingenergymagnitude,missingenergyphi,"+"jet1pt,jet1eta,jet1phi,jet1btag,jet2pt,jet2eta,jet2phi,jet2btag,jet3pt,jet3eta,"+"jet3phi,jet3btag,jet4pt,jet4eta,jet4phi,jet4btag,mjj,mjjj,mlv,mjlv,mbb,mwbb,mwwbb from higgs"
    # Connector-X pulls the whole table into a pandas DataFrame in one call.
    data = cx.read_sql("postgresql://postgres:postgres@localhost:5432/postgres", query)
    for i in range(int(input_size/batch_size)):
        thisdata = data[i*batch_size:(i+1)*batch_size]
        time_start = time.time()
        pred = model.predict(thisdata)
        time_end=time.time()
        exe_total_time = exe_total_time + (time_end-time_start)
except psycopg2.Error as e:
    # Bug fix: concatenating the exception object to a str raised a
    # TypeError, and "/n" was a typo for the newline escape.
    t_message = "Postgres Database error: " + str(e) + "\n"
total_time_end=time.time()
print('exe time cost',exe_total_time*1000,'ms')
print('total time cost',(total_time_end-total_time_start)*1000,'ms')
# +
# do the inference loading all from postgres
from sklearn.ensemble import RandomForestClassifier
import pandas
import pickle
import joblib
import psycopg2
import torch
import torchvision
from hummingbird.ml import convert

# Load the pickled sklearn random forest and compile it to PyTorch.
filename = 'rf-10-8-6.pkl'
loaded_model = joblib.load(filename)
model = convert(loaded_model,'pytorch')
import numpy
import time
t_host = "localhost"
t_port = "5432"
t_dbname = "postgres"
t_user = "postgres"
t_pw = "<PASSWORD>"
db_conn = psycopg2.connect(host=t_host, port=t_port, dbname=t_dbname, user=t_user, password=t_pw)
db_cursor = db_conn.cursor()
input_size = 11000
batch_size = 1000
exe_total_time = 0
time_start = time.time()
try:
    # Fetch the entire result set once, then slice it into batches.
    db_cursor.execute("SELECT leptonpT,leptoneta,leptonphi,missingenergymagnitude,missingenergyphi,"+
                      "jet1pt,jet1eta,jet1phi,jet1btag,jet2pt,jet2eta,jet2phi,jet2btag,jet3pt,jet3eta,"+
                      "jet3phi,jet3btag,jet4pt,jet4eta,jet4phi,jet4btag,mjj,mjjj,mlv,mjlv,mbb,mwbb,mwwbb from higgs;")
    all_tuple = db_cursor.fetchall()
    for i in range(int(input_size/batch_size)):
        thisdata = all_tuple[i*batch_size:(i+1)*batch_size]
        exe_time_start = time.time()
        pred = model.predict(thisdata)
        exe_total_time = exe_total_time + time.time() - exe_time_start
except psycopg2.Error as e:
    # Bug fix: concatenating the exception object to a str raised a
    # TypeError, and "/n" was a typo for the newline escape.
    t_message = "Postgres Database error: " + str(e) + "\n"
time_end=time.time()
print('exe time cost',exe_total_time*1000,'ms')
print('total time cost',(time_end-time_start)*1000,'ms')
# -
|
model-inference/decisionTree/experiments/higgs/HummingBird/.ipynb_checkpoints/Pytorch_Test-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Lesson 4: Non-Primitive Data Types
#
# In this lesson, we will learn about...
#
# # + Lists
# # + Tuples
# # + Sets
# # + Dictionaries
# + [markdown] slideshow={"slide_type": "slide"}
# While there are many others, we will look at the four main **collection** data types. Many have roots in math. They can hold any data type, and even multiple types at once. They have many similarities but some important differences.
#
# |Type|Representation|Ordered?|Changeable / Mutable?|Duplicates?|Special Qualities|
# |:---:|:---:|:---:|:---:|:---:|:---:|
# |List|`[]`|Yes|Yes|Yes||
# |Tuple|`()`|Yes|No|Yes||
# |Set|`{}`|No|Yes|No|Unindexed|
# |Dictionary|`{:}`|No|Yes|No|Indexed by key|
# + [markdown] slideshow={"slide_type": "slide"}
# # List
#
# This is the closest data type to the traditional array.
# + slideshow={"slide_type": "-"}
# Read-only list operations.
fruits = ["apple", "banana", "orange"]
print(fruits)              # the whole list
print(len(fruits))         # number of elements
print(fruits[1])           # indexing (0-based)
print(fruits[-1])          # negative index counts from the end
print(fruits[0:2])         # slicing: elements 0 and 1
print("banana" in fruits)  # membership test
print(fruits[0].upper())   # elements keep their own methods (str here)
print(fruits[1][1])        # indexing into an element ("banana"[1] -> "a")
# + [markdown] slideshow={"slide_type": "notes"}
# # + You can use the same operators as strings to determine the length, get specific elements, and see if an element is in the list.
# # + Since this list stores strings, you can then even use string functions on the result!
# + slideshow={"slide_type": "subslide"}
# Mutating list operations.
fruits = ["apple", "banana", "orange"]
veggies = ["carrot", "squash"]
fruits[1] = "pineapple"     # replace by index
print(fruits)
fruits.append("blueberry")  # add at the end
fruits.insert(1, "tomato")  # insert at index 1, shifting later items right
print(fruits)
fruits.remove("apple")      # remove by value (raises if absent)
print(fruits)
popped = fruits.pop()       # remove and return the last element
print(fruits)
print(popped)
print(fruits + veggies)     # concatenation builds a new list
fruits.clear()              # empty the list in place
print(fruits)
# + [markdown] slideshow={"slide_type": "notes"}
# # + You can update an item at a given index by referencing its position and assigning a value
# # + You can `insert()` a value at a given index. This pushes other values back.
# # + You can `remove()` a given value. An error is thrown if it doesn't exist.
# # + You can `pop()` the last item (or specify an index) off the list. This also returns what it popped.
# # + Concatenate lists by adding them together.
# # + `clear()` a list to remove all its contents.
# + [markdown] slideshow={"slide_type": "-"}
# There are *many* more list methods available. A reference can be found [here](https://www.w3schools.com/python/python_ref_list.asp).
# + [markdown] slideshow={"slide_type": "slide"}
# # Tuple
#
# These are ordered but unchangeable / immutable.
# + slideshow={"slide_type": "-"}
# Read-only operations work on tuples just as on lists.
fruits = ("apple", "banana", "orange")
print(fruits)
print(len(fruits))
print(fruits[1])           # indexing (0-based)
print(fruits[-1])          # negative index counts from the end
print(fruits[0:2])         # slicing returns a new tuple
print("banana" in fruits)  # membership test
print(fruits[0].upper())   # element methods still work (str here)
print(fruits[1][1])        # indexing into an element
# + [markdown] slideshow={"slide_type": "notes"}
# # + You can use the same operators as strings to determine the length, get specific elements, and see if an element is in the tuple.
# # + Since this tuple stores strings, you can then even use string functions on the result!
# + slideshow={"slide_type": "subslide"}
# Tuples are immutable: every mutating operation below is illegal.
fruits = ("apple", "banana", "orange")
veggies = ("carrot", "squash")
# fruits[1] = "pineapple" ## Illegal!
# print(fruits)
# fruits.append("blueberry") ## Illegal!
# fruits.insert(1, "tomato") ## Illegal!
# print(fruits)
# fruits.remove("apple") ## Illegal!
# print(fruits)
# popped = fruits.pop() ## Illegal!
# print(fruits)
# print(popped)
print(fruits + veggies)  # concatenation builds a NEW tuple, so it is allowed
# fruits.clear() ## Illegal!
# print(fruits)
# + [markdown] slideshow={"slide_type": "notes"}
# # + Concatenate tuples by adding them together.
# + [markdown] slideshow={"slide_type": "subslide"}
# There are only two methods available for tuples, `count()` and `index()`. [Learn more here](https://www.w3schools.com/python/python_ref_tuple.asp).
# + [markdown] slideshow={"slide_type": "slide"}
# # Sets
#
# These have no order and do not allow duplicates. They are also unindexed.
# + slideshow={"slide_type": "-"}
# Sets are unordered and unindexed: no indexing or slicing.
fruits = {"apple", "banana", "orange"}
print(fruits)
print(len(fruits))
# print(fruits[1]) ## Illegal!
# print(fruits[-1]) ## Illegal!
# print(fruits[0:2]) ## Illegal!
print("banana" in fruits)  # membership testing is the set's core operation
# print(fruits[0].upper()) ## Illegal!
# print(fruits[1][1]) ## Illegal!
# + [markdown] slideshow={"slide_type": "notes"}
# # + You can use the same operators as strings to see if an element is in the list.
# + slideshow={"slide_type": "subslide"}
# Mutating set operations.
fruits = {"apple", "banana", "orange"}
veggies = {"carrot", "squash"}
# fruits[1] = "pineapple" ## Illegal!
# print(fruits)
fruits.add("blueberry")        # insert a value (position is arbitrary)
# fruits.insert(1, "tomato") ## Illegal!
print(fruits)
fruits.remove("apple")         # raises if the value is absent
print(fruits)
fruits.discard("lemon")        # like remove, but silent when absent
print(fruits)
popped = fruits.pop()          # removes and returns an arbitrary element
print(fruits)
print(popped)
union = fruits.union(veggies)  # set algebra: union, intersection, ...
print(union)
fruits.clear()                 # empty the set in place
print(fruits)
# + [markdown] slideshow={"slide_type": "notes"}
# # + Sets are unindexed, so you cannot update an element in place; remove the old value and `add()` the new one instead.
# # + You can `add()` a value.
# # + You can `remove()` a given value. An error is thrown if it doesn't exist.
# # + You can `discard()` a value. No error is thrown if it doesn't exist.
# # + You can `pop()` the last item off the set. This also returns what it popped. Remember that the last item is arbitrary.
# # + Several methods exist to do set operations, like `union()` and `intersection()`.
# # + `clear()` a set to remove all its contents.
# + [markdown] slideshow={"slide_type": "subslide"}
# There are *many* more set methods available. A reference can be found [here](https://www.w3schools.com/python/python_ref_set.asp).
# + [markdown] slideshow={"slide_type": "slide"}
# # Dictionary
#
# A dictionary is a map between a string key and a value.
# + slideshow={"slide_type": "-"}
# Read operations on a dict mapping fruit name -> color.
colors = {"orange": "orange", "banana": "yellow", "apple": "red"}
print(colors)
print(colors.values())     # just the values
print(colors.items())      # (key, value) pairs
print(len(colors))
print(colors["banana"])    # lookup by key
print("banana" in colors)  # `in` tests KEYS...
print("yellow" in colors)  # ...so this is False even though "yellow" is a value
# + [markdown] slideshow={"slide_type": "notes"}
# # + Use `values()` to get the actual values, or `items()` to get tuples of key-value pairs.
# # + You can use the `in` keyword to see if a **key** is in the dictionary. To check values, you will have to check the `values()`.
# + slideshow={"slide_type": "subslide"}
# Mutating dict operations.
colors = {"orange": "orange", "banana": "yellow", "apple": "red"}
colors2 = {"tomato": "red"}
colors["blueberry"] = "blue"  # assigning to a new key adds an entry
print(colors.items())
popped = colors.pop("apple")  # remove by key, returns the value
print(colors.items())
print(popped)
popped = colors.popitem()     # remove the most recently added entry
print(colors.items())
print(popped)
colors["more"] = colors2      # values can themselves be dicts -> nesting
print(colors.items())
colors.clear()                # empty the dict in place
print(colors.items())
# + [markdown] slideshow={"slide_type": "notes"}
# # + Add new items to the dictionary by assigning a value to the new key.
# # + `pop()` removes the item with the specified key, and returns its value.
# # + `popitem()` removes the last added item, and returns a tuple of the key and value.
# # + You can nest dictionaries by setting a key to have a dictionary as the value.
# # + `clear()` removes all items from the dictionary.
# + [markdown] slideshow={"slide_type": "-"}
# There are *many* more dictionary methods available. A reference can be found [here](https://www.w3schools.com/python/python_ref_dictionary.asp).
|
04-nonprimitives/04-nonprimitives.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gwenostergren/20MA573/blob/master/src/HW03_All_Parts.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="w66H_SVWRBnH" colab_type="code" colab={}
#Packages to Import
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
import scipy.optimize as so
# + [markdown] id="5TQaYfXTP_Yz" colab_type="text"
# #Option Combinations
# + [markdown] colab_type="text" id="C8HzenUTSqhK"
# Recall that,
#
#
# __Written K-strike straddle__ is a portfolio of
# - selling K-strike call of one unit
# - selling K-strike put of one unit
#
# __Butterfly__ with three stikes $K_1 < K_2 < K_3$ is the portfolio of
# - 1 unit of written K-strike straddle
# - 1 unit of purchased K-strike call
# - 1 unit of purchased K-strike put
#
# Plot a diagram of exercise price versus payoff for the following portfolios:
# - written 40-strike straddle
# - a butterfly consists of
# - written 40-strike straddle
# - purchased 45-strike call
# - purchased 35-strike put
# + id="mwyXphpuOD3k" colab_type="code" colab={}
'''Option Class Initialization Based on Lecture Notes'''
class VanillaOption:
    """A European vanilla option (call or put).

    Attributes
    ----------
    otype : int
        1 for a call, -1 for a put.
    strike : float
        Strike price.
    maturity : float
        Time to maturity in years.
    market_price : float
        Observed market price (used later for implied-volatility search).
    """

    def __init__(
            self,
            otype = 1, # 1: 'call', -1: 'put'
            strike = 110.,
            maturity = 1.,
            market_price = 10.):
        self.otype = otype
        self.strike = strike
        self.maturity = maturity
        self.market_price = market_price

    def explain_yourself(self): #printing option type
        """Print whether this option is a call or a put."""
        if self.otype == 1:
            print("I am a call.")
        if self.otype == -1:
            print("I am a put.")

    def payoff(self, s): #s: exercise price
        """Return the payoff at exercise price ``s``.

        max(0, s - K) for a call (otype=1), max(0, K - s) for a put (otype=-1).
        """
        # The original bound two unused locals (otype re-read, maturity) and
        # wrapped the operands in a throwaway list; a plain two-argument max
        # is equivalent.
        return max(0, (s - self.strike) * self.otype)
# + id="TlDpvvdVZRnk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="1d855f20-6e92-4b0f-b886-250ee386dd22"
# Plotting exercise price vs payoff of a written 40-strike straddle.
# NOTE(review): the payoffs are summed as long positions; a WRITTEN straddle's
# payoff is the negative of this sum -- confirm the intended sign convention.
option_wss_call = VanillaOption(otype = 1, strike = 40, maturity = 1.)
option_wss_put = VanillaOption(otype = -1, strike = 40, maturity = 1.)
strike_list = range(10,70)  # exercise prices to evaluate
wss_call_payoff = [option_wss_call.payoff(s) for s in strike_list]
wss_put_payoff = [option_wss_put.payoff(s) for s in strike_list]
wss_payoff_list = []
# Element-wise sum of call and put payoffs gives the straddle payoff.
if(len(wss_call_payoff)==len(wss_put_payoff)):
    for i in range(0, len(wss_call_payoff)):
        wss_payoff_list.append(wss_call_payoff[i] + wss_put_payoff[i])
else:
    print("Error, option payoff not called on same range")
plt.plot(strike_list, wss_payoff_list)
plt.xlabel('Exercise Price')
plt.ylabel('Payoff')
plt.title('Written 40 Strike Straddle Payoff');
plt.show()
# + id="bYCRVJPlZUvU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="e12d253d-07db-4509-d099-767bb7d41f82"
# Plotting exercise price vs payoff of an example butterfly portfolio.
option_fly_call = VanillaOption(otype= 1, strike= 45, maturity= 1.)
option_fly_put = VanillaOption(otype= -1, strike= 35, maturity= 1.)
fly_call_payoff = [option_fly_call.payoff(s) for s in strike_list]
fly_put_payoff = [option_fly_put.payoff(s) for s in strike_list]
fly_payoff_list = []
# Butterfly = straddle payoff + purchased 45-call + purchased 35-put.
if(len(wss_payoff_list) == len(fly_call_payoff) and len(wss_payoff_list) == len(fly_put_payoff)):
    for i in range(0,len(fly_call_payoff)):
        fly_payoff_list.append(wss_payoff_list[i] + fly_call_payoff[i] + fly_put_payoff[i])
else:
    print("Error, option payoff not called on same range")
# Bug fix: fly_payoff_list was computed but the plot re-used wss_payoff_list,
# so the straddle curve was drawn under the butterfly title.
plt.plot(strike_list, fly_payoff_list)
plt.xlabel('Exercise Price')
plt.ylabel('Payoff')
plt.title('Butterfly Portfolio Payoff');
plt.show()
# + [markdown] id="NEtrDZH9QGCJ" colab_type="text"
# #BSM Price Change
# + [markdown] id="R3uJxJbwtZjp" colab_type="text"
# Consider an european option with
# - call type
# - strike = 110
# - maturity = T
# underlying a Gbm stock with
# - initial: 100
# - interest rate: 4.75%
# - vol ratio: $\sigma$
#
# We denote this bsm price by $f(\sigma, T)$.
#
# - Let $\sigma = 20\%$ fixed. plot $T \mapsto f(0.2, T)$ when $T$ is ranging over $(0.5, 2)$.
#
# - Let $T = 1$ fixed. plot $\sigma \mapsto f(\sigma, 1)$ when $\sigma$ is ranging over $(.05, 0.5)$
#
# - Describe your observations. Do you think the same behavior is also true for put?
#
# - Could you prove your observations?
# + colab_type="code" id="Fkeqt5l6TWgP" colab={}
'''Geometric Brownian Motion Class Initialization Based on Lecture '''
class Gbm:
    """Geometric Brownian motion stock model with a Black-Scholes-Merton pricer."""

    def __init__(self,
                 init_state = 100.,
                 drift_ratio = .0475,
                 vol_ratio = .2
                 ):
        self.init_state = init_state    # spot price S0
        self.drift_ratio = drift_ratio  # risk-free rate r
        self.vol_ratio = vol_ratio      # volatility sigma

    def bsm_price(self, vanilla_option):
        """Return the BSM price of ``vanilla_option`` written on this stock.

        ``vanilla_option`` must expose ``otype`` (+1 call / -1 put),
        ``strike`` and ``maturity``.
        """
        spot = self.init_state
        rate = self.drift_ratio
        sigma = self.vol_ratio
        sign = vanilla_option.otype
        strike = vanilla_option.strike
        ttm = vanilla_option.maturity
        vol_sqrt_t = sigma * np.sqrt(ttm)
        d1 = (np.log(spot / strike) + (rate + 0.5 * sigma ** 2) * ttm) / vol_sqrt_t
        d2 = d1 - vol_sqrt_t
        discounted_strike = np.exp(-rate * ttm) * strike
        # sign flips both cdf arguments and the overall sign, giving the
        # standard call/put formulas in one expression.
        return sign * (spot * ss.norm.cdf(sign * d1)
                       - discounted_strike * ss.norm.cdf(sign * d2))
# + id="EbN8N3RQemax" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="d1746fbe-b789-4093-fcf0-656461481da2"
#Let Sigma = 20%, Plot T and BSM when T is between (0.5,2)
stock1 = Gbm(init_state= 100., drift_ratio=0.0475, vol_ratio=0.2)
mat_bsmlist = []  # BSM call prices, one per maturity
mat_list = np.arange(0.5, 2., 0.001)
for i in mat_list:
    # Re-price the same 110-strike call at each maturity.
    mat_test = VanillaOption(otype= 1, strike= 110, maturity= i)
    mat_bsmlist.append(stock1.bsm_price(mat_test))
plt.plot(mat_list, mat_bsmlist)
plt.xlabel('Maturity')
plt.ylabel('BSM Price')
plt.title('BSM Price and Maturity Relationship');
plt.show()
# + id="2OfHScUfjhur" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="c3c4dd39-f980-4913-b604-7bb70d8428d2"
#Let T = 1. Plot Sigma and BSM price when sigma is between (0.05,0.5)
option1_bsmtest = VanillaOption(otype= 1, strike= 110, maturity= 1.)
sig_bsmlist = []  # BSM call prices, one per volatility
sig_list = np.arange(0.05, 0.5, 0.001)
for i in sig_list:
    # Re-price the same option on stocks that differ only in volatility.
    sig_test = Gbm(init_state= 100., drift_ratio=0.0475, vol_ratio= i)
    sig_bsmlist.append(sig_test.bsm_price(option1_bsmtest))
plt.plot(sig_list, sig_bsmlist)
plt.xlabel('Sigma')
plt.ylabel('BSM Price')
plt.title('BSM Price and Sigma Relationship');
plt.show()
# + [markdown] id="NpQyXMxPlDDV" colab_type="text"
# **Describe your observations. Do you think the same behavior is true for put?**
#
# In both cases, as sigma or the maturity increases, the BSM price increases. I believe this behavior is consistent with what it should be. As the volatility of a stock increases, there is a possibility for larger profits and the positive correlation between the price and the volatility reflects this relationship. Also, as the maturity of a stock increases, there is additional opportunity for larger profits and the positive correlation between the price and maturity reflects this.
#
# Since calls and puts are priced similarly under BSM pricing, and higher volatility or maturity also creates opportunity for higher profits, I believe that puts should chart the same positive linear relationship.
# + [markdown] id="XcgmdIPRnu1U" colab_type="text"
# **Could you prove your observations?**
# If there was a negative correlation instead of a positive correlation, it would mean that the lower the risk the higher the reward. Which would create an arbitrage opportunity.
#
# + [markdown] id="8LKqC5CaQLyW" colab_type="text"
# #Implied Volatility
# + [markdown] id="iHMqWczsik6_" colab_type="text"
# - Prove the following facts: Supose $f$ is a function satisfying
# - $f(0) = f_{min},$ and $\lim_{x\to \infty}f(x) = f_{max}$
# - $f$ is continuous
# - $f$ is strictly increasing
#
# then, for any $p\in (f_{min}, f_{max})$,
# - there exists unique $\hat \sigma$, such that $f(\hat \sigma) = p$ and
# $$\hat \sigma = \arg\min_{\sigma\in (0,\infty)} | f(\sigma) - p|.$$
# + [markdown] id="T82RgfRvoSO8" colab_type="text"
# **Proof**
#
# By the intermediate value theorem, it follows that there exists a unique $\hat \sigma$ such that $f( \hat \sigma) = p$, and that this $\hat \sigma$ is unique. To prove the second part of this statement, we first rewrite it. $\hat \sigma = \arg\min_{\sigma\in (0,\infty)} | f(\sigma) - p| = [x | y $ such that $ |f(y)-p| \leq |f(x)-p|]$. Since sigma is unique by IVT, we have that this set must be equal to $\hat \sigma$.
#
# + [markdown] id="F9tYcXcNcbil" colab_type="text"
# - Now we denote by $f(\sigma)$ the BSM put price with the following parameters:
# - vol_ratio = $\sigma$; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.
#
# Answer the following questions:
# - What is $f_{min}$ and $f_{max}$?
# - Is $f$ strictly increasing on $(0,\infty)$? Justify your answer.
# - If the market put price is $10$, then what's the implied volatility?
# + [markdown] id="Yb5WeJlQp971" colab_type="text"
# - Find its implied volatility with the following parameters:
# - BSM call price is 10.; spot_price = 100.; drift_ratio = .0475; strike = 110.; maturity = 1.
#
#
# + id="cE1W8b5IOC_u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="494d84e5-2f4f-4d6e-c84f-c172ef388e7e"
##What is fmin and fmax in these conditions?##
option2_voltest = VanillaOption(otype= -1, strike= 110, maturity= 1.)
sig_vol_list = []
sig_list = np.arange(0.0001, 1000., 1.)
for i in sig_list:
    sig_vol_test = Gbm(init_state= 100., drift_ratio = 0.0475, vol_ratio = i)
    # Bug fix: this cell concerns the PUT price f(sigma), but it priced
    # option1_bsmtest (the call from the earlier section) and left the put
    # option2_voltest constructed above unused.
    sig_vol_list.append(sig_vol_test.bsm_price(option2_voltest))
max_bsm_price = max(sig_vol_list)
min_bsm_price = min(sig_vol_list)
print("The maximum value of f(sigma) is " + str(max_bsm_price))
print("The minimum value of f(sigma) is " + str(min_bsm_price))
# + id="t5Oi3W-_q-wr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="137367c4-29b0-447b-d0ab-c5d939771181"
##Is f strictly increasing from 0 to infinity?##
sig_vol_list = []
sig_list = np.arange(0.0001, 12., 0.001)
for i in sig_list:
    sig_vol_test = Gbm(init_state= 100., drift_ratio = 0.0475, vol_ratio = i)
    # NOTE(review): the question concerns the BSM *put* price, but this
    # prices option1_bsmtest, the call defined earlier -- confirm whether
    # the put (option2_voltest) was intended here.
    sig_vol_list.append(sig_vol_test.bsm_price(option1_bsmtest))
plt.plot(sig_list, sig_vol_list)
plt.xlabel('Sigma')
plt.ylabel('BSM Price')
plt.title('BSM Price and Sigma Relationship');
plt.show()
# + [markdown] id="tUfYZBa9raXJ" colab_type="text"
# From the plot above, we can see that f should be strictly increasing from 0 to infinity. Once the plot approaches 100, it will even out as the input sigma approaches infinity. In the visible area here, it is never decreasing.
# + id="jvvvmYQIpD6m" colab_type="code" colab={}
def error_function(vol, gbm, option):
    """Absolute pricing error |market_price - BSM price| at volatility ``vol``.

    Side effect: overwrites ``gbm.vol_ratio`` in place -- that is how the
    candidate volatility is fed into ``gbm.bsm_price``.
    """
    gbm.vol_ratio = vol
    return abs(option.market_price - gbm.bsm_price(option))

def implied_volatility(gbm, option, init_vol = .1):
    """Return the volatility that best reproduces ``option.market_price``.

    Parameters
    ----------
    gbm : Gbm
        Stock model; its ``vol_ratio`` is mutated during the search.
    option : VanillaOption
        Option carrying the target ``market_price``.
    init_vol : float, optional
        Initial guess for the Nelder-Mead minimisation (default .1).
        Previously hard-coded; exposed as a backward-compatible keyword.
    """
    return so.fmin(error_function, init_vol,
                   args = (gbm, option), disp = 0)[0]
# + id="TJ8Yct1Psmyr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="b597db3a-0c73-4fca-d34f-43c07d038121"
##If the market put price is 10, what is the implied volatility? ##
gbm1 = Gbm(init_state= 100., drift_ratio= 0.0475)
option3_voltest = VanillaOption(otype= -1, strike= 110, market_price= 10, maturity= 1.)
print("The implied volatility is " )
implied_volatility(gbm1, option3_voltest)
# + id="FW1SO-RU7qdB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="dabe036c-ae25-49a2-ca65-90b00f6b2e10"
# Find the implied volatility of a call (otype=1) with the following parameters.
option4_iv = VanillaOption(otype= 1, strike= 110, market_price= 10, maturity= 1.)
gbm2 = Gbm(init_state=100, drift_ratio= 0.0475)
print("The implied volatility is " )
# BUG FIX: the original called implied_volatility(gbm1, ...) here, leaving the
# freshly constructed gbm2 unused; use the instance built for this exercise.
implied_volatility(gbm2, option4_iv)
|
src/HW03_All_Parts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GIS Client
#
# A web client is a web application that receives data from and pushes data to servers. This notebook presents some examples of how to connect to a geo-webserver and what is possible to do with interactive maps.
#
# Some of the examples are based on this [blog-post](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a) and the [official documentation of ipyleaflet]() :
# ## Displaying a Base Map
# +
from ipyleaflet import Map, Marker

# Location used to center the map.
center = (51.999285, 4.373791)
# Create a map instance with a center point and a zoom level.
m = Map(center=center, zoom=14)
# Create a draggable marker at the center.
marker = Marker(location=center, draggable=True)
# Add the marker to the map instance.
m.add_layer(marker)
# Display the map.
# By default ipyleaflet displays OpenStreetMap as the base map.
display(m)
# +
# Change the base map.
from ipyleaflet import basemaps

# Use a topographic base map.
m = Map(basemap=basemaps.OpenTopoMap, center=center, zoom=5)
m.add_layer(marker)
display(m)
# +
# Use a night-lights base map (NASA GIBS VIIRS Earth at Night, 2012).
m = Map(basemap=basemaps.NASAGIBS.ViirsEarthAtNight2012, center=center, zoom=3)
# More options: https://ipyleaflet.readthedocs.io/en/latest/api_reference/basemaps.html
m.add_layer(marker)
display(m)
# +
# Use a watercolor-styled (Stamen) base map.
m = Map(basemap=basemaps.Stamen.Watercolor, center=center, zoom=10)
# More options: https://ipyleaflet.readthedocs.io/en/latest/api_reference/basemaps.html
m.add_layer(marker)
display(m)
# -
# ## Add Layers from GeoServer (WMS)
# +
# For this to work, point the url at your own GeoServer instance; the
# tiger_roads layer ships with the standard GeoServer distribution.
from ipyleaflet import WMSLayer

wms = WMSLayer(
    url='http://ec2-35-158-182-30.eu-central-1.compute.amazonaws.com:8080/geoserver/tiger/wms?',
    layers='tiger_roads',
    format='image/png',
    transparent=True,
    attribution='some words of appreciation'
)
# Center on New York, where the tiger_roads layer has data.
new_york=(40.712776, -74.005974)
m = Map(basemap=basemaps.Stamen.Watercolor, center=new_york, zoom=14)
m.add_layer(wms)
display(m)
|
notebooks/session-notes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#make sure you have read binary svm before moving onto multiclass
# https://github.com/je-suis-tm/machine-learning/blob/master/binary%20support%20vector%20machine.ipynb
#this notebook includes multiclass svm ovr,ovo,dag
import networkx as nx
import cvxopt.solvers
import pandas as pd
import numpy as np
import copy
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.svm import SVC
import os
os.chdir('d:/python/data')
# -
# plz refer to the binary svm notebook for the derivation of this function
def binary_svm(x_train,y_train,kernel='linear',poly_constant=0.0,poly_power=1,gamma=5):
    """Solve the hard-margin dual SVM QP for 1-D features via cvxopt.

    x_train : 1-D feature series; y_train : labels in {-1.0, +1.0}.
    kernel  : 'linear', 'polynomial', or anything else for the RBF kernel
              (poly_constant/poly_power and gamma parameterize the latter two).
    Returns the pair (w, b) of the separating line w*x + b.
    """
    # Gram matrices of labels and features for the dual objective.
    y_product=np.outer(y_train,y_train)
    if kernel=='linear':
        x_product=np.outer(x_train,x_train)
    elif kernel=='polynomial':
        temp=np.outer(x_train,x_train)
        x_product=np.apply_along_axis(lambda x:(x+poly_constant)**poly_power,0,temp.ravel()).reshape(temp.shape)
    else:
        # RBF kernel evaluated on all pairwise differences.
        temp=np.mat([i-j for j in x_train for i in x_train]).reshape(len(x_train),len(x_train))
        x_product=np.apply_along_axis(lambda x:np.exp(-1*gamma*(np.linalg.norm(x))**2),0,temp.ravel()).reshape(temp.shape)
    # Dual QP: minimize (1/2) a^T P a + q^T a  s.t.  G a <= h  and  A a = b.
    P=cvxopt.matrix(x_product*y_product)
    q=cvxopt.matrix(-1*np.ones(len(x_train)))
    G=cvxopt.matrix(np.diag(-1 * np.ones(len(x_train))))  # enforces alpha_i >= 0
    h=cvxopt.matrix(np.zeros(len(x_train)))
    A=cvxopt.matrix(y_train,(1,len(x_train)))             # enforces sum(alpha_i*y_i) = 0
    b=cvxopt.matrix(0.0)
    solution=cvxopt.solvers.qp(P, q, G, h, A, b)
    alpha=pd.Series(solution['x'])
    # Recover the primal weight and intercept from the multipliers.
    w=np.sum(alpha*y_train*x_train)
    b=-(min(x_train[y_train==1.0]*w)+max(x_train[y_train==-1.0]*w))/2
    return w,b
# first, one vs one multiclass svm
# given n classes, we do n*(n-1)/2 binary classifications as one vs one
# we obtain w and b for each binary classification
# when we make a prediction, each (w, b) casts one vote;
# the value with the most frequency among the votes wins
def predict_ovo(features, multiclass_params):
    """Majority vote over all pairwise classifiers (internal helper).

    Each key of multiclass_params is 'classA,classB' mapping to (w, b);
    the sign of w*x+b votes for classA (-1) or classB (+1).
    Returns an int pandas Series of predicted labels.
    """
    predict = []
    for i in features:
        temp = []
        for j in multiclass_params:
            w = multiclass_params[j][0]
            b = multiclass_params[j][1]
            value = np.sign(np.multiply(w, i) + b)
            temp.append(j.split(',')[0] if value == -1.0 else j.split(',')[1])
        predict.append(max(set(temp), key=temp.count))
    return pd.Series(predict).apply(int)


def get_accuracy_ovo(train, test, **kwargs):
    """Train a one-vs-one multiclass SVM and return train/test accuracy strings.

    train/test are DataFrames with a 1-D feature column 'x' and an integer
    label column 'y'; kwargs are forwarded to binary_svm.
    """
    # Fit one binary SVM per unordered class pair.
    multiclass = train['y'].drop_duplicates().tolist()
    multiclass_params = {}
    for i in range(len(multiclass)):
        for j in range(i + 1, len(multiclass)):
            data = copy.deepcopy(train)
            # class i -> -1.0, class j -> +1.0, every other class dropped
            temp = np.select([data['y'] == multiclass[i], data['y'] == multiclass[j]],
                             [-1.0, 1.0], default=0.0)
            data['y'] = temp
            data = data[data['y'] != 0.0]
            multiclass_params['{},{}'.format(multiclass[i],
                                             multiclass[j])] = binary_svm(data['x'],
                                                                          data['y'],
                                                                          **kwargs)
    result = []
    # CONSISTENCY FIX: the original duplicated the voting loop for train and
    # test (with a cosmetic -1.0 vs -1 mismatch); both now share predict_ovo.
    predict = predict_ovo(train['x'], multiclass_params)
    result.append('train accuracy: %.2f' % (
        len(predict[predict == train['y']]) / len(predict) * 100) + '%')
    predict = predict_ovo(test['x'], multiclass_params)
    result.append('test accuracy: %.2f' % (
        len(predict[predict == test['y']]) / len(predict) * 100) + '%')
    return result
# alternatively, one vs rest multiclass svm
# given n classes, we do n binary classifications as one vs rest
# we obtain w and b for each binary classification
# at prediction time we evaluate every decision function w*x+b
# and select the classifier with the maximum decision function value
def predict_ovr(features, multiclass_params):
    """Return int predictions: the class whose decision value w*x+b is largest
    (internal helper shared by the train and test passes)."""
    predict = []
    for i in features:
        max_value = float('-inf')
        idx = 0
        for j in multiclass_params:
            w = multiclass_params[j][0]
            b = multiclass_params[j][1]
            value = np.multiply(w, i) + b
            if value > max_value:
                max_value = value
                idx = j
        predict.append(idx)
    return pd.Series(predict).apply(int)


def get_accuracy_ovr(train, test, **kwargs):
    """Train a one-vs-rest multiclass SVM and return train/test accuracy strings.

    train/test are DataFrames with a 1-D feature column 'x' and an integer
    label column 'y'; kwargs are forwarded to binary_svm.
    """
    multiclass = train['y'].drop_duplicates()
    multiclass_params = {}
    # Fit one binary SVM per class: target class -> +1.0, the rest -> -1.0.
    for i in multiclass:
        data = copy.deepcopy(train)
        data['y'] = np.where(data['y'] == i, 1.0, -1.0)
        multiclass_params[i] = binary_svm(data['x'], data['y'], **kwargs)
    result = []
    # REFACTOR: the original duplicated the argmax loop for the train and
    # test samples; both now share predict_ovr.
    predict = predict_ovr(train['x'], multiclass_params)
    result.append('train accuracy: %.2f' % (
        len(predict[predict == train['y']]) / len(predict) * 100) + '%')
    predict = predict_ovr(test['x'], multiclass_params)
    result.append('test accuracy: %.2f' % (
        len(predict[predict == test['y']]) / len(predict) * 100) + '%')
    return result
# dagsvm is not supported in sklearn
# it is an optimization for multiclass ovo:
# it still trains n*(n-1)/2 pairwise classifiers, but uses graph theory
# so each prediction only needs n-1 binary comparisons
def get_accuracy_dag(train,test,**kwargs):
    """Train a DAG multiclass SVM and return train/test accuracy strings.

    Same pairwise training as one-vs-one, but prediction walks a directed
    graph, eliminating one candidate class per comparison (n-1 comparisons
    per sample instead of n*(n-1)/2 votes).  kwargs go to binary_svm.
    """
    #the same as ovo
    #except one more line to build a graph structure
    #we denote the class as the node
    #the edge as binary svm between two classes
    #the weight as parameters w and b
    multiclass=train['y'].drop_duplicates().tolist()
    graph=nx.DiGraph()
    for i in range(len(multiclass)):
        for j in range(i+1,len(multiclass)):
            data=copy.deepcopy(train)
            # class i maps to -1.0, class j to +1.0; other classes dropped
            temp=np.select([data['y']==multiclass[i],data['y']==multiclass[j]], \
                           [-1.0,1.0],default=0.0)
            data['y']=temp
            data=data[data['y']!=0.0]
            graph.add_edge(multiclass[i],multiclass[j],weight=binary_svm(data['x'], \
                                                                         data['y'], \
                                                                         **kwargs))
    result=[]
    #use directed acyclic graph to boost the speed of ovo
    #for ovo, the prediction cost is n*(n-1)/2 comparisons
    #where n is the number of classes
    #for dag, the prediction cost is only n-1
    #in dag, once we have checked two classes and got the result
    #we remove the losing class from the graph structure
    #and move on to the comparison with the next class
    #until we only have one class left in the dag
    #which becomes the final result
    #as a tradeoff for time complexity
    #the result isn't as accurate as ovo
    predict=[]
    for i in train['x']:
        g=copy.deepcopy(graph)
        while len(g.nodes)>1:
            #beware, graph.nodes isn't a list type
            node0=list(g.nodes)[0]
            node1=list(g.nodes)[1]
            #since the graph structure is directed
            #the opposite direction between nodes does not exist,
            #so the lookup may fail with KeyError;
            #the try/except falls back to the reverse edge.
            #if the graph were undirected it would be impossible
            #to identify which class was encoded as -1.0
            try:
                w=g[node0][node1]['weight'][0]
                b=g[node0][node1]['weight'][1]
                value=np.sign(np.multiply(w,i)+b)
                # edge source (node0) is the -1 class: sign -1 keeps node0
                g.remove_node(node1 if value==-1 else node0)
            except KeyError:
                w=g[node1][node0]['weight'][0]
                b=g[node1][node0]['weight'][1]
                value=np.sign(np.multiply(w,i)+b)
                g.remove_node(node0 if value==-1 else node1)
        predict+=list(g.nodes)
    predict=pd.Series(predict).apply(int)
    result.append('train accuracy: %.2f'%(
        len(predict[predict==train['y']])/len(predict)*100)+'%')
    #the same as training samples
    predict=[]
    for i in test['x']:
        g=copy.deepcopy(graph)
        while len(g.nodes)>1:
            node0=list(g.nodes)[0]
            node1=list(g.nodes)[1]
            try:
                w=g[node0][node1]['weight'][0]
                b=g[node0][node1]['weight'][1]
                value=np.sign(np.multiply(w,i)+b)
                g.remove_node(node1 if value==-1 else node0)
            except KeyError:
                w=g[node1][node0]['weight'][0]
                b=g[node1][node0]['weight'][1]
                value=np.sign(np.multiply(w,i)+b)
                g.remove_node(node0 if value==-1 else node1)
        predict+=list(g.nodes)
    predict=pd.Series(predict).apply(int)
    result.append('test accuracy: %.2f'%(
        len(predict[predict==test['y']])/len(predict)*100)+'%')
    return result
# benchmark: the official sklearn package with the same parameters
def skl_multiclass_svm(x_train, x_test, y_train, y_test, **kwargs):
    """Fit sklearn's SVC on the 1-D features and print train/test accuracy.

    kwargs are forwarded unchanged to the SVC constructor.
    """
    fit_features = np.array(x_train).reshape(-1, 1)
    fit_labels = np.array(y_train).ravel()
    classifier = SVC(**kwargs).fit(fit_features, fit_labels)
    train = classifier.score(fit_features, fit_labels) * 100
    test = classifier.score(np.array(x_test).reshape(-1, 1),
                            np.array(y_test).ravel()) * 100
    print('\ntrain accuracy: %s'%(train)+'%')
    print('\ntest accuracy: %s'%(test)+'%')
# Load the iris data set and encode the three species as integer labels 1-3.
df=pd.read_csv('iris.csv')
df['y']=np.select([df['type']=='Iris-setosa', \
                   df['type']=='Iris-versicolor', \
                   df['type']=='Iris-virginica'],[1,2,3])
#for the simplicity, let us reduce the dimension of x to 1 via PCA
temp=pd.concat([df[i] for i in df.columns if 'length' in i or 'width' in i],axis=1)
x=PCA(n_components=1).fit_transform(temp)
x=pd.Series([x[i].item() for i in range(len(x))])
y=df['y']
#train test split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3)
#crucial!!!!
#or we would get errors in the next step
# (the indices must be reset after the shuffle done by train_test_split,
#  otherwise positional alignment against the prediction series breaks)
x_test.reset_index(inplace=True,drop=True)
y_test.reset_index(inplace=True,drop=True)
x_train.reset_index(inplace=True,drop=True)
y_train.reset_index(inplace=True,drop=True)
train=pd.DataFrame({'x':x_train,'y':y_train})
test=pd.DataFrame({'x':x_test,'y':y_test})
# Run the three self-implemented multiclass strategies.
ovr=get_accuracy_ovr(train,test)
ovo=get_accuracy_ovo(train,test)
dag=get_accuracy_dag(train,test)
print('one vs rest self implementation')
for i in ovr:
    print('\n',i)
#normally ovo should work better than ovr
#as the training cost of ovo is higher
#n*(n-1)/2>n
print('one vs one self implementation')
for i in ovo:
    print('\n',i)
print('dag self implementation')
for i in dag:
    print('\n',i)
# Compare against sklearn's SVC with both decision-function shapes.
print('one vs rest sklearn')
skl_multiclass_svm(x_train,x_test,y_train,y_test,kernel='linear',decision_function_shape='ovr')
print('one vs one sklearn')
skl_multiclass_svm(x_train,x_test,y_train,y_test,kernel='linear',decision_function_shape='ovo')
|
multiclass support vector machine.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow2_p36)
# language: python
# name: conda_tensorflow2_p36
# ---
# +
import time
import os
import gc
import pandas as pd
import numpy as np
from sklearn import linear_model
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
np.set_printoptions(precision=6, suppress=True)
import PIL
from PIL import Image
import tensorflow as tf
from tensorflow.keras import *
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
tf.__version__
# -
tf.config.list_physical_devices('GPU')
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Enable memory growth so TF does not grab all GPU memory up front.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before the GPUs have been initialized.
        print(e)
# Multi-GPU data-parallel training strategy.
strategy = tf.distribute.MirroredStrategy()
# # Hyperparameters
PRE_TRAINED = './models/pg_convnet.h5'  # weights of the previously trained convnet
BEST_PATH = './models/pg_cv_transfer_test.h5'  # checkpoint file for this run
TRAINING_EPOCHS = 200
LEARNING_RATE = 0.002
EPSILON = 1e-08  # Adam epsilon
BATCH_SIZE = 128
# # Data preparation
# Load the pre-built dataset: per-season indices and labels plus the
# min/max values later used to undo the label scaling.
l = np.load('./results/pg_dataset.npz', allow_pickle=True)
data_indices_2020S = l['data_indices_2020S']
output_label_2020S = l['output_label_2020S']
data_indices_2020W = l['data_indices_2020W']
output_label_2020W = l['output_label_2020W']
OUTPUT_MAXS = l['OUTPUT_MAXS']
OUTPUT_MINS = l['OUTPUT_MINS']
COLUMNS = ['node_len', 'node_dia', 'plant_h', 'leaf_area']
output_2020S_df = pd.DataFrame(output_label_2020S, index=data_indices_2020S, columns=COLUMNS)
output_2020W_df = pd.DataFrame(output_label_2020W, index=data_indices_2020W, columns=COLUMNS)
DIRECTORY = './images/2020_W/'
dir_list = os.listdir(DIRECTORY)
# Keep only the image directories belonging to the LAI experiments.
dataset_list = [direc for direc in dir_list if direc.startswith('LAI_OVER') or direc.startswith('LAI2_OVER')]
dataset_list.sort()
data_indices = []
input_images = []
output_labels = []
for DATE in output_2020W_df.index:
    # NOTE(review): this loop variable shadows the DIRECTORY constant above.
    for DIRECTORY in dataset_list:
        if DATE in DIRECTORY:
            file_list = os.listdir(f'./images/2020_W/{DIRECTORY}')
            file_list = [file for file in file_list if file.endswith('.jpg')]
            for FILE in file_list:
                # Filename ends in a time-of-day token; keep daytime shots
                # only (08:00-16:00 inclusive).
                TIME = pd.Timedelta(FILE.split()[-1].split('.')[0])
                if TIME >= pd.Timedelta('08:00:00') and TIME <= pd.Timedelta('16:00:00'):
                    image = Image.open(f'./images/2020_W/{DIRECTORY}/{FILE}')
                    data_indices.append(pd.Timestamp(DATE))
                    input_images.append(img_to_array(image))
                    output_labels.append(output_2020W_df.loc[DATE].values)
input_images = np.stack(input_images, axis=0)
output_labels = np.stack(output_labels, axis=0)
data_indices = np.array(data_indices)
print(len(data_indices))
print(input_images.shape)
print(output_labels.shape)
# Downsample to 3000 examples (without replacement, fixed seed).
data_indices, input_images, output_labels = resample(data_indices, input_images, output_labels, n_samples=3000, replace=False, random_state=4574)
print(len(data_indices))
print(input_images.shape)
print(output_labels.shape)
gc.collect()
# NOTE(review): only 30% of the samples go to train+val and the remaining
# 70% are held out as test -- confirm this split is intentional.
N_TRAIN = int(output_labels.shape[0]*.3)
train_input = input_images[:N_TRAIN, ...]
train_label = output_labels[:N_TRAIN, ...]
train_index = data_indices[:N_TRAIN]
test_input = input_images[N_TRAIN:, ...]
test_label = output_labels[N_TRAIN:, ...]
test_index = data_indices[N_TRAIN:]
# Carve a validation set (30%) out of the training portion.
train_index, val_index, train_input, val_input, train_label, val_label = train_test_split(train_index, train_input, train_label, test_size=0.3, shuffle=True, random_state=3101)
# Augmentation pipeline: rescale to [0,1], small shifts, horizontal flips.
datagen = ImageDataGenerator(
    rescale=1/255,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    fill_mode='nearest')
train_iterator = datagen.flow(train_input, train_label, batch_size=BATCH_SIZE)
val_iterator = datagen.flow(val_input, val_label, batch_size=BATCH_SIZE)
test_iterator = datagen.flow(test_input, test_label, batch_size=BATCH_SIZE)
print(f'number of data set: {input_images.shape[0]}')
print(f'number of training set: {train_input.shape[0]}')
print(f'number of validation set: {val_input.shape[0]}')
print(f'number of test set: {test_input.shape[0]}')
gc.collect()
# # Model construction
class ResidualBlock(layers.Layer):
    """Bottleneck residual block: 1x1 -> 3x3 (strided) -> 1x1 convs plus a
    projection shortcut, with ReLU applied after the residual add.

    The final conv expands to 4*num_filter channels; the 1x1 shortcut conv
    matches that channel count and stride so the tensors can be added.
    """
    def __init__(self, num_filter, stride=1):
        super(ResidualBlock, self).__init__()
        self.n = num_filter  # base filter count of the bottleneck
        self.s = stride      # stride of the middle conv and the shortcut
        self.conv1 = layers.Conv2D(filters=self.n, kernel_size=1, strides=1, kernel_initializer='glorot_normal', padding='same')
        self.norm1 = layers.BatchNormalization()
        self.act1 = layers.Activation(activations.relu)
        self.conv2 = layers.Conv2D(filters=self.n, kernel_size=3, strides=stride, kernel_initializer='glorot_normal', padding='same')
        self.norm2 = layers.BatchNormalization()
        self.act2 = layers.Activation(activations.relu)
        self.conv3 = layers.Conv2D(filters=self.n*4, kernel_size=1, strides=1, kernel_initializer='glorot_normal', padding='same')
        self.norm3 = layers.BatchNormalization()
        self.act3 = layers.Activation(activations.relu)
        # Projection shortcut so the residual add has a matching shape.
        self.downsample = Sequential()
        self.downsample.add(layers.Conv2D(filters=self.n*4, kernel_size=1, strides=stride, kernel_initializer='glorot_normal'))
        self.downsample.add(layers.BatchNormalization())
    def call(self, inp, training=None, **kwargs):
        """Forward pass; `training` is forwarded to the BatchNorm layers.

        NOTE(review): the shortcut's BatchNorm is called without the
        `training` flag -- verify that is intended.
        """
        shortcut = self.downsample(inp)
        inp = self.act1(self.norm1(self.conv1(inp), training=training))
        inp = self.act2(self.norm2(self.conv2(inp), training=training))
        inp = self.norm3(self.conv3(inp), training=training)
        # Residual add followed by the final ReLU.
        oup = self.act3(layers.add([shortcut, inp]))
        return oup
def block_maker(num_filter, num_blocks, stride=1):
    """Stack ``num_blocks`` ResidualBlocks into one Sequential stage.

    Only the first block receives the given stride (for downsampling);
    all remaining blocks use stride 1.
    """
    stage = tf.keras.Sequential()
    stage.add(ResidualBlock(num_filter, stride=stride))
    remaining = num_blocks - 1
    while remaining > 0:
        stage.add(ResidualBlock(num_filter, stride=1))
        remaining -= 1
    return stage
# +
class ResNet2D(Model):
    """ResNet-style regression network with a 4-unit linear output head.

    layer_params lists the number of ResidualBlocks in each of the four
    residual stages.
    """
    def __init__(self, layer_params):
        super(ResNet2D, self).__init__()
        # Stem: 7x7/2 conv, BatchNorm, ReLU, 3x3/2 max-pool.
        self.conv1 = layers.Conv2D(filters=64, kernel_size=7, strides=2, kernel_initializer='glorot_normal', padding="same")
        self.norm1 = layers.BatchNormalization()
        self.act1 = layers.Activation(activations.relu)
        self.pool1 = layers.MaxPool2D(pool_size=(3, 3), strides=2, padding="same")
        # Four residual stages; stages 2-4 downsample with stride 2.
        self.layer1 = block_maker(num_filter=64, num_blocks=layer_params[0])
        self.layer2 = block_maker(num_filter=128, num_blocks=layer_params[1], stride=2)
        self.layer3 = block_maker(num_filter=256, num_blocks=layer_params[2], stride=2)
        self.layer4 = block_maker(num_filter=512, num_blocks=layer_params[3], stride=2)
        # self.avgpool = layers.GlobalAveragePooling1D()
        # Regression head: flatten -> Dense 512 -> Dense 128 -> 4 outputs.
        self.flat = layers.Flatten()
        self.dense1 = layers.Dense(units=512, activation='relu')
        self.dense2 = layers.Dense(units=128, activation='relu')
        self.fc = layers.Dense(units=4)
    def call(self, inp, training=None, mask=None):
        """Forward pass; `training` is forwarded to BN-containing stages."""
        inp = self.conv1(inp)
        inp = self.norm1(inp, training=training)
        inp = self.act1(inp)
        inp = self.pool1(inp)
        inp = self.layer1(inp, training=training)
        inp = self.layer2(inp, training=training)
        inp = self.layer3(inp, training=training)
        inp = self.layer4(inp, training=training)
        # inp = self.avgpool(inp)
        inp = self.dense1(self.flat(inp))
        inp = self.dense2(inp)
        output = self.fc(inp)
        return output
# -
with strategy.scope():
    # Build the ResNet backbone under the distribution strategy.
    model = ResNet2D(layer_params=[2, 3, 2, 1])
# +
# Training callbacks: LR decay on plateau, best-weights checkpoint, early stop.
cbs = callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=.1, patience=5, verbose=0, mode='auto',
    min_delta=0.0001, cooldown=0, min_lr=0)
save = callbacks.ModelCheckpoint(
    BEST_PATH, monitor='val_loss', verbose=0,
    save_best_only=True, save_weights_only=True, mode='min', save_freq='epoch')
early_stop = callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=20)
# -
with strategy.scope():
    opt = optimizers.Adam(learning_rate=LEARNING_RATE, epsilon=EPSILON)
    model.compile(optimizer=opt, loss='mae')
# One forward pass builds the model's variables so weights can be loaded.
model.predict(val_input[:5, ...]/255)
model.load_weights(PRE_TRAINED)
model.save_weights(BEST_PATH) # For deep copy
model.load_weights(BEST_PATH)
with strategy.scope():
    # New input conv and regression head wrapped around the transferred trunk.
    prediction_layer1 = layers.Conv2D(filters=64, kernel_size=7, strides=2, kernel_initializer='glorot_normal', padding="same")
    # layers[1:-3] drops the first conv and the final three dense layers.
    base_model = Sequential(model.layers[1:-3])
    prediction_layer2 = layers.Dense(512, activation='relu')
    prediction_layer3 = layers.Dense(128, activation='relu')
    prediction_layer4 = layers.Dense(4)
base_model.trainable = False  # freeze the transferred trunk
with strategy.scope():
    new_model = Sequential([
        prediction_layer1,
        base_model,
        prediction_layer2,
        prediction_layer3,
        prediction_layer4
    ])
gc.collect()
with strategy.scope():
    opt = optimizers.Adam(learning_rate=LEARNING_RATE, epsilon=EPSILON)
    new_model.compile(optimizer=opt, loss='mae')
new_model.fit(train_iterator, epochs=TRAINING_EPOCHS, validation_data=val_iterator,
              verbose=1, callbacks=[cbs, save, early_stop])
# Restore the best checkpoint found during training and sanity-check it.
new_model.load_weights(BEST_PATH)
new_model.evaluate(val_iterator)
# Free the training/validation arrays before the test-set prediction.
del(train_index)
del(train_input)
del(train_iterator)
del(train_label)
del(val_index)
del(val_input)
del(val_iterator)
del(val_label)
gc.collect()
# BUG FIX: predictions were taken from the old `model` (pre-trained weights)
# instead of the fine-tuned `new_model` whose best checkpoint was just loaded.
pred_result = new_model.predict(test_input/255)
print(pred_result.shape)
print(test_label.shape)
# Undo the min-max label scaling to recover values in original units.
test_label = (OUTPUT_MAXS - OUTPUT_MINS)*test_label + OUTPUT_MINS
pred_result = (OUTPUT_MAXS - OUTPUT_MINS)*pred_result + OUTPUT_MINS
# Save labels and predictions side by side, indexed by sample date.
pred_df = pd.DataFrame(np.concatenate([test_label, pred_result], axis=1), index=test_index)
pred_df.columns = ['label_node_len', 'label_node_dia', 'label_plant_h', 'label_leaf_area', 'pred_node_len', 'pred_node_dia', 'pred_plant_h', 'pred_leaf_area']
pred_df.to_csv('./results/model_output/pg_transfer_pred_result.csv')
|
legacy/9-5_pg_CNN_transfer_exp.ipynb
|