code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import spacy
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import sys
import pandas as pd
import nltk
from nltk.corpus import stopwords
# Fetch the NLTK stopword corpus (no-op if already present) and cache the English set.
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
# Read the raw emotion-labelled corpus. Latin-1 decoding avoids errors on
# non-UTF-8 bytes in the source file. A context manager guarantees the file
# handle is closed even if readlines() raises.
with open('../DATA1.txt', encoding="ISO-8859-1") as f:
    x = f.readlines()
# +
# Parse raw lines into parallel emotion / sentence lists.
# A line containing a tab starts a new record; lines without a tab are
# continuations of a sentence that spans multiple lines.
# NOTE(review): assumes each record looks like '<emotion>\t"<text>"\n' — confirm
# against DATA1.txt.
emotion = []
sentences = []
currIdx = 0  # index of the first sentence whose closing quote has not been seen yet
for line in x:
    slashIdx = line.find('\t')
    if slashIdx != -1:
        emotion.append(line[0:slashIdx])
        # slashIdx+2 skips the tab and opening quote; len(line)-2 drops the
        # trailing quote/newline.
        sentences.append(line[slashIdx+2:len(line)-2])
    else:
        # Continuation line: append to the still-open sentence.
        sentences[currIdx] += ' ' + line[0:len(line)-2]
    # A closing quote on any line means the sentence at currIdx is complete.
    # NOTE(review): this assumes quotes only appear as record delimiters; an
    # embedded '"' inside a sentence would desynchronize currIdx — verify.
    if line.find('"') != -1:
        currIdx += 1
# +
# NOTE(review): the "en" shortcut link was removed in spaCy 3; use
# "en_core_web_sm" (or another installed pipeline) there.
nlp = spacy.load("en")
means = []
maxs = []
mins = []
# Summarize each sentence's word vectors (mean/max/min over tokens that have a
# vector and are not stopwords), while dumping the mean vectors + labels to
# word_vec.txt. Using a context manager guarantees the file is flushed and
# closed even if nlp() or np.vstack() raises mid-loop (the original left the
# handle open on error).
with open("word_vec.txt", "w+") as f1:
    for i in range(len(sentences)):
        sentences_tokens = nlp(sentences[i])
        sentences_vectors = np.vstack([word.vector for word in sentences_tokens if (word.has_vector and str(word) not in stop_words)])
        means.append(np.mean(sentences_vectors, axis=0))
        maxs.append(np.max(sentences_vectors, axis=0))
        mins.append(np.min(sentences_vectors, axis=0))
        # One bracketed row per sentence: all mean-vector components, then label.
        f1.write('[')
        for j in range(len(means[i])):
            f1.write(str(means[i][j]) + ', ')
        f1.write(emotion[i] + '],\n')
# -
# Project the per-sentence max-pooled vectors into 2-D for visualization.
pca = PCA(n_components=2)
sentences_vecs_transformed = pca.fit_transform(maxs)
# Label column prepended for export/inspection (unused by the plots below).
sentences_vecs_transformed1 = np.c_[emotion, sentences_vecs_transformed]
# Start offsets of each emotion block in the dataset:
# anger 0
# disgust 1096
# fear 2192
# guilt 3287
# joy 4380
# sadness 5474
# shame 6570
# Scatter 50 sentences per emotion.
# NOTE(review): the 4000:4050 slice labelled 'joy' falls inside the guilt
# range (3287-4379) per the offsets above — verify the intended indices.
plt.plot(sentences_vecs_transformed[0:50,0], sentences_vecs_transformed[0:50,1], 'ro') # anger
plt.plot(sentences_vecs_transformed[1500:1550,0], sentences_vecs_transformed[1500:1550,1], 'bo') # disgust
plt.plot(sentences_vecs_transformed[3000:3050,0], sentences_vecs_transformed[3000:3050,1], 'go') # fear
plt.plot(sentences_vecs_transformed[3500:3550,0], sentences_vecs_transformed[3500:3550,1], 'ko') # guilt
plt.plot(sentences_vecs_transformed[4000:4050,0], sentences_vecs_transformed[4000:4050,1], 'co') # joy
plt.plot(sentences_vecs_transformed[6000:6050,0], sentences_vecs_transformed[6000:6050,1], 'yo') # sadness
plt.plot(sentences_vecs_transformed[7000:7050,0], sentences_vecs_transformed[7000:7050,1], 'mo') # shame
plt.show()
| embeddings/wordvecs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torch
# language: python
# name: torch
# ---
# %load_ext autoreload
# %autoreload 2
# +
import numpy as np
import pandas as pd
#np.random.seed(123)
# viz
import matplotlib.pyplot as plt
# notebook settings
# Notebook display settings: echo every expression's result, show wide frames.
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
pd.set_option('display.max_columns', 1000)
# -
# ## Sample Prep
# Load the GDC sample sheet describing each downloaded RNA-seq file.
samples = pd.read_csv('../data/TCGA/rna-seq_adeno/meta/gdc_sample_sheet.2020-01-27.tsv', sep="\t")
# get file type (second dotted component of the file name, e.g. 'FPKM')
samples['data'] = [val[1] for i,val in samples['File Name'].str.split(".").items()]
samples['Project ID'].value_counts()
# Samples with RNAseq adjacent normal tissue
samples['Sample Type'].value_counts()
# Normalize duplicated sample-type labels produced by multi-sample rows.
samples.loc[samples['Sample Type']=='Primary Tumor, Primary Tumor', 'Sample Type'] = 'Primary Tumor'
samples.loc[samples['Sample Type']=='Solid Tissue Normal, Solid Tissue Normal', 'Sample Type'] = 'Solid Tissue Normal'
samples['Sample Type'].value_counts()
# all cases with adjacent normal tissue
cases = samples[samples['Sample Type']=='Solid Tissue Normal']['Case ID']
# disparity in cases: count tumor vs. normal FPKM samples among those cases
samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Primary Tumor')
        & (samples['data']=='FPKM')]['Case ID'].nunique()
samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Solid Tissue Normal')
        & (samples['data']=='FPKM')]['Case ID'].nunique()
# divide, join, subset: keep only cases that have both a tumor and a normal FPKM sample
case_tumor = samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Primary Tumor') &
                     (samples['data']=='FPKM')]
case_norm = samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Solid Tissue Normal') &
                    (samples['data']=='FPKM')]
cases = pd.merge(case_tumor['Case ID'], case_norm['Case ID'])['Case ID']
cases.shape
case_tumor = case_tumor[case_tumor['Case ID'].isin(cases)]
case_norm = case_norm[case_norm['Case ID'].isin(cases)]
# random shuffle so test dataset loader has random order
cases = pd.concat([case_tumor, case_norm]).sample(frac=1)
case_tumor.shape
case_norm.shape
cases.shape
# ## Shuffle Labels
# NOTE(review): np.random.choice defaults to replace=True, so this RESAMPLES
# the labels rather than permuting them — confirm that is the intended control.
cases['Sample Type'] = np.random.choice(cases['Sample Type'].values, len(cases))
# ## Map Ensembl genes to Proteins
id_map = pd.read_csv("/srv/home/wconnell/keiser/data/uniprot_mapping_ids/map_ensembl_uniprot.csv")
reviewed_proteins = pd.read_csv("/srv/home/wconnell/keiser/data/uniprot_mapping_ids/TCGA_rnaseq_uniprot_features.tab.gz", sep="\t")
proteins = pd.merge(id_map, reviewed_proteins, left_on='UNIPROT_ID', right_on='Entry name')
# First primary gene name per protein record.
proteins['hgnc'] = [gene.split(";")[0] for gene in proteins['Gene names (primary )']]
proteins.shape
# ## Dataset Prep
# Need to ensure test set has health/diseased samples matched for each case
def train_test_split_case(samples, cases, test_size=0.25):
    """
    Randomly split a samples dataframe into train/test sets for Siamese evaluation.

    Only cases with exactly one matched pair (two rows) are eligible for the
    test set; every other row — including cases with more than two samples —
    stays in the train set, whether or not those extras are exactly matched.
    """
    # Round the target test size down to an even count so it holds whole pairs.
    n_test = int(len(samples) * test_size)
    if n_test % 2 != 0:
        n_test -= 1
    # Every case must contribute at least one matched pair.
    pair_counts = samples.groupby([cases]).size()
    assert (pair_counts > 1).all(), "Some samples do not have at least a single matched pair."
    paired_ids = pair_counts[pair_counts == 2].index.values
    # Draw n_test/2 cases; each contributes its two rows to the test set.
    test_ids = np.random.choice(paired_ids, size=int(n_test/2), replace=False)
    in_test = samples[cases].isin(test_ids)
    train_df = samples[~in_test].reset_index(drop=True)
    test_df = samples[in_test].reset_index(drop=True)
    return train_df, test_df
# +
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
target = 'Sample Type'
# Fix category order so 'Solid Tissue Normal' encodes to 0 and 'Primary Tumor' to 1.
cases[target] = cases[target].astype('category').cat.reorder_categories(['Solid Tissue Normal', 'Primary Tumor'])
train, test = train_test_split_case(samples=cases, cases='Case ID', test_size=0.25)
train[target].value_counts()
test[target].value_counts()
# -
cases
# +
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.autograd import Variable
#torch.manual_seed(123)
from trainer import fit
import visualization as vis
import numpy as np
cuda = torch.cuda.is_available()
print("Cuda is available: {}".format(cuda))
# Integer class id per category, in category order (normal=0, tumor=1).
classes = {key:val for val,key in enumerate(train[target].cat.categories.values)}
classes
# +
from tcga_datasets import TCGA, SiameseTCGA
root_dir = "../data/TCGA/rna-seq_adeno/"
batch_size = 1
train_dataset = TCGA(root_dir, samples=train, train=True, target=target, log=True)
test_dataset = TCGA(root_dir, samples=test, train=False, target=target, log=True)
# Standardize features on the train split only; apply the same transform to test
# to avoid leaking test statistics into the scaler.
scaler = StandardScaler()
train_dataset.data = pd.DataFrame(scaler.fit_transform(train_dataset.data),
                                  index=train_dataset.data.index,
                                  columns=train_dataset.data.columns)
test_dataset.data = pd.DataFrame(scaler.transform(test_dataset.data),
                                 index=test_dataset.data.index,
                                 columns=test_dataset.data.columns)
kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {'num_workers': 10}
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# -
# #### Subset gene data to annotated proteins
# +
assert np.array_equal(train_dataset.data.columns, test_dataset.data.columns)
# Strip the Ensembl version suffix (e.g. 'ENSG000...\.12' -> 'ENSG000...').
parsed_cols = [ens[0] for ens in train_dataset.data.columns.str.split(".")]
train_dataset.data.columns, test_dataset.data.columns = parsed_cols, parsed_cols
# Keep only genes that map to a reviewed protein.
protein_overlap_idx = np.isin(train_dataset.data.columns, proteins['ENSEMBL_ID'].values)
train_dataset.data = train_dataset.data.loc[:,protein_overlap_idx]
test_dataset.data = test_dataset.data.loc[:,protein_overlap_idx]
# -
# ## Siamese Network
# +
# Step 1: set up dataloaders over pairs
root_dir = "../data/TCGA"
siamese_train_dataset = SiameseTCGA(train_dataset) # Returns pairs of images and target same/different
siamese_test_dataset = SiameseTCGA(test_dataset)
batch_size = 8
kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {}
siamese_train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
siamese_test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# Set up the network and training parameters
from tcga_networks import EmbeddingNet, SiameseNet
from losses import ContrastiveLoss
from metrics import AccumulatedAccuracyMetric
# Step 2: embedding network sized to the (protein-filtered) feature count
n_samples, n_features = siamese_train_dataset.data.shape
embedding_net = EmbeddingNet(n_features)
# Step 3: twin-tower wrapper sharing the embedding weights
model = SiameseNet(embedding_net)
if cuda:
    model.cuda()
# Step 4: contrastive loss + Adam; decay LR 10x every 8 epochs
margin = 1.
loss_fn = ContrastiveLoss(margin)
lr = 1e-3
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
n_epochs = 10
# print training metrics every log_interval * batch_size
log_interval = 30
# -
train_loss, val_loss = fit(siamese_train_loader, siamese_test_loader, model, loss_fn, optimizer, scheduler,
                           n_epochs, cuda, log_interval)
# Loss curves: train (red) vs validation (blue).
plt.plot(range(0, n_epochs), train_loss, 'rx-')
plt.plot(range(0, n_epochs), val_loss, 'bx-')
train_embeddings_cl, train_labels_cl = vis.extract_embeddings(train_loader, model)
vis.plot_embeddings(train_embeddings_cl, train_labels_cl, siamese_train_dataset.labels_dict)
# Frustratingly, the test_loader (shuffle=False) only works to load here because it has been specified for 1 sample at a time. The data order is the same as siamese_test_loader (shuffle=False).
val_embeddings_baseline, val_labels_baseline = vis.extract_embeddings(test_loader, model)
vis.plot_embeddings(val_embeddings_baseline, val_labels_baseline, siamese_test_dataset.labels_dict)
# #### Write out test set for DE analysis
siamese_test_dataset.samples['label'] = siamese_test_dataset.labels
siamese_test_dataset.samples.to_pickle("../data/tmp/test_dataset_meta.pkl.gz")
siamese_test_dataset.data.to_pickle("../data/tmp/test_dataset.pkl.gz")
# ## Integrated Gradients
# Test completeness axiom through comparison of different baselines
#
# "Integrated gradients satisfy an
# axiom called completeness that the attributions add up to
# the difference between the output of F at the input x and
# the baseline x'."
import copy
from captum.attr import LayerActivation
from captum.attr import IntegratedGradients
# ignore matching samples by case ID in this experiment
#
# because randomly shuffling the labels will not preserve
#
# matched health/diseased samples for each case
#
# not possible to test that with only 2 groups
def matched_pairs(SiameseTCGA, exp, ctrl):
    """
    Returns raw data from pairs of samples by case in test set.

    Note that the parameter shadows the SiameseTCGA class name; it is the
    dataset instance. `exp` / `ctrl` are 'Sample Type' labels. Returns
    (ctrl_data, exp_data, cases).
    """
    gb = SiameseTCGA.samples.groupby('Sample Type')
    # One frame per sample type, each sorted by case so the pairs line up.
    gb_splits = [gb.get_group(sample_type).sort_values(by='Case ID') for sample_type in gb.groups]
    assert np.array_equal(gb_splits[0]['Case ID'].values, gb_splits[1]['Case ID'].values), "Order of matched sample pairs not in sync."
    cases = gb_splits[0]['Case ID'].values
    # Tensors with gradients enabled for downstream attribution.
    # NOTE(review): gb.groups[...] yields indices in original (unsorted) order,
    # unlike the case-sorted gb_splits above — confirm the intended alignment
    # between these rows and `cases`.
    ctrl_data = Variable(SiameseTCGA.test_data[gb.groups[ctrl]], requires_grad=True)
    exp_data = Variable(SiameseTCGA.test_data[gb.groups[exp]], requires_grad=True)
    return ctrl_data, exp_data, cases
def attribution_pairs(SiameseTCGA, exp, ctrl):
    """
    Return (ctrl_data, exp_data) gradient-enabled tensors built from the
    negative ("different"-label) test pairs of a SiameseTCGA dataset, ordered
    so row i of ctrl_data comes from the same pair as row i of exp_data.

    Parameters
    ----------
    SiameseTCGA : dataset instance (shadows the class name) exposing
        test_pairs, labels_dict, label_to_indices and test_data.
    exp, ctrl : str
        'Sample Type' labels, e.g. 'Primary Tumor' / 'Solid Tissue Normal'.
    """
    # Keep only the "different" pairs (third column, the target, == 0).
    negative_pairs = np.array(SiameseTCGA.test_pairs)
    negative_pairs = negative_pairs[negative_pairs[:,2] == 0]
    # Map string labels to integer classes.
    # BUG FIX: use the dataset passed as a parameter instead of the global
    # `siamese_test_dataset` — the original silently ignored its argument here.
    ctrl = SiameseTCGA.labels_dict[ctrl]
    exp = SiameseTCGA.labels_dict[exp]
    # Ordered indices of the samples of each class within the pairs.
    ctrl_data = [idx for pair in negative_pairs[:, :2] for idx in pair if np.isin(idx, SiameseTCGA.label_to_indices[ctrl])]
    exp_data = [idx for pair in negative_pairs[:, :2] for idx in pair if np.isin(idx, SiameseTCGA.label_to_indices[exp])]
    # Raw data tensors with gradients enabled for integrated-gradients attribution.
    ctrl_data = Variable(SiameseTCGA.test_data[ctrl_data], requires_grad=True)
    exp_data = Variable(SiameseTCGA.test_data[exp_data], requires_grad=True)
    return ctrl_data, exp_data
# # IG with Control vector
# Build control/experimental tensors from the "different" test pairs.
ctrl_data, exp_data = attribution_pairs(siamese_test_dataset, exp='Primary Tumor', ctrl='Solid Tissue Normal')
from torch.nn import PairwiseDistance
pdist = PairwiseDistance(p=2)
pdist
# Attribute the model's pairwise-distance output with respect to its inputs.
ig = IntegratedGradients(model.get_dist)
torch.cuda.empty_cache()
# ## Healthy as baseline
# IG path from the healthy baseline (ctrl) to the tumor input (exp);
# `delta` reports the completeness-axiom convergence residual.
attr, delta = ig.attribute(exp_data.cuda(), ctrl_data.cuda(), target=None, n_steps=50, return_convergence_delta=True,
                           additional_forward_args=(ctrl_data.cuda(), pdist))
attr = attr.cpu().detach().numpy()
delta
attr.shape
# ## Average Protein Feature Attributions
proteins['ENSEMBL_ID'].values.shape
attr.shape[1]
# Mean attribution per gene across all attributed pairs.
feat_imp = pd.DataFrame(data=attr.mean(axis=0), index=siamese_test_dataset.data.columns, columns=['Attribution'])
feat_imp.shape
# Join protein annotations and rank genes by attribution, highest first.
feat_imp = pd.merge(feat_imp, proteins.drop_duplicates(subset='ENSEMBL_ID'), left_index=True, right_on='ENSEMBL_ID', how='left').sort_values(by='Attribution', ascending=False).reset_index(drop=True)
feat_imp.shape
feat_imp.to_pickle("../data/tmp/attr_avg.pkl.gz")
# ### Now go to /srv/home/wconnell/github/diffxpy/notebook/2020.02.05_test_DE_analysis and run
feat_imp[['Attribution', 'hgnc', 'Protein names', 'Gene ontology (biological process)', 'Gene ontology (molecular function)']]
| notebook/2020.02.13_shuffled-IG-randPairs-proteinsOnly-adenoContrastive.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: my_conda_env
# language: python
# name: python3
# ---
# # II- Demo. Optimal Interpolation
# An example of simulated SSH data access is provided in the "example_data_access_meom.ipynb" notebook. Here, an example of a mapping technique based on a simple optimal interpolation is proposed. The notebook is structured as follows:
#
# 1) set optimal interpolation parameters,
# 2) reading of pseudo-observations,
# 3) perform optimal interpolation and,
# 4) save the results (reconstructed SSH field)
#
#
# Here, we assume a vector of observations, noted $y$ defined as:
#
# $$y = H x + \epsilon $$
#
# where $H$ is a linear observation operator between the reconstruction grid space and the observation space
# , $x$ is the state to estimate and $\epsilon$ is an independent observation error.
#
# The optimal interpolation consists in estimating an analysed state $x_{a}$ in combining the available observations to approximate the real state $x$:
#
# $$x_{a} = K y $$
# where $K$ is the weight matrix defined as:
#
# $$ K = BH^T(HBH^T + R)^{-1} $$
#
# $B$ is the covariance matrix of $x$, and $R$ the covariance matrix of the error vector $\epsilon$ ($^T$ is the transpose operator)
import xarray as xr
import numpy
import warnings
import logging
import sys
import os
warnings.filterwarnings('ignore')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
sys.path.append('..')
from src.mod_oi import *
from src.mod_inout import *
# ### 1) set optimal interpolation parameters
# +
# OI Grid: regular lon/lat/time reconstruction grid over the Gulf Stream box.
lon_min = 295.  # domain min longitude
lon_max = 305.  # domain max longitude
lat_min = 33.  # domain min latitude
lat_max = 43.  # domain max latitude
time_min = numpy.datetime64('2017-01-01')  # domain min time
time_max = numpy.datetime64('2017-12-31')  # domain max time
dx = 0.2  # zonal grid spatial step (in degree)
dy = 0.2  # meridional grid spatial step (in degree)
dt = numpy.timedelta64(1, 'D')  # temporal grid step
glon = numpy.arange(lon_min, lon_max + dx, dx)  # output OI longitude grid
glat = numpy.arange(lat_min, lat_max + dy, dy)  # output OI latitude grid
gtime = numpy.arange(time_min, time_max + dt, dt)  # output OI time grid
# OI parameters: decorrelation scales for the background covariance B and
# the observation noise level for R (see the equations above).
Lx = 1.  # Zonal decorrelation scale (in degree)
Ly = 1.  # Meridional decorrelation scale (in degree)
Lt = 7.  # Temporal decorrelation scale (in days)
noise = 0.05  # Noise level (5%)
# -
# ### Open your AVISO+ session: fill the ```<AVISO_LOGIN>``` and ```<AVISO_PWD>``` items below
# BUG FIX: `rq` was used without ever being imported in this notebook.
import requests as rq

# Open the AVISO+ session; real credentials must replace the placeholders.
my_aviso_session = rq.Session()
my_aviso_session.auth = ("<AVISO_LOGIN>", "<AVISO_PWD>")
# THREDDS endpoints for along-track observations and gridded map data.
url_alongtrack = 'https://tds.aviso.altimetry.fr/thredds/dodsC/2021a-SSH-mapping-OSE-along-track-data'
url_map = 'https://tds.aviso.altimetry.fr/thredds/dodsC/2021a-SSH-mapping-OSE-grid-data'
# ### 2) reading of pseudo-observations + define output folder
# One along-track L3 SSH product per altimeter mission (alg, j3, s3a, h2g, j2g, j2n).
inputs = [f'{url_alongtrack}/dt_gulfstream_alg_phy_l3_20161201-20180131_285-315_23-53.nc',
          f'{url_alongtrack}/dt_gulfstream_j3_phy_l3_20161201-20180131_285-315_23-53.nc',
          f'{url_alongtrack}/dt_gulfstream_s3a_phy_l3_20161201-20180131_285-315_23-53.nc',
          f'{url_alongtrack}/dt_gulfstream_h2g_phy_l3_20161201-20180131_285-315_23-53.nc',
          f'{url_alongtrack}/dt_gulfstream_j2g_phy_l3_20161201-20180131_285-315_23-53.nc',
          f'{url_alongtrack}/dt_gulfstream_j2n_phy_l3_20161201-20180131_285-315_23-53.nc']
# Define outputs (create ../results/ on first run)
output_directory = '../results/'
if not os.path.exists(output_directory):
    os.mkdir(output_directory)
output_oi = f'{output_directory}/OSE_ssh_mapping_BASELINE.nc'
# ### 3) perform optimal interpolation
# %%time
# set OI param & grid
ds_oi1_param = oi_param(Lx, Ly, Lt, noise)
ds_oi1_grid = oi_grid(glon, glat, gtime)
# Read input obs + discard a bit
# NOTE(review): presumably coarsening keeps every 5th observation along time —
# verify against read_obs_from_aviso.
coarsening = {'time': 5}
#ds_oi1_obs = read_obs(inputs, ds_oi1_grid, ds_oi1_param, coarsening)
ds_oi1_obs = read_obs_from_aviso(inputs, my_aviso_session, ds_oi1_grid, ds_oi1_param, coarsening)
# Run OI, one analysis per output time step (takes ~1h on a laptop)
for it in range(len(gtime)):
    oi_core(it, ds_oi1_grid, ds_oi1_param, ds_oi1_obs)
# ### 4) save the results (reconstructed SSH field)
url_ds_mdt = f'{url_map}/mdt.nc'
ds_oi1_grid = reformate_oi_output(ds_oi1_grid, url_ds_mdt, my_aviso_session)
ds_oi1_grid.to_netcdf(output_oi)
| notebooks/baseline_oi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import sympy.physics.mechanics as mech
from sympy import S,Rational,pi
import sympy as sp
# +
# Symbols for lengths, masses, gravity, and the two generalized coordinates
# (pendulum angles) with their first time derivatives.
l1,l2,l,t,m1,m2,m,g= sp.symbols(r'l_1 l_2 l t m_1 m_2 m g')
q1,q2 = mech.dynamicsymbols(r'\theta_1 \theta_2')
q1d,q2d = mech.dynamicsymbols(r'\theta_1 \theta_2', 1)
# Create and initialize the reference frame (origin fixed, zero velocity)
N = mech.ReferenceFrame('N')
pointN = mech.Point('N*')
pointN.set_vel(N, 0)
# Create the points; angles are measured from the downward vertical (-y)
point1 = pointN.locatenew('p_1', l1*(sp.sin(q1)*N.x-sp.cos(q1)*N.y))
point2 = point1.locatenew('p_2', l2*(sp.sin(q2)*N.x-sp.cos(q2)*N.y))
# Set the points' velocities by differentiating position in frame N
point1.set_vel(N, point1.pos_from(pointN).dt(N))
point2.set_vel(N, point2.pos_from(pointN).dt(N))
# Create the particles
particle1 = mech.Particle('P_1',point1,m1)
particle2 = mech.Particle('P_2',point2,m2)
# Set the particles' potential energy
# Define forces: gravity acting in -y on each bob
forces=[(point1,-particle1.mass*g*N.y),(point2,-particle2.mass*g*N.y)]
# Construct the Lagrangian
L = mech.Lagrangian(N, particle1,particle2)
# Create the LagrangesMethod object
LM = mech.LagrangesMethod(L, [q1,q2], hol_coneqs=None, forcelist=forces, frame=N)
# Form Lagranges Equations
ELeqns = LM.form_lagranges_equations()
sp.simplify(ELeqns)
# -
# Right-hand side of the equivalent first-order ODE system.
sp.simplify(LM.rhs())
| Pendula/Simple/2Pendulum/2Pendulum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import warnings
warnings.filterwarnings("ignore")
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from ipywidgets import interact, interact_manual
from IPython.display import display
# Global plotting defaults for the whole notebook.
plt.rcParams['figure.figsize']=(16,8)
plt.style.use('fivethirtyeight')
# -
#read dataset
jobs = pd.read_csv('jobs.csv')
#dataset shape
jobs.shape
#checking for null values (total count over the whole frame)
jobs.isnull().sum().sum()
jobs.head(10)
# +
# 2x4 grid of count plots, one per job-level attribute.
# NOTE(review): positional sns.countplot(series) relies on an older seaborn
# API; newer versions require countplot(x=series).
plt.subplot(241)
sns.countplot(jobs['FinancialBudget'])
plt.subplot(242)
sns.countplot(jobs['ContactLevel'])
plt.subplot(243)
sns.countplot(jobs['Supervision'])
plt.subplot(244)
sns.countplot(jobs['ProblemSolving'])
plt.subplot(245)
sns.countplot(jobs['OrgImpact'])
plt.subplot(246)
sns.countplot(jobs['Experience'])
plt.subplot(247)
sns.countplot(jobs['EducationLevel'])
plt.subplot(248)
sns.countplot(jobs['PayGrade'])
plt.suptitle('Job Vacancies for different Levels',)
plt.show()
# -
# Pairwise correlations between the numeric job-level attributes.
sns.heatmap(jobs[['FinancialBudget', 'ContactLevel', 'Supervision', 'ProblemSolving', 'OrgImpact', 'Experience',
                  'EducationLevel', 'PayGrade']].corr(), annot=True)
plt.title('Correlation map in the jobs data')
plt.show()
# +
#group the data based on jobtitle (mean of each attribute per job family)
# NOTE(review): agg('mean') over all columns assumes the non-dropped columns
# are numeric — confirm for this dataset / pandas version.
x=jobs.groupby(['JobFamilyDescription']).agg('mean')
x.drop(['ID','JobFamily','JobClass'], axis=1, inplace=True)
x.style.background_gradient(cmap = 'Reds')
# -
# One line per attribute; x-axis positions relabelled with the 15 job families.
x.plot()
plt.xticks(np.arange(15), ('Accounting And Finance', 'Administrative Support', 'Baker',
                           'Buildings And Facilities', 'Buyer', 'Cashier',
                           'Communications And Media', 'Corporate Research',
                           'Finance And Accounting', 'Human Resources', 'Meat Cutter', 'Produce',
                           'Secretary', 'Stockkeeping', 'Systems Analyst'),
           rotation = 90)
plt.title('Comparison of Different Jobs', fontsize = 20)
plt.show()
# Interactive widget: pick two numeric columns and draw a bar plot of one
# against the other (depends on the module-level `jobs` frame).
@interact_manual
def check(column = jobs.select_dtypes('number').columns[3:],
          column1 = jobs.select_dtypes('number').columns[4:],):
    """Bar plot of jobs[column] (x) against jobs[column1] (y)."""
    sns.barplot(jobs[column], jobs[column1])
    plt.xticks(rotation = 90)
    plt.show()
| Jobs_data/EDA on Jobs data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Testing
# ## Exceptions
#
# ### Learning Objectives
# - Understand that exceptions are effectively specialized runtime tests
# - Learn when to use exceptions and what exceptions are available
# - Exceptions are more sophisticated than assertions. They are the standard error messaging system in most modern programming languages. Fundamentally, when an error is encountered, an informative exception is ‘thrown’ or ‘raised’.
#
# For example, instead of the assertion in the case before, an exception can be used.
#
def mean(num_list):
    """Return the arithmetic mean of num_list.

    Raises
    ------
    Exception
        If num_list is empty, since the mean of an empty list is undefined.
    """
    if len(num_list) == 0:
        # Implicit string concatenation keeps the message on one logical line;
        # the original backslash-continued literal embedded source whitespace
        # into the user-facing message.
        raise Exception("The algebraic mean of an empty list is undefined. "
                        "Please provide a list of numbers")
    return sum(num_list)/len(num_list)
# Once an exception is raised, it will be passed upward in the program scope. An exception can be used to trigger additional error messages or an alternative behavior rather than immediately halting code execution: the exception can be ‘caught’ upstream with a try-except block. When wrapped in a try-except block, the exception can be intercepted before it reaches global scope and halts execution.
#
# To add information or replace the message before it is passed upstream, the try-catch block can be used to catch-and-reraise the exception:
#
def mean(num_list):
    """Arithmetic mean of num_list; an empty list re-raises the division
    error with an additional human-readable hint appended."""
    try:
        total = sum(num_list)
        return total / len(num_list)
    except ZeroDivisionError as err:
        hint = "The algebraic mean of an empty list is undefined. Please provide a list of numbers."
        raise ZeroDivisionError(f"{err}\n{hint}")
#
# Alternatively, the exception can simply be handled intelligently. If an alternative behavior is preferred, the exception can be disregarded and a responsive behavior can be implemented like so:
#
def mean(num_list):
    """Arithmetic mean of num_list, with an empty list yielding 0 instead
    of an error (the exception is handled, not propagated)."""
    try:
        result = sum(num_list) / len(num_list)
    except ZeroDivisionError:
        result = 0
    return result
#
# If a single function might raise more than one type of exception, each can be caught and handled separately.
#
# +
def mean(num_list):
    """Arithmetic mean of num_list.

    An empty list yields 0; non-numerical input re-raises TypeError with an
    explanatory hint appended to the original message.
    """
    try:
        return sum(num_list) / len(num_list)
    except ZeroDivisionError:
        return 0
    except TypeError as err:
        note = "The algebraic mean of an non-numerical list is undefined. Please provide a list of numbers."
        raise TypeError(f"{err}\n{note}")
# -
# ### Challenge: What Else Can Go Wrong?
#
# 1. Think of some other type of exception that could be raised by the try block.
# 2. Guard against it by adding an except clause.
#
def mean(num_list):
    """Arithmetic mean of num_list.

    Returns float('inf') for an empty list (per the challenge text below);
    non-numerical input re-raises TypeError with an explanatory hint.
    """
    try:
        return sum(num_list)/len(num_list)
    except ZeroDivisionError :
        # BUG FIX: `Infinity` was an undefined name and raised NameError;
        # the intended value is float('inf').
        return float('inf')
    except TypeError as detail :
        msg = "The algebraic mean of an non-numerical list is undefined. Please provide a list of numbers."
        raise TypeError(detail.__str__() + "\n" + msg)
# your except clause here:
#
# ### Challenge: Cause all of the errors
#
# - Use the mean function in three different ways, so that you cause each exceptional case
# cause the empty list case and check that it returns infinity
# cause the type error and check that it causes the error message
# cause the type of exception that you guarded against in the previous challenge
# Exceptions have the advantage of being simple to include and powerfully helpful to the user. However, not all behaviors can or should be found with runtime exceptions. Most behaviors should be validated with unit tests.
#
# ### Key Points
#
# - Exceptions are effectively specialized runtime tests
# - Exceptions can be caught and handled with a try-except block
# - Many built-in Exception types are available
| files/03-exceptions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# Computers deal in fixed-size bit sequences. For instance, a dedicated machine instruction on a so-called "64-bit machine" may take two 64-bit sequences as input, and produce a 64-bit sequence as output. If you need more than 64 bits for your intermediate results, you must resort to issueing multiple such (or similar) instructions.
#
# Intuitively, this bears resemblance to a common grade school mantra like:
#
# > Doing arithmetic with up to 3-4 decimal places should be sufficient for all practical intents and purposes.
#
# The purpose of this notebook is to showcase some practical examples where 64 bits proves insufficient. We take 64 bits since this is the default in R. In particular, R uses double-precision binary floating-point numbers. We begin with a brief recap of what that means, more concretely.
# ## Binary Approximation
#
# Computers deal in bit sequences, and so all data must be represented in binary notation, in one way or another.
#
# The reader is humbly assumed to be familiar with the classical binary representation of the natural numbers i.e., the numbers 0, 1, 2, 3, ... This is a positional notation, where the symbols `0` or `1` (bits) are put in a sequence, with each position signifying the inclusion of a power of 2, with the most-signficant bit on the left end, and the least-signifcant bit to the right end of the sequence.
#
# For instance, the 6-bit sequence `101010` signifies the number
#
# \begin{align}
# 1\cdot2^5 + 0\cdot2^4 + 1\cdot2^3 + 0\cdot2^2 + 1\cdot 2^1 + 0 \cdot 2^0
# &= 1\cdot2^5 + 1\cdot2^3 + 1\cdot 2^1 \\
# &= 32 + 8 + 2 \\
# &= 42
# \end{align}
#
# This notation bears structural resemblance to the classical decimal notation we learn in grade school. This too, is a positional notation, except that the symbols are `0`, `1`, `2`, ... , `9` (digits), and a position signifies the magnitude of an inclusion of a power of 10.
#
# For instance, the 4-digit sequence `1337` signifies the number
#
# \begin{align}
# 1\cdot{10}^{3} + 3\cdot{10}^{2} + 3\cdot{10}^{1} + 7\cdot{10}^{0}
# &= 1000 + 300 + 30 + 7 \\
# &= 1337
# \end{align}
#
# Classical binary representation is elegant because there is a one-to-one mapping between natural numbers and sequences of bits.
#
# This elegance, much like one's spirit in grade school, breaks down with the introduction of fractions. Some fractions have a finite, decimal representation, composed of a "decimal" and a "fractional" part, separated by a "radix point".
#
# Numerically, we extend the powers of 10, right of 0, into the negative.
#
# For instance, the 4-digit sequence `13.37` signifies the number
#
# \begin{align}
# 1\cdot{10}^{1} + 3\cdot{10}^{0} + 3\cdot{10}^{-1} + 7\cdot{10}^{-2}
# &= 10 + 3 + 0.3 + 0.07 \\
# &= 13.37
# \end{align}
#
# This representation is useful for numbers where it applies since many standard algorithms, which worked for natural numbers, work for this representation, if we disregard the radix point until the very last.
#
# There is no one-to-one mapping between the binary and decimal numbers, as soon as there is a radix point involved.
#
# The following sections discuss two classical arithmetics, which are commonly supported directly in computer hardware. "Direct support" means that there typically exists a single machine instruction for every single operation we discuss, on most modern CPUs and GPUs.
#
# Which (few) numbers can be represented exactly?
# ## Scientific Notation
# ## Combinators
#
#
# ### Sum
# Here's a simple implementation of a summation function. This example is interesting also because you can imagine a summation intertwined with another operation.
# Word-at-a-time summation: fold the elements into an accumulator from the
# left, exactly as a naive hand-written loop would (no compensation for
# floating-point rounding, unlike the built-in `sum`).
fpa_sum0 <- function(xs) {
  acc <- 0.0
  for (elem in xs) {
    acc <- acc + elem
  }
  acc
}
# This seems good and dandy for small, and even some fairly large data sets:
# Quarter values (exact in binary) sum without visible error at any size.
lapply(list(
  c(0.25, 0.25, 0.25),
  rep(0.25,100),
  rep(0.25,100000)
), fpa_sum0)
# Yet, it yields some odd results for some fairly small data sets:
# (0.1, 0.2 and 0.3 have no exact binary representation, so the naive
# left-to-right loop accumulates rounding error)
lapply(list(
  rep(0.1,80),
  rep(0.2,120),
  rep(0.3,150)
), fpa_sum0)
# Fortunately, if you use the built-in `sum`, you avoid such problems.
lapply(list(
  c(0.25, 0.25, 0.25),
  rep(0.25,100),
  rep(0.25,100000)
), sum)
lapply(list(
  rep(0.1,100),
  rep(0.2,150),
  rep(0.3,200)
), sum)
# # Conclusion
#
# The take-away from this is two-fold:
#
# 1. Use built-in, or carefully constructed combinators to gain a greater numerical stability.
# 2. Avoid a word-at-a-time style of programming. [(Backus, 1978)](#cite-backus-1978)
# # References
#
# 1. <a name="cite-backus-1978"></a>John Backus. 1978. *Can programming be liberated from the von Neumann style? A functional style and its algebra of programs.* Commun. ACM 21, 8 (August 1978), 613-641.
| notebooks/Numerical Literacy in R.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Experiment 4: Source identification. (N-class classification.)
#
# Evaluate performance on a harder problem: identifying which source an image came from. This is harder than source verification, because you must decide which of N sources an image is from.
#
# **Caution**: with small # of distinct compression features (a few hundred), it's impossible to uniquely identify more than that number of sources.
# %matplotlib widget
# %load_ext autoreload
# %autoreload 2
# +
import os
import sys
import subprocess
import random
import pickle
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
# from tqdm.autonotebook import tqdm
from tqdm.notebook import tqdm
from image_compression_attribution.common.code.models import quant_matrices, compr_levels
from image_compression_attribution.common.code.summarize_quant_matrices import summarize_compression_features
# Show full (wide and long) dataframes when displayed in the notebook.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 500)
pd.set_option('display.max_colwidth', 500)
from sklearn.metrics import make_scorer, roc_curve
from scipy.optimize import brentq
from scipy.interpolate import interp1d
#WARNING: this method does not seem to work well when there are large gaps
#in the ROC curve. Hence, only use this if you have interpolated between
#ROC curve data points to fill in the roc curve on a grid with small intervals.
#https://github.com/scikit-learn/scikit-learn/issues/15247#issuecomment-542138349
def calculate_eer(fpr, tpr):
    '''
    Returns the equal error rate for a binary classifier output.

    The EER is the operating point on the (linearly interpolated) ROC curve
    where the false positive rate equals the false negative rate (1 - tpr);
    it is located by root-finding over fpr in [0, 1].
    '''
    roc_interp = interp1d(fpr, tpr)
    return brentq(lambda fp: 1. - fp - roc_interp(fp), 0., 1.)
# -
# Fix the NumPy RNG so the random article-level train/test splits below
# are reproducible.
RND_SEED=1234
np.random.seed(RND_SEED)
# +
# Input: compression-feature summary CSV.  Output: per-experiment results dir.
SUMMARY_FILE = "/app/dataset/data.csv"
RESULTS_FOLDER = "results/exp_04"
os.makedirs(RESULTS_FOLDER, exist_ok=True)
df = pd.read_csv(SUMMARY_FILE)
# Parse timestamps as timezone-aware UTC datetimes.
df['timestamp'] = pd.to_datetime(df['timestamp'], utc=True)
#Drop non-image files, e.g. html files returned
#due to download errors
df, df_dropped = df[ df['mime'].str.startswith('image') ].reset_index(drop=True), \
                 df[ ~df['mime'].str.startswith('image') ].reset_index(drop=True)
# Sorted list of the unique image sources (the identification classes).
sources = sorted(list(df['source'].unique()))
# +
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import IsolationForest
#Guide to LabelEncoder:
#https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html
#create numerical class labels for quantization categorical names (suitable for
#use as ML training feature vector)
le_qs = preprocessing.LabelEncoder()
le_qs.fit(df['q_name'])
df['q_name_class'] = le_qs.transform(df['q_name'])
# Likewise encode the source names into integer target labels.
sources = sorted(list(df['source'].unique()))
le_sources = preprocessing.LabelEncoder()
le_sources.fit(sources)
df['source_class'] = le_sources.transform(df['source'])
# -
df
# Lookup table mapping each source name to its numeric class label,
# used later for confusion-matrix axis labels.
df_source_decoder = df[["source", "source_class"]].drop_duplicates().sort_values(by='source_class').reset_index(drop=True)
df_source_decoder
# +
import sklearn.metrics
#Prepare Train and Test Split.
#Do random sampling to make train/test split.
PERCENT_TEST = 0.40
#Want to group images from articles:
# get articles (i.e. unique URLs)
ds_articles4 = df['articleHash'].drop_duplicates().reset_index(drop=True)
#So randomly sample articles to build train & test sets, e.g. 60%/40% split.
#Splitting at the article level (not the image level) avoids leaking images
#from the same article into both train and test.
num_articles4 = len(ds_articles4)
num_test_articles4 = int(PERCENT_TEST*num_articles4)
test_articles4 = ds_articles4.sample(num_test_articles4)
num_train_articles4 = num_articles4 - num_test_articles4
train_articles4 = ds_articles4[~ds_articles4.isin(test_articles4)]
df_train4 = df[df['articleHash'].isin(train_articles4)]
df_test4 = df[df['articleHash'].isin(test_articles4)]
#assert len(train_articles4) + len(test_articles4) == len(df)
# Every image must land in exactly one of the two splits.
assert len(df_train4) + len(df_test4) == len(df)
#-------------------------------------------------------------
#Fit model (project-local classifier over the quantization-name feature).
sk_model = quant_matrices.attribution_quant_matrices_sk()
sk_model.fit( df_train4[['q_name_class']], df_train4['source_class'] )
#make predictions
predicted_source_class4 = sk_model.model.predict(df_test4[['q_name_class']])
#-------------------------------------------------------------
#Compute accuracy
accuracy4 = sklearn.metrics.accuracy_score(df_test4['source_class'], predicted_source_class4, normalize=True)
print("overall accuracy = {}".format(accuracy4))
print("saving accuracy to file")
# Persist the headline number (as a percentage) for the paper/report.
with open(os.path.join(RESULTS_FOLDER,"accuracy.txt"),"w") as file1:
    file1.write("{:.1f}".format(accuracy4*100))
#confusion matrix: rows = true source class, columns = predicted source class.
confusion_m4 = sklearn.metrics.confusion_matrix(df_test4['source_class'], predicted_source_class4, labels=df_source_decoder['source_class'])
confusion_m4
# visualize confusion matrix
fig, ax = plt.subplots(figsize=(8,8))
disp = sklearn.metrics.ConfusionMatrixDisplay(confusion_matrix=confusion_m4,
                                              display_labels=df_source_decoder['source'])
disp.plot(xticks_rotation='vertical', cmap="plasma", ax=ax, include_values=False, colorbar=False)
# plt.tight_layout()
# Save BEFORE plt.show(): on non-interactive backends show() releases the
# current figure, so calling savefig afterwards can write an empty file.
plt.savefig(os.path.join(RESULTS_FOLDER,"confusion_matrix.pdf"), bbox_inches='tight')
plt.show()
# ### Observations:
# * not bad overall (pretty diagonal) but some confusions
# # Extra experiment: see how accuracy drops with # of sources
# Keep the first k sources and compute accuracy. Sweep through k values, e.g. 5 to 30.
# +
#Prepare Train and Test Split.
#Do random sampling to make train/test split.
PERCENT_TEST = 0.40
num_sources_list = []
accuracy_list = []
# Repeat the identification experiment while growing the number of sources.
for num_sources in range(5, len(df['source'].unique())+1):
    # Restrict the dataset to the first `num_sources` sources.
    df_tmp = df[df['source'].isin(sources[0:num_sources])]
    assert len(df_tmp['source'].unique()) == num_sources
    #Want to group images from articles:
    # get articles (i.e. unique URLs)
    ds_articles4 = df_tmp['articleHash'].drop_duplicates().reset_index(drop=True)
    #So randomly sample articles to build train & test sets, e.g. 60%/40% split
    num_articles4 = len(ds_articles4)
    num_test_articles4 = int(PERCENT_TEST*num_articles4)
    test_articles4 = ds_articles4.sample(num_test_articles4)
    num_train_articles4 = num_articles4 - num_test_articles4
    train_articles4 = ds_articles4[~ds_articles4.isin(test_articles4)]
    df_train4 = df_tmp[df_tmp['articleHash'].isin(train_articles4)]
    df_test4 = df_tmp[df_tmp['articleHash'].isin(test_articles4)]
    #assert len(train_articles4) + len(test_articles4) == len(df)
    assert len(df_train4) + len(df_test4) == len(df_tmp)
    #-------------------------------------------------------------
    #Fit model
    sk_model = quant_matrices.attribution_quant_matrices_sk()
    sk_model.fit( df_train4[['q_name_class']], df_train4['source_class'] )
    #make predictions
    predicted_source_class4 = sk_model.model.predict(df_test4[['q_name_class']])
    #-------------------------------------------------------------
    #Compute accuracy
    accuracy4 = sklearn.metrics.accuracy_score(df_test4['source_class'], predicted_source_class4, normalize=True)
    print("Run experiment with {} sources: accuracy {}".format(num_sources, accuracy4))
    num_sources_list.append(num_sources)
    accuracy_list.append(accuracy4)
# +
# Plot test accuracy as a function of the number of candidate sources.
x_num_sources = np.array(num_sources_list)
y_accuracy = np.array(accuracy_list)
# plt.figure(figsize=(6,5))
# plt.figure(figsize=(5,2))
plt.figure(figsize=(5,1.6))
plt.plot(x_num_sources, y_accuracy, 'o-')
plt.ylim(0,1)
plt.xlabel('Number of sources')
plt.ylabel("Test accuracy")
plt.title("Identification accuracy vs number of sources")
plt.yticks(np.arange(0.0, 1.2, 0.2))
#uncomment to save:
plt.savefig(os.path.join(RESULTS_FOLDER,"accuracy_vs_num_sources.pdf"), bbox_inches='tight')
# -
# Same data again at a slightly different aspect ratio (not saved).
plt.figure(figsize=(5,2))
plt.plot(x_num_sources, y_accuracy, 'o-')
plt.ylim(0,1)
plt.xlabel('Number sources')
plt.ylabel("Test accuracy")
plt.title("Identification accuracy vs number of sources")
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import joblib
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
continue
import torch
import torch.nn as nn
import numpy as np
import time
import math
from matplotlib import pyplot
import joblib
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
import datetime
torch.manual_seed(0)
np.random.seed(0)
pd.set_option('plotting.backend', 'pandas_bokeh')
import pandas_bokeh
pandas_bokeh.output_notebook()
calculate_loss_over_all_values = False
# +
# import importlib
# login_str = 'login_haj'
# module = importlib.import_module(login_str, package=None)
# account_name = login_str
# print('Logging in with account : '+str(account_name))
# module.retry_autologin()
# -
from jugaad_trader import Zerodha
# Interactive broker session.
# NOTE(review): credentials here are placeholders; supply real secrets via
# environment/config, never commit them to the notebook.
kite = Zerodha(user_id='ZERODHA_CLIENT_ID',password='<PASSWORD>',twofa='<PASSWORD>')
kite.login()
kite.profile()['user_name']
# +
# Sequence-window hyperparameters for the transformer model below.
input_window = 300  # length of each input sequence fed to the model
output_window = 5  # number of future steps masked out / predicted
batch_size = 10 # batch size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class PositionalEncoding(nn.Module):
    """Additive sinusoidal positional encoding for (seq, batch, d_model) inputs."""

    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder over the even feature indices.
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)
        # Stored as (max_len, 1, d_model) so it broadcasts over the batch dim;
        # register_buffer keeps it out of the trainable parameters.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # x: (seq_len, batch, d_model); add the first seq_len encodings.
        return x + self.pe[:x.size(0), :]
class TransAm(nn.Module):
    """Transformer encoder for univariate forecasting: positional encoding ->
    causally-masked TransformerEncoder -> linear projection to one value."""

    def __init__(self, feature_size=30, num_layers=2, dropout=0.2):
        super(TransAm, self).__init__()
        self.model_type = 'Transformer'
        # The causal mask is built lazily in forward() and cached per length.
        self.src_mask = None
        self.pos_encoder = PositionalEncoding(feature_size)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=feature_size, nhead=10, dropout=dropout)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
        self.decoder = nn.Linear(feature_size, 1)
        self.init_weights()

    def init_weights(self):
        """Zero the output bias and uniformly initialise the output weights."""
        bound = 0.1
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-bound, bound)

    def forward(self, src):
        # Rebuild the mask only when the sequence length changes.
        if self.src_mask is None or self.src_mask.size(0) != len(src):
            self.src_mask = self._generate_square_subsequent_mask(len(src)).to(src.device)
        encoded = self.transformer_encoder(self.pos_encoder(src), self.src_mask)
        return self.decoder(encoded)

    def _generate_square_subsequent_mask(self, sz):
        # Past/present positions (lower triangle) get 0.0; future positions -inf.
        allowed = torch.tril(torch.ones(sz, sz)) == 1
        return torch.zeros(sz, sz).masked_fill(~allowed, float('-inf'))
def create_inout_sequences(input_data, tw):
    """Build (masked input, label) training pairs with window length `tw`.

    For each sliding window, the last `output_window` points of the input are
    replaced with zeros while the label keeps the full unmasked window.
    Relies on the module-level global `output_window`.
    """
    pairs = []
    for start in range(len(input_data) - tw):
        window = input_data[start:start + tw]
        masked = np.append(window[:-output_window], output_window * [0])
        pairs.append((masked, window))
    return torch.FloatTensor(pairs)
def get_data2(inst):
    """Fetch ~50 days of 5-minute candles for instrument `inst`, scale the
    close prices to [-15, 15], and return (train, test) window tensors on
    the global `device`.

    Side effect: stores the fitted MinMaxScaler in the global `scaler`.

    NOTE(review): this calls `module.kite.historical_data`, but the `module`
    login cell above is commented out and only `kite` is defined in this
    notebook — presumably this should be `kite.historical_data`; confirm.
    """
    global scaler
    old_lst=[]
    interval='5minute'
    todaydt=datetime.date.today()
    hud_ago=todaydt-datetime.timedelta(days=50) #59
    to_date=datetime.date.isoformat(todaydt)
    from_date=datetime.date.isoformat(hud_ago)
    # Single iteration now; the date-shifting below supports fetching more
    # history by increasing the range bound.
    for i2 in range(1):
        new_lst = module.kite.historical_data(inst, from_date, to_date, interval,continuous=False)
        old_lst = new_lst + old_lst
        todaydt=todaydt-datetime.timedelta(days=51) #60
        hud_ago=hud_ago-datetime.timedelta(days=51) #60
        to_date=datetime.date.isoformat(todaydt)
        from_date=datetime.date.isoformat(hud_ago)
    df=pd.DataFrame(old_lst)
    df_nifty = df
    this_inst_df = df_nifty
    # Keep the most recent 905 closing prices only.
    amplitude = this_inst_df['close'].to_numpy()[-905:]
    amplitude = amplitude.reshape(-1)
    scaler = MinMaxScaler(feature_range=(-15, 15))
    amplitude = scaler.fit_transform(amplitude.reshape(-1, 1)).reshape(-1)
    # Train fraction is 0, so train_data is empty and all samples go to test.
    sampels = int(amplitude.shape[0]*0)
    train_data = amplitude[:sampels]
    test_data = amplitude
    train_sequence = create_inout_sequences(train_data,input_window)
    train_sequence = train_sequence[:-output_window]
    test_data = create_inout_sequences(test_data,input_window)
    test_data = test_data[:-output_window]
    return train_sequence.to(device),test_data.to(device)
def get_batch(source, i, batch_size):
    """Slice up to `batch_size` (input, label) pairs starting at `i` and
    reshape them to (input_window, batch, 1) via the global `input_window`."""
    # Never read past the second-to-last element of `source`.
    take = min(batch_size, len(source) - 1 - i)
    window = source[i:i + take]
    inputs = torch.stack(torch.stack([pair[0] for pair in window]).chunk(input_window, 1))
    labels = torch.stack(torch.stack([pair[1] for pair in window]).chunk(input_window, 1))
    return inputs, labels
def evaluate(eval_model, data_source):
    """Return the average loss of `eval_model` over `data_source`.

    Uses the module-level globals `criterion`, `device`, `output_window` and
    `calculate_loss_over_all_values` (restrict the loss to the masked output
    window unless the latter flag is set).
    """
    eval_model.eval()  # disable dropout etc.
    eval_batch_size = 1000
    running_loss = 0.
    with torch.no_grad():
        for start in range(0, len(data_source) - 1, eval_batch_size):
            data, targets = get_batch(data_source, start, eval_batch_size)
            output = eval_model(data)
            if calculate_loss_over_all_values:
                batch_loss = criterion(output, targets).to(device)
            else:
                batch_loss = criterion(output[-output_window:], targets[-output_window:]).to(device)
            # Weight each batch's loss by its actual size.
            running_loss += len(data[0]) * batch_loss.item()
    return running_loss / len(data_source)
plot_counter = 0
def plot_and_loss(eval_model, data_source,epoch,tknip):
    """Run a one-step-at-a-time evaluation over `data_source` and return an
    average loss.

    NOTE(review): the return divides by the final loop index `i`
    (= len(data_source) - 2), not the iteration count, and raises NameError
    when len(data_source) <= 1 because `i` is never bound — confirm intent.
    `global plot_counter` is declared but never modified here, and the
    concatenated `test_result`/`truth` arrays are discarded.
    """
    global plot_counter
    eval_model.eval()
    total_loss = 0.
    test_result = torch.Tensor(0)
    truth = torch.Tensor(0)
    with torch.no_grad():
        for i in range(0, len(data_source) - 1):
            data, target = get_batch(data_source, i,1)
            # look like the model returns static values for the output window
            output = eval_model(data)
            if calculate_loss_over_all_values:
                total_loss += criterion(output, target).item()
            else:
                total_loss += criterion(output[-output_window:], target[-output_window:]).item()
            test_result = torch.cat((test_result.to(device), output[-1].view(-1).to(device)), 0) #todo: check this. -> looks good to me
            truth = torch.cat((truth.to(device), target[-1].view(-1).to(device)), 0)
    test_result = test_result.cpu().numpy()
    truth = truth.cpu().numpy()
    len(test_result)
    return total_loss / i
def predict_future_open(eval_model, data_source,steps,tkn):
    """Autoregressively extend the first window of `data_source` by `steps`
    one-step predictions, plot the result (prediction in red over the seed
    window in blue), and return the full flattened sequence.

    NOTE(review): `input` is built with the last `output_window` points
    zeroed out but is never used — the model is fed the raw `data` window
    instead.  Presumably `output = eval_model(input)` was intended; confirm.
    `total_loss`, `test_result` and `truth` are initialised but unused.
    """
    eval_model.eval()
    total_loss = 0.
    test_result = torch.Tensor(0)
    truth = torch.Tensor(0)
    _ , data = get_batch(data_source, 0,1)
    with torch.no_grad():
        for i in range(0, steps,1):
            input = torch.clone(data[-input_window:])
            input[-output_window:] = 0
            output = eval_model(data[-input_window:])
            # Append the newest one-step prediction to the rolling context.
            data = torch.cat((data, output[-1:]))
    data = data.cpu().view(-1)
    pyplot.plot(data,color="red")
    pyplot.plot(data[:input_window],color="blue")
    pyplot.grid(True, which='both')
    pyplot.axhline(y=0, color='k')
    return data
def predict_future(eval_model, data_source,steps,tkn):
    """Same autoregressive rollout as `predict_future_open`, but saves the
    plot to ./nmnm/ (named with the global `plot_counter`) and closes the
    figure instead of returning the data.

    NOTE(review): as in `predict_future_open`, the zero-masked `input` is
    computed but never fed to the model — confirm whether that is intended.
    """
    eval_model.eval()
    total_loss = 0.
    test_result = torch.Tensor(0)
    truth = torch.Tensor(0)
    _ , data = get_batch(data_source, 0,1)
    with torch.no_grad():
        for i in range(0, steps,1):
            input = torch.clone(data[-input_window:])
            input[-output_window:] = 0
            output = eval_model(data[-input_window:])
            data = torch.cat((data, output[-1:]))
    data = data.cpu().view(-1)
    pyplot.plot(data,color="red")
    pyplot.plot(data[:input_window],color="blue")
    pyplot.grid(True, which='both')
    pyplot.axhline(y=0, color='k')
    pyplot.savefig(f'./nmnm/transformer-future_{plot_counter}_{steps}_{tkn}.png')
    pyplot.close()
# Load a pre-trained checkpoint on CPU.
# NOTE(review): torch.load unpickles arbitrary code — only load trusted files.
model= torch.load('./best_model_multi8.pt',map_location=torch.device('cpu'))
# + code_folding=[]
# train_data, val_data = get_data2(2029825)
# predict_future(model,val_data,2000,2029825)
# +
# look_up = 1001
# inst_check_list = [1793,5633,6401,3861249,2995969,25601,325121,6483969,40193,41729,54273,
# 60417,5436929,70401,1510401,4267265,4268801]
# for one in tqdm(inst_check_list):
# train_data, val_data = get_data2(one)
# col_list = []
# orig_data = np.array([])
# orig_data
# for one_part_point in range(15): # total_parts
# # print(val_data[-(300*(one_part_point+1))::].shape)
# dpp = predict_future_open(model, val_data[-(300*(one_part_point+1))::],2000,123123)
# col_list.append(np.append(orig_data,dpp))
# orig_data = np.append(orig_data,dpp[:input_window])
# # col_list.append(dpp)
# col_list.append(orig_data)
# pyplot.savefig(f'./nmnm/test_plot.png')
# pyplot.close()
# plot_df = pd.DataFrame(col_list)
# trps = plot_df.transpose()
# trps.plot()
# pd.DataFrame(orig_data).plot()
# # predict_future(model,val_data,look_up,one)
# -
# +
# Rolling evaluation: forecast 1000 steps ahead from each consecutive
# input_window-sized slice of val_data and stitch the pieces into one plot.
# NOTE(review): `val_data` is only produced by a commented-out cell above,
# so running this notebook top-to-bottom raises NameError here — confirm
# the intended execution order.
col_list = []
orig_data = np.array([])
test_len = 15
for one_part_point in tqdm(range(test_len)): # total_parts
    dpp = predict_future_open(model, val_data[input_window*(one_part_point):input_window*(one_part_point+1)],1000,123123)
    col_list.append(np.append(orig_data,dpp))
    orig_data = np.append(orig_data,dpp[:input_window])
col_list.append(orig_data)
pyplot.savefig(f'./nmnm/test_plot.png')
pyplot.close()
plot_df = pd.DataFrame(col_list)
trps = plot_df.transpose()
trps.plot()
# +
# for jj in range(8):
# print(jj+1)
# model= torch.load(f'./best_model_multi{jj+1}.pt',map_location=torch.device('cpu'))
# col_list = []
# orig_data = np.array([])
# test_len = 8
# for one_part_point in tqdm(range(test_len)): # total_parts
# dpp = predict_future_open(model, val_data[input_window*(one_part_point):input_window*(one_part_point+1)],100,123123)
# col_list.append(np.append(orig_data,dpp))
# orig_data = np.append(orig_data,dpp[:input_window])
# col_list.append(orig_data)
# pyplot.savefig(f'./nmnm/test_plot.png')
# pyplot.close()
# plot_df = pd.DataFrame(col_list)
# trps = plot_df.transpose()
# trps.plot()
# print('*'*60)
# -
# Sanity check: fetch one instrument and peek at the last window's input.
train_data, val_data = get_data2(3356417)
val_data[-1][0]
# +
# Rolling forecast with level alignment: each new forecast segment is
# shifted so its FIRST point matches the last previously observed value.
train_data, val_data = get_data2(3529217)
col_list = []
orig_data = np.array([])
test_len = 2
model= torch.load(f'./best_model_multi18.pt',map_location=torch.device('cpu'))
for one_part_point in tqdm(range(test_len)): # total_parts
    dpp = predict_future_open(model, val_data[input_window*(one_part_point):input_window*(one_part_point+1)],
                              1000,123123)
    mod = dpp[0].numpy()
    if (orig_data.size != 0): #check not empty
        org = orig_data[-1]
        diff = org-mod
        dpp = dpp + diff
    col_list.append(np.append(orig_data,dpp))
    orig_data = np.append(orig_data,dpp[:input_window])
pyplot.savefig(f'./nmnm/test_plot.png')
pyplot.close()
plot_df = pd.DataFrame(col_list)
trps = plot_df.transpose()
trps.plot()
# +
# Variant alignment: shift each segment using the value at index 301 (the
# first predicted step after the 300-sample seed window) and subtract the
# offset instead of adding it.
train_data, val_data = get_data2(3356417) #3529217
col_list = []
orig_data = np.array([])
test_len = 2
model= torch.load(f'./best_model_multi18.pt',map_location=torch.device('cpu'))
for one_part_point in tqdm(range(test_len)): # total_parts
    dpp = predict_future_open(model, val_data[input_window*(one_part_point):input_window*(one_part_point+1)],
                              1000,123123)
    if (orig_data.size != 0): #check not empty
        diff = orig_data[-1] - dpp[301].numpy()
        dpp = dpp - diff
    col_list.append(np.append(orig_data,dpp))
    orig_data = np.append(orig_data,dpp[:input_window])
pyplot.savefig(f'./nmnm/test_plot.png')
pyplot.close()
plot_df = pd.DataFrame(col_list)
trps = plot_df.transpose()
trps.plot()
# -
# Visual grid search: run the aligned rolling forecast for checkpoints
# 14..18 on a few instruments, plotting each result for comparison.
for i in range(14,19):
    for jj in [1459457,70401,261889,]:
        # for jj in [3861249,6401,3677697,3669505]:
        print('*'*50)
        print(i)
        print(jj)
        train_data, val_data = get_data2(jj)
        col_list = []
        orig_data = np.array([])
        test_len = 6
        model= torch.load(f'./best_model_multi{i}.pt',map_location=torch.device('cpu'))
        for one_part_point in tqdm(range(test_len)): # total_parts
            dpp = predict_future_open(model, val_data[input_window*(one_part_point):input_window*(one_part_point+1)],
                                      300,123123)
            mod = dpp[0].numpy()
            if (orig_data.size != 0): #check not empty
                org = orig_data[-1]
                diff = org-mod
                dpp = dpp + diff
            col_list.append(np.append(orig_data,dpp))
            orig_data = np.append(orig_data,dpp[:input_window])
        pyplot.savefig(f'./nmnm/test_plot.png')
        pyplot.close()
        plot_df = pd.DataFrame(col_list)
        trps = plot_df.transpose()
        trps.plot()
# Load per-model validation losses and join with instrument metadata.
loss_df = pd.read_excel('../valid_loss_map_df_5x (17).xlsx')
loss_df['name'] =''
type(loss_df['inst'][0])
all_inst = pd.read_excel('./all_inst.xlsx')
df3 = pd.merge(loss_df,all_inst,left_on=['inst'], right_on = ['instrument_token'], how = 'left')
df3['ltp']=0.0
df3 = df3[0:143]
# Fetch the last traded price (LTP) for each trading symbol.
# NOTE(review): uses `module.kite.quote`, but `module` is only defined in a
# commented-out login cell above; the live session here is `kite` — confirm.
inedx_counter = 0
for one_symbol in tqdm(df3.tradingsymbol):
    ltp = module.kite.quote([f'NSE:{one_symbol}'])[f'NSE:{one_symbol}']['last_price']
    df3.at[inedx_counter, 'ltp'] = ltp
    # print(one_symbol)
    # print(ltp)
    inedx_counter += 1
# +
# Evaluate every checkpoint (1..18) on all instruments in df3, recording
# each instrument's loss and the total loss per model.
model_loss_list =[]
criterion = nn.MSELoss()
for i in tqdm(range(18)):
    this_model = f'./best_model_multi{i+1}.pt'
    this_total_loss = 0.0
    model = torch.load(this_model, map_location=torch.device('cpu'))
    inedx_counter = 0
    # NOTE: df3['loss'] is overwritten by each model iteration; only the
    # last model's per-instrument losses survive in df3.
    df3['loss'] = 0.0
    for one_inst in tqdm(df3.inst.astype(dtype='int32')):
        _, val_data_ip = get_data2(one_inst)
        this_loss = plot_and_loss(model, val_data_ip, 1, one_inst)
        this_total_loss+=this_loss
        df3.at[inedx_counter, 'loss'] = this_loss
        inedx_counter+=1
    print(this_model)
    print(this_total_loss)
    model_loss_list.append({'model':this_model,'this_total_loss':this_total_loss})
model_loss_list_edf = pd.DataFrame(model_loss_list)
model_loss_list_edf
# +
# Detailed look at checkpoint 7: per-instrument loss, correlations with
# the last traded price, and distribution histograms.
this_model = f'./best_model_multi7.pt'
this_total_loss = 0.0
model = torch.load(this_model, map_location=torch.device('cpu'))
inedx_counter = 0
df3['loss'] = 0.0
for one_inst in tqdm(df3.inst.astype(dtype='int32')):
    _, val_data_ip = get_data2(one_inst)
    this_loss = plot_and_loss(model, val_data_ip, 1, one_inst)
    df3.at[inedx_counter, 'loss'] = this_loss
    inedx_counter+=1
print(this_model)
# NOTE(review): this_total_loss is never accumulated in this cell,
# so it always prints 0.0.
print(this_total_loss)
import matplotlib.pyplot as plt
plt.matshow(df3.corr())
plt.show()
df3['ltp_by_loss'] = df3['ltp']/df3['loss']
# df3[['ltp_by_loss']]
# Despite the "x10" names, both derived columns are scaled by 20.
df3['ltp_by_lossx10'] = df3['ltp_by_loss']*20
df3['lossx10'] = df3['loss']*20
df3[['ltp','lossx10','ltp_by_lossx10']].plot()
ax = df3[['ltp','lossx10','ltp_by_lossx10']].plot.hist(bins=100, alpha=0.3)
df3[df3.ltp_by_loss > 180]
# +
# Same analysis as above, for checkpoint 3.
this_model = f'./best_model_multi3.pt'
this_total_loss = 0.0
model = torch.load(this_model, map_location=torch.device('cpu'))
inedx_counter = 0
df3['loss'] = 0.0
for one_inst in tqdm(df3.inst.astype(dtype='int32')):
    _, val_data_ip = get_data2(one_inst)
    this_loss = plot_and_loss(model, val_data_ip, 1, one_inst)
    df3.at[inedx_counter, 'loss'] = this_loss
    inedx_counter+=1
print(this_model)
# NOTE(review): this_total_loss is never accumulated here either (always 0.0).
print(this_total_loss)
import matplotlib.pyplot as plt
plt.matshow(df3.corr())
plt.show()
df3['ltp_by_loss'] = df3['ltp']/df3['loss']
# df3[['ltp_by_loss']]
df3['ltp_by_lossx10'] = df3['ltp_by_loss']*20
df3['lossx10'] = df3['loss']*20
df3[['ltp','lossx10','ltp_by_lossx10']].plot()
ax = df3[['ltp','lossx10','ltp_by_lossx10']].plot.hist(bins=100, alpha=0.3)
df3[df3.ltp_by_loss > 180]
# -
# Persist the analysis table for later use.
df3.to_excel('./df3.xlsx',index=False)
import QuantConnect_Reserved
| Inference/inference_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # subplot
# %matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# +
# basic usage: two side-by-side line plots via subplot(rows, cols, index).
x1 = [0,1,2,3,4,5,6,7,8,9]
x2 = [1,3,2,5,4,7,6,9,8,11]
plt.figure(figsize=(5,2))
plt.subplot(121),plt.plot(range(len(x1)), x1),plt.title('Input')
plt.subplot(122),plt.plot(range(len(x2)), x2),plt.title('Output')
plt.show()
# +
# faces: Olivetti faces dataset (400 flattened 64x64 grayscale images).
from sklearn.datasets import fetch_olivetti_faces
dataset = fetch_olivetti_faces()
x_data = dataset.data
y_data = dataset.target
# -
# faces subplot: show the first 10 faces in a single row.
plt.figure(figsize=(50,10))
for i in range(10):
    sub = plt.subplot(1,10,i+1)
    array_img = x_data[i]
    #array to matrix
    m_img = array_img.reshape(64,64)
    sub.imshow(m_img)
plt.show()
# +
# faces subplot more lines: k rows of 10 faces each.
k = 3
plt.figure(figsize=(50,10*k))
for j in range(k):
    for i in range(10):
        sub = plt.subplot(k,10,i+1+10*j)
        array_img = x_data[i+10*j]
        #array to matrix
        m_img = array_img.reshape(64,64)
        sub.imshow(m_img)
plt.show()
# -
| matplotlib/sub_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JeanSosa/OOP-1-1/blob/main/Prelim_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="opupEwIxT9C3" outputId="23191365-0877-47d6-bfee-f86c66779c6e"
class Student:
    """Simple record of a student's basic enrolment details."""

    def __init__(self, fullname, student_number, age, school, course):
        self.fullname = fullname
        self.student_number = student_number
        self.age = age
        self.school = school
        self.course = course

    def info(self):
        """Print every stored field, one labelled field per line."""
        for label, value in (
            ("Fullname:", self.fullname),
            ("Student_number:", self.student_number),
            ("Age:", self.age),
            ("School:", self.school),
            ("Course:", self.course),
        ):
            print(label, value)

myself = Student("<NAME>", "202102079", "19","Cavite State University","BS COMPUTER ENGINEERING")
myself.info()
| Prelim_Exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Evaluating OpenCL Caffe caching mechanisms
# ## Table of Contents
# 1. [Overview](#overview)
# 1. [Experimental analysis code](#code) [for developers]
# 1. [Mali-T628](#mali_t628)
# 1. [Original caching mechanism](#mali_t628_original)
# 1. [Proposed caching mechanism](#mali_t628_proposed)
# 1. [Compare the proposed mechanism vs the original mechanism](#mali_t628_compare)
# 1. [GTX 1080](#gtx_1080)
# 1. [Original caching mechanism](#gtx_1080_original)
# 1. [Proposed caching mechanism](#gtx_1080_proposed)
# 1. [Compare the proposed mechanism vs the original mechanism](#gtx_1080_compare)
# <a id="overview"></a>
# ## Overview
# This Jupyter notebook studies the performance (speed) of OpenCL API build and compile calls for the Caffe framework built with the ViennaCL library using two mechanisms:
# - [`original`] the current ViennaCL caching mechanism, in which the program binary is cached after the `clBuildProgram()` call but before the `clCreateKernelsInProgram()` call;
# - [`proposed`] the new caching mechanism proposed by [dividiti](http://dividiti.com), in which the program binary is cached after both the `clBuildProgram()` and `clCreateKernelsInProgram()` calls;
#
# on two experimental platforms:
# - [[Mali-T628](#mali_t628)] ARM Mali-T628 GPU in the Odroid-XU3 development platform (GPU driver v12.0);
# - [[GTX 1080](#gtx_1080)] NVIDIA GTX 1080 GPU installed in a HP 640 workstation (GPU driver v375.26).
# Our experiments show that ARM's OpenCL implementation only compiles kernels when the user invokes the `clCreateKernelsInProgram()` API call; therefore, on the Mali-T628 platform, the original mechanism is ineffective; the proposed mechanism accelerates the OpenCL Caffe initialisation time by over 50 times on subsequent invocations.
#
# Our experiments also show that NVIDIA's OpenCL implementation compiles kernels when the user invokes the `clBuildProgram()` API call; therefore, on the GTX 1080 platform, both the original and the proposed mechanisms perform similarly.
# ### Common experimental setup
# The experiments were performed using the [Collective Knowledge](http://cknowledge.org) framework for reproducible and collaborative R&D using the following preparatory steps on each of the platforms.
# For simplicity, only one instance of Caffe, one instance of Caffe model and one instance of [dividiti's OpenCL profiler](https://github.com/dividiti/dvdt-prof) were registered on the platform at the time e.g.:
# ```
# $ ck show env --tags=caffemodel
# Env UID: Target OS: Bits: Name: Version: Tags:
# e8811419b9c1149c linux-32 32 Caffe model (net and weights) (bvlc, alexnet) trunk 32bits,alexnet,bvlc,caffe,caffemodel,host-os-linux-32,mirror,net,target-os-linux-32,v0,weights
# $ ck show env --tags=lib,caffe
# Env UID: Target OS: Bits: Name: Version: Tags:
# 53834f239eff6c18 linux-32 32 BVLC Caffe framework (opencl,viennacl) master-69f35c5 32bits,bvlc,caffe,host-os-linux-32,lib,target-os-linux-32,v0,v0.0,vmaster,vopencl
# $ ck show env --tags=dvdt,prof
# Env UID: Target OS: Bits: Name: Version: Tags:
# 2ea881239f688658 linux-32 32 dividiti's OpenCL API profiler (cjson) trunk 32bits,cjson,dividiti,dvdt,host-os-linux-32,opencl,prof,profiler,target-os-linux-32,tool,v0,vtrunk
# ```
# To start with, clean previous installations and experiments, and install ViennaCL source-only packages:
# 1. Clean all previous OpenCL Caffe caching mechanism experiments:
# ```
# $ ck rm local:experiment:original*cache* --force
# $ ck rm local:experiment:proposed*cache* --force
# ```
#
# 1. Clean all previous installations of Caffe with ViennaCL and ViennaCL sources:
# ```
# $ ck clean env --tags=lib,caffe,vviennacl
# $ ck clean env --tags=lib,viennacl,vsrc
# ```
#
# 1. Install [ViennaCL's master](https://github.com/viennacl/viennacl-dev) (**original**) and [dividiti's fork](https://github.com/dividiti/viennacl-dev) (**proposed**):
# ```
# $ ck install package:lib-viennacl-master-src
# $ ck install package:lib-viennacl-dvdt-src
# ```
#
# First, perform the **original** set of experiments:
# 1. Install Caffe with ViennaCL, while selecting [ViennaCL's master](https://github.com/viennacl/viennacl-dev):
# 1. On the GTX 1080 platform:
# ```
# $ ck install package:lib-caffe-bvlc-opencl-viennacl-universal
# ```
# Also, on this platform, `ck-caffe:program:caffe` sometimes fails after reporting the benchmarking results; to perform the requested number of repetitions even if some of them fail, change `"ignore_return_code":"no"` to `"ignore_return_code":"yes"` (for the `time_gpu` command in `program/caffe/.cm/meta.json`).
#
# 1. On the Mali-T628 platform:
# ```
# $ ck install package:lib-caffe-bvlc-opencl-viennacl-universal \
# --env.DISABLE_DOUBLE_SUPPORT=ON \
# --env.DISABLE_DEVICE_HOST_UNIFIED_MEMORY=ON \
# --env.CK_HOST_CPU_NUMBER_OF_PROCESSORS=3
# ```
# 1. Run the experiments as detailed below ([Mali-T628](#mali_t628_original) or [GTX 1080](#gtx_1080_original)).
#
# 1. Remove Caffe with ViennaCL (***necessary*** before performing the **proposed** set of experiments):
# ```
# $ ck clean env --tags=lib,caffe,vviennacl
# ```
#
# Then, perform the **proposed** set of experiments:
# 1. Install Caffe with ViennaCL, while selecting [dividiti's fork](https://github.com/dividiti/viennacl-dev) e.g.
# ```
# $ ck install package:lib-caffe-bvlc-opencl-viennacl-universal
# ```
#
# 1. Run the experiments as detailed below ([Mali-T628](#mali_t628_proposed) or [GTX 1080](#gtx_1080_proposed)).
# 1. Remove Caffe with ViennaCL (***optional***):
# ```
# $ ck clean env --tags=lib,caffe,vviennacl
# ```
# <a id="code"></a>
# ## Data wrangling code
# **NB:** Please ignore this section if you are not interested in re-running or modifying this notebook.
# ### Includes
# #### Standard
import os
import sys
import json
import re
# #### Date util
import dateutil.parser
# #### Scientific
# If some of the scientific packages are missing, please install them using:
# ```
# # pip install jupyter pandas numpy matplotlib
# ```
import IPython as ip
import pandas as pd
import numpy as np
import matplotlib as mp
# Record library versions so the notebook's results are reproducible.
print ('IPython version: %s' % ip.__version__)
print ('Pandas version: %s' % pd.__version__)
print ('NumPy version: %s' % np.__version__)
print ('Matplotlib version: %s' % mp.__version__)
import matplotlib.pyplot as plt
from matplotlib import cm
# %matplotlib inline
from IPython.display import Image
from IPython.core.display import HTML
# #### Collective Knowledge
# If CK is not installed, please install it using:
# ```
# # pip install ck
# ```
import ck.kernel as ck
print ('CK version: %s' % ck.__version__)
# ### Create, build and compile OpenCL API calls
# OpenCL API calls to create program.
# Names of the OpenCL API calls of interest; profiler trace entries are
# filtered against these lists when computing build/compile timings.
# OpenCL API calls to create program.
create_calls = [ 'clCreateProgramWithSource', 'clCreateProgramWithBinary' ]
# OpenCL API calls to build program.
build_calls = [ 'clBuildProgram' ]
# OpenCL API calls to compile kernels.
compile_calls = [ 'clCreateKernel', 'clCreateKernelsInProgram' ]
# All OpenCL API calls to create program, build program and compile kernels.
create_build_compile_calls = create_calls + build_calls + compile_calls
# ### Calculate time elapsed between two ISO timestamps
# +
# Return the difference between the end and start timestamps in seconds.
# Return the difference between the end and start timestamps in seconds.
def ts_delta_s(ts_end, ts_start):
    end_dt = dateutil.parser.parse(ts_end)
    start_dt = dateutil.parser.parse(ts_start)
    return (end_dt - start_dt).total_seconds()
# Return the difference between the end and start timestamps in milliseconds.
# Return the difference between the end and start timestamps in milliseconds.
def ts_delta_ms(ts_end, ts_start):
    return ts_delta_s(ts_end, ts_start) * 1e3
# -
# ### Access the results
def get_results(repo_uoa, common_tags):
    """Load dvdt_prof traces for all CK experiments in *repo_uoa* tagged with *common_tags*.

    Returns a DataFrame of per-call execution times (ms) for the OpenCL
    create/build/compile calls, row-indexed by (id, call, experiment) after the
    final stack(), with one column per repetition.  Exits the process on any
    CK access error (notebook-style error handling).
    """
    module_uoa = 'experiment'
    # Find all experiment entries carrying the common tags.
    r = ck.access({'action':'search', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'tags':common_tags})
    if r['return']>0:
        print ("Error: %s" % r['error'])
        exit(1)
    experiments = r['lst']
    experiment_dfs = []
    for experiment in experiments:
        data_uoa = experiment['data_uoa']
        r = ck.access({'action':'list_points', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'data_uoa':data_uoa})
        if r['return']>0:
            print ("Error: %s" % r['error'])
            exit(1)
        # Tags unique to this experiment (e.g. 'cache-cold') label its columns below.
        unique_tags = ','.join([ tag for tag in r['dict']['tags'] if tag not in common_tags])
        point_dfs = []
        for point in r['points']:
            point_file_path = os.path.join(r['path'], 'ckp-%s.0001.json' % point)
            with open(point_file_path) as point_file:
                print ('Reading: %s...' % point_file_path)
                point_data_raw = json.load(point_file)
            # Traces for all repetitions of this point.
            trace_list = [
                characteristics['run'].get('dvdt_prof',[]) for characteristics in point_data_raw['characteristics_list']
            ]
            # All OpenCL API calls to create program, build program and compile kernels.
            create_build_compile_dfs = []
            for trace in trace_list:
                # Only include the first repetition of the 'cache-cold' experiments
                # (as the subsequent ones are in fact 'cache-warm').
                if (unique_tags=='cache-cold' or unique_tags=='cuda-cache-cold') and create_build_compile_dfs: continue
                # Keep only the calls of interest; one dict per call with its wall-clock duration.
                create_build_compile_trace = [
                    { 'call' : call['call'], 'time_ms': ts_delta_ms(call['timestamp']['end'], call['timestamp']['start']) }
                    for call in trace if call['call'] in create_build_compile_calls
                ]
                create_build_compile_df = pd.DataFrame(create_build_compile_trace).set_index(['call'], append=True)
                create_build_compile_df.index.names = [ 'id', 'call' ]
                create_build_compile_dfs.append(create_build_compile_df)
            # Aggregate all calls (one column per kept repetition).
            point_df = pd.concat(create_build_compile_dfs, axis=1)
            point_dfs.append(point_df)
        # Aggregate all points.
        experiment_df = pd.concat(point_dfs)
        experiment_df.columns = [ [unique_tags]*len(experiment_df.columns), range(len(experiment_df.columns)) ]
        experiment_df.columns.names = [ 'experiment', 'repetition' ]
        experiment_dfs.append(experiment_df)
    # Aggregate all experiments.
    result_df = pd.concat(experiment_dfs, axis=1)
    # Convert to preferred format (unify clCreateProgram* calls, repetitions as columns, replace missing data with zeros).
    result_df = result_df \
        .rename(index={'clCreateProgramWithBinary':'clCreateProgram*', 'clCreateProgramWithSource':'clCreateProgram*'}) \
        .stack('experiment')
    return result_df
# ### Show the results
def show_results(results):
    """Return *results* unchanged after widening pandas' display limits so every row and column renders."""
    pd.options.display.max_rows = len(results.index)
    pd.options.display.max_columns = len(results.columns)
    return results
# ### Plot the results
def plot_results(results, title='Execution time (ms)', rot=0):
    """Bar-plot mean execution times (std as error bars), one bar group per experiment level."""
    means = results.mean(axis=1).unstack('experiment')
    stds = results.std(axis=1).unstack('experiment')
    top = means.max().max()
    means.plot(
        yerr=stds, kind='bar', title=title,
        rot=rot, figsize=[16, 8], ylim=[0, top * 1.05],
        grid=True, legend=True, colormap=cm.autumn
    )
# ### Compare the results
def compare_results(original, proposed, experiment, call):
    """Print the cumulative mean time (ms) of *call* under *experiment* for both mechanisms and the % change."""
    def _total_ms(results):
        # Mean across repetitions, summed over all call ids for this experiment/call pair.
        per_call = results \
            .reorder_levels(['experiment', 'call', 'id']) \
            .loc[experiment] \
            .loc[call]
        return per_call.mean(axis=1).sum()
    total_original = _total_ms(original)
    total_proposed = _total_ms(proposed)
    print ('[%s] all %s() calls w/ original: %.1f (ms)' % (experiment, call, total_original))
    print ('[%s] all %s() calls w/ proposed: %.1f (ms)' % (experiment, call, total_proposed))
    delta_pc = 100.0 * (total_proposed - total_original) / total_original
    print ('[%s] all %s() calls (proposed-original)/original: %.1f%%' % (experiment, call, delta_pc))
# <a id="mali_t628"></a>
# ## Mali-T628
# <a id="mali_t628_original"></a>
# ### Mali-T628 - original caching mechanism
# #### Experimental setup
# The experimental data were collected on the Mali-T628 experimental platform as follows:
# ```
# $ export VIENNACL_CACHE_DIR=/tmp/viennacl-cache/ && rm -rf $VIENNACL_CACHE_DIR && mkdir $VIENNACL_CACHE_DIR
# $ ck benchmark program:caffe \
# --record --record_repo=local --record_uoa=original-cache-none \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 \
# --tags=caffe,opencl,build,compile,original,mali-t628,cache-none
# $ ck benchmark program:caffe \
# --record --record_repo=local --record_uoa=original-cache-cold \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=1 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.VIENNACL_CACHE_PATH=$VIENNACL_CACHE_DIR \
# --tags=caffe,opencl,build,compile,original,mali-t628,cache-cold
# $ ck benchmark program:caffe \
# --record --record_repo=local --record_uoa=original-cache-warm \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.VIENNACL_CACHE_PATH=$VIENNACL_CACHE_DIR \
# --tags=caffe,opencl,build,compile,original,mali-t628,cache-warm
# ```
#
# The experimental data were archived as follows:
# ```
# $ ck zip local:experiment:original-cache* \
# --archive_name=ck-caffe-opencl-build-compile-original-mali-t628.zip
# ```
#
# The resulting archive was copied to another machine and prepared for analysis as follows:
# ```
# $ ck add repo:ck-caffe-opencl-build-compile-original-mali-t628 \
# --zip=ck-caffe-opencl-build-compile-original-mali-t628.zip --quiet
# ```
# #### Experimental analysis
# Load, display and plot the Mali-T628 results for the ORIGINAL caching mechanism.
repo_uoa = 'ck-caffe-opencl-build-compile-original-mali-t628'
common_tags = 'caffe,opencl,build,compile,original,mali-t628'
mali_t628_original = get_results(repo_uoa, common_tags)
show_results(mali_t628_original)
plot_results(mali_t628_original, rot=90)
# The `clCreateProgram*()` calls only take considerable time for the 'cache-warm' experiment (i.e. `clCreateProgramWithBinary()`), while the `clBuildProgram()` calls only take considerable time for the 'cache-none' and 'cache-cold' experiments.
# FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0; .loc is the
# label-based replacement (the labels here are call-name strings).
plot_results(mali_t628_original \
    .reorder_levels(['call', 'id', 'experiment']) \
    .loc[build_calls] \
    .reorder_levels(['id', 'call', 'experiment']))
# The execution time of the `clCreateKernelsInProgram()` calls, however, is practically the same whether using the original ViennaCL caching mechanism or not, which suggests it's simply ineffective on this platform.
plot_results(mali_t628_original \
    .reorder_levels(['call', 'id', 'experiment']) \
    .loc[compile_calls] \
    .reorder_levels(['id', 'call', 'experiment']))
# <a id="mali_t628_proposed"></a>
# ### Mali-T628 - proposed caching mechanism
# #### Experimental setup
# The experimental data were collected on the Mali-T628 experimental platform as follows:
# ```
# $ export VIENNACL_CACHE_DIR=/tmp/viennacl-cache/ && rm -rf $VIENNACL_CACHE_DIR && mkdir $VIENNACL_CACHE_DIR
# $ ck benchmark program:caffe \
# --record --record_repo=local --record_uoa=proposed-cache-none \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 \
# --tags=caffe,opencl,build,compile,proposed,mali-t628,cache-none
# $ ck benchmark program:caffe \
# --record --record_repo=local --record_uoa=proposed-cache-cold \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=1 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.VIENNACL_CACHE_PATH=$VIENNACL_CACHE_DIR \
# --tags=caffe,opencl,build,compile,proposed,mali-t628,cache-cold
# $ ck benchmark program:caffe \
# --record --record_repo=local --record_uoa=proposed-cache-warm \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.VIENNACL_CACHE_PATH=$VIENNACL_CACHE_DIR \
# --tags=caffe,opencl,build,compile,proposed,mali-t628,cache-warm
# ```
#
# The experimental data were archived as follows:
# ```
# $ ck zip local:experiment:proposed-cache* \
# --archive_name=ck-caffe-opencl-build-compile-proposed-mali-t628.zip
# ```
#
# The resulting archive was copied to another machine and prepared for analysis as follows:
# ```
# $ ck add repo:ck-caffe-opencl-build-compile-proposed-mali-t628 \
# --zip=ck-caffe-opencl-build-compile-proposed-mali-t628.zip --quiet
# ```
# #### Experimental analysis
# Load, display and plot the Mali-T628 results for the PROPOSED caching mechanism,
# then compare against the original mechanism.
repo_uoa = 'ck-caffe-opencl-build-compile-proposed-mali-t628'
common_tags = 'caffe,opencl,build,compile,proposed,mali-t628'
mali_t628_proposed = get_results(repo_uoa, common_tags)
show_results(mali_t628_proposed)
plot_results(mali_t628_proposed, rot=90)
# FIX: .ix was removed in pandas 1.0; .loc is the label-based replacement.
plot_results(mali_t628_proposed \
    .reorder_levels(['call', 'id', 'experiment']) \
    .loc[build_calls] \
    .reorder_levels(['id', 'call', 'experiment']))
plot_results(mali_t628_proposed \
    .reorder_levels(['call', 'id', 'experiment']) \
    .loc[compile_calls] \
    .reorder_levels(['id', 'call', 'experiment']))
# <a id="mali_t628_compare"></a>
# ### Mali-T628 - compare the original mechanism vs the proposed mechanism
compare_results(mali_t628_original, mali_t628_proposed, 'cache-warm', 'clCreateKernelsInProgram')
compare_results(mali_t628_original, mali_t628_proposed, 'cache-warm', 'clBuildProgram')
# <a id="gtx_1080"></a>
# ## GTX 1080
# <a id="gtx_1080_original"></a>
# ### GTX 1080 - original caching mechanism
# #### Experimental setup
# The experimental data were collected on the GTX 1080 experimental platform as follows:
# ```
# $ export CUDA_CACHE_DIR=$HOME/.nv/ComputeCache/ && rm -rf $CUDA_CACHE_DIR
# $ export VIENNACL_CACHE_DIR=/tmp/viennacl-cache/ && rm -rf $VIENNACL_CACHE_DIR && mkdir $VIENNACL_CACHE_DIR
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=original-cuda-cache-cold \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=1 \
# --env.CK_CAFFE_BATCH_SIZE=1 \
# --tags=caffe,opencl,build,compile,original,gtx-1080,cuda-cache-cold
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=original-cuda-cache-warm \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 \
# --tags=caffe,opencl,build,compile,original,gtx-1080,cuda-cache-warm
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=original-cache-none \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.CUDA_CACHE_DISABLE=1 \
# --tags=caffe,opencl,build,compile,original,gtx-1080,cache-none
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=original-cache-cold \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=1 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.CUDA_CACHE_DISABLE=1 --env.VIENNACL_CACHE_PATH=$VIENNACL_CACHE_DIR \
# --tags=caffe,opencl,build,compile,original,gtx-1080,cache-cold
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=original-cache-warm \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.CUDA_CACHE_DISABLE=1 --env.VIENNACL_CACHE_PATH=$VIENNACL_CACHE_DIR \
# --tags=caffe,opencl,build,compile,original,gtx-1080,cache-warm
# ```
#
# The experimental data were archived as follows:
# ```
# $ ck zip local:experiment:original*cache* \
# --archive_name=ck-caffe-opencl-build-compile-original-gtx-1080.zip
# ```
#
# The resulting archive was copied to another machine and prepared for analysis as follows:
# ```
# $ ck add repo:ck-caffe-opencl-build-compile-original-gtx-1080 \
# --zip=ck-caffe-opencl-build-compile-original-gtx-1080.zip --quiet
# ```
# #### Experimental analysis
# Load, display and plot the GTX 1080 results for the ORIGINAL caching mechanism.
repo_uoa = 'ck-caffe-opencl-build-compile-original-gtx-1080'
common_tags = 'caffe,opencl,build,compile,original,gtx-1080'
gtx_1080_original = get_results(repo_uoa, common_tags)
show_results(gtx_1080_original)
plot_results(gtx_1080_original, rot=90)
# FIX: .ix was removed in pandas 1.0; .loc is the label-based replacement.
plot_results(gtx_1080_original \
    .reorder_levels(['call', 'id', 'experiment']) \
    .loc[build_calls] \
    .reorder_levels(['id', 'call', 'experiment']))
plot_results(gtx_1080_original \
    .reorder_levels(['call', 'id', 'experiment']) \
    .loc[compile_calls] \
    .reorder_levels(['id', 'call', 'experiment']))
# <a id="gtx_1080_proposed"></a>
# ### GTX 1080 - proposed caching mechanism
# #### Experimental setup
# The experimental data were collected on the GTX 1080 experimental platform as follows:
# ```
# $ export CUDA_CACHE_DIR=$HOME/.nv/ComputeCache/ && rm -rf $CUDA_CACHE_DIR
# $ export VIENNACL_CACHE_DIR=/tmp/viennacl-cache/ && rm -rf $VIENNACL_CACHE_DIR && mkdir $VIENNACL_CACHE_DIR
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=proposed-cuda-cache-cold \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=1 \
# --env.CK_CAFFE_BATCH_SIZE=1 \
# --tags=caffe,opencl,build,compile,proposed,gtx-1080,cuda-cache-cold
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=proposed-cuda-cache-warm \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 \
# --tags=caffe,opencl,build,compile,proposed,gtx-1080,cuda-cache-warm
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=proposed-cache-none \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.CUDA_CACHE_DISABLE=1 \
# --tags=caffe,opencl,build,compile,proposed,gtx-1080,cache-none
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=proposed-cache-cold \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=1 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.CUDA_CACHE_DISABLE=1 --env.VIENNACL_CACHE_PATH=$VIENNACL_CACHE_DIR \
# --tags=caffe,opencl,build,compile,proposed,gtx-1080,cache-cold
# $ ck benchmark program:caffe \
# --record --record_failed \
# --record_repo=local --record_uoa=proposed-cache-warm \
# --dvdt_prof --skip_stat_analysis \
# --cmd_key=time_gpu --cpu_freq=max --repetitions=3 \
# --env.CK_CAFFE_BATCH_SIZE=1 --env.CUDA_CACHE_DISABLE=1 --env.VIENNACL_CACHE_PATH=$VIENNACL_CACHE_DIR \
# --tags=caffe,opencl,build,compile,proposed,gtx-1080,cache-warm
# ```
#
# The experimental data were archived as follows:
# ```
# $ ck zip local:experiment:proposed*cache* \
# --archive_name=ck-caffe-opencl-build-compile-proposed-gtx-1080.zip
# ```
#
# The resulting archive was copied to another machine and prepared for analysis as follows:
# ```
# $ ck add repo:ck-caffe-opencl-build-compile-proposed-gtx-1080 \
# --zip=ck-caffe-opencl-build-compile-proposed-gtx-1080.zip --quiet
# ```
# #### Experimental analysis
# Load, display and plot the GTX 1080 results for the PROPOSED caching mechanism,
# then compare against the original mechanism.
repo_uoa = 'ck-caffe-opencl-build-compile-proposed-gtx-1080'
common_tags = 'caffe,opencl,build,compile,proposed,gtx-1080'
gtx_1080_proposed = get_results(repo_uoa, common_tags)
show_results(gtx_1080_proposed)
plot_results(gtx_1080_proposed, rot=90)
# FIX: .ix was removed in pandas 1.0; .loc is the label-based replacement.
plot_results(gtx_1080_proposed \
    .reorder_levels(['call', 'id', 'experiment']) \
    .loc[build_calls] \
    .reorder_levels(['id', 'call', 'experiment']))
plot_results(gtx_1080_proposed \
    .reorder_levels(['call', 'id', 'experiment']) \
    .loc[compile_calls] \
    .reorder_levels(['id', 'call', 'experiment']))
# <a id="gtx_1080_compare"></a>
# ### GTX 1080 - compare the original mechanism vs the proposed mechanism
compare_results(gtx_1080_original, gtx_1080_proposed, 'cache-warm', 'clBuildProgram')
compare_results(gtx_1080_original, gtx_1080_proposed, 'cache-warm', 'clCreateKernelsInProgram')
| script/explore-opencl-build-compile-time/explore-opencl-build-compile-time.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Install churnmodels
# In the project root folder open a terminal and run
# > python setup.py sdist
#
# A new package will be created and stored in the subfolder "./dist"
# To install the package run pip install, e.g.:
# > pip install dist/churnmodels-0.0.1.tar.gz
#
import churnmodels
print(churnmodels.__version__)
# expected outcome : the actual version number
# ## Open a DB session
#
# ### DBs with schemas
# +
# if connecting to an DB using schemas we need to run the following line BEFORE importing the churnmodels schema
# I) set the environment variable CHURN_DB_SCHEMA
# os.environ["CHURN_DB_SCHEMA"]="biznet"
# II) import the churnmodels library
from churnmodels.schema import Subscription, Event, Account
# -
# ### Open an SQLite DB session
# +
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Select the SQLite dialect BEFORE importing the churnmodels schema, which reads it.
os.environ["CHURN_DB_DIALECT"] = "sqlite"
if os.environ["CHURN_DB_DIALECT"] == "sqlite":
    from churnmodels.schema import Subscription, Event, Account
    # the following example will depend on a sqlite db
    # NOTE(review): assumes ../churn.db exists relative to the notebook — confirm.
    sqlfile="../churn.db"
    engine = create_engine(f"sqlite:///{sqlfile}")
    session = sessionmaker(bind=engine)()
# -
# ### Open a PostGres DB session
# +
# for a postgres we do this...
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# it is very important that we first set the environment variables ...
# which type of DB do we have?
os.environ["CHURN_DB_DIALECT"]= "postgres" # given that sqlite is the default we actually do not need this line
if os.environ["CHURN_DB_DIALECT"] == "postgres":
    # we need to give DB-name, user and password
    model="biznet1"
    os.environ["CHURN_DB"]= "churn"
    os.environ["CHURN_DB_SCHEMA"]= model
    os.environ["CHURN_DB_USER"]= "postgres"
    os.environ["CHURN_DB_PASS"]= "password"
    # ...having set the environment variables, our SQLAlchemy model will incorporate them, recognizing that we want to set up a Postgres DB
    from churnmodels import schema
    user=os.environ["CHURN_DB_USER"]
    pw=os.environ["CHURN_DB_PASS"]
    dbname=os.environ["CHURN_DB"]
    # FIX: this local was previously named `schema`, shadowing (and clobbering)
    # the `schema` module imported three lines above.
    schema_name=os.environ["CHURN_DB_SCHEMA"]
    database_uri = f"postgresql://{user}:{pw}@localhost:5432/{dbname}"
    engine = create_engine(database_uri)
    session = sessionmaker(bind=engine)()
# -
# ## Open a DB session
# ## Extra Code for dates
# We need these libs and functions to deal with dates
# +
from datetime import datetime
from dateutil.relativedelta import relativedelta
def days_between(d1, d2):
    """Absolute number of days between two 'YYYY-MM-DD' date strings."""
    fmt = "%Y-%m-%d"
    delta = datetime.strptime(d2, fmt) - datetime.strptime(d1, fmt)
    return abs(delta.days)
def pretty_sql(q1):
    """Debug helper: render the query's SQL with bound literals inlined, re-indented, keywords upper-cased."""
    import sqlparse
    raw_sql = str(q1.statement.compile(engine, compile_kwargs={"literal_binds": True}))
    return sqlparse.format(raw_sql, reindent=True, keyword_case='upper')
# -
# ## Net Retention (§2.1)
#
# +
from sqlalchemy import func, or_
from churnmodels.schema import Subscription, Event
import pandas as pd
"""
-- PostGres SQL for start_accounts:
select account_id, sum (mrr) as total_mrr
from subscription s inner join date_range d on
s.start_date <= d.start_date
and (s.end_date > d.start_date or s.end_date is null)
group by account_id
"""
# Analysis window boundaries (ISO date strings, compared against Subscription dates).
d_start_date = "2020-01-01"
d_end_date = "2020-03-01"
# NOTE(review): the two assignments above are immediately overwritten and are dead code.
d_start_date = "2020-03-01"
d_end_date = "2020-04-01"
# I) start_accounts: per-account total MRR of subscriptions active on d_start_date.
q_start_accounts = session.query(Subscription.account_id, func.sum(Subscription.mrr).label("total_mrr"))\
    .filter(
        # SQL: s.start_date <= d.start_date
        Subscription.start_date <= d_start_date,
        # SQL: s.end_date > d.start_date or s.end_date is null
        or_(Subscription.end_date > d_start_date, Subscription.end_date == None))\
    .group_by(Subscription.account_id) # SQL: group by account_id
# getting the result from the DB stored into a pandas DataFrame
start_accounts = pd.read_sql(q_start_accounts.statement, engine).set_index("account_id")
print(start_accounts)
# -
# II) end_accounts
# II) end_accounts: per-account total MRR of subscriptions still active on d_end_date.
q_end_accounts = session.query(Subscription.account_id, func.sum(Subscription.mrr).label("total_mrr")).filter(
    Subscription.start_date <= d_end_date,
    or_(Subscription.end_date > d_end_date, Subscription.end_date == None)).group_by(Subscription.account_id)
# q = q.filter(Subscription.account_id==64)
end_accounts = pd.read_sql(q_end_accounts.statement, engine).set_index("account_id")
print(end_accounts)
# III) retained_accounts <- inner join on start_accounts and end_accounts
# the line shows how to realize an inner join with pandas:
retained_accounts = pd.merge(start_accounts, end_accounts, on="account_id")
print(retained_accounts)
# the resulting columns total_mrr_x, total_mrr_y are identical
# (the _x/_y suffixes come from merging two frames that share the column name total_mrr)
# Alternatively to the pandas call we can combine all to a single SQL statement
# If subqueries are large it is more efficient not to store the subselects into pandas DataFrames
# With sqlalchemy we can simply join two subqueries:
# +
# III) retained_accounts (better alternative)
# Alternatively to the pandas call we can combine all to a single SQL statement
# If subqueries are large it is more efficient not to store the subselects into pandas DataFrames
# With sqlalchemy we can simply join two subqueries:
# Join the two subqueries server-side instead of materializing both in pandas first.
qe_s=q_end_accounts.subquery()
qs_s=q_start_accounts.subquery()
q_retained_accounts = session.query(qs_s.c.account_id, qe_s.c.total_mrr)\
    .select_from(qs_s.join(qe_s, qs_s.c.account_id == qe_s.c.account_id)) #<- see the reference to "c" (for columns) of the subquery object
# let's have a look at the SQL statement sqlalchemy produces
#print(f"{qretained.statement}\n")
# reading into pandas
retained_accounts = pd.read_sql(q_retained_accounts.statement, engine).set_index("account_id")
retained_accounts.sort_values(by=['account_id'], inplace=True)
print(retained_accounts)
#print(pretty_sql(q_retained_accounts))
# +
# Sum the MRR of each cohort; `or 0` guards against SUM() returning NULL on an empty set.
start_mrr = session.query(func.sum(q_start_accounts.subquery().c.total_mrr)).one()[0] or 0
retain_mrr = session.query(func.sum(q_retained_accounts.subquery().c.total_mrr)).one()[0] or 0
net_mrr_retention_rate=retain_mrr /start_mrr # <- share of starting MRR that survived ("retained")
net_mrr_churn_rate = 1.0 - retain_mrr /start_mrr # <- churned share, complementary to net_mrr_retention_rate
df=pd.DataFrame.from_dict({
    "net_mrr_retention_rate":[net_mrr_retention_rate],
    "net_mrr_churn_rate":[net_mrr_churn_rate],
    "start_mrr":[start_mrr],
    "retain_mrr":[retain_mrr]
})
print(df)
# -
# ## Churn Rate (§2.2)
# +
# we keep the start and end accounts from above
# 2.2.III) churned_accounts <- start_accounts LEFT OUTER JOIN to end_accounts
# the line shows how to realize an inner join with pandas:
# LEFT OUTER JOIN start accounts to end accounts, keeping only the rows with no
# match at the end of the window — i.e. the accounts that churned.
qe_s=q_end_accounts.subquery()
q_churned_accounts = q_start_accounts.join(qe_s, Subscription.account_id == qe_s.c.account_id, isouter=True).filter(qe_s.c.account_id == None)
# reading into pandas
churned_accounts = pd.read_sql(q_churned_accounts.statement, engine).set_index("account_id")
print(churned_accounts)
# +
# Count accounts in each cohort; `or 0` guards against a NULL aggregate.
n_start = session.query(func.count(q_start_accounts.subquery().c.account_id)).one()[0] or 0
n_churn = session.query(func.count(q_churned_accounts.subquery().c.account_id)).one()[0] or 0
churn_rate=n_churn /n_start # <- churned
retention_rate = 1.0 - n_churn /n_start # <- "survived", i.e. complementary = 1-churn_rate
df=pd.DataFrame.from_dict({
    "churn_rate":[churn_rate],
    "retention_rate":[retention_rate],
    "n_start":[n_start],
    "n_churn":[n_churn]
})
print(df)
# -
# ## Activity Churn (§2.3)
# +
#
# An account is "active" at a date if it produced at least one Event within the
# preceding inactivity interval (one month).
start_date = datetime.strptime(d_start_date, "%Y-%m-%d")
end_date = datetime.strptime(d_end_date, "%Y-%m-%d")
inactivity_interval=relativedelta(months=+1)
start_date_int = start_date-inactivity_interval
end_date_int = end_date-inactivity_interval
# I) start_accounts
# NOTE(review): the filter mixes a datetime (start_date_int) and a string
# (d_start_date) bound — relies on the DB coercing both; confirm per dialect.
q_start_accounts_int = session.query(Event.account_id)\
    .filter(Event.event_time > start_date_int, Event.event_time <= d_start_date)\
    .distinct()
start_accounts_int = pd.read_sql(q_start_accounts_int.statement, engine).set_index("account_id")
nn_start = session.query(func.count(q_start_accounts_int.subquery().c.account_id)).one()[0] or 0
# -
# II) end_accounts
# II) end_accounts: accounts with at least one event in the month before d_end_date.
q_end_accounts_int = session.query(Event.account_id)\
    .filter(Event.event_time > end_date_int, Event.event_time <= d_end_date)\
    .distinct()
end_accounts_int = pd.read_sql(q_end_accounts_int.statement, engine).set_index("account_id")
nn_end = session.query(func.count(q_end_accounts_int.subquery().c.account_id)).one()[0] or 0
# +
# Activity-churned accounts: active at the start of the window, no activity at the end
# (LEFT OUTER JOIN with a NULL filter, as in the subscription-churn cell above).
qe_s_int=q_end_accounts_int.subquery()
q_churned_accounts_int = q_start_accounts_int.join(qe_s_int, Event.account_id == qe_s_int.c.account_id, isouter=True)\
    .filter(qe_s_int.c.account_id == None)\
    .distinct()
# reading into pandas
churned_accounts_int = pd.read_sql(q_churned_accounts_int.statement, engine).set_index("account_id")
nn_churn = session.query(func.count(q_churned_accounts_int.subquery().c.account_id)).one()[0] or 0
churn_rate_int=nn_churn / nn_start # <- churned
retention_rate_int = 1.0 - nn_churn / nn_start # <- "survived", i.e. complementary = 1-churn_rate
df=pd.DataFrame.from_dict({
    "churn_rate_int":[churn_rate_int],
    "retention_rate_int":[retention_rate_int],
    "nn_start":[nn_start],
    "nn_churn":[nn_churn]
})
print(df)
#print(pretty_sql(q_churned_accounts_int))
# -
# ## MRR Churn (§2.4)
# +
# Downsell accounts: still present at the end of the window but paying less than at the start.
qs=q_start_accounts.subquery()
qe=q_end_accounts.subquery()
q_downsell_accounts = session.query(qs.c.account_id, (qs.c.total_mrr-qe.c.total_mrr).label("downsell_amount"))\
    .join(qe, qs.c.account_id == qe.c.account_id)\
    .filter(qe.c.total_mrr < qs.c.total_mrr)
pd.read_sql(q_downsell_accounts.statement, engine).set_index("account_id")
# +
# MRR churn = (MRR lost to full churn + MRR lost to downsells) / starting MRR.
start_mrr = session.query(func.sum(q_start_accounts.subquery().c.total_mrr)).one()[0] or 0
churn_mrr = session.query(func.sum(q_churned_accounts.subquery().c.total_mrr)).one()[0] or 0
downsell_mrr = session.query(func.sum(q_downsell_accounts.subquery().c.downsell_amount)).one()[0] or 0
mrr_churn_rate = (churn_mrr+downsell_mrr) /start_mrr
df=pd.DataFrame.from_dict({
    "mrr_churn_rate":[mrr_churn_rate],
    "start_mrr":[start_mrr],
    "churn_mrr":[churn_mrr],
    "downsell_mrr":[downsell_mrr],
})
print(df)
# -
# ## Churn Rate scaled (§2.5)
# +
# Re-scale the churn measured over the observation window to annual and monthly rates.
period_days = days_between(d_start_date, d_end_date)
measured_churn = n_churn / n_start
# Survival compounds: annualized churn = 1 - (survival over the window)^(365 / window_days).
annual_churn = 1.0 - (1.0 - measured_churn) ** (365.0 / period_days)
monthly_churn = 1.0 - (1.0 - measured_churn) ** (365.0 / 12.0 / period_days)
df = pd.DataFrame.from_dict({
    "n_start": [n_start],
    "n_churn": [n_churn],
    "measured_churn": [measured_churn],
    "period_days": [period_days],
    "annual_churn": [annual_churn],
    "monthly_churn": [monthly_churn]
})
print(df)
# -
| listings/part1/jup_chap02_measuring-churn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 64-bit (conda)
# metadata:
# interpreter:
# hash: 524e93b8e0226315aee682eacc6843c81ddb16bd5c2ad8dc8495f33ccc2f5477
# name: python3
# ---
#all imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Code from <NAME> to calculate kx+1 series
# +
def coll_conj(a,k):
    """Iterate the kx+1 map from *a*, recording in the module-global glob_lst the
    minimum element of any cycle detected within main_counter+1 steps.

    Returns *a* itself if it is already a known cycle representative, the cycle
    minimum if a new cycle is closed, and (implicitly) None if the step budget
    runs out first.  Relies on module globals glob_lst and main_counter.
    """
    lst=[]
    ctr=0
    while ctr<main_counter+1:
        # Already reached a known cycle representative — nothing new to record.
        if a in glob_lst:
            return(a)
        # Revisited a value from this trajectory: a cycle closed; store its minimum.
        if a in lst:
            b=lst.index(a)
            min_el=min(lst[b:])
            glob_lst.append(min_el)
            return(min_el)
        elif a%2==0:
            lst.append(a)
            a=a//2
        else:
            # Odd step of the generalized Collatz map: (k*a+1)//2
            # (k*a+1 is even for odd a and odd k, so the division is exact).
            lst.append(a)
            a=(k*a+1)//2
        ctr+=1
# NOTE(review): `global` at module level is a no-op; glob_lst/main_counter are
# ordinary module globals read and mutated by coll_conj above.
global glob_lst
global main_counter
main_counter=100       # step budget per trajectory in coll_conj
start=1                # first (odd) k to explore
end=5                  # last k to explore
iterations=1000        # odd starting values 1..iterations are tried per k
df = pd.DataFrame(columns = ['k', 'sequence'])
for k in range(start,end+1,2):
    glob_lst=[]
    for a in range(1,iterations+1,2):
        coll_out=coll_conj(a,k)
    # One row per k: the cycle minima discovered across all starting values.
    df.loc[k] = [k, glob_lst]
df.head(10)
# +
import random as r
def prime(n):
    """Sieve of Eratosthenes over the odd numbers, returning primes up to roughly n.

    Control flow is exception-driven: IndexError on k[(a-3)//2] ends the inner
    marking loop, and the outer bare except returns [2] for small n where the
    index bookkeeping runs off the list.  NOTE(review): for small inputs this
    returns just [2] even when larger primes exist — callers appear to tolerate it.
    """
    n=int(n)+2
    # k holds the odd candidates 3,5,...; composites are overwritten with 0.
    k=list(range(3,n+1,2))
    if n%2==0:
        len_k=(n-4)/2
    else:
        len_k=(n-3)/2
    i=0
    try:
        while True:
            if k[i]==0:
                pass
            else:
                j=3
                while True:
                    # Mark odd multiples of k[i]; candidate c lives at index (c-3)//2.
                    a=k[i]*j
                    try:
                        k[(a-3)//2]=0
                    except:
                        break
                    j+=2
            i+=1
            if i==len_k:
                break
        # Collect the survivors (plus 2, the only even prime).
        prime_lst=[2]
        for i in k:
            if i==0:
                continue
            else:
                prime_lst+=[i]
        return(prime_lst)
    except:
        # Index bookkeeping failed (small n): fall back to the minimal answer.
        return([2])
def coprime(a,b):
    """Return True iff a and b are coprime (share no common prime factor).

    Bug fix: the original returned inside the first loop iteration, so only the
    first candidate prime was ever tested — e.g. coprime(9, 15) wrongly returned
    True because 2 divides neither.  Delegating to math.gcd is both correct and
    faster than trial division against a sieved prime list.
    """
    from math import gcd
    return gcd(a, b) == 1
def factor(a,j=[]):
    """Return the prime factorization of *a* as a list (with multiplicity), recursing on the cofactor.

    *j* is the candidate prime list; on the outermost call it is computed from
    prime(sqrt(a)).  NOTE: j=[] is a mutable default argument — benign here
    because j is only read, never mutated.
    """
    if j==[]:
        j=prime(a**(1/2))
    fact_lst=[]
    for i in j:
        if a%i==0:
            # Found the smallest prime factor; recurse on the cofactor with the same list.
            fact_lst.append(i)
            new=a//i
            fact_lst=fact_lst+factor(new,j)
            break
    if fact_lst==[]:
        # No candidate divides a: a itself is (treated as) prime.
        fact_lst=[a]
    return(fact_lst)
def coll_num(up,dn,k):
    """Compute the cycle numerator for a kx+1 cycle with per-segment up-steps *up* and down-steps *dn*.

    Each term i contributes 2^(sum of earlier up+dn) * k^(later ups) * (k^up[i] - 2^up[i]).
    Raises Exception if the two step lists differ in length.
    """
    if len(up) != len(dn):
        raise Exception ('U and D must have same length')
    total = 0
    two_acc = 1              # 2^(u+d steps consumed so far)
    k_acc = k ** sum(up)     # k^(up steps still remaining)
    for u, d in zip(up, dn):
        k_acc //= k ** u
        total += two_acc * k_acc * (k ** u - 2 ** u)
        two_acc *= 2 ** (u + d)
    return total
def fact_coll(up,dn,k):
    """Return (numerator, its factors, denominator, its factors) for the candidate cycle.

    The denominator is (2^sum(up+dn) - k^sum(up)) * (k - 2); a negative value
    means no cycle exists for this step pattern, which raises.
    """
    numer = coll_num(up, dn, k)
    denom = (2 ** sum(up + dn) - k ** sum(up)) * (k - 2)
    if denom < 0:
        raise Exception('No Cycle Exists')
    return (numer, factor(numer), denom, factor(denom))
def first_cycle(up,dn,k):
    """Find the smallest multiplier that makes the cycle integral for kx+c.

    Returns (prod, missing_factors, t0): the product of denominator prime
    factors absent from the numerator, those factors as a list, and the first
    cycle element t0.
    """
    numer, numer_factors, denom, denom_factors = fact_coll(up, dn, k)
    missing = []
    prod = 1
    for p in denom_factors:
        if p not in numer_factors:
            missing += [p]
            prod *= p
    t0 = numer * prod // denom
    return (prod, missing, t0)
def ran_cycle(k):
    """Draw random up/down step patterns until one admits a cycle for the kx+1 map.

    Returns (pattern_length, up_steps, down_steps, denominator, multiplier).

    Bug fixes: both the recursive retry (`ran_cycle()`) and the helper call
    (`first_cycle(u,d)`) previously dropped the required *k* argument and would
    raise TypeError at runtime.
    """
    rand=r.randrange(1,15)
    u=[]
    d=[]
    for i in range(rand):
        u+=[r.randrange(1,4)]
        d+=[r.randrange(1,4)]
    sum_u=sum(u)
    sum_d=sum(d)
    if 2**(sum_u+sum_d)-k**sum_u <0:
        # No cycle possible for this pattern — retry with a fresh random draw.
        return(ran_cycle(k))
    c=first_cycle(u,d,k)
    return(rand,u,d,c[2],c[0])
def first_cycle_output(up,dn,k):
    # Pretty-print the first kx+c expression admitting a cycle with this up/down
    # pattern, along with the cycle's first element t0.
    a=first_cycle(up,dn,k)
    print(k,'x+',a[0],' is the first expression with the first cycle with these up and downs and '
        ,a[2],' is the first element of the cycle',sep='')
# Example: up=[2], dn=[3], k=5.
first_cycle_output([2],[3],5)
# -
| 04 Cycles/Python/notebook/playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# > @Date : 2021-01-19 10:33:32
# >
# > @Author : <NAME> (<EMAIL>)
# >
# > @Link : github.com/taseikyo
#
# # Python 正则表达式 特殊符号和字符
#
# > 原文:https://www.kingkk.com/2017/12/%E6%AD%A3%E5%88%99%E8%A1%A8%E8%BE%BE%E5%BC%8F%E7%89%B9%E6%AE%8A%E7%AC%A6%E5%8F%B7%E5%92%8C%E5%AD%97%E7%AC%A6 kingkk 2017-12-04
# ## 择一匹配符号:`|`
# +
import re

# Alternation `|` matches any one of the listed patterns:
# here "bat", "bet", or "bit".
bt = 'bat|bet|bit'
m = re.match(bt, 'bat')
print(m.group() if m else "None match!")
# -
m = re.match(bt, 'bot')
if m: print(m.group())
else: print("None match!")
m = re.search(bt, 'he bit me')
if m: print(m.group())
else: print("None match!")
# ## 匹配任意单个字符(包括空格,除了换行符\n):`.`
#
# tip:若想搜索真正的点号可以通过 `\` 转义
anyend = '.end'
# 匹配任意字符
m = re.match(anyend, 'bend')
if m: print(m.group())
else: print("None match!")
# 不匹配无字符
m = re.match(anyend, 'end')
if m: print(m.group())
else: print("None match!")
# 不匹配换行
m = re.match(anyend, '\nend')
if m: print(m.group())
else: print("None match!")
# 匹配 ' '
m = re.match(anyend, ' end')
if m: print(m.group())
else: print("None match!")
# ## 创建字符集: [ ]
#
# [abc] 匹配 a 或 b 或 c 中任意一个字符
m = re.match('[cr][23][dp][o2]', 'r2d2')
if m: print(m.group())
else: print("None match!")
m = re.match('[cr][23][dp][o2]', 'c3eo')
if m: print(m.group())
else: print("None match!")
# ## 分组
#
# group() 通常用于显示所有匹配部分,但也可以用于取各个子组 groups() 可以用来获取一个包含所有匹配字符串的元组
# +
m = re.match('(\w\w\w)-(\d\d\d)','abc-123')
m.group()
# 'abc-123'
# 子组 1
m.group(1)
# 'abc'
# 子组 2
m.group(2)
# '123'
# 全部子组,存放与元组中
m.groups()
# ('abc', '123')
# -
# 不存在子组
m = re.match('ab','ab')
m.group(), m.groups()
# 一个子组
m = re.match('(ab)','ab')
m.group(), m.group(1), m.groups()
# +
# 两个嵌套子组
m = re.match('(a(b))','ab')
m.group(), m.group(1), m.group(2), m.groups()
# -
# ## 匹配起始结尾、边界单词
#
# 例:(tips 该操作符多用于 search 而不是 match)
#
# - ^form 匹配以 form 作为起始的字符串
# - form$ 匹配以 form 为结尾的字符串
# - ^form$ 等价于匹配单个 form 字符串
# - \b 匹配边界字符串
# - \B 匹配非边界字符串
m = re.search('^is', 'is a dog')
if m: print(m.group())
else: print("None match!")
m = re.search('is$', 'is a dog')
if m: print(m.group())
else: print("None match!")
m = re.search('^dog$', 'is a dog')
if m: print(m.group())
else: print("None match!")
m = re.search('^is a dog$', 'is a dog')
if m: print(m.group())
else: print("None match!")
# 前后出现空格或者换行时都属于 \b 的边界匹配
# 匹配空格作为边界
m = re.search(r'\bdog', 'is a dog')
if m: print(m.group())
else: print("None match!")
# 匹配换行符作为边界
m = re.search(r'\bdog', 'is a\ndog')
if m: print(m.group())
else: print("None match!")
# 匹配失败
m = re.search(r'\bog', 'is a dog')
if m: print(m.group())
else: print("None match!")
# \B 匹配非边界字符串
m = re.search(r'\Bog', 'is a dog')
if m: print(m.group())
else: print("None match!")
# tips: 使用 r'xxx'的原始字符串避免正则匹配时的转义
# ## 脱字符 `^`
#
# 直接使用表示匹配字符串的起始部分 紧跟在左括号右边表示不匹配给定字符集,例如
#
# - [^\n] 不匹配换行符
# - [^aeiou] 不匹配元音字符
re.findall('[^aeiou]', 'abcdefg')
# ## 拓展符号
#
# (?iLmsux) 用以标记并实现某些功能
#
# 这里的”i”, “L”, “m”, “s”, “u”, “x”,它们不匹配任何字串,而是表示对应 python 中 re 模块当中的 (re.I, re.L, re.M, re.S, re.U, re.X) 的 6 种选项。
#
# ```
# I = IGNORECASE # 忽略大小写
# L = LOCALE # 字符集本地化,为了支持多语言版本的字符集使用环境
# U = UNICODE # 使用\w,\W,\b,\B这些元字符时将按照UNICODE定义的属性
# M = MULTILINE # 多行模式,改变 ^ 和 $ 的行为
# S = DOTALL # '.' 的匹配不受限制,包括换行符
# X = VERBOSE # 冗余模式,可以忽略正则表达式中的空白和#号的注释
# ```
#
# ### re.I/IGNORECASE (忽略大小写的匹配)
re.findall(r'(?i)yes', 'Yes?yes.Yes!!')
re.findall(r'yes', 'Yes?yes.Yes!!', re.I)
re.findall(r'(?i)th\w+', 'The?tHoEs.tHat!!')
re.findall(r'th\w+', 'The?tHoEs.tHat!!', re.I)
# ### re.M/MULTILINE(进行跨行搜索)
# 匹配 th 开头的段落
re.findall(r'(?m)(^th[\w]+)',"""
this is first line,
another line,
that line,it's the best
""")
re.findall(r'(^th[\w]+)',"""
this is first line,
another line,
that line,it's the best
""", re.M)
# ### re.S/DOTALL (使点号 . 可以用来表示换行符 \n)
# 成功匹配到 \ n
re.findall(r'(?s)(th.+)',"""
this is first line,
another line,
that line,it's the best
""")
# 成功匹配到 \ n
re.findall(r'(th.+)',"""
this is first line,
another line,
that line,it's the best
""", re.S)
# ### re.X/VERBOSE (抑制正则表达式中的空白符,以创建更易读的正则表达式)
#
# tips: 空格符可用 [ ] 等字符类代替,并且可以在正则中通过井号 # 来注释
re.search(r'''(?x)
\((\d{3})\) #区号
[ ] #空白符
(\d{3}) #前缀
- #横线
(\d{4}) #终点数字''',
'(800) 555-1212').groups()
# ### `(?:)` 对正则表达式进行分组,但不保存该分组 常用于需要对该分组进行 +* 等操作,但又不需要将该分组提取的情况
# 使用 (?:) 时,groups 中并未出现 code
m = re.match(r'http://(?:\w+\.)*(\w+\.com)','http://code.google.com')
m.groups()
# 未使用 (?:) 时,groups 中出现 code
m=re.match(r'http://(\w+\.)*(\w+\.com)', 'http://code.google.com')
m.groups()
# 仅提取域名
re.findall(r'http://(?:\w+\.)*(\w+\.com)',
'http://google.com http://www.google.com http://code.google.com')
# ### (?P=name) 使用自定义表示符,而并非 1 至 N 递增
#
# +
astr1 = 'AAbb aabb'
astr2 = 'aabb aabb'
astr3 = 'aabb ccbb'
for s in astr1,astr2,astr3:
result = re.match(r'(?i)(?P<pattern>[\w]+) (?P=pattern)',s)
if result:
print(f'{s} match result is {result.group()}')
else:
print(f'{s} match Nothing')
# -
# 从下面例子可以看到 (`?P<pattern>`) 可以用来标记一些模糊的模式,然后在同一个正则表达式中,我们可以通过 (`?P=pattern`) 来复用之前的内容,这就是它们两个的用法区别,一个用于标记,一个用于在同一个正则表达式中复用,特别提醒:同一个正则表达式,这是因为,有些时候我们可能需要在不同的表达式中复用,如下
astr = 'aabb aacc aadd'
result = re.sub(r'(?i)aa(?P<pattern>[\w]+)',r'bb\g<pattern>',astr)
print(f'{astr} has been substituted to {result}')
# 可以看到 `\g<name>` 可以用于不同正则表达式的引用!
# ### (?=) 正向前视匹配断言
#
# 所谓的前视,就是往正则匹配方向的前方,我们所谓的后方进行判断。通过添加一些判断条件,使匹配更加精准。 匹配后续内容等于 (?=) 中内容的字符串
#匹配域名为 google.com 的用户名
re.findall(r'\w+(?=@google.com)', '''
host@<EMAIL>
<EMAIL>
<EMAIL>
<EMAIL>
''')
# ### (?!) 负向前视断言
#
# 匹配后续内容不等于 (?!) 中内容的字符串
# 匹配域名不为 google 的用户名
re.findall(r'(?m)^\w+@(?!google.com)','''
<EMAIL>@<EMAIL>
<EMAIL>
<EMAIL>
<EMAIL>
''')
# ## 非贪婪匹配 ?
#
# 贪婪匹配:正则表达式通过从左至右,试图尽可能多的获取匹配字符 通过在 * + 后使用 ? 进行非贪婪匹配
| code/python3-re-expr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9 (XPython)
# language: python
# name: xpython
# ---
# # This is a test run by nbval
#
# Please keep the cell outputs. You can regenerate the outputs by running the Notebook again and saving it.
a = 3
a
print("Hello World!")
# +
# %matplotlib widget
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
plt.plot(np.sin(np.linspace(0, 20, 100)));
| test/test_xeus_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# This is a notebook to explore opSim outputs in different ways, mostly useful to supernova analysis. We will look at the opsim output called Enigma_1189
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
# Required packages sqlachemy, pandas (both are part of anaconda distribution, or can be installed with a python installer)
# One step requires the LSST stack, can be skipped for a particular OPSIM database in question
import opsimsummary as oss
import opsimsummary.summarize_opsim as so
from sqlalchemy import create_engine
import pandas as pd
print so.__file__
# +
# This step requires LSST SIMS package MAF. The main goal of this step is to set DD and WFD to integer keys that
# label an observation as Deep Drilling or for Wide Fast Deep.
# If you want to skip this step, you can use the next cell by uncommenting it, and commenting out this cell, if all you
# care about is the database used in this example. But there is no guarantee that the numbers in the cell below will work
# on other versions of opsim database outputs
from lsst.sims.maf import db
from lsst.sims.maf.utils import opsimUtils
# +
# DD = 366
# WFD = 364
# -
# ## Read in OpSim output for modern versions: (sqlite formats)
# Description of OpSim outputs are available on the page https://confluence.lsstcorp.org/display/SIM/OpSim+Datasets+for+Cadence+Workshop+LSST2015http://tusken.astro.washington.edu:8080
# Here we will use the opsim output http://ops2.tuc.noao.edu/runs/enigma_1189/data/enigma_1189_sqlite.db.gz
# I have downloaded this database, unzipped and use the variable dbname to point to its location
# Change dbname to point at your own location of the opsim output
dbname = '/Users/rbiswas/data/LSST/OpSimData/minion_1016_sqlite.db'
opsdb = db.OpsimDatabase(dbname)
propID, propTags = opsdb.fetchPropInfo()
DD = propTags['DD'][0]
WFD = propTags['WFD'][0]
print("The propID for the Deep Drilling Field {0:2d}".format(DD))
print("The propID for the Wide Fast Deep Field {0:2d}".format(WFD))
# ## Read in the OpSim DataBase into a pandas dataFrame
engine = create_engine('sqlite:///' + dbname)
# The opsim database is a large file (approx 4.0 GB), but still possible to read into memory on new computers. You usually only need the Summary Table, which is about 900 MB. If you are only interested in the Deep Drilling Fields, you can use the read_sql_query to only select information pertaining to Deep Drilling Observations. This has a memory footprint of about 40 MB.
# Obviously, you can reduce this further by narrowing down the columns to those of interest only. For the entire Summary Table, this step takes a few minutes on my computer.
# If you are going to do the read from disk step very often, you can further reduce the time used by storing the output on disk as a hdf5 file and reading that into memory
# We will look at three different Summaries of OpSim Runs. A summary of the
# 1. Deep Drilling fields: These are the observations corresponding to propID of the variable DD above, and are restricted to a handful of fields
# 2. WFD (Main) Survey: These are the observations corresponding to the propID of the variables WFD
# 3. Combined Survey: These are observations combining DEEP and WFD in the DDF. Note that this leads to duplicate observations which must be subsequently dropped.
# +
# Load to a dataframe
# Summary = pd.read_hdf('storage.h5', 'table')
# Summary = pd.read_sql_table('Summary', engine, index_col='obsHistID')
# EnigmaDeep = pd.read_sql_query('SELECT * FROM SUMMARY WHERE PROPID is 366', engine)
# EnigmaD = pd.read_sql_query('SELECT * FROM SUMMARY WHERE PROPID is 366', engine)
# -
# If we knew ahead of time the proposal ID, then we could have done this quicker using
OpSim_combined = pd.read_sql_query('SELECT * FROM SUMMARY WHERE PROPID in ({0}, {1})'.format(DD, WFD) , engine, index_col='obsHistID')
OpSim_Deep = pd.read_sql_query('SELECT * FROM SUMMARY WHERE PROPID is ' + str(DD), engine, index_col='obsHistID')
# We can also sub-select this from the all-encompassing Summary Table. This can be done in two ways:
# ## Some properties of the OpSim Outputs
# ### Construct our Summary
OpSimDeepSummary = so.SummaryOpsim(OpSim_Deep)
OpSimCombinedSummary = so.SummaryOpsim(OpSim_combined)
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='mollweide');
fig = OpSimDeepSummary.showFields(ax=fig.axes[0], marker='o', s=40)
OpSimCombinedSummary.showFields(ax=ax, marker='o', color='r', s=8)
# +
#fieldList = EnigmaDeepSummary.fieldIds
# -
# #### First Season
# We can visualize the cadence during the first season using the cadence plot for a particular field: The following plot shows how many visits we have in different filters on a particular night:
# Cadence plots (visits per filter per night) for field 1427 over the first
# season (night < 366), for the Deep-Drilling-only and combined summaries.
firstSeasonDeep = OpSimDeepSummary.cadence_plot(fieldID=1427, observedOnly=False, sql_query='night < 366')
firstSeasonCombined = OpSimCombinedSummary.cadence_plot(fieldID=1427, observedOnly=False, sql_query='night < 366')
firstSeasonCombined[0].savefig('minion_1427.pdf')
# NOTE(review): `firstSeason_main` is never defined anywhere in this notebook —
# this line raises NameError; presumably a WFD-only cadence_plot call is missing.
firstSeason_main[0].savefig('minion_1430.pdf')
# Two-season cadence plot for field 744.
firstSeason = OpSimDeepSummary.cadence_plot(fieldID=744, observedOnly=False, sql_query='night < 732',
                                            nightMin=0, nightMax=732)
# NOTE(review): `fieldList` is only assigned in the commented-out cell above
# (`#fieldList = EnigmaDeepSummary.fieldIds`) — this line raises NameError as written.
tenCadence = OpSimCombinedSummary.cadence_plot(fieldID=fieldList[2000], observedOnly=False, sql_query='night < 3500', nightMax=3500)
import opsimsummary as oss
fieldIDFromRADec = oss.fieldID(OpSim_combined, np.radians(190.), np.radians(-83.0))
fieldIDFromRADec = oss.fieldID(OpSim_combined, np.radians(116.), np.radians(-66.0))
fieldIDFromRADec = oss.fieldID(OpSim_combined, np.radians(116.), np.radians(-66.0))
fieldIDFromRADec = oss.fieldID(OpSim_combined, np.radians(20.), np.radians(-83.0))
print(fieldIDFromRADec)
np.radians(-83.)
OpSim_combined.query('fieldID == 290')[['fieldRA', 'fieldDec']].head()
# Re-plot the field layout on a Mollweide projection (duplicate of the
# earlier field-map cell).
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='mollweide');
fig = OpSimDeepSummary.showFields(ax=fig.axes[0], marker='o', s=40)
# NOTE(review): `OpS` below is an incomplete statement (truncated typing?) and
# raises NameError — delete or finish it.
OpS
OpSim_combined.index
# NOTE(review): DataFrame.ix is deprecated (removed in modern pandas); this
# only works on the old pandas shipped with this Python 2 kernel — prefer .loc/.iloc.
OpSim_combined.ix[[1, 2.]]
# Suppose we have a supernova with a peak around a particular MJD of 49540, and we want to see what the observations happened around it:
SN = OpSimDeepSummary.cadence_plot(summarydf=OpSimDeepSummary.df, fieldID=1427, #racol='fieldRA', deccol='fieldDec',
observedOnly=False, mjd_center=59640., mjd_range=[-30., 50.])
# ax = plt.gca()
# ax.axvline(49540, color='r', lw=2.)
# ax.xaxis.get_major_formatter().set_useOffset(False)
SN[0].savefig('SN_observaton.pdf')
# # Scratch
# NOTE(review): this scratch section references `SN_matrix` and `EnigmaDeep`,
# neither of which is defined in this notebook as committed (the
# `EnigmaDeep = pd.read_sql_query(...)` cell above is commented out), so the
# cells below raise NameError when the notebook is run top-to-bottom.
SN_matrix.sum(axis=1).sum()
EnigmaDeep.query('fieldID == 744 and expMJD < 49590 and expMJD > 49510').expMJD.size
# Clamp all entries above 0.5 to exactly 1 (binarize the matrix in place).
SN_matrix[SN_matrix > 0.5] = 1
SN_matrix.sum().sum()
len(SN_matrix.sum(axis=1).dropna())
# Work on a deep copy so the original matrix is preserved.
nightlySN_matrix = SN_matrix.copy(deep=True)
nightlySN_matrix[SN_matrix > 0.5] =1
nightlySN_matrix.sum(axis=1).dropna().sum()
nightlySN_matrix.sum(axis=1).dropna().size
# NOTE(review): `ax` here is the Mollweide Axes object created earlier, not an
# axis number — `DataFrame.sum(ax)` will fail; probably meant `axis=1`.
nightlySN_matrix.sum(ax)
| example/ExploringOpSimOutputs_new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os, sys
from instapipeline import QuantiusAnnotation, SpotAnnotationAnalysis, clus
from sklearn.neighbors import KDTree
from matplotlib.lines import Line2D
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# -
# # Objective
# Compare the effects of SNR and NND on detection, using the "Fig 2b" dataset.
#
# Use median and quartiles as the metric for seeing how the distribution of NND in detected vs non-detected populations changes with respect to SNR.
#
# # Takeaways
#
# Trends using median and quartiles for the SNR vs. NND bin plot
#
# From the scatter plots (one for each value of mean SNR for all images in the dataset), we can trivially observe that the prevalence of undetected (orange) spots decreases when the median SNR increases.
#
# From the individual strip plots and especially the plots showing the mean NND of undetected and detected spots for each SNR bin, we can observe that as spot SNR increases, median NND of undetected spots decreases. So at lower SNR, crowded spots are less likely to be detected. This is as expected.
density_list = [0.0025, 0.0027, 0.003, 0.0037, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009]
clustering_params = ['AffinityPropagation', -350]
correctness_threshold = 4
declumping_params = ['KMeans', 2]
height = 300
detected_color = 'xkcd:bright blue'
undetected_color = 'xkcd:orange'
# ## Scatter plots
if not os.path.exists('data'):
os.mkdir('data')
data_list = []
for mean_snr in [7, 9]:
data = []
cwd = os.getcwd() + '/snr_' + str(mean_snr)
json_paths = [cwd + '/anno/Fig2B_snr' + str(mean_snr) + '.json',
cwd + '/anno/Fig2B_snr' + str(mean_snr) + '_inv.json']
# for each file
for json_path in json_paths:
# for each image
for density in density_list:
if(json_path == json_paths[0]):
img_name = 'snr_' + str(mean_snr) + '_1_density_' + str(density) + '_spot_img.png'
img_path = cwd + '/spot_images/og/' + img_name
else:
img_name = 'snr_' + str(mean_snr) + '_1_density_' + str(density) + '_spot_img_inv.png'
img_path = cwd + '/spot_images/inv/' + img_name
csv_path = cwd + '/spot_data/' + 'snr_' + str(mean_snr) + '_1_density_' + str(density) + '_coord_snr_list.csv'
""" Get cluster centroids """
qa = QuantiusAnnotation(json_path, img_name)
anno_all = qa.df()
sa = SpotAnnotationAnalysis(qa)
clusters = sa.get_clusters(anno_all, clustering_params)
cluster_size_threshold = clus.get_cluster_size_threshold(clusters)
small_clusters, large_clusters = clus.sort_clusters_by_size(clusters, cluster_size_threshold)
clumpiness_threshold = clus.get_clumpiness_threshold(large_clusters)
clumpy_clusters, nonclumpy_clusters = clus.sort_clusters_by_clumpiness(large_clusters, clumpiness_threshold)
result_clusters = nonclumpy_clusters
for i in range(len(clumpy_clusters.index)):
subclusters = clus.declump(clumpy_clusters, i, declumping_params)
result_clusters = pd.concat([subclusters, result_clusters], ignore_index=True)
""" Count up num_spots_detected, num_spots_total at each SNR """
result_points = result_clusters.loc[:, ['centroid_x', 'centroid_y']].to_numpy()
result_kdt = KDTree(result_points, leaf_size=2, metric='euclidean')
ref_df = pd.read_csv(csv_path)
ref_points = ref_df.loc[:, ['col', 'row']].to_numpy()
ref_snrs = ref_df.loc[:, ['snr']].to_numpy()
ref_kdt = KDTree(ref_points, leaf_size=2, metric='euclidean')
# for each spot in the image
for ref_point, ref_snr in zip(ref_points, ref_snrs):
dist, ind = ref_kdt.query([ref_point], k=2)
ref_nnd = dist[0][1]
ref_snr = ref_snr[0]
dist, ind = result_kdt.query([[ref_point[0], height-ref_point[1]]], k=1)
if dist[0][0] <= correctness_threshold:
color = detected_color
else:
color = undetected_color
data.append((ref_nnd, ref_snr, color))
data_list.append(data)
np.savetxt('./data/snr%s_data.csv' % mean_snr, data, delimiter=',', fmt='%s')
# One scatter plot per dataset (mean image SNR = 5, 7, 9): each expert-annotated
# spot is placed at (NND, SNR) and coloured by whether the crowd detected it.
for mean_snr in [5, 7, 9]:
    csv_path = './data/snr%s_data.csv' % mean_snr
    data = np.asarray(pd.read_csv(csv_path))
    fig = plt.figure(figsize = (12,8))
    for ref_nnd, ref_snr, color in data:
        plt.scatter([ref_nnd], [ref_snr], color=color, alpha=0.5, s=12)
    plt.xlabel('NND')
    plt.ylabel('SNR')
    leg_elem_1 = Line2D([0], [0], marker='o', color='None', markeredgecolor=detected_color, markerfacecolor=detected_color, label='detected expert annotation', markersize=15)
    # Bug fix: legend label typo "undected" -> "undetected".
    leg_elem_2 = Line2D([0], [0], marker='o', color='None', markeredgecolor=undetected_color, markerfacecolor=undetected_color, label='undetected expert annotation', markersize=15)
    handle_list = [leg_elem_1, leg_elem_2]
    plt.legend(handles=handle_list, loc='upper right', frameon=1, prop={'size': 12}, framealpha=0.5)
    # Bug fix: the title claimed green/magenta, but the plot actually uses
    # blue for detected and orange for undetected (see detected_color /
    # undetected_color definitions above).
    plt.title('SNR vs. NND for each spot. Mean SNR for all images = ' + str(mean_snr) + '. Blue = detected, orange = not detected.')
    plt.show()
# ## Strip plots and median NND for each bin
#
# For each dataset (mean SNR of image = [5, 7, 9].
# +
import sys
sys.path.append('/usr/local/lib/python3.6/site-packages')
import seaborn as sns
import numpy as np
total_df = pd.DataFrame({'SNR':[],'NND':[], 'detected':[]})
for mean_snr in [5, 7, 9]:
csv_path = './data/snr%s_data.csv' % mean_snr
data = np.asarray(pd.read_csv(csv_path))
nnd_list, snr_list, detected_list = [], [], []
snr_bins = [(x, y) for x, y in zip(range(3, 13, 1), range(4, 14, 1)) ]
for ref_nnd, ref_snr, color in data:
nnd_list.append(ref_nnd)
detected_list.append(color==detected_color)
ref_snr_max = 0
for ref_nnd, ref_snr, color in data:
for i in range(len(snr_bins)):
low, high = snr_bins[i]
if (ref_snr >= low and ref_snr < high):
snr_list.append(str(snr_bins[i]))
d = {'SNR':snr_list,'NND':nnd_list, 'detected':detected_list}
df = pd.DataFrame(d)
total_df = pd.concat([total_df, df], ignore_index=True)
snr_bin_list = [str(x) for x in snr_bins]
# plot strip plot
fig = plt.figure()
sns.stripplot(x="SNR", y="NND", data=df, order=snr_bin_list, hue="detected", jitter=True, palette=sns.color_palette([undetected_color,detected_color]), dodge=True)
plt.title('NND vs. SNR bin for detected and undetected spots. Mean SNR for all images = ' + str(mean_snr) + '.')
plt.xlabel('SNR bin')
plt.show()
##########################################
# plot mean NND of each bin
fig = plt.figure()
mean_nnd_undetected, mean_nnd_detected, std_nnd_undetected, std_nnd_detected, snr_bin_list_undetected, snr_bin_list_detected = [], [], [], [], [], []
for nnd, snr_bin in zip(nnd_list, snr_bin_list):
df_slice = df[df.SNR == snr_bin]
df_undetected = df_slice[df_slice.detected == False]
nnd_undetected = df_undetected['NND'].tolist()
if len(nnd_undetected) != 0:
mean_nnd_undetected.append(np.mean(nnd_undetected))
std_nnd_undetected.append(np.var(nnd_undetected))
snr_bin_list_undetected.append(snr_bin)
df_detected = df_slice[df_slice.detected == True]
nnd_detected = df_detected['NND'].tolist()
if len(nnd_detected) != 0:
mean_nnd_detected.append(np.mean(nnd_detected))
std_nnd_detected.append(np.var(nnd_detected))
snr_bin_list_detected.append(snr_bin)
for snr, nnd in zip(snr_bin_list_detected, mean_nnd_detected):
plt.scatter([snr], [nnd], color=detected_color)
plt.plot(snr_bin_list_detected, mean_nnd_detected, color=detected_color)
plt.scatter(snr_bin_list_undetected, mean_nnd_undetected, color=undetected_color)
plt.plot(snr_bin_list_undetected, mean_nnd_undetected, color=undetected_color)
leg_elem_1 = Line2D([0], [0], marker='o', color=detected_color, markeredgecolor=detected_color, markerfacecolor=detected_color, label='true positive', markersize=10)
leg_elem_2 = Line2D([0], [0], marker='o', color=undetected_color, markeredgecolor=undetected_color, markerfacecolor=undetected_color, label='false negative', markersize=10)
handle_list = [leg_elem_1, leg_elem_2]
plt.legend(handles=handle_list, loc='upper right', frameon=1, prop={'size': 10}, framealpha=0.5)
plt.xlabel('SNR (midpoint of bin)')
plt.ylabel('Mean NND')
plt.title('Mean NND vs. SNR bin for each spot')
plt.xticks(range(8), np.arange(5.5, 13, 1))
plt.savefig('nnd_vs_snr_bin_mean_snr%s.pdf' % str(mean_snr), transparent=True, bbox_inches="tight")
plt.show()
| figures/SI/fig_7c/fig_7c.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Tutorial showing use of a `Workspace` object
# ### Part 2: Switchboards
#
# This tutorial introduces the `Switchboard` workspace object and demonstrates its use. You may have gotten the sense from the last tutorial that screen real estate can quickly be taken up by plots and tables. Wouldn't it be nice if we could interactively switch between plots or figures using buttons or sliders instead of having to scroll through endless pages of plots? `Switchboard` to the rescue!
#
# First though, let's run GST on the standard 1Q model to get some results (the same ones as the first tutorial).
# +
import numpy as np
import pygsti
from pygsti.modelpacks import smq1Q_XYI
#The usual GST setup: we're going to run GST on the standard XYI 1-qubit model
target_model = smq1Q_XYI.target_model()
prep_fiducials = smq1Q_XYI.prep_fiducials()
meas_fiducials = smq1Q_XYI.meas_fiducials()
germs = smq1Q_XYI.germs()
maxLengths = [1,2,4,8]
listOfExperiments = pygsti.circuits.create_lsgst_circuits(
target_model.operations.keys(), prep_fiducials, meas_fiducials, germs, maxLengths)
#Create some datasets for analysis
mdl_datagen1 = target_model.depolarize(op_noise=0.1, spam_noise=0.001)
mdl_datagen2 = target_model.depolarize(op_noise=0.05, spam_noise=0.01).rotate(rotate=(0.01,0,0))
ds1 = pygsti.data.simulate_data(mdl_datagen1, listOfExperiments, num_samples=1000,
sample_error="binomial", seed=1234)
ds2 = pygsti.data.simulate_data(mdl_datagen2, listOfExperiments, num_samples=1000,
sample_error="binomial", seed=1234)
ds3 = ds1.copy_nonstatic(); ds3.add_counts_from_dataset(ds2); ds3.done_adding_data()
#Run GST on all three datasets
target_model.set_all_parameterizations("full TP")
results1 = pygsti.run_long_sequence_gst(ds1, target_model, prep_fiducials, meas_fiducials, germs, maxLengths, verbosity=0)
results2 = pygsti.run_long_sequence_gst(ds2, target_model, prep_fiducials, meas_fiducials, germs, maxLengths, verbosity=0)
results3 = pygsti.run_long_sequence_gst(ds3, target_model, prep_fiducials, meas_fiducials, germs, maxLengths, verbosity=0)
#make some shorthand variable names for later
tgt = results1.estimates['GateSetTomography'].models['target']
ds1 = results1.dataset
ds2 = results2.dataset
ds3 = results3.dataset
mdl1 = results1.estimates['GateSetTomography'].models['go0']
mdl2 = results2.estimates['GateSetTomography'].models['go0']
mdl3 = results3.estimates['GateSetTomography'].models['go0']
circuits = results1.circuit_lists['final']
# -
# Next we create the workspace, as before. This time, we'll leave `autodisplay=False` (the default), to demonstrate how this gives us more control over when workspace items are displayed. In particular, we'll build up a several workspace objects and display them all at once. **NOTE that setting `connected=True` means you need to have an internet connection!**
w = pygsti.report.Workspace() #create a new workspace
w.init_notebook_mode(connected=False) # and initialize it so it works within a notebook
# Note that if we create a table it doesn't get displayed automatically.
tbl1 = w.GatesVsTargetTable(mdl1, tgt)
# To see it, we need to call `display()`:
tbl1.display()
# ### Switchboards
# A `Switchboard` is essentially a collection of one or more switches along with a dictionary of "values" which depend on some or all of the switch positions. Each value looks like a NumPy `ndarray` whose axes correspond to the switches that value depends upon. The array can hold whatever you want: `Model`s, `DataSet`s, `float`s, etc., and from the perspective of the plot and table workspace objects the value looks like the thing contained in its array (e.g. a *single* `Model`, `DataSet`, or `float`, etc.).
#
# Let's start off simple and create a switchboard with a single switch named "My Switch" that has two positions "On" and "Off":
switchbd = w.Switchboard(["My Switch"],[["On","Off"]],["buttons"])
# Next, add a "value" to the switchboard called "mdl" (for "model"), with is dependent on the 0-th (and only) switch of the switchboard:
switchbd.add("mdl", [0])
# Now `switchbd` has a member, `mdl`, which looks like a 1-dimensional Numpy array (since `mdl` only depends on a single switch) of length 2 (because that single switch has 2 positions).
switchbd.mdl.shape
# We'll use `switchbd.mdl` to switch between the models `mdl1` and `mdl2`. We associate the "On" position with `mdl1` and the "Off" position with `mdl2` by simply assigning them to the corresponding locations of the array. Note that we can use NumPy's fancy indexing to make this a breeze.
switchbd.mdl[:] = [mdl1,mdl2]
# Ok, now here's the magical part: even though `switchbd.mdl` is really an array holding `Model` objects, when you provide it as an input to create a workspace item such as a plot or a table, it *behaves* like a single `Model` and can thus be used for any `Model`-type argument. We'll use it as the first argument to `GatesVsTargetTable`.
tbl2 = w.GatesVsTargetTable(switchbd.mdl, tgt)
# Note the the second argument (`tgt`, the target model) in the above call is just a plain old `Model`, just like it's always been up to this point. The above line creates a table, `tbl2`, that is *connected* to the switchboard `switchbd`. Let's display both the switchboard and the table together.
switchbd.display()
tbl2.display()
# By pressing the "On" or "Off" button the table changes between displaying metrics for `mdl1` vs. `tgt` and `mdl2` vs. `tgt`, as expected. In this simple example there was one switch controlling one table. It is possible to have any number of switches controlling any number of tables and/or plots, and also to have multiple switchboards controlling a single plot or table. In the following cells, more sophisticated uses of switchboards are demonstrated.
# +
# Create a switchboard with straightforward dataset and model dropdown switches
switchbd2 = w.Switchboard(["dataset","model"], [["DS1","DS2","DS3"],["MODEL1","MODEL2","MODEL3"]], ["dropdown","dropdown"])
switchbd2.add("ds",(0,))
switchbd2.add("mdl",(1,))
switchbd2.ds[:] = [ds1, ds2, ds3]
switchbd2.mdl[:] = [mdl1, mdl2, mdl3]
#Then create a chi2 plot that can show the goodness-of-fit between any model-dataset pair
chi2plot = w.ColorBoxPlot(("chi2",), circuits, switchbd2.ds, switchbd2.mdl, scale=0.75)
# Can also truncate circuits to only a subset of the germs and depths
circuits2 = circuits.truncate(xs_to_keep=[1,2], ys_to_keep=circuits.ys[1:4])
chi2plot2 = w.ColorBoxPlot(("chi2",), circuits2, switchbd2.ds, switchbd2.mdl, scale=0.75)
switchbd2.display()
chi2plot.display()
chi2plot2.display()
# -
#Perform gauge optimizations of gs1 using different spam weights
spamWts = np.linspace(0.0,1.0,20)
mdl_gaugeopts = [ pygsti.gaugeopt_to_target(mdl1, tgt,{'gates': 1, 'spam': x}) for x in spamWts]
# +
# Create a switchboard with a slider that controls the spam-weight used in gauge optimization
switchbd3 = w.Switchboard(["spam-weight"], [["%.2f" % x for x in spamWts]], ["slider"])
switchbd3.add("mdlGO",(0,))
switchbd3.mdlGO[:] = mdl_gaugeopts
#Then create a comparison vs. target tables
tbl3 = w.GatesVsTargetTable(switchbd3.mdlGO, tgt)
tbl4 = w.SpamVsTargetTable(switchbd3.mdlGO, tgt)
switchbd3.display()
tbl3.display()
tbl4.display()
# +
# Create a slider showing the color box plot at different GST iterations
switchbd4 = w.Switchboard(["max(L)"], [list(map(str,circuits.xs))], ["slider"])
switchbd4.add("mdl",(0,))
switchbd4.add("circuits",(0,))
switchbd4.mdl[:] = results1.estimates['GateSetTomography'].models['iteration estimates']
switchbd4.circuits[:] = results1.circuit_lists['iteration']
#Then create a logl plot that can show the goodness-of-fit at different iterations
logLProgress = w.ColorBoxPlot(("logl",), switchbd4.circuits, ds1, switchbd4.mdl, scale=0.75)
logLProgress.display()
switchbd4.display()
# -
# ### Switchboard Views
# If you want to duplicate a switch board in order to have the same switches accessible at different (multiple) location in a page, you need to create switchboard *views*. These are somewhat like NumPy array views in that they are windows into some base data - in this case the original `Switchboard` object. Let's create a view of the `Switchboard` above.
sbv = switchbd4.view()
sbv.display()
# Note that when you move one slider, the other moves with it. This is because there's really only *one* switch.
#
# Views don't need to contain *all* of the switches of the base `Switchboard` either. Here's an example where each view only shows only a subset of the switches. We also demonstrate here how the *initial positions* of each switch can be set via the `initial_pos` argument.
parent = w.Switchboard(["My Buttons","My Dropdown", "My Slider"],
[["On","Off"],["A","B","C"],["0","0.5","0.8","1.0"]],
["buttons","dropdown","slider"], initial_pos=[0,1,2])
parent.display()
buttonsView = parent.view(["My Buttons"])
buttonsView.display()
otherView = parent.view(["My Dropdown","My Slider"])
otherView.display()
# ### Exporting to HTML
# Again, you can save this notebook as an HTML file by going to **File => Download As => HTML** in the Jupyter menu. The resulting file will retain all of the plot *and switch* interactivity, and in this case doesn't need the `offline` folder (because we set `connected=True` in `init_notebook_mode` above) but does need an internet connection.
| jupyter_notebooks/Tutorials/reporting/advanced/WorkspaceSwitchboards.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Writing a custom acquisition function and interfacing with Ax
#
# As seen in the [custom BoTorch model in Ax](./custom_botorch_model_in_ax) tutorial, Ax's `BotorchModel` is flexible in allowing different components of the Bayesian optimization loop to be specified through a functional API. This tutorial walks through the steps of writing a custom acquisition function and then inserting it into Ax.
#
#
# ### Upper Confidence Bound (UCB)
#
# The Upper Confidence Bound (UCB) acquisition function balances exploration and exploitation by assigning a score of $\mu + \sqrt{\beta} \cdot \sigma$ if the posterior distribution is normal with mean $\mu$ and variance $\sigma^2$. This "analytic" version is implemented in the `UpperConfidenceBound` class. The Monte Carlo version of UCB is implemented in the `qUpperConfidenceBound` class, which also allows for q-batches of size greater than one. (The derivation of q-UCB is given in Appendix A of [Wilson et. al., 2017](https://arxiv.org/pdf/1712.00424.pdf)).
# ### A scalarized version of q-UCB
#
# Suppose now that we are in a multi-output setting, where, e.g., we model the effects of a design on multiple metrics. We first show a simple extension of the q-UCB acquisition function that accepts a multi-output model and performs q-UCB on a scalarized version of the multiple outputs, achieved via a vector of weights. Implementing a new acquisition function in botorch is easy; one simply needs to implement the constructor and a `forward` method.
# +
import math
from torch import Tensor
from typing import Optional
from botorch.acquisition import MCAcquisitionObjective
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.models.model import Model
from botorch.sampling.samplers import MCSampler, SobolQMCNormalSampler
from botorch.utils import t_batch_mode_transform
class qScalarizedUpperConfidenceBound(MCAcquisitionFunction):
    """Monte-Carlo scalarized Upper Confidence Bound.

    Scalarizes the outputs of a multi-output model with a fixed weight
    vector and evaluates q-UCB on the resulting single objective.
    """

    def __init__(
        self,
        model: Model,
        beta: Tensor,
        weights: Tensor,
        sampler: Optional[MCSampler] = None,
    ) -> None:
        """
        Args:
            model: A fitted multi-output model.
            beta: Exploration/exploitation trade-off parameter.
            weights: One scalarization weight per model output.
            sampler: MC sampler; defaults to 512 Sobol QMC samples.
        """
        # Deliberately skip MCAcquisitionFunction.__init__: its validity
        # checks (e.g. on the objective) do not apply here, so call the
        # grandparent AcquisitionFunction constructor instead.
        super(MCAcquisitionFunction, self).__init__(model=model)
        self.sampler = (
            sampler
            if sampler is not None
            else SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
        )
        for buf_name, buf_value in (("beta", beta), ("weights", weights)):
            self.register_buffer(buf_name, torch.as_tensor(buf_value))

    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        """Evaluate scalarized qUCB on the candidate set `X`.

        Args:
            X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
                design points each.

        Returns:
            Tensor: A `(b)`-dim Tensor of Upper Confidence Bound values at the
                given design points `X`.
        """
        posterior = self.model.posterior(X)
        mc_samples = self.sampler(posterior)             # n x b x q x o
        proj_samples = mc_samples.matmul(self.weights)   # n x b x q
        proj_mean = posterior.mean.matmul(self.weights)  # b x q
        # q-UCB spread term (see Wilson et al., 2017, Appendix A).
        spread = (proj_samples - proj_mean).abs()
        ucb_samples = proj_mean + math.sqrt(self.beta * math.pi / 2) * spread
        return ucb_samples.max(dim=-1)[0].mean(dim=0)
# -
# Note that `qScalarizedUpperConfidenceBound` is very similar to `qUpperConfidenceBound` and only requires a few lines of new code to accommodate scalarization of multiple outputs. The `@t_batch_mode_transform` decorator ensures that the input `X` has an explicit t-batch dimension (code comments are added with shapes for clarity).
# #### Ad-hoc testing q-Scalarized-UCB
#
# Before hooking the newly defined acquisition function into a Bayesian Optimization loop, we should test it. For this we'll just make sure that it properly evaluates on a compatible multi-output model. Here we just define a basic multi-output `SingleTaskGP` model trained on synthetic data.
# +
import torch
from botorch.fit import fit_gpytorch_model
from botorch.models import SingleTaskGP
from botorch.utils import standardize
from gpytorch.mlls import ExactMarginalLogLikelihood
# generate synthetic data
X = torch.rand(20, 2)
Y = torch.stack([torch.sin(X[:, 0]), torch.cos(X[:, 1])], -1)
Y = standardize(Y) # standardize to zero mean unit variance
# construct and fit the multi-output model
gp = SingleTaskGP(X, Y)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_model(mll);
# construct the acquisition function
qSUCB = qScalarizedUpperConfidenceBound(gp, beta=0.1, weights=torch.tensor([0.1, 0.5]))
# -
# evaluate on single q-batch with q=3
qSUCB(torch.rand(3, 2))
# batch-evaluate on two q-batches with q=3
qSUCB(torch.rand(2, 3, 2))
# ### A scalarized version of analytic UCB (`q=1` only)
#
# We can also write an *analytic* version of UCB for a multi-output model, assuming a multivariate normal posterior and `q=1`. The new class `ScalarizedUpperConfidenceBound` subclasses `AnalyticAcquisitionFunction` instead of `MCAcquisitionFunction`. In contrast to the MC version, instead of using the weights on the MC samples, we directly scalarize the mean vector $\mu$ and covariance matrix $\Sigma$ and apply standard UCB on the univariate normal distribution, which has mean $w^T \mu$ and variance $w^T \Sigma w$. In addition to the `@t_batch_transform` decorator, here we are also using `expected_q=1` to ensure the input `X` has a `q=1`.
#
# *Note:* BoTorch also provides a `ScalarizedObjective` abstraction that can be used with any existing analytic acquisition functions and automatically performs the scalarization we implement manually below. See the end of this tutorial for a usage example.
# +
from botorch.acquisition import AnalyticAcquisitionFunction
class ScalarizedUpperConfidenceBound(AnalyticAcquisitionFunction):
    """Analytic UCB on a scalarized multi-output posterior (q=1 only).

    The multi-output posterior with mean `mu` and covariance `Sigma` is
    scalarized by a weight vector `w`, yielding a univariate normal with
    mean `w^T mu` and variance `w^T Sigma w`; standard UCB is applied to
    that distribution.
    """

    def __init__(
        self,
        model: Model,
        beta: Tensor,
        weights: Tensor,
        maximize: bool = True,
    ) -> None:
        """
        Args:
            model: A fitted multi-output model.
            beta: Exploration/exploitation trade-off parameter.
            weights: One scalarization weight per model output.
            maximize: If True return `mu + delta`, else `mu - delta`.
        """
        # we use the AcquisitionFunction constructor, since that of
        # AnalyticAcquisitionFunction performs some validity checks that we don't want here
        super(AnalyticAcquisitionFunction, self).__init__(model)
        self.maximize = maximize
        self.register_buffer("beta", torch.as_tensor(beta))
        self.register_buffer("weights", torch.as_tensor(weights))

    @t_batch_mode_transform(expected_q=1)
    def forward(self, X: Tensor) -> Tensor:
        """Evaluate the Upper Confidence Bound on the candidate set X using scalarization
        Args:
            X: A `(b) x d`-dim Tensor of `(b)` t-batches of `d`-dim design
                points each.
        Returns:
            A `(b)`-dim Tensor of Upper Confidence Bound values at the given
            design points `X`.
        """
        # Keep beta on the same device/dtype as the candidates.
        self.beta = self.beta.to(X)
        batch_shape = X.shape[:-2]
        posterior = self.model.posterior(X)
        means = posterior.mean.squeeze(dim=-2)  # b x o
        scalarized_mean = means.matmul(self.weights)  # b
        covs = posterior.mvn.covariance_matrix  # b x o x o
        # NOTE(review): the permute below assumes exactly one t-batch
        # dimension — confirm callers only pass `b x 1 x d` inputs.
        weights = self.weights.view(1, -1, 1)  # 1 x o x 1 (assume single batch dimension)
        weights = weights.expand(batch_shape + weights.shape[1:])  # b x o x 1
        weights_transpose = weights.permute(0, 2, 1)  # b x 1 x o
        # Batched quadratic form w^T Sigma w: (b x 1 x 1) -> (b,)
        scalarized_variance = torch.bmm(
            weights_transpose, torch.bmm(covs, weights)
        ).view(batch_shape)  # b
        # sqrt(beta * variance) == sqrt(beta) * sigma
        delta = (self.beta.expand_as(scalarized_mean) * scalarized_variance).sqrt()
        if self.maximize:
            return scalarized_mean + delta
        else:
            return scalarized_mean - delta
# -
# #### Ad-hoc testing Scalarized-UCB
#
# Notice that we pass in an explicit q-batch dimension for consistency, even though `q=1`.
# construct the acquisition function
SUCB = ScalarizedUpperConfidenceBound(gp, beta=0.1, weights=torch.tensor([0.1, 0.5]))
# evaluate on single point
SUCB(torch.rand(1, 2))
# batch-evaluate on 3 points
SUCB(torch.rand(3, 1, 2))
# To use our newly minted acquisition function within Ax, we need to write a custom factory function and pass it to the constructor of Ax's `BotorchModel` as the `acqf_constructor`, which has the call signature:
#
# ```python
# def acqf_constructor(
# model: Model,
# objective_weights: Tensor,
# outcome_constraints: Optional[Tuple[Tensor, Tensor]],
# X_observed: Optional[Tensor] = None,
# X_pending: Optional[Tensor] = None,
# **kwargs: Any,
# ) -> AcquisitionFunction:
# ```
#
# The argument `objective_weights` allows for scalarization of multiple objectives, `outcome_constraints` is used to define constraints on multi-output models, `X_observed` contains previously observed points (useful for acquisition functions such as Noisy Expected Improvement), and `X_pending` are the points that are awaiting observations. By default, Ax uses the Noisy Expected Improvement (`qNoisyExpectedImprovement`) acquisition function and so the default value of `acqf_constructor` is `get_NEI` (see documentation for additional details and context).
#
# Note that there is ample flexibility to how the arguments of `acqf_constructor` are used. In `get_NEI`, they are used in some preprocessing steps *before* constructing the acquisition function. They could also be directly passed to the botorch acquisition function, or not used at all -- all we need to do is return an `AcquisitionFunction`. We now give a bare-bones example of a custom factory function that returns our analytic scalarized-UCB acquisition.
#
# ```python
# def get_scalarized_UCB(
# model: Model,
# objective_weights: Tensor,
# **kwargs: Any,
# ) -> AcquisitionFunction:
# return ScalarizedUpperConfidenceBound(model=model, beta=0.2, weights=objective_weights)
# ```
# By following the example shown in the [custom botorch model in ax](./custom_botorch_model_in_ax) tutorial, a `BotorchModel` can be instantiated with `get_scalarized_UCB` and then run in Ax.
# ### Using `ScalarizedObjective`
#
# Using the `ScalarizedObjective` abstraction, the functionality of `ScalarizedUpperConfidenceBound` implemented above can be easily achieved in just a few lines of code:
# +
from botorch.acquisition.objective import ScalarizedObjective
from botorch.acquisition.analytic import UpperConfidenceBound
obj = ScalarizedObjective(weights=torch.tensor([0.1, 0.5]))
SUCB = UpperConfidenceBound(gp, beta=0.1, objective=obj)
| tutorials/custom_acquisition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Procedural Python for Reproducibility
#
# The goal today is to take some of the ideas we developed last week, and do a couple things to make our lives easier:
#
# - Define standalone functions which accomplish tasks that we would like to do repeatedly
# - Put these functions in a place where we can easily use them without copy-pasting repeatedly
# - Think about how to make our analysis reproducible – both for the sake of our future selves, and for the sake of anyone who wants to replicate and/or build on our work.
#
# To that end, we are going to work together to do the following tasks:
# 1. Write a function which will download the Pronto data and the weather data
#
# - This function should **only** download the data if it isn't already present on disk
# - Pronto data can be downloaded at https://data.seattle.gov/api/views/tw7j-dfaw/rows.csv?accessType=DOWNLOAD
# - Weather data can be downloaded at http://uwseds.github.io/data/pronto_weather.csv
#
# 2. Write two functions which, given the downloaded data, will load it, parse dates properly, and return a pandas array.
#
# 3. Write a function which will group and join the trip and weather data into a single DataFrame, making use of the above functions.
#
# 4. Develop some plots showing relationships in the data, and write a function which will create and save plots related to your analysis.
#
# - Number of rides per day over the course of the year (day-pass and annual members)
# - Number of rides per hour over the course of the day (day-pass and annual members)
# - Number of rides per day as a function of temperature (day-pass and annual members)
#
# 5. Write a master script that you – or anyone – can run, which will produce your analysis from scratch.
# Today during the class time we will walk through accomplishing these tasks together.
# ---
#
# We ended up creating a file that looks like this:
#
# ```python
# # pronto_utils.py
#
# from urllib import request
# import os
# import pandas as pd
#
#
# TRIP_DATA = "https://data.seattle.gov/api/views/tw7j-dfaw/rows.csv?accessType=DOWNLOAD"
# TRIP_FILE = "pronto_trips.csv"
#
# WEATHER_DATA = "http://uwseds.github.io/data/pronto_weather.csv"
# WEATHER_FILE = "pronto_weather.csv"
#
# def download_if_not_present(url, filename):
# """Download file from URL to filename
# If filename is present, then skip download.
# """
# if os.path.exists(filename):
# print("File already present")
# else:
# print("Downloading", filename)
# request.urlretrieve(url, filename)
#
# def download_trips():
# """Download the pronto trip data unless already downloaded"""
# download_if_not_present(TRIP_DATA, TRIP_FILE)
#
# def download_weather():
# download_if_not_present(WEATHER_DATA, WEATHER_FILE)
#
# def load_weather_data():
# download_weather()
# return pd.read_csv('pronto_weather.csv',
# parse_dates=['DATE'],
# index_col='DATE')
#
# def load_trip_data():
# download_trips()
# data = pd.read_csv('pronto_trips.csv')
# data['starttime'] = pd.to_datetime(data['starttime'], format="%m/%d/%Y %I:%M:%S %p")
# data['stoptime'] = pd.to_datetime(data['stoptime'], format="%m/%d/%Y %I:%M:%S %p")
# data['tripminutes'] = data['tripduration'] / 60
# return data
#
#
# def join_trips_and_weather():
# """Group trips by day and join with the daily weather data
# Returns: pandas DataFrame
# """
# weather = load_weather_data()
# trips = load_trip_data()
# tripdates = pd.DatetimeIndex(trips['starttime']).date
# trips_by_day = pd.pivot_table(trips,
# values='trip_id',
# index=tripdates,
# columns='usertype',
# aggfunc='count')
# return trips_by_day.join(weather)
# ```
#
# ---
#
# And here is how we used it:
import pronto_utils
weather = pronto_utils.load_weather_data()
trips = pronto_utils.load_trip_data()
# %matplotlib inline
import matplotlib.pyplot as plt
weather['PRECIPITATION_INCHES'].plot()
joined_data = pronto_utils.join_trips_and_weather()
joined_data.columns
# +
import matplotlib.pyplot as plt
# NOTE(review): the bare 'seaborn' style name was removed in Matplotlib 3.6
# (renamed 'seaborn-v0_8') — confirm the Matplotlib version in use.
plt.style.use('seaborn')
# Two scatter panels with shared axes so member vs. day-pass are comparable.
fig, ax = plt.subplots(1, 2, figsize=(16, 6), sharey=True, sharex=True)
joined_data.plot.scatter('AVG_TEMPERATURE_F', 'Member', ax=ax[0])
joined_data.plot.scatter('AVG_TEMPERATURE_F', 'Short-Term Pass Holder', ax=ax[1])
ax[0].set_title("Annual Members")
ax[1].set_title("Day-Pass Users")
ax[0].set_ylabel("Daily Ride Total")  # shared y-axis: label left panel only
fig.savefig('rides_vs_temperature.png')
| Autumn2017/03-Procedural-Python/Procedural-Python-Completed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# ## Initial Setup
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "height": 321, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 2880, "status": "error", "timestamp": 1505781339378, "user": {"displayName": "<NAME>", "photoUrl": "//lh4.googleusercontent.com/-RR9n0dvbwgI/AAAAAAAAAAI/AAAAAAAAMYM/SOr5ZExpvXE/s50-c-k-no/photo.jpg", "userId": "112510032804989247452"}, "user_tz": 240} id="783h64rGhA3T" outputId="d447b2ab-e321-4ee5-abd4-de2c0116302f"
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
import math
import string
import re
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import helper
import pickle
import keras
from keras.models import Sequential,load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D,Conv1D,MaxPooling1D
layers = keras.layers
# -
# ## Training Parameters
#
# We'll set the hyperparameters for training our model. If you understand what they mean, feel free to play around - otherwise, we recommend keeping the defaults for your first run 🙂
# +
# Hyperparams if GPU is available
# NOTE(review): tf.test.is_gpu_available() is deprecated in TF 2.x in favor
# of tf.config.list_physical_devices('GPU') — confirm the pinned TF version.
if tf.test.is_gpu_available():
    print('---- We are using GPU now ----')
    # GPU
    BATCH_SIZE = 512  # Number of examples used in each iteration
    EPOCHS = 80  # Number of passes through entire dataset
# Hyperparams for CPU training
else:
    print('---- We are using CPU now ----')
    # CPU
    BATCH_SIZE = 256
    EPOCHS = 100
# -
# ## Data
#
# The wine reviews dataset is already attached to your workspace (if you want to attach your own data, [check out our docs](https://docs.floydhub.com/guides/workspace/#attaching-floydhub-datasets)).
#
# Let's take a look at data.
data_path = '/floyd/input/gengduoshuju/' # ADD path/to/dataset
# NOTE(review): pickle is only safe on trusted data, and these file handles
# are never closed — prefer `with open(...)` blocks.
Y= pickle.load( open(os.path.join(data_path,'Y.pks'), "rb" ) )
X= pickle.load( open(os.path.join(data_path,'X.pks'), "rb" ) )
# Append a trailing channel axis so X matches Conv1D input (samples, steps, 1).
X = X.reshape((X.shape[0],X.shape[1],1))
print("Size of X :" + str(X.shape))
print("Size of Y :" + str(Y.shape))
X = X.astype(np.float64)
# Replace NaN with 0 (and +/-inf with large finite values) before training.
X = np.nan_to_num(X)
# ## Data Preprocessing
# Train/test split via the project helper (ratio defined in helper.py).
X_train, X_test, Y_train_orig,Y_test_orig= helper.divide_data(X,Y)
# Label-range sanity check before one-hot encoding.
print(Y.min())
print(Y.max())
# NOTE(review): hard-coded class count — presumably Y.max() + 1; verify
# against the printed label range above.
num_classes = 332
Y_train = keras.utils.to_categorical(Y_train_orig, num_classes)
Y_test = keras.utils.to_categorical(Y_test_orig, num_classes)
print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))
# Per-sample input shape (steps, channels) for the network.
input_shape = X_train.shape[1:]
print(input_shape)
# # Model definition
# The *Tokens per sentence* plot (see above) is useful for setting the `MAX_LEN` training hyperparameter.
# +
# ===================================================================================
# Load the model that has already been trained
# ===================================================================================
model = load_model(r"floyd_model_xxl_data_ver9.h5")
# -
# # Model Training
# +
# Adam with a conservative learning rate. NOTE(review): `lr` (vs
# `learning_rate`) and `epsilon=None` are legacy Keras-2 arguments —
# confirm the pinned Keras version.
opt = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
# One-hot targets -> categorical cross-entropy.
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.summary()
# float32 halves memory vs. the float64 arrays produced earlier.
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
model.fit(X_train, Y_train,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          validation_data=(X_test, Y_test),  # NOTE(review): the test set doubles as validation — no held-out set
          shuffle=True)
# Bump version on save: ver9 was loaded above, ver10 is written here.
model.save(r"floyd_model_xxl_data_ver10.h5")
print('Training is done!')
| train_result/ml_ee_xxl_data_training_step8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
filename = '/home/whyj/Downloads/LANDSAT_8_C1.csv'
ls_pd = pd.read_csv(filename)
ls_pd
target_columns = ['path', 'row', 'dateUpdated', 'sceneCenterLatitude', 'sceneCenterLongitude', 'upperLeftCornerLatitude', 'upperLeftCornerLongitude', 'upperRightCornerLatitude', 'upperRightCornerLongitude', 'lowerLeftCornerLatitude', 'lowerLeftCornerLongitude', 'lowerRightCornerLatitude', 'lowerRightCornerLongitude']
ls_pd_s = ls_pd[target_columns]
ls_pd_s
ls_pd_911 = ls_pd_s.loc[np.logical_and(ls_pd_s['path'] == 9, ls_pd_s['row'] == 11)]
ls_pd_911
ls_pd_911.describe()
# +
# Build one summary row per (path, row) WRS-2 tile: the column-wise mean of
# every numeric field for all scenes in that tile.
# FIX: DataFrame.append was removed in pandas 2.0 and reallocated the whole
# frame on every call; collect the rows first and build the frame once.
# NOTE(review): a single ls_pd_s.groupby(['path', 'row']).mean() would avoid
# ~58k full-frame scans, but would change the result's index/labels.
mean_rows = []
for path in range(1, 234):  # WRS-2 paths 1..233
    for row in range(1, 249):  # WRS-2 rows 1..248
        ls_pd_ss = ls_pd_s.loc[np.logical_and(ls_pd_s['path'] == path, ls_pd_s['row'] == row)]
        if len(ls_pd_ss.index) != 0:
            # describe().loc['mean'] covers numeric columns only, which
            # already excludes the string 'dateUpdated' column.
            mean_rows.append(ls_pd_ss.describe().loc['mean'])
# Fix the column order explicitly so an empty result still has the schema.
output_data = pd.DataFrame(mean_rows, columns=[c for c in ls_pd_s.columns if c != 'dateUpdated'])
output_data = output_data.astype({'path': int, 'row': int})
# -
output_data
output_data = output_data.rename(columns={'sceneCenterLatitude': 'lat_CTR',
'sceneCenterLongitude': 'lon_CTR',
'upperLeftCornerLatitude': 'lat_UL',
'upperLeftCornerLongitude': 'lon_UL',
'upperRightCornerLatitude': 'lat_UR',
'upperRightCornerLongitude': 'lon_UR',
'lowerLeftCornerLatitude': 'lat_LL',
'lowerLeftCornerLongitude': 'lon_LL',
'lowerRightCornerLatitude': 'lat_LR',
'lowerRightCornerLongitude': 'lon_LR'})
output_data
output_data.to_csv('LANDSAT_8_C1_cornerpoints.csv', index=False)
| LS8_derive_metadata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
import numpy as np
#from sklearn.model_selection import train_test_split
#from sklearn import preprocessing
#from sklearn.linear_model import LogisticRegression
import scipy.stats as st
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
raw_data= pd.read_csv('hlc_all_metrics_new_upd_final_1.csv')
# +
#raw_data['high_ltr'] = np.where(raw_data['decile_rank_last_1_yr_upd'] == 1, 1, 0)
# -
raw_data.head()
correlation_matrix = raw_data.corr(method='pearson',min_periods=1)
correlation_matrix.to_excel('Correlation_matrix.xlsx')
#raw_data.count()
raw_data.isnull().sum().to_excel('Number_of_nulls.xlsx')
data_with_sel_cols = raw_data[['email_acq','time_bn_wm_and_home','no_items_lt','auth_revenue_lt','no_items_lt_edl','aov_wm','ent_aov','enp_aov','fashion_aov',
'avg_wm_order_gap','first_order_wm_gmv','holiday_gmv','non_holiday_gmv','avg_basket_value_delta','no_home_visits',
'no_home_page_views','no_divisions_per_order','top_brand_revenue_percent','avg_dwell_time', 'gender', 'age_group', 'income_group',
'hh_adult_qty', 'hh_children_qty', 'ethnicity', 'urbanicity', 'marital_status_cd', 'wm_pdp_visits', 'decile_rank_last_1_yr', 'auth_revenue']]
# # Checking Outliers
data_with_sel_cols.isnull().sum()
data_with_sel_cols1 = data_with_sel_cols.fillna('NULL')
data_with_sel_cols1.to_excel('Raw_data_with_selected_columns1.xlsx')
raw_data_imputed = pd.read_csv('Raw_data_with_selected_columns1_imputed.csv')
# importing one hot encoder from sklearn
#import sklearn
from sklearn.preprocessing import MultiLabelBinarizer
# # SPLIT DATA 70:30
# BUG FIX: train_test_split was only imported in a commented-out line near
# the top of the file, so this cell raised NameError. Import it here.
from sklearn.model_selection import train_test_split

# 70/30 split for each of the two candidate datasets (built in earlier cells).
x_train, x_test, y_train, y_test= train_test_split(final_data.drop(['high_ltr'], axis= 1), final_data['high_ltr'], test_size=0.3)
x_train1, x_test1, y_train1, y_test1 = train_test_split(final_data1.drop(['high_ltr'], axis= 1), final_data1['high_ltr'], test_size=0.3)
# # Logistic Regression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn import metrics

# --- Logistic regression on split 0 ---
logistic = LogisticRegression()
logistic.fit(x_train, y_train)
logistic_prediction = logistic.predict(x_test)
# BUG FIX: the original refit the model on split 1 first and then compared
# y_test against predictions made on x_test1. Evaluate each split's
# predictions before they are overwritten.
print(classification_report(y_test, logistic_prediction))
print("Accuracy:", metrics.accuracy_score(y_test, logistic_prediction))
print(metrics.confusion_matrix(y_test, logistic_prediction))

# --- Logistic regression on split 1 ---
logistic = LogisticRegression()
logistic.fit(x_train1, y_train1)
logistic_prediction = logistic.predict(x_test1)
print(classification_report(y_test1, logistic_prediction))
print("Accuracy:", metrics.accuracy_score(y_test1, logistic_prediction))
print(metrics.confusion_matrix(y_test1, logistic_prediction))
# # K- NN
from sklearn.neighbors import KNeighborsClassifier

# --- KNN on split 0 ---
knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')
knn.fit(x_train, y_train)
knn_prediction = knn.predict(x_test)
# BUG FIX: evaluate the split-0 predictions before refitting on split 1
# (the original compared y_test against predictions made on x_test1).
print(classification_report(y_test, knn_prediction))
print("Accuracy:", metrics.accuracy_score(y_test, knn_prediction))
print(metrics.confusion_matrix(y_test, knn_prediction))

# --- KNN on split 1 ---
knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')
knn.fit(x_train1, y_train1)
knn_prediction = knn.predict(x_test1)
print(classification_report(y_test1, knn_prediction))
print("Accuracy:", metrics.accuracy_score(y_test1, knn_prediction))
print(metrics.confusion_matrix(y_test1, knn_prediction))
# # Naive Bayes
from sklearn.naive_bayes import BernoulliNB

# --- Bernoulli Naive Bayes on split 0 ---
nb = BernoulliNB()
nb.fit(x_train, y_train)
nb_prediction = nb.predict(x_test)
# BUG FIX: evaluate the split-0 predictions before refitting on split 1
# (the original compared y_test against predictions made on x_test1).
print(classification_report(y_test, nb_prediction))
print("Accuracy:", metrics.accuracy_score(y_test, nb_prediction))
print(metrics.confusion_matrix(y_test, nb_prediction))

# --- Bernoulli Naive Bayes on split 1 ---
nb = BernoulliNB()
nb.fit(x_train1, y_train1)
nb_prediction = nb.predict(x_test1)
print(classification_report(y_test1, nb_prediction))
print("Accuracy:", metrics.accuracy_score(y_test1, nb_prediction))
print(metrics.confusion_matrix(y_test1, nb_prediction))
# # Confusion Matrices
# NOTE(review): at this point each *_prediction variable holds predictions
# for x_test1 only (the split-1 models overwrote the split-0 predictions),
# so the original y_test comparisons paired labels and predictions from
# different splits. Keep only the comparisons that are actually valid.
print(metrics.confusion_matrix(y_test1, logistic_prediction))
print(metrics.confusion_matrix(y_test1, knn_prediction))
print(metrics.confusion_matrix(y_test1, nb_prediction))
# # Conclusion
# +
#Logistic regression fetches the best results with an accuracy of 95.1%. This is because, Logitic regression works well with Yes/No scenarios.
| High LTR analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="iEgfmiy2FWUL"
# # A2C
# ---
# In this notebook, you will implement an A2C agent with OpenAI Gym's `Pendulum-v0` environment.
#
# ### 1. Import the Necessary Packages
# + [markdown] colab_type="text" id="zxRb62Ef0hc7"
# Here I have used the difference between the Q-value of the current state and the N-step bootstrapped next Q-value. If you truly want the TD loss between 2 consecutive states, run the program with `NUM_STEPS = 1`.
#
# Here I have used one Optimizer for both the actor and critic. Feel free to split that up and check for your training environment
# + colab_type="code" id="3VFiSY_fFiR2" outputId="bb93523f-16c3-4431-944c-33cfdea6540e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# memory footprint support libraries/code
# !ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
# !pip install gputil
# !pip install psutil
# !pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn’t guaranteed
gpu = GPUs[0]
def printm():
    """Print current host RAM and GPU memory usage.

    Reads the module-level `gpu` handle (first GPU from GPUtil); GPUtil
    reports memory in MB, hence the /1024 conversions to GB below.
    """
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " | Proc size: " + humanize.naturalsize( process.memory_info().rss))
    #print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
    print("GPU RAM Free: {0:.2f}GB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.2f}GB".format(gpu.memoryFree/1024, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal/1024))
printm()
###
# !apt-get install -y python-numpy python-dev cmake zlib1g-dev libjpeg-dev xvfb libav-tools xorg-dev python-opengl libboost-all-dev libsdl2-dev swig
# !pip install pyvirtualdisplay
# ##!pip install piglet
# !pip install pyglet==1.3.2
# !apt-get install xvfb
from pyvirtualdisplay import Display
# !apt-get install cmake
# !pip install setuptools
# !pip install ez_setup
# !pip install gym[atari]
##
#import gym
# ##!pip3 install box2d
# !pip install box2d-py
# !pip install gym[Box_2D]
import gym
##
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
# %matplotlib inline
# !python -m pip install pyvirtualdisplay
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1400, 900))
display.start()
is_ipython = 'inline' in plt.get_backend()
if is_ipython:
from IPython import display
plt.ion()
# + colab_type="code" id="Bgv5brNrFzLz" colab={}
# + colab_type="code" id="AvMcPVVNGIDG" colab={}
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.autograd import Variable
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch.distributions import Categorical
from IPython.display import clear_output
import matplotlib.pyplot as plt
# %matplotlib inline
# + colab_type="code" id="58mbleSBGoU7" colab={}
# hyperparameters
hidden_size = 256
learning_rate = 3e-4
# Constants
GAMMA = 0.99
num_steps = 300 ## Here I am going till the end of any episode
n_episodes =4000
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# + colab_type="code" id="Yv_oFhns9QqL" colab={}
#This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
    """Child-process loop for SubprocVecEnv.

    Builds one environment from the pickled factory and services commands
    ('step', 'reset', 'reset_task', 'close', 'get_spaces') received over
    the pipe until told to close.
    """
    # Close the parent's pipe end inside the child so EOF propagates
    # correctly if the parent dies.
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            # Auto-reset on episode end so the vectorized env always
            # returns a valid next observation.
            if done:
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            ob = env.reset()
            remote.send(ob)
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        else:
            raise NotImplementedError
class VecEnv(object):
    """
    An abstract asynchronous, vectorized environment.
    """
    def __init__(self, num_envs, observation_space, action_space):
        # All sub-environments are assumed to share these spaces.
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space
    def reset(self):
        """
        Reset all the environments and return an array of
        observations, or a tuple of observation arrays.
        If step_async is still doing work, that work will
        be cancelled and step_wait() should not be called
        until step_async() is invoked again.
        """
        pass
    def step_async(self, actions):
        """
        Tell all the environments to start taking a step
        with the given actions.
        Call step_wait() to get the results of the step.
        You should not call this if a step_async run is
        already pending.
        """
        pass
    def step_wait(self):
        """
        Wait for the step taken with step_async().
        Returns (obs, rews, dones, infos):
         - obs: an array of observations, or a tuple of
                arrays of observations.
         - rews: an array of rewards
         - dones: an array of "episode done" booleans
         - infos: a sequence of info objects
        """
        pass
    def close(self):
        """
        Clean up the environments' resources.
        """
        pass
    def step(self, actions):
        """Synchronous convenience wrapper: dispatch the actions, then wait."""
        self.step_async(actions)
        return self.step_wait()
class CloudpickleWrapper(object):
    """Ship an arbitrary payload through multiprocessing pipes.

    multiprocessing serializes with the stdlib pickle, which rejects
    closures/lambdas. Serialize with cloudpickle instead (producing a byte
    string) and unpickle that byte string with the stdlib pickle on the
    receiving side.
    """

    def __init__(self, x):
        # Wrapped payload (typically an environment-factory thunk),
        # exposed as `.x` for callers.
        self.x = x

    def __getstate__(self):
        # Imported lazily so the dependency is only needed when pickling.
        import cloudpickle

        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        import pickle

        self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False   # True while a step_async is outstanding
        self.closed = False    # guards against double close()
        nenvs = len(env_fns)
        self.nenvs = nenvs
        # One duplex pipe per environment; this process keeps `remotes`,
        # each worker gets the matching entry of `work_remotes`.
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        # CloudpickleWrapper lets the env factories (possibly closures)
        # survive multiprocessing's stdlib pickling.
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        # The workers own the work_remote ends now; close ours so EOF works.
        for remote in self.work_remotes:
            remote.close()
        # Ask one worker for the (shared) spaces.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions):
        """Dispatch one action per environment without blocking."""
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        """Collect the results of the last step_async as stacked arrays."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        """Reset every environment; returns stacked initial observations."""
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        """Call reset_task() on every environment (multi-task envs only)."""
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        """Shut down all workers; idempotent."""
        if self.closed:
            return
        # Drain any pending step results first so workers aren't blocked
        # in send() and can see the 'close' command.
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
    def __len__(self):
        return self.nenvs
# + id="rS4pRKnKPGKz" colab_type="code" colab={}
###Create Environment
num_envs= 16
env_name = "Pendulum-v0"
def make_env():
    """Return a zero-argument factory that builds a fresh `env_name` env.

    SubprocVecEnv needs picklable callables rather than live environments,
    so each worker process calls the returned thunk to construct its own
    copy.
    """
    def _build():
        return gym.make(env_name)
    return _build
envs = [make_env() for i in range(num_envs)]
envs = SubprocVecEnv(envs)
env = gym.make(env_name)
# + colab_type="code" id="jwOgzI8CGz8a" colab={}
##Actor Critic Network
from torch.distributions import Normal
def init_weights(m):
    """Initialize Linear layers: N(0, 0.1) weights, constant 0.1 bias."""
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, mean=0.0, std=0.1)
        nn.init.constant_(m.bias, 0.1)


class ActorCritic(nn.Module):
    """Actor-critic for continuous actions.

    The actor head emits the mean of a Normal policy, the critic head emits
    the state value, and log(sigma) is a learned, state-independent
    parameter.
    """

    def __init__(self, num_inputs, num_outputs, hidden_size= 256, std= 0.0):
        super(ActorCritic, self).__init__()
        # Policy head: state -> mu of the action distribution.
        self.actor = nn.Sequential(
            nn.Linear(num_inputs, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, num_outputs),
        )
        # Value head: state -> scalar state value.
        self.critic = nn.Sequential(
            nn.Linear(num_inputs, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1),
        )
        # Learned log(sigma), shared across states; exp() recovers sigma.
        self.log_std = nn.Parameter(torch.ones(1, num_outputs) * std)
        self.apply(init_weights)

    def forward(self, state):
        """Return (Normal action distribution, state value) for `state`."""
        mu = self.actor(state)
        value = self.critic(state)
        sigma = self.log_std.exp().expand_as(mu)
        return Normal(mu, sigma), value
# + id="w9c3BkuaPGK-" colab_type="code" colab={}
def plot(episode, rewards):
    """Redraw the running evaluation-reward curve in the notebook output."""
    clear_output(True)
    fig = plt.figure(figsize=(20, 5))
    plt.subplot(131)
    plt.title('frame {:.0f} reward {:0.2f}'.format(episode, rewards[-1]))
    plt.plot(rewards)
    plt.show()
def test_env(vis=False):
    """Roll out one full episode on the evaluation env; return the total reward.

    Uses the module-level `env`, `model`, and `device`. Set vis=True to render.
    """
    state = env.reset()
    if vis:
        env.render()
    done = False
    total_rewards = 0
    while not done:
        state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
        dist, _ = model(state_tensor)
        action = dist.sample().cpu().numpy()[0]
        next_state, rewards, done, _ = env.step(action)
        state = next_state
        if vis:
            env.render()
        total_rewards += rewards
    return total_rewards
# + id="cJbKZCI_R-T3" colab_type="code" colab={}
def compute_returns(next_value, rewards, masks, gamma=0.99):
    """Bootstrapped discounted returns for one rollout.

    Walks the rollout backwards from `next_value`; `masks` (0 at episode ends,
    1 otherwise) stop the bootstrap from leaking across episode boundaries.
    Returns a list aligned with `rewards`.
    """
    running = next_value
    returns = []
    for reward, mask in zip(reversed(rewards), reversed(masks)):
        running = reward + gamma * running * mask
        returns.append(running)
    returns.reverse()
    return returns
# + id="vv4rMw5NSKs9" colab_type="code" colab={}
# Environment dimensions taken from the vectorized env.
num_inputs = envs.observation_space.shape[0]
num_outputs = envs.action_space.shape[0]
#Hyper params:
hidden_size = 256
lr = 3e-2
n_episodes = 1000+1
NUM_STEPS = 20  # rollout length per A2C update
model= ActorCritic(num_inputs, num_outputs, hidden_size).to(device)
model_optimizer = Adam(model.parameters(), lr = lr)
# + id="npqY8alXSwWo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 336} outputId="86dd1e68-909a-476b-f825-10f73a3c1a14"
# A2C training loop: roll out up to NUM_STEPS transitions across the
# vectorized envs, then perform one synchronous actor-critic update.
state = envs.reset()
episode = 1
test_rewards = []
while episode < n_episodes:
    log_probs = []
    values = []
    rewards = []
    masks = []  # 0 where an env finished, so returns do not bootstrap across episodes
    entropy = 0
    for i in range(NUM_STEPS):
        state = torch.FloatTensor(state).to(device)
        dist, value = model(state)
        action = dist.sample()
        log_prob = dist.log_prob(action)
        next_state, reward, done, _ = envs.step(action.cpu().numpy())
        log_probs.append(log_prob)
        values.append(value)
        # NOTE(review): reward/mask are unsqueezed on dim 0 -> (1, num_envs)
        # while `value` appears to be (num_envs, 1); the reference A2C
        # implementation uses unsqueeze(1). Verify the intended shapes.
        rewards.append(torch.FloatTensor(reward).unsqueeze(0).to(device))
        masks.append(torch.FloatTensor(1 - done).unsqueeze(0).to(device))
        entropy += dist.entropy().mean()
        if np.any(done):
            episode += 1
            if episode % 1 == 0:
                # Evaluate the current policy and refresh the reward plot.
                test_rewards.append(np.mean([test_env() for _ in range(10)]))
                plot(episode, test_rewards)
            state = envs.reset()
            break
        else:
            state = next_state
    # Bootstrap the return from the value of the last observed state.
    next_state = torch.FloatTensor(next_state).to(device)
    _, next_value = model(next_state)
    returns = compute_returns(next_value, rewards, masks, gamma=0.99)
    log_probs = torch.cat(log_probs)
    returns = torch.cat(returns)
    values = torch.cat(values)
    # BUGFIX: advantage must be returns - values. It was values - returns,
    # which flips the sign of the policy gradient and pushes the policy AWAY
    # from high-return actions. The squared critic loss is unaffected.
    advantage = returns - values
    actor_loss = -(log_probs * advantage.detach()).mean()
    critic_loss = advantage.pow(2).mean()
    loss = actor_loss + 0.5 * critic_loss - .001 * entropy
    model_optimizer.zero_grad()
    loss.backward()
    model_optimizer.step()
# + id="1mGVWL1MZKPr" colab_type="code" colab={}
| 3_A2C_Continuous.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Try except
# Sometimes we write bad code, sometimes the environment of the PC isn't doing what you expect, and sometimes other people screw up. When that happens, you often don't want the whole program to crash and stop working. This is where **try** statements come in. They allow you to isolate parts of code you aren't sure about, attempt what might work, and specify what should happen when it doesn't. This is often really important in networking or user interactions.
try:
    print(x)  # x is not defined yet, so this raises NameError
except:
    print("An exception occurred")
# Here we have issues with the code we wrote.
# We can also have different code to respond to different types of errors
try:
    print(x)
except NameError:
    print("Variable x is not defined")
except:
    print("Something else went wrong")
x = "yay"
try:
    # NOTE(review): the comma is inside the string, so this is one
    # (nonexistent) filename; open() raises and the generic handler runs.
    f = open("fileDoesNotExist.txt,read")
    print(x)
except NameError:
    print("Variable x is not defined")
except:
    print("Something else went wrong")
# We can also use a finally statement to do something in all cases. One reason to use it is when there is a break statement in the error case and cleanup must still run.
y = 1
while y < 10:
    try:
        print(x)
        y+=1
    except:
        print("Something went wrong")
        break
    finally:
        # Runs on every iteration, whether or not the try body raised.
        print("The 'try except' is finished")
print(y)
| JupyterNotebooks/Try Catch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia
# language: julia
# name: julia-1.5
# ---
# # Using the secant line
#
# We return to finding a root of the equation $xe^x=2$.
using Plots
# +
# Target function: we seek the root of x*exp(x) - 2 = 0.
f = x -> x*exp(x) - 2;
plot(f,0.25,1.25,label="function",leg=:topleft)
# -
# From the graph, it's clear that there is a root near $x=1$. To be more precise, there is a root in the interval $[0.5,1]$. So let us take the endpoints of that interval as _two_ initial approximations.
# Two starting approximations bracketing the root.
x1 = 1; f1 = f(x1);
x2 = 0.5; f2 = f(x2);
scatter!([x1,x2],[f1,f2],label="initial points")
# Instead of constructing the tangent line by evaluating the derivative, we can construct a linear model function by drawing the line between the two points $\bigl(x_1,f(x_1)\bigr)$ and $\bigl(x_2,f(x_2)\bigr)$. This is called a _secant line_.
# Slope of the line through (x1, f1) and (x2, f2).
slope2 = (f2-f1) / (x2-x1);
secant2 = x -> f2 + slope2*(x-x2);
plot!(secant2,0.25,1.25,label="secant line",l=:dash,color=:black)
# As before, the next value in the iteration is the root of this linear model.
x3 = x2 - f2/slope2;
@show f3 = f(x3)
scatter!([x3],[0],label="root of secant")
# For the next linear model, we use the line through the two most recent points. The next iterate is the root of that secant line, and so on.
slope3 = (f3-f2) / (x3-x2);
x4 = x3 - f3/slope3;
f4 = f(x4)
| book/nonlineqn/demos/secant-line.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:geo]
# language: python
# name: conda-env-geo-py
# ---
# # Label Roads
# For machine learning, a set of road labels is needed for the downloaded aerial images. That is, for each aerial image, a mask image of the same size is needed, with each pixel having value 1 or 0 to indicate the presence or absence of a road.
#
#
# <table><tr><td><img src='/img/notebook/label_example_img.png'></td><td><img src='/img/notebook/label_example_label.png'></td></tr></table>
#
#
# Here, we use Open Street Map (OSM) data to create binary road masks for the aerial images as shown above. The OSM data is in the form of lines denoted by sequences of geographic coordinates, and the aerial images are georeferenced, meaning each pixel can be mapped to a coordinate pair. Thus, assigning labels is relatively straightforward by mapping the road coordinates to the pixels in the images. There are two notable shortcomings of this approach:
#
# 1. OSM data may sometimes be incomplete or inaccurate.
# 2. OSM gives only the location of the center of the road and not the full extent of the road width.
#
# The first issue is hard to correct, but with enough data a neural net can hopefully overcome the noise.
#
# The second issue can be approached by assigning road labels more liberally. Rather than only assigning the centerline pixel as a road, one can label the adjacent neighboring pixels as roads as well. Methodical refinements of this procedure include expanding the neighborhood based on road type (e.g. highways have a larger neighborhood than residential streets) or by assigning a probability distribution to neighboring pixels rather than hard 1's. However, for this project, it is sufficient simply to expand the road labels by a fixed amount (this has already been applied in the example above). Compare the undilated (left) and dilated (right) label examples below.
#
# <table><tr><td><img src='/img/web/labels_no_dilation.png'></td><td><img src='/img/web/labels_dilation.png'></td></tr></table>
#
# In this rest of this notebook, a label image (i.e. a binary mask) is generated for each NAIP image downloaded previously. These images are of course the same size as the NAIP image and stored locally. Then, for the large city (Phoenix, AZ) which serves as the training and benchmark set, each image/mask pair is broken up into smaller tiles (say, 512x512x3 pixels) that will be fed as input to a neural net. These tilings are saved as datasets in the hdf5 format.
# +
import rasterio
import fiona
import json
import h5py
import cv2
import os
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from rasterio.features import rasterize
from helpers import make_tiles
from pyproj import Proj
from PIL import Image
# %matplotlib inline
# -
# First, we need to figure out which coordinate reference system (CRS) / projections we're working with. Different images may have different projections depending on their location, so the road coordinates need to be mapped with the correct projection.
#
# It's a little overkill, but here we simply project all roads in Arizona for each CRS we find. If memory were a constrained resource, we could limit it to only roads within the cities that were downloaded, but the projections for a single state are managable.
from importlib import reload
import helpers
reload(helpers)  # pick up edits to helpers.py without restarting the kernel
from helpers import make_tiles
# +
with open('data/naip/download_info.json', 'r') as places_in:
    places = json.load(places_in)
## Get all GeoTiff paths as a flat list
tif_paths_in = [place_info['img_paths'] for _, place_info in places.items()]
tif_paths_in = [path_in for paths_in in tif_paths_in for path_in in paths_in]
## Get projections
projections = []
for tif_path_in in tif_paths_in:
    with rasterio.open(tif_path_in) as tif_in:
        projections.append(tif_in.crs['init'])
projections = list(set(projections))  # de-duplicate CRS identifiers
print(projections)
# +
## Getting shapes for all roads in AZ
shape_path = 'data/osm/arizona-latest-free_shp/gis.osm_roads_free_1.shp'
roads_map = {} # Key is projection CRS, value is list of projected roads
for projection in projections:
    ## Get transformation
    proj = Proj(init = projection)
    ## Project road coordinates
    # NOTE(review): the shapefile is re-read once per projection; fine for a
    # few CRSs, but the read could be hoisted if this becomes slow.
    roads = []
    for i, feat in enumerate(fiona.open(shape_path, 'r')):
        lons, lats = zip(*feat['geometry']['coordinates'])
        xx, yy = proj(lons, lats)
        road = {'type': 'LineString','coordinates': list(zip(xx,yy))} # In meters
        roads.append(road)
    roads_map[projection] = roads
print('Found {} roads'.format(len(roads_map[projections[0]])))
# -
# Next, loop through each image, get its CRS, and overlay the roads with the corresponding projection. A dilation from the OpenCV library is used to expand road labels.
# +
## Save labels as .PNG images
## Writing roads within bounds of a source geotiff.
labels_dir = 'data/naip/img/labels/'
kernel = np.ones((3,3), np.uint8) # For label dilation
## Make one output label per input image
for tif_path_in in tif_paths_in:
    labels_name_out = tif_path_in.split('/')[-1].replace('.tif', '_labels.png')
    labels_path_out = labels_dir + labels_name_out
    ## Skip if we've already made it
    if os.path.isfile(labels_path_out):
        continue
    with rasterio.open(tif_path_in) as tif_in:
        roads = roads_map[tif_in.crs['init']]
        ## Rasterize a mask
        labels = rasterize(
            roads,
            out_shape = tif_in.shape,
            transform = tif_in.transform,
            default_value = 1,
            fill = 0,
            all_touched=True
        )
    # Dilate so labels cover more than the 1-px road centerline, then save as 0/255 PNG.
    labels = cv2.dilate(labels, kernel, iterations = 2)
    labels_img = Image.fromarray(labels * 255)
    labels_img.save(labels_path_out)
# -
# The data from Phoenix is used as the train/test/dev sets and will be stored in a hdf5 file. Two helper functions will accomplish this. First, `make_tiles` takes an image and chunks it up into smaller sizes that can be input to the neural net. Further, we can specify if there should be any padding which there should be for the input image because the neural net reduces the size of the input. In this case, the padding comes from reflecting the edges of the input. We tile both the aerial image and the corresponding label image. The code is in `helpers.py`.
#
# Then, `make_hdf5_set` defined below takes a list of multiple aerial/label image pairs, splits each into tiles (called chunks in the code), and randomly assigns the tiles to the train/dev/test sets in specified proportions.
def make_hdf5_set(
    hdf5_path,
    img_paths,
    frac_train = .80,
    frac_dev = .10,
    frac_test = .10,
    train_input_name = 'X_train',
    train_label_name = 'Y_train',
    dev_input_name = 'X_dev',
    dev_label_name = 'Y_dev',
    test_input_name = 'X_test',
    test_label_name = 'Y_test'
):
    """Tile each aerial image and its label mask, split the tiles into
    train/dev/test sets, and append everything to one HDF5 file.

    hdf5_path:  output .h5 file (overwritten).
    img_paths:  GeoTIFF paths; the matching label PNG path is derived from each.
    frac_*:     split fractions; must sum to 1.
    *_name:     HDF5 dataset names for the six resulting arrays.
    """
    assert frac_train + frac_dev + frac_test == 1
    with h5py.File(hdf5_path, 'w') as data:
        chunk_counter = 0
        for i,img_path in enumerate(img_paths):
            ## Chunk the image and corresponding labels
            labels_path = img_path.replace('download', 'labels').replace('.tif', '_labels.png')
            X_chunks, _, _ = make_tiles(img_path, pad = 64)
            labels_chunks, _, _ = make_tiles(labels_path)
            labels_chunks = labels_chunks / labels_chunks.max()  # scale 0/255 -> 0/1
            labels_chunks = np.expand_dims(labels_chunks, 3).astype(np.int8)
            chunk_counter = chunk_counter + X_chunks.shape[0]
            ## Split into train/dev/test
            X_train, X_test, Y_train, Y_test = train_test_split(X_chunks, labels_chunks, test_size=frac_test, random_state=40)
            X_train, X_dev, Y_train, Y_dev = train_test_split(X_train, Y_train, train_size=frac_train/(frac_train+frac_dev), random_state=30)
            ## Add first chunks to dataset
            ## Should make the maxshape not so hardcoded
            # NOTE(review): the Y maxshapes declare 3 channels although labels
            # have 1 after expand_dims; only axis 0 is ever resized, so this
            # works, but the hard-coded 640/512 sizes assume 512-px tiles with
            # pad=64 — confirm against make_tiles before changing tile size.
            if i == 0:
                dset_x_train = data.create_dataset(train_input_name, X_train.shape, maxshape = (None, 640, 640, 3), data=X_train)
                dset_x_dev = data.create_dataset(dev_input_name, X_dev.shape, maxshape = (None, 640, 640, 3), data=X_dev)
                dset_x_test = data.create_dataset(test_input_name, X_test.shape, maxshape = (None, 640, 640, 3), data=X_test)
                dset_y_train = data.create_dataset(train_label_name, Y_train.shape, maxshape = (None, 512, 512, 3), data=Y_train)
                dset_y_dev = data.create_dataset(dev_label_name, Y_dev.shape, maxshape = (None, 512, 512, 3), data=Y_dev)
                dset_y_test = data.create_dataset(test_label_name, Y_test.shape, maxshape = (None, 512, 512, 3), data=Y_test)
            ## Append new chunks to the dataset
            else:
                n_train_before_resize = dset_x_train.shape[0]
                n_train_after_resize = n_train_before_resize + X_train.shape[0]
                n_dev_before_resize = dset_x_dev.shape[0]
                n_dev_after_resize = n_dev_before_resize + X_dev.shape[0]
                n_test_before_resize = dset_x_test.shape[0]
                n_test_after_resize = n_test_before_resize + X_test.shape[0]
                dset_x_train.resize(n_train_after_resize, axis = 0)
                dset_y_train.resize(n_train_after_resize, axis = 0)
                dset_x_dev.resize(n_dev_after_resize, axis = 0)
                dset_y_dev.resize(n_dev_after_resize, axis = 0)
                dset_x_test.resize(n_test_after_resize, axis = 0)
                dset_y_test.resize(n_test_after_resize, axis = 0)
                dset_x_train[n_train_before_resize:] = X_train
                dset_y_train[n_train_before_resize:] = Y_train
                dset_x_dev[n_dev_before_resize:] = X_dev
                dset_y_dev[n_dev_before_resize:] = Y_dev
                dset_x_test[n_test_before_resize:] = X_test
                dset_y_test[n_test_before_resize:] = Y_test
    print('Saved {} input/output pairs to {}'.format(chunk_counter, hdf5_path))
# Since the whole Phoenix dataset is rather large (~25GB HDF5 file), for development purposes we'll create a smaller set based on only a few input tiles that we manually specify. Then we'll do the same for the whole dataset.
img_paths = [
    'm_3311117_ne_12_1_20150601',
    'm_3311117_sw_12_1_20150529',
    'm_3311117_nw_12_1_20150529',
    'm_3311117_se_12_1_20150601',
    'm_3311125_ne_12_1_20150601',
    'm_3311125_nw_12_1_20150529',
    'm_3311125_se_12_1_20150601',
    'm_3311125_sw_12_1_20150529',
    'm_3311133_ne_12_1_20150601',
    'm_3311133_nw_12_1_20150529',
    'm_3311133_se_12_1_20150601',
    'm_3311133_sw_12_1_20150529'
]
img_paths = ['data/naip/img/download/' + img_path + '.tif' for img_path in img_paths]
hdf5_path = 'data/naip/hdf5/phoenix_subset.h5'
make_hdf5_set(hdf5_path, img_paths)
# Full Phoenix dataset (all downloaded tiles).
img_paths = places['Phoenix']['img_paths']
hdf5_path = 'data/naip/hdf5/phoenix.h5'
make_hdf5_set(hdf5_path, img_paths)
| label_roads.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # SageMaker and AWS KMS–Managed Keys
# _**Handling KMS encrypted data with SageMaker model training and encrypting the generated model artifacts**_
#
# ---
#
# ---
#
# ## Contents
#
# 1. [Background](#Background)
# 1. [Setup](#Setup)
# 1. [Optionally, upload encrypted data files for training](#Optionally,-upload-encrypted-data-files-for-training)
# 1. [Training the XGBoost model](#Training-the-XGBoost-model)
# 1. [Set up hosting for the model](#Set-up-hosting-for-the-model)
# 1. [Validate the model for use](#Validate-the-model-for-use)
#
# ---
# ## Background
#
# AWS Key Management Service ([AWS KMS](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html)) enables
# Server-side encryption to protect your data at rest. Amazon SageMaker training works with KMS encrypted data if the IAM role used for S3 access has permissions to encrypt and decrypt data with the KMS key. Further, a KMS key can also be used to encrypt the model artifacts at rest using Amazon S3 server-side encryption. In this notebook, we demonstrate SageMaker training with KMS encrypted data.
#
# ---
#
# ## Setup
#
# ### Prerequisites
#
# In order to successfully run this notebook, you must first:
#
# 1. Have an existing KMS key from AWS IAM console or create one ([learn more](http://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html)).
# 2. Allow the IAM role used for SageMaker to encrypt and decrypt data with this key from within applications and when using AWS services integrated with KMS ([learn more](http://docs.aws.amazon.com/console/kms/key-users)).
#
# We use the `key-id` from the KMS key ARN `arn:aws:kms:region:acct-id:key/key-id`.
#
# ### General Setup
# Let's start by specifying:
# * AWS region.
# * The IAM role arn used to give learning and hosting access to your data. See the documentation for how to specify these.
# * The S3 bucket that you want to use for training and model data.
# + isConfigCell=true
# %%time
import os
import io
import boto3
import pandas as pd
import numpy as np
import re
from sagemaker import get_execution_role
region = boto3.Session().region_name
role = get_execution_role()
# The `key-id` part of the KMS key ARN, used for server-side encryption below.
kms_key_id = '<your-kms-key-id>'
bucket='<s3-bucket>' # put your s3 bucket name here, and create s3 bucket
prefix = 'sagemaker/DEMO-kms'
# customize to your bucket where you have stored the data
bucket_path = 'https://s3-{}.amazonaws.com/{}'.format(region,bucket)
# -
# ## Optionally, upload encrypted data files for training
#
# To demonstrate SageMaker training with KMS encrypted data, we first upload a toy dataset that has Server Side Encryption with customer provided key.
#
# ### Data ingestion
#
# We, first, read the dataset from an existing repository into memory. This processing could be done *in situ* by Amazon Athena, Apache Spark in Amazon EMR, Amazon Redshift, etc., assuming the dataset is present in the appropriate location. Then, the next step would be to transfer the data to S3 for use in training. For small datasets, such as the one used below, reading into memory isn't onerous, though it would be for larger datasets.
# NOTE(review): load_boston was removed from scikit-learn 1.2; running this
# cell requires an older scikit-learn (or a replacement dataset).
from sklearn.datasets import load_boston
boston = load_boston()
X = boston['data']
y = boston['target']
feature_names = boston['feature_names']
data = pd.DataFrame(X, columns=feature_names)
target = pd.DataFrame(y, columns={'MEDV'})
data['MEDV'] = y
local_file_name = 'boston.csv'
data.to_csv(local_file_name, header=False, index=False)
# ### Data preprocessing
#
# Now that we have the dataset, we need to split it into *train*, *validation*, and *test* datasets which we can use to evaluate the accuracy of the machine learning algorithm. We randomly split the dataset into 60% training, 20% validation and 20% test. Note that SageMaker Xgboost, expects the label column to be the first one in the datasets. So, we'll move the median value column (`MEDV`) from the last to the first position within the `write_file` method below.
from sklearn.model_selection import train_test_split
# 60% train / 20% validation / 20% test.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=1)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5, random_state=1)
def write_file(X, y, fname):
    """Write features X with labels y to a headerless CSV, label column first.

    SageMaker XGBoost expects the target in the first column, so each output
    row is [y, X[0], X[1], ...]. Works for any feature width. (The original
    built column names from the module-level `boston` dataset and an unused
    `target` frame, but neither was ever written because header=False.)
    """
    data = pd.DataFrame(X)
    data.insert(0, 'MEDV', y)  # target first, as required by SageMaker XGBoost
    data.to_csv(fname, header=False, index=False)
# Write the three splits to local CSVs for upload to S3.
train_file = 'train.csv'
validation_file = 'val.csv'
test_file = 'test.csv'
write_file(X_train, y_train, train_file)
write_file(X_val, y_val, validation_file)
write_file(X_test, y_test, test_file)
# ### Data upload to S3 with Server Side Encryption
# +
s3 = boto3.client('s3')

# Upload the training CSV with SSE-KMS so it is encrypted at rest.
# (Fixed: the original opened both files and never closed them; `with`
# guarantees the handles are released even if put_object raises.)
key_train = '{}/train/{}'.format(prefix,train_file)
print("Put object...")
with open(train_file, 'rb') as data_train:
    s3.put_object(Bucket=bucket,
                  Key=key_train,
                  Body=data_train,
                  ServerSideEncryption='aws:kms',
                  SSEKMSKeyId=kms_key_id)
print("Done uploading the training dataset")

# Upload the validation CSV the same way.
key_validation = '{}/validation/{}'.format(prefix,validation_file)
print("Put object...")
with open(validation_file, 'rb') as data_validation:
    s3.put_object(Bucket=bucket,
                  Key=key_validation,
                  Body=data_validation,
                  ServerSideEncryption='aws:kms',
                  SSEKMSKeyId=kms_key_id)
print("Done uploading the validation dataset")
# -
# ## Training the SageMaker XGBoost model
#
# Now that we have our data in S3, we can begin training. We'll use Amazon SageMaker XGboost algorithm as an example to demonstrate model training. Note that nothing needs to be changed in the way you'd call the training algorithm. The only requirement for training to succeed is that the IAM role (`role`) used for S3 access has permissions to encrypt and decrypt data with the KMS key (`kms_key_id`). You can set these permissions using the instructions [here](http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-users). If the permissions aren't set, you'll get the `Data download failed` error.
# Region -> ECR image URI for the SageMaker built-in XGBoost algorithm.
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/xgboost:latest',
              'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/xgboost:latest',
              'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/xgboost:latest',
              'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/xgboost:latest',
              'ap-northeast-1': '501404015308.dkr.ecr.ap-northeast-1.amazonaws.com/xgboost:latest',
              'ap-northeast-2': '306986355934.dkr.ecr.ap-northeast-2.amazonaws.com/xgboost:latest'}
container = containers[boto3.Session().region_name]
# +
# %%time
from time import gmtime, strftime
import time
job_name = 'DEMO-xgboost-single-regression' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print("Training job", job_name)
# Full CreateTrainingJob request. Training succeeds only if `role` can
# encrypt/decrypt with the KMS key used on the S3 input objects.
create_training_params = \
{
    "AlgorithmSpecification": {
        "TrainingImage": container,
        "TrainingInputMode": "File"
    },
    "RoleArn": role,
    "OutputDataConfig": {
        "S3OutputPath": bucket_path + "/"+ prefix + "/output"
    },
    "ResourceConfig": {
        "InstanceCount": 1,
        "InstanceType": "ml.m4.4xlarge",
        "VolumeSizeInGB": 5
    },
    "TrainingJobName": job_name,
    "HyperParameters": {
        "max_depth":"5",
        "eta":"0.2",
        "gamma":"4",
        "min_child_weight":"6",
        "subsample":"0.7",
        "silent":"0",
        "objective":"reg:linear",
        "num_round":"5"
    },
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 86400
    },
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": bucket_path + "/"+ prefix + '/train',
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "csv",
            "CompressionType": "None"
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": bucket_path + "/"+ prefix + '/validation',
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "csv",
            "CompressionType": "None"
        }
    ]
}
client = boto3.client('sagemaker')
client.create_training_job(**create_training_params)
# NOTE(review): the bare `except:` below catches everything (including
# KeyboardInterrupt) and assumes the failure mode; consider `except Exception`.
try:
    # wait for the job to finish and report the ending status
    client.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
    training_info = client.describe_training_job(TrainingJobName=job_name)
    status = training_info['TrainingJobStatus']
    print("Training job ended with status: " + status)
except:
    print('Training failed to start')
    # if exception is raised, that means it has failed
    message = client.describe_training_job(TrainingJobName=job_name)['FailureReason']
    print('Training failed with the following error: {}'.format(message))
# -
# ## Set up hosting for the model
# In order to set up hosting, we have to import the model from training to hosting.
#
# ### Import model into hosting
#
# Register the model with hosting. This allows the flexibility of importing models trained elsewhere.
# +
# %%time
import boto3
from time import gmtime, strftime
# Register the trained model artifacts with hosting, named after the job.
model_name=job_name + '-model'
print(model_name)
info = client.describe_training_job(TrainingJobName=job_name)
model_data = info['ModelArtifacts']['S3ModelArtifacts']
print(model_data)
primary_container = {
    'Image': container,
    'ModelDataUrl': model_data
}
create_model_response = client.create_model(
    ModelName = model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
# -
# ### Create endpoint configuration
#
# SageMaker supports configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way. In addition, the endpoint configuration describes the instance type required for model deployment.
# +
from time import gmtime, strftime
endpoint_config_name = 'DEMO-XGBoostEndpointConfig-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_config_name)
# Single-variant config: all traffic goes to the one model on one instance.
create_endpoint_config_response = client.create_endpoint_config(
    EndpointConfigName = endpoint_config_name,
    ProductionVariants=[{
        'InstanceType':'ml.m4.xlarge',
        'InitialVariantWeight':1,
        'InitialInstanceCount':1,
        'ModelName':model_name,
        'VariantName':'AllTraffic'}])
print("Endpoint Config Arn: " + create_endpoint_config_response['EndpointConfigArn'])
# -
# ### Create endpoint
# Lastly, create the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
# +
# %%time
import time
endpoint_name = 'DEMO-XGBoostEndpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)
create_endpoint_response = client.create_endpoint(
    EndpointName=endpoint_name,
    EndpointConfigName=endpoint_config_name)
print(create_endpoint_response['EndpointArn'])
print('EndpointArn = {}'.format(create_endpoint_response['EndpointArn']))
# get the status of the endpoint
response = client.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
# wait until the status has changed
client.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
# print the status of the endpoint
endpoint_response = client.describe_endpoint(EndpointName=endpoint_name)
status = endpoint_response['EndpointStatus']
print('Endpoint creation ended with EndpointStatus = {}'.format(status))
if status != 'InService':
    raise Exception('Endpoint creation failed.')
# -
# -
# ## Validate the model for use
# Finally, you can now validate the model for use. You can obtain the endpoint from the client library using the result from previous operations, and generate predictions from the trained model using that endpoint.
#
runtime_client = boto3.client('runtime.sagemaker')
# +
import sys
import math
def do_predict(data, endpoint_name, content_type):
    """Invoke the endpoint once on the given CSV rows.

    data: list of CSV row strings (joined into a single payload).
    Returns the comma-split prediction strings from the response body.
    """
    payload = ''.join(data)
    response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,
                                              ContentType=content_type,
                                              Body=payload)
    result = response['Body'].read()
    result = result.decode("utf-8")
    result = result.split(',')
    return result
def batch_predict(data, batch_size, endpoint_name, content_type):
    """Invoke the endpoint over `data` in slices of at most `batch_size` rows.

    Prints a progress dot per batch and returns the flat list of predictions.
    """
    predictions = []
    total = len(data)
    for start in range(0, total, batch_size):
        if start + batch_size < total:
            chunk = data[start:(start + batch_size)]
            predictions.extend(do_predict(chunk, endpoint_name, content_type))
        else:
            # Final (possibly short) batch.
            predictions.extend(do_predict(data[start:total], endpoint_name, content_type))
        sys.stdout.write('.')
    return predictions
# -
# The following helps us calculate the Median Absolute Percent Error (MdAPE) on the batch dataset. Note that the intent of this example is not to produce the most accurate regressor but to demonstrate how to handle KMS encrypted data with SageMaker.
# +
# %%time
import json
import numpy as np
with open('test.csv') as f:
    lines = f.readlines()
#remove the labels
labels = [line.split(',')[0] for line in lines]
features = [line.split(',')[1:] for line in lines]
features_str = [','.join(row) for row in features]
preds = batch_predict(features_str, 100, endpoint_name, 'text/csv')
# Median Absolute Percent Error of predictions vs. held-out labels.
print('\n Median Absolute Percent Error (MdAPE) = ', np.median(np.abs(np.asarray(labels, dtype=float) - np.asarray(preds, dtype=float)) / np.asarray(labels, dtype=float)))
# -
# ### (Optional) Delete the Endpoint
#
# If you're ready to be done with this notebook, please run the delete_endpoint line in the cell below. This will remove the hosted endpoint you created and avoid any charges from a stray instance being left on.
client.delete_endpoint(EndpointName=endpoint_name)
| advanced_functionality/handling_kms_encrypted_data/handling_kms_encrypted_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Cloblak/aipi540_deeplearning/blob/main/1D_CNN_Attempts/1D_CNN_WorkingPipeLine.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Xj0pR3efRVrc" colab={"base_uri": "https://localhost:8080/"} outputId="09330500-a2e8-4750-d41d-3999aa41be85"
# !pip install alpaca_trade_api
# + [markdown] id="hdKRKIogGAu6"
# Features To Consider
# - Targets only predict sells within market hours; i.e. at 1530, the target is predicting the price for 1100 the next day. Data from pre- and post-market is taken into consideration, and a sell or buy will be indicated if the price will fluctuate after close.
# + id="J1fWNRnTQZX-"
# Import Dependencies
import numpy as np
import pandas as pd
import torch
from torchvision import datasets, transforms
import torchvision
from torch.utils.data import DataLoader, TensorDataset
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout
from torch.optim import Adam, SGD
from torchsummary import summary
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm.notebook import tqdm
import alpaca_trade_api as tradeapi
from datetime import datetime, timedelta, tzinfo, timezone, time
import os.path
import ast
import threading
import math
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
# + id="DrI_WR501Iis" outputId="87ad442f-9b2c-4da5-d68a-286ca5bd8ae4" colab={"base_uri": "https://localhost:8080/"}
random_seed = 101
torch.manual_seed(random_seed)  # make torch weight init / sampling reproducible
# + id="IXnO8ykgRIuv"
# NOTE(review): live API credentials are hard-coded here. Rotate these keys
# and load them from environment variables or a secrets manager instead of
# committing them to the notebook.
PAPER_API_KEY = "PKE39LILN9SL1FMJMFV7"
PAPER_SECRET_KEY = "<KEY>"
PAPER_BASE_URL = 'https://paper-api.alpaca.markets'
# + id="_3XShkLcRQMs"
api = tradeapi.REST(PAPER_API_KEY, PAPER_SECRET_KEY, PAPER_BASE_URL, api_version='v2')
# + id="tINNlljbRaDs"
def prepost_train_test_validate_offset_data(api, ticker, interval, train_days=180, test_days=60, validate_days=30, offset_days = 0):
    """Download Alpaca bars (pre/post market included) for `ticker` and split
    them chronologically into train / validate / test / offset DataFrames.

    api:       alpaca_trade_api REST client
    ticker:    symbol to download
    interval:  bar size string, e.g. "5Min"
    *_days:    calendar-day length of each split; `offset_days` shifts the
               whole window back from "now"
    Returns (training_df, validate_df, testing_df, full_train, offset_df,
             final_df, traintest_day, valtest_day).
    """
    # NOTE(review): these two dicts are initialised but never used below.
    ticker_data_dict = None
    ticker_data_dict = {}
    monthly_data_dict = None
    monthly_data_dict = {}
    interval_loop_data = None
    interval_loop_data = pd.DataFrame()
    stock_data = None
    days_to_collect = train_days + test_days + validate_days + offset_days
    TZ = 'US/Eastern'
    # Request window: `days_to_collect` calendar days back from "now" (UTC).
    start = pd.to_datetime((datetime.now() - timedelta(days=days_to_collect)).strftime("%Y-%m-%d %H:%M"), utc=True)
    end = pd.to_datetime(datetime.now().strftime("%Y-%m-%d %H:%M"), utc=True)
    stock_data = api.get_bars(ticker, interval, start = start.isoformat(), end=end.isoformat(), adjustment="raw").df
    interval_loop_data = interval_loop_data.append(stock_data)
    df_start_ref = interval_loop_data.index[0]
    start_str_ref = pd.to_datetime(start, utc=True)
    # The API caps each response, so keep paging backwards (re-requesting up
    # to the earliest bar received so far) until within ~2.5 days of `start`.
    while start_str_ref.value < ( pd.to_datetime(df_start_ref, utc=True) - pd.Timedelta(days=2.5)).value:
        end_new = pd.to_datetime(interval_loop_data.index[0].strftime("%Y-%m-%d %H:%M"), utc=True).isoformat()
        stock_data_new = None
        stock_data_new = api.get_bars(ticker, interval, start=start, end=end_new, adjustment="raw").df
        #stock_data_new = stock_data_new.reset_index()
        # NOTE(review): sort_values(by=['index']) assumes a column literally
        # named 'index' exists on the appended frame — verify against the API
        # response schema.
        interval_loop_data = interval_loop_data.append(stock_data_new).sort_values(by=['index'], ascending=True)
        df_start_ref = interval_loop_data.index[0]
    stock_yr_min_df = interval_loop_data.copy()
    # Re-label the lowercase API columns with the capitalised names the rest
    # of the notebook expects, and convert the index to Eastern time.
    stock_yr_min_df["Open"] = stock_yr_min_df['open']
    stock_yr_min_df["High"]= stock_yr_min_df["high"]
    stock_yr_min_df["Low"] = stock_yr_min_df["low"]
    stock_yr_min_df["Close"] = stock_yr_min_df["close"]
    stock_yr_min_df["Volume"] = stock_yr_min_df["volume"]
    stock_yr_min_df["VolumeWeightedAvgPrice"] = stock_yr_min_df["vwap"]
    stock_yr_min_df["Time"] = stock_yr_min_df.index.tz_convert(TZ)
    stock_yr_min_df.index = stock_yr_min_df.index.tz_convert(TZ)
    final_df = stock_yr_min_df.filter(["Time", "Open", "High", "Low", "Close", "Volume", "VolumeWeightedAvgPrice"], axis = 1)
    # Chronological split boundaries, measured back from the last bar.
    first_day = final_df.index[0]
    traintest_day = final_df.index[-1] - pd.Timedelta(days= test_days+validate_days+offset_days)
    valtest_day = final_df.index[-1] - pd.Timedelta(days= test_days+offset_days)
    last_day = final_df.index[-1] - pd.Timedelta(days= offset_days)
    training_df = final_df.loc[first_day:traintest_day] #(data_split - pd.Timedelta(days=1))]
    validate_df = final_df.loc[traintest_day:valtest_day]
    testing_df = final_df.loc[valtest_day:last_day]
    full_train = final_df.loc[first_day:last_day]
    offset_df = final_df.loc[last_day:]
    return training_df, validate_df, testing_df, full_train, offset_df, final_df, traintest_day, valtest_day
def markethours_train_test_validate_offset_data(api, ticker, interval, train_days=180, test_days=60, validate_days=30, offset_days = 0):
    """Like prepost_train_test_validate_offset_data, but uses the older
    get_barset endpoint and keeps only regular-session bars (09:29-16:05 ET).

    Returns the same 8-tuple (training_df, validate_df, testing_df,
    full_train, offset_df, final_df, traintest_day, valtest_day), without the
    VolumeWeightedAvgPrice column.
    """
    # NOTE(review): these two dicts are initialised but never used below.
    ticker_data_dict = None
    ticker_data_dict = {}
    monthly_data_dict = None
    monthly_data_dict = {}
    interval_loop_data = None
    interval_loop_data = pd.DataFrame()
    stock_data = None
    days_to_collect = train_days + test_days + validate_days + offset_days
    TZ = 'US/Eastern'
    start = pd.to_datetime((datetime.now() - timedelta(days=days_to_collect)).strftime("%Y-%m-%d %H:%M"), utc=True)
    end = pd.to_datetime(datetime.now().strftime("%Y-%m-%d %H:%M"), utc=True)
    stock_data = api.get_barset(ticker, interval, start = start.isoformat(), end=end.isoformat()).df
    interval_loop_data = interval_loop_data.append(stock_data)
    df_start_ref = interval_loop_data.index[0]
    start_str_ref = pd.to_datetime(start, utc=True)
    # Page backwards until the earliest bar is within ~2.5 days of `start`.
    while start_str_ref.value < ( pd.to_datetime(df_start_ref, utc=True) - pd.Timedelta(days=2.5)).value:
        end_new = pd.to_datetime(interval_loop_data.index[0].strftime("%Y-%m-%d %H:%M"), utc=True).isoformat()
        stock_data_new = None
        stock_data_new = api.get_barset(ticker, interval, start=start, end=end_new).df
        # NOTE(review): sorts by a column named 'time' — verify it exists on
        # the barset frame (the sibling helper sorts by 'index').
        interval_loop_data = interval_loop_data.append(stock_data_new).sort_values(by=['time'], ascending=True)
        df_start_ref = interval_loop_data.index[0]
    stock_yr_min_df = interval_loop_data.copy()
    # get_barset returns one column group per ticker; flatten it and convert
    # the index to Eastern time.
    pre_final = pd.DataFrame()
    pre_final["Open"] = stock_yr_min_df[ticker]['open']
    pre_final["High"]= stock_yr_min_df[ticker]["high"]
    pre_final["Low"] = stock_yr_min_df[ticker]["low"]
    pre_final["Close"] = stock_yr_min_df[ticker]["close"]
    pre_final["Volume"] = stock_yr_min_df[ticker]["volume"]
    pre_final["Time"] = stock_yr_min_df.index.tz_convert(TZ)
    pre_final.index = stock_yr_min_df.index.tz_convert(TZ)
    # Keep only regular market hours.
    final_df = pre_final.filter(["Time", "Open", "High", "Low", "Close", "Volume"], axis = 1).between_time('9:29', '16:05')
    first_day = final_df.index[0]
    traintest_day = final_df.index[-1] - pd.Timedelta(days= test_days+validate_days+offset_days)
    valtest_day = final_df.index[-1] - pd.Timedelta(days= test_days+offset_days)
    last_day = final_df.index[-1] - pd.Timedelta(days= offset_days)
    training_df = final_df.loc[first_day:traintest_day] #(data_split - pd.Timedelta(days=1))]
    validate_df = final_df.loc[traintest_day:valtest_day]
    testing_df = final_df.loc[valtest_day:last_day]
    full_train = final_df.loc[first_day:last_day]
    offset_df = final_df.loc[last_day:]
    return training_df, validate_df, testing_df, full_train, offset_df, final_df, traintest_day, valtest_day
# + id="rRFxnqAiRcnE" outputId="cdbdf104-b88f-4392-a522-7baf29af571b" colab={"base_uri": "https://localhost:8080/"}
from datetime import date
# Split-size bookkeeping: the day counts below are fed to the downloader,
# which measures its windows back from "today" rather than from these dates.
train_start = date(2017, 1, 1)
train_end = date(2019, 10, 31)
train_delta = train_end - train_start
print(f'Number of days of Training Data {train_delta.days}')
val_day_num = 400
print(f'Number of days of Validation Data {val_day_num}')
test_start = train_end + timedelta(val_day_num)
test_end = date.today()
test_delta = (test_end - test_start)
print(f'Number of days of Holdout Test Data {test_delta.days}')
ticker = "WMT" # Ticker Symbol to Test
interval = "5Min" # Interval of bars
train_day_int = train_delta.days # Size of training set (Jan 2017 - Oct 2019)
val_day_int = val_day_num # Size of validation set
test_day_int = test_delta.days # Size of test set
offset_day_int = 0 # Number of days to off set the training data
# Download and split (pre/post-market bars included).
train, val, test, full, offset, complete, traintest_day, testval_day = prepost_train_test_validate_offset_data(api, ticker,
                                                                                                               interval,
                                                                                                               train_days=train_day_int,
                                                                                                               test_days=test_day_int,
                                                                                                               validate_days=val_day_int,
                                                                                                               offset_days = offset_day_int)
# + id="bbrVHNazd27v"
def timeFilterAndBackfill(df):
    """
    Prepare `df` so every trading day has a full 07:30-17:25 grid of 5-minute rows.

    Steps:
      * Build a complete 5-minute index from the first to the last day.
      * merge_asof (direction='backward') so each grid slot carries the most
        recent known bar at or before that time.
      * Zero out Volume / VolumeWeightedAvgPrice slots that were merely
        carried forward (no new bar) or missing entirely — assumption is that
        there were no trades during that time.
      * Backfill remaining NaNs in the price columns.

    Expects a tz-aware DatetimeIndex plus columns "Time", "Volume" and
    "VolumeWeightedAvgPrice" (as produced by the *_offset_data helpers).
    Returns a new DataFrame; `df` itself is not modified.
    """
    df = df.between_time('07:29', '17:26')
    TZ = 'US/Eastern'
    # Grid endpoints straddle the window (07:25 / 17:35) so between_time below
    # trims cleanly to 07:30-17:25.
    start_dateTime = pd.Timestamp(year=df.index[0].year,
                                  month=df.index[0].month,
                                  day=df.index[0].day,
                                  hour=7, minute=25, tz=TZ)
    end_dateTime = pd.Timestamp(year=df.index[-1].year,
                                month=df.index[-1].month,
                                day=df.index[-1].day,
                                hour=17, minute=35, tz=TZ)
    dateTime_index = pd.date_range(start_dateTime,
                                   end_dateTime,
                                   freq='5min').tolist()
    dateTime_index_df = pd.DataFrame()
    dateTime_index_df["Time"] = dateTime_index
    filtered_df = pd.merge_asof(dateTime_index_df, df,
                                on='Time',
                                direction='backward').set_index("Time").between_time('07:29', '17:26')
    # FIX: the two volume columns were cleaned by two copy-pasted loops using
    # `== None` / `!= None`; use one helper and identity comparison instead.
    filtered_df["Volume"] = _zero_out_stale(filtered_df["Volume"])
    filtered_df["VolumeWeightedAvgPrice"] = _zero_out_stale(filtered_df["VolumeWeightedAvgPrice"])
    preped_df = filtered_df.backfill()
    return preped_df


def _zero_out_stale(values):
    """Return a list where NaNs and values identical to the previous kept
    value become 0.

    After a backward merge_asof a repeated value means "no new bar for this
    slot", so its volume is zeroed rather than double-counted; NaNs (slots
    before the first bar) are treated the same way.
    """
    cleaned = []
    prev = None
    for v in values:
        if prev is None:
            # First slot: keep a real value, zero a missing one.
            if math.isnan(v):
                prev = 0
                cleaned.append(0)
            else:
                prev = v
                cleaned.append(v)
        elif v == prev:
            # Unchanged since last slot -> carried forward, count as 0.
            cleaned.append(0)
            prev = v
        elif math.isnan(v):
            cleaned.append(0)
            prev = 0
        else:
            cleaned.append(v)
            prev = v
    return cleaned
def blockshaped(arr, nrows, ncols):
    """
    Cut a 2-D array into consecutive (nrows x ncols) tiles and return them
    stacked along a new leading axis, with each tile transposed.

    n * nrows * ncols must equal arr.size; the result has shape
    (n, ncols, nrows) because every tile is flipped about its diagonal.
    """
    h, w = arr.shape
    assert h % nrows == 0, f"{h} rows is not evenly divisible by {nrows}"
    assert w % ncols == 0, f"{w} cols is not evenly divisible by {ncols}"
    tiles = (arr.reshape(h // nrows, nrows, -1, ncols)
                .swapaxes(1, 2)
                .reshape(-1, nrows, ncols))
    # rot90 over the last two axes followed by a flip of axis 1 is exactly a
    # per-tile transpose, so express it directly.
    return tiles.transpose(0, 2, 1)
def buildOutData_TorchPrep(train_df = train, val_df = val, test_df = test):
    # TODO: unimplemented stub. Note the defaults bind the notebook-global
    # train/val/test DataFrames at definition time, not at call time.
    pass
# + id="wW4vHhnfgne4"
# Regularise each split onto the fixed 5-minute grid.
train = timeFilterAndBackfill(train)
val = timeFilterAndBackfill(val)
test = timeFilterAndBackfill(test)
# + id="1f2Wzmb9ovu-" outputId="82c7cd51-8dbf-463b-9721-dccdb527a581" colab={"base_uri": "https://localhost:8080/", "height": 455}
train
# + id="eYe9V9P9iFyn"
# Keep only the OHLCV columns and cut each split into 24-row (2-hour) windows.
train_tonp = train[["Open", "High", "Low", "Close", "Volume"]]
val_tonp = val[["Open", "High", "Low", "Close", "Volume"]]
test_tonp = test[["Open", "High", "Low", "Close", "Volume"]]
train_array = train_tonp.to_numpy()
val_array = val_tonp.to_numpy()
test_array = test_tonp.to_numpy()
X_train = blockshaped(train_array, 24, 5)
X_val = blockshaped(val_array, 24, 5)
X_test = blockshaped(test_array, 24, 5)
# + id="BdgQFubuscWf" outputId="b80b84e6-4259-453b-b474-6563ca694a35" colab={"base_uri": "https://localhost:8080/", "height": 833}
test[24:48]
# + id="b3HFZ-jSsXBW" outputId="31fffeb1-2d12-41c9-f9c0-4cf46027d56b" colab={"base_uri": "https://localhost:8080/"}
np.set_printoptions(linewidth=1000)
X_train[0]
# + colab={"base_uri": "https://localhost:8080/"} id="hX6Xlpo-N3u1" outputId="0f3da52d-76ea-4aab-8725-a7e333358eec"
# Row 3 of a window is the Close series; its last element is the window close.
X_train[0][3][-1]
# + id="Pe89LdnsLltO"
# create target from OHLC and Volume Data
def buildTargets(obs_array,
                 alph = .55,
                 volity_int = 8):
    """
    Derive a 3-class label for every window in `obs_array`.

    The closing price of each window is arr[3][-1]. A rolling volatility of
    the log returns (window = volity_int, scaled by sqrt(volity_int)) sets a
    band around the current close; the next window's close decides the label:
      1 -> next close >= close * (1 + alph * volatility)   (up)
      0 -> next close <= close * (1 - alph * volatility)   (down)
      2 -> otherwise, or where volatility / next close is undefined (flat)

    Returns a numpy integer array, one label per window.
    """
    closes = pd.DataFrame({"Close": [window[3][-1] for window in obs_array]})
    # Log returns between consecutive window closes; the leading NaN is 0.
    log_returns = np.log(closes["Close"] / closes["Close"].shift(1)).fillna(0)
    volatility = log_returns.rolling(window=volity_int).std() * np.sqrt(volity_int)
    next_close = closes["Close"].shift(-1)
    labels = np.full(len(closes["Close"]), 2)
    labels = np.where(next_close >= (closes["Close"] * (1 + alph * volatility)),
                      1, labels)
    labels = np.where(next_close <= (closes["Close"] * (1 - alph * volatility)),
                      0, labels)
    return labels
# + id="4aYPOa7INyAl"
# Label each window; volity_int=2 keeps the rolling-volatility window short.
volity_val = 2
y_train = buildTargets(X_train, volity_int = volity_val)
y_val = buildTargets(X_val, volity_int = volity_val)
y_test = buildTargets(X_test, volity_int = volity_val)
# + id="SxB_AzoBf4Xe"
# Insert a singleton channel axis: (n, h, w) -> (n, 1, h, w) for Conv2d input.
X_train = X_train.reshape(X_train.shape[0], 1,
                          X_train.shape[1],
                          X_train.shape[2])
X_val = X_val.reshape(X_val.shape[0], 1,
                      X_val.shape[1],
                      X_val.shape[2])
X_test = X_test.reshape(X_test.shape[0], 1,
                        X_test.shape[1],
                        X_test.shape[2])
# + colab={"base_uri": "https://localhost:8080/"} id="XIYp1XKJPCNL" outputId="9bf2b22d-53b8-4b30-f4e9-2e419812c6a9"
print(f'X Train Length {X_train.shape}, y Train Label Length {y_train.shape}')
print(f'X Val Length {X_val.shape}, y Val Label Length {y_val.shape}')
print(f'X Test Length {X_test.shape}, y Test Label Length {y_test.shape}')
# + colab={"base_uri": "https://localhost:8080/"} id="aYHL1ekIYzDV" outputId="bbf4476c-cbc5-4d2f-a89f-40d233ed6825"
X_train[0]
# + id="vWIY2rwEYCfM"
def get_class_distribution(obj):
    """Count how many labels fall into each class.

    Expects an iterable of labels where 1=up, 0=down, 2=flat; anything else
    is reported but not counted. Returns {"up": n, "flat": n, "down": n}.
    """
    count_dict = {
        "up": 0,
        "flat": 0,
        "down": 0,
    }
    names = {1: "up", 0: "down", 2: "flat"}
    for label in obj:
        bucket = names.get(label)
        if bucket is None:
            print("Check classes.")
        else:
            count_dict[bucket] += 1
    return count_dict
# + id="-BsVCfr8YCiX" outputId="3908c3b2-b090-407c-b18d-62e04dd36b2d" colab={"base_uri": "https://localhost:8080/", "height": 475}
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25,7))
# Train
sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_train)]).melt(), x = "variable", y="value", hue="variable", ax=axes[0]).set_title('Class Distribution in Train Set')
# Validation
sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_val)]).melt(), x = "variable", y="value", hue="variable", ax=axes[1]).set_title('Class Distribution in Val Set')
# Test
sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_test)]).melt(), x = "variable", y="value", hue="variable", ax=axes[2]).set_title('Class Distribution in Test Set')
# + id="3pzFa-UjYCKu"
######
# Code fro scaling at a later date
######
# from sklearn.preprocessing import MinMaxScaler
# scalers = {}
# for i in range(X_train.shape[1]):
# scalers[i] = MinMaxScaler()
# X_train[:, i, :] = scalers[i].fit_transform(X_train[:, i, :])
# for i in range(X_val.shape[1]):
# X_test[:, i, :] = scalers[i].transform(X_test[:, i, :])
# for i in range(X_test.shape[1]):
# X_test[:, i, :] = scalers[i].transform(X_test[:, i, :])
# + colab={"base_uri": "https://localhost:8080/"} id="84ThCmAdYCX9" outputId="65621d0b-f7d5-4285-8650-fdfe52c70774"
print(f'X Train Length {X_train.shape}, y Train Label Length {y_train.shape}')
print(f'X Val Length {X_val.shape}, y Val Label Length {y_val.shape}')
print(f'X Test Length {X_test.shape}, y Test Label Length {y_test.shape}')
print("")
print('Training data window: ', len(X_train))
print('Val data windows: ', len(X_val))
# + id="EAdhr0KLy-72" outputId="0a883526-668c-4612-b359-b43b31a7475d" colab={"base_uri": "https://localhost:8080/"}
y_train[0].dtype
# + id="5Zun8GwOiBlW" outputId="b3c09ba8-d313-4047-8af5-071ed3569a38" colab={"base_uri": "https://localhost:8080/"}
batch_size = 5
train_data = []
for i in range(len(X_train)):
train_data.append([X_train[i].astype('float'), y_train[i]])
train_loader = torch.utils.data.DataLoader(train_data, shuffle=False, batch_size=batch_size)
i1, l1 = next(iter(train_loader))
print(i1.shape)
val_data = []
for i in range(len(X_val)):
val_data.append([X_val[i].astype('float'), y_val[i]])
val_loader = torch.utils.data.DataLoader(val_data, shuffle=False, batch_size=batch_size)
i1, l1 = next(iter(val_loader))
print(i1.shape)
# + id="BaDGe3DSpg6f" outputId="2239af76-a9ee-431b-a86d-aa1c64b4fc61" colab={"base_uri": "https://localhost:8080/", "height": 139}
# Get next batch of training images
# FIX: `iter(train_loader).next()` is the Python-2-style call; the `.next()`
# method does not exist on modern DataLoader iterators — use builtin next().
windows, labels = next(iter(train_loader))
print(windows.shape)
windows = windows.numpy()
# plot the windows in the batch, along with the corresponding labels
fig = plt.figure(figsize=(15, 5))
for idx in range(batch_size):
    print(labels[idx])
# + id="YSLUlwla8BSU"
class StockShiftClassification(nn.Module):
    """First-draft CNN over single-channel OHLCV windows.

    NOTE(review): fc2 maps to 500 outputs and log_softmax is taken over all
    500 of them, yet the targets produced by buildTargets take only the
    values {0, 1, 2} — this head looks unfinished (the later
    StockClassificationBase version ends in Linear(500, 3)). Also, fc1
    assumes the flattened conv output is exactly 128 features — TODO confirm
    against the actual input window shape before reuse.
    """
    def __init__(self):
        super(StockShiftClassification, self).__init__()
        # 1x3 convolutions expand channels 1 -> 32 -> 64 -> 128, each
        # followed by a max-pool in forward().
        self.conv1 = nn.Conv2d(1, 32, kernel_size = (1,3), stride=1, padding = 1)
        self.pool1 = nn.MaxPool2d(4,4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size = (1,3), stride=1, padding = 1)
        self.pool2 = nn.MaxPool2d(3,3)
        self.conv3 = nn.Conv2d(64, 128, kernel_size = (1,3), stride=1, padding = 1)
        self.pool3 = nn.MaxPool2d(2,2)
        self.fc1 = nn.Linear(128,1000)
        self.fc2 = nn.Linear(1000,500)
    def forward(self, x):
        # Conv -> ReLU -> pool, three times.
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = F.relu(self.conv3(x))
        x = self.pool3(x)
        # Flatten all conv features per sample.
        x = x.view(x.size(0), -1)
        # Linear layer
        x = self.fc1(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output
# + id="IAw7OiPS8BNu" outputId="cd55f127-b122-4840-e659-901929d1db21" colab={"base_uri": "https://localhost:8080/"}
# Instantiate the model
net = StockShiftClassification().float()
# Display a summary of the layers of the model and output shape after each layer
summary(net,(windows.shape[1:]),batch_size=batch_size,device="cpu")
# + id="rZGSLKkq8BIu"
# Cross entropy loss combines softmax and nn.NLLLoss() in one single class.
# NOTE(review): the model already ends in log_softmax, so CrossEntropyLoss
# applies a second softmax on top — NLLLoss would match that head.
criterion = nn.CrossEntropyLoss()
# Define optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
# + id="lAgQIWqA8BEW"
def train_model(model, criterion, optimizer, train_loader, n_epochs, device):
    """Train `model` on `train_loader` for `n_epochs`.

    Moves the model to `device` and converts it to float64 before training.
    Every 1000 batches the running average loss is recorded and printed;
    returns that list of averages (empty if fewer than 1000 batches ran).
    """
    loss_history = []
    model = model.to(device).double()   # GPU if available, float64 weights
    model.train()
    for epoch in range(n_epochs):
        running_loss = 0.0
        for batch_idx, batch in enumerate(train_loader):
            inputs = batch[0].to(device)
            labels = batch[1].to(device)
            # Standard step: clear grads, forward, loss, backward, update.
            optimizer.zero_grad()
            loss = criterion(model(inputs), labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            # Report once per 1000-batch window.
            if batch_idx % 1000 == 999:
                mean_loss = running_loss / 1000
                loss_history.append(mean_loss)
                print('Epoch: {}, Batch: {}, Avg. Loss: {:.4f}'.format(epoch + 1, batch_idx + 1, mean_loss))
                running_loss = 0.0
    return loss_history
# + id="Mx3XtDf_8A_O" outputId="86cf1312-8e8e-418e-a9d1-fe2740d81a08" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Train the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_epochs = 100
# NOTE: train_model converts the net to double internally, so .float() here
# is immediately overridden.
cost_path = train_model(net.float(),criterion,optimizer,train_loader,n_epochs,device)
# visualize the loss as the network trained
plt.plot(cost_path)
plt.xlabel('Batch (1000s)')
plt.ylabel('loss')
plt.show()
# + id="Sg6m09eT8A4W"
def test_model(model, test_loader, device):
    """Evaluate `model` on `test_loader` without gradients.

    Returns (accuracy, recall_vals) where recall_vals lists the recall for
    classes 0, 1 and 2 in order.
    """
    with torch.no_grad():
        model = model.to(device)
        model.eval()
        actuals = []
        predictions = []
        for batch in test_loader:
            inputs = batch[0].to(device)
            labels = batch[1].to(device)
            # Raw scores -> probabilities -> discrete class per row.
            scores = model.forward(inputs)
            probs = F.softmax(scores, dim=1)
            predictions.extend(np.argmax(probs.cpu().numpy(), axis=1))
            actuals.extend(labels)
    predictions = np.array(predictions)
    actuals = np.array(actuals)
    # Overall accuracy across every sample seen.
    test_acc = np.sum(predictions == actuals) / actuals.shape[0]
    # Per-class recall: of the samples truly in class `cls`, how many were
    # predicted as `cls`.
    recall_vals = []
    for cls in range(3):
        class_idx = np.argwhere(actuals == cls)
        hits = np.sum(predictions[class_idx] == cls)
        recall_vals.append(hits / len(class_idx))
    return test_acc, recall_vals
# + id="pGIu_p7XCACy"
# Class ids used throughout: 0=down, 1=up, 2=flat (see buildTargets).
classes = [0,1,2]
# + id="uvnTKH0fBzo2" outputId="e69fddb7-d646-4051-8ee9-6975dba37b9c" colab={"base_uri": "https://localhost:8080/"}
# Calculate the test set accuracy and recall for each class
# (evaluated here on the validation loader, not the held-out test loader).
acc,recall_vals = test_model(net,val_loader,device)
print('Test set accuracy is {:.3f}'.format(acc))
for i in range(3):
    print('For class {}, recall is {}'.format(classes[i],recall_vals[i]))
# + [markdown] id="6BCMz6zXgtxA"
# # Working Code
# + id="rqrkb-0puMHj"
import torch.nn as nn
import torch.nn.functional as F
class StockClassificationBase(nn.Module):
    """Training/validation plumbing shared by the notebook's models.

    Subclasses implement forward(); this mixin supplies per-batch steps,
    epoch aggregation and progress printing for the fit()/evaluate() helpers
    defined later in the notebook.
    """
    def training_step(self, batch):
        """Cross-entropy loss for one (windows, labels) training batch."""
        windows, labels = batch
        out = self(windows)                         # Generate predictions
        loss = F.cross_entropy(out, labels.long())  # Calculate loss
        return loss

    def validation_step(self, batch):
        """Loss and accuracy for one validation batch (no aggregation)."""
        windows, labels = batch
        out = self(windows)                         # Generate predictions
        loss = F.cross_entropy(out, labels.long())  # Calculate loss
        acc = accuracy(out, labels)                 # helper defined later in this notebook
        return {'val_loss': loss.detach(), 'val_acc': acc}

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation results into plain floats.

        BUG FIX: the empty-`outputs` guard previously did
        torch.stack(torch.FloatTensor((0))), which itself raises
        "stack expects a non-empty TensorList"; report a plain 0.0 instead.
        """
        batch_losses = [x['val_loss'] for x in outputs]
        if len(batch_losses) == 0:
            epoch_loss = torch.tensor(0.0)
        else:
            epoch_loss = torch.stack(batch_losses).mean()   # Combine losses
        batch_accs = [x['val_acc'] for x in outputs]
        if len(batch_accs) == 0:
            epoch_acc = torch.tensor(0.0)
        else:
            epoch_acc = torch.stack(batch_accs).mean()      # Combine accuracies
        return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        """Print one line of progress for the finished epoch."""
        print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
# + id="_WYY_gw_t6HT"
class StockShiftClassification(StockClassificationBase):
    """CNN over single-channel OHLCV windows emitting 3 class scores.

    Inherits the train/validation step plumbing from StockClassificationBase;
    all computation lives in one sequential stack of float64 layers.
    """

    def __init__(self):
        super().__init__()
        # Built in the same order as declared so parameter initialisation is
        # unchanged; conv/linear layers are converted to float64 up front.
        layers = [
            nn.Conv2d(1, 32, kernel_size=(1, 3), stride=1, padding=0).double(),
            nn.MaxPool2d(4, 4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=(1, 3), stride=1, padding=1).double(),
            nn.ReLU(),
            nn.MaxPool2d(3, 3),
            nn.Conv2d(64, 128, kernel_size=(1, 3), stride=1, padding=1).double(),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(128, 500).double(),
            nn.ReLU(),
            nn.Linear(500, 3).double(),
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, xb):
        # Delegate straight to the sequential stack.
        return self.network(xb)
# + id="lz5bvyy4t6Uk"
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func = torch.optim.SGD):
history = []
optimizer = opt_func(model.parameters(),lr)
for epoch in range(epochs):
model.train()
train_losses = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
result = evaluate(model, val_loader)
result['train_loss'] = torch.stack(train_losses).mean().item()
model.epoch_end(epoch, result)
history.append(result)
return history
# + id="q47Br6aJ2UI9"
# Instantiate the model (the StockClassificationBase-derived version above).
model = StockShiftClassification()
# + id="V75yN32gt6Y7" outputId="3edb5ee7-213c-4660-f3bb-35136db68935" colab={"base_uri": "https://localhost:8080/", "height": 345}
num_epochs = 30
opt_func = torch.optim.Adam
lr = 0.001
#fitting the model on training data and record the result after each epoch
history = fit(num_epochs, lr, model, train_loader, val_loader, opt_func)
# + id="P840VkA9t6ck"
# + id="2JpYyBDjt6fU"
# + id="2pYziwM8jixU"
class ConvNet(nn.Module):
    """Experimental CNN draft, superseded by StockShiftClassification.

    NOTE(review): several layers look inconsistent and the saved layer-size
    comments do not match the declared layers:
      * conv1 is nn.Conv1d yet is given a 2-D kernel_size=(1,3) and later fed
        4-D input in forward() — presumably nn.Conv2d was intended; confirm.
      * fc1 = Linear(1000, 3) already emits 3 values, but fc2 = Linear(500, 3)
        is then applied to that 3-wide output in forward() — the sizes cannot
        line up, so forward() cannot succeed as written.
    """
    def __init__(self):
        super(ConvNet, self).__init__()
        #Input shape: (batch_size,1,5,24)
        # Convolutional 1 layer: 1x3 kernel, stride=1, padding=0, 32 output channels / feature maps
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=32, kernel_size=(1,3), stride=1, padding=1)
        # Conv1 layer output size = (W-F+2P)/S+1 = (5-(3))/1+1 = 1
        # Conv1 layer output shape for one image: [1,5,24]
        # Maxpool layer: kernel_size=4, stride=4
        self.pool1 = nn.MaxPool2d(kernel_size=4, stride=4)
        # Pool output shape for one image: [10,13,13]
        # Convolutional 2 layer: 3x3 kernel, stride=1, padding=0, 20 output channels / feature maps
        self.conv2 = nn.Conv2d(in_channels=32,out_channels=64,kernel_size=(1,3), stride=1, padding=0)
        # Conv2 layer output size = (W-F+2P)/S+1 = (13-3)/1+1 = 11
        # Conv2 layer output shape for one image: [20,11,11]
        # Maxpool layer: kernel_size=2, stride=2
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=3)
        # Pool output shape for one image: [20,5,5]
        # Convolutional 2 layer: 3x3 kernel, stride=1, padding=0, 20 output channels / feature maps
        self.conv3 = nn.Conv2d(in_channels=64,out_channels=128,kernel_size=(1,3), stride=1, padding=0)
        # Conv2 layer output size = (W-F+2P)/S+1 = (13-3)/1+1 = 11
        # Conv2 layer output shape for one image: [20,11,11]
        # Maxpool layer: kernel_size=2, stride=2
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Pool output shape for one image: [20,5,5]
        # Input size: 20 * 5 * 5 = 500 from pool2 pooling layer
        # 10 output channels (for the 10 classes)
        self.fc1 = nn.Linear(1000, 3)
        self.fc2 = nn.Linear(500, 3)
    def forward(self, x):
        # Two convolutional layers followed by relu and then pooling
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = F.relu(self.conv3(x))
        x = self.pool3(x)
        # Flatten into a vector to feed into linear layer
        x = x.view(x.size(0), -1)
        # Linear layer
        x = self.fc1(x)
        x = self.fc2(x)
        return x
# + id="iqovPJgsji5l" outputId="80b08329-23c3-4763-f5c3-ee9320062b4a" colab={"base_uri": "https://localhost:8080/", "height": 345}
# Instantiate the model
net = ConvNet()
# Display a summary of the layers of the model and output shape after each layer
# NOTE(review): ConvNet has mismatched layer sizes (see its class notes), so
# this summary call may have raised when the cell was executed.
summary(net,(windows.shape[1:]),batch_size=batch_size,device="cpu")
# + id="8_sqJggVfOMj"
class Net(Module):
    """Small Sequential CNN draft (another abandoned experiment).

    NOTE(review): linear_layers expects 4 * 7 * 7 = 196 flattened features,
    but no trace of the conv/pool stack over the notebook's (batch, 1, 24, 5)
    windows produces that size — verify the intended input shape before
    training with this class.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.cnn_layers = Sequential(
            # Defining a 2D convolution layer
            Conv2d(1, 32, kernel_size=(1,3), stride=1, padding=1),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=1, stride=4),
            # Defining another 2D convolution layer
            Conv2d(32, 64, kernel_size=(1,3), stride=1, padding=1),
            ReLU(inplace=True),
            MaxPool2d(kernel_size=1, stride=3),
        )
        self.linear_layers = Sequential(
            Linear(4 * 7 * 7, 10)
        )
    # Defining the forward pass
    def forward(self, x):
        x = self.cnn_layers(x)
        # Flatten conv features per sample before the linear head.
        x = x.view(x.size(0), -1)
        x = self.linear_layers(x)
        return x
# + colab={"base_uri": "https://localhost:8080/"} id="Xi-Y6h3lfOPC" outputId="ffb95a21-69e7-4631-9ac3-cc340883f394"
# defining the model
model = Net()
# defining the optimizer
optimizer = Adam(model.parameters(), lr=0.07)
# defining the loss function
criterion = CrossEntropyLoss()
# checking if GPU is available
if torch.cuda.is_available():
    model = model.cuda()
    criterion = criterion.cuda()
print(model)
# + id="jKPFOKxOfORP"
def train(epoch):
    """One combined train+validation step over the full (non-batched) tensors.

    NOTE(review): relies on module globals train_x/train_y/val_x/val_y (plus
    model/optimizer/criterion/train_losses/val_losses); train_x and friends
    are never defined anywhere in this notebook, so this cell cannot run
    as-is — confirm where they were meant to come from.
    """
    model.train()
    tr_loss = 0
    # getting the training set
    x_train, y_train = Variable(train_x), Variable(train_y)
    # getting the validation set
    x_val, y_val = Variable(val_x), Variable(val_y)
    # converting the data into GPU format
    if torch.cuda.is_available():
        x_train = x_train.cuda()
        y_train = y_train.cuda()
        x_val = x_val.cuda()
        y_val = y_val.cuda()
    # clearing the Gradients of the model parameters
    optimizer.zero_grad()
    # prediction for training and validation set
    output_train = model(x_train)
    output_val = model(x_val)
    # computing the training and validation loss
    loss_train = criterion(output_train, y_train)
    loss_val = criterion(output_val, y_val)
    # NOTE(review): appending whole loss tensors keeps their autograd graphs
    # alive across epochs; consider appending .item() values instead.
    train_losses.append(loss_train)
    val_losses.append(loss_val)
    # computing the updated weights of all the model parameters
    loss_train.backward()
    optimizer.step()
    tr_loss = loss_train.item()
    if epoch%2 == 0:
        # printing the validation loss
        print('Epoch : ',epoch+1, '\t', 'loss :', loss_val)
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="1pE-ATsFfOVJ" outputId="4131b224-7e71-4fe3-ca77-96d2d22b327e"
# defining the number of epochs
n_epochs = 25
# empty list to store training losses
train_losses = []
# empty list to store validation losses
val_losses = []
# training the model
# NOTE(review): train() depends on undefined globals (see its docstring).
for epoch in range(n_epochs):
    train(epoch)
# + id="ppUSwLlffOYn"
# + id="lGrkKxXpfObh"
# + id="87wjhtIUfOei"
# + id="eTahPMf3fOgy"
# + id="tUSRmSLtfOi3"
# + id="T4aSyHl1YCmE"
class ClassifierDataset():
    """Minimal map-style dataset pairing feature tensors with labels.

    Indexing yields a (features, label) tuple; len() reports the number of
    samples, which makes it directly usable with torch DataLoader.
    """

    def __init__(self, X_data, y_data):
        self.X_data = X_data
        self.y_data = y_data

    def __len__(self):
        return len(self.X_data)

    def __getitem__(self, index):
        features = self.X_data[index]
        label = self.y_data[index]
        return features, label
# Wrap the numpy splits as float-feature / long-label tensor datasets.
train_dataset = ClassifierDataset(torch.from_numpy(X_train).float(), torch.from_numpy(y_train).long())
val_dataset = ClassifierDataset(torch.from_numpy(X_val).float(), torch.from_numpy(y_val).long())
test_dataset = ClassifierDataset(torch.from_numpy(X_test).float(), torch.from_numpy(y_test).long())
# + id="F1mco0NhYCqd"
# Hyperparameters for the MLP experiment below.
EPOCHS = 300
BATCH_SIZE = 1
LEARNING_RATE = 0.0007
NUM_FEATURES = 24
NUM_CLASSES = 3
# + id="iww7ffyrYCuU"
# All three loaders effectively run with batch size 1 (no shuffling).
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE
                          )
val_loader = DataLoader(dataset=val_dataset, batch_size=1)
test_loader = DataLoader(dataset=test_dataset, batch_size=1)
# + id="OebWQr9rYCxf"
# class MulticlassClassification(nn.Module):
# def __init__(self, num_feature, num_class):
# super(MulticlassClassification, self).__init__()
# self.layer_1 = nn.Linear(num_feature, 32)
# self.layer_2 = nn.Linear(32, 64)
# self.layer_3 = nn.Linear(64, 128)
# self.layer_out = nn.Linear(128, num_class)
# self.relu = nn.ReLU()
# self.dropout = nn.Dropout(p=0.2)
# self.batchnorm1 = nn.BatchNorm1d(5)
# self.batchnorm2 = nn.BatchNorm1d(5)
# self.batchnorm3 = nn.BatchNorm1d(5)
# def forward(self, x):
# x = self.layer_1(x)
# x = self.batchnorm1(x)
# x = self.relu(x)
# x = self.layer_2(x)
# x = self.batchnorm2(x)
# x = self.relu(x)
# x = self.dropout(x)
# x = self.layer_3(x)
# x = self.batchnorm3(x)
# x = self.relu(x)
# x = self.dropout(x)
# x = self.layer_out(x)
# return x
class MulticlassClassification(nn.Module):
    """Two-layer MLP: D_in -> sigmoid(H) -> D_out raw scores."""

    def __init__(self, D_in, H, D_out):
        super(MulticlassClassification, self).__init__()
        self.linear1 = nn.Linear(D_in, H)
        self.linear2 = nn.Linear(H, D_out)

    def forward(self, x):
        # Sigmoid-activated hidden layer, then a linear output layer.
        hidden = torch.sigmoid(self.linear1(x))
        return self.linear2(hidden)
# + colab={"base_uri": "https://localhost:8080/"} id="AAagy-FIYC0N" outputId="7ca0af2e-ace9-41bc-b43a-b370f12c7c15"
# Pick GPU 0 when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
###################### OUTPUT ######################
# cuda:0
# FIX: the line above was a bare `cuda:0` (pasted cell output); as code it
# silently became an annotation statement, so it is now a comment.
# + id="p2wHGM9Ydxdx"
input_dim= 24 # how many Variables are in the dataset
hidden_dim = 32 # hidden layers
# FIX: CrossEntropyLoss needs one logit per class and the targets take the
# values {0, 1, 2}; output_dim=1 made the loss raise "Target ... out of
# bounds" for labels 1 and 2.
output_dim= 3 # number of classes
# + colab={"base_uri": "https://localhost:8080/"} id="S40u_AdscKq5" outputId="ed7570e3-9bc6-45e5-9375-f26fc0478168"
model = MulticlassClassification(input_dim,hidden_dim,output_dim)#num_feature = NUM_FEATURES, num_class=NUM_CLASSES)
# NOTE(review): the train batches are shaped (B, 1, 24, 5) and Linear(24, H)
# applies to the last axis (size 5) — confirm the intended input layout
# before relying on this training path.
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
print(model)
# + id="hZAv8QiccK5q"
def multi_acc(y_pred, y_test):
    """Accuracy of raw predictions vs labels, as a rounded percentage tensor."""
    # log_softmax is monotonic, so its argmax matches the raw-score argmax.
    log_probs = torch.log_softmax(y_pred, dim = 1)
    _, predicted_labels = torch.max(log_probs, dim = 1)
    hits = (predicted_labels == y_test).float()
    fraction = hits.sum() / len(hits)
    return torch.round(fraction * 100)
# + id="g9DgGw-wcK9x"
# Per-epoch history, filled in by the training loop below.
accuracy_stats = {
    'train': [],
    "val": []
}
loss_stats = {
    'train': [],
    "val": []
}
# + colab={"base_uri": "https://localhost:8080/", "height": 429, "referenced_widgets": ["cb7c9f3f250d4f37923a9ad7a3130a11", "af033a719fd04de3b4c61ae772b2d7f8", "b274d8f4515c4a0fb5d16f748fb9ecaa", "<KEY>", "<KEY>", "239b2669ac5d4a06871258137955bb5a", "d9ae8ac19b274f4fa1d67a6da52e5952", "<KEY>", "59fd93c3786d446db0298a0be0244929", "18edccce3ce54ae1940062f7d6b56d33", "37cd085fd4b34d6ba130eb897c098f72"]} id="ZCQIwNiwcLBh" outputId="6a44bc0d-a612-4dfb-e9ad-def77f7608fb"
# Epoch loop: accumulate per-batch loss/accuracy for train and validation,
# then record the per-epoch means in loss_stats / accuracy_stats.
print("Begin training.")
for e in tqdm(range(1, EPOCHS+1)):
    # TRAINING
    train_epoch_loss = 0
    train_epoch_acc = 0
    model.train()
    for X_train_batch, y_train_batch in train_loader:
        X_train_batch, y_train_batch = X_train_batch.to(device), y_train_batch.to(device)
        optimizer.zero_grad()
        y_train_pred = model(X_train_batch)
        train_loss = criterion(y_train_pred, y_train_batch)
        train_acc = multi_acc(y_train_pred, y_train_batch)
        train_loss.backward()
        optimizer.step()
        train_epoch_loss += train_loss.item()
        train_epoch_acc += train_acc.item()
    # VALIDATION
    with torch.no_grad():
        val_epoch_loss = 0
        val_epoch_acc = 0
        model.eval()
        for X_val_batch, y_val_batch in val_loader:
            X_val_batch, y_val_batch = X_val_batch.to(device), y_val_batch.to(device)
            y_val_pred = model(X_val_batch)
            val_loss = criterion(y_val_pred, y_val_batch)
            val_acc = multi_acc(y_val_pred, y_val_batch)
            val_epoch_loss += val_loss.item()
            val_epoch_acc += val_acc.item()
    # Mean-per-batch bookkeeping for later plotting.
    loss_stats['train'].append(train_epoch_loss/len(train_loader))
    loss_stats['val'].append(val_epoch_loss/len(val_loader))
    accuracy_stats['train'].append(train_epoch_acc/len(train_loader))
    accuracy_stats['val'].append(val_epoch_acc/len(val_loader))
    print(f'Epoch {e+0:03}: | Train Loss: {train_epoch_loss/len(train_loader):.5f} | Val Loss: {val_epoch_loss/len(val_loader):.5f} | Train Acc: {train_epoch_acc/len(train_loader):.3f}| Val Acc: {val_epoch_acc/len(val_loader):.3f}')
# + id="21avcJKEcLE6"
# + id="z5IITTxfVgNg"
tensor_x = torch.Tensor(X_train) # transform to torch tensor
# NOTE(review): torch.Tensor() yields float32 labels; CrossEntropyLoss needs
# integer (long) targets — convert before using this loader for training.
tensor_y = torch.Tensor(y_train)
my_dataset = TensorDataset(tensor_x,tensor_y) # create your dataset
my_dataloader = DataLoader(my_dataset) # create your dataloader
# + id="ZAle1rBOVs-3"
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, BatchNorm2d, Dropout, Sigmoid
# def define_model(num_features, num_filter, drop):
# model = Sequential()
# model.add_module('conv1', Conv2d(1, num_filter, kernel_size=(1, num_features)))
# model.add_module('relu1', ReLU())
# model.add_module('conv2', Conv2d(num_filter, num_filter, kernel_size=(3, 1)))
# model.add_module('relu2', ReLU())
# model.add_module('pool1', MaxPool2d(kernel_size=(2, 1)))
# model.add_module('conv3', Conv2d(num_filter, num_filter, kernel_size=(3, 1)))
# model.add_module('relu3', ReLU())
# model.add_module('pool2', MaxPool2d(kernel_size=(2, 1)))
class CNNpred(Module):
    """Minimal CNN head: one 1x3 conv, ReLU, flatten, linear, sigmoid.

    The num_features/num_filter/drop arguments are currently unused by the
    implementation; they are kept so existing call sites such as
    ``CNNpred(3, 32, 0)`` keep working. (The previously commented-out
    conv/pool/dropout layers have been removed as dead code.)
    """

    def __init__(self, num_features, num_filter, drop):
        super(CNNpred, self).__init__()
        # Single-channel 1x3 convolution over the feature axis.
        self.conv1 = Conv2d(1, 1, kernel_size=(1, 3))
        self.relu1 = ReLU()
        # The flattened conv output must have exactly 100 elements,
        # i.e. the input spatial dims must satisfy H * (W - 2) == 100.
        self.fc1 = Linear(100, 1)
        self.sig1 = Sigmoid()

    def forward(self, x):
        """Conv -> ReLU -> flatten -> linear -> sigmoid; returns (N, 1) in (0, 1)."""
        x = self.relu1(self.conv1(x))
        x = x.view(x.shape[0], -1)  # flatten everything but the batch dim
        return self.sig1(self.fc1(x))
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="KFHObysQVtFI" outputId="027059ed-201f-435f-fb44-1335f98a7aa6"
# Instantiate the model
net = CNNpred(3, 32, 0)
# Display a summary of the layers of the model and output shape after each layer
# (torchsummary-style `summary`; X_train and batchSize come from other cells).
summary(net,(X_train.shape[1:]),batch_size=batchSize,device="cpu")
# + id="PR6eilJcVtKn"
# + id="0RugbaScVtO3"
# + id="36-O49VmD5lC" outputId="cfd890f6-cb37-4fcd-9115-223cb9481bda" colab={"base_uri": "https://localhost:8080/"}
# Pair each training sample with its label and sanity-check one batch.
train_data = []
for i in range(len(X_train)):
    train_data.append([X_train[i], y_train[i]])
batchSize = 10 # attempting to do not use batch to begin with
trainloader = torch.utils.data.DataLoader(train_data,
                                          shuffle=False)
# Pull one batch to confirm the (features, label) shapes.
i1, l1 = next(iter(trainloader))
print(i1.shape, l1.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="2h-7_hPZTtM1" outputId="f01534c1-f3ad-47dd-af29-aaec20813c94"
# + colab={"base_uri": "https://localhost:8080/", "height": 235} id="maZIheiYTecl" outputId="2c972422-5cce-416a-9883-9e486066a2ab"
class TrainData(Dataset):
    """Thin Dataset wrapper pairing feature rows with their labels."""

    def __init__(self, X_data, y_data):
        self.X_data = X_data
        self.y_data = y_data

    def __getitem__(self, index):
        # One (features, label) pair per index.
        return (self.X_data[index], self.y_data[index])

    def __len__(self):
        return len(self.X_data)
# Build the training dataset from the prepared arrays.
train_data = TrainData(X_train,
                       y_train)
# + id="9sJcmOJlD6mL"
class ConvNet(nn.Module):
    """Experimental 2-conv CNN for (batch, 1, 5, 24) market windows.

    NOTE(review): the shape comments below were inherited from an image
    example and do not match kernel_size=(1,3) or the pool sizes here;
    with a (1,5,24) input, pool2 (kernel 3) would see a height-1 map, and
    fc1's 100*3*24 input size does not follow from the stack above.
    Verify the dimensions before relying on forward().
    """
    def __init__(self):
        super(ConvNet, self).__init__()
        #Input shape: (batch_size,1,5,24)
        # Convolutional 1 layer: 3x1 kernel, stride=1, padding=0, 10 output channels / feature maps
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(1,3), stride=1, padding=1)
        # Conv1 layer output size = (W-F+2P)/S+1 = (5-3)/1+1 = 3
        # Conv1 layer output shape for one array: [32,3,24]
        # Maxpool layer: kernel_size=4, stride=4
        self.pool1 = nn.MaxPool2d(kernel_size=4, stride=4)
        # Pool output shape for one image: [32,13,13]
        # # Convolutional 2 layer: 3x3 kernel, stride=1, padding=0, 20 output channels / feature maps
        self.conv2 = nn.Conv2d(in_channels=32,out_channels=64,kernel_size=(1,3), stride=1, padding=0)
        # # Conv2 layer output size = (W-F+2P)/S+1 = (13-3)/1+1 = 11
        # # Conv2 layer output shape for one image: [20,11,11]
        # # Maxpool layer: kernel_size=3, stride=3
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=3)
        # # Pool output shape for one image: [20,5,5]
        # Input size: 20 * 5 * 5 = 500 from pool2 pooling layer
        # 10 output channels (for the 10 classes)
        self.fc1 = nn.Linear(100*3*24, 3)
        self.fc2 = nn.Linear(100*3*24, 3)  # NOTE(review): fc2 is never used in forward()

    def forward(self, x):
        # Two convolutional layers followed by relu and then pooling
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        # Flatten into a vector to feed into linear layer
        x = x.view(x.size(0), -1)
        # Linear layer
        x = self.fc1(x)
        return x
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="MpnkUKlsRsIT" outputId="68a646a5-a4e8-40dd-90ec-6264fddd84d0"
# Instantiate the model
net = ConvNet()
# Display a summary of the layers of the model and output shape after each layer
summary(net,(X_train.shape[1:]),batch_size=batchSize,device="cpu")
# + id="28fTMaQjJRJa" outputId="363233cf-b4e0-4e2f-c703-5bef324aa04d" colab={"base_uri": "https://localhost:8080/"}
# Second, independent instance used only for the textual dump below.
model = ConvNet()
print(model)
# + id="6V1E-likJRR5"
# + id="4IQpShuaJRVD"
# + id="AGriOafiJRYh"
# + id="AmiJKBpFJRcB"
# + id="tMWV1z-AJRfx"
# + id="vRnOtPPfJRAT"
class MyCNNClassifier(nn.Module):
    """Template conv->conv->MLP classifier (appears unused in this notebook).

    NOTE(review): the layer dimensions are mutually inconsistent and
    forward() would fail as written:
      * conv_block1's conv emits 10 channels but its BatchNorm2d is built
        with num_features=32;
      * conv_block2's conv expects 5 input channels (conv_block1 emits 10)
        and its BatchNorm2d is built with 64 while the conv emits 5;
      * the decoder assumes a flattened 32*28*28 input.
    Confirm the intended channel plan before using this class.
    """
    def __init__(self, in_c, n_classes):
        super().__init__()
        self.conv_block1 = nn.Sequential(
            nn.Conv2d(in_c, 10, kernel_size=(1,3), stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU()
        )
        self.conv_block2 = nn.Sequential(
            nn.Conv2d(5, 5, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.decoder = nn.Sequential(
            nn.Linear(32 * 28 * 28, 1024),
            nn.Sigmoid(),
            nn.Linear(1024, n_classes)
        )

    def forward(self, x):
        x = self.conv_block1(x)
        x = self.conv_block2(x)
        x = x.view(x.size(0), -1) # flat
        x = self.decoder(x)
        return x
# + id="BTYFSqv0C6GH"
class ConvNet(nn.Module):
    """Stub redefinition of ConvNet: one 1x3 conv followed by Softmax2d.

    Expected input shape: (batch_size, 1, 5, 24). No forward() is defined.
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=1000,
                               kernel_size=(1, 3), stride=1, padding=0)
        self.pool1 = nn.Softmax2d()
# + id="jAoO0wQCD6xE" outputId="187bd3ab-0268-4e2b-d6ab-836b55a71162" colab={"base_uri": "https://localhost:8080/", "height": 345}
# Instantiate the model
net = ConvNet()
# Display a summary of the layers of the model and output shape after each layer
summary(net,(X_train.shape[1:]),batch_size=batchSize,device="cpu")
# + id="2XLg_I1oD7F3"
# + id="xhHP1aZFD7dG"
# + id="o55iMw16D7w2"
# + id="nndqzAwWaDkk"
# Load necessary Pytorch packages
from torch.utils.data import DataLoader, TensorDataset
from torch import Tensor
# Create dataset from several tensors with matching first dimension
# Samples will be drawn from the first dimension (rows)
dataset = TensorDataset(Tensor(X_train), Tensor(y_train))
# Create a data loader from the dataset
# Type of sampling and batch size are specified at this step
loader = DataLoader(dataset, batch_size= 10)
# Quick test
# next(iter(loader))
# + colab={"base_uri": "https://localhost:8080/"} id="Lgr6Wqww_ysP" outputId="e3f760cc-ba97-4520-e577-f268a340efad"
print('Training 2 Hour Data Arrays: ', len(X_train))
print('Test 2 Hour Data Arrays: ', len(X_test))
# Specify the image classes
classes = ['down', 'up', 'flat']
# Set random seeds for reproducibility
torch.manual_seed(0)
if torch.cuda.is_available():
    torch.cuda.manual_seed(0)
# + id="igWE1iXMAiUg"
batch_size = 20
# NOTE(review): assumes train_data/test_data were defined in earlier cells.
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
# + id="AisORvtID2qF"
# + id="Tacb8dQnD3on"
# + id="iMemvboKD4tX"
# + id="MnQ70KkVAg-O"
# + id="LWzrD4TfaCfb"
# + id="9QkJt9YFBjz8"
# create target from OHLC and Volume Data
def buildTargets(full_df=None, train_observations=None,
                 val_observations=None,
                 test_observations=None,
                 shift_2hour=24,
                 alph=.55,
                 volity_int=10):
    """Label every bar "up"/"down"/"flat" and split by train/val/test.

    A bar is "up" when the close `shift_2hour` bars ahead exceeds the
    current close by more than alph * rolling volatility ("down" for the
    mirror condition); otherwise it stays "flat". Rows whose look-ahead
    close or rolling volatility is NaN keep the "flat" label, because NaN
    comparisons are False.

    Parameters default to the notebook-level `full`, `train`, `val` and
    `test` frames. They are now resolved at CALL time (late binding)
    instead of at definition time, so the function no longer freezes
    whatever those globals were when the cell ran and can be unit-tested
    with explicit arguments.

    shift_2hour = number of rows spanning 2 hours (5-min bars -> 24)
    alph        = alpha multiplier on volatility for the up/down threshold
    volity_int  = number of rows used to compute rolling volatility

    Returns (train_targets, val_targets, test_targets, full_targets).
    """
    # Late-bound defaults: look the notebook globals up when called.
    if full_df is None:
        full_df = full
    if train_observations is None:
        train_observations = train.shape[0]
    if val_observations is None:
        val_observations = val.shape[0]
    if test_observations is None:
        test_observations = test.shape[0]
    # Log returns; the first row has no predecessor, so treat it as 0.
    returns = np.log(full_df['Close'] / (full_df['Close'].shift(1)))
    returns.fillna(0, inplace=True)
    # Rolling volatility scaled by sqrt(window).
    volatility = returns.rolling(window=volity_int).std() * np.sqrt(volity_int)
    targets = ["flat"] * len(full_df.Close)
    targets = np.where(full_df.Close.shift(-shift_2hour) >= (full_df.Close * (1 + alph * volatility)),
                       "up", targets)
    targets = np.where(full_df.Close.shift(-shift_2hour) <= (full_df.Close * (1 - alph * volatility)),
                       "down", targets)
    # Contiguous split boundaries.
    train_split = train_observations
    val_split = train_observations + val_observations
    test_split = train_observations + val_observations + test_observations
    train_targets = targets[:train_split]
    val_targets = targets[train_split:val_split]
    test_targets = targets[val_split:test_split]
    full_targets = targets
    return train_targets, val_targets, test_targets, full_targets
# + id="9ylmZNfoLe4a"
# + id="7h1nzV77iNr2"
#train_filtered = train.loc[:][train.index.indexer_between_time(time(7,30), time(16))]
# Keep only rows inside the trading-session window.
train_filtered = train.between_time('08:29','16:30')
# + colab={"base_uri": "https://localhost:8080/"} id="GRbKVjtTnrDo" outputId="8f4092b7-f9e0-40cc-fecb-d46021f9877d"
train_filtered.index[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 238} id="ovajAAREl2HK" outputId="b440341d-81a0-43ea-ff06-d89861f4d8db"
train_filtered.head()
# + colab={"base_uri": "https://localhost:8080/"} id="JaJe2QCUpahP" outputId="199c9ec5-b188-4a08-a8be-5946269cf406"
train_filtered.index[0].month
# + id="o6lCrvvRoFGU"
# Build a complete 5-minute US/Eastern timestamp grid spanning the data.
TZ = 'US/Eastern'
start_dateTime = pd.Timestamp(year = train_filtered.index[0].year,
                              month = train_filtered.index[0].month,
                              day = train_filtered.index[0].day,
                              hour = 7, minute = 25, tz = TZ)
end_dateTime = pd.Timestamp(year = train_filtered.index[-1].year,
                            month = train_filtered.index[-1].month,
                            day = train_filtered.index[-1].day,
                            hour = 17, minute = 30, tz = TZ)
dateTime_index = pd.date_range(start_dateTime,
                               end_dateTime,
                               freq='5min').tolist()
# + colab={"base_uri": "https://localhost:8080/"} id="v6I4ajnwp3iN" outputId="c9f0949a-3855-4c90-b1b0-48142b6ff596"
# Inspect how the synthetic grid lines up with 24-row (2-hour) blocks.
dateTime_index_df = pd.DataFrame()
dateTime_index_df["Time"] = dateTime_index
dateTime_index_df = dateTime_index_df.set_index("Time")
dateTime_index_df = dateTime_index_df.between_time('07:30','17:25')
len(dateTime_index_df) % 24
# + id="49mZ-bIbmYYT" outputId="b83c8796-d0a7-4e21-a812-fc8c51d916ea" colab={"base_uri": "https://localhost:8080/", "height": 816}
dateTime_index_df[0:240][-24:]
# + id="LBvxUL8vmSRs" outputId="7075158a-24f4-4e11-b61f-d5a075ee8b68" colab={"base_uri": "https://localhost:8080/"}
len(dateTime_index_df[24:48])
# + id="fYnzIkB9lwI0" outputId="98f6bbaf-ea9c-4a60-db95-8c6c0d7245c1" colab={"base_uri": "https://localhost:8080/", "height": 1000}
dateTime_index_df[-48:]
# + id="ODzRQAYNEL_q" colab={"base_uri": "https://localhost:8080/"} outputId="96a4b51a-53f5-4125-d3ca-8abbf9cc485e"
dateTime_index[18] == train_filtered["Time"][0]
# + id="sHmgnwvRJptC"
# Backward as-of merge: snap each grid timestamp to the latest real bar,
# then trim back to the session window.
dateTime_index_df = pd.DataFrame()
dateTime_index_df["Time"] = dateTime_index
final_train = pd.merge_asof(dateTime_index_df, train_filtered, on='Time', direction='backward').set_index("Time").between_time('08:29','16:30')
# + id="xv_VAgq2TNYb"
def _suppress_repeats_and_nans(values):
    """Return a list where NaNs and values equal to the previous entry
    become 0; genuinely new values pass through unchanged.

    Repeated values are artifacts of the backward as-of merge above, which
    re-emits a stale bar for grid slots with no fresh data. Replaces the
    two copy-pasted loops previously used for the Volume and
    VolumeWeightedAvgPrice columns.
    """
    cleaned = []
    prev = None
    for v in values:
        if prev is None:
            # First entry: NaN means no data yet.
            if math.isnan(v):
                prev = 0
                cleaned.append(0)
            else:
                prev = v
                cleaned.append(v)
        elif v == prev:
            # Repeated value: the merge forward-filled a stale bar.
            cleaned.append(0)
            prev = v
        elif math.isnan(v):
            cleaned.append(0)
            prev = 0
        else:
            cleaned.append(v)
            prev = v
    return cleaned

# Apply the identical cleanup to both merge-affected columns.
final_train["Volume"] = _suppress_repeats_and_nans(final_train["Volume"])
final_train["VolumeWeightedAvgPrice"] = _suppress_repeats_and_nans(final_train["VolumeWeightedAvgPrice"])
# + colab={"base_uri": "https://localhost:8080/", "height": 238} id="0KcWRUydKZHw" outputId="b491db76-e257-41fb-8fab-dc2d5172eaa2"
final_train.backfill().head()  # preview with NaNs back-filled (not persisted)
# + id="0P0EAJKoTKYd"
# + colab={"base_uri": "https://localhost:8080/", "height": 865} id="LIMD8D_gJlHX" outputId="b5d76df5-82b4-4f98-f03b-da69b5812921"
final_train[150:175]
# + id="F6mH1R0qiKx1"
# print(buildTargets.__doc__)
# Build the up/down/flat labels from the notebook-level frames.
train_targets, val_targets, test_targets, full_targets = buildTargets()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="VuAVCxY2gMww" outputId="33bad82e-ff31-43e8-8e5e-51e247d1f231"
train.head(50)
# + colab={"base_uri": "https://localhost:8080/"} id="o30Rh1rZdNXz" outputId="2a0c5a63-f479-4876-c96e-05618613b163"
train_targets[48:-1:24]  # every 24th label (one per 2-hour block)
# + colab={"base_uri": "https://localhost:8080/"} id="s7O_fQOyfanl" outputId="3132a203-19b0-4fdb-f8b4-a4934d34e486"
print(train.iloc[71]["Close"])
print(train.iloc[94]["Close"])
# + id="CadOMouVfmrE"
# test split pd DataFrame into arrays for running CNN on
test_train_tonp = train[["Time", "Open", "High", "Low", "Close", "Volume"]]
test_train_array = test_train_tonp.to_numpy()
# + colab={"base_uri": "https://localhost:8080/"} id="aS5Kzi8q2kGB" outputId="1e55eda0-afc0-4a8b-c901-981c6c66e0f6"
# Render each bar's timestamp as "H:M" to audit the intraday grid.
hourmin_list = []
for stamp in test_train_tonp["Time"]:
    hourmin_list.append(f'{stamp.hour}:{stamp.minute}')
def unique(list1):
    """Return the distinct elements of *list1* in first-occurrence order.

    Uses dict.fromkeys (insertion-ordered) instead of the original O(n^2)
    membership-scan loop. Requires hashable elements, which the "H:M"
    strings passed here always are.
    """
    return list(dict.fromkeys(list1))
# Count and list the distinct intraday times present.
print(len(unique(hourmin_list)))
sorted(unique(hourmin_list))
# + colab={"base_uri": "https://localhost:8080/"} id="3H4r7Yczjnpt" outputId="08186638-eb92-49b0-f012-caa9d6f31504"
# Trim the tail so the row count is a whole number of 24-row (2-hour) blocks.
# Guard cutoff == 0: `arr[:-0]` means `arr[:0]` and would wipe the array.
cutoff = len(test_train_array) % 24
if cutoff:
    test_train_array = test_train_array[:-cutoff]
len(test_train_array) % 24
# + colab={"base_uri": "https://localhost:8080/"} id="eU_9osOThQ0k" outputId="f39934e4-3709-46a2-e089-d99e9375c2e1"
# Re-run of the trim above. Guard cutoff == 0: after the first trim the
# remainder is 0 and `arr[:-0]` (== `arr[:0]`) would empty the array.
cutoff = len(test_train_array) % 24
if cutoff:
    test_train_array = test_train_array[:-cutoff]
len(test_train_array) % 24
def blockshaped(arr, nrows, ncols):
    """
    Split a 2-D array into an (n, nrows, ncols) stack of tiles, where
    n * nrows * ncols == arr.size and each tile preserves the original
    row-major ("physical") layout of arr.
    """
    height, width = arr.shape
    assert height % nrows == 0, f"{height} rows is not evenly divisible by {nrows}"
    assert width % ncols == 0, f"{width} cols is not evenly divisible by {ncols}"
    # Group rows, then columns, then swap so tiles come out contiguous.
    tiles = arr.reshape(height // nrows, nrows, -1, ncols).swapaxes(1, 2)
    return tiles.reshape(-1, nrows, ncols)
np.set_printoptions(linewidth=100)
print(blockshaped(test_train_array, 24, 6)[1].shape)
# Stack of (24 rows x 6 cols) 2-hour blocks.
X_Train = blockshaped(test_train_array, 24, 6)
# + colab={"base_uri": "https://localhost:8080/"} id="UxOt0QXI1jkn" outputId="8399b424-e85c-4cc5-e3e9-6ca148fe7e6f"
np.set_printoptions(linewidth=110)
X_Train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="Wz45hu4VtxvT" outputId="94cd33e1-2c86-4222-cdcc-c368414f790a"
np.set_printoptions(linewidth=210)
np.flip(np.rot90(X_Train[0], 1), axis=0).shape  # transpose-like view of one block
# + id="pBWHqagHcNpo"
def tf_dataset(series_x, series_y, batch_size, shuffle_buffer, shuffle=True):
    """Build a cached, (optionally shuffled,) batched, repeating tf.data
    pipeline over (x, y) pairs."""
    ds = tf.data.Dataset.from_tensor_slices((series_x, series_y)).cache()
    if shuffle:
        ds = ds.shuffle(shuffle_buffer)
    return ds.batch(batch_size).repeat()
def create_window_dataset(ds, window_size):
    """Return an array of every contiguous length-`window_size` window of ds.

    Fix: the original appended ``ds[i - window_size:]`` (the whole tail),
    producing ragged, ever-shorter "windows" and hence an object array;
    each slice is now capped at ``i`` so every window has exactly
    `window_size` rows.
    """
    windowed_dataset = []
    for i in range(window_size, ds.shape[0] + 1):
        windowed_dataset.append(ds[i - window_size:i])
    return np.array(windowed_dataset)
# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="ZlbOS2pJcNw2" outputId="42225d99-57c1-431a-9a48-18928cb92fa1"
from sklearn.preprocessing import MinMaxScaler
# Scale features to [0, 1]. Fit the min/max on the TRAIN split only and
# reuse that same transform for val/test; the previous fit_transform on
# every split leaked split-specific statistics and made the three splits
# mutually incomparable.
scaled_df = pd.DataFrame()
scaler = MinMaxScaler()
scaled_train = scaler.fit_transform(train[['Open', 'High', 'Low', 'Close', 'Volume']])
scaled_val = scaler.transform(val[['Open', 'High', 'Low', 'Close', 'Volume']])
scaled_test = scaler.transform(test[['Open', 'High', 'Low', 'Close', 'Volume']])
# + id="gILmupjYcNzF"
WINDOW_SIZE = 25
BATCH_SIZE = 1
# Sliding windows over all-but-the-last scaled feature column.
windowed_dataset_train = create_window_dataset(scaled_train[:, 0:-1], WINDOW_SIZE)
# + id="k0MiuNJbc2Hl"
windowed_dataset_train[1].shape
# + id="4yGUbYPQcN1d"
# NOTE(review): the calls below pass three arguments and unpack two
# results, which does not match the two-argument, single-return
# create_window_dataset defined above — they appear to target a different
# version of the helper and would fail against this one.
windowed_dataset_train, labels_train = create_window_dataset(train_dataset_normalized, train_dataset[:, -1], WINDOW_SIZE)
train_set = tf_dataset(windowed_dataset_train, labels_train, BATCH_SIZE, 1000)
unshuffled_train_set = tf_dataset(windowed_dataset_train, labels_train, BATCH_SIZE, 1000, False)
windowed_dataset_validation, labels_validation = create_window_dataset(cross_validation_dataset_normalized, cross_validation_dataset[:, -1], WINDOW_SIZE)
cross_validation_set = tf_dataset(windowed_dataset_validation, labels_validation, BATCH_SIZE, 1000, False)
windowed_dataset_dev, labels_dev = create_window_dataset(dev_dataset_normalized, dev_dataset[:, -1], WINDOW_SIZE)
dev_set = tf_dataset(windowed_dataset_dev, labels_dev, 1, 1000, False)
# + id="DE9sAqIlcN33"
# + id="rJuN0MqwcN6G"
# + id="2Pr1KvdHcN8K"
# + colab={"base_uri": "https://localhost:8080/"} id="6h7NHSyxa78z" outputId="af486437-51e4-4eda-a98b-25f84110bb96"
train.values
# + colab={"base_uri": "https://localhost:8080/"} id="Gmuw4hzsXWeI" outputId="62d53422-e7cb-4e9b-94db-c7dc2c2bfde3"
from numpy import array
from numpy import hstack
def split_sequences(sequences, n_steps_in, n_steps_out):
    """Slice a (time, features) sequence into supervised (X, y) pairs.

    Each sample pairs n_steps_in consecutive rows with the following
    n_steps_out rows; windows that would run past the end are dropped.
    """
    X, y = [], []
    for start in range(len(sequences)):
        split = start + n_steps_in
        stop = split + n_steps_out
        if stop > len(sequences):
            break  # remaining windows would overrun the sequence
        X.append(sequences[start:split, :])
        y.append(sequences[split:stop, :])
    return array(X), array(y)
# Worked example exercising split_sequences on a small synthetic series.
# define input sequence
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90])
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95])
out_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))])
# convert to [rows, columns] structure
in_seq1 = in_seq1.reshape((len(in_seq1), 1))
in_seq2 = in_seq2.reshape((len(in_seq2), 1))
out_seq = out_seq.reshape((len(out_seq), 1))
# horizontally stack columns
dataset = hstack((in_seq1, in_seq2, out_seq))
# choose a number of time steps
n_steps_in, n_steps_out = 3, 2
# convert into input/output
X, y = split_sequences(dataset, n_steps_in, n_steps_out)
print(X.shape, y.shape)
# summarize the data
for i in range(len(X)):
    print(X[i], y[i])
# + colab={"base_uri": "https://localhost:8080/"} id="u7on_GbUXuT8" outputId="0a8e4f28-7852-408f-99da-cb35987503bc"
dataset
# + id="-OJ9XpuoNqhy"
def create_dataset(df):
    """Build 50-step look-back windows (x) and next-value targets (y)
    from column 0 of a 2-D array-like."""
    windows, targets = [], []
    for end in range(50, df.shape[0]):
        windows.append(df[end - 50:end, 0])
        targets.append(df[end, 0])
    return np.array(windows), np.array(targets)
# + colab={"base_uri": "https://localhost:8080/", "height": 380} id="RW7ef3O_abDf" outputId="9ddd2bcd-2e0e-4e66-c39d-bd09ca4cd378"
# Build 50-step windows directly from the (still unscaled) train frame.
x_train, y_train = create_dataset(train)
# + id="HOBCUubqNqDO"
# + colab={"base_uri": "https://localhost:8080/"} id="H9rM5V1BHyBl" outputId="1fceb257-8107-42bf-b289-7dbc66401600"
# Report the date span and shape of each split.
print(f'Training Days: {train_day_int}, Set Start: {train.index[0].strftime("%d-%b-%Y")}, End: {train.index[-1].strftime("%d-%b-%Y")}, Shape: {train.shape}')
print(f'Validation Days: {val_day_int}, Set Start: {val.index[0].strftime("%d-%b-%Y")}, End: {val.index[-1].strftime("%d-%b-%Y")}, Shape: {val.shape}')
print(f'Testing Days: {test_day_int}, Start: {test.index[0].strftime("%d-%b-%Y")}, End: {test.index[-1].strftime("%d-%b-%Y")}, Shape: {test.shape}')
# + colab={"base_uri": "https://localhost:8080/"} id="XqbVlKwLKN-l" outputId="73d801bb-1ff8-472c-c4dc-fcacde9a3339"
print(f'Number of Rows Between all Sets: {(train.shape[0] + val.shape[0] + test.shape[0])}, Shape of Full Set: {full.shape[0]}')
# + [markdown] id="IMY3MYh4D7sR"
# # Loading and Preparing The Data
# + id="NmTD5JViEFMp"
# create target from OHLC and Volume Data
def buildTargets(full_df=None, train_observations=None,
                 val_observations=None,
                 test_observations=None,
                 shift_2hour=24,
                 alph=.55,
                 volity_int=10):
    """Label every bar "up"/"down"/"flat" and split by train/val/test.

    (Redefinition of the earlier buildTargets cell.) A bar is "up" when
    the close `shift_2hour` bars ahead exceeds the current close by more
    than alph * rolling volatility ("down" for the mirror condition);
    otherwise it stays "flat". Rows whose look-ahead close or rolling
    volatility is NaN keep the "flat" label, because NaN comparisons are
    False.

    Parameters default to the notebook-level `full`, `train`, `val` and
    `test` frames, resolved at CALL time (late binding) rather than at
    definition time, so the function can be unit-tested with explicit
    arguments.

    shift_2hour = number of rows spanning 2 hours (5-min bars -> 24)
    alph        = alpha multiplier on volatility for the up/down threshold
    volity_int  = number of rows used to compute rolling volatility

    Returns (train_targets, val_targets, test_targets, full_targets).
    """
    # Late-bound defaults: look the notebook globals up when called.
    if full_df is None:
        full_df = full
    if train_observations is None:
        train_observations = train.shape[0]
    if val_observations is None:
        val_observations = val.shape[0]
    if test_observations is None:
        test_observations = test.shape[0]
    # Log returns; the first row has no predecessor, so treat it as 0.
    returns = np.log(full_df['Close'] / (full_df['Close'].shift(1)))
    returns.fillna(0, inplace=True)
    # Rolling volatility scaled by sqrt(window).
    volatility = returns.rolling(window=volity_int).std() * np.sqrt(volity_int)
    targets = ["flat"] * len(full_df.Close)
    targets = np.where(full_df.Close.shift(-shift_2hour) >= (full_df.Close * (1 + alph * volatility)),
                       "up", targets)
    targets = np.where(full_df.Close.shift(-shift_2hour) <= (full_df.Close * (1 - alph * volatility)),
                       "down", targets)
    # Contiguous split boundaries.
    train_split = train_observations
    val_split = train_observations + val_observations
    test_split = train_observations + val_observations + test_observations
    train_targets = targets[:train_split]
    val_targets = targets[train_split:val_split]
    test_targets = targets[val_split:test_split]
    full_targets = targets
    return train_targets, val_targets, test_targets, full_targets
# print(buildTargets.__doc__)
train_targets, val_targets, test_targets, full_targets = buildTargets()
# + id="OJlf0UBTsEfS"
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="EUEpy7cSajI_" outputId="c29eda79-cd3a-4005-c18b-b02c8a04ab91"
# Class-balance bar chart of the generated labels.
plot_df = pd.DataFrame(full_targets, columns=["FuturePrice"])
plot_df["FuturePrice"].value_counts().plot(kind = 'bar')
# + [markdown] id="W-hurH5w70Ir"
# # Mulitclass Classification (None CDT 1D CNN)
# + colab={"base_uri": "https://localhost:8080/"} id="1TfD1r6R9IbN" outputId="fde42bac-23b9-4821-e276-e62a1e05ccff"
# Quick inspection of the full frame's columns and Open series.
full.columns
# + colab={"base_uri": "https://localhost:8080/"} id="KybVva6t9Xwx" outputId="04188c38-aa11-4833-85de-6ad39f479301"
full["Open"]
# + id="YYCBKN4uu-FE"
from sklearn.preprocessing import MinMaxScaler
# Scale features to [0, 1]. Fit the min/max on the TRAIN split only and
# reuse that same transform for val/test; the previous fit_transform on
# every split leaked split-specific statistics and made the three splits
# mutually incomparable.
scaled_df = pd.DataFrame()
scaler = MinMaxScaler()
scaled_train = scaler.fit_transform(train[['Open', 'High', 'Low', 'Close', 'Volume']])
scaled_val = scaler.transform(val[['Open', 'High', 'Low', 'Close', 'Volume']])
scaled_test = scaler.transform(test[['Open', 'High', 'Low', 'Close', 'Volume']])
# + id="TCETOevJpk81"
# Map string labels to integer class ids for CrossEntropyLoss.
train_targets_df = pd.DataFrame(train_targets)
val_targets_df = pd.DataFrame(val_targets)
test_targets_df = pd.DataFrame(test_targets)
class2idx = {
    "up":0,
    "flat":1,
    "down":2,
}
idx2class = {v: k for k, v in class2idx.items()}
train_targets_df.replace(class2idx, inplace=True)
val_targets_df.replace(class2idx, inplace=True)
test_targets_df.replace(class2idx, inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="V9bV2_jpp6Qx" outputId="bde9e731-1d9c-448f-d877-e65c51fe7cba"
train_targets_df[0].value_counts().plot(kind = 'bar')
# + id="cRJ-EbOPqEs-"
# Final numpy arrays handed to the torch Datasets below.
X_train = np.array(scaled_train)
X_val = np.array(scaled_val)
X_test= np.array(scaled_test)
y_train = np.array(train_targets_df[0])
y_val = np.array(val_targets_df[0])
y_test = np.array(test_targets_df[0])
# + colab={"base_uri": "https://localhost:8080/"} id="XJFXeknpqP7y" outputId="96a5204c-e548-4d5d-ec6e-59591cca9a9b"
# Shape/type audit of the six arrays. Fix: the last two prints previously
# said "Val" while actually printing the *test* arrays.
print(f'Type of X Train: {type(X_train)}, Length of X: {X_train.shape}')
print(f'Type of y Train: {type(y_train)}, Length of y: {y_train.shape}')
print(f'Type of X Val: {type(X_val)}, Length of X: {X_val.shape}')
print(f'Type of y Val: {type(y_val)}, Length of y: {y_val.shape}')
print(f'Type of X Test: {type(X_test)}, Length of X: {X_test.shape}')
print(f'Type of y Test: {type(y_test)}, Length of y: {y_test.shape}')
# + colab={"base_uri": "https://localhost:8080/"} id="TH5tzBJ9Ew4a" outputId="b0d23569-3149-4780-aa72-4c9c434e9acd"
np.asarray(train)  # raw view of the train frame as an ndarray
# + id="41IFYHMvucBn"
class ClassifierDataset():
    """Index-able (features, label) container for DataLoader consumption."""

    def __init__(self, X_data, y_data):
        self.X_data = X_data
        self.y_data = y_data

    def __getitem__(self, index):
        # One (features, label) pair per index.
        return (self.X_data[index], self.y_data[index])

    def __len__(self):
        return len(self.X_data)
# Wrap the splits as float features / long labels for DataLoader use.
train_dataset = ClassifierDataset(torch.from_numpy(X_train).float(), torch.from_numpy(y_train).long())
val_dataset = ClassifierDataset(torch.from_numpy(X_val).float(), torch.from_numpy(y_val).long())
test_dataset = ClassifierDataset(torch.from_numpy(X_test).float(), torch.from_numpy(y_test).long())
# + colab={"base_uri": "https://localhost:8080/"} id="xtO8sN6lvVAu" outputId="e9cc706e-338b-4f7a-c7aa-47b7a79856ac"
scaled_train.shape[1]
# + id="Z0qU88rZu5Q_"
# Training hyper-parameters.
EPOCHS = 300
BATCH_SIZE = 16
LEARNING_RATE = 0.0007
NUM_FEATURES = scaled_train.shape[1]
NUM_CLASSES = 3
# + id="n1UlEevhvcgy"
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          )
val_loader = DataLoader(dataset=val_dataset, batch_size=1)
test_loader = DataLoader(dataset=test_dataset, batch_size=1)
# + id="MwPC11JAvjnf"
class MulticlassClassification(nn.Module):
    """Four-layer MLP (512-128-64) with batch-norm, ReLU and dropout,
    producing raw class logits."""

    def __init__(self, num_feature, num_class):
        super(MulticlassClassification, self).__init__()
        # Fully connected stack.
        self.layer_1 = nn.Linear(num_feature, 512)
        self.layer_2 = nn.Linear(512, 128)
        self.layer_3 = nn.Linear(128, 64)
        self.layer_out = nn.Linear(64, num_class)
        # Shared nonlinearity / regularisation modules.
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.2)
        self.batchnorm1 = nn.BatchNorm1d(512)
        self.batchnorm2 = nn.BatchNorm1d(128)
        self.batchnorm3 = nn.BatchNorm1d(64)

    def forward(self, x):
        """Return (batch, num_class) logits for a (batch, num_feature) input."""
        h = self.relu(self.batchnorm1(self.layer_1(x)))
        h = self.dropout(self.relu(self.batchnorm2(self.layer_2(h))))
        h = self.dropout(self.relu(self.batchnorm3(self.layer_3(h))))
        return self.layer_out(h)
# + colab={"base_uri": "https://localhost:8080/"} id="vk3Y-M6lvnEx" outputId="be238fd9-2e5d-4a42-c4da-1ea8bb28562a"
# Pick the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# + colab={"base_uri": "https://localhost:8080/"} id="3SVhPeVgvrbB" outputId="d94c12ee-5687-4a1e-dc6b-6cce46af6508"
# Model, loss and optimizer for the multiclass run below.
model = MulticlassClassification(num_feature = NUM_FEATURES, num_class=NUM_CLASSES)
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
print(model)
# + id="Jx5CwK8Qvxos"
def multi_acc(y_pred, y_test):
    """Batch accuracy (0-100, rounded) from raw logits and integer labels."""
    # argmax over log-softmax equals argmax over the raw logits.
    predicted_labels = torch.max(torch.log_softmax(y_pred, dim=1), dim=1)[1]
    hits = (predicted_labels == y_test).float()
    return torch.round(hits.sum() / len(hits) * 100)
# + id="wYRwPyN5vzAX"
# Per-epoch history, filled in by the training loop below.
accuracy_stats = {"train": [], "val": []}
loss_stats = {"train": [], "val": []}
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["35b0c6a641f74b7a8a5a17696c771999", "5435c22875e1449bba6f514dc362fb86", "2174b8eaf379476aa207a7d3553a23cb", "d2eea602e1f44679b9dca81d02cef6a9", "4c4ee8d44be44753b86d2334ed5201d9", "2d07aa40726741bba6d08a741f239b2a", "ae9e58658d6e45a0ae4f071d65facbd8", "<KEY>", "<KEY>", "<KEY>", "8a927038a7cb49a6a64673ce556a0c8e"]} id="8f5oKsuRvz21" outputId="b29c559b-8c71-422a-eb0f-83e583c0dd34"
# Epoch-driven train/validate loop (same pattern as the earlier cell).
print("Begin training.")
for e in tqdm(range(1, EPOCHS+1)):
    # TRAINING
    train_epoch_loss = 0
    train_epoch_acc = 0
    model.train()  # enable dropout / batch-norm training behavior
    for X_train_batch, y_train_batch in train_loader:
        X_train_batch, y_train_batch = X_train_batch.to(device), y_train_batch.to(device)
        optimizer.zero_grad()
        y_train_pred = model(X_train_batch)
        train_loss = criterion(y_train_pred, y_train_batch)
        train_acc = multi_acc(y_train_pred, y_train_batch)
        train_loss.backward()
        optimizer.step()
        train_epoch_loss += train_loss.item()
        train_epoch_acc += train_acc.item()
    # VALIDATION
    with torch.no_grad():  # no gradients needed while evaluating
        val_epoch_loss = 0
        val_epoch_acc = 0
        model.eval()
        for X_val_batch, y_val_batch in val_loader:
            X_val_batch, y_val_batch = X_val_batch.to(device), y_val_batch.to(device)
            y_val_pred = model(X_val_batch)
            val_loss = criterion(y_val_pred, y_val_batch)
            val_acc = multi_acc(y_val_pred, y_val_batch)
            val_epoch_loss += val_loss.item()
            val_epoch_acc += val_acc.item()
    # Per-epoch averages over the number of batches.
    loss_stats['train'].append(train_epoch_loss/len(train_loader))
    loss_stats['val'].append(val_epoch_loss/len(val_loader))
    accuracy_stats['train'].append(train_epoch_acc/len(train_loader))
    accuracy_stats['val'].append(val_epoch_acc/len(val_loader))
    print(f'Epoch {e+0:03}: | Train Loss: {train_epoch_loss/len(train_loader):.5f} | Val Loss: {val_epoch_loss/len(val_loader):.5f} | Train Acc: {train_epoch_acc/len(train_loader):.3f}| Val Acc: {val_epoch_acc/len(val_loader):.3f}')
# + [markdown] id="CdPrjLX4u2u7"
# # APPENDIX
# + id="kO0RlwtEvavG"
# Appendix: class-balance bar chart of the generated labels.
plot_df = pd.DataFrame(full_targets, columns=["FuturePrice"])
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="o4DstDh7v61B" outputId="9fb82a04-9a72-4e9f-acc0-e71b745dfc7e"
plot_df["FuturePrice"].value_counts().plot(kind = 'bar')
# + colab={"base_uri": "https://localhost:8080/", "height": 440} id="qUSks6bebTnk" outputId="95096d69-9fee-4d57-cdf8-870bbf0b9abf"
def buildTargets_VolOnly(full_df=None, train_observations=None,
                         val_observations=None,
                         test_observations=None,
                         alph=0.55, volity_int=10):
    """Return the rolling volatility series used by buildTargets.

    Only full_df and volity_int affect the result; the split sizes and
    alph are kept solely for signature compatibility with buildTargets.
    full_df defaults to the notebook-level `full` frame, resolved at CALL
    time (late binding) so the function can be tested with an explicit
    argument.
    """
    if full_df is None:
        full_df = full
    # Log returns, with the seed row treated as zero return.
    returns = np.log(full_df['Close'] / (full_df['Close'].shift(1)))
    returns.fillna(0, inplace=True)
    # Rolling std over volity_int rows, scaled by sqrt(window).
    volatility = returns.rolling(window=volity_int).std() * np.sqrt(volity_int)
    return volatility
#return train_targets, val_targets, test_targets, full_targets
# Plot rolling volatility against close price on twin y-axes.
volatility = buildTargets_VolOnly()
fig = plt.figure(figsize=(15, 7))
ax1 = fig.add_subplot(1, 1, 1)
volatility.plot(ax=ax1, color = "red")
ax1.set_xlabel('Date')
ax1.set_ylabel('Volatility', color = "red")
ax1.set_title(f'Annualized volatility for {ticker}')
ax2 = ax1.twinx()  # second axis sharing the same x range
full.Close.plot(ax=ax2, color = "blue")
ax2.set_ylabel('Close', color = "blue")
plt.show()
# + id="UViHHQXtEB7r"
# Scikit-learn sanity check on the breast-cancer dataset (unrelated to the
# market pipeline; kept as an appendix experiment). NOTE(review): these
# cells are out of order — `data` is only loaded a few cells below.
X,y=data.data,data.target
# Since the default in the file is 0=malignant 1=benign we want to reverse these
y=(y==0).astype(int)
X,y= np.array(X),np.array(y)
# Let's set aside a test set and use the remainder for training and cross-validation
X_train,X_test,y_train,y_test = train_test_split(X, y, random_state=0,test_size=0.2)
# Let's scale our data to help the algorithm converge faster
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# + id="DxdD_w9pRfO8"
# NOTE(review): drop() returns a copy that is discarded here — this line
# has no effect on `full` (no assignment, no inplace=True).
full.drop(columns=['Time', ])
# + id="TKu0iaKZSnyM"
from sklearn.datasets import load_breast_cancer
# + id="K23hvV9cWb96"
data=load_breast_cancer(as_frame=True)
# + colab={"base_uri": "https://localhost:8080/"} id="RuBc_eiVWedQ" outputId="fda35b37-d263-4822-e472-46c94d9d2a31"
data.target
# + id="rCar6GOuWe6f"
# source notebook: 1D_CNN_Attempts/1D_CNN_WorkingPipeLine.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Load/Define MTEP 1 Scenario
# Based on Yifan's recommendation: "I also put some example syntax to help create MTEP Future1 scenario, attached FYI. This is just an example, and most things are pretty much scaled on zonal level. While outside of MISO might be ok, inside MISO particularly South area might need to model it on unit level - depending on your study needs."
from powersimdata.scenario.scenario import Scenario
Scenario().get_scenario_table().tail(8)  # show the most recent scenarios
# +
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 12 02:15:00 2021
@author: YifanLi
"""
from powersimdata.scenario.scenario import Scenario
from datetime import datetime
import pytz
# NOTE(review): datetime.now() returns the *local* naive time, not UTC,
# despite the variable name; astimezone() then interprets it as local time.
# If a true UTC stamp is intended, use datetime.now(pytz.utc) instead.
utc_now = datetime.now()
mt_now = utc_now.astimezone(pytz.timezone('America/Denver'))
# Load base scenario 403 for reference (its change table and grid); neither
# object is used further in this chunk.
scenario_base = Scenario(403)
ct_base = scenario_base.state.get_ct()
grid_base = scenario_base.state.get_grid()
# Build a fresh scenario on the TAMU USA grid model, Eastern interconnect.
scenario = Scenario('')
scenario.state.set_builder(grid_model="usa_tamu", interconnect="Eastern")
scenario.state.builder.set_name("MTEP 1", mt_now)
# Hourly simulation over Aug-Oct 2016.
scenario.state.builder.set_time("2016-08-01 00:00:00","2016-10-31 23:00:00","1H")
scenario.state.builder.set_base_profile("demand", "vJan2021")
scenario.state.builder.set_base_profile("hydro", "vJan2021")
scenario.state.builder.set_base_profile("solar", "vJan2021")
scenario.state.builder.set_base_profile("wind", "vJan2021")
# MISO Retirement and Expansion: Future 1 on zonal level.
# Per-resource zonal capacity scaling factors (1.0 = unchanged, 0.0 = full
# retirement, >1.0 = expansion). Driving the repeated scale_plant_capacity()
# calls from a single table keeps the scenario data in one place; dicts keep
# insertion order (Python 3.7+), so calls are issued in the original order.
zonal_capacity_scaling = {
    "coal": {"Arkansas": 0.396,
             "Illinois Downstate": 0.714,
             "Indiana": 0.292,
             "Iowa": 0.875,
             "Louisiana": 0.196,
             "Michigan Northern": 0.152,
             "Michigan Southern": 0.152,
             "Minnesota Northern": 0.000,
             "Minnesota Southern": 0.000,
             "Missouri East": 0.215,
             "Montana Eastern": 0.000,
             "North Dakota": 0.782,
             "Wisconsin": 0.384},
    "dfo": {"Illinois Downstate": 0.649,
            "Indiana": 0.000,
            "Iowa": 0.747,
            "Louisiana": 0.873,
            "Michigan Northern": 0.768,
            "Michigan Southern": 0.768,
            "Minnesota Northern": 0.157,
            "Minnesota Southern": 0.157,
            "Missouri East": 0.374,
            "Wisconsin": 0.612},
    "hydro": {"Michigan Northern": 1.031,
              "Michigan Southern": 1.031},
    "ng": {"Arkansas": 1.035,
           "East Texas": 1.200,
           "Illinois Downstate": 1.600,
           "Indiana": 1.926,
           "Iowa": 1.248,
           "Louisiana": 1.073,
           "Michigan Northern": 1.149,
           "Michigan Southern": 1.149,
           "Minnesota Northern": 1.754,
           "Minnesota Southern": 1.754,
           "Mississippi": 1.110,
           "Missouri East": 1.863,
           "Montana Eastern": 0.684,
           "North Dakota": 3.842,
           "Wisconsin": 1.451},
    "nuclear": {"Iowa": 0.000,
                "Michigan Southern": 0.772,
                "Minnesota Southern": 0.309},
    "other": {"Minnesota Northern": 0.800,
              "Minnesota Southern": 0.800},
    "solar": {"Arkansas": 49.121,
              "East Texas": 1744.620,
              "Illinois Downstate": 1893.734,
              "Indiana": 30.234,
              "Iowa": 465.011,
              "Louisiana": 6174.610,
              "Michigan Southern": 101.703,
              "Minnesota Northern": 13.290,
              "Minnesota Southern": 13.290,
              "Mississippi": 19.743,
              "Missouri East": 642.325,
              "North Dakota": 2372.070,
              "Wisconsin": 203.416},
    "wind": {"Illinois Downstate": 1.590,
             "Indiana": 1.642,
             "Iowa": 1.298,
             "Michigan Northern": 1.625,
             "Michigan Southern": 1.625,
             "Minnesota Northern": 1.661,
             "Minnesota Southern": 1.661,
             "Mississippi": 201.000,
             "Missouri East": 301.278,
             "Montana Eastern": 0.000,
             "North Dakota": 2.554,
             "Wisconsin": 1.261},
}
for resource, zone_scaling in zonal_capacity_scaling.items():
    scenario.state.builder.change_table.scale_plant_capacity(
        resource, zone_name=zone_scaling)
# Add one new 10.3 MW hydro unit.
scenario.state.builder.change_table.add_plant(
    [{"type": "hydro", "bus_id": 45429, "Pmax": 10.3}])
# Storage additions: every site uses a 4-hour duration and 95% in/out
# efficiency, so only (bus_id, capacity) varies per site. Looping over one
# table issues the same add_storage_capacity() calls, in the same order, as
# the original one-call-per-site version.
storage_sites = [  # (bus_id, capacity)
    (13675, 5), (13625, 5), (46288, 5), (45496, 5), (46748, 5), (46363, 5),
    (42760, 5), (42905, 5), (42626, 5), (42627, 5), (42628, 5), (55161, 5),
    (52169, 5), (54914, 5), (60845, 5), (60486, 5),
    (38745, 10), (39610, 10), (39250, 10), (39630, 10), (39413, 10),
    (39316, 10), (39206, 10), (39548, 10), (39637, 10), (39256, 10),
    (39358, 10), (39546, 10), (39417, 10), (39528, 10), (39296, 10),
    (39326, 10), (39327, 10), (39439, 10), (39613, 10), (39254, 10),
    (39376, 10), (39252, 10), (39538, 10),
    (39375, 6),
    (40507, 35), (40104, 35), (40481, 35), (40818, 35), (40625, 35),
    (40036, 35),
    (40767, 26),
    (49264, 13), (52161, 13),
    (30532, 10), (56810, 10),
    (48723, 20),
]
for bus_id, capacity in storage_sites:
    scenario.state.builder.change_table.add_storage_capacity(
        [{"bus_id": bus_id, "capacity": capacity, "duration": 4,
          "InEff": 0.95, "OutEff": 0.95}])
#scenario.state.builder.change_table.scale_demand([{"zone_name": 1.000}])
# -
# create scenario
scenario.create_scenario()
# Fixed typo in the status message: "secnario" -> "scenario".
print('[*] Current scenario state:', scenario.state.name)
scenario.print_scenario_status()
# +
# prep simulation inputs
from powersimdata.scenario.scenario import Scenario  # NOTE(review): redundant re-import; harmless
scenario.prepare_simulation_input()
print('\n[*] PREPARED \n')
# launch simulation
process_run = scenario.launch_simulation(solver='gurobi')
print('\n[*] SIMULATION LAUNCHED \n')
# -
scenario.print_scenario_status()
scenario.check_progress()
from powersimdata.scenario.scenario import Scenario  # redundant re-import; harmless
# Inspect the scenario table: the latest entries, then the base-scenario row.
Scenario().get_scenario_table().tail()
Scenario().get_scenario_table().loc[403]
| standalone/Scenario_1/0_build_MTEP1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Objective
#
# Investigate ways to threshold cluster sizes.
# +
# Project-local annotation-analysis classes (not standard packages).
from SpotAnnotationAnalysis import SpotAnnotationAnalysis
from BaseAnnotation import BaseAnnotation
from QuantiusAnnotation import QuantiusAnnotation
# Plot/clustering configuration shared by every analysis cell below.
worker_marker_size = 8
cluster_marker_size = 40
bigger_window_size = False
img_height = 300
# Clustering algorithm name and its numeric parameter — presumably the
# AffinityPropagation "preference"; confirm against SpotAnnotationAnalysis.
clustering_params = ['AffinityPropagation', -350]
pairwise_threshold = 1     # workers above this pairwise score are dropped
show_correctness = True
correctness_threshold = 4  # threshold used when coloring clusters by correctness
# -
# # Plots
# Grouped by:
# - background
# - number of spots
# - mean SNR
# ## Background: Tissue
def run_cluster_plots(img_names, gen_date, bg_type, json_filename):
    """Run the per-cluster annotation plots for each synthetic image.

    For every image name: load the Quantius annotations, slice them to the
    image, plot annotations per cluster, then re-plot after dropping workers
    whose pairwise scores exceed ``pairwise_threshold``.  Uses the
    module-level plotting parameters defined in the configuration cell above.
    (The unused ``img_filepath`` of the original cells was dropped.)
    """
    base_dir = '/Users/jenny.vo-phamhi/Documents/FISH-annotation/Annotation/gen_'+gen_date
    json_filepath = base_dir+'/'+json_filename
    for img_name in img_names:
        img_filename = img_name+'spot_img.png'
        csv_filepath = base_dir+'/spot_data/'+bg_type+'/'+img_name+'_coord_snr_list.csv'
        ba = QuantiusAnnotation(json_filepath)
        sa = SpotAnnotationAnalysis(ba)
        anno_all = ba.df()
        anno_one_snr = ba.slice_by_image(anno_all, img_filename)
        # All annotations for this image.
        sa.plot_annotations_per_cluster(anno_one_snr, clustering_params, show_correctness,
                                        correctness_threshold, csv_filepath, img_filename,
                                        img_name, bigger_window_size)
        # Same plot with high-pairwise-score workers removed.
        df_good_workers_pairwise = sa.slice_by_worker_pairwise_scores(anno_one_snr, pairwise_threshold)
        sa.plot_annotations_per_cluster(df_good_workers_pairwise, clustering_params, show_correctness,
                                        correctness_threshold, csv_filepath, img_filename,
                                        "minus workers with high pairwise scores", bigger_window_size)


json_filename = 'SynthTests_tissue.json'
gen_date = '20180719'
bg_type = 'tissue'

# ## Tissue, 50 spots
run_cluster_plots(['MAX_ISP_300_1_nspots50_spot_sig1.75_snr5_2.5',
                   'MAX_ISP_300_1_nspots50_spot_sig1.75_snr10_2.5',
                   'MAX_ISP_300_1_nspots50_spot_sig1.75_snr20_2.5'],
                  gen_date, bg_type, json_filename)

# ## Tissue, 100 spots
run_cluster_plots(['MAX_ISP_300_1_nspots100_spot_sig1.75_snr5_2.5',
                   'MAX_ISP_300_1_nspots100_spot_sig1.75_snr10_2.5',
                   'MAX_ISP_300_1_nspots100_spot_sig1.75_snr20_2.5'],
                  gen_date, bg_type, json_filename)

# ## Tissue, 150 spots
run_cluster_plots(['MAX_ISP_300_1_nspots150_spot_sig1.75_snr5_2.5',
                   'MAX_ISP_300_1_nspots150_spot_sig1.75_snr10_2.5',
                   'MAX_ISP_300_1_nspots150_spot_sig1.75_snr20_2.5'],
                  gen_date, bg_type, json_filename)

# # Background: Cells
json_filename = 'SynthData_cells.json'
gen_date = '20180719'
bg_type = 'cells'

# ## Cells, 50 spots
run_cluster_plots(['MAX_C3-ISP_300_1_nspots50_spot_sig1.75_snr5_2.5',
                   'MAX_C3-ISP_300_1_nspots50_spot_sig1.75_snr10_2.5',
                   'MAX_C3-ISP_300_1_nspots50_spot_sig1.75_snr20_2.5'],
                  gen_date, bg_type, json_filename)

# ## Cells, 100 spots
run_cluster_plots(['MAX_C3-ISP_300_1_nspots100_spot_sig1.75_snr5_2.5',
                   'MAX_C3-ISP_300_1_nspots100_spot_sig1.75_snr10_2.5',
                   'MAX_C3-ISP_300_1_nspots100_spot_sig1.75_snr20_2.5'],
                  gen_date, bg_type, json_filename)

# ## Cells, 150 spots
run_cluster_plots(['MAX_C3-ISP_300_1_nspots150_spot_sig1.75_snr5_2.5',
                   'MAX_C3-ISP_300_1_nspots150_spot_sig1.75_snr10_2.5',
                   'MAX_C3-ISP_300_1_nspots150_spot_sig1.75_snr20_2.5'],
                  gen_date, bg_type, json_filename)
| analysis_notebooks/batch_20180719/batch_20180719_cluster_sizes_thresholding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# I did this a bit flippantly before, but I want to formalize the process by which we estimate the uncertainty on emulator predictions.
from pearce.emulator import LemonPepperWet
from GPy.kern import *
import numpy as np
from os import path
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set()
# Paths to the training and test HDF5 archives for the emulator.
training_file = '/home/users/swmclau2/scratch/Aemulators/ds_hsab_h/PearceHSABDsCosmo.hdf5'
test_file = '/home/users/swmclau2/scratch/Aemulators/ds_hsab_test_h/PearceHSABDsCosmoTest.hdf5'
# + active=""
# #xi gm
# training_file = '/scratch/users/swmclau2/xi_gm_cosmo/PearceRedMagicXiGMCosmoFixedNd.hdf5'
# test_file = '/scratch/users/swmclau2/xi_gm_cosmo_test2/PearceRedMagicXiGMCosmoFixedNdTest.hdf5'
# -
em_method = 'gp'  # NOTE(review): not used anywhere in this chunk
fixed_params = {'z':0.0}  # emulate at redshift zero only
# + active=""
# hyperparams = {'kernel': (Linear(input_dim=7, ARD=True) + RBF(input_dim=7, ARD=True)+Bias(input_dim=7),
# RBF(input_dim=4, ARD=True)+Bias(input_dim=4) ), \
# 'optimize': True}
# -
emu = LemonPepperWet(training_file, fixed_params = fixed_params)
# statistic=None returns raw predicted and true values rather than a summary.
pred_y, data_y = emu.goodness_of_fit(test_file, statistic = None)
data_y.shape, pred_y.shape
# Reshape flat predictions to a (100, 35, 18) array; axis meanings are not
# stated here — the plotting below suggests (cosmology?, HOD?, r bin). TODO confirm.
pred_y = pred_y.reshape((100, 35, 18), order = 'F')
# +
#pred_y = np.swapaxes(pred_y, 0, 1)
# -
idx1, idx2 = 66, 10
# Note the swapped index order: the swapaxes/means below imply data_y is
# indexed (35, 100, 18) while pred_y is (100, 35, 18).
plt.plot(emu.scale_bin_centers, pred_y[idx1, idx2, :])
plt.plot(emu.scale_bin_centers, data_y[idx2, idx1, :])
plt.xscale('log')
# Mean fractional emulator error per radial bin, then overall
# (values are log10, hence the 10** de-logging).
np.mean( np.abs(10**pred_y.swapaxes(0,1) - 10**data_y)/(10**data_y), axis = (0,1))
np.mean( np.abs(10**pred_y.swapaxes(0,1) - 10**data_y)/(10**data_y), axis = (0,1,2))
# Contract over the realizations.
# Reshape to (18 bins, 5, 7, 1000); the 5/7 axis meanings are inferred from
# the slicing below only — TODO confirm against the emulator's layout.
pred_y_rs = pred_y.reshape((18, 5, 7, 1000), order='F')[:, 0, :, :]
data_y_rs = data_y.reshape((18, 5, 7, 1000), order='F').mean(axis=1)
# Fixed typo: `resha]pe` -> `reshape` (the original line was a SyntaxError).
data_y_rs2 = data_y.reshape((18, 5, 7, 1000), order='F')
R = np.zeros((18, 1000))
# `range` iterates identically on the Python 2 kernel this notebook uses and
# also works on Python 3 (the original `xrange` is Python-2-only).
for i in range(7):
    R += (data_y_rs2[:, :, i, :] - data_y_rs[:, i, :].reshape((-1, 1, 1000))).mean(axis=1)
# Sample covariance of the residuals across the 1000 realizations.
cov = R.dot(R.T) / (R.shape[1] - 1)
# Log-scale heatmap of the realization-scatter covariance computed above.
im = plt.imshow(np.log10(cov))
plt.colorbar(im)
# Emulator-error covariance: residuals between (de-logged) predictions and truth.
R = (10**pred_y_rs - 10**data_y_rs).reshape((18,-1), order = 'F')
cov2 = R.dot(R.T)/(R.shape[1]-1)
im = plt.imshow(np.log10(cov2))
plt.colorbar(im)
# + active=""
# np.savetxt('xi_gg_nh_emu_cov_v1.npy', cov)
# -
| notebooks/Compute Emu Covmat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.0-rc2
# language: julia
# name: julia-1.7
# ---
# # Sparse Matrices in Julia
#
# Julia supports sparse matrices in the `SparseMatrixCSC` type. It uses
# the CSC format, and the datatype `Tv` for the non-zeros and all indices `Ti`
# can optionally be specified, `SparseMatrixCSC{Tv,Ti}`.
#
# Some special sparse matrices can be created using the following functions (together with
# their dense equivalents):
#
# | Sparse | Dense | Description |
# |:------------------ |:--------------- |:---------------------------------------------- |
# | `spzeros(m,n)` | `zeros(m,n)` | m-by-n matrix of zeros |
# | `sparse(I, n, n)` | `Matrix(I,n,n)` | n-by-n identity matrix |
# | `Array(S)` | `sparse(A)` | Interconverts between dense and sparse formats |
# | `sprand(m,n,d)` | `rand(m,n)` | m-by-n random matrix (uniform) of density d |
# | `sprandn(m,n,d)` | `randn(m,n)` | m-by-n random matrix (normal) of density d |
#
# More general sparse matrices can be created with the syntax `A = sparse(rows,cols,vals)` which
# takes a vector `rows` of row indices, a vector `cols` of column indices,
# and a vector `vals` of stored values (essentially the COO format).
#
# The inverse of this syntax is `rows,cols,vals = findnz(A)`.
#
# The number of non-zeros of a matrix `A` are returned by the `nnz(A)` function.
# ## Example
#
# For the matrix considered above, the easiest approach is to start from the COO format
# and use `sparse(rows, cols, vals)`. The size of the matrix is determined from the
# indices, if needed it can also be specified as `sparse(rows, cols, vals, m, n)`.
using PyPlot, SparseArrays, LinearAlgebra # Packages used
# +
rows = [1,3,4,2,1,3,1,4,1,5]
cols = [1,1,1,2,3,3,4,4,5,5]
vals = [5,-2,-4,5,-3,-1,-2,-10,7,9]
A = sparse(rows, cols, vals, 5, 5)
# -
# We note that Julia only displays the non-zeros in the matrix. If needed, it can be converted
# to a dense matrix:
Array(A)
# But in many cases, it is enough to only show the *sparsity pattern* of the matrix
# (not the actual values). PyPlot can visualize this using a so-called spy plot:
spy(A, marker=".", markersize=24); ## Note - 0-based row and columns
# ## Operations on sparse matrices
#
# Many operations work exactly the same for sparse matrices as for dense matrices,
# including arithmetic operations, indexing, assignment, and concatenation:
B = A - 4.3A # Will automatically convert datatype of values to Float64
B[:,4] .= -1.1 # OK since B now has Float64 values (otherwise use Float64.(A) to convert)
C = A * A' # Matrix multiplication (note: typically increases nnz)
Matrix([B C]) # Concatenation, again automatic conversion (of C)
# However, note that some standard operations can make the matrix more dense, and it might
# not make sense to use a sparse storage format for the result. Also, inserting new elements
# is expensive (for example the operation on the 4th column of `B` in the example above).
# ## Incremental matrix construction
#
# Since Julia uses the CSC format for sparse matrices, it is inefficient to create
# matrices incrementally (that is, to insert new non-zeros into the matrix).
# As an example, consider building a matrix using a for-loop. We start with an empty
# sparse matrix of given size $N$-by-$N$, and insert a total of $10N$ new random entries
# at random positions.
"""
Incremental matrix construction using the sparse-format
Not recommended: Insertion into existing matrix very slow
"""
function incremental_test_1(N)
A = spzeros(N,N)
for k = 1:10N
i,j = rand(1:N, 2)
A[i,j] = rand()
end
return A
end
# We time the function for increasing values of $N$:
incremental_test_1(10); # Force compile before timing
for N in [100,1000,10000]
@time incremental_test_1(N);
end
# We can observe the approximately *quadratic* dependency on $N$, even though the
# number of non-zeros is only proportional to $N$. This is because of the inefficiencies
# with element insertion into a sparse matrix.
#
# Instead, we can build the same matrix using the COO format (row, column, and value indices)
# and only call `sparse` once:
"""
Incremental matrix construction using COO and a single call to sparse
Fast approach, avoids incremental insertion into existing array
"""
function incremental_test_2(N)
rows = Int64[]
cols = Int64[]
vals = Float64[]
for i = 1:10N
push!(rows, rand(1:N))
push!(cols, rand(1:N))
push!(vals, rand())
end
return sparse(rows, cols, vals, N, N)
end
incremental_test_2(10); # Force compile before timing
for N in [100,1000,10000,100000,1000000]
@time incremental_test_2(N);
end
# This version is orders of magnitude faster than the previous one, although it does not quite achieve
# linear dependency on $N$ (possibly because of the sorting inside `sparse`).
| textbook/_build/jupyter_execute/content/Sparse_Matrices/Sparse_Matrices_In_Julia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot
import numpy
import pandas
from sklearn import tree, metrics
# +
# Normalize a dataframe row/column: 'Yes' -> 1.0, 'No' -> 0.0, anything else -> float.
def replace_yes_no(row):
    """Return *row* as a list of floats, mapping 'Yes'/'No' to 1.0/0.0."""
    converted = []
    for cell in row:
        if cell == "Yes":
            converted.append(1.0)
        elif cell == "No":
            converted.append(0.0)
        else:
            converted.append(float(cell))
    return converted
# Read a CSV, normalize Yes/No answers to floats, and drop rows containing NaNs.
def load_dataset(path):
    """Load the survey CSV at *path* as an all-float dataframe without NaNs."""
    frame = pandas.read_csv(path)
    frame = frame.apply(replace_yes_no)
    return frame.dropna()
# -
# Load the survey and display it (Jupyter renders the last bare expression).
dataframe = load_dataset("datasets/SleepStudyData.csv")
dataframe
def get_data(dataframe):
    """Split *dataframe* into (features, target).

    *features* is the frame without the answer column; *target* is the
    'Enough' answer column itself.
    """
    target = dataframe["Enough"]
    features = dataframe.drop(columns=["Enough"])
    return features, target
input_data, output_data = get_data(dataframe)
# Pass training data to the decision tree classifier
classifier = tree.DecisionTreeClassifier()
classifier.fit(input_data, output_data)
# +
# Set the image size (in inches)
matplotlib.pyplot.figure(figsize = (25, 20))
# Generate the image
# class_names order matches the sorted class labels (0 -> "Not Enough", 1 -> "Enough").
output = tree.plot_tree(classifier, feature_names = input_data.columns,
    fontsize = 8, rounded = True, class_names = ["Not Enough", "Enough"])
# +
# Run the test dataset
test_dataset = load_dataset("datasets/SleepStudyPilot.csv")
test_input_data, test_output_data = get_data(test_dataset)
# Get the predictions
predictions = classifier.predict(test_input_data)
# Calculate the accuracy score
metrics.accuracy_score(test_output_data, predictions)
# -
# Compare feature means between respondents who did and did not get enough sleep.
enough = dataframe[dataframe["Enough"] == 1]
not_enough = dataframe[dataframe["Enough"] == 0]
enough.mean()
not_enough.mean()
| sleep_patterns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
datafile = 'data/ex1data1.txt'
cols = np.loadtxt(datafile,delimiter=',',usecols=(0,1),unpack=True) #Read in comma separated data
#Form the usual "X" matrix and "y" vector
X = np.transpose(np.array(cols[:-1]))  # features: all columns but the last
y = np.transpose(np.array(cols[-1:]))  # target: the last column, shaped (m, 1)
m = y.size # number of training examples
#Insert the usual column of 1's into the "X" matrix
X = np.insert(X,0,1,axis=1)  # bias/intercept column
#Plot the data to see what it looks like
plt.figure(figsize=(10,6))
plt.plot(X[:,1],y[:,0],'rx',markersize=10)
plt.grid(True) #Always plot.grid true!
plt.ylabel('Profit in $10,000s')
plt.xlabel('Population of City in 10,000s')
#gradient descent hyperparameters (used as globals by descendGradient below)
iterations = 1500
alpha = 0.01
# +
def h(theta,X): #Linear hypothesis function
    """Vectorized linear hypothesis: h(theta, X) = X @ theta."""
    return np.dot(X,theta)

def computeCost(mytheta,X,y): #Cost function
    """
    Squared-error cost J = 1/(2m) * (X@theta - y)^T (X@theta - y).

    mytheta is an n-dimensional column vector of theta values
    X is a matrix with n columns and m rows (first column all ones)
    y is a matrix with m rows and 1 column
    """
    #note to self: *.shape is (rows, columns)
    # Derive m locally from y rather than relying on the module-level global
    # `m`, so the cost is correct for any dataset passed in.
    m_local = y.size
    residuals = h(mytheta, X) - y
    return float((1. / (2 * m_local)) * np.dot(residuals.T, residuals))
#Test that running computeCost with 0's as theta returns 32.07:
initial_theta = np.zeros((X.shape[1],1)) #(theta is a vector with n rows and 1 columns (if X has n features) )
print(computeCost(initial_theta,X,y))
# -
#Actual gradient descent minimizing routine
def descendGradient(X, theta_start = np.zeros(2)):
    """
    Batch gradient descent for linear regression.

    theta_start is an n-dimensional column vector of initial theta guesses
    X is a matrix with n columns and m rows

    Relies on the module-level globals `y`, `alpha`, `iterations` and `m`
    defined in earlier cells.

    Returns (theta, thetahistory, jvec): the final theta, the theta path
    (for visualizing the minimization) and the cost at each iteration.
    """
    theta = theta_start
    jvec = []          # cost at each iteration, for the convergence plot
    thetahistory = []  # path of theta values, for the 3D minimization plot
    for _ in range(iterations):
        # Copy theta so all components really are updated simultaneously.
        # The original aliased `tmptheta = theta`, so later components were
        # computed from already-modified earlier components.
        tmptheta = theta.copy()
        jvec.append(computeCost(theta, X, y))
        thetahistory.append(list(theta[:, 0]))
        #Simultaneously updating theta values
        for j in range(len(tmptheta)):
            tmptheta[j] = theta[j] - (alpha / m) * np.sum(
                (h(theta, X) - y) * np.array(X[:, j]).reshape(m, 1))
        theta = tmptheta
    return theta, thetahistory, jvec
# +
#Actually run gradient descent to get the best-fit theta values
initial_theta = np.zeros((X.shape[1],1))
theta, thetahistory, jvec = descendGradient(X,initial_theta)
#Plot the convergence of the cost function
def plotConvergence(jvec):
    """Plot the cost-function value against the gradient-descent iteration number."""
    plt.figure(figsize=(10,6))
    xs = list(range(len(jvec)))
    plt.plot(xs, jvec, 'bo')
    plt.grid(True)
    plt.title("Convergence of Cost Function")
    plt.xlabel("Iteration number")
    plt.ylabel("Cost function")
    # Pad the x-axis by 5% on each side; `iterations` is the module-level setting.
    dummy = plt.xlim([-0.05*iterations, 1.05*iterations])
    #dummy = plt.ylim([4,8])
#Visualize convergence; clip the y-axis to the interesting cost range
plotConvergence(jvec)
dummy = plt.ylim([4,7])
# -
#Plot the line on top of the data to ensure it looks correct
def myfit(xval):
    """Evaluate the fitted line at xval; reads the module-level `theta`."""
    return theta[0] + theta[1]*xval
plt.figure(figsize=(10,6))
plt.plot(X[:,1],y[:,0],'rx',markersize=10,label='Training Data')
plt.plot(X[:,1],myfit(X[:,1]),'b-',label = 'Hypothesis: h(x) = %0.2f + %0.2fx'%(theta[0],theta[1]))
plt.grid(True) #Always plot.grid true!
plt.ylabel('Profit in $10,000s')
plt.xlabel('Population of City in 10,000s')
plt.legend()
# +
#Import necessary matplotlib tools for 3d plots
from mpl_toolkits.mplot3d import axes3d, Axes3D
from matplotlib import cm
import itertools
fig = plt.figure(figsize=(12,12))
# fig.gca(projection='3d') was deprecated in matplotlib 3.4 and removed in 3.6;
# add_subplot is the supported way to create a 3-D axes.
ax = fig.add_subplot(projection='3d')
xvals = np.arange(-10,10,.5)  # theta_0 grid
yvals = np.arange(-1,4,.1)    # theta_1 grid
myxs, myys, myzs = [], [], []
# Evaluate the cost on the full (theta_0, theta_1) grid
for david in xvals:
    for kaleko in yvals:
        myxs.append(david)
        myys.append(kaleko)
        myzs.append(computeCost(np.array([[david], [kaleko]]),X,y))
scat = ax.scatter(myxs,myys,myzs,c=np.abs(myzs),cmap=plt.get_cmap('YlOrRd'))
plt.xlabel(r'$\theta_0$',fontsize=30)
plt.ylabel(r'$\theta_1$',fontsize=30)
plt.title('Cost (Minimization Path Shown in Blue)',fontsize=30)
# Overlay the path gradient descent actually took
plt.plot([x[0] for x in thetahistory],[x[1] for x in thetahistory],jvec,'bo-')
plt.show()
# -
datafile = 'data/ex1data2.txt'
#Read into the data file
cols = np.loadtxt(datafile,delimiter=',',usecols=(0,1,2),unpack=True) #Read in comma separated data
#Form the usual "X" matrix and "y" vector
X = np.transpose(np.array(cols[:-1]))
y = np.transpose(np.array(cols[-1:]))
m = y.size # number of training examples
#Insert the usual column of 1's into the "X" matrix (bias/intercept term)
X = np.insert(X,0,1,axis=1)
#Quick visualize data
plt.grid(True)
plt.xlim([-100,5000])
dummy = plt.hist(X[:,0],label = 'col1')
dummy = plt.hist(X[:,1],label = 'col2')
dummy = plt.hist(X[:,2],label = 'col3')
plt.title('Clearly we need feature normalization.')
plt.xlabel('Column Value')
plt.ylabel('Counts')
dummy = plt.legend()
#Feature normalizing the columns (subtract mean, divide by standard deviation)
#Store the mean and std for later use (to normalize prediction inputs the same way)
#Note don't modify the original X matrix, use a copy
stored_feature_means, stored_feature_stds = [], []
Xnorm = X.copy()
for icol in range(Xnorm.shape[1]):
    stored_feature_means.append(np.mean(Xnorm[:,icol]))
    stored_feature_stds.append(np.std(Xnorm[:,icol]))
    #Skip the first column (the all-ones bias term must stay 1)
    if not icol: continue
    #Faster to not recompute the mean and std again, just used stored values
    Xnorm[:,icol] = (Xnorm[:,icol] - stored_feature_means[-1])/stored_feature_stds[-1]
#Quick visualize the feature-normalized data
plt.grid(True)
plt.xlim([-5,5])
dummy = plt.hist(Xnorm[:,0],label = 'col1')
dummy = plt.hist(Xnorm[:,1],label = 'col2')
dummy = plt.hist(Xnorm[:,2],label = 'col3')
plt.title('Feature Normalization Accomplished')
plt.xlabel('Column Value')
plt.ylabel('Counts')
dummy = plt.legend()
# +
#Run gradient descent with multiple variables, initial theta still set to zeros
#(Note! This doesn't work unless we feature normalize! "overflow encountered in multiply")
initial_theta = np.zeros((Xnorm.shape[1],1))
theta, thetahistory, jvec = descendGradient(Xnorm,initial_theta)
#Plot convergence of cost function:
plotConvergence(jvec)
# -
#print "Final result theta parameters: \n",theta
print("Check of result: What is price of house with 1650 square feet and 3 bedrooms?")
ytest = np.array([1650.,3.])
#To "undo" feature normalization, we "undo" 1650 and 3, then plug it into our hypothesis
#(index x+1 skips the stored stats of the bias column)
ytestscaled = [(ytest[x]-stored_feature_means[x+1])/stored_feature_stds[x+1] for x in range(len(ytest))]
ytestscaled.insert(0,1)
print("$%0.2f" % float(h(theta,ytestscaled)))
from numpy.linalg import inv
#Implementation of normal equation to find analytic solution to linear regression
def normEqtn(X,y):
    """
    Return the least-squares theta satisfying (X^T X) theta = X^T y.

    Solves the linear system directly instead of forming the explicit
    inverse of X^T X as before, which is cheaper and numerically more stable.
    """
    return np.linalg.solve(np.dot(X.T,X), np.dot(X.T,y))
print ("Normal equation prediction for price of house with 1650 square feet and 3 bedrooms")
print ("$%0.2f" % float(h(normEqtn(X,y),[1,1650.,3])))
| Python/1-Linear Regression/intuition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Análisis de los datos obtenidos
# Uso de ipython para el análsis y muestra de los datos obtenidos durante la producción.Se implementa un regulador experto. Los datos analizados son del día 11 de Agosto del 2015
#
# Los datos del experimento:
# * Duración 30min
# * Filamento extruido: 537cm
# * $T: 150ºC$
# * $V_{min} tractora: 1.5 mm/s$
# * $V_{max} tractora: 3.4 mm/s$
# * Los incrementos de velocidades en las reglas del sistema experto son las mismas.
#Import the libraries used in this analysis
import numpy as np
import pandas as pd
import seaborn as sns
#Print the version of each library (for reproducibility)
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))
#Open the CSV file with the sampled production data
datos = pd.read_csv('ensayo1.CSV')
# %pylab inline
#List of the file's columns we will work with
columns = ['Diametro X', 'RPM TRAC']
#Show a summary of the collected data
datos[columns].describe()
#datos.describe().loc['mean',['Diametro X [mm]', 'Diametro Y [mm]']]
# Plot the diameter and the puller speed over time
# +
# DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
# label-based access now uses .loc.
graf=datos.loc[:, "Diametro X"].plot(figsize=(16,10),ylim=(0.5,3))
graf.axhspan(1.65,1.85, alpha=0.2)  # highlight the 1.65-1.85 mm quality band
graf.set_xlabel('Tiempo (s)')
graf.set_ylabel('Diámetro (mm)')
#datos['RPM TRAC'].plot(secondary_y='RPM TRAC')
# -
box = datos.loc[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
box.axhspan(1.65,1.85, alpha=0.2)
# In the boxplot, most of the data sit above the mean (first quartile). We will try to lower that percentage. As a first approximation, we will make larger speed increments when the diameter is between $1.80mm$ and $1.75 mm$ (case 5): increments of $d_v*2$ instead of $d_v*1$.
# Compare Diametro X against Diametro Y to inspect the filament ratio
plt.scatter(x=datos['Diametro X'], y=datos['Diametro Y'], marker='.')
# #Data filtering
# Samples where a diameter is below $0.9$ are assumed to be sensor errors and are filtered out below. (NOTE(review): the original text said ">= 0.9", but the code *keeps* rows where both diameters are >= 0.9 — confirm the intended threshold direction.)
datos_filtrados = datos[(datos['Diametro X'] >= 0.9) & (datos['Diametro Y'] >= 0.9)]
# +
#datos_filtrados.loc[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
# -
# ##X/Y representation
plt.scatter(x=datos_filtrados['Diametro X'], y=datos_filtrados['Diametro Y'], marker='.')
# #Analysis of the ratio data
ratio = datos_filtrados['Diametro X']/datos_filtrados['Diametro Y']
ratio.describe()
# pd.rolling_mean / pd.rolling_std were removed in pandas 0.20;
# the supported API is Series.rolling(window).mean() / .std().
rolling_mean = ratio.rolling(50).mean()
rolling_std = ratio.rolling(50).std()
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
# #Quality limits
# Count how many samples exceed the quality limits.
# $Th^+ = 1.85$ and $Th^- = 1.65$
Th_u = 1.85 # upper quality threshold (mm)
Th_d = 1.65 # lower quality threshold (mm)
data_violations = datos[(datos['Diametro X'] > Th_u) | (datos['Diametro X'] < Th_d) |
                (datos['Diametro Y'] > Th_u) | (datos['Diametro Y'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
| ipython_notebooks/06_regulador_experto/ensayo1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Stat Analysis LME
#
# +
#import statements
# %matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import pymer4
from pymer4.models import Lmer
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "sans-serif"
import seaborn as sns
sns.set() # Setting seaborn as default style
sns.set_theme() # NOTE(review): sns.set() is an alias of set_theme(), so one of these two calls is redundant — confirm before removing
# -
#read in source csv (utf-8-sig strips the BOM that Excel-exported CSVs often carry)
df = pd.read_csv (r'latest_results_pavlovia.csv',
                   encoding='utf-8-sig')
df.info()
print([*df])
# ## Data Preprocessing
# ## Analysis
# +
# define Variables
# define dependent variable (reaction time)
rt = df['choice_response.rt']
# define independent variable (experimental condition)
cond = df['cond']
# NOTE(review): the `rt` and `cond` Series above are never passed to the model —
# the Lmer formula below looks columns up by name in `df`, and `df` has
# 'choice_response.rt' rather than a column literally named 'rt'. Confirm the
# formula/column names against the pymer4 documentation.
#condition = pd.get_dummies(data=condition, drop_first=True)
# -
model = Lmer('rt ~ cond', data=df)
display(model.fit())
#result = model.fit()
#print result.summary()
| data_analysis/stat_lme.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Table of Contents
# * [Monitoring the photovoltaic energy production](#Monitoring-the-photovoltaic-energy-production)
# * [Load the data](#Load-the-data)
# * [Model of the data](#Model-of-the-data)
# * [Preprocessing](#Preprocessing)
# * [Phase I SPC: Estimation of the regular patterns](#Phase-I-SPC:-Estimation-of-the-regular-patterns)
# * [Selection of the pool of IC series](#Selection-of-the-pool-of-IC-series)
# * [Standardisation](#Standardisation)
# * [Histogram of the standardised data](#Histogram-of-the-standardised-data)
# * [Phase II SPC: Monitoring](#Phase-II-SPC:-Monitoring)
# * [Choice of the block length](#Choice-of-the-block-length)
# * [Choice of the target shift size](#Choice-of--the-target-shift-size)
# * [Control limit of the chart.](#Control-limit-of-the-chart.)
# * [Phase III: Estimation of shift sizes and shapes using SVMs](#Phase-III:-Estimation-of-shift-sizes-and-shapes-using-SVMs)
# * [Selection of the length of the input vector](#Selection-of-the-length-of-the-input-vector)
# * [Choice of the regularization parameter](#Choice-of-the-regularization-parameter)
# * [Training and validation of the SVMs](#Training-and-validation-of-the-SVMs)
# * [Monitoring and results display](#Monitoring-and-results-display)
#
# # Monitoring the photovoltaic energy production
# +
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
from dateutil.relativedelta import relativedelta
import pkg_resources as pkg
import warnings
warnings.simplefilter('ignore')
from cusvm import preprocessing as pre
from cusvm import autocorrelations as acf
from cusvm import cusum_design_bb as chart
from cusvm import svm_training as svm
from cusvm import alerts as appl
# -
# ## Load the data
# +
data_path = pkg.resource_filename(pkg.Requirement.parse("cusvm"), 'data')
df = pd.read_csv(data_path + '/PVdaily.csv')#daily sum of load factors
data = np.array(df)[:,1:] #drop the first (timestamp/index) column
data = data.astype('float')
data = data/96 #to work with average instead of sum of load factors (96 quarter-hours per day)
names = list(df.columns)[1:] #names of DSOs
(n_obs, n_series) = data.shape
### load time (timestamps pickled alongside the data)
### NOTE: unpickling is acceptable here only because the file ships inside the
### trusted cusvm package; never unpickle untrusted input.
with open(data_path + '/time_daily', 'rb') as file:
    my_depickler = pickle.Unpickler(file)
    time = my_depickler.load()
# -
def multiplot(data, time, start_time=2015, stop_time=2021, ind=(3,4,14,20,21), same_ax=False):
    """Plot series data[:, i] for each i in `ind` as vertically stacked subplots.

    data       : 2-D array (observations x series)
    time       : 1-D array of decimal-year timestamps, same length as data
    start_time : first decimal year to display
    stop_time  : last decimal year to display
    ind        : indices of the series (columns) to plot
    same_ax    : if True, force the same y-limits on every subplot

    Reads the module-level `names` list for the subplot labels.
    The default for `ind` is a tuple rather than a list so that the default
    argument is immutable (mutable defaults are a classic Python pitfall).
    """
    start = np.where(time >= start_time)[0][0]
    stop = np.where(time >= stop_time)[0][0]
    if stop-start < 1000:
        ind_ticks = np.arange(start, stop, 60) #ticks roughly every two months
        x_ticks = np.round(time[ind_ticks],2)
    else :
        ind_ticks = np.arange(start, stop, 365) #ticks roughly every year
        x_ticks = np.round(time[ind_ticks])
    count = 1
    fig = plt.figure(figsize=(8.0, 6.0))
    # list(...) so both tuple and list `ind` index the columns identically
    max_val = np.max(data[start:stop, list(ind)])*1.1
    for i in ind:
        f = fig.add_subplot(len(ind), 1, count)
        plt.ylabel(names[i])
        #plt.plot(data[start:stop, i])
        plt.plot(time[start:stop], data[start:stop, i])
        if same_ax:
            f.set_ylim([0, max_val])
        if count < len(ind):
            #hide x tick labels on all but the bottom subplot
            f.axes.get_xaxis().set_ticklabels([])
        plt.xticks(x_ticks)
        count += 1
    plt.show()
# ## Model of the data
# The production data, denoted by $P(i,t)$, may be decomposed into different components:
#
# $$ P(i,t) = \eta(i,t) \ \chi(i,t) \ c(t) $$
#
# where
# - $c(t)$ represents a common signal between all regions. This quantity models the overall amount of solar radiations that reaches the ground in Belgium. It also accounts for weather conditions that affect the whole country.
# - $\chi(i,t)$s are piece-wise constant scaling factors that typically represent the altitude, composition of the atmosphere (e.g. the level of pollution) and micro-climate of the different areas. They are allowed to change with time as the climate or pollution may change over the years but are expected to vary at a slower peace than the other variables.
# - $\eta(i,t)$ corresponds to the different amount of solar radiations that are received in the regions due to e.g. localized weather conditions or to local malfunctions. This variable will therefore be monitored in the following, to detect potential anomalies in the network.
#
# In the following, we assume that the random variables $c$ and $\eta$ are continuous and that $\chi$, $c$ and $\eta$ are jointly independent.
# ## Preprocessing
# To monitor $\eta$, we need to isolate it from the other terms of the model.
# To this end, we first rescale the data to roughly compensate for the different levels of the regions:
#
# $$ P_{resc}(i,t) = \frac{P(i,t)}{k'(i,t)}. $$
#
# This can be done by calling the function ``rescaling()``, which has two mandatory arguments:
# - ``data`` : the data from the whole panel
# - ``period_rescaling`` : the period on which the scaling factors should be computed. <br>
#
# The function computes piece-wise constant scaling factors as the slope of the ordinary least-squares regression between each series and the median of the data. Those factors are calculated on a pre-specified period (``period_rescaling``), which is set here to one year. This value seems appropriate since the degree of pollution or the micro-climate are not expected to change much over time.
#
# Then, we estimate $\hat c(t)$, a proxy for the common component, as the point-wise median of the rescaled series along time:
#
# $$ \hat c(t) = \underset{1 \leq i\leq N}{\text{med}} P_{resc}(i,t). $$
#
# To do so, we apply the function ``median()`` to the rescaled data.
# +
#Rescale each series by yearly OLS slope factors, then take the cross-sectional
#median of the rescaled panel as a proxy for the common signal c(t)
data_rescaled, k_factors = pre.rescaling(data, period_rescaling=365)
#multiplot(data_rescaled, time, 2016)
med = pre.median(data_rescaled)
fig = plt.figure(figsize=(8.0, 4.0))
plt.plot(time, med); plt.show()
# -
# Now that we estimated $c$, we can remove it from the (raw) data:
#
# $$ \hat \eta(i,t) \hat \chi(i,t)= \frac{P(i,t)} {\hat c(t)}. $$
#
# To this end, we use the function ``remove_signal()``. As most of the functions in the package, its first argument is the data. The second argument of the function is set here to ``model = 'multiplicative'``. With this setting, the common signal is divided from the data whereas it would have been subtracted if ``model='additive'``. The third argument of the function allows the specification of the common signal. By default, it is computed as the median of the data. Here, it is specified as the median of the rescaled data instead, ``ref=med``.
#Divide out the estimated common signal from the raw data (multiplicative model)
ratio = pre.remove_signal(data, ref=med, model='multiplicative')
multiplot(ratio, time)
# After rescaling once again the data, we obtain finally an estimation of the mean of the localized variations, $\eta$, that will be denoted by $\hat \mu_{\eta}(i,t)$ in the following:
#
# $$ \hat \mu_{\eta}(i,t) = \frac{P(i,t)} {\hat c(t) \hat \chi(i,t)}. $$
#
# In practice, we thus apply once more time the function ``rescaling()`` on the previous ratio to eliminate the level ($\chi$) of the data.
#
# This variable will then be monitored in the following.
#Rescale once more to remove the per-series level; the result estimates mu_eta(i,t)
ratio, chi_factors = pre.rescaling(ratio, period_rescaling=365)
multiplot(ratio, time)
# ## Phase I SPC: Estimation of the regular patterns
# In the first stage of the method, we select a subset of in-control (IC) series from the panel. We then estimate the mean and variance on those IC data along time. The data from the whole panel (in-control or out-of-control) are later standardised by these parameters.
# ### Selection of the pool of IC series
# First, a subset (also called 'pool' in the following) of IC or stable series is selected from the network.
# To this end, a robust version of the MSE is computed for each series with the function ``pool_clustering()``.
# The series are then clustered in two groups based on their MSE value with the k-means algorithm <a id="ref-1" href="#cite-Lloyd1957">(Lloyd 1957)</a> and the subset with the lowest values is selected as the IC group.
### Selection of the IC pool (k-means clustering on a robust per-series MSE;
### the low-MSE cluster is taken as the in-control group)
pool = pre.pool_clustering(ratio)
names_IC = [names[i] for i in range(n_series) if i in pool]
print('IC : ', names_IC)
names_OC = [names[i] for i in range(n_series) if i not in pool]
print('OC : ', names_OC)
# ### Standardisation
# Then, the (IC and OC) series are standardised by time-varying mean and variance. These quantities are computed across the pool of IC stations and along the time using K nearest neighbors regression method. Hence, the mean and the variance are always computed on the same number of values (=$K$) even if the data contain missing values. <br>
#
# To this end, we first select the value of $K$ with the function ``choice_K()`` to obtain the 'best' standardisation of the complete panel, in the sense that its empirical mean becomes close to zero and its empirical variance close to one. This function computes the mean and standard deviation (std) of the standardised data for different values of $K$. Usually, the mean and std exhibit a similar behaviour: the mean comes closer to zero and the std draws near one with increasing values of $K$, as can be seen below. $K$ is thus selected as the knee of the std curve. <br>
# The function ``choice_K()`` has several arguments:
# - ``data`` : the data from the whole panel
# - ``data_IC`` : the data from the IC pool only
# - ``start`` : starting value for $K$
# - ``stop`` : stopping value for $K$
# - ``step`` : step value for $K$ <br>
# The different values of $K$ are thus contained in the range [``start``, ``stop``] with a certain ``step``. <br>
#
# Finally, the data from the whole panel (IC and OC) are standardised by the IC mean and variance using the function ``standardisation()``. This function has three mandatory arguments:
#
# - ``data`` : the data from the whole panel
# - ``data_IC`` : the data from the IC pool only
# - ``K`` : the number of nearest neighbours.
#
# +
### Choice of K (number of nearest neighbours used for the standardisation)
ratioIC = ratio[:, pool]
### standardise the whole panel by the IC mean/variance along time
K_knee = pre.choice_K(ratio, ratioIC, start=50, stop=2000, step=50)
print('K :', K_knee)
data_stn, dataIC_stn = pre.standardisation(ratio, ratioIC, K_knee)
multiplot(data_stn, time)
# -
# ### Histogram of the standardised data
### histogram of the standardised panel (should be ~mean 0, std 1)
fig = plt.figure(figsize=(8.0, 6.0))
plt.hist(data_stn[~np.isnan(data_stn)], bins='auto', density=True, facecolor='b')
plt.title("Standardised data")
# NOTE(review): '%4f' is a minimum *field width* of 4, not 4 decimals;
# '%.4f' was probably intended — confirm before changing the output.
plt.text(3, 0.3, 'mean:' '%4f' %np.nanmean(data_stn))
plt.text(3, 0.2, 'std:' '%4f' %np.nanstd(data_stn))
plt.grid(True)
plt.show()
# ## Phase II SPC: Monitoring
# Having selected an IC pool and standardised the data by the IC mean and variance, we can now move on to the second phase of the method. <br>
# In this stage, we calibrate the CUSUM chart on the data using the block bootstrap (BB), a procedure that randomly samples blocks of observations with repetition from the data to generate new series similar to the observations.
# ### Choice of the block length
# To calibrate the CUSUM chart, we first select an appropriate value for the block length. This length depends on the autocorrelation of the data and can be automatically selected using the function ``block_length_choice()``.
# Large blocks usually better model the autocorrelation of the data but at the same time do not represent well the variance and the mean of the series. And conversely. Therefore, we compute an appropriate value for the block length by the following procedure. <br>
# For each block length tested, we resample several series of observations using a block bootstrap procedure. Then, the mean squared error (mse) of the mean, standard deviation and autocorrelation are computed on
# the resampled series (with respect to the original data). The appropriate value for the block length is finally selected as the knee of the autocorrelation curve. <br>
# Intuitively, this length corresponds to the first value such that the main part of the autocorrelation of the series is well represented. <br>
#
# The function ``block_length_choice()`` contains the main following arguments (for more info look directly at the documentation on top of the function):
# - ``data`` : the IC data
# - ``bbl_min`` : starting value for the block length (here set to 1)
# - ``bbl_max`` : upper value for the block length (here set to 50)
# - ``bbl_min`` : step value for the block length (here set to 1) <br>
# The different values for the block length are thus contained in the range [``bbl_min``, ``bbl_max``] with step value ``bbl_step``.
#Block length = knee of the autocorrelation-mse curve over lengths 1..50 (step 1)
bb_length = acf.block_length_choice(dataIC_stn, 1, 50, 1)
print('Block length: ', bb_length)
# ### Choice of the target shift size
# The target shift size that it is interesting to detect depends on the deviations that the series experience. <br>
# This parameter may be estimated recursively with the function ``shift_size()`` as follows.
# For an initial value of the shift size, we compute the control limits of the chart. Then, the chart is applied on out-of-control series and the size of the deviations is estimated after each alert using a classical formula <a id="ref-2" href="#cite-Montgomery2005">(Montgomery 2005)</a>. This formula is only valid for iid normal observations however it may still be used as a first approximation (the real shift sizes will be predicted later using a support vector regressor).
# Then, the value of the shift size is updated such as a specified quantile of the shift sizes distribution.
# The procedure is then iterated until the shift size converges. <br>
# Since the shift sizes are expected to variate over a wide range, we select the target value as the 0.4 quantile of the shift sizes distribution.
# <br>
#
# The function ``shift_size()`` contains the main following arguments:
# - ``data`` : the data from the whole panel
# - ``pool`` : the index of the IC series in the panel
# - ``delta`` : the initial value for the target shift size
# - ``ARL0_threshold`` : the pre-specified value of the average run length (ARL0). <br>
# This value controls the rate of false positives (the rate of false positivse is inversely proportional to the ARL0). It is set to 200.
# - ``block_length`` : the length of the blocks
# - ``qt`` : the quantile of the shift sizes distribution
# - ``missing_values`` : policy to treat the missing values. <br>
# The missing values policy is set to 'omit'. In this mode, the chart is calculated on data that do not contain missing observations.
# +
bb_length = 8 # NOTE(review): hard-codes the block length, overriding the value computed above — confirm this is intended
delta_init = 2 #initial value for the target shift size
ARL0 = 200 #pre-specified ARL0 (controls the false positives rate)
### estimate an appropriate shift size (0.4 quantile of the estimated shift-size distribution)
delta_target = chart.shift_size(data_stn, pool, delta=delta_init, ARL0_threshold=ARL0, block_length=bb_length,
                qt=0.4, missing_values ='omit')[1]
print('delta target: ', np.round(delta_target,1))
# -
# ### Control limit of the chart.
# Having selected the block length and the target shift size, the control limits of the CUSUM chart can finally be adjusted on the IC series.
# To this end, we use the function ``limit_CUSUM()``, which adjusts the limits until a pre-specified value for the rate of false positive (ARL0) is reached at a desired accuracy. <br>
#
# The function ``limit_CUSUM()`` contains the main following arguments:
# - ``dataIC`` : the IC data
# - ``delta`` : the target shift size
# - ``ARL0_threshold`` : the pre-specified value of the average run length (ARL0). <br>
# This value controls the rate of false positives (the rate of false positivse is inversely proportional to the ARL0). It is set by default to 200.
# - ``L_plus`` : the upper value of the searching interval
# - ``block_length`` : the length of the blocks
# - ``missing_values`` : policy to treat the missing values. <br>
# The missing values policy is set to 'omit'. In this mode, the chart is calculated on data that do not contain missing observations.
#
# The function returns the positive value for the control limit. The negative limit has the same value as the positive one with opposite sign since the distribution of the data is (almost) symmetric.
delta_target = 2 # NOTE(review): overrides the shift size estimated above — confirm this is intended
#Adjust the CUSUM control limit on the IC data until the target ARL0 is reached
control_limit = chart.limit_CUSUM(dataIC_stn, delta=delta_target, L_plus=4,
                block_length=bb_length, missing_values='omit')
# ## Phase III: Estimation of shift sizes and shapes using SVMs
# In this third and last phase of the monitoring method, the support vector machines (SVM) for extracting and classifying out-of-control patterns are designed. Those methods are composed of a support vector regressor (SVR) to predict the size of the shifts in a continuous range and a support vector classifier (SVC) to classify the shape of the encountered deviations among a pre-defined number of classes.
# ### Selection of the length of the input vector
# To this end, we first select the length of the input vector of the SVMs.
# It represents the number of past observations that are fed to the support vector classifier and regressor after each alert. The regressor and classifier then predict the form and the size of the shift that causes the alert based on this vector.
# Hence, the length should be sufficiently large to ensure that most of the shifts are contained within the input vector while maintaining the computing efficiency of the method. This is usually not a problem for the large shifts that are quickly detected by the chart. However the smallest shifts may be detected only after a certain amount of time and therefore require large vectors. <br>
# Hence, the length is selected as an upper quantile of the run length distribution, computed on data shifted by the smallest shift size that we aim to detect. <br>
#
# To select this length, we can use the function ``input_vector_length()``. It works as follows. <br>
# For each monte-carlo run, a new series of observations is sampled from the IC data using a block bootstrap procedure. Then, a jump of size ``delta_min`` is simulated on top of the sample. The run length of the chart is then evaluated. The length of the input vector is finally selected as a specified upper quantile (quantile >=0.5) of the run length distribution. If the quantile is unspecified, an optimal quantile is selected by locating the 'knee' of the quantiles curve. <br>
#
# The function ``input_vector_length()`` has the following main arguments:
# - ``data`` : the IC standardised data
# - ``delta_min`` : the target shift size
# - ``L_plus`` : the value of the positive control limit
# - ``block_length`` : the length of the blocks
# - ``qt`` : the specified upper quantile
#Input-vector length = 0.95 upper quantile of the run-length distribution
#at the target shift size
wdw_length = svm.input_vector_length(dataIC_stn, delta_target, control_limit,
                block_length=bb_length, qt=0.95);
print('wdw_length: ', wdw_length)
# ### Choice of the regularization parameter
# The regularization parameter ($\lambda$) represents the trade-off between misclassification and regularization.
# It may be automatically selected with the function ``choice_C()``.
# This function selects $\lambda$ (sometimes called $C$ in the literature) to maximize the performance of the SVM classifier and regressor.
# To this end, it trains the classifier and regressor on simulated deviations for different values of $\lambda$ in the range [`` start``, ``stop``] with a certain `` step``.
# The function then returns the values of $\lambda$ that maximizes the accuracy for the classifier, the mean squared error (MSE) and the mean absolute percentage error (MAPE) for the regressor. <br>
#
# This function contains the main following arguments:
#
# - ``data`` : the IC standardised data
# - ``L_plus`` : the value of the positive control limit
# - ``delta`` : the target shift size
# - ``wdw_length`` : the length of the input vector
# - ``scale`` : the scale parameter of the half-normal distributions that are used to randomly select the sizes of the artificial deviations.
# - ``start`` : starting value for $\lambda$
# - ``stop`` : stopping value for $\lambda$
# - ``step`` : step value for $\lambda$
# The different values of $\lambda$ are thus contained in the range [``start``, ``stop``] with a certain ``step``. <br>
# - ``delay`` : flag to start the monitoring after a random delay in the range [0, ``delay``]
# - ``block_length`` : the length of the blocks
# - ``confusion`` : flag to print the confusion matrix
# +
wdw_length = 10 # NOTE(review): overrides the length computed above — confirm this is intended
scale = 4 #scale of the half-normal distribution used to draw artificial shift sizes
### find an optimal value for the regularization parameter (lambda/C in [1, 11), step 1)
C_choices = svm.choice_C(dataIC_stn, control_limit, delta_target, wdw_length, scale,
                 start = 1, stop = 11, step = 1,
                 delay=2*wdw_length, block_length=bb_length, confusion=False)
# -
# ### Training and validation of the SVMs
# Having defined the three main parameters of the SVM procedures, we can now train the classifier and regressor on artificial deviations.
# Those are obtained as follows. After randomly sampling series of IC data by a BB procedure, three types of artificial deviations of size $\delta$ are added top of those series:
#
# - jumps: $x(t) = x_{ic}(t)+\delta$ ;
# - drifts (with varying power-law functions) : $x(t) = x_{ic}(t)+\frac{\delta}{T'}(t)^{a}$, where $a$ is randomly selected in the range $[1.5, 2]$ ;
# - oscillating shifts (with different frequencies): $x(t) = x_{ic}(t) \delta \sin{(\eta \pi t)}$, where $\eta$ is randomly selected in the range $[\frac{\pi}{m}, \frac{3\pi}{m}]$. <br>
#
# In practice, the function ``training_svm()`` does both the training and the testing of the classifier and regressor. It has the same mandatory arguments as the function ``choice_C()`` that we see previously. It has also an optional argument, ``C``, that allows the user to specify to the value of the parameter $\lambda$.
#train the shift-shape classifier and shift-size regressor with the selected
#regularization parameter C (chosen from C_choices above)
C = 6
reg, clf = svm.training_svm(dataIC_stn, control_limit, delta_target,
                wdw_length, scale, delay=wdw_length*2, C=C, block_length=bb_length)
# +
### save models
# filename = 'svr_elia_daily.sav'
# pickle.dump(reg, open(filename, 'wb'))
# filename = 'svc_elia_daily.sav'
# pickle.dump(clf, open(filename, 'wb'))
# +
### or load the models previously trained
#reg = pickle.load(open('../svm_models/svr_elia_daily.sav', 'rb'))
#clf = pickle.load(open('../svm_models/svc_elia_daily.sav', 'rb'))
# -
# ## Monitoring and results display
# In the previous sections, we adjust the parameters of the CUSUM chart and train the SVM methods on the photovoltaic energy production data.
# We will now apply the method that is fully calibrated to actually monitor those data.
# The function ``alerts_info()`` can be used to apply the complete procedure (CUSUM chart plus SVM methods) on a particular series of the panel.
# It contains the main following arguments:
# - ``data`` : a standardised series to be monitored
# - ``L_plus`` : the value of the positive control limit
# - ``delta`` : the target shift size
# - ``wdw_length`` : the length of the input vector
# - ``clf`` : the trained classifier
# - ``reg`` : the trained regressor
#
# Six quantities are returned by the function: the positive and negative shapes of the shifts (``form_plus`` and `` form_minus``), sizes of the shifts (``size_plus`` and ``size_minus``) and chart statistics (``C_plus`` and `` C_minus``). When the series is not in alert, the sizes and forms of the shifts are set to ``NaN``. <br>
# The function also has an optional argument ``cut``, which is set by default to ``2L_plus``. This argument defines the maximal value that the chart statistics are allowed to take, i.e. $|C^+_j|$, $|C^-_j| \leq 2L$. It thus prevents the chart from taking too high values and therefore staying in alert for longer periods than the actual deviations of the series.
#
# Then, diverse functions can be called to display the main features of the monitoring.
# The function ``plot_3panels()`` returns a plot composed of three panels, which shows respectively (1) the standardised series of data which is analysed, (2) the CUSUM chart statistics ($|C^+|$, $|C^-|$) and (3) the shapes and sizes of the shifts that are predicted by the SVM when the chart is in alert.
# The function has the main following arguments:
#
# - ``data`` : a standardised series to be monitored
# - ``L_plus`` : the value of the positive control limit
# - ``time`` : an array with the time of the observations
# - ``form_plus``, ``form_minus`` : the predicted shift forms by SVMs
# - ``size_plus``, ``size_minus`` : the predicted shift sizes by SVMs
# - ``C_plus``, ``C_minus`` : the CUSUM statistics
# - ``name`` : the code name of the series
# - ``time_start`` : the starting time value of the figure
# - ``time_stop`` : the stopping time value of the figure
# - ``x_ticks`` : the locations of the ticks for the x-axis of the figure
# - ``labels_ticks`` : the labels of the ticks for the x-axis of the figure
# The labels can only be passed if the locations are passed as well.
# - ``fig_size`` : the width and height of the figure
# +
# Run the complete procedure (CUSUM chart + SVM predictions) on the single
# series named 'Resa' and display it in three different panel layouts.
region = [idx for idx, name in enumerate(names) if name == 'Resa'][0]
i = region
data_indv = data_stn[:, i]  # monitored (standardised) series
# CUSUM statistics plus SVM-predicted shift shapes/sizes while in alert
(form_plus, form_minus, size_plus, size_minus,
 C_plus, C_minus) = appl.alerts_info(data_indv, control_limit,
                                     delta_target, wdw_length, clf, reg)
# (1) data, (2) chart statistics, (3) predicted shifts
fig = appl.plot_3panels(data_indv, control_limit, time,
                        form_plus, form_minus, size_plus, size_minus,
                        C_plus, C_minus, names[i], time_start=2015,
                        time_stop=2021)
# same as above, with the raw ratio series as an extra panel
fig = appl.plot_4panels(data_indv, ratio[:, i], control_limit, time,
                        form_plus, form_minus, size_plus, size_minus,
                        C_plus, C_minus, names[i], time_start=2015,
                        time_stop=2021)
# compact single-panel view restricted to 2016-2017
fig = appl.plot_1panel(data_indv, time,
                       form_plus, form_minus, size_plus, size_minus, names[i],
                       time_start=2016, time_stop=2017)
# +
# Same monitoring of the 'Resa' series, but plotted against calendar dates
# and restricted to the year 2015, with explicit x-axis ticks and labels.
region = [idx for idx, name in enumerate(names) if name == 'Resa'][0]
# time in form of dates (one point per day)
dates = np.arange(datetime(2015, 1, 1), datetime(2021, 1, 27),
          timedelta(days=1)).astype(datetime)
# x_ticks: quarterly dates from Jan 2015 up to and including Jan 2016
# (equivalent to appending 3-month steps while the last tick is < Jan 2016)
dates_months = [datetime(2015, 1, 1) + relativedelta(months=3 * k)
                for k in range(5)]
x_ticks = dates_months
# labels of x_ticks in 'day-month' form
lab_ticks = [tick.strftime('%d-%b') for tick in x_ticks]
i = region
data_indv = data_stn[:, i]  # monitored series
(form_plus, form_minus, size_plus, size_minus,
 C_plus, C_minus) = appl.alerts_info(data_indv, control_limit,
                                     delta_target, wdw_length, clf, reg)
fig = appl.plot_3panels(data_indv, control_limit, dates,
                        form_plus, form_minus, size_plus, size_minus,
                        C_plus, C_minus, names[i], time_start=datetime(2015, 1, 1),
                        time_stop=datetime(2016, 1, 1), x_ticks=x_ticks,
                        labels_ticks=lab_ticks, fig_size=[10, 7])
# -
# <!--bibtex
#
# @ARTICLE{Mathieu2019,
# AUTHOR={{Mathieu}, S. and {<NAME>}, R. and {Delouille}, V. and {Lefevre}, L. and {<NAME>.},
# TITLE="{Uncertainty quantification in sunspot counts}",
# JOURNAL={The Astrophysical Journal},
# YEAR={2019},
# volume = {886},
# number={1},
# pages= {},
# doi={https://doi.org/10.3847/1538-4357/ab4990}
# }
#
#
# @article{Kruskal_Wallis,
# author = {{<NAME>. and {<NAME>.},
# title ="{Use of ranks in one-criterion variance analysis}",
# journal = {Journal of the American Statistical Association},
# volume = {47},
# number = {260},
# pages = {583-621},
# year = {1952},
# doi = {10.1191/1471082X04st068oa},
# URL = {https://www.jstor.org/stable/2280779}
# }
#
# @ARTICLE{Qiu2014,
# author={{<NAME>. and {Xiang}, D.},
# title = "{Univariate dynamic screening system: an approach for identifying individuals with irregular longitudinal behaviour}",
# journal = {Technometrics},
# volume = {56},
# number = {2},
# pages = {248-260},
# year = {2014},
# doi = {https://doi.org/10.1080/00401706.2013.822423},
# issn = {0040-1706}
# }
#
# @BOOK{Montgomery2005,
# author = {{<NAME>},
# title = "{Introduction to Statistical Quality Control}",
# publisher={Wiley},
# year = {2004},
# edition = {5th}
# }
#
# @TechReport{Lloyd1957,
# author={{Lloyd}, S.P. },
# title = "{Least squares quantization in PCM}",
# institution = {Bell Lab},
# year = {1957},
# type = {Technical Report RR-5497},
# number = {5497}
# }
#
#
#
# @ARTICLE{Cheng2011,
# author={{Cheng}, C-S. and {Chen}, P-W. and {Huang}, K-K. },
# title = "{Estimating the shift size in the process mean with support vector regression and neural network}",
# journal = {Expert Systems with Applications},
# year = {2011},
# volume={38},
# number={8},
# pages={10624-10630},
# doi = {https://doi.org/10.1016/j.eswa.2011.02.121}
# }
#
#
# -->
#
# <!--bibtex
#
# @ARTICLE{Mathieu2019,
# AUTHOR={{<NAME>. and {<NAME>}, R. and {Delouille}, V. and {Lefevre}, L. and {Ritter}, C.},
# TITLE="{Uncertainty quantification in sunspot counts}",
# JOURNAL={The Astrophysical Journal},
# YEAR={2019},
# volume = {886},
# number={1},
# pages= {},
# doi={https://doi.org/10.3847/1538-4357/ab4990}
# }
#
#
# @article{Kruskal_Wallis,
# author = {{<NAME>. and {<NAME>.},
# title ="{Use of ranks in one-criterion variance analysis}",
# journal = {Journal of the American Statistical Association},
# volume = {47},
# number = {260},
# pages = {583-621},
# year = {1952},
# doi = {10.1191/1471082X04st068oa},
# URL = {https://www.jstor.org/stable/2280779}
# }
#
# @article{Page1961,
# author = {{<NAME>.},
# title ="{Cumulative sum charts}",
# journal = {Technometrics},
# volume = {3},
# number = {1},
# pages = {1-9},
# year = {1961},
# URL = {https://www.jstor.org/stable/1266472}
# }
# -->
#
# # References
#
# <a id="cite-Lloyd1957"/><sup><a href=#ref-1>[^]</a></sup><NAME>.P. . 1957. _Least squares quantization in PCM_.
#
# <a id="cite-Montgomery2005"/><sup><a href=#ref-2>[^]</a></sup>Montgomery, D. 2004. _Introduction to Statistical Quality Control_.
#
#
| docs/elia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="qTAyT2zjuK7X" colab_type="text"
# # Atividade Extra
#
# ---
#
# **Aluna:** <NAME>
#
# **Matrícula:** 20170059474
#
# **Email:** <EMAIL>
# + [markdown] id="hCHKk6ziuApQ" colab_type="text"
#
# <img width="60" src="https://drive.google.com/uc?export=view&id=1JQRWCUpJNAvselJbC_K5xa5mcKl1gBQe">
# + id="XaLyqvnUimrA" colab_type="code" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 140} outputId="b21a63d4-62f7-41e4-83cd-6666a1d0fadc"
from google.colab import files

# Interactively upload the election CSV files into the Colab runtime.
uploaded = files.upload()
for fn in uploaded.keys():
    # Fixed message formatting: missing space after the quoted name and the
    # "lenth" typo in the original string.
    print("User uploaded file '{name}' with length {length} bytes".format(
        name=fn, length=len(uploaded[fn])))
# + id="9R1Fg8E-EWUI" colab_type="code" colab={}
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + id="pXk4wd7O3QOY" colab_type="code" colab={}
# Load the 2018 per-municipality/per-zone vote counts: one file for the whole
# country (BR) and one for Rio Grande do Norte (RN). The exports are
# Latin-1 encoded and use ';' as the field separator.
df_br = pd.read_csv("votacao_candidato_munzona_2018_BR.csv", encoding='latin-1', delimiter =";")
df_rn = pd.read_csv("votacao_candidato_munzona_2018_RN.csv", encoding='latin-1', delimiter =";")
# + [markdown] id="6oVIMz_LJcML" colab_type="text"
# # 1 - Votos por coligações
# + [markdown] id="nLXIXsEQ7RK6" colab_type="text"
# Objetivando identificar o viés politico da sociedade, este gráfico mostra a quantidade de votos (em milhões) por coligações. Será apresentado 9 coligações, que são compostas pelos seguintes partidos:
#
# * **VAMOS SEM MEDO DE MUDAR O BRASIL:** PSOL
# * **MUDANÇA DE VERDADE:** PODE
# * **UNIDOS PARA TRANSFORMAR O BRASIL:** REDE
# * **ESSA É A SOLUÇÃO:** MDB
# * **PARTIDO ISOLADO:** PATRI, PPL, NOVO, DC, PSTU
# * **PARA UNIR O BRASIL:** PSDB
# * **BRASIL SOBERANO:** PDT
# * **O POVO FELIZ DE NOVO:** PT
# * **BRASIL ACIMA DE TUDO, DEUS ACIMA DE TODOS:** PSL
#
#
# + id="NkSM_gAC9b35" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="806aafe0-ea1a-4bd5-8fdc-20eba36a6386"
# Total nominal votes per coalition, sorted ascending so the largest
# coalition ends up as the bottom bar of the horizontal bar chart.
coligacao_voto = (df_br
                  .pivot_table(values=['QT_VOTOS_NOMINAIS'],
                               index=['NM_COLIGACAO'],
                               aggfunc='sum')
                  .reset_index()
                  .sort_values(by="QT_VOTOS_NOMINAIS"))
sns.set(style="white")
plt.figure(figsize=(10,5))
graf_colig_voto = sns.barplot(y=coligacao_voto["NM_COLIGACAO"], x=coligacao_voto["QT_VOTOS_NOMINAIS"])
plt.ylabel('Coligações')
plt.xlabel('Quantidade de Votos')
plt.title('Total de votos por coligações')
# remove the full box drawn around the axes
for side in ("right", "bottom", "top", "left"):
    graf_colig_voto.spines[side].set_visible(False)
plt.show()
# + [markdown] id="CScaNIKx5bLn" colab_type="text"
# # 2 - Distribuição de votos válidos por UF
# + [markdown] id="iVp-kuVPWtNj" colab_type="text"
# Neste tópico é possível ver a distribuição de votos em cada estado brasileiro. A coluna "ZZ" refere-se aos votos de pessoas no exterior.
#
# + id="aE8jF8xu8vGG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="2718fdaa-5157-4d05-8c4c-4394d10ea5d7"
# Total nominal votes per state (UF).
# BUG FIX: the original grouped by 'QT_VOTOS_NOMINAIS' instead of 'SG_UF',
# which summed (string-concatenated) the state codes per vote-count value and
# produced a meaningless chart; grouping must be by state.
votos_por_uf = df_br[["SG_UF","QT_VOTOS_NOMINAIS"]].groupby("SG_UF").sum().reset_index()
plt.figure(figsize=(15,10))
sns.set(style="white")
graf_votos_uf = sns.barplot(x=votos_por_uf["SG_UF"], y=votos_por_uf["QT_VOTOS_NOMINAIS"])
plt.xticks(rotation= 45)
plt.xlabel('Estados')
plt.ylabel('Quantidade de Votos')
plt.title('Total de votos por estado')
graf_votos_uf.spines["right"].set_visible(False)
graf_votos_uf.spines["bottom"].set_visible(False)
graf_votos_uf.spines["top"].set_visible(False)
graf_votos_uf.spines["left"].set_visible(False)
plt.show()
# + [markdown] id="oPU64-fuLBAO" colab_type="text"
# # 3 - Votos recebidos pelos candidatos ao 2° turno
# + [markdown] id="S_9wRCvOsy_e" colab_type="text"
# Neste tópico será analisado a quantidade de votos por candidatos ao segundo turno. Ele foi dividido em 3 subanálises. Os dois primeiros referem-se aos candidatos à presidência, <NAME> e <NAME>, e o terceiro faz referência aos candidatos à governador do Rio Grande do Norte (RN).
#
#
# ## Candidatos à presidência
#
# No primeiro gráfico, foi selecionado apenas os votos do RN, e pode-se notar que o candidato <NAME> continua à frente de <NAME>. Mas agora olhando de forma mais ampla, no segundo gráfico, que mostra os votos em todo país, ocorre o contrário, Bolsonaro está com a maior quantidade de votos.
#
# + id="vH66Z3w890Id" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="d8e0f9ee-bf87-4b5d-edce-d438ba7f5379"
#RESULTADOS BR
# Nationwide votes for the candidates that reached the run-off
# ('2º TURNO' rows of the BR table).
candidatos_2_turno = df_br.loc[df_br["DS_SIT_TOT_TURNO"] == '2º TURNO']
sns.set(style="white")
plt.figure(figsize=(15,10))
graf_voto_candidato = sns.barplot(x=candidatos_2_turno['NM_URNA_CANDIDATO'], y=candidatos_2_turno["QT_VOTOS_NOMINAIS"], palette=['green','red'])
plt.xlabel('Candidatos')
plt.ylabel('Quantidade de Votos')
plt.title('Votos por candidatos ao 2º turno')
# strip the axes box
for side in ("right", "bottom", "top", "left"):
    graf_voto_candidato.spines[side].set_visible(False)
plt.show()
# + id="VwZvHAZacvB7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="777767e4-48cc-4582-fff1-e83069eaf997"
#RESULTADOS RN
# Same chart restricted to votes cast in Rio Grande do Norte.
candidatos_2_turno_rn = candidatos_2_turno.loc[candidatos_2_turno['SG_UF'] == "RN"]
sns.set(style="white")
plt.figure(figsize=(10,5))
graf_voto_candidato_rn = sns.barplot(x=candidatos_2_turno_rn['NM_URNA_CANDIDATO'], y=candidatos_2_turno_rn["QT_VOTOS_NOMINAIS"], palette=['red','green'])
plt.xlabel('Candidatos')
plt.ylabel('Quantidade de Votos')
plt.title('Votos no RN por candidatos ao 2º turno')
# strip the axes box
for side in ("right", "bottom", "top", "left"):
    graf_voto_candidato_rn.spines[side].set_visible(False)
plt.show()
# + [markdown] id="uDOYE71ozpq9" colab_type="text"
# ## Candidatos à governador do RN
#
# Agora, os candidatos à governador do RN. No segundo turno, a população Norte Riograndense deverá decidir entre Fátima Bezerra (PT) e Carlos Eduardo (PDT). O gráfico mostra uma diferença de aproximadamente 17% entre eles, com a candidata Fátima Bezerra com a maior porcentagem dos votos.
# + id="C6tgxuZLHWze" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 345} outputId="eead3dec-6c83-471e-8475-3f2c0dfa1215"
# Run-off votes for RN governor, aggregated per candidate and shown as a
# pie chart of vote shares.
turno2_rn = (df_rn[df_rn['DS_SIT_TOT_TURNO'] == '2º TURNO']
             [["QT_VOTOS_NOMINAIS", 'NM_URNA_CANDIDATO']]
             .groupby('NM_URNA_CANDIDATO')
             .sum()
             .reset_index())
labels = turno2_rn['NM_URNA_CANDIDATO']
sizes = turno2_rn["QT_VOTOS_NOMINAIS"]
colors = ['green','yellow']
fig, ax = plt.subplots(subplot_kw=dict(aspect="equal"))
ax.pie(sizes, labels=labels, autopct=' %1.1f%% ', shadow=True, startangle=90, colors=colors)
ax.set_title("Votos por candidatos ao 2º turno no RN")
plt.show()
# + [markdown] id="W25ykJt5avNK" colab_type="text"
# # 4 - Votos por zonas eleitorais em Natal
# + [markdown] id="BTJGlEHAbEqJ" colab_type="text"
# Este gráfico foi pensado com objetivo de verificar a distribuição de pessoas por zonas eleitorais em Natal/RN. Para isso, foi utilizado a quantidade de votos e as cinco zonas do município.
#
# Com base no gráfico, é perceptível a cuidadosa divisão de pessoas nas zonas eleitorais. Divisão essa muito importante para evitar alguns transtornos, como a longa espera em filas para votar.
#
# Obs: As informações a respeito das zonas eleitorais em Natal foram tiradas no site do [TRE](http://www.tre-rn.jus.br/o-tre/zonas-eleitorais/enderecos-e-telefones/enderecos-e-telefones-das-zonas-eleitorais-do-rn).
# + id="Pd7RGAfe6nPz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 345} outputId="a817937c-3c85-4cb5-c5ac-3dec3b9e2d50"
# Percentage of votes per electoral zone in Natal/RN.
# BUG FIX: the DataFrame was assigned to 'edf_zonas_natal' but read back as
# 'df_zonas_natal', raising a NameError on first use; the name is now
# consistent.
# NOTE(review): assumes Natal's five zones land at positions [0,1,2,3,-1]
# of the sorted per-zone index — confirm against the TRE-RN zone listing.
df_zonas_natal = pd.DataFrame(df_rn, columns=['NR_ZONA','QT_VOTOS_NOMINAIS']).groupby("NR_ZONA").sum().iloc[[0,1,2,3,-1],:].reset_index()
total_votos_zona_natal = df_zonas_natal["QT_VOTOS_NOMINAIS"].sum()
labels = df_zonas_natal["NR_ZONA"]
sizes = df_zonas_natal["QT_VOTOS_NOMINAIS"]
colors = ['CornflowerBlue','Sienna','Gray','DarkOrange','DarkSlateGray']
fig1 , ax = plt.subplots(subplot_kw = dict(aspect="equal"))
ax.pie (sizes, labels = labels , autopct = ' %1.1f%% ',shadow = True , startangle = 90, colors = colors, textprops = dict(color="w"))
ax.legend(title="Zonas eleitorais", loc="best", bbox_to_anchor = (1 , 0 , 0.5 , 1))
ax.set_title("Percentual de votos por zonas eleitorais em Natal")
plt.show()
# + [markdown] id="gvQbPuXWmQiH" colab_type="text"
# # 5 - Número de candidatos do RN por partido
# + [markdown] id="_40t1AD772UG" colab_type="text"
# Este gráfico tem como objetivo observar quantos candidatos no Rio Grande do Norte estão filiados à cada partido. Percebe-se uma maior concentração de candidatos nos partidos Solidariedade e PSOL.
# + id="1ShWTiBahrz7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 675} outputId="b1b2f2fd-44b5-4f3a-b2dd-7ce1eb8deb60"
# Number of RN candidates affiliated with each party: count candidate names
# grouped by party abbreviation, sorted ascending for the horizontal bars.
candit_partido = (df_rn
                  .pivot_table(values=["NM_URNA_CANDIDATO"],
                               index=["SG_PARTIDO"],
                               aggfunc='count')
                  .reset_index()
                  .sort_values(by="NM_URNA_CANDIDATO"))
sns.set(style="white")
plt.figure(figsize=(15,10))
graf_cand_part = sns.barplot(x=candit_partido["NM_URNA_CANDIDATO"], y=candit_partido["SG_PARTIDO"])
plt.xlabel("Número de candidatos")
plt.ylabel("Partidos")
plt.title("Número de candidatos do RN por partido")
# remove the axes box
for side in ("right", "bottom", "top", "left"):
    graf_cand_part.spines[side].set_visible(False)
plt.show()
| Eleicoes_2018.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
# NOTE: this cell uses the TensorFlow 1.x graph API (tf.placeholder,
# tf.truncated_normal); it will not run as-is under TF 2.x eager mode.
input = tf.placeholder(tf.float32, (None, 32, 32, 3))  # (batch, height, width, channels)
filter_weights = tf.Variable(tf.truncated_normal((8, 8, 3, 20))) # (height, width, input_depth, output_depth)
filter_bias = tf.Variable(tf.zeros(20))
strides = [1, 2, 2, 1] # (batch, height, width, depth)
padding = 'SAME'
# With 'SAME' padding the output spatial size is ceil(32 / 2) = 16 per the
# formulas quoted below, so `conv` has shape (None, 16, 16, 20).
conv = tf.nn.conv2d(input, filter_weights, strides, padding) + filter_bias
conv
# The padding algorithm Tensorflow uses is not exactly the same as normal padding. [More info](https://www.tensorflow.org/api_guides/python/nn#Convolution) of how Tensorflow does padding
#
# TensorFlow uses the following equation for 'SAME' vs 'PADDING'
#
# **SAME Padding**, the output height and width are computed as:
#
#     out_height = ceil(float(in_height) / float(strides[1]))
#
# out_width = ceil(float(in_width) / float(strides[2]))
#
# **VALID Padding**, the output height and width are computed as:
#
#     out_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))
#
# out_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))
| cnn/Tensorflow - Padding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Jupyter-notebooks" data-toc-modified-id="Jupyter-notebooks-1"><span class="toc-item-num">1 </span>Jupyter notebooks</a></span><ul class="toc-item"><li><span><a href="#Interface" data-toc-modified-id="Interface-1.1"><span class="toc-item-num">1.1 </span>Interface</a></span></li><li><span><a href="#Kernels" data-toc-modified-id="Kernels-1.2"><span class="toc-item-num">1.2 </span>Kernels</a></span></li><li><span><a href="#Cell-types" data-toc-modified-id="Cell-types-1.3"><span class="toc-item-num">1.3 </span>Cell types</a></span></li><li><span><a href="#Edit-and-Command-modes" data-toc-modified-id="Edit-and-Command-modes-1.4"><span class="toc-item-num">1.4 </span>Edit and Command modes</a></span></li></ul></li><li><span><a href="#Coding-with-jupyter-notebooks" data-toc-modified-id="Coding-with-jupyter-notebooks-2"><span class="toc-item-num">2 </span>Coding with jupyter notebooks</a></span></li><li><span><a href="#Format-with-markdown-and-HTML" data-toc-modified-id="Format-with-markdown-and-HTML-3"><span class="toc-item-num">3 </span>Format with <strong><em>markdown</em></strong> and <em>HTML</em></a></span><ul class="toc-item"><li><span><a href="#Markdown" data-toc-modified-id="Markdown-3.1"><span class="toc-item-num">3.1 </span><strong><em>Markdown</em></strong></a></span><ul class="toc-item"><li><span><a href="#Titles-and-subtitles" data-toc-modified-id="Titles-and-subtitles-3.1.1"><span class="toc-item-num">3.1.1 </span>Titles and subtitles</a></span></li></ul></li></ul></li><li><span><a href="#Subtitle-2" data-toc-modified-id="Subtitle-2-4"><span class="toc-item-num">4 </span>Subtitle 2</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Italics-and-bold" data-toc-modified-id="Italics-and-bold-4.0.1"><span class="toc-item-num">4.0.1 </span>Italics and bold</a></span></li><li><span><a href="#Item-lists" data-toc-modified-id="Item-lists-4.0.2"><span class="toc-item-num">4.0.2 
</span>Item lists</a></span></li><li><span><a href="#Quotes" data-toc-modified-id="Quotes-4.0.3"><span class="toc-item-num">4.0.3 </span>Quotes</a></span></li><li><span><a href="#Horizontal-rules" data-toc-modified-id="Horizontal-rules-4.0.4"><span class="toc-item-num">4.0.4 </span>Horizontal rules</a></span></li><li><span><a href="#Hyperlinks" data-toc-modified-id="Hyperlinks-4.0.5"><span class="toc-item-num">4.0.5 </span>Hyperlinks</a></span></li><li><span><a href="#Equations" data-toc-modified-id="Equations-4.0.6"><span class="toc-item-num">4.0.6 </span>Equations</a></span></li><li><span><a href="#Images" data-toc-modified-id="Images-4.0.7"><span class="toc-item-num">4.0.7 </span>Images</a></span></li></ul></li><li><span><a href="#HTML" data-toc-modified-id="HTML-4.1"><span class="toc-item-num">4.1 </span>HTML</a></span><ul class="toc-item"><li><span><a href="#Images-with-HTML" data-toc-modified-id="Images-with-HTML-4.1.1"><span class="toc-item-num">4.1.1 </span>Images with HTML</a></span></li><li><span><a href="#Centering-text" data-toc-modified-id="Centering-text-4.1.2"><span class="toc-item-num">4.1.2 </span>Centering text</a></span></li></ul></li></ul></li><li><span><a href="#Notebook-sharing-as-PDF-or-HTML-files." data-toc-modified-id="Notebook-sharing-as-PDF-or-HTML-files.-5"><span class="toc-item-num">5 </span>Notebook sharing as PDF or HTML files.</a></span><ul class="toc-item"><li><span><a href="#Export-as-PDF" data-toc-modified-id="Export-as-PDF-5.1"><span class="toc-item-num">5.1 </span>Export as PDF</a></span></li><li><span><a href="#Export-as-HTML" data-toc-modified-id="Export-as-HTML-5.2"><span class="toc-item-num">5.2 </span>Export as HTML</a></span></li><li><span><a href="#Publish-online" data-toc-modified-id="Publish-online-5.3"><span class="toc-item-num">5.3 </span>Publish online</a></span></li></ul></li><li><span><a href="#Links" data-toc-modified-id="Links-6"><span class="toc-item-num">6 </span>Links</a></span></li></ul></div>
# + [markdown] slideshow={"slide_type": "slide"}
# # jupyter notebooks
# + [markdown] slideshow={"slide_type": "slide"}
# ## Jupyter notebooks
# + [markdown] slideshow={"slide_type": "subslide"}
# ___jupyter notebook___ is a tool for developing and presenting projects involving programming in ___python___, but also in ___julia___ or ___R___.
#
# Indeed, the name ___jupyter___ makes reference to those 3 supported programming languages.
# + [markdown] slideshow={"slide_type": "subslide"}
# Two characteristics make notebooks very attractive:
#
# * For programming, the possibility to run smaller pieces of code and test them interactively, instead of having a single, big program.
# * For presentation or sharing, the rich formatting possibilities as well as the option to export the complete project as PDF or HTML file to share.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Interface
# + [markdown] slideshow={"slide_type": "subslide"}
# The anaconda bundle includes the jupyter notebook already.
#
# It can be launched either from the anaconda navigator interface, or directly typing _jupyter notebook_ in the start menu in windows:
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/jupyter_win_menu.png' width='300'>
# + [markdown] slideshow={"slide_type": "subslide"}
# When jupyter is launched, it starts a server that runs in a web browser. The operation and editing of the notebooks will then be done in the browser, like here in _mozilla firefox_:
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/delete_notebook.png' width='1000'>
# + [markdown] slideshow={"slide_type": "subslide"}
# The main screen shows a file and directory tree. From here, it is possible to navigate folders and open files. While it is possible to open text files and images (among others) in the browser, the main use of this dashboard panel is to create, rename and delete, to start and stop _notebooks_. In order to perform these actions, it is necessary to check the box to the left of the file(s) and then look for the desired action on top of the screen. By the way, it is also possible to create and delete folders from this screen.
# + [markdown] slideshow={"slide_type": "fragment"}
# These notebooks are the documents where we will include code as well as output and formatted text and images.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Kernels
# + [markdown] slideshow={"slide_type": "subslide"}
# It is possible to create and run notebooks that execute code in a number of programming languages, like R or Julia. These languages are then referred to as ___kernels___.
#
# We will use the ___python 3___ kernel.
#
# (A list of available kernels can be consulted [in this link](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels))
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/python_kernel.png' width='1000'>
# + [markdown] slideshow={"slide_type": "subslide"}
# The ___kernel___ refers then to the programming language. It is often mentioned in messages like "Kernel Idle" or "Kernel Busy" in the menu bar (top right).
#
# Also in the menu bar, in the ___kernel___ menu, are commands to interrupt or restart it, which can be very useful if a piece of code is taking too long to execute:
# + slideshow={"slide_type": "fragment"}
import time
# Deliberately long-running loop (1000 iterations x 2 s) so you can practice
# interrupting the kernel from the Kernel menu, as described above.
for i in range(1000):
    print('.', end='')
    time.sleep(2) # wait 2 seconds
# + [markdown] slideshow={"slide_type": "fragment"}
# Also note that the circle on the top-right, next to the kernel's name (Python 3) changes color when the kernel is busy, and that the cell is marked with an asterisk [*] on the left.
# + [markdown] slideshow={"slide_type": "subslide"}
# Note that after you created a new notebook, you will start with a document featuring a single empty cell (more on cells later). This document is ___\"Untitled\"___. You can change the notebook's name either from the first screen, or by clicking on the title on top of the page:
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/rename.png' width='700'>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Cell types
# + [markdown] slideshow={"slide_type": "subslide"}
# Notebooks are built by ___cells___ which are the units of code that will be executed independently, although it is possible to run them all one after the other.
# + [markdown] slideshow={"slide_type": "fragment"}
# ___Cells___ can be of three types:
# * Code
# * Markdown
# * Raw text
# + [markdown] slideshow={"slide_type": "subslide"}
# Basically, the three types of cells are used for:
# * Code → Writing pieces of program, that will be executed in python3
# * Markdown → Rich format, explanations, text, titles, images, equations
# * Raw text → Text without any format, in a raw form, that won't be interpreted as format or code
# + [markdown] slideshow={"slide_type": "subslide"}
# The type can be changed using the cell menu:
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/cells1.png' width='700'>
# + [markdown] slideshow={"slide_type": "subslide"}
# or with the toolbar:
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/cells2.png' width='700'>
# + [markdown] slideshow={"slide_type": "subslide"}
# or using keyboard shortcuts in ___command mode___:
# * <kbd>y</kbd> → Make this cell ___code___
# * <kbd>m</kbd> → Make this cell ___markdown format___
# * <kbd>r</kbd> → Make this cell ___raw input___
# + [markdown] slideshow={"slide_type": "fragment"}
# Note 1: ___Headers___ are ___markdown___ too, we will come to that later.
# + [markdown] slideshow={"slide_type": "fragment"}
# And that is a very good point to jump to the ___edit___ and ___command___ modes!
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Edit and Command modes
# + [markdown] slideshow={"slide_type": "subslide"}
# When writing inside a cell's body, we say that we are in ___edit___ mode, as we will change the contents of the cell.
#
# By contrast, when in ___command___ mode, we can use the keyboard to change some aspects of the cells, to run the code in a cell, move them, delete them, insert new ones, etc.
# + [markdown] slideshow={"slide_type": "fragment"}
# This is similar to the use of combinations of keys in other software, like the widely used shortcuts <kbd>CTRL</kbd>+<kbd>c</kbd> and <kbd>CTRL</kbd>+<kbd>v</kbd>: They are not supposed to write the letters ___c___ or ___v___ but to execute a command.
# + [markdown] slideshow={"slide_type": "subslide"}
# A short explanation of the ___edit___ and ___command___ modes is shown with the menu `Help` → `Keyboard shortcuts`
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/keyboard_shortcuts.png' width='600'>
# + [markdown] slideshow={"slide_type": "subslide"}
# It is possible to switch to ___command___ mode by pressing the <kbd>ESC</kbd> key from a cell, or with a mouse click outside it.
# + [markdown] slideshow={"slide_type": "fragment"}
# Conversely, we can go into ___edit___ mode if we hit <kbd>ENTER</kbd> or with a mouse click inside the cell's body.
# + [markdown] slideshow={"slide_type": "subslide"}
# Three commonly used commands, used to run the code inside a cell without need of the toolbar are:
# * <kbd>CTRL</kbd>+<kbd>ENTER</kbd> → to run the code in the current cell
# * <kbd>Shift</kbd>+<kbd>ENTER</kbd> → to run the code in the current cell and select next cell below
# * <kbd>ALT</kbd>+<kbd>ENTER</kbd> → to run the code in the current cell and insert an empty new cell under it
# + [markdown] slideshow={"slide_type": "subslide"}
# Of course we can also run the code in a cell using the toolbar:
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/run_buttons.png' width='300'>
# + [markdown] slideshow={"slide_type": "fragment"}
# Which takes us to the "coding" part...
# + [markdown] slideshow={"slide_type": "slide"}
# ## Coding with jupyter notebooks
# + [markdown] slideshow={"slide_type": "subslide"}
# Each cell in a jupyter notebook can run python code, provided that the python kernel is installed and running correctly, and that the cell is configured as `code`.
#
# By default, all cells are first configured as `code`.
# + [markdown] slideshow={"slide_type": "subslide"}
# A cell can be run either using a combination of keys, or using the buttons in the toolbar:
# + [markdown] slideshow={"slide_type": "fragment"}
# * <kbd>CTRL</kbd>+<kbd>ENTER</kbd> → to run the code in the current cell
# * <kbd>Shift</kbd>+<kbd>ENTER</kbd> → to run the code in the current cell and select next cell below
# * <kbd>ALT</kbd>+<kbd>ENTER</kbd> → to run the code in the current cell and insert an empty new cell under it
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/run_buttons.png' width='300'>
# + [markdown] slideshow={"slide_type": "subslide"}
# One interesting feature is that we can run a cell _after_, and then one _before_, and the values of variables or imported libraries are preserved.
# + slideshow={"slide_type": "fragment"}
print( a )  # 'a' is assigned in the cell below — run that one first to demonstrate out-of-order execution
# + slideshow={"slide_type": "fragment"}
a = 2  # defines 'a' for the print( a ) cell above
# + [markdown] slideshow={"slide_type": "subslide"}
# While running cells without order can be useful sometimes, it leads to untidy and messy code.
#
# It is a good practice to structure the code to be run from beginning to end.
#
# To run the complete notebook, we can use the menus:
# * `Cell` → `Run All`
# * `Kernel` → `Restart & Run All`
# + [markdown] slideshow={"slide_type": "subslide"}
# Another feature of jupyter notebooks, regarding interactive coding, is that they often give some format to many outputs.
#
# As an example, we don't always need to use `print` to check the value of a variable:
# + slideshow={"slide_type": "fragment"}
a = 3
b = 5
c = a + b
c  # last expression of a cell is auto-displayed (no print needed)
# + slideshow={"slide_type": "fragment"}
a = 3
b = 5
c = a + b
print( c )  # explicit print produces the same visible result as auto-display
# + [markdown] slideshow={"slide_type": "subslide"}
# However, this only works for the last line:
# + slideshow={"slide_type": "fragment"}
a = 3
a  # NOT displayed — only the last line of the cell is auto-displayed
b = 5
b  # not displayed either
c = a + b  # last line is an assignment, so this cell shows no output at all
# + [markdown] slideshow={"slide_type": "subslide"}
# If we wish to see all output, it is better to actually use `print()`:
# + slideshow={"slide_type": "fragment"}
a = 3
print( a )
b = 5
print( b )
c = a + b
print( c )  # print() shows every value, not just the last line
# + [markdown] slideshow={"slide_type": "subslide"}
# Lastly, the ___input label___ `In[ ]:` on the left of the cells indicates that they are ___code___ cells, and also if they were run and how:
# * `In[1]:` 1st cell to execute code
# * `In[10]:` 10th cell to execute code
# * `In[ ]:` Cell with code not yet executed
# * `In[*]:` Cell executing code right now
# + [markdown] slideshow={"slide_type": "fragment"}
# Similarly, the ___output labels___ `Out[ ]:` indicate to which cell that particular output corresponds, rather than the order of execution.
# + slideshow={"slide_type": "fragment"}
a * 2  # the Out[..] label matches this cell's In[..] number, not the execution order
# + [markdown] slideshow={"slide_type": "subslide"}
# If you have made several changes to your notebook and want to check everything again, remember to use the entries in the `Kernel` menu:
#
# * `Kernel` → `Restart`
# * `Kernel` → `Restart & Clear Output`
# * `Kernel` → `Restart & Run All`
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Format with ___markdown___ and _HTML_
# + [markdown] slideshow={"slide_type": "fragment"}
# The main way to provide rich format for jupyter notebooks is called ___markdown___.
#
# ___HTML___ is also supported. It provides more sophisticated output, but is a little more complicated.
# + [markdown] slideshow={"slide_type": "fragment"}
# In both cases the cells need to be set to `markdown` and must be run, like `code` cells are.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### ___Markdown___
# + [markdown] slideshow={"slide_type": "subslide"}
# Markdown is a light formatting language, to provide rich format using plain text editors (like the jupyter notebooks)
# + [markdown] slideshow={"slide_type": "fragment"}
# It supports a number of formatting options; we will see some of the most common here. For more references, check the links at the end of this notebook, or search for `markdown` + `jupyter notebook` in your web browser.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Titles and subtitles
# + [markdown] slideshow={"slide_type": "subslide"}
# Titles are created by a number of leading number signs (hashtags), `#`. The number of signs denotes the level of the title or subtitle:
# + slideshow={"slide_type": "fragment"} active=""
# # Title
# + [markdown] slideshow={"slide_type": "fragment"}
# # Title
# + slideshow={"slide_type": "subslide"} active=""
# ## Subtitle 2
# + [markdown] slideshow={"slide_type": "fragment"}
# ## Subtitle 2
# + slideshow={"slide_type": "fragment"} active=""
# ###### Subtitle 6
# + [markdown] slideshow={"slide_type": "fragment"}
# ###### Subtitle 6
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Italics and bold
# + [markdown] slideshow={"slide_type": "subslide"}
# *Italics* and **Bold** fonts are produced by either using 1 or 2 asterisks, respectively:
# * `*`Italics`*`
# * `**`Bold`**`
#
# Or 1 or 2 underlines:
# * `_`Italics`_`
# * `__`Bold`__`
#
# before and after the text to be formatted.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Item lists
# + [markdown] slideshow={"slide_type": "subslide"}
# Lists are produced with a leading character, which can be an asterisk `*`, a plus sign `+`, or a minus sign `-`:
# + slideshow={"slide_type": "fragment"} active=""
# * North
# - South
# + East
# * West
# + [markdown] slideshow={"slide_type": "fragment"}
# # + North
# # + South
# # + East
# # + West
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Quotes
# + [markdown] slideshow={"slide_type": "subslide"}
# Quotes of text can be introduced with the `>` character:
# + slideshow={"slide_type": "fragment"} active=""
# The author starts with a clear statement:
#
# > Quantification of global forest change has been lacking despite the recognized importance of
# forest ecosystem services.
#
# (Hansen et al. 2013)
# + [markdown] slideshow={"slide_type": "fragment"}
# The author starts with a clear statement:
#
# > Quantification of global forest change has been lacking despite the recognized importance of
# forest ecosystem services.
#
# (Hansen et al. 2013)
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Horizontal rules
# + [markdown] slideshow={"slide_type": "subslide"}
# A horizontal line (rule) can be inserted by using either 3 asterisks, minus signs or underlines:
# + [markdown] slideshow={"slide_type": "fragment"}
# ***
# + [markdown] slideshow={"slide_type": "fragment"}
# ---
# + [markdown] slideshow={"slide_type": "fragment"}
# ___
# + [markdown] slideshow={"slide_type": "fragment"}
# ---
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Hyperlinks
# + [markdown] slideshow={"slide_type": "subslide"}
# Hyperlinks can be used if inserted directly: https://en.wikipedia.org/wiki/Python_(programming_language)
# + [markdown] slideshow={"slide_type": "fragment"}
# Or they can be inserted in a [text](https://en.wikipedia.org/wiki/Python_(programming_language))
# + [markdown] slideshow={"slide_type": "fragment"}
# For this, we use a combination of parenthesis `()` and square brackets `[]`:
# + slideshow={"slide_type": "fragment"} active=""
# Or they can be inserted in a [text](https://en.wikipedia.org/wiki/Python_(programming_language))
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Equations
# + [markdown] slideshow={"slide_type": "subslide"}
# Equations with LaTeX
# + [markdown] slideshow={"slide_type": "skip"}
# LaTeX is a formatting syntax. For simple equations, we can use some online editor, copy-and-paste the code in a markdown cell between `$` signs:
# + slideshow={"slide_type": "fragment"} active=""
# $a=3x+1$
# + [markdown] slideshow={"slide_type": "fragment"}
# $a=3x+1$
# + [markdown] slideshow={"slide_type": "subslide"}
# A more complicated example:
# + slideshow={"slide_type": "fragment"} active=""
# $$ \huge h_{ts} = \frac{ h_m }{ 1 + b_0 \cdot e^{-b_1 \cdot T_{\Sigma_{ts}}}} $$
# + [markdown] slideshow={"slide_type": "fragment"}
# $$ \huge h_{ts} = \frac{ h_m }{ 1 + b_0 \cdot e^{-b_1 \cdot T_{\Sigma_{ts}}}} $$
# + [markdown] slideshow={"slide_type": "subslide"}
# Explaining the LaTeX syntax falls outside the scope of this course, but be aware that this is an open possibility and check for websites in internet where you can make good looking equations to make your notebooks more professional.
#
# If you search for "latex equations online", you should be able to generate the code that you later can copy-and-paste in your notebook.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Images
# + [markdown] slideshow={"slide_type": "subslide"}
# There are three ways to insert images in a notebook
# + [markdown] slideshow={"slide_type": "fragment"}
# 1. Images with markdown are inserted with:
# + slideshow={"slide_type": "fragment"} active=""
# 
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 2. Images using python code:
# + slideshow={"slide_type": "fragment"}
# IPython's Image object renders the picture when it is the cell's last expression.
from IPython.display import Image
Image(filename='misc/img/python-logo.png')
# + [markdown] slideshow={"slide_type": "subslide"}
# The latter is a bit more complicated (and is not markdown, for the sake of precision), but it allows setting several parameters:
# + slideshow={"slide_type": "fragment"}
logo = Image(filename='misc/img/python-logo.png')
logo.width = 100  # constrain the rendered width via the object's attribute
logo  # last expression: the resized logo is displayed
# + [markdown] slideshow={"slide_type": "subslide"}
# 3. Images using HTML and will be covered in the next section
# + [markdown] slideshow={"slide_type": "subslide"}
# 4. It is also possible to insert images via the menu `View`→`Insert image`. It will generate the corresponding code inside a markdown cell.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### HTML
# + [markdown] slideshow={"slide_type": "subslide"}
# Apart from ___markdown___ formatting, it is possible to use ___HTML___ code.
#
# ___HTML___ stands for hypertext markup language, and it is the standard language for creating web pages.
#
# For jupyter notebooks, it means that you can create very sophisticated tables, images, embed videos and animations, among lots of other features.
#
# In the next cells we see only three examples of simple tasks done with HTML: inserting an image, inserting the symbol for a key (from the keyboard), and centering.
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Images with HTML
# + [markdown] slideshow={"slide_type": "subslide"}
# HTML code is often easy to recognize from the use of the signs **\<** and **\>**.
#
# To insert an image from the hard disk, we can use the following line:
# + slideshow={"slide_type": "fragment"} active=""
# <img src='misc/img/helloworld.png'>
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/python-logo.png' width='200'>
# + [markdown] slideshow={"slide_type": "subslide"}
# But HTML is much more flexible. We can for example define the image size, title and alternative text:
# + slideshow={"slide_type": "fragment"} active=""
# <img src='https://upload.wikimedia.org/wikipedia/commons/1/13/Tibetan_Wolf_Canis_lupus_chanko_cropped.jpg' alt='Image not found :(' width="200" title='A wolf from Wikipedia'>
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='https://upload.wikimedia.org/wikipedia/commons/1/13/Tibetan_Wolf_Canis_lupus_chanko_cropped.jpg' alt='Image not found :(' width="200" title='A wolf from Wikipedia'>
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Centering text
# + slideshow={"slide_type": "fragment"}
# Super-title
# + [markdown] slideshow={"slide_type": "fragment"}
# # Super-title
# + slideshow={"slide_type": "fragment"}
# <center>Super-title</center>
# + [markdown] slideshow={"slide_type": "fragment"}
# <h1> <center> Super-title </center> </h1>
# + [markdown] slideshow={"slide_type": "subslide"}
# HTML often uses a pair of open-and-close tags, in this case both __center__ inside the angle brackets. Notice that the second __center__ tag has a slash to indicate that it is the end of the centered block.
# + [markdown] slideshow={"slide_type": "subslide"}
# Similar to the centering blocks, we can format keys if we put them between __kbd__ (from keyboard) statements:
# + slideshow={"slide_type": "fragment"} active=""
# <kbd> ENTER </kbd> + <kbd> ALT </kbd> + <kbd> A </kbd>
# + [markdown] slideshow={"slide_type": "fragment"}
# <kbd> ENTER </kbd> + <kbd> ALT </kbd> + <kbd> A </kbd>
# + [markdown] slideshow={"slide_type": "subslide"}
# There are lots of other interesting HTML pieces of code, feel free to search for them to make your notebooks look better!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Notebook sharing as PDF or HTML files.
# + [markdown] slideshow={"slide_type": "subslide"}
# Once you organized your notebook with comments, titles, equations, graphs, pictures and calculations, you may want to share it with friends or colleagues.
#
# We will briefly see three ways to share your notebooks:
# * as a PDF file
# * as an HTML website
# * upload it to the internet and share the link
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Export as PDF
# + [markdown] slideshow={"slide_type": "fragment"}
# You can very easily convert your notebook to a PDF file under the menu entry `File`→`Download as`→`PDF via LaTeX`.
# + [markdown] slideshow={"slide_type": "fragment"}
# There is not much to say about it, other than noting that behind the scenes, the LaTeX syntax is used to produce the document. You don't need to worry about it though.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Export as HTML
# + [markdown] slideshow={"slide_type": "fragment"}
# Similarly, the notebook can be saved as an HTML file under the menu entry `File`→`Download as`→`HTML`.
# + [markdown] slideshow={"slide_type": "subslide"}
# A note: If you install the RISE extension from https://github.com/damianavila/RISE, you can generate slideshows, and later download the notebook in HMTL format as a presentation to share as well: `File`→`Download as`→`Reveal.js slides`.
# + [markdown] slideshow={"slide_type": "subslide"}
# To install the Rise extension for making presentation (or any other package), you need to start a console in anaconda and run some commands. You start a console by clicking on `Environments`→`base (root)`→`Open Terminal`.
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/anaconda_rise1.png' width='700'>
# + [markdown] slideshow={"slide_type": "subslide"}
# You can then enter the following command and wait for the installation to complete: `conda install -c conda-forge rise`
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/anaconda_rise2.png' width='500'>
# + [markdown] slideshow={"slide_type": "subslide"}
# If the installation was successful, you can later define the presentation's slides via `View`→`Cell Toolbar`→`Slideshow` and the drop-down menus that appear at each cell. To start a presentation, press the new button at the rightmost part of the toolbar:
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/slideshow_button.png' width='50'>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Publish online
# + [markdown] slideshow={"slide_type": "fragment"}
# A notebook already has all the code needed to be rendered in a browser.
#
# Provided that there is an online service to save it, we can upload it and share a link to it.
# + [markdown] slideshow={"slide_type": "fragment"}
# As an example, this is the first notebook of the present course, hosted at _github.com_:
#
# https://github.com/lskl/prog2_FIT_WS21/blob/main/3_jupyter_notebooks.ipynb
# + [markdown] slideshow={"slide_type": "subslide"}
# Notice that the notebook is itself code. It is a raw-text format: ___JSON___.
#
# In order to render it as a notebook, we can use the online service of https://nbviewer.jupyter.org/
# + [markdown] slideshow={"slide_type": "fragment"}
# Here is the same notebook, in an online version:
#
# https://nbviewer.org/github/lskl/prog2_FIT_WS21/blob/main/3_jupyter_notebooks.ipynb
# + [markdown] slideshow={"slide_type": "subslide"}
# And it can also be done with dropbox:
# + [markdown] slideshow={"slide_type": "fragment"}
# <img src='misc/img/nbviewer.png' width='700'>
# + [markdown] slideshow={"slide_type": "subslide"}
# Note that some links need a trick: when you get the share link, change the last part from `?dl=0` to `?raw=1`
# + [markdown] slideshow={"slide_type": "slide"}
# ## Links
# + [markdown] slideshow={"slide_type": "fragment"}
#
# [A tutorial on jupyter notebooks](https://www.dataquest.io/blog/jupyter-notebook-tutorial/)
#
# [Markdown documentation in jupyter](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Working%20With%20Markdown%20Cells.html)
#
# [Markdown syntax specification](https://daringfireball.net/projects/markdown/)
#
# [Markdown formatting syntax](https://www.markdownguide.org/cheat-sheet)
#
# [Nbviewer, an online service to share notebooks, also with examples](https://nbviewer.jupyter.org/)
#
# [A collection of notebooks in different areas, as inspiration and to get ideas](https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks)
#
# [An article in _Nature_, about the use of the notebooks for data science](https://www.nature.com/articles/d41586-018-07196-1)
# + [markdown] slideshow={"slide_type": "fragment"}
# ___
| 3_jupyter_notebooks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from typing import *
# +
class Solution:
    """LeetCode 309 — Best Time to Buy and Sell Stock with Cooldown."""

    def maxProfit(self, prices: List[int]) -> int:
        """Return the maximum profit from unlimited buy/sell transactions
        with a mandatory 1-day cooldown after every sale.

        Dynamic programming over four per-day states:
          0: bought today        (requires a cooldown-free, no-stock day before)
          1: sold today          (requires holding stock the day before)
          2: holding, no action  (keep the stock without selling)
          3: no stock, no action (idle; eligible to buy tomorrow)
        dp[i][s] is the best profit after day i when ending in state s.
        """
        if not prices:
            # No prices means no transactions are possible (fix: return 0, not None —
            # the contract is an int profit).
            return 0
        n = len(prices)
        INF = int(1e9)
        # NOTE: `[[-INF, 0, -INF, 0]] * (n + 1)` would alias ONE inner list across
        # all rows (dp[0] is dp[1] → True); the comprehension creates independent rows.
        dp = [[-INF, 0, -INF, 0] for _ in range(n + 1)]
        for i in range(1, n + 1):
            # Buy: only after a no-stock, no-action day (cooldown satisfied).
            dp[i][0] = dp[i - 1][3] - prices[i - 1]
            # Sell: must have held stock yesterday (just bought, or holding).
            dp[i][1] = max(dp[i - 1][0], dp[i - 1][2]) + prices[i - 1]
            # Hold: keep the stock, no trade today.
            dp[i][2] = max(dp[i - 1][0], dp[i - 1][2])
            # Idle: no stock, no trade today.
            dp[i][3] = max(dp[i - 1][1], dp[i - 1][3])
        return max(dp[n])
# Expected answer: 3  (buy@1, sell@3, cooldown, buy@0, sell@2)
prices = [1,2,3,0,2]
Solution().maxProfit(prices)
# -
# list.insert(0, x) prepends in place (O(n): every existing element shifts right).
a = [1, 2, 3, 0]
a.insert(0, 4)
a  # auto-displayed: [4, 1, 2, 3, 0]
| leetcode/309.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jcohenadad/exercises/blob/main/nibabel.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="AAahRTXO3-Cs"
# This notebook covers:
# - neuroimaging data;
# - NIfTI format
# - nibabel library
# - Matrix manipulation
# - pandas library
#
# It is based on https://nipy.org/nibabel/gettingstarted.html
#
# Questions start with "❓"
# + id="GQO7ehSv3lnJ"
import os
import numpy as np
import nibabel as nib
# + [markdown] id="6hxsMPIL3ylT"
# Import example data
# + id="BFWIi3zK3xY-"
# nibabel ships small example images for testing; data_path points at that directory.
from nibabel.testing import data_path
example_filename = os.path.join(data_path, 'example4d.nii.gz')
# + [markdown] id="CO71Ty5S4oh3"
# Load 4D image. Check its size
# + colab={"base_uri": "https://localhost:8080/"} id="vGJ5WdiC4wUg" outputId="66d5a6ef-aaf9-4669-9428-7f1fedd53fd5"
img = nib.load(example_filename)
img.shape  # 4D: (x, y, z, t) — per the note below, two volumes acquired back-to-back
# + [markdown] id="gocHqneJ5Whj"
# The dimensions are: x, y, z, t. There are two MRI volumes acquired back-to-back.
# + [markdown] id="rsDJZlzZ5irL"
# ❓ *Display the mid-sagittal view of the first volume. Use grayscale and make sure the image is displayed with the proper ratio.*
| nibabel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Tce3stUlHN0L"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="both" colab={} colab_type="code" id="IcfrhafzkZbH"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="qFdPvlXBOdUN"
# # Quantization aware training comprehensive guide
# + [markdown] colab_type="text" id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/model_optimization/guide/quantization/training_comprehensive_guide"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/quantization/training_comprehensive_guide.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/quantization/training_comprehensive_guide.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/model-optimization/tensorflow_model_optimization/g3doc/guide/quantization/training_comprehensive_guide.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="FbORZA_bQx1G"
# Welcome to the comprehensive guide for Keras quantization aware training.
#
# This page documents various use cases and shows how to use the API for each one. Once you know which APIs you need, find the parameters and the low-level details in the
# [API docs](https://www.tensorflow.org/model_optimization/api_docs/python/tfmot/quantization).
#
# * If you want to see the benefits of quantization aware training and what's supported, see the [overview](https://www.tensorflow.org/model_optimization/guide/quantization/training.md).
# * For a single end-to-end example, see the [quantization aware training example](https://www.tensorflow.org/model_optimization/guide/quantization/training_example.md).
#
# The following use cases are covered:
#
# * Deploy a model with 8-bit quantization with these steps.
# * Define a quantization aware model.
# * For Keras HDF5 models only, use special checkpointing and
# deserialization logic. Training is otherwise standard.
# * Create a quantized model from the quantization aware one.
# * Experiment with quantization.
# * Anything for experimentation has no supported path to deployment.
# * Custom Keras layers fall under experimentation.
# + [markdown] colab_type="text" id="nuABqZnXVDvO"
# ## Setup
# + [markdown] colab_type="text" id="qqnbd7TOfAq9"
# For finding the APIs you need and understanding purposes, you can run but skip reading this section.
# + cellView="both" colab={} colab_type="code" id="lvpH1Hg7ULFz"
# ! pip uninstall -y tensorflow
# ! pip install -q tf-nightly
# ! pip install -q tensorflow-model-optimization
import tensorflow as tf
import numpy as np
import tensorflow_model_optimization as tfmot
import tempfile
# Toy training data: a single random 20-feature sample and a 20-class label.
# NOTE(review): to_categorical receives a float from randn — relies on its
# implicit integer conversion for the class index; confirm intended.
input_shape = [20]
x_train = np.random.randn(1, 20).astype(np.float32)
y_train = tf.keras.utils.to_categorical(np.random.randn(1), num_classes=20)
def setup_model():
    """Build the small demo network: a 20-unit Dense layer followed by Flatten."""
    layers = [
        tf.keras.layers.Dense(20, input_shape=input_shape),
        tf.keras.layers.Flatten(),
    ]
    return tf.keras.Sequential(layers)
def setup_pretrained_weights():
    """Train the demo model briefly and return the path to its saved weights.

    The weights are written to a temporary file (suffix '.tf', TensorFlow
    checkpoint format); callers pass the returned path to load_weights().
    """
    model= setup_model()

    model.compile(
        loss=tf.keras.losses.categorical_crossentropy,
        optimizer='adam',
        metrics=['accuracy']
    )

    model.fit(x_train, y_train)

    # mkstemp returns (fd, path); only the path is needed here.
    _, pretrained_weights = tempfile.mkstemp('.tf')

    model.save_weights(pretrained_weights)

    return pretrained_weights
def setup_pretrained_model():
    """Return a fresh demo model initialized from newly trained weights."""
    model = setup_model()
    model.load_weights(setup_pretrained_weights())
    return model
setup_model()
pretrained_weights = setup_pretrained_weights()
# + [markdown] colab_type="text" id="dTHLMLV-ZrUA"
# ##Define quantization aware model
# + [markdown] colab_type="text" id="0U6XAUhIe6re"
# By defining models in the following ways, there are available paths to deployment to backends listed in the [overview page](https://www.tensorflow.org/model_optimization/guide/quantization/training.md). By default, 8-bit quantization is used.
#
# Note: a quantization aware model is not actually quantized. Creating a quantized model is a separate step.
# + [markdown] colab_type="text" id="Ybigft1fTn4T"
# ### Quantize whole model
# + [markdown] colab_type="text" id="puZvqnp1xsn-"
# **Your use case:**
# * Subclassed models are not supported.
#
# **Tips for better model accuracy:**
#
# * Try "Quantize some layers" to skip quantizing the layers that reduce accuracy the most.
# * It's generally better to finetune with quantization aware training as opposed to training from scratch.
#
# + [markdown] colab_type="text" id="_Zhzx_azO1WR"
# To make the whole model aware of quantization, apply `tfmot.quantization.keras.quantize_model` to the model.
#
#
# + colab={} colab_type="code" id="1s_EK8reOruu"
base_model = setup_model()
base_model.load_weights(pretrained_weights) # optional but recommended for model accuracy

# quantize_model wraps the whole model so every supported layer becomes quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_model(base_model)

quant_aware_model.summary()
# + [markdown] colab_type="text" id="xTbTLn3dZM7h"
# ### Quantize some layers
# + [markdown] colab_type="text" id="MbM8o832xTxV"
# Quantizing a model can have a negative effect on accuracy. You can selectively quantize layers of a model to explore the trade-off between accuracy, speed, and model size.
#
# **Your use case:**
# * To deploy to a backend that only works well with fully quantized models (e.g. EdgeTPU v1, most DSPs), try "Quantize whole model".
#
# **Tips for better model accuracy:**
# * It's generally better to finetune with quantization aware training as opposed to training from scratch.
# * Try quantizing the later layers instead of the first layers.
# * Avoid quantizing critical layers (e.g. attention mechanism).
#
#
# + [markdown] colab_type="text" id="3OCbOUWHsE_v"
# In the example below, quantize only the `Dense` layers.
# + colab={} colab_type="code" id="HN0B_QB-ZhE2"
# Create a base model
base_model = setup_model()
base_model.load_weights(pretrained_weights) # optional but recommended for model accuracy
# Helper function uses `quantize_annotate_layer` to annotate that only the
# Dense layers should be quantized.
# Passed to `clone_model` as the `clone_function`: annotate Dense layers for
# quantization and return every other layer untouched.
def apply_quantization_to_dense(layer):
    is_dense = isinstance(layer, tf.keras.layers.Dense)
    return tfmot.quantization.keras.quantize_annotate_layer(layer) if is_dense else layer
# Use `tf.keras.models.clone_model` to apply `apply_quantization_to_dense`
# to the layers of the model. `clone_function` is called once per layer;
# unannotated layers are copied as-is.
annotated_model = tf.keras.models.clone_model(
    base_model,
    clone_function=apply_quantization_to_dense,
)

# Now that the Dense layers are annotated,
# `quantize_apply` actually makes the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)
quant_aware_model.summary()
# + [markdown] colab_type="text" id="HiA28PrrW11H"
# While this example used the type of the layer to decide what to quantize, the easiest way to quantize a particular layer is to set its `name` property, and look for that name in the `clone_function`.
# + colab={} colab_type="code" id="CjY_JyB808Da"
# Inspect a layer's `name` property — the most robust hook for selecting
# specific layers inside a `clone_function`.
print(base_model.layers[0].name)
# + [markdown] colab_type="text" id="mpb_BydRaSoF"
# #### More readable but potentially lower model accuracy
# + [markdown] colab_type="text" id="2vqXeYffzSHp"
# This is not compatible with finetuning with quantization aware training, which is why it may be less accurate than the above examples.
# + [markdown] colab_type="text" id="MQoMH3g3fWwb"
# **Functional example**
# + colab={} colab_type="code" id="7Wow55hg5oiM"
# Use `quantize_annotate_layer` to annotate that the `Dense` layer
# should be quantized.
# Functional model whose Dense layer is annotated for quantization.
i = tf.keras.Input(shape=(20,))
x = tfmot.quantization.keras.quantize_annotate_layer(tf.keras.layers.Dense(10))(i)
o = tf.keras.layers.Flatten()(x)
annotated_model = tf.keras.Model(inputs=i, outputs=o)

# Use `quantize_apply` to actually make the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)

# For deployment purposes, the tool adds `QuantizeLayer` after `InputLayer` so that the
# quantized model can take in float inputs instead of only uint8.
quant_aware_model.summary()
# + [markdown] colab_type="text" id="wIGj-r2of2ls"
# **Sequential example**
#
# + colab={} colab_type="code" id="mQOiDUGgfi4y"
# Use `quantize_annotate_layer` to annotate that the `Dense` layer
# should be quantized.
# Sequential model with only the Dense layer annotated for quantization.
annotated_model = tf.keras.Sequential([
    tfmot.quantization.keras.quantize_annotate_layer(tf.keras.layers.Dense(20, input_shape=input_shape)),
    tf.keras.layers.Flatten()
])

# Use `quantize_apply` to actually make the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)
quant_aware_model.summary()
# + [markdown] colab_type="text" id="MpvX5IqahV1r"
# ## Checkpoint and deserialize
# + [markdown] colab_type="text" id="GuZ5wlij1dcJ"
# **Your use case:** this code is only needed for the HDF5 model format (not HDF5 weights or other formats).
# + colab={} colab_type="code" id="6khQg-q7imfH"
# Define the model.
base_model = setup_model()
base_model.load_weights(pretrained_weights) # optional but recommended for model accuracy
quant_aware_model = tfmot.quantization.keras.quantize_model(base_model)

# Save or checkpoint the model.
_, keras_model_file = tempfile.mkstemp('.h5')
quant_aware_model.save(keras_model_file)

# `quantize_scope` is needed for deserializing HDF5 models: it registers the
# quantization wrapper classes so `load_model` can reconstruct them.
with tfmot.quantization.keras.quantize_scope():
    loaded_model = tf.keras.models.load_model(keras_model_file)

loaded_model.summary()
# + [markdown] colab_type="text" id="NeNCMDAbnEKU"
# ## Create and deploy quantized model
# + [markdown] colab_type="text" id="iiYk_KR0rJ2n"
# In general, reference the documentation for the deployment backend that you
# will use.
#
# This is an example for the TFLite backend.
# + colab={} colab_type="code" id="fbBiEetda3R8"
base_model = setup_pretrained_model()
quant_aware_model = tfmot.quantization.keras.quantize_model(base_model)

# Typically you train the model here.

# Convert the quantization-aware Keras model into an actually-quantized
# TFLite flatbuffer.
converter = tf.lite.TFLiteConverter.from_keras_model(quant_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_tflite_model = converter.convert()
# + [markdown] colab_type="text" id="v5raSy9ghxkv"
# ## Experiment with quantization
# + [markdown] colab_type="text" id="LUGpXIET0cy3"
# **Your use case**: using the following APIs means that there is no
# supported path to deployment. The features are also experimental and not
# subject to backward compatibility.
# * `tfmot.quantization.keras.QuantizeConfig`
# * `tfmot.quantization.keras.quantizers.Quantizer`
# * `tfmot.quantization.keras.quantizers.LastValueQuantizer`
# * `tfmot.quantization.keras.quantizers.MovingAverageQuantizer`
# + [markdown] colab_type="text" id="Q1KI_FCcU7Yn"
# ### Setup: DefaultDenseQuantizeConfig
# + [markdown] colab_type="text" id="I6nPkJDRUB2G"
# Experimenting requires using `tfmot.quantization.keras.QuantizeConfig`, which describes how to quantize the weights, activations, and outputs of a layer.
#
# Below is an example that defines the same `QuantizeConfig` used for the `Dense` layer in the API defaults.
#
# During the forward propagation in this example, the `LastValueQuantizer` returned in `get_weights_and_quantizers` is called with `layer.kernel` as the input, producing an output. The output replaces `layer.kernel`
# in the original forward propagation of the `Dense` layer, via the logic defined in `set_quantize_weights`. The same idea applies to the activations and outputs.
#
#
#
# + colab={} colab_type="code" id="B9SWK5UQT7VQ"
# Shorthand aliases for the built-in quantizers used below.
LastValueQuantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer
MovingAverageQuantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer

class DefaultDenseQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
    """Replicates the default `QuantizeConfig` used for `Dense` layers.

    Describes which weights/activations to quantize, with which quantizer,
    and how the quantized tensors are wired back into the layer.
    """

    # Configure how to quantize weights.
    def get_weights_and_quantizers(self, layer):
        # Kernel: 8-bit, symmetric, per-tensor range tracked by last value.
        return [(layer.kernel, LastValueQuantizer(num_bits=8, symmetric=True, narrow_range=False, per_axis=False))]

    # Configure how to quantize activations.
    def get_activations_and_quantizers(self, layer):
        # Activations: 8-bit, asymmetric, moving-average range estimate.
        return [(layer.activation, MovingAverageQuantizer(num_bits=8, symmetric=False, narrow_range=False, per_axis=False))]

    def set_quantize_weights(self, layer, quantize_weights):
        # Add this line for each item returned in `get_weights_and_quantizers`
        # , in the same order
        layer.kernel = quantize_weights[0]

    def set_quantize_activations(self, layer, quantize_activations):
        # Add this line for each item returned in `get_activations_and_quantizers`
        # , in the same order.
        layer.activation = quantize_activations[0]

    # Configure how to quantize outputs (may be equivalent to activations).
    def get_output_quantizers(self, layer):
        return []

    def get_config(self):
        # No __init__ parameters to serialize.
        return {}
# + [markdown] colab_type="text" id="8vJeoGQG9ZX0"
# ### Quantize custom Keras layer
#
#
# + [markdown] colab_type="text" id="YmyhI_bzWb2w"
# This example uses the `DefaultDenseQuantizeConfig` to quantize the `CustomLayer`.
#
# Applying the configuration is the same across
# the "Experiment with quantization" use cases.
# * Apply `tfmot.quantization.keras.quantize_annotate_layer` to the `CustomLayer` and pass in the `QuantizeConfig`.
# * Use
# `tfmot.quantization.keras.quantize_annotate_model` to continue to quantize the rest of the model with the API defaults.
#
#
# + colab={} colab_type="code" id="7_rBOJdyWWEs"
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope

# A "custom" layer for demonstration purposes; behaves exactly like Dense.
class CustomLayer(tf.keras.layers.Dense):
    pass

# Annotate the custom layer with an explicit QuantizeConfig; the rest of the
# model is quantized with the API defaults via `quantize_annotate_model`.
model = quantize_annotate_model(tf.keras.Sequential([
    quantize_annotate_layer(CustomLayer(20, input_shape=(20,)), DefaultDenseQuantizeConfig()),
    tf.keras.layers.Flatten()
]))

# `quantize_apply` requires mentioning `DefaultDenseQuantizeConfig` with `quantize_scope`
# as well as the custom Keras layer.
with quantize_scope(
    {'DefaultDenseQuantizeConfig': DefaultDenseQuantizeConfig,
     'CustomLayer': CustomLayer}):
    # Use `quantize_apply` to actually make the model quantization aware.
    quant_aware_model = tfmot.quantization.keras.quantize_apply(model)

quant_aware_model.summary()
# + [markdown] colab_type="text" id="vnMguvVSnUqD"
# ### Modify quantization parameters
#
# + [markdown] colab_type="text" id="BLgH1aFMjTK4"
# **Common mistake:** quantizing the bias to fewer than 32-bits usually harms model accuracy too much.
#
# This example modifies the `Dense` layer to use 4-bits for its weights instead
# of the default 8-bits. The rest of the model continues to use API defaults.
#
# + colab={} colab_type="code" id="77jgBjccnTh6"
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope

class ModifiedDenseQuantizeConfig(DefaultDenseQuantizeConfig):
    """Default Dense config, except the kernel is quantized to 4 bits."""

    # Configure weights to quantize with 4-bit instead of 8-bits.
    def get_weights_and_quantizers(self, layer):
        return [(layer.kernel, LastValueQuantizer(num_bits=4, symmetric=True, narrow_range=False, per_axis=False))]
# + [markdown] colab_type="text" id="x9JDKhaU3FKe"
# Applying the configuration is the same across
# the "Experiment with quantization" use cases.
# * Apply `tfmot.quantization.keras.quantize_annotate_layer` to the `Dense` layer and pass in the `QuantizeConfig`.
# * Use
# `tfmot.quantization.keras.quantize_annotate_model` to continue to quantize the rest of the model with the API defaults.
# + colab={} colab_type="code" id="sq5mfyBF3KxV"
# Annotate only the Dense layer with the modified 4-bit config; the rest of
# the model uses the API defaults.
model = quantize_annotate_model(tf.keras.Sequential([
    # Pass in modified `QuantizeConfig` to modify this Dense layer.
    quantize_annotate_layer(tf.keras.layers.Dense(20, input_shape=(20,)), ModifiedDenseQuantizeConfig()),
    tf.keras.layers.Flatten()
]))

# `quantize_apply` requires mentioning `ModifiedDenseQuantizeConfig` with `quantize_scope`:
with quantize_scope(
    {'ModifiedDenseQuantizeConfig': ModifiedDenseQuantizeConfig}):
    # Use `quantize_apply` to actually make the model quantization aware.
    quant_aware_model = tfmot.quantization.keras.quantize_apply(model)

quant_aware_model.summary()
# + [markdown] colab_type="text" id="bJMKgzh84CCs"
# ### Modify parts of layer to quantize
#
#
# + [markdown] colab_type="text" id="Z3pij2uO808g"
# This example modifies the `Dense` layer to skip quantizing the activation. The rest of the model continues to use API defaults.
# + colab={} colab_type="code" id="6BaaJPBR8djV"
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope

class ModifiedDenseQuantizeConfig(DefaultDenseQuantizeConfig):
    """Default Dense config, but leaves the activation unquantized."""

    def get_activations_and_quantizers(self, layer):
        # Skip quantizing activations.
        return []

    def set_quantize_activations(self, layer, quantize_activations):
        # Empty since `get_activations_and_quantizers` returns
        # an empty list.
        return
# + [markdown] colab_type="text" id="2OkqHX5r2nT7"
# Applying the configuration is the same across
# the "Experiment with quantization" use cases.
# * Apply `tfmot.quantization.keras.quantize_annotate_layer` to the `Dense` layer and pass in the `QuantizeConfig`.
# * Use
# `tfmot.quantization.keras.quantize_annotate_model` to continue to quantize the rest of the model with the API defaults.
# + colab={} colab_type="code" id="Ln9MDIZJ2n3F"
# Annotate only the Dense layer with the skip-activation config; the rest of
# the model uses the API defaults.
model = quantize_annotate_model(tf.keras.Sequential([
    # Pass in modified `QuantizeConfig` to modify this Dense layer.
    quantize_annotate_layer(tf.keras.layers.Dense(20, input_shape=(20,)), ModifiedDenseQuantizeConfig()),
    tf.keras.layers.Flatten()
]))

# `quantize_apply` requires mentioning `ModifiedDenseQuantizeConfig` with `quantize_scope`:
with quantize_scope(
    {'ModifiedDenseQuantizeConfig': ModifiedDenseQuantizeConfig}):
    # Use `quantize_apply` to actually make the model quantization aware.
    quant_aware_model = tfmot.quantization.keras.quantize_apply(model)

quant_aware_model.summary()
# + [markdown] colab_type="text" id="yD0sIR6tmmRx"
# ### Use custom quantization algorithm
#
#
# + [markdown] colab_type="text" id="I4onhF-H1zsn"
# The `tfmot.quantization.keras.quantizers.Quantizer` class is a callable that
# can apply any algorithm to its inputs.
#
# In this example, the inputs are the weights, and we apply the math in the
# `FixedRangeQuantizer` \_\_call\_\_ function to the weights. Instead of the original
# weights values, the output of the
# `FixedRangeQuantizer` is now passed to whatever would have used the weights.
# + colab={} colab_type="code" id="Jt8UioZH49QV"
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope

class FixedRangeQuantizer(tfmot.quantization.keras.quantizers.Quantizer):
    """Quantizer which forces outputs to be between -1 and 1."""

    def build(self, tensor_shape, name, layer):
        # Not needed. No new TensorFlow variables needed.
        return {}

    def __call__(self, inputs, training, weights, **kwargs):
        # Clip values into [-1, 1]; the result replaces the original weights
        # in the layer's forward pass.
        return tf.keras.backend.clip(inputs, -1.0, 1.0)

    def get_config(self):
        # Not needed. No __init__ parameters to serialize.
        return {}

class ModifiedDenseQuantizeConfig(DefaultDenseQuantizeConfig):
    # NOTE(review): this config swaps in the custom `FixedRangeQuantizer`
    # for the kernel — the original "4-bit" comment here was a copy-paste
    # leftover from the earlier example.
    def get_weights_and_quantizers(self, layer):
        # Use custom algorithm defined in `FixedRangeQuantizer` instead of default Quantizer.
        return [(layer.kernel, FixedRangeQuantizer())]
# + [markdown] colab_type="text" id="lu5ZeJ_Y2UxW"
# Applying the configuration is the same across
# the "Experiment with quantization" use cases.
# * Apply `tfmot.quantization.keras.quantize_annotate_layer` to the `Dense` layer and pass in the `QuantizeConfig`.
# * Use
# `tfmot.quantization.keras.quantize_annotate_model` to continue to quantize the rest of the model with the API defaults.
# + colab={} colab_type="code" id="ItC_3mwT2U87"
# Annotate only the Dense layer with the custom-quantizer config; the rest of
# the model uses the API defaults.
model = quantize_annotate_model(tf.keras.Sequential([
    # Pass in modified `QuantizeConfig` to modify this `Dense` layer.
    quantize_annotate_layer(tf.keras.layers.Dense(20, input_shape=(20,)), ModifiedDenseQuantizeConfig()),
    tf.keras.layers.Flatten()
]))

# `quantize_apply` requires mentioning `ModifiedDenseQuantizeConfig` with `quantize_scope`:
with quantize_scope(
    {'ModifiedDenseQuantizeConfig': ModifiedDenseQuantizeConfig}):
    # Use `quantize_apply` to actually make the model quantization aware.
    quant_aware_model = tfmot.quantization.keras.quantize_apply(model)

quant_aware_model.summary()
| site/en-snapshot/model_optimization/guide/quantization/training_comprehensive_guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1.0 Introduction
# This notebook has been constructed as part of the Quantum Experience Hands-on session by Bluesmarts **<NAME>**, **<NAME>**, **<NAME>** & **<NAME>**. The purpose of this notebook is to give an introduction to the most commonly used functions available in the Qiskit framework and how to run algorithms on real quantum computers. This edition has further been altered by **<NAME>**, **<NAME>** and **<NAME>**<br>
#
# You will have approximately 2 and a half hours to work with this binder, and are allowed to work in groups. If you have any questions or need help, we will gladly assist you.
#
# Before we start coding, it is important to have some basic knowledge about how the mechanics of a quantum computer work and how they can be utilized.
# ---
# ## 1.0.1 Qubit
# When we talk about classical computing, we often hear the word 'bit'. A _bit_ can refer to zero or one - You can also think of it as a binary state, where you have a switch which can be turned off or turned on - either 0 or 1. A quantum computer uses quantum bits, or _Qubits_, where we represent the two corresponding states for a _Qubit_ as the states |0<b>⟩</b> and |1<b>⟩</b>.
#
# However, Qubits are not restricted to being either |0<b>⟩</b> or |1<b>⟩</b>, but can be a combination of both zero and one through a quantum mechanical principle called Superposition.
#
# <div class="alert alert-block alert-warning">
# <b>Note:</b> Notation like ‘| ⟩’ is called the Dirac notation, and we’ll see this often, as it’s the standard notation for states in quantum mechanics.
# </div>
# ---
# ## 1.0.2 Superposition
# When a _qubit_ is in the state |0<b>⟩</b>, the result of _measuring_ the _Qubit_ will yield an outcome of 0 with a probability of 100 %, which is what happens with a classical bit.<br>
# Similarly, the result of measuring a _Qubit_ in state |1<b>⟩</b> will yield an outcome of 1 with a probability of 100 %.<br>
#
# For a *Qubit* in a superposition state, the outcome becomes less simple - the outcome of measuring a _Qubit_ in superposition, even of a known state, cannot be predicted with absolute certainty. Even for the most simple superposition state, when we use a quantum gate to flip the _Qubit_ into superposition (which we will cover later), the measurement of the superposition state has an equal probability (50 %) of yielding an outcome of either 0 or 1.
#
# This is, however, not the only superpositional state it can obtain, as it, in reality, can obtain any probability of being either one of them, which allows us to do more tricks.
# ---
# ## 1.0.3 Entanglement
# Entanglement is a very special form of superposition, which is key to the power of quantum computing. What that means, is that we have the ability to have two or more _Qubits_ in a superposition state, which are inherently linked, such that measurement of one changes the possible measurement outcome of the other, regardless of how far the two are apart.<br>
#
# In a quantum computer, we can use this interlinked interaction to do all sorts of interesting types of calculations, where different _Qubits_ have a persistent ghostly connection with each other. This means that if one of the entangled _Qubits_ is flipped around, the other entangled _Qubit_ will be flipped as well.
# ---
# ## Next Jupyter notebook - Now let's get coding!
# [1. Exercise - Our first Quantum Program: Hello Quantum World](1.1%20-%20Hello%20Quantum%20World.ipynb)
| notebook-exercises/1.0 - Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import SimpleITK as sitk
import numpy as np
import csv
from glob import glob
import pandas as pd
import os
# Helper function to get rows in data frame associated
# with each file
# Map a seriesuid fragment to the matching .mhd path in the module-level
# `file_list`; returns None when nothing matches.
def get_filename(case):
    global file_list
    return next((path for path in file_list if case in path), None)
# Resolve the LUNA16 subset0 scan paths next to this notebook.
current_path = os.getcwd()
subset_path = current_path + "\\subset0\\"  # NOTE(review): Windows-only path separators
file_list = glob(subset_path+"*.mhd")

# annotations.csv: one row per nodule (seriesuid, coordX/Y/Z, diameter_mm).
# Attach each annotation's scan path; drop nodules whose scan is missing.
df_node = pd.read_csv("annotations.csv")
df_node["file"] = df_node["seriesuid"].apply(get_filename)
df_node = df_node.dropna()

# Looping over the image files
fcount = 0
for img_file in file_list:
    print("Getting mask for image file %s" % img_file.replace(subset_path,""))
    mini_df = df_node[df_node["file"]==img_file] #get all nodules associate with file
    if len(mini_df)>0: # some files may not have a nodule--skipping those
        biggest_node = np.argsort(mini_df["diameter_mm"].values)[-1] # just using the biggest node
        node_x = mini_df["coordX"].values[biggest_node]
        node_y = mini_df["coordY"].values[biggest_node]
        node_z = mini_df["coordZ"].values[biggest_node]
        diam = mini_df["diameter_mm"].values[biggest_node]

# There are 89 files in the subsection
print(len(file_list))
# there are 112 instances
print(len(df_node))
# NOTE(review): mini_df here holds whatever the last loop iteration left.
print(len(mini_df))
# +
from __future__ import print_function, division
import SimpleITK as sitk
import numpy as np
import csv
from glob import glob
import pandas as pd
#Some helper functions
def make_mask(center,diam,z,width,height,spacing,origin):
    '''
    Build a binary (height x width) mask for one transverse slice, with 1s
    inside the nodule sphere and 0s elsewhere.

    Center : centers of circles px -- list of coordinates x,y,z
    diam : diameters of circles px -- diameter
    widthXheight : pixel dim of image
    spacing = mm/px conversion rate np array x,y,z
    origin = x,y,z mm np.array
    z = z position of slice in world coordinates mm
    '''
    mask = np.zeros([height,width]) # 0's everywhere except nodule swapping x,y to match img
    #convert to nodule space from world coordinates

    # Defining the voxel range in which the nodule falls
    v_center = (center-origin)/spacing
    v_diam = int(diam/spacing[0]+5)  # search radius in voxels, padded by 5 px
    # NOTE(review): the bounds below pad by another +/-5 voxels; the final
    # per-voxel distance test is what actually shapes the mask.
    v_xmin = np.max([0,int(v_center[0]-v_diam)-5])
    v_xmax = np.min([width-1,int(v_center[0]+v_diam)+5])
    v_ymin = np.max([0,int(v_center[1]-v_diam)-5])
    v_ymax = np.min([height-1,int(v_center[1]+v_diam)+5])
    v_xrange = range(v_xmin,v_xmax+1)
    v_yrange = range(v_ymin,v_ymax+1)

    # Convert back to world coordinates for distance calculation
    # NOTE(review): x_data/y_data are computed but never used.
    x_data = [x*spacing[0]+origin[0] for x in range(width)]
    y_data = [x*spacing[1]+origin[1] for x in range(height)]

    # Fill in 1 within sphere around nodule
    # NOTE(review): the test compares distance to `diam` (not diam/2), so the
    # drawn disc has twice the nodule radius — confirm whether intentional.
    for v_x in v_xrange:
        for v_y in v_yrange:
            p_x = spacing[0]*v_x + origin[0]
            p_y = spacing[1]*v_y + origin[1]
            if np.linalg.norm(center-np.array([p_x,p_y,z]))<=diam:
                mask[int((p_y-origin[1])/spacing[1]),int((p_x-origin[0])/spacing[0])] = 1.0
    return(mask)
def matrix2int16(matrix):
    '''
    matrix must be a numpy array NXN
    Returns uint16 version, min-max scaled into [0, 65535].
    '''
    m_min = np.min(matrix)
    m_max = np.max(matrix)
    # Shift to zero exactly once, then scale into the full uint16 range.
    # BUG FIX: the original subtracted m_min a second time inside the
    # scaling expression, which skewed (and could overflow) the output
    # whenever m_min != 0.
    matrix = matrix - m_min
    return(np.array(np.rint(matrix/float(m_max-m_min) * 65535.0),dtype=np.uint16))
############
#
# Getting list of image files
# NOTE(review): Windows-only path separators throughout this cell.
luna_path = os.getcwd()
luna_subset_path = luna_path+"\\subset0\\"
output_path = luna_path+"\\output\\"
file_list=glob(luna_subset_path+"*.mhd")
print(file_list)
#####################
#
# Helper function to get rows in data frame associated
# with each file
def get_filename(file_list, case):
    """Return the first path in file_list containing `case`, else None."""
    matches = (path for path in file_list if case in path)
    return next(matches, None)
# +
# Join each annotation row to its scan file; rows whose scan is missing
# from this subset become NaN and are dropped.
df_node = pd.read_csv("annotations.csv")
df_node["file"] = df_node["seriesuid"].map(lambda file_name: get_filename(file_list, file_name))
df_node = df_node.dropna()
print(df_node)
#####
#
# Looping over the image files
#
# BUG FIX: `tqdm` was used below but never imported, raising a NameError.
# Import it when available, otherwise fall back to a plain pass-through.
try:
    from tqdm import tqdm
except ImportError:
    def tqdm(iterable):
        return iterable

for fcount, img_file in enumerate(tqdm(file_list)):
    mini_df = df_node[df_node["file"]==img_file] #get all nodules associate with file
    if mini_df.shape[0]>0: # some files may not have a nodule--skipping those
        # load the data once
        itk_img = sitk.ReadImage(img_file)
        img_array = sitk.GetArrayFromImage(itk_img) # indexes are z,y,x (notice the ordering)
        num_z, height, width = img_array.shape #heightXwidth constitute the transverse plane
        origin = np.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)
        spacing = np.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)
        # go through all nodes (why just the biggest?)
        for node_idx, cur_row in mini_df.iterrows():
            node_x = cur_row["coordX"]
            node_y = cur_row["coordY"]
            node_z = cur_row["coordZ"]
            diam = cur_row["diameter_mm"]
            # just keep 3 slices centered on the nodule's z position
            imgs = np.ndarray([3,height,width],dtype=np.float32)
            masks = np.ndarray([3,height,width],dtype=np.uint8)
            center = np.array([node_x, node_y, node_z]) # nodule center
            v_center = np.rint((center-origin)/spacing) # nodule center in voxel space (still x,y,z ordering)
            for i, i_z in enumerate(np.arange(int(v_center[2])-1,
                                              int(v_center[2])+2).clip(0, num_z-1)): # clip prevents going out of bounds in Z
                mask = make_mask(center, diam, i_z*spacing[2]+origin[2],
                                 width, height, spacing, origin)
                masks[i] = mask
                imgs[i] = img_array[i_z]
            np.save(os.path.join(output_path,"images_%04d_%04d.npy" % (fcount, node_idx)),imgs)
            np.save(os.path.join(output_path,"masks_%04d_%04d.npy" % (fcount, node_idx)),masks)
# +
import matplotlib.pyplot as plt
import os
import numpy as np

output_path = os.getcwd() + "\\output\\"
# Load the 3 saved slices and node masks for scan 0001, nodule 0023.
imgs = np.load(output_path+'images_0001_0023.npy')
masks = np.load(output_path+'masks_0001_0023.npy')
for i in range(len(imgs)):
    # NOTE(review): a 2x2 grid is created but only the top row is used here.
    fig,ax = plt.subplots(2,2,figsize=[8,8])
    ax[0,0].imshow(imgs[i],cmap='gray')            # raw slice
    ax[0,1].imshow(imgs[i]*masks[i],cmap='gray')   # slice restricted to nodule mask
    plt.show()
# +
import matplotlib.pyplot as plt
import os
import numpy as np

output_path = os.getcwd() + "\\output\\"
imgs = np.load(output_path+'images_0001_0023.npy')
masks = np.load(output_path+'masks_0001_0023.npy')
# Show raw slice, node mask, and masked slice side by side.
for i in range(len(imgs)):
    print(imgs[i].shape)
    print(masks[i].shape)
    print("image %d" % i)
    fig,ax = plt.subplots(2,2,figsize=[8,8])
    ax[0,0].imshow(imgs[i],cmap='gray')            # raw slice
    ax[1,0].imshow(masks[i],cmap='gray')           # nodule mask alone
    ax[0,1].imshow(imgs[i]*masks[i],cmap='gray')   # masked slice
    plt.show()
# +
import matplotlib.pyplot as plt
import os
import numpy as np

output_path = os.getcwd() + "\\output\\"
imgs = np.load(output_path+'images_0001_0023.npy')
lungmasks = np.load(output_path+'lungmask_0001_0023.npy')
# Show each slice next to its lung-masked version.
for i in range(len(imgs)):
    fig,ax = plt.subplots(2,2,figsize=[8,8])
    ax[0,0].imshow(imgs[i],cmap='gray')
    # BUG FIX: multiply by the matching mask slice, not the whole 3-D mask
    # stack — `imgs[i]` is 2-D, so `imgs[i]*lungmasks` broadcast to a 3-D
    # array that `imshow` cannot render.
    ax[0,1].imshow(imgs[i]*lungmasks[i],cmap='gray')
    plt.show()
# +
import numpy as np
from skimage import morphology
from skimage import measure
from sklearn.cluster import KMeans
from skimage.transform import resize
from glob import glob

working_path = os.getcwd()+"\\output\\"
file_list=glob(working_path+"images_*.npy")

# For each saved 3-slice stack, segment the lung region of every slice and
# overwrite the stack in a sibling "lungmask_*.npy" file.
for img_file in file_list:
    # I ran into an error when using Kmean on np.float16, so I'm using np.float64 here
    imgs_to_process = np.load(img_file).astype(np.float64)
    print ("on image", img_file)
    for i in range(len(imgs_to_process)):
        img = imgs_to_process[i]
        #Standardize the pixel values (zero mean, unit variance)
        mean = np.mean(img)
        std = np.std(img)
        img = img-mean
        img = img/std
        # Find the average pixel value near the lungs
        # to renormalize washed out images
        # NOTE(review): the 100:400 crop assumes 512x512 slices.
        middle = img[100:400,100:400]
        mean = np.mean(middle)
        # NOTE(review): `max`/`min` shadow the Python builtins within this loop.
        max = np.max(img)
        min = np.min(img)
        # To improve threshold finding, I'm moving the
        # underflow and overflow on the pixel spectrum
        img[img==max]=mean
        img[img==min]=mean
        #
        # Using Kmeans to separate foreground (radio-opaque tissue)
        # and background (radio transparent tissue ie lungs)
        # Doing this only on the center of the image to avoid
        # the non-tissue parts of the image as much as possible
        #
        kmeans = KMeans(n_clusters=2).fit(np.reshape(middle,[np.prod(middle.shape),1]))
        centers = sorted(kmeans.cluster_centers_.flatten())
        threshold = np.mean(centers)
        thresh_img = np.where(img<threshold,1.0,0.0) # threshold the image
        #
        # I found an initial erosion helful for removing graininess from some of the regions
        # and then large dialation is used to make the lung region
        # engulf the vessels and incursions into the lung cavity by
        # radio opaque tissue
        #
        eroded = morphology.erosion(thresh_img,np.ones([4,4]))
        dilation = morphology.dilation(eroded,np.ones([10,10]))
        #
        # Label each region and obtain the region properties
        # The background region is removed by removing regions
        # with a bbox that is to large in either dimnsion
        # Also, the lungs are generally far away from the top
        # and bottom of the image, so any regions that are too
        # close to the top and bottom are removed
        # This does not produce a perfect segmentation of the lungs
        # from the image, but it is surprisingly good considering its
        # simplicity.
        #
        labels = measure.label(dilation)
        label_vals = np.unique(labels)
        regions = measure.regionprops(labels)
        good_labels = []
        for prop in regions:
            B = prop.bbox
            if B[2]-B[0]<475 and B[3]-B[1]<475 and B[0]>40 and B[2]<472:
                good_labels.append(prop.label)
        mask = np.ndarray([512,512],dtype=np.int8)
        mask[:] = 0
        #
        # The mask here is the mask for the lungs--not the nodes
        # After just the lungs are left, we do another large dilation
        # in order to fill in and out the lung mask
        #
        for N in good_labels:
            mask = mask + np.where(labels==N,1,0)
        mask = morphology.dilation(mask,np.ones([10,10])) # one last dilation
        imgs_to_process[i] = mask
    np.save(img_file.replace("images","lungmask"),imgs_to_process)
#
# Here we're applying the masks and cropping and resizing the image
#
file_list=glob(working_path+"lungmask_*.npy")
out_images = []      #final set of images
out_nodemasks = []   #final set of nodemasks
# For each lung mask, load the matching image/node-mask stacks, apply the
# lung mask, renormalize, crop to the lung bounding box, and resize back
# to 512x512.
for fname in file_list:
    print("working on file ", fname)
    imgs_to_process = np.load(fname.replace("lungmask","images"))
    masks = np.load(fname)
    node_masks = np.load(fname.replace("lungmask","masks"))
    for i in range(len(imgs_to_process)):
        mask = masks[i]
        node_mask = node_masks[i]
        img = imgs_to_process[i]
        new_size = [512,512] # we're scaling back up to the original size of the image
        img= mask*img # apply lung mask
        #
        # renormalizing the masked image (in the mask region)
        #
        new_mean = np.mean(img[mask>0])
        new_std = np.std(img[mask>0])
        #
        # Pulling the background color up to the lower end
        # of the pixel range for the lungs
        #
        old_min = np.min(img) # background color
        img[img==old_min] = new_mean-1.2*new_std # resetting backgound color
        img = img-new_mean
        img = img/new_std
        #make image bounding box (min row, min col, max row, max col)
        labels = measure.label(mask)
        regions = measure.regionprops(labels)
        #
        # Finding the global min and max row over all regions
        #
        min_row = 512
        max_row = 0
        min_col = 512
        max_col = 0
        for prop in regions:
            B = prop.bbox
            if min_row > B[0]:
                min_row = B[0]
            if min_col > B[1]:
                min_col = B[1]
            if max_row < B[2]:
                max_row = B[2]
            if max_col < B[3]:
                max_col = B[3]
        # Pad the shorter side so the crop is square before resizing.
        width = max_col-min_col
        height = max_row - min_row
        if width > height:
            max_row=min_row+width
        else:
            max_col = min_col+height
        #
        # cropping the image down to the bounding box for all regions
        # (there's probably an skimage command that can do this in one line)
        #
        img = img[min_row:max_row,min_col:max_col]
        mask = mask[min_row:max_row,min_col:max_col]
        if max_row-min_row <5 or max_col-min_col<5: # skipping all images with no god regions
            pass
        else:
            # moving range to -1 to 1 to accomodate the resize function
            # NOTE(review): `min`/`max` shadow the Python builtins here.
            mean = np.mean(img)
            img = img - mean
            min = np.min(img)
            max = np.max(img)
            img = img/(max-min)
            new_img = resize(img,[512,512])
            new_node_mask = resize(node_mask[min_row:max_row,min_col:max_col],[512,512])
            out_images.append(new_img)
            out_nodemasks.append(new_node_mask)

num_images = len(out_images)
#
# Writing out images and masks as 1 channel arrays for input into network
#
final_images = np.ndarray([num_images,1,512,512],dtype=np.float32)
final_masks = np.ndarray([num_images,1,512,512],dtype=np.float32)
for i in range(num_images):
    final_images[i,0] = out_images[i]
    final_masks[i,0] = out_nodemasks[i]

# Shuffle once, then hold out the first 20% as the test split.
rand_i = np.random.choice(range(num_images),size=num_images,replace=False)
test_i = int(0.2*num_images)
np.save(working_path+"trainImages.npy",final_images[rand_i[test_i:]])
np.save(working_path+"trainMasks.npy",final_masks[rand_i[test_i:]])
np.save(working_path+"testImages.npy",final_images[rand_i[:test_i]])
np.save(working_path+"testMasks.npy",final_masks[rand_i[:test_i]])
# +
from __future__ import print_function
import numpy as np
import os
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from keras.utils import plot_model
import pydot
working_path = os.getcwd() + "\\output\\"  # NOTE(review): Windows-only path separators
K.set_image_dim_ordering('th') # Theano dimension ordering in this code
# Input slices are 512x512, single channel.
img_rows = 512
img_cols = 512
# Laplace-smoothing term for the Dice coefficient (avoids 0/0 on empty masks).
smooth = 1.
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two masks, via the Keras backend."""
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    denom = K.sum(flat_true) + K.sum(flat_pred)
    return (2. * overlap + smooth) / (denom + smooth)
def dice_coef_np(y_true,y_pred,smooth=1.):
    """NumPy version of the soft Dice coefficient.

    Args:
        y_true: ground-truth mask array (any shape; flattened internally).
        y_pred: predicted mask array, same shape as y_true.
        smooth: Laplace-smoothing constant. Defaults to 1., matching the
            module-level `smooth` the function previously read implicitly
            (parameterized here so the function is self-contained).

    Returns:
        Dice score in (0, 1]; 1.0 means perfect overlap.
    """
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so minimizing the loss maximizes overlap."""
    return -dice_coef(y_true, y_pred)
def get_unet():
    """Build and compile the 512x512 single-channel U-Net.

    Uses the old Keras 1.x API (`Convolution2D(n, 3, 3, ...)`, `merge`,
    `border_mode`) with Theano channel-first ordering.
    """
    inputs = Input((1,img_rows, img_cols))
    # Contracting path: two 3x3 convs per level, then 2x2 max-pooling.
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    # Bottleneck.
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    # Expanding path: upsample, concatenate the skip connection on the
    # channel axis (axis 1 under Theano ordering), then two 3x3 convs.
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    # 1x1 sigmoid conv produces the per-pixel mask probability.
    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    # Maximize dice by minimizing its negation.
    model.compile(optimizer=Adam(lr=1.0e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def train_and_predict(use_existing):
    """Train the U-Net on the saved .npy splits and report test-set dice.

    Arguments:
        use_existing: when True, resume from weights previously saved in
            ./unet.hdf5 instead of starting from random initialization.
    """
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train = np.load(working_path+"trainImages.npy").astype(np.float32)
    imgs_mask_train = np.load(working_path+"trainMasks.npy").astype(np.float32)
    # Bug fix: `imgs_test` was never loaded, so the prediction loop below
    # raised a NameError.  Load the test images saved by the preprocessing cell.
    imgs_test = np.load(working_path+"testImages.npy").astype(np.float32)
    imgs_mask_test_true = np.load(working_path+"testMasks.npy").astype(np.float32)
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    imgs_train -= mean  # images should already be standardized, but just in case
    imgs_train /= std
    # NOTE(review): the test images are not normalized with the training
    # mean/std; confirm that is intentional before relying on the dice score.
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    # Saving weights to unet.hdf5 at checkpoints
    model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)
    # Should we load existing weights?
    # Set argument for call to train_and_predict to true at end of script
    if use_existing:
        model.load_weights('./unet.hdf5')
    #
    # The final results for this tutorial were produced using a multi-GPU
    # machine using TitanX's.
    # For a home GPU computation benchmark, on my home set up with a GTX970
    # I was able to run 20 epochs with a training set size of 320 and
    # batch size of 2 in about an hour. I started getting reasonable masks
    # after about 3 hours of training.
    #
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(imgs_train, imgs_mask_train, batch_size=2, nb_epoch=20, verbose=1, shuffle=True,
              callbacks=[model_checkpoint])
    # loading best weights from training session
    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('./unet.hdf5')
    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    num_test = len(imgs_test)
    imgs_mask_test = np.ndarray([num_test,1,512,512],dtype=np.float32)
    for i in range(num_test):
        imgs_mask_test[i] = model.predict([imgs_test[i:i+1]], verbose=0)[0]
    np.save('masksTestPredicted.npy', imgs_mask_test)
    # Mean dice coefficient over the held-out test set.
    mean = 0.0
    for i in range(num_test):
        mean += dice_coef_np(imgs_mask_test_true[i,0], imgs_mask_test[i,0])
    mean /= num_test
    print("Mean Dice Coeff : ",mean)
if __name__ == '__main__':
    # Train from scratch; pass True to resume from saved ./unet.hdf5 weights.
    train_and_predict(False)
# +
from __future__ import print_function
import numpy as np
import os
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from keras.utils import plot_model
import graphviz
# Directory holding the preprocessed .npy arrays (Windows path separators).
working_path = os.getcwd() + "\\output\\"
K.set_image_dim_ordering('th') # Theano dimension ordering in this code
img_rows = 512
img_cols = 512
# Smoothing term keeping the dice coefficient defined for empty masks.
smooth = 1.
def dice_coef(y_true, y_pred):
    """Smoothed dice coefficient between two masks, as a Keras tensor."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_np(y_true,y_pred):
    """NumPy version of `dice_coef` for evaluating saved predictions."""
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
    # Negated dice so that minimizing the loss maximizes overlap.
    return -dice_coef(y_true, y_pred)
def get_unet():
    """Build and compile the 512x512 single-channel U-Net (old Keras 1.x API)."""
    inputs = Input((1,img_rows, img_cols))
    # Contracting path: two 3x3 convs per level, then 2x2 max-pooling.
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    # Bottleneck.
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    # Expanding path with channel-axis skip concatenations (Theano ordering).
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    # 1x1 sigmoid conv yields the per-pixel mask probability.
    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(lr=1.0e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
# +
from keras.utils import plot_model
### Build, Load, and Compile your model
model = get_unet()
# Render the architecture diagram to model.png (requires pydot + graphviz).
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
# -
# !conda install -c conda-forge pydotplus
| lung_cancer_dataanalytics/.ipynb_checkpoints/Luna16 Dataset Preprocessing-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
df=pd.read_csv("IMDB-Movie-Data.csv")
# Columns 1, 2, 9, 8 of the CSV.  NOTE(review): exact column meaning depends
# on the IMDB CSV layout — confirm (appears to be Title, Genre, Rating, Votes).
z=df.iloc[:,[1,2,9,8]]
import re
# Keep only the first listed genre of each movie.
genres=z['Genre']
category=[]
for i in genres:
    m=i.split(',')
    category.append(m[0])
a=pd.DataFrame(category)
# Encode genre names as integer categories.
from sklearn.preprocessing import LabelEncoder
enc=LabelEncoder()
enc.fit(a[0])
a[0]=enc.transform(a[0])
a=a.rename(columns={0:"Categories"})
a.Categories.unique()
a.to_csv('genres.csv',index=False)
z=z.join(a)
z
import matplotlib.pyplot as p
# Movie count per genre category.
cn=z.groupby('Categories').size()
c=z.Categories.unique()
p.bar(c,cn)
# Clustering features: columns 2 and 3 of z.
x=z.iloc[:,[2,3]].values
# %matplotlib inline
from sklearn.cluster import KMeans
import matplotlib.pyplot as p
# Elbow plot: inertia for k = 1..9.  NOTE(review): `a` is re-bound here from
# the genres DataFrame above to a list of inertias.
a=[]
for i in range(1,10):
    k=KMeans(n_clusters=i)
    k.fit(x)
    a.append(k.inertia_)
p.plot(range(1,10),a)
p.show()
# Cluster movies into 4 groups and scatter-plot each cluster.
m=KMeans(n_clusters=4,random_state=0)
y=m.fit_predict(x)
p.xlabel("Ratings")
p.ylabel("Popularity")
p.title("Movie Recommendations")
p.scatter(x[y==0,1],x[y==0,0],color='magenta',label='Avoid at all costs')
p.scatter(x[y==1,1],x[y==1,0],color='red',label='Not a waste of time')
p.scatter(x[y==2,1],x[y==2,0],color='green',label='Watch only when bored')
p.scatter(x[y==3,1],x[y==3,0],color='blue',label='Must Watch')
p.legend()
p.show()
# Fit a classifier mapping (rating-like, genre) features to the movie title
# in column 0 — used below to "recommend" a title per (rating, genre) query.
x1=z.iloc[:,[3,4]].values
y1=z.iloc[:,0].values
from sklearn.ensemble import RandomForestClassifier
rf=RandomForestClassifier(n_estimators=1000)
rf.fit(x1,y1)
print("Enter the genre of your choice:")
for k,v in gen.items():
print(k)
g=input(":")
try:
genre=gen[g]
except:
g=input("Re-enter the genre:")
genre=gen[g]
ratings=9.0
count=0
print("Recommended movies:")
while ratings>6 and count<=3:
print(rf.predict([[ratings,genre]]))
ratings-=1
count+=1
| project-Recommender System.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import re
import seaborn as sns
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from wordcloud import WordCloud,STOPWORDS
# %matplotlib inline
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
# -
df = pd.read_csv("articles_data.csv")
df.head()
df.describe()
df.shape
df.info()
df.columns
# Keep only the columns used in this analysis.
data = df[['source_id','author', 'title',
           'description']]
data.head()
data.isnull().sum()
# Drop rows with any missing values.
data = data.dropna()
data.isnull().sum()
data.shape
data.source_id.unique()
data['source_id'].value_counts()
sns.countplot(y=data['source_id'])
data['author'].value_counts().head(10)
data['author'].value_counts().head(10).plot(kind='barh')
data.head(10)
# Strip everything except letters, digits and spaces from the text columns.
data['title'] = data['title'].replace(r'[^A-Za-z0-9 ]+', '', regex=True)
data['description'] = data['description'].replace(r'[^A-Za-z0-9 ]+', '', regex=True)
data.head(10)
data['description'].sample(10)
data['description'][3]
def clean_text(text, lemmatizer = WordNetLemmatizer(),
               stop_words = set(stopwords.words('english'))):
    """Tokenize `text`, drop stopwords and non-alphabetic tokens, and
    lemmatize what remains.  The lemmatizer and stopword set defaults are
    built once at definition time and reused across calls."""
    tokens = word_tokenize(text)
    return [
        lemmatizer.lemmatize(tok)
        for tok in tokens
        if tok not in stop_words and tok.isalpha()
    ]
# # Title Text analysis
#
# Clean every title into a list of lemmatized, stopword-free tokens.
title_word_list = data['title'].apply(lambda x: clean_text(x))
title_word_list[:10]
# NOTE(review): the variable is re-bound here from token lists to token counts.
title_word_list = [len(w) for w in title_word_list]
plt.hist(title_word_list,bins = 25)
plt.title('wordlength for title')
plt.ylabel('count')
plt.xlabel('length of text')
plt.grid()
plt.show()
# All titles joined into a single token list for the n-gram analysis below.
word_title = clean_text(''.join(str(data['title'].tolist())))
word_title[:15]
# # Natural Language Toolkit
# The Natural Language Toolkit (NLTK) is a platform used for building Python programs that work with human language data for applying in statistical natural language processing (NLP). It contains text processing libraries for tokenization, parsing, classification, stemming, tagging and semantic reasoning
(pd.Series(nltk.ngrams(word_title,2)).value_counts())[:20]
# +
# 20 most frequent bigrams in the titles.
bigrams_series_title = (pd.Series(nltk.ngrams(word_title, 2)).value_counts())[:20]
bigrams_series_title.sort_values().plot.barh(color='blue', width=.9, figsize=(12, 8))
plt.title('20 Most Frequently Occuring Bigrams')
plt.ylabel('Bigram')
plt.xlabel('# of Occurances')
# -
# Word cloud over the title tokens.
word_cloud = WordCloud(background_color = 'white',max_words = 200,relative_scaling ='auto',colormap='winter')
word_cloud.generate(','.join(word_title))
plt.figure(figsize=(12,10))
plt.imshow(word_cloud,interpolation='bilinear')
plt.axis('off')
plt.show()
# # Description Text analysis
description_word_list = data['description'].apply(lambda x: clean_text(x))
description_word_list[:10]
# Token counts per description.
word_length_desc = [len(w) for w in description_word_list]
plt.hist(word_length_desc,bins=25)
plt.title('WordLength for Description')
plt.ylabel('count')
plt.xlabel('length of text')
plt.grid()
plt.show()
# Use the cleaned `data` frame (NaNs already dropped above) rather than the
# raw `df`, consistent with the title analysis — otherwise NaN rows leak
# "nan" tokens into the description word list.
words_desc = clean_text(''.join(str(data['description'].tolist())))
words_desc[:15]
(pd.Series(nltk.ngrams(words_desc,3)).value_counts())[:20]
# 20 most frequent trigrams in the descriptions.
trigrams_series_title = (pd.Series(nltk.ngrams(words_desc, 3)).value_counts())[:20]
trigrams_series_title.sort_values().plot.barh(color='blue',width=.9,figsize=(12, 8))
plt.title('20 most Frequently Occuring Trigrams')
plt.xlabel('of occurances')
plt.ylabel('trigram')
# +
# 20 most frequent bigrams in the descriptions.
bigrams_series_title = (pd.Series(nltk.ngrams(words_desc, 2)).value_counts())[:20]
bigrams_series_title.sort_values().plot.barh(color='blue', width=.9, figsize=(12, 8))
plt.title('20 Most Frequently Occuring Bigrams')
plt.ylabel('bigram')
plt.xlabel('# of Occurances')
# +
# Word cloud over the description tokens.
word_cloud = WordCloud(background_color="white",
                       max_words=2000,
                       relative_scaling='auto',
                       colormap='winter')
word_cloud.generate(','.join(words_desc))
plt.figure( figsize=(12,10) )
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis('off')
plt.show()
# -
| internet news articles EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bayesian Interpretation of Single-layer Networks
#
# - Author: <NAME> @ Carleton College
# - Date: March 17, 2020, Tuesday
# <img src='./demo_summary.png' width=700>
# ## Bayes' theorem
# For two classes, $C_1$ and $C_2$.
# $$p(C_1|x)=\frac{p(x|C_1)P(C_1)}{p(x|C_1)P(C_1) + p(x|C_2)P(C_2)}$$
# ## Expression that involves a log-ratio
# Suppose our single-layer network with a single output unit deals with a binary classification problem. For the output node $y_k$, $p(C^k_1|x)$ denotes the probability that $x$ belongs to $C^k_1$ and $1-p(C^k_1|x)$ denotes the probability that $x$ belongs to $C^k_2$. Here, $C^k_1$ and $C^k_2$ represents the two states of the $k$th binary output variable.
#
# Intuitively, we could train our single-layer network directly on a dataset with examples as inputs and posterior probabilities as outputs. However, note that probabilities are bounded by $[0, 1]$. Since this constraint is not inherently built into the neural network, there may be times when the network outputs values below zero or above one. What should one do when the network outputs 1.01? There is no clear answer.
#
# Therefore, we seek a way of representing a posterior probability in terms of another quantity (let's call it $a$) which is (1) unbounded ($a \in [-\infty, \infty]$) and (2) can be easily converted back to a probability. Recall the Bayes' rule, that the posterior probability is equal to the RHS:
#
# $$p(C_1|x)=\frac{p(x|C_1)P(C_1)}{p(x|C_1)P(C_1) + p(x|C_2)P(C_2)}$$
#
# So our real challenge is to represent the RHS in terms of $a$. Let's consider some unrelated ideas just for inspiration:
# - ratios between two positive numbers are bounded by $[0, \infty]$
# - log(ratios) between two positive numbers are bounded by $[-\infty, \infty]$
#
# To obtain a ratio, we divide both the numerator and the denominator of the RHS by the numerator:
# $$p(C_1|x)=\frac{1}{1 + \frac{p(x|C_2)P(C_2)}{p(x|C_1)P(C_1)}}$$
#
# To obtain a log(ratio), we use the trick $n=\exp\{ \log(n) \}$ for $n\in\mathbb{R}$:
# $$p(C_1|x)=\frac{1}{1 + \exp \{ a \} }$$ where we have defined $a$ to be $\log( \frac{p(x|C_2)P(C_2)}{p(x|C_1)P(C_1)} )$.
#
# Since we are evaluating $p(C_1|x)$ not $p(C_2|x)$, we would like $p(C_1|x)$ to have a positive gradient with respect to $p(x|C_1)P(C_1)$. Currently, this gradient is negative, which does not pose any serious problems except that it is counter-intuitive. To do this, we simply change the log(ratio) to:
# $$p(C_1|x)=\frac{1}{1 + \exp \{ -a \} }$$ where we have defined $a$ to be $- \log( \frac{p(x|C_2)P(C_2)}{p(x|C_1)P(C_1)} )=\log( \frac{p(x|C_1)P(C_1)}{p(x|C_2)P(C_2)} )$.
#
# The function $p(C_1|x)=\frac{1}{1 + \exp \{ -a \} }$ is called the logistic sigmoid function.
#
# Now we only seek to model the quantity $\log( \frac{p(x|C_1)P(C_1)}{p(x|C_2)P(C_2)} )$. To do this, we simply maximizes it as much as we can. But before optimizing it directly using gradient descent, let's substitute in the expressions for the two class-conditional distributions and see what form it takes. If this form matches the form of a single-layer network, then we are more confident that our single-layer network can handle the tasks of evaluating posterior probabilities.
# $$p(C_1|x)=\frac{1}{1 + \exp \{ -\log( \frac{p(x|C_1)P(C_1)}{p(x|C_2)P(C_2)} ) \} }$$
# ## Create a density function and sample data for testing
# +
import numpy as np
from scipy.stats import multivariate_normal
from seaborn import kdeplot
import matplotlib.pyplot as plt
import torch
from torch.utils.data import TensorDataset, DataLoader
import torch.nn as nn
import torch.optim as optim
# -
class GMM():
    """Two-component 2-D Gaussian mixture used as ground truth for the demo."""

    def __init__(self):
        # Prior probabilities P(C1), P(C2) and the class-conditional
        # densities p(x|C1), p(x|C2) (shared isotropic covariance 0.2*I).
        self.class_props = [0.3, 0.7]
        self.class_conditional_1 = multivariate_normal(mean=[1, 1], cov=0.2)
        self.class_conditional_2 = multivariate_normal(mean=[2, 1], cov=0.2)

    def sample_with_labels(self, n):
        """Draw n points from the mixture, returned separately per component."""
        comp_indices = np.random.choice([0, 1], size=n, p=self.class_props)
        comp_1_indices = np.where(comp_indices == 0)[0]
        comp_1_samples_all = self.class_conditional_1.rvs(n)
        if comp_1_samples_all.ndim == 1:
            # rvs squeezes the output when n == 1; restore the (n, dim) shape.
            comp_1_samples_all = np.expand_dims(comp_1_samples_all, 0)
        comp_1_samples = comp_1_samples_all[comp_1_indices]
        comp_2_indices = np.where(comp_indices == 1)[0]
        comp_2_samples_all = self.class_conditional_2.rvs(n)
        if comp_2_samples_all.ndim == 1:
            comp_2_samples_all = np.expand_dims(comp_2_samples_all, 0)
        comp_2_samples = comp_2_samples_all[comp_2_indices]
        return comp_1_samples, comp_2_samples

    def sample(self, n):
        """Draw n unlabeled, shuffled points from the mixture."""
        comp_1_samples, comp_2_samples = self.sample_with_labels(n)
        samples = np.concatenate([comp_1_samples, comp_2_samples])
        np.random.shuffle(samples)
        return samples

    def pdf(self, point):
        """Mixture density p(x) = sum_k P(C_k) * p(x|C_k)."""
        p = np.sum(self.class_props * np.array([self.class_conditional_1.pdf(point), self.class_conditional_2.pdf(point)]))
        return p

    def posterior(self, point, klass):
        """Posterior P(C_klass | point) via Bayes' rule.

        Bug fix: previously referenced the global `gmm` instead of `self`,
        so the method broke on any instance not bound to that global name.
        """
        if klass == 1:
            return self.class_conditional_1.pdf(point) * self.class_props[0] / self.pdf(point)
        elif klass == 2:
            return self.class_conditional_2.pdf(point) * self.class_props[1] / self.pdf(point)
gmm = GMM()
np.random.seed(52)
comp_1_samples, comp_2_samples = gmm.sample_with_labels(n=1000)
print(comp_1_samples.shape, comp_2_samples.shape)
point = np.array([1, 1.0])
# Slice helpers: column 0 is feature 1, column 1 is feature 2.
FEATURE_1 = np.s_[:,0]
FEATURE_2 = np.s_[:,1]
# +
fig = plt.figure(figsize=(12, 5))
# Left: KDE of each class-conditional sample set plus the query point.
ax1 = fig.add_subplot(121)
kdeplot(comp_1_samples[FEATURE_1], comp_1_samples[FEATURE_2], label=1, ax=ax1)
kdeplot(comp_2_samples[FEATURE_1], comp_2_samples[FEATURE_2], label=2, ax=ax1)
point = np.array([[1, 0.5]])
plt.scatter(point[FEATURE_1], point[FEATURE_2], label='Point to classify', color='red')
ax1.set_title('Class Conditional Densities')
ax1.set_xlabel('Feature 1'); ax1.set_ylabel('Feature 2')
ax1.legend(); ax1.grid()
# Right: KDE of the pooled (mixture) samples.
ax2 = fig.add_subplot(122)
samples = np.concatenate([comp_1_samples, comp_2_samples])
kdeplot(samples[FEATURE_1], samples[FEATURE_2], ax=ax2)
ax2.set_title('Overall Density')
ax2.set_xlabel('Feature 1'); ax2.set_ylabel('Feature 2')
ax2.grid()
plt.show()
# -
# ## Bayesian closed-form solution
# For a single data point:
# Posterior of the query point under each class; the two must sum to 1.
print('p(C_1|x):', gmm.posterior(point, klass=1))
print('p(C_2|x):', gmm.posterior(point, klass=2))
# For 5000 data points:
# +
# generate some test samples
# c1_test_samps: test samples generated by component 1
np.random.seed(19)
c1_test_samps, c2_test_samps = gmm.sample_with_labels(n=5000)
print(c1_test_samps.shape, c2_test_samps.shape)
# -
def acc(preds, decision_func):
    """Fraction of `preds` for which `decision_func` holds."""
    hits = decision_func(preds)
    return np.mean(hits)
# Classify each component-1 test sample with the exact posterior: mark 1
# when P(C1|x) > 0.5.  The resulting accuracy is the Bayesian baseline.
preds = np.zeros((len(c1_test_samps), ))
for i, samp in enumerate(c1_test_samps):
    posterior = gmm.posterior(samp, klass=1)
    if posterior > 0.5:
        preds[i] = 1
bayesian_baseline_acc_c1 = acc(preds, decision_func=lambda pred : pred == 1)
print(bayesian_baseline_acc_c1)
# Same for component-2 samples using P(C2|x).
preds = np.zeros((len(c2_test_samps), ))
for i, samp in enumerate(c2_test_samps):
    posterior = gmm.posterior(samp, klass=2)
    if posterior > 0.5: preds[i] = 1
bayesian_baseline_acc_c2 = acc(preds, decision_func=lambda pred : pred == 1)
print(bayesian_baseline_acc_c2)
# ## Bayesian closed-form solution in matrices
# First written: Sunday, March 15th, 2020, Spring Break, Cognition Lab @ Carleton College, Good mood
# If we substitute
# $p\left(\mathbf{x} | C_{k}\right)=\frac{1}{(2 \pi)^{d / 2}|\mathbf{\Sigma}|^{1 / 2}} \exp \left\{-\frac{1}{2}\left(\mathbf{x}-\boldsymbol{\mu}_{k}\right)^{\mathrm{T}} \mathbf{\Sigma}^{-1}\left(\mathbf{x}-\boldsymbol{\mu}_{k}\right)\right\}$
# into $a = \log( \frac{p(x|C_1)P(C_1)}{p(x|C_2)P(C_2)} )$, we obtain:
# <img src='./closed_form_solution.png' width=600>
# Posterior probabilities are computed using $p(C_1|x)=\frac{1}{1 + \exp \{ -a \} }$, or $p(C_1|x)=\frac{1}{1 + \exp \{ \vec{w}^T \vec{x} + x_0 \} }$ (this is what's being done by the code below).
# +
# define mu1
mu1 = np.array([
    [1],
    [1]
])
# define mu2
mu2 = np.array([
    [2],
    [1]
])
# -
# define shared covariance matrix
cov = np.array([
    [0.2, 0],
    [0, 0.2]
])
# define prior probabilities
P_C1, P_C2 = gmm.class_props
print(P_C1, P_C2)
# compute \vec{w} and w0 using equation 3.18 and 3.19 respectively
# (linear discriminant weights for shared-covariance Gaussians).
w = np.linalg.inv(cov) @ (mu1 - mu2)
w0 = (-0.5) * mu1.T @ np.linalg.inv(cov) @ mu1 + (0.5) * mu2.T @ np.linalg.inv(cov) @ mu2 + np.log(P_C1 / P_C2)
print(w, w0)
# defined utility functions
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
# Posterior P(C1|x) for every test sample via the closed-form weights;
# class-1 samples are correct when > 0.5, class-2 samples when < 0.5.
c1_test_classifications = sigmoid(w.T @ c1_test_samps.T + w0)
acc(c1_test_classifications, decision_func=lambda preds : preds > 0.5)
c2_test_classifications = sigmoid(w.T @ c2_test_samps.T + w0)
acc(c2_test_classifications, decision_func=lambda preds : preds < 0.5)
# Accuracy on class 2 is expected to be higher since its prior probability is greater - there are simply more samples from class 2.
# These accuracies are not expected to be exceeded by the gradient-descent approach (unless through over-fitting).
# ## Gradient-descent approx. solution
class SingleLayerNetwork(nn.Module):
    """Logistic regression as a one-layer network: Linear(2 -> 1) + Sigmoid."""
    def __init__(self):
        super().__init__()
        self.main = nn.Sequential(
            nn.Linear(in_features=2, out_features=1, bias=True),
            nn.Sigmoid()
        )
    def forward(self, xs):
        # (batch, 2) -> (batch, 1), flattened to (batch,) for BCELoss.
        xs = self.main(xs)
        return xs.view(xs.size(0))
def get_model():
    """Return a fresh network together with its Adam optimizer."""
    model = SingleLayerNetwork()
    return model, optim.Adam(model.parameters(), lr=0.06)
loss_fn = nn.BCELoss()
# Generate test samples and labels:
# Label convention: 0 = component 1, 1 = component 2.
test_samps = torch.from_numpy(np.concatenate([c1_test_samps, c2_test_samps]))
test_labels = torch.from_numpy(np.concatenate(
    [
        np.zeros(len(c1_test_samps)),
        np.ones(len(c2_test_samps))
    ]
))
# Generate train samples and labels:
np.random.seed(23)
train_samps_c1, train_samps_c2 = gmm.sample_with_labels(n=5000)
print(train_samps_c1.shape, train_samps_c2.shape)
train_samps = torch.from_numpy(np.concatenate([train_samps_c1, train_samps_c2]))
train_labels = torch.from_numpy(np.concatenate(
    [
        np.zeros(len(train_samps_c1)),
        np.ones(len(train_samps_c2)),
    ]
))
test_ds = TensorDataset(test_samps, test_labels)
test_dl = DataLoader(test_ds, batch_size=64, shuffle=True)
train_ds = TensorDataset(train_samps, train_labels)
train_dl = DataLoader(train_ds, batch_size=128, shuffle=True)
# Define utility functions for computing accuracy for each class separately:
# Per-class prediction/accuracy helpers.  Labels: 0 = component 1, 1 = component 2.
def get_preds_of_c1_samps(yhatb, yb):
    """Predictions restricted to samples whose true label is component 1."""
    c1_rows = np.where(yb.numpy() == 0)[0]
    return yhatb.detach().numpy()[c1_rows]
def get_preds_of_c2_samps(yhatb, yb):
    """Predictions restricted to samples whose true label is component 2."""
    c2_rows = np.where(yb.numpy() == 1)[0]
    return yhatb.detach().numpy()[c2_rows]
def get_acc_of_c1_samps(yhatb, yb):
    """Accuracy on component-1 samples (correct when prediction < 0.5)."""
    return np.mean(get_preds_of_c1_samps(yhatb, yb) < 0.5)
def get_acc_of_c2_samps(yhatb, yb):
    """Accuracy on component-2 samples (correct when prediction > 0.5)."""
    return np.mean(get_preds_of_c2_samps(yhatb, yb) > 0.5)
# Training:
class MetricCallback():
    """Accumulates a batch-size-weighted average of a metric per epoch."""
    def on_train_begin(self):
        # One averaged metric value per completed epoch.
        self.value_per_epoch = []
    def on_epoch_begin(self):
        # Running total of samples seen and their weighted metric values.
        self.num_batches = 0
        self.weighted_values = []
    def on_loss_end(self, value, bs):
        # Weight each batch's metric by its batch size.
        self.weighted_values.append(value * bs)
        self.num_batches += bs
    def on_epoch_end(self):
        epoch_average = np.sum(self.weighted_values) / self.num_batches
        self.value_per_epoch.append(epoch_average)
# +
# Train the single-layer network for 100 epochs, tracking per-class
# validation accuracy with batch-size-weighted callbacks.
model, opt = get_model()
acc_cb_c1 = MetricCallback()
acc_cb_c2 = MetricCallback()
acc_cb_c1.on_train_begin()
acc_cb_c2.on_train_begin()
for i in range(100):
    acc_cb_c1.on_epoch_begin()
    acc_cb_c2.on_epoch_begin()
    # ========== train ==========
    model.train()
    for xb, yb in train_dl:
        yhatb = model(xb.float())
        loss = loss_fn(yhatb, yb.float())
        loss.backward()
        opt.step()
        opt.zero_grad()
    # ========== validation ==========
    model.eval()
    for xb, yb in test_dl:
        yhatb = model(xb.float())
        bs_c1 = np.sum(yb.numpy() == 0)
        if not bs_c1 == 0:
            temp_acc_c1 = get_acc_of_c1_samps(yhatb, yb)
        else:
            # sometimes batches contain no samples from component one
            # although we set acc to 1 here, its weighted version will be zero since bs_c1 is zero
            temp_acc_c1 = 1.0
        acc_cb_c1.on_loss_end(temp_acc_c1, bs_c1)
        bs_c2 = np.sum(yb.numpy() == 1)
        if not bs_c2 == 0:
            temp_acc_c2 = get_acc_of_c2_samps(yhatb, yb)
        else:
            temp_acc_c2 = 1.0
        acc_cb_c2.on_loss_end(temp_acc_c2, bs_c2)
    acc_cb_c1.on_epoch_end()
    acc_cb_c2.on_epoch_end()
# +
fig = plt.figure(figsize=(12, 6))
# ========== first subplot: GD accuracy vs. the Bayesian baselines ==========
fig.add_subplot(121)
plt.plot([0, 100], [bayesian_baseline_acc_c1, bayesian_baseline_acc_c1], label='C1 Bayesian Acc')
plt.plot(acc_cb_c1.value_per_epoch, label='C1 GD Acc')
plt.plot([0, 100], [bayesian_baseline_acc_c2, bayesian_baseline_acc_c2], label='C2 Bayesian Acc')
plt.plot(acc_cb_c2.value_per_epoch, label='C2 GD Acc')
plt.legend()
plt.title('Validation Accuracy VS. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim(0.7, 1.0)
# ========== second subplot: learned decision boundary ==========
fig.add_subplot(122)
weight, bias = list(model.parameters())
weight, bias = weight.detach().numpy()[0], bias.detach().numpy()
# The boundary is where the linear activation is zero:
# weight[0] * feature_1 + weight[1] * feature_2 + bias = 0
# feature_2 = (-weight[0] * feature_1 - bias) / weight[1]
xs = np.array([-1, 4])
ys = (-weight[0] * xs - bias) / weight[1]
plt.scatter(c1_test_samps[FEATURE_1], c1_test_samps[FEATURE_2], s=0.5, alpha=0.4, label='C1 Points')
plt.scatter(c2_test_samps[FEATURE_1], c2_test_samps[FEATURE_2], s=0.5, alpha=0.4, label='C2 Points')
plt.plot(xs, ys, label='GD Decision Boundary')
plt.legend()
plt.title('Decision Boundary Learned by GD')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.xlim(-1, 4)
plt.ylim(-1, 3)
plt.show()
# -
# We can see that GD accuracy oscillates around Bayesian accuracy - Bayesian accuracy seems to be the maximum accuracy you can get on this dataset.
| c3_single_layer_networks/bayesian_interpretation/bayesian_interpretation_of_singleLayer_network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Density Recover
# ## <NAME>
# 2022
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats as stats
from scipy.integrate import quad
from scipy.fft import fft, ifft
from scipy.interpolate import interp1d
from functools import partial
from scipy.optimize import minimize, fsolve
import tqdm
# %matplotlib inline
plt.style.use('ggplot')
# -
# ## Fourier-Cosine Method
def COSDensity(cf, x, N, a, b):
    """Recover a density from its characteristic function via the
    Fourier-cosine (COS) expansion.

    Parameters:
        cf: characteristic function, vectorized over a frequency array.
        x: grid of points at which to evaluate the recovered density.
        N: number of cosine expansion terms.
        a, b: truncation interval assumed to contain the density's support.

    Returns the recovered density values, one per entry of `x`.
    """
    i = complex(0.0, 1.0)
    k = np.linspace(0, N-1, N)
    # Expansion frequencies u_k = k * pi / (b - a).
    # (Removed a dead `u = np.zeros([1, N])` that was immediately overwritten.)
    u = k * np.pi / (b-a)
    # F_k coefficients from the characteristic function.
    F_k = 2.0 / (b - a) * np.real(cf(u) * np.exp(-i * u * a))
    F_k[0] = F_k[0] * 0.5  # the k = 0 term is weighted by one half
    # Cosine-series reconstruction: (N,) @ (N, len(x)) -> (len(x),)
    f_X = np.matmul(F_k, np.cos(np.outer(u, x - a)))
    return f_X
# +
# Recover N(1, 2^2) on [-10, 10] with 128 COS terms and compare to the
# exact density.
mu = 1
sigma = 2
i = complex(0.0, 1.0)
x = np.linspace(-10.0, 10, 1000)
f_XExact = stats.norm.pdf(x, mu, sigma)
N = 2**7
a = -10
b = 10
# Characteristic function of N(mu, sigma^2).
ChF = lambda u : np.exp(i * mu * u - 0.5 * sigma**2 * u**2)
# -
COS_Density_Recover = COSDensity(ChF, x, N, a, b)
# Pointwise error at the right edge of the grid.
error_cos = abs(COS_Density_Recover[-1] - f_XExact[-1])
# +
# Overlay the recovered density on the exact one.
plt.subplots(figsize=(10, 5), dpi=100)
plt.plot(x, COS_Density_Recover, label=f'COS MEthod, N={N}')
plt.plot(x, f_XExact, '--', label='fX', c='black')
plt.title('COS Method of Density Recover', fontsize=16)
plt.xlabel('x', fontsize=14)
plt.ylabel('PDF', fontsize=14)
plt.legend()
plt.show()
# -
# ## Fast Fourier Transformation
def FFTDensity(cf, x, N):
    """Recover a probability density from its characteristic function `cf`
    with an N-point FFT inversion, then cubically interpolate onto `x`."""
    imag_unit = complex(0.0, 1.0)
    # Frequency grid: N equally spaced points on [0, 20).
    max_freq = 20.0
    freq_step = max_freq / N
    freqs = np.linspace(0, N-1, N) * freq_step
    # Spatial grid induced by the FFT reciprocity relation du * dx = 2*pi/N,
    # anchored at the left edge of the requested grid.
    left_edge = np.min(x)
    space_step = 2 * np.pi / (N * freq_step)
    space_grid = left_edge + np.linspace(0, N-1, N) * space_step
    # Phase-shifted characteristic function plus trapezoid end corrections.
    shifted = np.exp(-imag_unit * left_edge * freqs) * cf(freqs)
    corr_lo = np.exp(-imag_unit * space_grid * freqs[0]) * cf(freqs[0])
    corr_hi = np.exp(-imag_unit * space_grid * freqs[-1]) * cf(freqs[-1])
    density = freq_step / np.pi * np.real(fft(shifted) - 0.5 * (corr_lo + corr_hi))
    # Cubic interpolation back onto the caller's grid.
    return interp1d(space_grid, density, kind='cubic')(x)
# +
# Recover N(1, 2^2) with the FFT method and compare to the exact density.
mu = 1
sigma = 2
i = complex(0.0, 1.0)
x = np.linspace(-10.0, 10, 1000)
f_XExact = stats.norm.pdf(x, mu, sigma)
N = 2**7
# Characteristic function of N(mu, sigma^2).
ChF = lambda u : np.exp(i * mu * u - 0.5 * sigma**2 * u**2)
# -
fft_density_recover = FFTDensity(ChF, x, N)
# Pointwise error at the right edge, for comparison with the COS method.
error_fft = abs(fft_density_recover[-1] - f_XExact[-1])
# +
# Overlay the FFT-recovered density on the exact one.
plt.subplots(figsize=(10, 5), dpi=100)
plt.plot(x, fft_density_recover, label=f'FFT, N={N}')
plt.plot(x, f_XExact, '--', label='fX', c='black')
plt.title('FFT Method of Density Recover', fontsize=16)
plt.xlabel('x', fontsize=14)
plt.ylabel('PDF', fontsize=14)
plt.legend()
plt.show()
# -
# ### Errors comparing
# Edge-point errors of the two methods, rounded to 10 decimals.
round(error_fft, 10)
round(error_cos, 10)
| Models-Simulation/density_recover.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
class Node(object):
    """
    Base class for nodes in the network.

    Arguments:

        `inbound_nodes`: A list of nodes with edges into this node.
    """
    def __init__(self, inbound_nodes=None):
        """
        Node's constructor (runs when the object is instantiated). Sets
        properties that all nodes need.
        """
        # Bug fix: the default was a mutable `[]`, which Python evaluates
        # once at definition time — every Node created without explicit
        # inbound nodes would have shared the same list object.
        if inbound_nodes is None:
            inbound_nodes = []
        # A list of nodes with edges into this node.
        self.inbound_nodes = inbound_nodes
        # The eventual value of this node. Set by running
        # the forward() method.
        self.value = None
        # A list of nodes that this node outputs to.
        self.outbound_nodes = []
        # Keys are the inputs to this node and their values are the
        # partials of this node with respect to that input.
        self.gradients = {}
        # Sets this node as an outbound node for all of
        # this node's inputs.
        for node in inbound_nodes:
            node.outbound_nodes.append(self)

    def forward(self):
        """
        Every node that uses this class as a base class will
        need to define its own `forward` method.
        """
        raise NotImplementedError

    def backward(self):
        """
        Every node that uses this class as a base class will
        need to define its own `backward` method.
        """
        raise NotImplementedError
class Input(Node):
    """
    A generic input into the network.
    """
    def __init__(self):
        # An Input has no inbound nodes; its `value` is assigned externally
        # (during `topological_sort`), so only the base constructor runs.
        Node.__init__(self)

    def forward(self):
        """Inputs compute nothing; their value is set from outside."""
        pass

    def backward(self):
        """Accumulate the cost gradient with respect to this input.

        An Input has no inputs of its own, so its only partial is with
        respect to itself, summed over every consumer's gradient.
        """
        self.gradients = {self: 0}
        for consumer in self.outbound_nodes:
            self.gradients[self] += consumer.gradients[self]
class Linear(Node):
    """
    Represents a node that performs a linear transform: X.W + b.
    """
    def __init__(self, X, W, b):
        # Inputs, weights and bias are all treated as inbound nodes.
        Node.__init__(self, [X, W, b])

    def forward(self):
        """
        Performs the math behind a linear transform.
        """
        X_node, W_node, b_node = self.inbound_nodes
        self.value = np.dot(X_node.value, W_node.value) + b_node.value

    def backward(self):
        """
        Calculates the gradient based on the output values.
        """
        X_node, W_node, b_node = self.inbound_nodes
        # One zero-initialized partial per inbound node.
        self.gradients = {
            X_node: np.zeros_like(X_node.value),
            W_node: np.zeros_like(W_node.value),
            b_node: np.zeros_like(b_node.value),
        }
        # Gradients are summed over all consumers of this node.
        for consumer in self.outbound_nodes:
            grad_cost = consumer.gradients[self]
            # dC/dX = dC/dout . W^T
            self.gradients[X_node] += np.dot(grad_cost, W_node.value.T)
            # dC/dW = X^T . dC/dout
            self.gradients[W_node] += np.dot(X_node.value.T, grad_cost)
            # dC/db = column-wise sum of dC/dout
            self.gradients[b_node] += np.sum(grad_cost, axis=0, keepdims=False)
class Sigmoid(Node):
    """
    Represents a node that applies the sigmoid activation function.
    """
    def __init__(self, node):
        Node.__init__(self, [node])

    def _sigmoid(self, x):
        """
        Logistic function; kept separate from `forward` because it is
        conceptually reused by `backward` (via the cached output).
        `x`: A numpy array-like object.
        """
        return 1. / (1. + np.exp(-x))

    def forward(self):
        """
        Apply the sigmoid to the single input node's value.
        """
        self.value = self._sigmoid(self.inbound_nodes[0].value)

    def backward(self):
        """
        Accumulate the gradient using the sigmoid derivative
        s * (1 - s), where s is the cached forward-pass output.
        """
        input_node = self.inbound_nodes[0]
        self.gradients = {input_node: np.zeros_like(input_node.value)}
        s = self.value
        for consumer in self.outbound_nodes:
            self.gradients[input_node] += s * (1 - s) * consumer.gradients[self]
class MSE(Node):
    def __init__(self, y, a):
        """
        The mean squared error cost function.
        Should be used as the last node for a network.
        """
        Node.__init__(self, [y, a])

    def forward(self):
        """
        Calculates the mean squared error.
        """
        y_node, a_node = self.inbound_nodes
        # Reshape both inputs to (m, 1) columns so the subtraction is
        # elementwise: (3,) minus (3, 1) would broadcast to (3, 3).
        y = y_node.value.reshape(-1, 1)
        a = a_node.value.reshape(-1, 1)
        # Number of examples, reused by backward().
        self.m = y_node.value.shape[0]
        # Cache the residual so backward() does not recompute it.
        self.diff = y - a
        self.value = np.mean(self.diff ** 2)

    def backward(self):
        """
        Calculates the gradient of the cost w.r.t. y and a.
        """
        y_node, a_node = self.inbound_nodes
        scale = 2 / self.m
        self.gradients[y_node] = scale * self.diff
        self.gradients[a_node] = -scale * self.diff
def topological_sort(feed_dict):
    """
    Sort the nodes in topological order using Kahn's Algorithm.
    `feed_dict`: A dictionary where the key is a `Input` Node and the value is the respective value feed to that Node.
    Returns a list of sorted nodes.
    """
    input_nodes = list(feed_dict.keys())

    # Discover the whole graph from the inputs and record edge sets.
    G = {}
    frontier = list(input_nodes)
    while frontier:
        node = frontier.pop(0)
        G.setdefault(node, {'in': set(), 'out': set()})
        for successor in node.outbound_nodes:
            G.setdefault(successor, {'in': set(), 'out': set()})
            G[node]['out'].add(successor)
            G[successor]['in'].add(node)
            frontier.append(successor)

    # Kahn's algorithm: repeatedly emit a node with no remaining
    # incoming edges, removing its outgoing edges as we go.
    ordering = []
    ready = set(input_nodes)
    while ready:
        node = ready.pop()
        if isinstance(node, Input):
            # Inputs get their value straight from the feed dict.
            node.value = feed_dict[node]
        ordering.append(node)
        for successor in node.outbound_nodes:
            G[node]['out'].remove(successor)
            G[successor]['in'].remove(node)
            # if no other incoming edges, the successor is ready
            if not G[successor]['in']:
                ready.add(successor)
    return ordering
def forward_and_backward(graph):
    """
    Performs a forward pass and a backward pass through a list of sorted Nodes.

    Arguments:
        `graph`: The result of calling `topological_sort`.
    """
    # Forward pass in topological order.
    for node in graph:
        node.forward()
    # Backward pass in exactly the reverse order.
    for node in reversed(graph):
        node.backward()
# -
def sgd_update(trainables, learning_rate=1e-2):
    """
    Updates the value of each trainable with SGD.

    Arguments:
        `trainables`: A list of `Input` Nodes representing weights/biases.
        `learning_rate`: The learning rate.
    """
    # Vanilla SGD step: value <- value - lr * dCost/dvalue.
    # The augmented assignment updates numpy parameter arrays in place,
    # so any external aliases of the parameter arrays stay in sync.
    for trainable in trainables:
        trainable.value -= learning_rate * trainable.gradients[trainable]
# +
from sklearn.datasets import load_boston
from sklearn.utils import shuffle, resample
np.random.seed(0)
# Load data
# NOTE(review): `load_boston` was removed from scikit-learn 1.2; this
# cell needs an older scikit-learn — confirm the pinned version.
data = load_boston()
X_ = data['data']
y_ = data['target']
# Normalize data (zero mean, unit variance per feature)
X_ = (X_ - np.mean(X_, axis=0)) / np.std(X_, axis=0)
n_features = X_.shape[1]
n_hidden = 10
# Random init for the layer weights, zeros for the biases.
W1_ = np.random.randn(n_features, n_hidden)
b1_ = np.zeros(n_hidden)
W2_ = np.random.randn(n_hidden, 1)
b2_ = np.zeros(1)
# Neural network: Linear -> Sigmoid -> Linear, trained with MSE.
X, y = Input(), Input()
W1, b1 = Input(), Input()
W2, b2 = Input(), Input()
l1 = Linear(X, W1, b1)
s1 = Sigmoid(l1)
l2 = Linear(s1, W2, b2)
cost = MSE(y, l2)
feed_dict = {
    X: X_,
    y: y_,
    W1: W1_,
    b1: b1_,
    W2: W2_,
    b2: b2_
}
epochs = 10
# Total number of examples
m = X_.shape[0]
batch_size = 11
steps_per_epoch = m // batch_size
graph = topological_sort(feed_dict)
trainables = [W1, b1, W2, b2]
print("Total number of examples = {}".format(m))
# Step 4
for i in range(epochs):
    loss = 0
    for j in range(steps_per_epoch):
        # Step 1
        # Randomly sample a batch of examples (resample draws with replacement)
        X_batch, y_batch = resample(X_, y_, n_samples=batch_size)
        # Reset value of X and y Inputs
        X.value = X_batch
        y.value = y_batch
        # Step 2
        forward_and_backward(graph)
        # Step 3
        sgd_update(trainables)
        # The last node of the sorted graph is the MSE cost.
        loss += graph[-1].value
    print("Epoch: {}, Loss: {:.3f}".format(i+1, loss/steps_per_epoch))
# -
# Sample output (pasted notebook output — not valid Python, kept as a comment):
# Total number of examples = 506
# Epoch: 1, Loss: 137.969
# Epoch: 2, Loss: 42.464
# Epoch: 3, Loss: 30.773
# Epoch: 4, Loss: 22.056
# Epoch: 5, Loss: 28.491
# Epoch: 6, Loss: 23.760
# Epoch: 7, Loss: 20.902
# Epoch: 8, Loss: 23.552
# Epoch: 9, Loss: 21.319
# Epoch: 10, Loss: 18.262
# +
# Quick sanity check on the sklearn digits dataset: print its shape and
# display the first 8x8 image.
from sklearn.datasets import load_digits
digits = load_digits()
print(digits.data.shape)
import matplotlib.pyplot as plt
plt.gray()
plt.matshow(digits.images[0])
plt.show()
# -
| miniflow/MiniFlow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import anndata
import numpy as np
import pandas as pd
import dill as pickle
import sys
sys.path.append("..")
import autogenes as ag
# -
# # Test load
# Restore a previously saved AutoGeneS run and inspect its results.
ag.load("saved_ag.pickle")
ag.plot()
ag.selection()
# # Test interrupted
# Reload an interrupted run and continue the optimization where it stopped.
ag.load('interrupted_ag.pickle')
ag.resume()
| tests_jupyter/load_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: anonlink-venv
# language: python
# name: anonlink-venv
# ---
# # Choosing a threshold
#
# Imagine that you are a data analyst. You are given two sets of CLKs (possibly generated using clkhash) and it is your task to use anonlink produce a linkage.
#
# Luckily for you, anonlink takes care of most of this process. However, it needs to be given one parameter: the _threshold_. The threshold is a number that defines how similar two CLKs must be in order to be accepted as a match. In this example, we are using the Sørensen–Dice coefficient as the similarity metric, so the threshold is a number between 0 and 1.
#
# Choosing a threshold can be nontrivial, but anonlink has some tools to help you with this selection. Let's explore them.
# Preparation
# %matplotlib inline
# +
import base64
import json
import bitarray
import matplotlib.pyplot as plt
import anonlink
plt.rcParams['figure.figsize'] = [10, 7]
plt.rcParams['figure.dpi'] = 100
def to_bitarray(bytes_):
    """Deserialize raw bytes into a `bitarray.bitarray`."""
    bits = bitarray.bitarray()
    bits.frombytes(bytes_)
    return bits
def load_clks(path):
    """Load a JSON file of base64-encoded CLKs as a tuple of bitarrays."""
    with open(path) as f:
        encoded_clks = json.load(f)['clks']
    # Decode each base64 string, then unpack the raw bytes into a bitarray.
    return tuple(to_bitarray(base64.b64decode(clk)) for clk in encoded_clks)
# CLK sets for the two data providers to be linked.
clks1 = load_clks('clks-1.json')
clks2 = load_clks('clks-2.json')
# -
# ## Computing similarities
# We are going to produce a few plots that will give us hints to the location of the threshold. These plots use the distribution of the similarity scores. This means that before we can make them, we need to compute some similarities.
#
# As a complication, to compute similarity scores, we need to input a threshold. _This_ threshold is not used for linkage—it is merely a cutoff value for our plots. Higher thresholds save processing time, but may produce incomplete graphs. For plotting purposes, a threshold value of 0 is safe, as long as you don't mind waiting a little.
# +
PLOTTING_THRESHOLD = 0
all_candidate_pairs = anonlink.candidate_generation.find_candidate_pairs(
[clks1, clks2],
anonlink.similarities.dice_coefficient,
PLOTTING_THRESHOLD)
# -
# ## Histogram of similarities
# Let's plot a histagram of all the similarity scores! We'll use a log scale to deal with differences in order of magnitude.
# +
def plot_similarities_hist(candidate_pairs, bins=100):
    """Plot a histogram of the similarity scores in candidate pairs.

    :param candidate_pairs: The candidate pairs.
    :param bins: An integer determining the number of bins to use.
        Default 100.
    """
    counts, bin_boundaries = anonlink.stats.similarities_hist(
        candidate_pairs, bins)
    # Feed the precomputed counts back through plt.hist so the bars line
    # up exactly with anonlink's bin edges.
    plt.hist(bin_boundaries[:-1], bins=bin_boundaries, weights=counts)
    # Log scale, but lets some values be 0.
    plt.yscale('symlog')
    plt.ylabel('Count')
    plt.xlabel('Similarity')
    plt.title('Histogram of similarities')
plot_similarities_hist(all_candidate_pairs)
# -
# We can clearly see two populations. A big population of non-matching pairs with similarities <0.8 and a much smaller population of matching pairs with similarities >0.8.
#
# The large separation between those population is a sign that our data is reasonably clean and our encoding schema is doing a good job. The populations are not separated as cleanly in more difficult problems.
#
# Generally, the optimal threshold will be at the bottom of the trough between the two distributions.
#
# ## Cumulative matches by threshold
# We can also plot the number of matches we would get by setting a particular threshold.
# +
def plot_cumul_number_matches_vs_threshold(candidate_pairs, steps=100):
    """Plot the number of matches for each threshold.

    We use the 2-party greedy solver to calculate the number of matches
    that would be returned if the candidate_pairs were found using a
    particular threshold. This function requires only a single pass of
    the data, so it is faster than simply running the greedy solver
    multiple times.

    :param candidate_pairs: The candidate pairs.
    :param steps: An integer determining the number of threshold steps
        to use. Default 100.
    """
    match_counts, threshold_values = anonlink.stats.cumul_number_matches_vs_threshold(
        candidate_pairs, steps)
    plt.plot(threshold_values, match_counts)
    plt.ylabel('Number of matches')
    plt.xlabel('Threshold')
    plt.title('Number of matches by threshold')
plot_cumul_number_matches_vs_threshold(all_candidate_pairs)
# -
# We can see that the curve levels off around threshold of 0.8.
#
# The set of all possible pairs can be split into (1) a set of matches that may or may not be accepted (depending on the threshold) and (2) a much bigger set of pairs that will never be accepted by the greedy solver. The smaller set of possible matches has two well-separated subpopulations, and 0.8 appears to be right in-between. This makes 0.8 a good threshold.
#
# In less clean data, there might not be a clearly visible 'saddle point'.
#
# ## Possible matches vs definite nonmatches ratio by threshold
# Finally, we can split the similarities into many buckets by threshold, and plot the ratio of possible matches against definite nonmatches by threshold. Definite nonmatches are pairs that will never be accepted by the greedy solver, whereas possible matches are ones that may or may not be accepted, depending on the threshold.
# +
def plot_matches_nonmatches_ratio_hist(candidate_pairs, bins=100):
    """Plot the ratio of possible matches and definite nonmatches.

    We use the greedy solver to split the candidate pairs into possible
    matches and definite nonmatches. A possible match may or may not be
    accepted as a pair depending on the threshold chosen. A definite
    nonmatch will never be accepted, since one record in this pair has
    a more promising match with another record. We then plot the ratio
    of possible matches and definite nonmatches.

    :param candidate_pairs: The candidate pairs.
    :param bins: An integer determining the number of bins to use.
        Default 100.
    """
    matches_num, nonmatches_num, bin_boundaries = anonlink.stats.matches_nonmatches_hist(
        candidate_pairs, bins)
    # NOTE(review): assumes matches_num/nonmatches_num are numpy arrays so
    # `+` adds elementwise; plain lists would concatenate — confirm against
    # anonlink.stats.matches_nonmatches_hist's return type.
    all_num = matches_num + nonmatches_num
    # Per-bin percentages; empty bins are reported as 0 to avoid div-by-zero.
    nonmatches_ratio = [nn * 100 / an if an else 0 for nn, an in zip(nonmatches_num, all_num)]
    matches_ratio = [mn * 100 / an if an else 0 for mn, an in zip(matches_num, all_num)]
    # Uniform bin width derived from the edge array.
    width = (bin_boundaries[-1] - bin_boundaries[0]) / (bin_boundaries.shape[0] - 1)
    plt.bar(bin_boundaries[:-1], matches_ratio,
            width=width, align='edge', label='Possible matches')
    # Nonmatch bars stacked on top of the match bars.
    plt.bar(bin_boundaries[:-1], nonmatches_ratio,
            width=width, bottom=matches_ratio, align='edge', label='Definite nonmatches')
    plt.legend()
    plt.title('Proportion of possible matches by similarity')
    plt.xlabel('Similarity')
    plt.ylabel('Proportion (%)')
plot_matches_nonmatches_ratio_hist(all_candidate_pairs)
# -
# We find that all matches with a threshold of around 0.8 or more are accepted. This reinforces our previous beliefs that a good threshold is around 0.8.
#
# ## Deciding on a threshold
# The above plots let us decide on a threshold. It will be around 0.8. Let's pick 0.81, since that's where the trough in the first plot lies.
#
# ## Results
# Using the plots above, an analyst might choose a threshold of 0.81. Let's perform this linkage.
# +
THRESHOLD = .81
# Recompute the candidate pairs at the chosen operating threshold.
results_candidate_pairs = anonlink.candidate_generation.find_candidate_pairs(
    [clks1, clks2],
    anonlink.similarities.dice_coefficient,
    THRESHOLD)
# Greedily assign each record to at most one match.
solution = anonlink.solving.greedy_solve(results_candidate_pairs)
found_pairs = {(i, j) for (_, i), (_, j) in map(sorted, solution)}
# -
# We can cheat a little since we actually have the ground truth. (Normally having the ground truth would defeat the point of this linkage!) Let's compute the actual accuracy.
# +
with open('ground-truth.json') as f:
    true_pairs = set(map(tuple, json.load(f)))
# Standard precision / recall / F1 against the known true pairs.
true_positives = len(found_pairs & true_pairs)
false_positives = len(found_pairs) - true_positives
false_negatives = len(true_pairs) - true_positives
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
f1_score = 2 * true_positives / (2 * true_positives + false_negatives + false_positives)
print(f'Precision: {precision:0.3}')
print(f'Recall: {recall:0.3}')
print(f'F1 score: {f1_score:0.3}')
# -
# An F1 score of .99 is very good. We might be able to improve upon it by adding another significant figure to our threshold (of course, finding it would require more fine-grained plots).
#
# Small deviations on this threshold would help us fine-tune the balance between precision and recall.
| docs/examples/similarity-plots/similarity-plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Rental listings, semicolon-separated CSV.
dados = pd.read_csv('dados/aluguel.csv', sep = ';')
dados.head(10)
tipo_de_imovel = dados['Tipo']
type(tipo_de_imovel)
# NOTE(review): in-place drop_duplicates on a Series taken from the
# DataFrame can trigger pandas' chained-assignment warning; consider
# assigning the result instead — confirm against the pandas version used.
tipo_de_imovel.drop_duplicates(inplace = True)
tipo_de_imovel
| .ipynb_checkpoints/Curso Pandas #2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="qWu3mtL5HxZH"
# # Calculating similar books based on reviews
# + id="owyfLGo_y2Bc"
# + executionInfo={"elapsed": 62627, "status": "ok", "timestamp": 1608176652055, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="JI3XtJof3HwJ"
import pickle
import pandas as pd
with open('/content/drive/MyDrive/GoodReadsClean/IntermediateData/ReviewVars/review_tf_idf.pkl', 'rb') as handle:
tidf = pickle.load(handle)
with open('/content/drive/MyDrive/GoodReadsClean/IntermediateData/ReviewVars/review_map_workID_index.pkl', 'rb') as handle:
rev_bks = pickle.load(handle)
bk_data = pd.read_pickle("/content/drive/MyDrive/GoodReadsClean/IntermediateData/BooksVars/book_metadata.pkl")
# + executionInfo={"elapsed": 2330, "status": "ok", "timestamp": 1608176684080, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="QF5lvggT4OoH"
bk_data.loc[bk_data.work_id == "", "work_id"] = bk_data[bk_data.work_id == ""].book_id
df = bk_data
id_title = dict(zip(df.work_id, df.title))
# + executionInfo={"elapsed": 8137, "status": "ok", "timestamp": 1608176692810, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="e8GH7sJMGn6J"
# Bidirectional lookups between work ids and book (edition) ids; one
# work can map to several editions and vice versa, hence lists.
work_book_map = {}
book_work_map = {}
for x,y in zip(df.work_id, df.book_id):
    if x not in work_book_map.keys():
        work_book_map[x] = []
    work_book_map[x].append(y)
    if y not in book_work_map.keys():
        book_work_map[y] = []
    book_work_map[y].append(x)
# + executionInfo={"elapsed": 756, "status": "ok", "timestamp": 1608176694617, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="iFyNC7p_K9VL"
import pickle
def save_object(obj, filename):
    """Pickle `obj` to `filename`, overwriting any existing file."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, pickle.HIGHEST_PROTOCOL)
#save_object(book_work_map, "/content/drive/MyDrive/GoodReadsClean/IntermediateData/OtherVars/book_work_map.pkl")
# + id="5YDCEQkc4SfF"
rev_rev_bks = {cnt:val for cnt, val in enumerate(rev_bks)}
# + colab={"base_uri": "https://localhost:8080/", "height": 419} executionInfo={"elapsed": 4070, "status": "ok", "timestamp": 1608100308974, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="C8XlCPjn-jUf" outputId="446ecf23-9503-44a9-a5f8-211b355e7e94"
bk_data[bk_data.title.str.lower().str.contains("mistborn")]
# + executionInfo={"elapsed": 739, "status": "ok", "timestamp": 1608176117441, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="hoja89nSBxSO"
# + executionInfo={"elapsed": 984, "status": "ok", "timestamp": 1608176782944, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="e0alrXQhDYIF"
import numpy as np
from sklearn.metrics.pairwise import linear_kernel
def reccomendReview(id, n, verbose = False):
    """Return the `n` works most similar to work `id` by review tf-idf.

    Relies on notebook globals: `rev_bks` (work id -> sparse row index),
    `tidf` (review tf-idf matrix), `rev_rev_bks` (row index -> work id)
    and `id_title` (work id -> title).
    NOTE(review): `id` shadows the builtin; kept to avoid breaking
    keyword callers.
    """
    sparseIndex = rev_bks[id]
    # Dot products of this work's tf-idf row against every row.
    cosine_similarities = linear_kernel(tidf[sparseIndex],tidf)
    # Indices of the n best scores; the final (highest-scoring) entry is
    # dropped — presumably the query work itself; verify.
    closest = np.argsort(cosine_similarities[0])[-(n+1):-1]
    closest_ids = [rev_rev_bks[x] for x in closest]
    if verbose:
        print("Books Closest to:", id_title[str(id)])
        for x in closest_ids[::-1]:
            print(id_title[str(x)])
    # Most similar first.
    return closest_ids[::-1]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31690, "status": "ok", "timestamp": 1608089587031, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="XpUbQhidIQrt" outputId="a7a9d169-20a6-42fc-cbae-a27da8237ce6"
# %%time
ids = reccomendReview(1180409, 50, True)
# + [markdown] id="AJzgZxhTJ5m4"
# This takes way too long. 45 seconds per prediction is far too much time. We have 2 options: dimensionality reduction or clustering. Then we can search for cosine similar within clusters. We try both
# + [markdown] id="N5OIyt0eK1I7"
# ## Dimensionality reduction
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2175134, "status": "ok", "timestamp": 1608086920434, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="hFnh_UhjISg8" outputId="92fbd511-5b7b-403a-8371-6208ddcc468c"
# %%time
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(300)
ans = svd.fit_transform(tidf[:])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 351, "status": "ok", "timestamp": 1608087011879, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="bHZHUJO8Czzz" outputId="a3919236-3e6c-42c2-d83b-fc0f02acb321"
print(svd.explained_variance_ratio_.sum())
# + [markdown] id="x4ETPrrRwMNC"
# Try out a rec!
# + id="xDoJexgILIue"
def reccomendReview2(id, n, verbose = False):
    """Like `reccomendReview`, but scores with the SVD-reduced matrix `ans`.

    Relies on notebook globals `rev_bks`, `ans`, `rev_rev_bks`, `id_title`.
    """
    sparseIndex = rev_bks[id]
    # Dot products of this work's reduced vector against all works.
    cosine_similarities = linear_kernel([ans[sparseIndex]],ans)
    # Top-n indices; the last (highest) entry is dropped — presumably the
    # query work itself; verify.
    closest = np.argsort(cosine_similarities[0])[-(n+1):-1]
    closest_ids = [rev_rev_bks[x] for x in closest]
    if verbose:
        print("Books Closest to:", id_title[str(id)])
        for x in closest_ids[::-1]:
            print(id_title[str(x)])
    # Most similar first.
    return closest_ids[::-1]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1475, "status": "ok", "timestamp": 1608087685324, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="0qeyR_BOOWH0" outputId="956ec3dd-6e8f-45d4-96f5-d148c5db3eb8"
# %%time
ids = reccomendReview2(14245059, 50, True)
# + [markdown] id="y8wyI0CKfQn8"
# Much faster, but completely different recs. Which ones to trust?
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 347, "status": "ok", "timestamp": 1608087420147, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="gwabPTtCen3p" outputId="7819c9aa-783d-454f-913f-5331b19b41ea"
ans.shape
# + [markdown] id="72ePHMmThMz6"
# ## Clustering - We try on reduced dims to avoid prohibitively long convergence times
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 226590, "status": "ok", "timestamp": 1608092191593, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="4q5DLpRyertj" outputId="a235d79a-1943-4b00-95c9-5eff6225133f"
from sklearn.cluster import MiniBatchKMeans
dists = []
cluster_cands = list(range(2,30))
for k in cluster_cands:
clf = MiniBatchKMeans(n_clusters = k, batch_size = 1000, verbose = 0, max_iter = 1)
clf.fit(ans[:])
print(k,clf.inertia_)
dists.append(clf.inertia_)
# + colab={"base_uri": "https://localhost:8080/", "height": 312} executionInfo={"elapsed": 494, "status": "ok", "timestamp": 1608092253297, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="47SjZtIlstFv" outputId="d25514a9-d9f9-4f9e-b8af-f29a31fb2c4b"
# %matplotlib inline
from matplotlib import pyplot as plt
plt.plot(cluster_cands,dists, 'xb-')
plt.title("Clusters vs Loss")
plt.xlabel("# of Clusters")
# + [markdown] id="RtkBHupWgPBh"
# # Tag-based filtering
# + executionInfo={"elapsed": 13591, "status": "ok", "timestamp": 1608176728231, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="ZID4XTUUgQ5I"
with open('/content/drive/MyDrive/GoodReadsClean/IntermediateData/BooksVars/book_tags_tfidf.pkl', 'rb') as handle:
tag_tidf = pickle.load(handle)
# + executionInfo={"elapsed": 12091, "status": "ok", "timestamp": 1608176730659, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="Mytx12evg0jv"
with open('/content/drive/MyDrive/GoodReadsClean/IntermediateData/BooksVars/book_map_workid_index.pkl', 'rb') as handle:
bk_map = pickle.load(handle)
# + executionInfo={"elapsed": 11490, "status": "ok", "timestamp": 1608176730662, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="HBhcp5EphzGb"
rev_bk_map = {v:k for k,v in bk_map.items()}
# + executionInfo={"elapsed": 10977, "status": "ok", "timestamp": 1608176730663, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="PKR_T_-QhBtk"
def reccomendGeneral(id, n, tidf, rev_bks, rev_rev_bks, verbose = False):
    """Generic top-n similarity lookup over any tf-idf matrix.

    `tidf`: feature matrix; `rev_bks`: work id (string key) -> row index;
    `rev_rev_bks`: row index -> work id. Uses the notebook-global
    `id_title` for verbose printing. Returns the n closest work ids,
    most similar first.
    """
    sparseIndex = rev_bks[str(id)]
    # Dot products of this work's row against every row.
    cosine_similarities = linear_kernel(tidf[sparseIndex],tidf)
    # Top-n indices; the last (highest-scoring) entry is dropped —
    # presumably the query work itself; verify.
    closest = np.argsort(cosine_similarities[0])[-(n+1):-1]
    closest_ids = [rev_rev_bks[x] for x in closest]
    if verbose:
        print("Books Closest to:", id_title[str(id)])
        for x in closest_ids[::-1]:
            print(id_title[str(x)])
    return closest_ids[::-1]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 4341, "status": "ok", "timestamp": 1608176792356, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="i6H19_IfhjP4" outputId="eaf1e81d-8ef0-44fa-cb9f-a0b8a966b991"
# %%time
ids = reccomendGeneral(1180409, 50,tag_tidf, bk_map, rev_bk_map, True)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 472, "status": "ok", "timestamp": 1608088354063, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="0lbHZXXehkVr" outputId="b19cacc5-3ffb-4440-d311-574d2fa7eec6"
bk_map["14245059"]
# + [markdown] id="0kbmRpyFyRMp"
# ## Evaluation
# + executionInfo={"elapsed": 39048, "status": "ok", "timestamp": 1608176556614, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="r_RV6xjpiDsF"
import pickle
with open(r"/content/drive/MyDrive/GoodReadsClean/IntermediateData/InteractVars/interact.pkl", 'rb') as f:
int_sparse = pickle.load(f)
int_sparse_rowOG = int_sparse.tocsr().astype(float)
int_sparse_colOG = int_sparse.tocsc().astype(float)
# + executionInfo={"elapsed": 737, "status": "ok", "timestamp": 1608176987347, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="jp8a5rg70T7N"
def getTrueSim(int_sparse_col, id1, id2):
    """Pearson correlation of two items' entries over their common rows.

    `int_sparse_col`: a column-slicable sparse interaction matrix.
    Returns 0 when the two columns share fewer than two nonzero rows
    (the correlation is undefined there).
    """
    col_a = int_sparse_col.getcol(id1)
    col_b = int_sparse_col.getcol(id2)
    # Row index -> value for the nonzero entries of each column.
    rated_a = dict(zip(col_a.indices, col_a.data))
    rated_b = dict(zip(col_b.indices, col_b.data))
    shared = list(set(rated_a) & set(rated_b))
    if len(shared) <= 1:
        return 0
    vec_a = col_a[shared].toarray().ravel()
    vec_b = col_b[shared].toarray().ravel()
    return np.corrcoef(vec_a, vec_b)[0, 1]
# + executionInfo={"elapsed": 4904, "status": "ok", "timestamp": 1608176949197, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="_Wv4hVldkxMO"
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1222, "status": "ok", "timestamp": 1608177003557, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="59OM7cIGz2PQ" outputId="1a43dd23-e76a-4051-f4a5-882bfd56367c"
# NOTE(review): this cell was left mid-edit ("for num in" with no
# iterable), which is a syntax error. Iterating over the single work id
# used elsewhere in this section restores a runnable cell; extend the
# list to evaluate more works.
for num in [1180409]:
    ids = reccomendGeneral(num, 50, tag_tidf, bk_map, rev_bk_map)
    for id in ids:
        print(getTrueSim(int_sparse_colOG, 1180409, id))
# + colab={"background_save": true, "base_uri": "https://localhost:8080/", "output_embedded_package_id": "17i6BO_kDjidBhWg7n2iZfuZXtNPw7-tq"} executionInfo={"elapsed": 847, "status": "ok", "timestamp": 1608177085684, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00514865726030680676"}, "user_tz": 300} id="5IHyETusz6T7" outputId="fb1d9f26-b65a-448e-8fb1-ab2017672507"
np.random
# + id="xumQyKbO0k5_"
| ContentFiltering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import os
import sys
import inspect
import numpy as np
import matplotlib.pyplot as plt
import pyvista as pv
from sklearn.preprocessing import normalize
from utils import runSVMClassification, plotClassificationResults
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from morphomatics_med.geom import Surface
from morphomatics_med.manifold import FundamentalCoords, PointDistributionModel, util
from morphomatics_med.stats import StatisticalShapeModel
# Precomputed mesh vertex arrays.
median = np.load("./mesh_median.npy")
mean = np.load("./mesh_mean.npy")
#load faces indices
mesh = pv.read("/mnt/materials/SIRF/MathPlusBerlin/DATA/adni_hippos_hackathon/AD/test/10064.obj")
# align the median mesh onto the mean before comparing
from morphomatics_med.manifold.util import align
median = align(median, mean)
# calculate difference (per-vertex Euclidean distance)
diff = np.linalg.norm(median - mean, axis = 1)
print(diff.shape)
# Color the median mesh by its per-vertex distance to the mean.
pl = pv.Plotter(notebook = True)
pl.add_mesh(pv.PolyData(median, mesh.faces), smooth_shading=True, scalars = diff, cmap = "viridis")
pl.view_yx()
pl.camera.roll += 180
pl.show(jupyter_backend = 'ipygany')
# -
| show_difference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.7 64-bit
# name: python37764bitdad157b643b7445ca0af50d1c9882389
# ---
# +
import pandas as pd
data=pd.read_csv("MeMas_5min.csv",header=0,infer_datetime_format=True,parse_dates=['Data'],index_col=['Data'])
daily_data=data.resample('D').sum()
daily_data
# +
import matplotlib.pyplot as plt
plt.figure(figsize=(25,5))
plt.plot(data)
plt.grid
# -
plt.figure(figsize=(25,5))
plt.plot(daily_data)
plt.grid
# +
from sklearn.metrics import mean_squared_error
import numpy as np
def forecasts(act, pred):
    """Overall and per-column RMSE between actual and predicted values.

    `act`, `pred`: 2-D arrays of shape (n_weeks, n_days).

    Returns:
        score: single RMSE over every element.
        scores: list with one RMSE per column (day of the week).
    """
    act = np.asarray(act, dtype=float)
    pred = np.asarray(pred, dtype=float)
    err = act - pred
    # Per-column RMSE (previously one sklearn mean_squared_error call
    # per column; numerically identical).
    scores = [float(np.sqrt(np.mean(err[:, i] ** 2))) for i in range(act.shape[1])]
    # Overall RMSE, vectorized instead of the original pure-Python
    # double loop over every cell; same result.
    score = float(np.sqrt(np.mean(err ** 2)))
    return score, scores
# -
# Primeiramnete é necessário separar os dados por semanas inteiras, no conjunto de dados já deixei o primeiro dia sendo um domingo e o último sendo um sábado. No total são 95 semanas, 71 dessas semanas serão utilizadas para o treinamento do modelo e as outras 24 para a validação.
def split(data):
    """Split the daily series into training and test sets of whole weeks.

    Training covers days [0, 497); the test segment starts at day 496
    (overlapping the last training day) and drops the final day, so both
    segments have a length divisible by 7.
    """
    train_days = data[0:497]
    test_days = data[496:-1]
    # Reshape each flat run of days into (n_weeks, 7, n_features).
    train_weeks = np.array(np.split(train_days, len(train_days) / 7))
    test_weeks = np.array(np.split(test_days, len(test_days) / 7))
    return train_weeks, test_weeks
def summarized_scores(name,score,scores):
    """Print a one-line summary: model name, overall score, per-day scores."""
    per_day = ', '.join('%.1f' % day_score for day_score in scores)
    print('%s:[%.3f] %s' % (name, score, per_day))
def to_supervized(train, n_in, n_out):
    """Frame weekly data as supervised samples with a sliding window.

    Flattens `train` (weeks, days, features) back into a daily sequence,
    then slides one day at a time: each sample pairs `n_in` input days
    with the following `n_out` target days (first feature only).
    Returns (X, y) with X shaped (samples, n_in, 1) and y (samples, n_out).
    """
    n_days = train.shape[0] * train.shape[1]
    series = train.reshape((n_days, train.shape[2]))
    inputs, targets = [], []
    for start in range(n_days):
        mid = start + n_in
        stop = mid + n_out
        # Keep only windows that fit strictly inside the series.
        if stop < n_days:
            window = series[start:mid, 0]
            inputs.append(window.reshape((len(window), 1)))
            targets.append(series[mid:stop, 0])
    return np.array(inputs), np.array(targets)
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
def build_model(train,n_input):
    """Fit an LSTM forecaster on the weekly training data.

    `train`: array of shape (n_weeks, 7, n_features).
    `n_input`: number of past days used as model input.
    Returns the fitted Keras model (predicts 7 days at once).
    """
    # Frame the weeks as (n_input -> 7-day) supervised samples.
    train_x,train_y=to_supervized(train,n_input,7)
    verbose=0
    epochs=70
    batch_size=16
    n_timesteps=train_x.shape[1]
    n_features=train_x.shape[2]
    n_outputs=train_y.shape[1]
    # LSTM encoder followed by a dense head emitting all 7 outputs.
    model=Sequential()
    model.add(LSTM(200,activation='relu',input_shape=(n_timesteps,n_features)))
    model.add(Dense(100,activation='relu'))
    model.add(Dense(n_outputs))
    model.compile(loss='mse',optimizer='adam')
    model.fit(train_x,train_y,epochs=epochs,batch_size=batch_size,verbose=verbose)
    return model
def forecast(model, history, n_input):
    """Predict the next week from the last n_input days of history.

    history: list of week arrays, each of shape (7, n_features).
    Returns a 1-D array with the 7 predicted daily values.
    """
    flat = np.array(history)
    flat = flat.reshape((flat.shape[0] * flat.shape[1], flat.shape[2]))
    # Most recent n_input days of the target feature, shaped as one batch.
    recent = flat[-n_input:, 0]
    recent = recent.reshape((1, len(recent), 1))
    yhat = model.predict(recent, verbose=0)
    return yhat[0]
def evaluate_model(train, test, n_input):
    """Walk-forward validation: fit once, then forecast each test week in turn.

    After predicting a week, that week's actual observations are appended to
    the history so the following forecast can use them.
    Returns (overall_rmse, per_day_rmse_list) from forecasts().
    """
    model = build_model(train, n_input)
    history = list(train)
    predictions = []
    for week in test:
        predictions.append(forecast(model, history, n_input))
        history.append(week)
    score, scores = forecasts(test[:, :, 0], np.array(predictions))
    return score, scores
# Split the daily data into whole training/validation weeks.
train,test=split(daily_data.values)
train.shape
# ^ 71 weeks for training
test.shape
# ^ 24 weeks for validation
# Train the LSTM with a 4-day input window and report overall/per-day RMSE.
score,scores=evaluate_model(train,test,4)
summarized_scores('lstm',score,scores)
# Plot per-weekday RMSE (labels are Portuguese day abbreviations, Sun..Sat).
days=['Dom','Seg','Ter','Qua','Qui','Sex','Sáb']
plt.figure(figsize=(12,9))
plt.plot(days,scores)
| previsao_lstm(adaptado)/previsao.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Neural Machine Translation
# <sup> with inputs from https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html </sup>
#
# General Reference: https://github.com/nyu-dl/NLP_DL_Lecture_Note/blob/master/lecture_note.pdf
# ### Install Google Translate API for Comparision
# https://github.com/ssut/py-googletrans
# ! pip install googletrans
from googletrans import Translator
translator = Translator()
path_to_utils = 'pyfiles'
import os
import sys
sys.path.append(path_to_utils)
import global_variables
import nmt_dataset
import nnet_models_new
# +
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from functools import partial
import time
# from tqdm import tqdm_notebook as tqdm
import tqdm
# -
base_saved_models_dir = '.'
# ### The Dataset
# We will work with a English to French Dataset from https://www.manythings.org/anki/
main_data_path = './data/'
# +
source_name = 'en'
target_name = 'fr'
path_to_train_data = {'source':main_data_path+'train.'+source_name,
'target':main_data_path+'train.'+target_name}
path_to_val_data = {'source': main_data_path+'valid.'+source_name,
'target':main_data_path+'valid.'+target_name}
# -
saved_models_dir = os.path.join(base_saved_models_dir, source_name+'2'+target_name)
## See first 5 records
# ! head -5 'data/train.en'
# ! head -5 'data/train.fr'
# ### Processing and making PyTorch Dataset
# We have to make it a pair - (source, target) sentence pair. For this, we have to read the file and parse it accordingly. We might have to take care of some details there, like making sure that we strip off any non-required special characters or extra space. All those boring details aside (which you can see in dataset_helper.py) what are the other things we have to do?
#
# We have to make a vocabulary and tokenize like we have been doing. Here, we are writing a Language Class, to take care of this for you. Once we have done all this and tokenized, we write a pytorch dataset object to help as handle this efficiently during training
saved_language_model_dir = os.path.join(saved_models_dir, 'lang_obj')
dataset_dict = {'train': nmt_dataset.LanguagePair(source_name = source_name, target_name=target_name,
filepath = path_to_train_data,
lang_obj_path = saved_language_model_dir,
minimum_count = 1),
'val': nmt_dataset.LanguagePair(source_name = source_name, target_name=target_name,
filepath = path_to_val_data,
lang_obj_path = saved_language_model_dir,
minimum_count = 1)}
# The LanguagePair object we built has a DataFrame underneath. We see the first 5 rows of the dataframe below:
dataset_dict['train'].main_df.iloc[:5]
# ### vocabulary sizes and sentence lengths
### vocabulary sizes
print('source vocab: ', dataset_dict['train'].source_lang_obj.n_words ,
'target vocab: ', dataset_dict['train'].target_lang_obj.n_words)
### vocabulary sizes
print('max len: ', dataset_dict['train'].main_df['source_len'].max(),
'min len: ', dataset_dict['train'].main_df['source_len'].min() )
dataset_dict['train'].main_df['source_len'].quantile([0.5, 0.75, 0.9, 0.95, 0.99, 0.999, 0.9999])
# 51 looks like a very long sentence and at the $99.99$th percentile is 32. We probably don't want that much. How do we get rid of rest of the words or clip sentence at some MAX LEN? We can use the collate function of pytorch that we had seen earlier to do this.
MAX_LEN = int(dataset_dict['train'].main_df['source_len'].quantile(0.9999))
batchSize = 64
dataloader_dict = {'train': DataLoader(dataset_dict['train'], batch_size = batchSize,
collate_fn = partial(nmt_dataset.vocab_collate_func, MAX_LEN=MAX_LEN),
shuffle = True, num_workers=0),
'val': DataLoader(dataset_dict['val'], batch_size = batchSize,
collate_fn = partial(nmt_dataset.vocab_collate_func, MAX_LEN=MAX_LEN),
shuffle = True, num_workers=0) }
# The Seq2Seq Model
# =================
#
# A Recurrent Neural Network, or RNN, is a network that operates on a
# sequence and uses its own output as input for subsequent steps.
#
# A `Sequence to Sequence network <http://arxiv.org/abs/1409.3215>`__, or
# seq2seq network, or `Encoder Decoder
# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model
# consisting of usually of two RNNs called the encoder and decoder. The encoder reads
# an input sequence and outputs a single vector, and the decoder reads
# that vector to produce an output sequence. Essentially, all we need is some mechanism to read the source sentence and create an encoding and some mechanism to read the encoding and decode it to the target language.
#
# Unlike sequence prediction with a single RNN, where every input
# corresponds to an output, the seq2seq model frees us from sequence
# length and order, which makes it ideal for translation between two
# languages.
#
# Consider the sentence "I am not the
# black cat" → "Je ne suis pas le chat noir". Most of the words in the input sentence have a direct
# translation in the output sentence, but are in slightly different
# orders, e.g. "chat noir" and "black cat". Because of the "ne/pas"
# construction there is also one more word in the input sentence. It would
# be difficult to produce a correct translation directly from the sequence
# of input words.
#
# With a seq2seq model the encoder creates a single vector which, in the
# ideal case, encodes the "meaning" of the input sequence into a single
# vector — a single point in some N dimensional space of sentences.
#
#
#
# ### Concepts:
# 1. NMT as a conditional language modelling
# 2. Encoder
# 3. Decoding during evaluation - step by step (see code)
# 4. Teaching Forcing (see code) and train step
# 5. How do we evaluate the quality of translation? BLEU Score
# The Encoder
# -----------
#
# The encoder is anything which takes in a sentence and gives us a representation for the sentence.
#
# Usually, the encoder of a seq2seq network is a RNN that outputs some value for
# every word from the input sentence. For every input word the encoder
# outputs a vector and a hidden state, and uses the hidden state for the
# next input word.
#
# However, we will first start with a BoW encoder and then move on to RNN based encoders
# +
### configuration
source_lang_obj = dataset_dict['train'].source_lang_obj
target_lang_obj = dataset_dict['train'].target_lang_obj
source_vocab = dataset_dict['train'].source_lang_obj.n_words;
target_vocab = dataset_dict['train'].target_lang_obj.n_words;
hidden_size = 512
rnn_layers = 1
lr = 0.25;
longest_label = 32;
gradient_clip = 0.3;
use_cuda = True
num_epochs = 10
# -
# ### BagOfWords Encoder
encoder_bow = nnet_models_new.BagOfWords(input_size = source_vocab,
hidden_size = hidden_size,
nlayers=4,
reduce = "sum")
print(encoder_bow)
# The Decoder
# --------------------
#
#
# The decoder is another RNN that takes the encoder output vector(s) and outputs a sequence of words to create the translation.
#
# Decoder w/o Attention
# ------------------------
# In the simplest seq2seq decoder we use only last output of the encoder. This last output is sometimes called the context vector as it encodes context from the entire sequence. This context vector is used as the initial hidden state of the decoder.
#
# At every step of decoding, the decoder is given an input token and hidden state. The initial input token is the start-of-string <SOS> token, and the first hidden state is the context vector (the encoder's last hidden state).
decoder_bow = nnet_models_new.DecoderRNN(target_vocab, hidden_size, rnn_layers)
print(decoder_bow)
# Assemble the BoW-encoder / RNN-decoder seq2seq model.
nmt_bow = nnet_models_new.seq2seq(encoder_bow, decoder_bow,
                                  lr = 1e-2,
                                  use_cuda = use_cuda,
                                  hiddensize = hidden_size,
                                  # NOTE(review): passes hidden_size (512) as numlayers, while a
                                  # separate rnn_layers=1 exists above — confirm against seq2seq's
                                  # signature; this looks like it should be rnn_layers.
                                  numlayers = hidden_size,
                                  target_lang=dataset_dict['train'].target_lang_obj,
                                  longest_label = longest_label,
                                  clip = gradient_clip)
# ### Training Loop
def get_full_filepath(path, enc_type):
    """Return the checkpoint file path for an encoder of the given type."""
    return os.path.join(path, 'nmt_enc_' + enc_type + '_dec_rnn.pth')
def save_models(nmt_model, path, enc_type):
    """Serialize the model to `<path>/nmt_enc_<enc_type>_dec_rnn.pth`.

    The directory is created first if it does not already exist.
    """
    os.makedirs(path, exist_ok=True)
    target = os.path.join(path, 'nmt_enc_' + enc_type + '_dec_rnn.pth')
    torch.save(nmt_model, target)
# +
def train_model(dataloader, nmt, num_epochs=50, val_every=1, saved_model_path = '.', enc_type ='rnn'):
    """Train the seq2seq model, validating by BLEU and checkpointing on improvement.

    dataloader: dict with 'train' and 'val' DataLoaders.
    nmt: seq2seq wrapper exposing train_step / get_bleu_score / scheduler_step.
    num_epochs: total number of training epochs.
    val_every: run validation every this many epochs.
    saved_model_path, enc_type: checkpoint location/name (see save_models).
    """
    best_bleu = -1;  # BLEU is non-negative, so the first validation always checkpoints
    for epoch in range(num_epochs):
        start = time.time()
        running_loss = 0
        print('Epoch: [{}/{}]'.format(epoch, num_epochs));
        for i, data in tqdm.tqdm_notebook(enumerate(dataloader['train']), total=len(dataloader['train'])):
            _, curr_loss = nmt.train_step(data);
            running_loss += curr_loss
        epoch_loss = running_loss / len(dataloader['train'])
        print("epoch {} loss = {}, time = {}".format(epoch, epoch_loss,
                                                     time.time() - start))
        sys.stdout.flush()
        if epoch%val_every == 0:
            # Validate, drive the LR scheduler off BLEU, and keep the best model.
            val_bleu_score = nmt.get_bleu_score(dataloader['val']);
            print('validation bleu: ', val_bleu_score)
            sys.stdout.flush()
            nmt.scheduler_step(val_bleu_score);
            if val_bleu_score > best_bleu:
                best_bleu = val_bleu_score
                save_models(nmt, saved_model_path, enc_type);
        print('='*50)
    print("Training completed. Best BLEU is {}".format(best_bleu))
# -
# ### Training Bow Encoder GRU Decoder Model
# +
train_again = False
modelname = 'bow_model'
device = torch.device('cuda') if use_cuda and torch.cuda.is_available() else torch.device('cpu');
if os.path.exists(get_full_filepath(saved_models_dir, modelname)) and (not train_again):
nmt_bow = torch.load(get_full_filepath(saved_models_dir, modelname))
else:
train_model(dataloader_dict, nmt_bow,
num_epochs = num_epochs,
saved_model_path = saved_models_dir,
enc_type = 'bow_test')
# -
# ### Check Performance
print(nmt_bow.get_bleu_score(dataloader_dict['val']))
# ## Interacting with the system
# +
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import copy
# %matplotlib inline
def get_binned_bl_score(nmt_model, val_dataset):
    """Compute and plot BLEU on the validation set, binned by source-sentence length.

    Bins are (0,5], (5,10], ..., (25,30] source tokens.
    Returns (bin_upper_bounds, bleu_per_bin).
    """
    len_threshold = np.arange(0, 31, 5)
    bin_bl_score = np.zeros(len(len_threshold));
    for i in tqdm.tqdm_notebook( range(1, len(len_threshold)), total = len(len_threshold)-1):
        min_len = len_threshold[i-1]
        max_len = len_threshold[i]
        # Deep-copy so filtering the underlying DataFrame cannot mutate the original dataset.
        temp_dataset = copy.deepcopy(val_dataset);
        temp_dataset.main_df = temp_dataset.main_df[(temp_dataset.main_df['source_len'] > min_len) & (temp_dataset.main_df['source_len'] <= max_len)];
        temp_loader = DataLoader(temp_dataset, batch_size = batchSize,
                                 collate_fn = partial(nmt_dataset.vocab_collate_func, MAX_LEN=100),
                                 shuffle = True, num_workers=0)
        bin_bl_score[i] = nmt_model.get_bleu_score(temp_loader);
    # Drop the leading 0 entry: index 0 was only a left bin edge, never filled.
    len_threshold = len_threshold[1:]
    bin_bl_score = bin_bl_score[1:]
    plt.plot(len_threshold, bin_bl_score, 'x-')
    plt.ylim(0, np.max(bin_bl_score)+1)
    plt.xlabel('len')
    plt.ylabel('bl score')
    return len_threshold, bin_bl_score
def showAttention(input_sentence, output_words, attentions):
    """Render the attention matrix: source tokens on x, generated tokens on y.

    input_sentence, output_words: space-separated token strings.
    attentions: 2-D array of attention weights (output steps x input tokens).
    """
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions, cmap='bone', aspect='auto')
    fig.colorbar(cax)
    # Set up axes; the leading '' aligns labels with matshow's first tick.
    ax.set_xticklabels([''] + input_sentence.split(' ') +
                       [global_variables.EOS_TOKEN], rotation=90)
    ax.set_yticklabels([''] + output_words.split(' ')+
                       [global_variables.EOS_TOKEN]);
    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
def get_encoded_batch(sentence, lang_obj, use_cuda):
    """Encode one sentence into the batch namedtuple the model expects (bsz = 1).

    sentence: raw source-language string.
    lang_obj: language object providing txt2vec (text -> index tensor).
    use_cuda: place tensors on GPU when available.
    Returns a namedtuple(text_vecs, text_lens, label_vecs, label_lens,
    use_packed) with the label fields set to None (inference only).
    """
    sentence = sentence + ' ' + global_variables.EOS_TOKEN
    # Bug fix: encode with the lang_obj argument; the original ignored it and
    # always read the module-level source_lang_obj instead.
    tensor = lang_obj.txt2vec(sentence).unsqueeze(0)
    device = torch.device('cuda') if use_cuda and torch.cuda.is_available() else torch.device('cpu')
    named_returntuple = namedtuple('namedtuple', ['text_vecs', 'text_lens', 'label_vecs', 'label_lens', 'use_packed'])
    return named_returntuple(tensor.to(device),
                             torch.from_numpy(np.array([tensor.shape[-1]])).to(device),
                             None,
                             None,
                             False)
def get_translation(nmt_model, sentence, lang_obj, use_cuda):
    """Translate one sentence, print Google Translate references, and plot attention.

    Prints: the source, the model's prediction, Google Translate of the source
    (reference translation), and Google Translate of the prediction back to the
    source language (round-trip sanity check). If the decoder returned attention
    scores, the attention matrix is also plotted.
    """
    # NOTE(review): 'souce' is a typo in user-facing output — consider 'source'.
    print('souce: ', sentence)
    batch = get_encoded_batch(sentence, lang_obj, use_cuda);
    prediction, attn_scores_list = nmt_model.eval_step(batch, return_attn = True);
    prediction = prediction[0];
    print('prediction: ', prediction)
    print('GT on sentence (src->tgt): ', translator.translate(sentence,
                                                              src = source_name,
                                                              dest = target_name).text)
    print('GT on prediction (tgt->src): ', translator.translate(prediction,
                                                                src = target_name,
                                                                dest = source_name).text)
    # Only attention-equipped decoders return usable scores; skip the plot otherwise.
    if attn_scores_list[0] is not None:
        if attn_scores_list[0][0] is not None:
            attn_matrix = [x[0].data.cpu().numpy() for x in attn_scores_list];
            attn_matrix = np.stack(attn_matrix)[:,:, 0]
            showAttention(sentence, prediction, attn_matrix)
# -
get_translation(nmt_bow, 'how are you ?', source_lang_obj, use_cuda)
get_translation(nmt_bow, 'are hello ? how you', source_lang_obj, use_cuda)
# ## RNN Encoder
encoder_rnn = nnet_models_new.EncoderRNN(source_vocab, hidden_size, rnn_layers)
print(encoder_rnn)
decoder_rnn = nnet_models_new.DecoderRNN(target_vocab, hidden_size, rnn_layers)
print(decoder_rnn)
nmt_rnn = nnet_models_new.seq2seq(encoder_rnn, decoder_rnn,
lr = lr,
use_cuda = use_cuda,
hiddensize = hidden_size,
numlayers = hidden_size,
target_lang=dataset_dict['train'].target_lang_obj,
longest_label = longest_label,
clip = gradient_clip)
train_again = False
modelname = 'rnn_model'
if os.path.exists(get_full_filepath(saved_models_dir, modelname)) and (not train_again):
nmt_rnn = torch.load(get_full_filepath(saved_models_dir, modelname))
else:
train_model(dataloader_dict, nmt_rnn,
num_epochs = num_epochs,
saved_model_path = saved_models_dir,
enc_type = 'rnn_test')
# ### Check Performance
print(nmt_rnn.get_bleu_score(dataloader_dict['val']))
# ### Interacting with system
get_translation(nmt_rnn, 'how are you ?', source_lang_obj, use_cuda)
get_translation(nmt_rnn, 'are hello ? how you', source_lang_obj, use_cuda)
get_translation(nmt_rnn, 'i know that the last thing you want to do is help me .', source_lang_obj, use_cuda)
rnn_len_threshold, rnn_bin_bl = get_binned_bl_score(nmt_rnn, dataset_dict['val'])
# We work with a small training data and hence you see this drop in BLEU score for sentences of shorter length. We don't have enough data points with small sentence length
# ## RNN Encoder + Source Side Attention
# ### Concepts:
# 1. Context Vector which gives additional information from source side for decoding the next token.
# 2. Fit context vector into decoding framework: <br>
# a) Initialize with 0 <br>
# b) input to RNN is the concatenation of the input token and context vector <br>
# c) output token decoded from context vector calculated from the hidden of previous timestep; i.e, context vector for the next timestep is used for decoding the current output
# 3. Calculating the context vector. Explain attention module: <br>
# a) hidden transformed to encoder hidden dimension using linear layer. This is your query vector. <br>
# b) Value and Key vectors are the encoder outputs. <br>
# c) Dot product. Raw Score. Softmax. Linear Combination. <br>
# d) concatenate the linear combination vector with the input hidden from decoder. Pass it through a linear layer to project it back to decoder hidden dimension followed by a `tanh`.
#
#
#
encoder_attention = True
self_attention = False
encoder_encoderattn = nnet_models_new.EncoderRNN(source_vocab, hidden_size, 1)
decoder_encoderattn = nnet_models_new.Decoder_SelfAttn(output_size=target_vocab,
hidden_size=hidden_size,
encoder_attention = encoder_attention,
self_attention = self_attention)
nmt_encoderattn = nnet_models_new.seq2seq(encoder_encoderattn, decoder_encoderattn,
lr = lr,
use_cuda = use_cuda,
hiddensize = hidden_size,
numlayers = hidden_size,
target_lang=dataset_dict['train'].target_lang_obj,
longest_label = longest_label,
clip = gradient_clip)
train_again = False
modelname = 'encoderattn_model'
if os.path.exists(get_full_filepath(saved_models_dir, modelname)) and (not train_again):
nmt_encoderattn = torch.load(get_full_filepath(saved_models_dir, modelname))
else:
train_model(dataloader_dict, nmt_encoderattn,
num_epochs = num_epochs,
saved_model_path = saved_models_dir,
enc_type = 'encoderattn_test')
# ### Check Performance
print(nmt_encoderattn.get_bleu_score(dataloader_dict['val']))
# ### BLEU vs Sentence Length
attn_len_threshold, attn_bin_bl = get_binned_bl_score(nmt_encoderattn, dataset_dict['val'])
plt.plot(rnn_len_threshold, rnn_bin_bl, '--x', label = 'w/o attn')
plt.plot(attn_len_threshold, attn_bin_bl, '--x', label = 'attn')
plt.xlabel('len sentence')
plt.ylabel('bl score')
plt.legend()
# ### Interacting with system
get_translation(nmt_encoderattn, 'how are you ?', source_lang_obj, use_cuda)
get_translation(nmt_encoderattn, 'she knows better than to argue with him .', source_lang_obj, use_cuda)
get_translation(nmt_encoderattn, 'she s five years younger than me .', source_lang_obj, use_cuda)
get_translation(nmt_encoderattn, 'i know that the last thing you want to do is help me .', source_lang_obj, use_cuda)
| Part 02/002_NMT/a.seq_to_seq/.ipynb_checkpoints/NMT-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# TorchVision hotfix https://github.com/pytorch/vision/issues/1938
# (MNIST mirrors reject urllib's default user agent; spoof a browser UA.)
from syft.util import get_root_data_path
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [("User-agent", "Mozilla/5.0")]
urllib.request.install_opener(opener)
import torchvision
# Pre-download both MNIST splits into syft's data directory.
torchvision.datasets.MNIST(get_root_data_path(), train=True, download=True)
torchvision.datasets.MNIST(get_root_data_path(), train=False, download=True)
import syft as sy
# Start a Duet session (data-owner side) and auto-accept incoming requests.
duet = sy.duet(loopback=True)
duet.requests.add_handler(action="accept")
# Display the current contents of the Duet object store.
duet.store.pandas
| examples/vertical-learning/advanced/MNIST/DO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # EOPF S1 MSI L1 Product Data Structure Proposal
#
from IPython.display import IFrame
from utils import display
from EOProductDataStructure import EOProductBuilder, EOVariableBuilder, EOGroupBuilder
# # 1. Read S1 MSI Product
# +
path_product="data/ew_slc.yaml"
import yaml

# Load the product description; `product` stays None if parsing fails.
product = None
with open(path_product, "r") as stream:
    try:
        product = yaml.safe_load(stream)['product']
    except yaml.YAMLError as exc:
        print(exc)

ex_slc = EOProductBuilder("S1A_EW_SLC__", coords=EOGroupBuilder('coords'))

for key, values in product.items():
    if key == "attributes":
        ex_slc.attrs = values
    else:
        group = EOGroupBuilder(key)
        variables = values['variables']
        for var in variables:
            variable = EOVariableBuilder(var)
            # Specs apparently look like "F(dim->size, ...)->dtype" — TODO confirm.
            # Narrowed from a bare `except:` — only a missing '->' dtype suffix
            # (IndexError) should be tolerated; other errors must surface.
            try:
                variable.dtype = variables[var].split('->')[1]
            except IndexError:
                pass  # no explicit dtype in the spec; keep the builder default
            variable.dims = [d.split('->')[0] for d in variables[var].replace('F(','').replace(')','').split(',')]
            group.variables.append(variable)
            for d in variable.dims:
                if d not in group.dims:
                    group.dims.append(d)
        ex_slc.groups.append(group)

display(ex_slc.compute())
| eopf-notebooks/eopf_product_data_structure/EOPF_S1A_EW_SLC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Predicting Credit Card Default
# This code was part of CSE 60647, Fall 2021, University of Notre Dame.
# _______________________________________________
#
# ### Authors:
# - <NAME> {<EMAIL>}
# - <NAME> {<EMAIL>}
# - <NAME> {<EMAIL>}
#
# _______________________________________________
#
# ### Dataset
#
# The data used in this project was compiled by Yeh and Lien[1] for analyzing customer defaults on credit card payments in Taiwan.
#
# It was obtained from the UCI ML repository[2].
#
# Download the data [here](default.xls)
#
# _______________________________________________
#
# ### Solution
#
# The code is in four parts:
# - [Preliminary Analysis and Preparation of Data](./data_preparation.ipynb)
# - [Model Development: Exploring methods](./model_training.ipynb)
# - [Model Development with Cross-Validation](./model_training_crossval.ipynb)
# - [Best Model Analysis](./model_analysis.ipynb)
#
# Requirements to run the scripts:
# - imblearn
# - xlrd
# - matplotlib
# - sklearn
# - pandas
# - seaborn
# _______________________________________________
#
# ### Resulting Documents
#
# - [Paper](./Final_Paper_for_DS_Project___Credit_Card.pdf)
# - [Presentation (video)](https://drive.google.com/file/d/1IWRR7-RPNRTbJKI86NnbVgWaqIKr-n5x/view?usp=sharing)
#
# _______________________________________________
#
# ### References
#
# [1]-<NAME> and <NAME>. 2009. *The comparisons of data mining techniques
# for the predictive accuracy of probability of default of credit card clients*. Expert
# Systems with Applications 36 (2009), 2473–2480
#
# [2] 2016. *Default of credit card clients*. UCI Machine Learning Repository.
# [https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients](https://archive.ics.uci.edu/ml/datasets/default+of+credit+card+clients)
#
#
| README.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Use ENGIE’s open data set
#
# ENGIE provides access to the data of its 'La Haute Borne' wind farm through https://opendata-renewables.engie.com and through an API. The data can be used to create additional turbine objects and gives users the opportunity to work with further real-world data.
#
# The series of notebooks in the 'examples' folder uses SCADA data downloaded from https://opendata-renewables.engie.com, saved in the 'examples/data' folder. Additional plant level meter, availability, and curtailment data were synthesized based on the SCADA data.
#
# In the following example, data is loaded into a turbine object and plotted as a power curve. The selected turbine can be changed if desired.
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from project_ENGIE import Project_Engie
from operational_analysis.toolkits import filters
from operational_analysis.toolkits import power_curve
# -
# ### Import the data
# Load the La Haute Borne project and run its preparation/QA step.
project = Project_Engie('./data/la_haute_borne')
project.prepare()
# List of turbines
turb_list = project.scada.df.id.unique()
turb_list
# Examine the first turbine: pull its wind-speed and power signals.
df = project.scada.df.loc[project.scada.df['id'] == turb_list[0]]
windspeed = df["wmet_wdspd_avg"]
power_kw = df["wtur_W_avg"]/1000 # Put into kW
def plot_flagged_pc(ws, p, flag_bool, alpha):
    """Scatter the power curve, highlighting flagged samples in red.

    ws: wind speed (m/s).
    p: power in kW (callers pass wtur_W_avg / 1000).
    flag_bool: boolean mask of flagged samples.
    alpha: transparency for the unflagged points.
    """
    plt.scatter(ws, p, s = 1, alpha = alpha)
    plt.scatter(ws[flag_bool], p[flag_bool], s = 1, c = 'red')
    plt.xlabel('Wind speed (m/s)')
    # Fixed label: data is in kW (see power_kw above and the other plots), not W.
    plt.ylabel('Power (kW)')
    plt.show()
# First, we'll make a scatter plot the raw power curve data.
plot_flagged_pc(windspeed, power_kw, np.repeat('True', df.shape[0]), 1)
# ### Range filter
out_of_range = filters.range_flag(windspeed, below=0, above=70)
windspeed[out_of_range].head()
# No wind speeds out of range
#
# ### Window range filter
#
# Now, we'll apply a window range filter to remove data with power values outside of the window from 20 to 2100 kW for wind speeds between 5 and 40 m/s.
out_of_window = filters.window_range_flag(windspeed, 5., 40, power_kw, 20., 2100.)
plot_flagged_pc(windspeed, power_kw, out_of_window, 0.2)
# Let's remove these flagged data from consideration
windspeed_filt1 = windspeed[~out_of_window]
power_kw_filt1 = power_kw[~out_of_window]
# ### Bin filter
#
# We may be interested in fitting a power curve to data representing 'normal' turbine operation. In other words, we want to flag all anomalous data or data represenatative of underperformance. To do this, the 'bin_filter' function is useful. It works by binning the data by a specified variable, bin width, and start and end points. The criteria for flagging is based on some measure (scalar or standard deviation) from the mean or median of the bin center.
#
# As an example, let's bin on power in 100 kW increments, starting from 25.0 kW but stopping at 90% of peak power (i.e. we don't want to flag all the data at peak power and high wind speed. Let's use a scalar threshold of 1.5 m/s from the median for each bin. Let's also consider data on both sides of the curve by setting the 'direction' parameter to 'all'
max_bin = 0.90*power_kw_filt1.max()
bin_outliers = filters.bin_filter(power_kw_filt1, windspeed_filt1, 100, 1.5, 'median', 20., max_bin, 'scalar', 'all')
plot_flagged_pc(windspeed_filt1, power_kw_filt1, bin_outliers, 0.5)
# As seen above, one call for the bin filter has done a decent job of cleaning up the power curve to represent 'normal' operation, without excessive removal of data points. There are a few points at peak power but low wind speed that weren't flagged, however. Let catch those, and then remove those as well as the flagged data above, and plot our 'clean' power curve
windspeed_filt2 = windspeed_filt1[~bin_outliers]
power_kw_filt2 = power_kw_filt1[~bin_outliers]
# #### Unresponsive Filter
#
# As a final filtering demonstration, we can look for an unrespsonsive sensor (i.e. repeating measurements). In this case, let's look for 3 or more repeating wind speed measurements:
frozen = filters.unresponsive_flag(windspeed_filt2, 3)
windspeed_filt2[frozen]
# We actually found a lot, so let's remove these data as well before moving on to power curve fitting.
#
# Note that many of the unresponsive sensor values identified above are likely caused by the discretization of the data to only two decimal places. However, the goal is to illustrate the filtering process.
windspeed_final = windspeed_filt2[~frozen]
power_kw_final = power_kw_filt2[~frozen]
# ##### Power curve fitting
#
# We will now consider three different models for fitting a power curve to the SCADA data.
# Fit the power curves
iec_curve = power_curve.IEC(windspeed_final, power_kw_final)
l5p_curve = power_curve.logistic_5_parametric(windspeed_final, power_kw_final)
spline_curve = power_curve.gam(windspeed_final, power_kw_final, n_splines = 20)
# Plot the results
x = np.linspace(0,20,100)
plt.figure(figsize = (10,6))
plt.scatter(windspeed_final, power_kw_final, alpha=0.5, s = 1, c = 'gray')
plt.plot(x, iec_curve(x), color="red", label = 'IEC', linewidth = 3)
plt.plot(x, spline_curve(x), color="C1", label = 'Spline', linewidth = 3)
plt.plot(x, l5p_curve(x), color="C2", label = 'L5P', linewidth = 3)
plt.xlabel('Wind speed (m/s)')
plt.ylabel('Power (kW)')
plt.legend()
plt.show()
# The above plot shows that the IEC method accurately captures the power curve, although it results in a 'choppy' fit, while the L5P model (constrained by its parametric form) deviates from the knee of the power curve through peak production. The spline fit tends to fit the best.
| examples/00_toolkit_examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project Submission
#
# Continuous Control for the Udacity Ud893 Deep Reinforcement Learning Nanodegree (DRLND)
#
# ## Imports and Dependencies
# +
import sys
sys.path.append("../python")
import random
import numpy as np
import torch
from collections import deque
import matplotlib.pyplot as plt
from datetime import datetime
import os
from unityagents import UnityEnvironment
# %matplotlib inline
# -
# ## Unity Environment
#
# Note that if your operating system is Windows (64-bit), the Unity environment is included and you can run the below environment instantiation cell.
#
# However, if you're using a different operating system, download the file you require from one of the following links:
#
# - Linux: [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher_Linux.zip)
# - Mac OSX: [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher.app.zip)
# - Windows (32-bit): [click here](https://s3-us-west-1.amazonaws.com/udacity-drlnd/P2/Reacher/Reacher_Windows_x86.zip)
#
# Then, place the file in the main project directory folder and unzip (or decompress) the file. Modify the file_name in the below cell and then run the cell.
env = UnityEnvironment(file_name="Tennis_Windows_x86_64/Tennis.exe")
# ## Get Default Brain
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# ## Main Training Loop Function
# +
import time
def training_loop(agent, n_episodes=7000, max_t=3000):
    """MADDPG training loop for the Unity Tennis environment.

    Params
    ======
        agent: multi-agent wrapper exposing act/step/reset plus per-agent
            actor_local/critic_local networks and their optimizers
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode

    Returns the list of per-episode scores (max over the two agents).
    Checkpoints every 100 episodes and stops once the 100-episode average
    reaches the 0.5 solve threshold.
    """
    model_dir = os.getcwd()+"/model_dir"
    os.makedirs(model_dir, exist_ok=True)
    start_time = datetime.now()
    scores = []                        # list containing scores from each episode
    scores_window = deque(maxlen=100)  # last 100 scores
    for i_episode in range(1, n_episodes+1):
        env_info = env.reset(train_mode=True)[brain_name]  # reset the environment
        state = env_info.vector_observations               # get the current state
        agent.reset()
        score = np.zeros(len(env_info.agents))
        for t in range(max_t):
            action = agent.act(state)
            env_info = env.step(action)[brain_name]        # send the action to the environment
            next_state = env_info.vector_observations      # get the next state
            reward = env_info.rewards                      # get the reward
            done = env_info.local_done                     # see if episode has finished
            agent.step(state, action, reward, next_state, done, t)
            state = np.copy(next_state)
            score += reward
            if np.any(done):
                break
        # Episode score is the better of the two agents' returns.
        scores_window.append(np.amax(score))               # save most recent score
        scores.append(np.amax(score))
        print('\rEpisode {}\tAverage Score: {:.4f}\tScore: {}\tTime: {}'.format(i_episode, np.mean(scores_window), score, datetime.now()-start_time), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.4f}\tScore: {}\tTime: {}'.format(i_episode, np.mean(scores_window), score, datetime.now()-start_time))
            # Periodic checkpoint: each agent's actor/critic weights + optimizer state.
            save_dict_list = []
            for i in range(len(env_info.agents)):
                save_dict = {'actor_params': agent.actor_local[i].state_dict(),
                             'actor_optim_params': agent.actor_optimizer[i].state_dict(),
                             'critic_params': agent.critic_local[i].state_dict(),
                             'critic_optim_params': agent.critic_optimizer[i].state_dict()}
                save_dict_list.append(save_dict)
            torch.save(save_dict_list,
                       os.path.join(model_dir, 'episode-{}.pt'.format(i_episode)))
        if np.mean(scores_window)>=0.5:
            # Environment solved: save the final weights and stop training.
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.4f}'.format(i_episode, np.mean(scores_window)))
            save_dict_list = []
            for i in range(len(env_info.agents)):
                save_dict = {'actor_params': agent.actor_local[i].state_dict(),
                             'actor_optim_params': agent.actor_optimizer[i].state_dict(),
                             'critic_params': agent.critic_local[i].state_dict(),
                             'critic_optim_params': agent.critic_optimizer[i].state_dict()}
                save_dict_list.append(save_dict)
            torch.save(save_dict_list,
                       os.path.join(model_dir, 'final-episode.pt'))
            break
    return scores
# -
# ## MADDPG Agent
# +
# Build the multi-agent DDPG agent (2 players, 24-dim observations, 2-dim
# continuous actions for the Unity Tennis environment) and run the training
# loop defined above, timing the whole run.
from maddpg_agent import Agent
agent = Agent(num_agents=2, state_size=24, action_size=2, random_seed=2, lr_a=1e-4, lr_c=1e-3,weight_decay=0, fc1_units=400, fc2_units=300)
start = datetime.now()
scores = training_loop(agent)
end = datetime.now()
time_taken = end - start
print('Time: ',time_taken)
# plot the scores
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episodes')
plt.title('MADDPG Agent')
plt.show()
# purposely left commented. Leave as is.
# env.close()
# -
# ## Run Smart Agent
# +
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor
import torch
import torch.nn.functional as F
import torch.optim as optim
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
    """Inference-only agent: wraps a trained actor network for action selection.

    Unlike the training agent, this class carries no critic, replay buffer or
    noise process -- it only holds an actor whose weights are loaded
    externally and maps states to clipped actions.
    """

    def __init__(self, state_size, action_size, random_seed, fc1_units=400, fc2_units=300):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            random_seed (int): random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        self.state_size = state_size
        self.action_size = action_size
        # random.seed() returns None; kept for parity with the training agent.
        self.seed = random.seed(random_seed)
        # Actor network mapping states to actions (weights loaded via
        # load_state_dict by the caller).
        self.actor_local = Actor(
            state_size, action_size, random_seed, fc1_units=fc1_units, fc2_units=fc2_units).to(device)

    def act(self, state):
        """Returns actions for given state as per current policy.

        NOTE(review): ``state.resize`` reshapes the caller's array in place
        to (1, state_size) -- confirm callers do not reuse the input after
        this call.
        """
        state.resize(1, self.state_size)  # reshape for batch processing
        state = torch.from_numpy(state).float().to(device)
        self.actor_local.eval()  # inference mode (no dropout/batch-norm updates)
        with torch.no_grad():
            action = self.actor_local(state).cpu().data.numpy()
        self.actor_local.train()
        # The environment expects actions bounded to [-1, 1].
        return np.clip(action, -1, 1)
# Watch the trained agents play: build one inference agent per player, load
# the saved actor weights, and run a single rendered episode.
env_info = env.reset(train_mode=False)[brain_name]
num_agents = len(env_info.agents)
# create 2 agents
agent = [Agent(state_size=24, action_size=2, random_seed=2, fc1_units=400, fc2_units=300) for _ in range(num_agents)]
# load each agent's trained actor weights (saved by the training loop above)
agent_file = torch.load('model_dir/final-episode.pt', map_location='cpu')
for i in range(num_agents):
    agent[i].actor_local.load_state_dict(agent_file[i]['actor_params'])
episodes = 1
# BUG FIX: the episode loop variable was `i`, which the inner per-agent loop
# also used, so the episode number printed below was always wrong.
for ep in range(episodes):
    env_info = env.reset(train_mode=False)[brain_name]      # reset the environment
    state = env_info.vector_observations                    # get the current state
    scores = np.zeros(num_agents)                           # initialize the score (for each agent)
    for j in range(400):
        actions = np.zeros([2, 2])
        for a in range(num_agents):
            actions[a] = agent[a].act(state[a])
        env_info = env.step(actions)[brain_name]            # send the action to the environment
        state = env_info.vector_observations                # get the next state
        reward = env_info.rewards                           # get the reward
        done = env_info.local_done                          # see if episode has finished
        # BUG FIX: accumulate BEFORE the termination check -- the terminal
        # step's reward (often the scoring step) was previously dropped.
        scores += reward
        if np.any(done):
            break
        print('\rEpisode: {}\tStep: {}\tScore: {}'.format(ep+1, j+1, scores), end="")
    print('\rEpisode: {}\tStep: {}\tScore: {}'.format(ep+1, j+1, scores))
# purposely left commented. Leave as is.
# env.close()
# -
| P3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # WASM - Análise Estatística
#
# Análise dos dados de execução do processamento de detecção de faces compilados com WebAssembly versus compilação nativa com visual C++. O objetivo desta pesquisa é constatar a diferença de performance entre um código compilado p/ WebAssembly e outro com compilação nativa.
# ## Ferramentas para analise
import numpy as np
import pandas as pd
import os
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# ## Dados de testes
#
# O conjunto de dados de teste é formado por três vídeos selecionados a partir do *site* YouTube.
# Os critérios de seleção foram: ser um video de acesso publico, ou seja, sem restrições para reprodução e estar
# disponível em formato MPEG-4 com resolução padrão de 1920px por 1080px.
df_dataset = pd.read_csv('./data/dataset.csv')
df_dataset
# ## Resultados
#
# Foram feitos testes com uma aplicação nativa usando o Windows 10 e com WebAssembly usando Chrome 75 e Firefox 67, executando no mesmo sistema operacional e máquina física com as configurações padrão de instalação.
df_meta_fd = pd.read_csv('./data/META_LOG_FACE_DETECT.csv')
df_meta_fd
# ## Carregar dados de teste
#
#
#
# +
# The measurement logs follow a fixed naming scheme:
#   ./data/LOG_FACE_DETECT_DATASET{n}_{RESOLUTION}_{SYSTEM}.csv
# so the 27 files (3 datasets x 3 resolutions x 3 systems) are loaded in a
# loop instead of 27 hand-written read_csv calls.  The concatenation order
# reproduces the original code exactly: per system (Chrome, Firefox, native)
# and, within each system, SMALL then MEDIUM then LARGE.
_SYSTEMS = ['Chrome_92_Linux', 'Firefox_91_UNIX', 'LINUX']
_RESOLUTIONS = ['SMALL', 'MEDIUM', 'LARGE']

def _load_dataset(n):
    """Load and concatenate every measurement log for dataset ``n``."""
    frames = [
        pd.read_csv('./data/LOG_FACE_DETECT_DATASET{}_{}_{}.csv'.format(n, res, system))
        for system in _SYSTEMS
        for res in _RESOLUTIONS
    ]
    return pd.concat(frames, sort=False)

df_ds1 = _load_dataset(1)
df_ds2 = _load_dataset(2)
df_ds3 = _load_dataset(3)
# -
# Sanity check: per-dataset and total row counts before any filtering.
print('DATASET1', df_ds1['id'].count(), 'DATASET2', df_ds2['id'].count(), 'DATASET3', df_ds3['id'].count())
print('TOTAL', df_ds1['id'].count() + df_ds2['id'].count() + df_ds3['id'].count())
# ## Selecionar dados de teste
#
# O criterio de uniformização dos dados é considerar apenas as medições que tiveram o mesmo numero de rostos detectados,
# para fazer isso é necessario identificar qual a quantidade de rostos detectados mais comuns que seja diferente de zero.
# Frequency of each nonzero detected-face count per dataset, most common
# first (value_counts sorts descending).
ds1_faces_detected_rank = df_ds1.loc[df_ds1['faces_detected'] > 0, 'faces_detected'].value_counts()
ds2_faces_detected_rank = df_ds2.loc[df_ds2['faces_detected'] > 0, 'faces_detected'].value_counts()
ds3_faces_detected_rank = df_ds3.loc[df_ds3['faces_detected'] > 0, 'faces_detected'].value_counts()
for _name, _rank in (('DATASET1', ds1_faces_detected_rank),
                     ('DATASET2', ds2_faces_detected_rank),
                     ('DATASET3', ds3_faces_detected_rank)):
    print("{} - Rank com a quantidade mais comum de rostos detectados:".format(_name))
    print(_rank)
# +
def _keep_modal_face_rows(df, rank):
    """Keep only rows whose face count equals the most frequent nonzero
    count, then drop identification/label columns not needed for the timing
    statistics.  (The original drop list repeated 'workload'; listed once.)"""
    modal_count = rank.head(1).index[0]
    df = df[df["faces_detected"] == modal_count]
    return df.drop(columns=['id', 'workload', 'faces_detected', 'eyes_detected'])

#dataset_1
df_ds1 = _keep_modal_face_rows(df_ds1, ds1_faces_detected_rank)
#dataset_2
df_ds2 = _keep_modal_face_rows(df_ds2, ds2_faces_detected_rank)
#dataset_3
df_ds3 = _keep_modal_face_rows(df_ds3, ds3_faces_detected_rank)

# Unified frame with all three filtered datasets.
frames_ds123 = [df_ds1, df_ds2, df_ds3]
df_ds = pd.concat(frames_ds123, sort=False)
print('DATASET1', df_ds1['dataset'].count(),
      'DATASET2', df_ds2['dataset'].count(),
      'DATASET3', df_ds3['dataset'].count())
print('TOTAL', df_ds1['dataset'].count() + df_ds2['dataset'].count() + df_ds3['dataset'].count())
print('TOTAL df_ds', df_ds['dataset'].count())
# +
## Constantes
CHROME='Chrome_92_Linux'
FIREFOX='Firefox_91_UNIX'
NATIVO='LINUX'
CHROME_L='Chrome'
FIREFOX_L='Firefox'
NATIVO_L='Linux'

## Funções uteis
def get_col(dfds, system, res, col):
    """Return a one-column DataFrame of `col` for the rows matching the
    given system and resolution label."""
    mask = (dfds["system"] == system) & (dfds["resolution_label"] == res)
    return dfds.loc[mask, [col]]

def get_df_process_time(dfds, res):
    """Return process_time_ms side by side per system (columns
    Chrome / Firefox / Linux) for a single resolution, joined on index."""
    per_system = [
        get_col(dfds, system, res, 'process_time_ms').rename(columns={'process_time_ms': label})
        for system, label in ((CHROME, CHROME_L), (FIREFOX, FIREFOX_L), (NATIVO, NATIVO_L))
    ]
    joined = per_system[0]
    for extra in per_system[1:]:
        joined = joined.join(extra)
    return joined
# -
# ## Análise Estatística
# **Analise Geral dos datasets unificados**
# ### DATASET 1 2 e 3 - ANALISE DE OUTLIERS
#
# #### RESOLUÇÂO SMALL
# Boxplots of face-detection time per system, BEFORE outlier removal.
df_proc_small = get_df_process_time(df_ds,'SMALL')
boxplot_small = df_proc_small.boxplot()
# #### RESOLUÇÃO MEDIUM
df_proc_medium = get_df_process_time(df_ds,'MEDIUM')
boxplot_medium = df_proc_medium.boxplot()
# #### RESOLUÇÂO LARGE
df_proc_large = get_df_process_time(df_ds,'LARGE')
boxplot_large = df_proc_large.boxplot()
# #### REMOVENDO OUTLIERS
# +
# Descriptive statistics (count/mean/std/quartiles) per system, per resolution.
stats_small = df_proc_small.describe()
stats_medium = df_proc_medium.describe()
stats_large = df_proc_large.describe()
print(stats_small)
print(stats_medium)
print(stats_large)
stats = {'SMALL':stats_small, 'MEDIUM':stats_medium, 'LARGE':stats_large}

def get_lim_sup(res, system_label):
    """Upper outlier fence: Q3 + 1.5*std.

    NOTE(review): the classic Tukey fence uses 1.5*IQR rather than 1.5*std;
    kept as-is to preserve the study's methodology.
    """
    std = stats[res].loc['std'][system_label]
    q3 = stats[res].loc['75%'][system_label]
    return q3 + 1.5*std

def get_lim_inf(res, system_label):
    """Lower outlier fence: Q1 - 1.5*std (see note in get_lim_sup)."""
    std = stats[res].loc['std'][system_label]
    q1 = stats[res].loc['25%'][system_label]
    return q1 - 1.5*std

# Upper fences: resolution x (Chrome / Firefox / native).
lim_sup_s_c = get_lim_sup('SMALL', CHROME_L)
lim_sup_m_c = get_lim_sup('MEDIUM', CHROME_L)
lim_sup_l_c = get_lim_sup('LARGE', CHROME_L)
lim_sup_s_f = get_lim_sup('SMALL', FIREFOX_L)
lim_sup_m_f = get_lim_sup('MEDIUM', FIREFOX_L)
lim_sup_l_f = get_lim_sup('LARGE', FIREFOX_L)
lim_sup_s_n = get_lim_sup('SMALL', NATIVO_L)
lim_sup_m_n = get_lim_sup('MEDIUM', NATIVO_L)
lim_sup_l_n = get_lim_sup('LARGE', NATIVO_L)
# Lower fences, same layout.
lim_inf_s_c = get_lim_inf('SMALL', CHROME_L)
lim_inf_m_c = get_lim_inf('MEDIUM', CHROME_L)
lim_inf_l_c = get_lim_inf('LARGE', CHROME_L)
lim_inf_s_f = get_lim_inf('SMALL', FIREFOX_L)
lim_inf_m_f = get_lim_inf('MEDIUM', FIREFOX_L)
lim_inf_l_f = get_lim_inf('LARGE', FIREFOX_L)
lim_inf_s_n = get_lim_inf('SMALL', NATIVO_L)
lim_inf_m_n = get_lim_inf('MEDIUM', NATIVO_L)
lim_inf_l_n = get_lim_inf('LARGE', NATIVO_L)
print(lim_sup_m_n)
# +
def remove_outliers(df_ds_copy, res, system, lim_sup, lim_inf):
    """Drop rows of one (resolution, system) pair whose process time lies
    strictly outside [lim_inf, lim_sup]; return the filtered frame.

    NOTE(review): rows are removed by index label, so with duplicate index
    labels (the concatenated frames keep their original indices) every row
    sharing an outlier's label is dropped -- same behavior as before.
    """
    selected = (df_ds_copy["resolution_label"] == res) & (df_ds_copy["system"] == system)
    out_of_range = ((df_ds_copy['process_time_ms'] > lim_sup)
                    | (df_ds_copy['process_time_ms'] < lim_inf))
    outlier_index = df_ds_copy[selected & out_of_range].index
    return df_ds_copy.drop(index=outlier_index)
# Apply the fences pair by pair; the order matches the original sequential
# calls (SMALL/MEDIUM/LARGE, each Chrome -> Firefox -> native).
_FENCES = [
    ('SMALL',  CHROME,  lim_sup_s_c, lim_inf_s_c),
    ('SMALL',  FIREFOX, lim_sup_s_f, lim_inf_s_f),
    ('SMALL',  NATIVO,  lim_sup_s_n, lim_inf_s_n),
    ('MEDIUM', CHROME,  lim_sup_m_c, lim_inf_m_c),
    ('MEDIUM', FIREFOX, lim_sup_m_f, lim_inf_m_f),
    ('MEDIUM', NATIVO,  lim_sup_m_n, lim_inf_m_n),
    ('LARGE',  CHROME,  lim_sup_l_c, lim_inf_l_c),
    ('LARGE',  FIREFOX, lim_sup_l_f, lim_inf_l_f),
    ('LARGE',  NATIVO,  lim_sup_l_n, lim_inf_l_n),
]
for _res, _system, _sup, _inf in _FENCES:
    df_ds = remove_outliers(df_ds, _res, _system, _sup, _inf)
df_ds
# -
# Boxplots again, now on the outlier-filtered data.
df_proc_small = get_df_process_time(df_ds,'SMALL')
boxplot_small = df_proc_small.boxplot()
df_proc_medium = get_df_process_time(df_ds,'MEDIUM')
boxplot_medium = df_proc_medium.boxplot()
df_proc_large = get_df_process_time(df_ds,'LARGE')
boxplot_large = df_proc_large.boxplot()
# ### Dados de teste - Geral (DATASET1, DATASET2, DATASET3)
#dataset,process_time_ms,pre_time_ms,tag_time_ms,pos_time_ms,total_time_ms,system,resolution_label
# Sample counts per (dataset, system, resolution) cell.
df_ds_group = df_ds.groupby(['dataset','system', 'resolution_label'])
df_ds_group[['total_time_ms']].agg(['count'])
#dataset,process_time_ms,pre_time_ms,tag_time_ms,pos_time_ms,total_time_ms,system,resolution_label
df_ds_group = df_ds.groupby(['system', 'resolution_label'])
# Use string aggregation names instead of np.mean/np.median: the resulting
# column labels ('mean'/'median') are identical, but this avoids the pandas
# deprecation of passing NumPy callables to .agg().
df_ds_st = df_ds_group[
    ['process_time_ms','pre_time_ms','tag_time_ms','pos_time_ms','total_time_ms']
].agg(['mean', 'median'])
df_ds_st
# ## Analise de Performance de Todas as Fase de Processamento
# +
# Median total pipeline time per system, sorted ascending by value
# (assumed to correspond to the BAIXA -> ALTA resolution order -- verify).
total_c = df_ds_st.loc['Chrome_92_Linux']['total_time_ms']['median'].sort_values()
total_f = df_ds_st.loc['Firefox_91_UNIX']['total_time_ms']['median'].sort_values()
total_w = df_ds_st.loc['LINUX']['total_time_ms']['median'].sort_values()
activity = ["BAIXA", "MÉDIA", "ALTA"]
plt.rcParams["figure.figsize"] = (8,6)
linestyles = ['-', '--', '-.', ':']  # reused by the per-phase plots below
fig, ax1 = plt.subplots()
for _series, _label, _ls in ((total_c, "WASM - " + CHROME_L, linestyles[0]),
                             (total_f, "WASM - " + FIREFOX_L, linestyles[1]),
                             (total_w, "NATIVO - " + NATIVO_L, linestyles[3])):
    ax1.plot(activity, _series, label=_label, linestyle=_ls)
ax1.legend()
plt.xlabel('Resolução da Imagem')
plt.ylabel('Tempo mediana de todas as fases (ms)')
plt.title("Peformance Geral da Prova de Conceito")
plt.legend()
plt.show()
print("Nativo", total_w)
print("Wams Firefox", total_f)
print("Wams Chrome", total_c)
print("Comparação Nativo x Firefox:", ((((total_w - total_f) * 100)/total_w)/100).mean())
print("Comparação Chrome x Firefox:", ((((total_c - total_f) * 100)/total_c)/100).mean())
# -
# ## Analise de Performance de Pré-Processamento
# +
# Median pre-processing time per system, sorted ascending by value.
pre_c = df_ds_st.loc['Chrome_92_Linux']['pre_time_ms']['median'].sort_values()
pre_f = df_ds_st.loc['Firefox_91_UNIX']['pre_time_ms']['median'].sort_values()
pre_w = df_ds_st.loc['LINUX']['pre_time_ms']['median'].sort_values()
activity = ["BAIXA", "MÉDIA", "ALTA"]
fig, ax = plt.subplots()
for _series, _label, _ls in ((pre_c, "WASM - "+ CHROME_L, linestyles[0]),
                             (pre_f, "WASM - "+ FIREFOX_L, linestyles[1]),
                             (pre_w, "Native - "+ NATIVO_L, linestyles[3])):
    ax.plot(activity, _series, label=_label, linestyle=_ls)
ax.legend()
plt.xlabel('Resolução da Imagem')
plt.ylabel('Tempo mediano do Pré-Processamento (ms)')
plt.title("Performance da fase de Pré-Processamento.")
plt.legend()
plt.show()
print("Nativo", pre_w)
print("Wams Firefox", pre_f)
print("Wams Chrome", pre_c)
print("Comparação Chrome x Nativo:", ((((pre_c - pre_w) * 100)/pre_c)/100).median())
print("Comparação Chrome x Firefox:", ((((pre_c - pre_f) * 100)/pre_c)/100).median())
# -
# ## Analise de Performance do Processamento prioncipal - Detecção de Faces
# +
# Median face-detection (core processing) time per system, sorted ascending.
pro_c = df_ds_st.loc['Chrome_92_Linux']['process_time_ms']['median'].sort_values()
pro_f = df_ds_st.loc['Firefox_91_UNIX']['process_time_ms']['median'].sort_values()
pro_w = df_ds_st.loc['LINUX']['process_time_ms']['median'].sort_values()
activity = ["BAIXA", "MÉDIA", "ALTA"]
fig, ax = plt.subplots()
ax.plot(activity, pro_c, label="WASM - "+CHROME_L,linestyle=linestyles[0])
ax.plot(activity, pro_f, label="WASM - "+FIREFOX_L,linestyle=linestyles[1])
ax.plot(activity, pro_w, label="NATIVO -"+NATIVO_L,linestyle=linestyles[3])
ax.legend()
plt.xlabel('Resolução da Imagem')
plt.ylabel('Tempo mediano da detecção de faces (ms)')
plt.title("Performance da Detecção de Faces.")
plt.legend()
plt.show()
# BUG FIX: the "Nativo" line previously printed pro_c (Chrome's medians,
# which were also printed two lines below); it now prints pro_w.
print("Nativo", pro_w)
print("Wams Firefox", pro_f)
print("Wams Chrome", pro_c)
print("Comparação Nativo x Firefox:", ((((pro_w - pro_f) * 100)/pro_w)/100).median())
print("Comparação Firefox x Chrome:", ((((pro_f - pro_c) * 100)/pro_f)/100).median())
# -
# ## Analise de Performance da marcação de faces na imagem
# +
# Median face-tagging (rectangle drawing) time per system, sorted ascending.
pro_c = df_ds_st.loc['Chrome_92_Linux']['tag_time_ms']['median'].sort_values()
pro_f = df_ds_st.loc['Firefox_91_UNIX']['tag_time_ms']['median'].sort_values()
pro_w = df_ds_st.loc['LINUX']['tag_time_ms']['median'].sort_values()
activity = ["BAIXA", "MÉDIA", "ALTA"]
fig, ax = plt.subplots()
ax.plot(activity, pro_c, label="WASM - "+CHROME_L,linestyle=linestyles[0])
ax.plot(activity, pro_f, label="WASM - "+FIREFOX_L,linestyle=linestyles[1])
ax.plot(activity, pro_w, label="NATIVO - "+NATIVO_L,linestyle=linestyles[3])
ax.legend()
plt.xlabel('Resolução da Imagem')
plt.ylabel('Tempo mediano da marcação de faces (ms)')
plt.title("Peformace da marcação de faces na imagem")
plt.legend()
plt.show()
# BUG FIX: the "Nativo" line previously printed pro_c (Chrome's medians);
# it now prints pro_w.
print("Nativo", pro_w)
print("Wams Firefox", pro_f)
print("Wams Chrome", pro_c)
print("Comparação Nativo x Firefox:", ((((pro_w - pro_f) * 100)/pro_w)/100).median())
print("Comparação Firefox x Chrome:", ((((pro_f - pro_c) * 100)/pro_f)/100).median())
# +
# Median post-processing time per system, sorted ascending.
pro_c = df_ds_st.loc['Chrome_92_Linux']['pos_time_ms']['median'].sort_values()
pro_f = df_ds_st.loc['Firefox_91_UNIX']['pos_time_ms']['median'].sort_values()
pro_w = df_ds_st.loc['LINUX']['pos_time_ms']['median'].sort_values()
activity = ["BAIXA", "MÉDIA", "ALTA"]
fig, ax = plt.subplots()
ax.plot(activity, pro_c, label="WASM - "+CHROME_L,linestyle=linestyles[0])
ax.plot(activity, pro_f, label="WASM - "+FIREFOX_L,linestyle=linestyles[1])
ax.plot(activity, pro_w, label="NATIVO - "+NATIVO_L,linestyle=linestyles[3])
ax.legend()
plt.xlabel('Resolução da Imagem')
plt.ylabel('Tempo mediano do pós-processamento (ms)')
plt.title("Peformace do Pós-processamento")
plt.legend()
plt.show()
# BUG FIX: the "Nativo" line previously printed pro_c (Chrome's medians);
# it now prints pro_w.
print("Nativo", pro_w)
print("Wams Firefox", pro_f)
print("Wams Chrome", pro_c)
print("Comparação Nativo x Firefox:", ((((pro_w - pro_f) * 100)/pro_w)/100).median())
print("Comparação Firefox x Chrome:", ((((pro_f - pro_c) * 100)/pro_f)/100).median())
# -
#
| wasmopencv/results/wasm_analise_resultados.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import csv
import threading
from matplotlib import pyplot as plt
from keyboard import is_pressed
import csv
import os.path
from collections import Counter
# +
# Initialize the Haar cascade face classifier from the bundled XML model.
haar_cascade = cv2.CascadeClassifier(
    'data/haarcascade_frontalface_alt.xml')

# Smoke test: grab one frame from the default webcam (change the int
# argument of VideoCapture to pick another camera) and draw a box around
# every face the cascade finds.
capture = cv2.VideoCapture(0)
if capture.isOpened():
    ok, frame = capture.read()
    if ok:
        # Haar cascades expect single-channel (grayscale) input.
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detected = haar_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5)
        print("faces found: ", len(detected))
        for (x, y, w, h) in detected:
            cv2.rectangle(gray_frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        plt.imshow(gray_frame, cmap="gray")
# +
def display(img,faces,numImages,lastlabel,saving):
    """Return a copy of `img` annotated for the labeling UI.

    When exactly one face was detected, draws its bounding box plus the
    current label, the per-label image counts, and "SAVED" while a capture
    is being written; with zero or multiple faces the copy is returned
    unannotated.  (Indentation reconstructed: the drawing calls are assumed
    to be inside the single-face branch, since x/y/w/h only exist there.)
    """
    disp_img = np.copy(img)
    if len(faces) == 1:
        x,y,w,h = faces[0]
        savestr = ""
        if saving:
            savestr = "SAVED"
        cv2.rectangle(disp_img, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(disp_img,lastlabel + " {}".format(str(numImages)) + savestr,
                    (x,y-5),cv2.FONT_HERSHEY_SIMPLEX,.5,(0,255,0))
    return disp_img
def label(savepath,smoothCapture=False):
    """Interactive webcam labeling loop.

    Special key codes (cv2.waitKeyEx values -- platform-dependent, verify
    off Windows) select the current emotion label, SPACE saves the cropped
    face image plus a CSV row, ESC quits.  Appends to
    `savepath`/faceLabels.csv when it already exists (resuming counts and
    numbering), otherwise starts a fresh file.
    """
    csvFileWriter = None
    csvFile = None
    num = 0
    # BUG FIX: labelCount was only assigned inside the resume branch, so a
    # first-ever run crashed with NameError.  An empty Counter makes every
    # per-label count default to 0.
    labelCount = Counter()
    if os.path.isfile(savepath + "/faceLabels.csv"):
        # Resume: recover per-label counts and the next image number from
        # the existing CSV (image names look like "face<num>.png").
        with open(savepath + '/faceLabels.csv', 'r') as file:
            l = np.array(list(csv.reader(file)))
            print(l.shape)
            labelCount = Counter(l[:,1])
            num = int(l[-1,0][4:-4]) +1
        csvFile = open(savepath + '/faceLabels.csv', 'a')
    else:
        csvFile = open(savepath + '/faceLabels.csv', 'w')
    print(labelCount)
    csvFileWriter = csv.writer(csvFile, delimiter=',',
                               quotechar='|', quoting=csv.QUOTE_MINIMAL,lineterminator='\n')
    cam = cv2.VideoCapture(smoothCapture)
    lastlabel = "Neutral"
    numImages = [labelCount["Neutral"],labelCount["Happy"],labelCount["Sad"],labelCount["Angry"]]
    lastlabelidx = 0
    saving = False
    # BUG FIX: guard against SPACE on the very first iteration, before any
    # frame or detection exists.
    faceimg = None
    while True:
        key = cv2.waitKeyEx(False)
        if key == ord(' ') and saving == False and faceimg is not None:
            # Save the current single-face crop and record its label.
            saving = True
            numImages[lastlabelidx] += 1
            imgName = "face"+str(num)+'.png'
            cv2.imwrite(savepath + "/" + imgName,faceimg)
            csvFileWriter.writerow([imgName,lastlabel])
            num += 1
            disp_img = display(img,faces,numImages,lastlabel,saving)
        else:
            saving = False
            ret_val, img = cam.read()
            img = cv2.flip(img,1)  # mirror for a natural "selfie" view
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
            faceimg = None
            if len(faces) == 1:
                x,y,w,h = faces[0]
                faceimg = gray[y:y+h,x:x+w]
            # Special key codes from cv2.waitKeyEx select the active label.
            if key == 2424832:
                lastlabel = "Neutral"
                lastlabelidx = 0
            elif key == 2490368:
                lastlabel = "Happy"
                lastlabelidx = 1
            elif key == 2621440:
                lastlabel = "Sad"
                lastlabelidx = 2
            elif key == 2555904:
                lastlabel = "Angry"
                lastlabelidx = 3
            disp_img = display(img,faces,numImages,lastlabel,saving)
        cv2.imshow('my webcam', disp_img)
        if key == 27:
            break  # esc to quit
    cv2.destroyAllWindows()
    # BUG FIX: close the CSV handle so buffered rows are flushed to disk.
    csvFile.close()
# -
# Launch the interactive labeler; images and faceLabels.csv go to ./images.
label("images")
| old_version/Create Labeled Images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 64-bit
# name: python374jvsc74a57bd0021d9f4f6a0c9e23e32c4246ac82593951ffad9baab3e58c0c69e8a8c06b339b
# ---
# # Pivot Tables
# We have seen how the ``GroupBy`` abstraction lets us explore relationships within a dataset.
# A *pivot table* is a similar operation that is commonly seen in spreadsheets and other programs that operate on tabular data.
# The pivot table takes simple column-wise data as input, and groups the entries into a two-dimensional table that provides a multidimensional summarization of the data.
# The difference between pivot tables and ``GroupBy`` can sometimes cause confusion; it helps me to think of pivot tables as essentially a *multidimensional* version of ``GroupBy`` aggregation.
# That is, you split-apply-combine, but both the split and the combine happen across not a one-dimensional index, but across a two-dimensional grid.
# ## Motivating Pivot Tables
#
# For the examples in this section, we'll use the database of passengers on the *Titanic*, available through the Seaborn library (see [Visualization With Seaborn](04.14-Visualization-With-Seaborn.ipynb)):
# Standard stack plus seaborn, which bundles the Titanic example dataset.
import numpy as np
import pandas as pd
import seaborn as sns
titanic = sns.load_dataset('titanic')
titanic.head()
# This contains a wealth of information on each passenger of that ill-fated voyage, including gender, age, class, fare paid, and much more.
# ## Pivot Tables by Hand
#
# To start learning more about this data, we might begin by grouping according to gender, survival status, or some combination thereof.
# If you have read the previous section, you might be tempted to apply a ``GroupBy`` operation–for example, let's look at survival rate by gender:
# Survival rate (mean of the 0/1 `survived` column) by gender.
titanic.groupby('sex')[['survived']].mean()
# This immediately gives us some insight: overall, three of every four females on board survived, while only one in five males survived!
#
# This is useful, but we might like to go one step deeper and look at survival by both sex and, say, class.
# Using the vocabulary of ``GroupBy``, we might proceed using something like this:
# we *group by* class and gender, *select* survival, *apply* a mean aggregate, *combine* the resulting groups, and then *unstack* the hierarchical index to reveal the hidden multidimensionality. In code:
# Two-key aggregation; unstack turns the inner index level ('class') into columns.
titanic.groupby(['sex', 'class'])['survived'].aggregate('mean').unstack()
# This gives us a better idea of how both gender and class affected survival, but the code is starting to look a bit garbled.
# While each step of this pipeline makes sense in light of the tools we've previously discussed, the long string of code is not particularly easy to read or use.
# This two-dimensional ``GroupBy`` is common enough that Pandas includes a convenience routine, ``pivot_table``, which succinctly handles this type of multi-dimensional aggregation.
# ## Pivot Table Syntax
#
# Here is the equivalent to the preceding operation using the ``pivot_table`` method of ``DataFrame``s:
# pivot_table: the groupby + unstack above in a single call (default aggfunc is mean).
titanic.pivot_table('survived', index='sex', columns='class')
# This is eminently more readable than the ``groupby`` approach, and produces the same result.
# As you might expect of an early 20th-century transatlantic cruise, the survival gradient favors both women and higher classes.
# First-class women survived with near certainty (hi, Rose!), while only one in ten third-class men survived (sorry, Jack!).
# ### Multi-level pivot tables
#
# Just as in the ``GroupBy``, the grouping in pivot tables can be specified with multiple levels, and via a number of options.
# For example, we might be interested in looking at age as a third dimension.
# We'll bin the age using the ``pd.cut`` function:
# Bin ages into (0, 18] and (18, 80] and use the bins as a second row level.
age = pd.cut(titanic['age'], [0, 18, 80])
titanic.pivot_table('survived', ['sex', age], 'class')
# We can apply the same strategy when working with the columns as well; let's add info on the fare paid using ``pd.qcut`` to automatically compute quantiles:
# qcut splits fares into two equal-count quantile bins, used as a second column level.
fare = pd.qcut(titanic['fare'], 2)
titanic.pivot_table('survived', ['sex', age], [fare, 'class'])
# The result is a four-dimensional aggregation with hierarchical indices (see [Hierarchical Indexing](03.05-Hierarchical-Indexing.ipynb)), shown in a grid demonstrating the relationship between the values.
# ### Additional pivot table options
#
# The full call signature of the ``pivot_table`` method of ``DataFrame``s is as follows:
#
# ```python
# # call signature as of Pandas 0.18
# DataFrame.pivot_table(data, values=None, index=None, columns=None,
# aggfunc='mean', fill_value=None, margins=False,
# dropna=True, margins_name='All')
# ```
#
# We've already seen examples of the first three arguments; here we'll take a quick look at the remaining ones.
# Two of the options, ``fill_value`` and ``dropna``, have to do with missing data and are fairly straightforward; we will not show examples of them here.
#
# The ``aggfunc`` keyword controls what type of aggregation is applied, which is a mean by default.
# As in the GroupBy, the aggregation specification can be a string representing one of several common choices (e.g., ``'sum'``, ``'mean'``, ``'count'``, ``'min'``, ``'max'``, etc.) or a function that implements an aggregation (e.g., ``np.sum()``, ``min()``, ``sum()``, etc.).
# Additionally, it can be specified as a dictionary mapping a column to any of the above desired options:
# Per-column aggregations: total survivors and mean fare; the values are
# inferred from the dict keys, so no `values` argument is needed.
titanic.pivot_table(index='sex', columns='class',
                    aggfunc={'survived':sum, 'fare':'mean'})
# Notice also here that we've omitted the ``values`` keyword; when specifying a mapping for ``aggfunc``, this is determined automatically.
# At times it's useful to compute totals along each grouping.
# This can be done via the ``margins`` keyword:
# margins=True appends an "All" row/column with the marginal aggregates.
titanic.pivot_table('survived', index='sex', columns='class', margins=True)
# Here this automatically gives us information about the class-agnostic survival rate by gender, the gender-agnostic survival rate by class, and the overall survival rate of 38%.
# The margin label can be specified with the ``margins_name`` keyword, which defaults to ``"All"``.
# ## Example: Birthrate Data
#
# As a more interesting example, let's take a look at the freely available data on births in the United States, provided by the Centers for Disease Control (CDC).
# This data can be found at https://raw.githubusercontent.com/jakevdp/data-CDCbirths/master/births.csv
# (this dataset has been analyzed rather extensively by <NAME> and his group; see, for example, [this blog post](http://andrewgelman.com/2012/06/14/cool-ass-signal-processing-using-gaussian-processes/)):
# +
# shell command to download the data: (ALREADY DOWNLOADED)
# #!curl -O https://raw.githubusercontent.com/jakevdp/data-CDCbirths/master/births.csv
# -
# CDC birth counts; columns used below include year, month, day, gender, births.
births = pd.read_csv('../../data/births.csv')
# Taking a look at the data, we see that it's relatively simple–it contains the number of births grouped by date and gender:
births.head()
# We can start to understand this data a bit more by using a pivot table.
# Let's add a decade column, and take a look at male and female births as a function of decade:
# Integer-divide to the decade (e.g. 1987 -> 1980), then total births per decade/gender.
births['decade'] = 10 * (births['year'] // 10)
births.pivot_table('births', index='decade', columns='gender', aggfunc='sum')
# We immediately see that male births outnumber female births in every decade.
# To see this trend a bit more clearly, we can use the built-in plotting tools in Pandas to visualize the total number of births by year:
# %matplotlib inline
import matplotlib.pyplot as plt
sns.set()  # use Seaborn styles
# Yearly totals per gender, plotted as one line each.
births.pivot_table('births', index='year', columns='gender', aggfunc='sum').plot()
plt.ylabel('total births per year');
# With a simple pivot table and ``plot()`` method, we can immediately see the annual trend in births by gender. By eye, it appears that over the past 50 years male births have outnumbered female births by around 5%.
# ### Further data exploration
#
# Though this doesn't necessarily relate to the pivot table, there are a few more interesting features we can pull out of this dataset using the Pandas tools covered up to this point.
# We must start by cleaning the data a bit, removing outliers caused by mistyped dates (e.g., June 31st) or missing values (e.g., June 99th).
# One easy way to remove these all at once is to cut outliers; we'll do this via a robust sigma-clipping operation:
# Robust sigma-clipping parameters: mu is the median, and sig approximates
# the standard deviation, since for a Gaussian the IQR is ~1.349*sigma
# (1/1.349 ~= 0.74).
quartiles = np.percentile(births['births'], [25, 50, 75])
mu = quartiles[1]
sig = 0.74 * (quartiles[2] - quartiles[0])
# This final line is a robust estimate of the sample standard deviation, where the 0.74 comes from the interquartile range of a Gaussian distribution.
#
# With this we can use the ``query()`` method (discussed further in [High-Performance Pandas: ``eval()`` and ``query()``](03.12-Performance-Eval-and-Query.ipynb)) to filter-out rows with births outside these values:
# Keep rows within mu +/- 5*sig, referencing the locals via query's @ syntax.
births = births.query('(births > @mu - 5 * @sig) & (births < @mu + 5 * @sig)')
# Next we set the ``day`` column to integers; previously it had been a string because some columns in the dataset contained the value ``'null'``:
# set 'day' column to integer; it originally was a string due to nulls
births['day'] = births['day'].astype(int)
# Finally, we can combine the day, month, and year to create a Date index (see [Working with Time Series](03.11-Working-with-Time-Series.ipynb)).
# This allows us to quickly compute the weekday corresponding to each row:
# +
# create a datetime index from the year, month, day
# Encode y/m/d as a single yyyymmdd integer and parse all rows in one
# vectorized to_datetime call.
births.index = pd.to_datetime(10000 * births.year +
                              100 * births.month +
                              births.day, format='%Y%m%d')
births['dayofweek'] = births.index.dayofweek  # Monday=0 ... Sunday=6
# -
# Using this we can plot births by weekday for several decades:
# +
import matplotlib.pyplot as plt
import matplotlib as mpl
# Mean births per weekday, one line per decade.
births.pivot_table('births', index='dayofweek',
                    columns='decade', aggfunc='mean').plot()
# NOTE(review): setting labels without first fixing tick positions can warn or
# mislabel in newer Matplotlib; consider plt.gca().set_xticks(range(7)) first.
plt.gca().set_xticklabels(['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun'])
plt.ylabel('mean births by day');
# -
# Apparently births are slightly less common on weekends than on weekdays! Note that the 1990s and 2000s are missing because the CDC data contains only the month of birth starting in 1989.
#
# Another interesting view is to plot the mean number of births by the day of the *year*.
# Let's first group the data by month and day separately:
# Average births for each (month, day) combination across all years.
births_by_date = births.pivot_table('births',
                                    [births.index.month, births.index.day])
births_by_date.head()
# The result is a multi-index over months and days.
# To make this easily plottable, let's turn these months and days into a date by associating them with a dummy year variable (making sure to choose a leap year so February 29th is correctly handled!)
# BUG FIX: ``pd.datetime`` was deprecated in pandas 0.25 and removed in 1.0;
# ``pd.Timestamp`` accepts the same (year, month, day) arguments.
births_by_date.index = [pd.Timestamp(2012, month, day)
                        for (month, day) in births_by_date.index]
births_by_date.head()
# Focusing on the month and day only, we now have a time series reflecting the average number of births by date of the year.
# From this, we can use the ``plot`` method to plot the data. It reveals some interesting trends:
# Plot the results
fig, ax = plt.subplots(figsize=(12, 4))
births_by_date.plot(ax=ax);
# In particular, the striking feature of this graph is the dip in birthrate on US holidays (e.g., Independence Day, Labor Day, Thanksgiving, Christmas, New Year's Day) although this likely reflects trends in scheduled/induced births rather than some deep psychosomatic effect on natural births.
#
# Looking at this short example, you can see that many of the Python and Pandas tools we've seen to this point can be combined and used to gain insight from a variety of datasets.
# We will see some more sophisticated applications of these data manipulations in future sections!
| week4_EDA_np_pd_json_apis_regex/day2_numpy_pandas_II/theory/pandas/9.Pivot-Tables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/John-Dennis-AI/John-Dennis-AI.github.io/blob/master/XGBoost_Example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="BzNBPtRMoaiU" colab_type="text"
# # **Using XGBoost to Diagnose Breast Cancer**
# + [markdown] id="dGUPwc1LWi0A" colab_type="text"
# If needed, install XGBoost
# + id="SAogCyaYWLbU" colab_type="code" outputId="3a3cc402-b2ce-4132-c705-f4b0eaf220fa" colab={"base_uri": "https://localhost:8080/", "height": 68}
pip install xgboost
# + [markdown] id="Yb2NtrPpWuTJ" colab_type="text"
# Set up the data (to make it easy, use data directly from sklearn)
# + id="Zw5aPOuYWm9b" colab_type="code" colab={}
from sklearn import datasets
import xgboost as xgb
# Load the breast-cancer dataset bundled with scikit-learn; the target is
# binary (0 = malignant, 1 = benign per sklearn's encoding).
bc = datasets.load_breast_cancer()
X = bc.data
y = bc.target
# + [markdown] id="EHXpLL_QW3OQ" colab_type="text"
# Define the train-test split into 80% train, 20% test
# + [markdown] id="9QuzYmUGdy0K" colab_type="text"
#
# + id="HnH0q30bW6VQ" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# 80/20 split.  NOTE(review): no random_state or stratify is set, so metrics
# vary run to run — consider random_state=0 for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)
# + [markdown] id="QdGrbIvuW9QQ" colab_type="text"
# Transform data into a DMatrix, an internal data structure used by XGBoost for efficiency
# + id="ZdYK3yHxXGaP" colab_type="code" colab={}
D_train = xgb.DMatrix(X_train, label=Y_train)
D_test = xgb.DMatrix(X_test, label=Y_test)
# + [markdown] id="YUjfpKlfXQGA" colab_type="text"
# Set up the XGBoost parameters (set to default values as per [parameter list](https://xgboost.readthedocs.io/en/latest/parameter.html))
# + id="cv9WwnX_XVHA" colab_type="code" colab={}
# XGBoost hyperparameters (defaults per the linked parameter list).
params = {
    'eta': 0.3,                     # Step-size shrinkage, similar to a learning rate
    'max_depth': 6,                 # Maximum depth of each tree
    'objective': 'multi:softprob',  # Softmax objective emitting per-class probabilities
    'num_class': 2                  # BUG FIX: breast-cancer labels are binary (0/1), so 2 classes, not 3
}
steps = 25  # Number of training (boosting) iterations
# + [markdown] id="SoYe_pxhXT1n" colab_type="text"
# Train the Model
# + id="evVa_AAsXcvv" colab_type="code" colab={}
model = xgb.train(params, D_train, steps)
# + [markdown] id="LIU8GT0iWxMo" colab_type="text"
# Run the Model
# + id="9i5CchrHXi1f" colab_type="code" outputId="6f3b0521-a1d8-40f7-e34c-d74f07336e46" colab={"base_uri": "https://localhost:8080/", "height": 68}
import numpy as np
from sklearn.metrics import precision_score, recall_score, accuracy_score
# With ``multi:softprob``, predict() returns one row of class probabilities
# per sample; argmax selects the most probable class index.
preds = model.predict(D_test)
best_preds = np.asarray([np.argmax(line) for line in preds])
# ``average='macro'``: unweighted mean of the per-class scores.
print("Accuracy = {}".format(accuracy_score(Y_test, best_preds)))
print("Precision = {}".format(precision_score(Y_test, best_preds, average='macro')))
print("Recall = {}".format(recall_score(Y_test, best_preds, average='macro')))
# + [markdown] id="A5OmEe0lYNW9" colab_type="text"
# Plot the Decision Tree
#
#
# + id="fSzKmFF9YQ7V" colab_type="code" outputId="600c9f39-5def-4ab9-8b2a-6a42a25411aa" colab={"base_uri": "https://localhost:8080/", "height": 520}
from xgboost import plot_tree
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (50,9)
plot_tree(model)
plt.show()
# + id="kTXTE_C6ZOPV" colab_type="code" colab={}
| XGBoost_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
import bs4
import random
import requests
import webbrowser
def check_url(url):
    """Validate that *url* looks like a Wikipedia article URL.

    Raises Exception with a usage hint when the ``/wiki/<title>`` pattern
    is absent; returns None otherwise.
    """
    article_match = re.search(r"\/wiki\/([\w%]+)", url)
    if article_match is None:
        raise Exception('Valid URL example: https://en.wikipedia.org/wiki/Python')
# Get user input for URL
url = input('Please enter wikipedia.org article url: ')
check_url(url)
# Request and get Response by URL
# Parse html document
# Get all tags with class
# ``.thumbimage`` selects the article's thumbnail <img> elements.
res = requests.get(url)
html = res.text
parsed_doc = bs4.BeautifulSoup(html, 'lxml')
img_tags = parsed_doc.select('.thumbimage')
def get_images():
    """Yield the thumbnail <img> tags (module-level ``img_tags``) that
    carry a ``src`` attribute."""
    # Check the validity of tags
    # Generator for image tags with src attribute
    for i in img_tags:
        if i.has_attr('src'):
            yield i
def get_image_urls():
    """Return the ``src`` URL of every valid image tag."""
    return [tag['src'] for tag in get_images()]
def open_images():
    """Open every scraped image in a new browser tab.

    Wikipedia ``src`` attributes are protocol-relative (``//upload...``),
    so only the scheme — ``'http:'`` — is prefixed, matching
    ``download_images``.  (BUG FIX: the previous ``'http://'`` prefix
    produced malformed ``http:////...`` URLs.)
    """
    urls = get_image_urls()
    for i in urls:
        webbrowser.open('http:' + i)
def download_images():
    """Download every scraped image into the current working directory.

    Each image is saved under a random ``image_<n>.jpg`` name; note that
    random names can collide for large galleries.
    """
    urls = get_image_urls()
    contents = []
    for i in urls:
        # src is protocol-relative ('//upload...'), so prefix the scheme only.
        contents.append(requests.get('http:' + i).content)
    for i in contents:
        # Context manager guarantees the file handle closes even on error.
        with open('image_' + str(random.randint(0, 99999)) + '.jpg', 'wb') as f:
            f.write(i)
def execute():
    """Prompt the user for an action and run it.

    1 -> open each image in the browser; 2 -> download all images into the
    current directory.  Any other value raises an Exception.
    """
    preference = int(input('Enter 1 to open in browser / 2 to download in the current directory '))
    if preference == 1:
        # BUG FIX: was ``open_image()`` (undefined name, NameError at runtime);
        # the function defined above is ``open_images``.
        open_images()
    elif preference == 2:
        download_images()
    else:
        raise Exception('Please enter either 1 or 2')
    print('DONE!')
execute()
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test Autograd
# ## Librerías
import autograd.numpy as np
from autograd import grad
from functools import partial
from bisect import bisect_right
# ## First Try
def present_value(cashflows: np.array, dfs: np.array) -> float:
    """Present value: sum of each cashflow weighted by its discount factor."""
    discounted = cashflows * dfs
    return discounted.sum()
grad_pv = grad(present_value, argnum=0)
# +
valor_tasa = .1
grad_pv(np.array([5.0, 100.0]), np.array([(1+valor_tasa/2)**-1, (1+valor_tasa)**-1]))
# -
(1+valor_tasa/2)**-1
present_value(np.array([5, 100]), np.array(
[(1+valor_tasa/2)**-1, (1+valor_tasa)**-1]))
pv = partial(present_value, np.array([5.0, 100.0]))
grad_pv2 = grad(pv)
pv(np.array([(1+valor_tasa/2)**-1, (1+valor_tasa)**-1]))
grad_pv2(np.array([(1+valor_tasa/2)**-1, (1+valor_tasa)**-1]))
def two(x, y):
return x*y
grad_two = grad(two)
grad_two(1.0, 2.0)
# ## Más Estrucuturado
tenors = np.array([1.0, 365.0])
rates = np.array([.05, .1])
def lin_interpol(tenors, rates, tenor):
    """Piecewise-linear interpolation of *rates* over *tenors*.

    Flat extrapolation outside [tenors[0], tenors[-1]]; *tenors* must be
    sorted ascending.
    """
    last = len(tenors) - 1
    if tenor <= tenors[0]:
        return rates[0]
    if tenor >= tenors[last]:
        return rates[last]
    idx = bisect_right(tenors, tenor) - 1
    slope = (rates[idx + 1] - rates[idx]) / (tenors[idx + 1] - tenors[idx])
    return rates[idx] + slope * (tenor - tenors[idx])
def df(rate, tenor):
    """Discount factor for an annually compounded *rate* at *tenor* days."""
    years = tenor / 365.0
    return (1 + rate) ** (-years)
def present_value(interp, disc, cashflow, tenor, tenors, rates):
    """Present value of a single *cashflow* due at *tenor* days.

    The discount rate is obtained by interpolating the (tenors, rates)
    curve with *interp*.  NOTE(review): this redefines the earlier
    ``present_value``; the ``disc`` argument is accepted but unused —
    discounting goes through the module-level ``df`` directly.
    """
    rate = interp(tenors, rates, tenor)
    return df(rate, tenor) * cashflow
present_value(lin_interpol, df, 100.0, 189.0, tenors, rates)
drates = grad(present_value, argnum=5)
drates(lin_interpol, df, 100.0, 180.0, tenors, rates)
10000*10*4*6/1000/60
# ### Bootstrapping (1 Sólo Fujo)
cashflow = 107.5
plazo = 365.0
# **Problema:** encontrar la tasa a 365 días que haga que el valor presente de este flujo sea igual a 100.
pv = partial(present_value, lin_interpol, df)
pv(cashflow, plazo, tenors, rates)
g = grad(pv, argnum=3)
g(cashflow, plazo, tenors, rates)
rates0 = np.array([.05, 0.0])
def solve(cashflow, tenor, obj, tenors, rates, pv):
    """Newton-Raphson bootstrap of the second curve rate.

    Finds the rate at ``tenors[1]`` such that
    ``pv(cashflow, tenor, tenors, rates_)`` equals *obj*, using autograd's
    ``grad`` for the derivative.  Returns the solved rate (possibly an
    autograd box when ``solve`` itself is being differentiated).
    """
    epsilon = .000001  # convergence tolerance on successive rate updates
    g = grad(pv, argnum=3)  # derivative of pv w.r.t. the rates vector
    rates_ = np.array([rates[0], rates[1]])  # working copy; *rates* is untouched
    delta = 1
    while delta > epsilon:
        # Newton step on element 1: r1 = r - (pv(r) - obj) / pv'(r)
        r1 = rates_[1] - (pv(cashflow, tenor, tenors, rates_) - obj) / g(cashflow, tenor, tenors, rates_)[1]
        delta = abs(r1 - rates_[1])
        if type(r1) is np.float64:
            rates_[1] = r1
        else:
            # When solve is differentiated, r1 is an autograd box; unwrap its
            # float payload before storing it back into the ndarray.
            rates_[1] = r1._value
    return r1
solve(cashflow, plazo, 100.0, tenors, rates0, pv)
rates0
gsolve = grad(solve, argnum=0)
gsolve(cashflow, plazo, 100.0, tenors, rates0, pv)
# ### Bootstrapping (Varios Flujos)
def kron(i, j):
    """Kronecker delta: 1 when i == j, otherwise 0."""
    return 1 if i == j else 0
def dfs(rates, tenors):
    """Vector of discount factors, one per (rate, tenor) pair."""
    return np.array([df(r, t) for r, t in zip(rates, tenors)])
rates = np.array([.01, .075])
dfs(rates, tenors)
def lin_interpols(tenors, rates, new_tenors):
    """Interpolate the (tenors, rates) curve at each tenor in *new_tenors*."""
    interpolated = [lin_interpol(tenors, rates, nt) for nt in new_tenors]
    return np.array(interpolated)
lin_interpols(tenors, rates, [90.0, 180.0, 270.0])
def fixed_rate_leg(nocional, tasa, num_cupones):
    """Tenors (in days) and cashflows of a semi-annual fixed-rate leg.

    Each coupon pays ``nocional * tasa / 2``; the final coupon (selected by
    ``kron``) additionally repays the notional.
    """
    cupones = range(1, num_cupones + 1)
    plazos = np.array([.5 * 365.0 * c for c in cupones])
    flujos = np.array([nocional * (kron(c, num_cupones) + tasa / 2.0)
                       for c in cupones])
    return plazos, flujos
fixed_rate_leg(100, .06, 2)
def present_value_2(interp, disc, cashflows_tenors, cashflows, curve_tenors, curve_rates):
    """Present value of a cashflow vector against the (curve_tenors, curve_rates) curve.

    NOTE(review): ``disc`` is accepted but unused — discount factors come
    from the module-level ``dfs`` helper.
    """
    cashflow_rates = interp(curve_tenors, curve_rates, cashflows_tenors)
    return np.dot(dfs(cashflow_rates, cashflows_tenors), cashflows)
present_value_2(lin_interpols, dfs, fixed_rate_leg(100, .06, 2)[0],
fixed_rate_leg(100, .06, 2)[1], tenors, rates)
def pv_fixed_leg(interp, disc, nocional, tasa, num_cupones, curve_tenors, curve_rates):
    """Present value of the fixed-rate leg built by ``fixed_rate_leg``."""
    plazos, flujos = fixed_rate_leg(nocional, tasa, num_cupones)
    return present_value_2(interp, disc, plazos, flujos, curve_tenors, curve_rates)
pv_fixed_leg(lin_interpols, dfs, 100.0, .06, 2, tenors, rates)
pv_2 = partial(pv_fixed_leg, lin_interpols, dfs)
pv_2(100.0, .06, 2, tenors, rates)
gpvleg = grad(pv_2, argnum=1)
gpvleg(100.0, .06, 2, tenors, rates)
def solve_2(nocional, tasa, num_cupones, obj, tenors, rates, pv):
    """Bootstrap the next curve pillar for a multi-coupon fixed leg.

    Appends a trial rate (0.0) to *rates* and Newton-iterates on that last
    element until ``pv(nocional, tasa, num_cupones, tenors, rates_)``
    equals *obj* within tolerance.  Returns the solved rate (possibly an
    autograd box so the solver itself can be differentiated).
    """
    epsilon = .000001  # convergence tolerance on successive rate updates
    g = grad(pv, argnum=4)  # derivative of pv w.r.t. the curve-rates vector
    rates_ = np.array([r for r in rates] + [0.0,])  # seed the new pillar at 0
    print(rates_)
    which = len(rates_) - 1  # index of the rate being solved for
    print(f'which: {which}')
    delta = 1
    while delta > epsilon:
        # Newton step: r1 = r - (pv(r) - obj) / pv'(r)
        q = (pv(nocional, tasa, num_cupones, tenors, rates_) - obj)
        q /= g(nocional, tasa, num_cupones, tenors, rates_)[which]
        r1 = rates_[which] - q
        print(r1)
        delta = abs(r1 - rates_[which])
        print(f'delta: {delta}')
        if type(r1) is np.float64:
            rates_ = np.array([r for r in rates] + [r1,])
        else:
            # r1 is an autograd box when solve_2 is being differentiated;
            # store its unwrapped float so the ndarray stays numeric.
            rates_ = np.array([r for r in rates] + [r1._value,])
    return r1
rates0 = np.array([.05,])
r_sol = solve_2(100.0, .06, 2, 100.0, tenors, rates0, pv_2)
print(f'sol: {r_sol}')
rates_sol = np.array([rates0[0], r_sol])
pv_2(100.0, .06, 2, tenors, rates_sol)
gs2 = grad(solve_2, argnum=1)
gs2(100.0, .06, 2, 100.0, tenors, rates0, pv_2)
grad(solve_2, argnum=5)(100.0, .06, 2, 100.0, tenors, rates0, pv_2)
tenors0 = [1.0, 365.0, 730.0]
rates0 = [0.05, r_sol]
solve_2(100.0, .062, 4, 100.0, tenors0, rates0, pv_2)
grad(solve_2, argnum=1)(100.0, .062, 4, 100.0, tenors0, rates0, pv_2)
| test_autograd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # CMake
#
# + [markdown] slideshow={"slide_type": "slide"}
# # **C**ross platform **Make**
#
# * One system that generates native build files for:
# * UNIX - Makefiles
# * MacOS - XCode
# * Windows - Visual Studio
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Basic Use
#
# In root directory of the project you wish to build, run
#
# ```
# > cmake
# ```
#
# You’ll likely need a few option variables to get it right:
#
# |Option |Significance|
# |:------|------------|
# |`CMAKE_INSTALL_PREFIX=/a/path`| Installation path is `/a/path`|
# |`BUILD_SHARED_LIBS=ON`| Build shared (dynamic) libraries|
# |`CMAKE_BUILD_TYPE=Debug`| Generate files with debug flags set|
# |`CMAKE_C_COMPILER=icc`| Sets C Language compiler to `icc`|
# |`CMAKE_CXX_FLAGS="-O3"`| Sets C++ compiler optimization level 3|
# |`CMAKE_PREFIX_PATH=/a/path`| Search for dependencies in `/a/path`|
#
#
# Many more at: http://www.cmake.org/Wiki/CMake_Useful_Variables
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Examples of `CMake` configuration.
#
# Options can be given on the command line:
#
# ```
# > cmake -DCMAKE_INSTALL_PREFIX=/usr/local .
# ```
#
# Or given in a bash script:
#
# ```
# # # #!/usr/bin/env bash
# cmake -DCMAKE_INSTALL_PREFIX=/usr/local \
# -DCMAKE_BUILD_TYPE=Release \
# -DCMAKE_CXX_COMPILER=icpc \
# /path/to/src
# ```
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Compile, Test, and Install
#
# After configuration is complete, type
#
# ```
# > make
# ```
#
# Can usually build in parallel on multi-core machines, to build on 2 processors
#
# ```
# > make -j2
# ```
#
# If the project has a test suite,
#
# ```
# > make test
# > ctest
# ```
#
# To install in prefix location (must have permissions)
#
# ```
# > make install
# ```
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Other
#
# * With further configuration you can export to Apple XCode or Microsoft Visual Studio Projects
# * Interactive interface with `ccmake`.
# * Tip:
# * Always create a separate build directory within the source for building to keep the source directory clean.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Creating your own CMake projects
#
# CMake instructions go in `CMakeLists.txt` file
#
# For example, a typical project directory structure might have:
#
# ```
# my_project/
# ├── CMakeLists.txt
# ├── src/
# | ├── my_source_file_1.cc
# | ├── my_source_file_2.cc
# | └── CMakeLists.txt
# └── test/
# ├── my_test_1.cc
# ├── my_test_2.cc
# └── CMakeLists.txt
# ```
#
# The top level `CMakeLists.txt` would have the commands:
#
# ```
# add_subdirectory(src)
# add_subdirectory(test)
# ```
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Common commands
#
# To create a compiled library:
#
# ```
# add_library(library_name library_source_file_1.cc library_source_file_2.cc)
# ```
#
# To create a compiled executable:
#
# ```
# add_executable(executable_name executable_source_file_1.cc library_source_file_2.cc)
# ```
#
# To add header file locations:
#
# ```
# target_include_directories(library_or_executable_name /path/to/header_files)
# ```
#
# To link against other libraries:
#
# ```
# target_link_libraries(library_or_executable_name library_name_to_link_against)
# ```
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Other
#
# * Can create internal CMake variables with `set()`
# * Conditional logic with `if()/endif()`, e.g.
# ```
# set(MY_BOOL_VAR ON)
# if(MY_BOOL_VAR)
# #Do something
# else()
# #Do something else
# endif()
# ```
# * Resource for learning CMake:
# * https://cmake.org/documentation/
# * Existing projects!
#
# + hide_input=true init_cell=true slideshow={"slide_type": "skip"} language="javascript"
# function hideElements(elements, start) {
# for(var i = 0, length = elements.length; i < length;i++) {
# if(i >= start) {
# elements[i].style.display = "none";
# }
# }
# }
#
# var prompt_elements = document.getElementsByClassName("prompt");
# hideElements(prompt_elements, 0)
| CMake-rise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Import Modules
from bs4 import BeautifulSoup
import pandas as pd
import re
import time
import pickle
from project4 import pickle_stuff
# ### Scrape G2
# +
g2_star_ratings = []
g2_product_reviews = []
def get_reviews_g2():
    """Scrape the first G2 review pages for the product, appending the raw
    review/star HTML fragments to the module-level lists."""
    # BUG FIX: ``webdriver`` was never imported at the top of the file;
    # import it here so the existing selenium dependency is explicit.
    from selenium import webdriver
    driver = webdriver.Chrome('/Applications/chromedriver')
    page = 0
    while page <= 3:
        page += 1
        g2_url = 'https://www.g2.com/products/' + 'ncr-aloha-pos' + '/reviews?page=' + str(page)
        driver.get(g2_url)
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        review = str(soup.find_all('div', {'itemprop': 'reviewBody'}))
        g2_product_reviews.append(review)
        star = str(soup.find_all('div', {'class':'f-1 d-f ai-c mb-half--small-only'}))
        g2_star_ratings.append(star)
        # (Removed a dead per-iteration loop that parsed star ratings into a
        # local list and discarded it; the same parsing is repeated at module
        # level after scraping.)
        time.sleep(5)  # be polite between page requests
    driver.close()
get_reviews_g2()
# -
# #### Grab Star Ratings from HTML
# +
all_g2_ratings = []
for rating in g2_star_ratings:
result = re.findall('(?<=stars large stars-)(.*?)(?=")', rating)
all_g2_ratings.append(result)
stars = [item for sublist in all_g2_ratings for item in sublist]
ratings = []
for star in stars:
ratings.append(int(star))
# -
# #### Grab Reviews from HTML
helper = []
for string in g2_product_reviews:
sub_string = string.split('div>, ')
helper.append(sub_string)
reviews = [item for sublist in helper for item in sublist]
len(reviews)
# +
#g2_product_reviews
# -
# #### Combine Reviewer Id and Star Rating in DataFrame
reviewer = list(range(165))
ratings_table = pd.DataFrame(zip(reviewer, ratings)).rename(columns={0: "reviewer", 1: "rating"})
# #### Clean Up Reviews
all_g2_reviews = []
for review in reviews:
result = re.findall('(?<=text">).*?(?=<)', review)
all_g2_reviews.append(result)
len(all_g2_reviews)
# #### Merge Reviews to DataFrame
g2_reviews_flat = [item for sublist in all_g2_reviews for item in sublist]
g2_reviews_flat_mod = {item:[index] for index,sublist in enumerate(all_g2_reviews) for item in sublist}
x = pd.DataFrame(g2_reviews_flat_mod).T.reset_index().rename(columns={"index": "text", 0: "reviewer"})
df = x.merge(ratings_table, how='left', on = 'reviewer')
# +
# filepath = '/Users/tim/src/Metis/Project_4/data/interim/aloha_g2_reviews.pkl'
# with open(filepath, 'wb') as pkl:
# pickle.dump(df, pkl)
# -
# ### Scrape Capterra
# +
capt_product_reviews = []
def get_reviews_capterra():
    """Scrape the Capterra review page for Aloha, appending the raw review
    HTML to the module-level ``capt_product_reviews`` list."""
    # BUG FIX: ``webdriver`` was never imported at the top of the file;
    # import it here so the existing selenium dependency is explicit.
    from selenium import webdriver
    driver = webdriver.Chrome('/Applications/chromedriver')
    capt_url = 'https://www.capterra.com/p/178748/Aloha/#reviews'
    driver.get(capt_url)
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    review = str(soup.find_all('div', {'class': "review-comments color-text"}))
    capt_product_reviews.append(review)
    time.sleep(5)
    driver.close()
get_reviews_capterra()
# -
# #### Grab Reviews from HTML
helper = []
for string in capt_product_reviews:
sub_string = string.split('div>, ')
helper.append(sub_string)
reviews_capt = [item for sublist in helper for item in sublist]
all_capt_reviews = []
for review in reviews_capt:
result = re.findall('(?<=\/strong>).*?(?=<)', review)
all_capt_reviews.append(result)
len(all_capt_reviews)
# #### Combine Reviewer Id and Review in DataFrame
# Flatten the per-page review lists and assign reviewer ids offset past the
# 165 G2 reviewers (hence index + 166).
capt_reviews_flat = [item for sublist in all_capt_reviews for item in sublist]
capt_reviews_flat_mod = {item: [index + 166] for index, sublist in enumerate(all_capt_reviews) for item in sublist}
x_capt = pd.DataFrame(capt_reviews_flat_mod).T.reset_index().rename(columns={"index": "text", 0: "reviewer"})
# BUG FIX: ``DataFrame.append`` was deprecated in pandas 1.4 and removed in
# 2.0; ``pd.concat`` is the supported replacement.
df = pd.concat([df, x_capt])
filepath = '/Users/tim/src/Metis/Project_4/data/interim/aloha_reviews2.pkl'
with open(filepath, 'wb') as pkl:
pickle.dump(df, pkl)
| Notebooks/1_scrape_reviews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Example of how to use unittest in IPython or Jupyter
#
# Configuration for running unittest in IPython or Jupyter is different from running unittest from the command line.
#
# When you run following code from IPython or Jupyter
#
# ```python
# if __name__ == '__main__':
# unittest.main()
# ```
#
# will result in following error:
#
# ```
# E
# ======================================================================
# ERROR: C:\Users\XXXX\AppData\Roaming\jupyter\runtime\kernel-4b12e789-a050-4567-a8a1-7361d4a1745f (unittest.loader._FailedTest)
# ----------------------------------------------------------------------
# AttributeError: module '__main__' has no attribute 'C:\Users\XXXX\AppData\Roaming\jupyter\runtime\kernel-4b12e789-a050-4567-a8a1-7361d4a1745f'
#
# ----------------------------------------------------------------------
# Ran 1 test in 0.001s
#
# FAILED (errors=1)
# An exception has occurred, use %tb to see the full traceback.
#
# SystemExit: True
#
#
# d:\development\lib\site-packages\IPython\core\interactiveshell.py:2855: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.
# warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
# ```
#
# The reason is that `unittest.main` looks at `sys.argv` and first parameter is what started IPython or Jupyter, therefore the error about kernel connection file not being a valid attribute.
# Passing explicit list to `unittest.main` will prevent IPython and Jupyter look at `sys.argv`.
# Passing `exit=False` will prevent `unittest.main` from shutting down the kernel process
#
# ```python
# if __name__ == '__main__':
# unittest.main(argv=['first-arg-is-ignored'], exit=False)
# ```
if __name__ == '__main__':
unittest.main()
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False)
import unittest  # BUG FIX: unittest was used above but never imported in this notebook
class TestDemo1(unittest.TestCase):
    """Example of how to use unittest in Jupyter."""
    def test1(self):
        # Trivial passing test demonstrating that the runner works.
        self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
# #### or directly call unittest.main()
unittest.main(argv=[''], verbosity=2, exit=False)
| unit1/test.try.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8 - AzureML
# language: python
# name: python38-azureml
# ---
# ## Data Profiling
# The first step of both the Cross Industry Process for Data Mining (CRISP-DM) and the Microsoft Team Data Science Process (TDSP), is understanding the business case for the project. In other words, what problem exists that could be solved if a value could be predicted? The next step is to source and understand data that pertains to the business case. This activity (data profiling) is aimed at identifying the shape of the dataset (i.e., the number of observations and features), the data types of its features, their meaning, the number of unique values contained within each feature, and the distribution of those values. In doing so it is typical to find the data in a raw form; i.e., the data is likely to be flawed to some extent that it would require preparatory measures before it could be used to train a machine learning model. Foremost among flaws are missing data. An analysis must be performed to decide whether to impute missing data, or to simply drop features or observations that have missing values. This decision is typically driven by quantifying how much data would remain if those missing features and/or observations were excluded. This is because any treatment applied to impute replacement values may influence all subsequent activities to understand the true distribution of values within the affected features, and to identify correlations among those features.
# #### Import Libraries
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# #### Import the Data
# +
# Load the Titanic CSV from the ./Data directory relative to the notebook.
data_dir = os.path.join(os.getcwd(), 'Data')
source_data_file = 'titanic.csv';
data_file = os.path.join(data_dir, source_data_file)
df = pd.read_csv(data_file, header=0, index_col=0)
# Replace the CSV's index column with a clean 0-based RangeIndex.
df.reset_index(drop=True, inplace=True)
# -
# ### 1.0. Perform an Initial Profile
# The data being analyzed pertains to the sinking of the luxury passenger ship **Titanic**. Each observation describes an individual passenger aboard the ship at the time it sank, along with whether or not the passenger survived or perished in the disaster. The following table contains metadata describing each feature (attribute) captured to describe each passenger.
#
# #### Data Dictionary:
# <div style="margin-left: 0 px; margin-right: auto; width: 60%">
#
# | Variable | Description | Details |
# | ----------|:-----------------------------------|:---------------------------------------------- |
# | survived | Survival | 0 = No; 1 = Yes |
# | name | First & Last Name | Given Name & Surname |
# | sex | Gender | Male or Female |
# | age | Age | Numeric representing years |
# | sibsp | Number of Siblings/Spouses Aboard | 1 = Spouse; >1 = # Siblings |
# | parch | Number of Parents/Children Aboard | |
# | ticket | Ticket Number | Ticket number purchased |
# | fare | Passenger Fare | The cost of the passenger's ticket |
# | cabin | Cabin | The cabin number |
# | embarked | Port of Embarkation | C = Cherbourg; Q = Queenstown; S = Southampton |
# | boat | The lifeboat they boarded | Lifeboat number |
# | body | Body Weight | Numerical value (decimal) |
# | home.dest | Passengers home & destination | Textual value |
#
# </div>
#
# #### 1.1. Quantify the Observations and Features
print(f"Shape: {df.shape[0]} Observations x {df.shape[1]} Features")
# #### 1.2. Determine which Features are Numerical and which are Categorical
df.head()
# The **Target** variable *(survived)* is easily identified and can be seen to contain binary numeric values; making this a binary classification experiment. The **sex** feature also contains binary values, although they are textual. The **age** and **fare** features contain numerical values, as do the **sibsp** and **parch** features. Missing values (NaN) can be seen in both the **boat** and **body** features, and the **home.dest** feature contains brief textual descriptions. The **boat** feature contains *numerical* values, but is actually a categorical feature. The same is true regarding the **ticket** feature; however, it isn't obvious whether its values are naturally ordered, making it an *Ordinal* categorical feature, or if they have no natural ordering, making it a *Nominal* categorical variable. The **embarked** and **cabin** features are very clearly categorical data. Finally, we observe the **name** feature uniquely identifies each observation (passenger), and is therefore of no consequence to predicting whether a new (unseen) passenger would be likely to survive or perish.
#
# #### 1.3. Inspect each Feature's Data Type
df.dtypes
# #### 1.4. Inspect the Cardinality (number of unique values) of each Feature?
df.nunique()
# #### 1.5. Inspect each Feature's Unique Values
# +
# Tabulate each feature alongside its array of distinct values.
unique_values = []
for col in df.columns:
    unique_values.append(df[col].unique())
pd.DataFrame(list(zip(df.columns, unique_values)), columns=['Feature', 'Unique Values'])
# -
# These initial profiling techniques reveal that some features having numerical data types may in fact contain categorical data (e.g., survived, sibsp, parch). To better understand their meaning and influence it may be advantageous to convert them from their present numerical types to the **Object** type to indicate their categorical nature. What's more, it may be worthwhile to research their meaning for the sake of mapping descriptive labels to the existing numerical values.
#
# #### 1.6. Identify Duplicated Observations
# The initial profile has also revealed that, although there are 1309 observations in our dataset, there are only 1307 unique values in the **name** feature. This indicates that we have a couple of duplicate observations; therefore, we may wish to remove them.
df[df.duplicated(subset='name', keep=False) == True]
# Here we observe that two passengers (<NAME> and <NAME>) have duplicate records. In each case, the first instance contains fewer missing values (NaN) so it should make sense to keep them and discard the second instances; however, we also observe conflicting data points between the duplicate records (e.g., age, ticket, fare, embarked) which also poses the question "Which values are accurate, and which are not?" The only way to determine which values are correct is to perform additional research, and this is a common scenario a data scientist might encounter. To ensure the veracity of source data, it's important to maintain effective communications with business analysts, subject matter experts, data engineers, and data stewards.
#
# ### 2.0. Quantify Any Missing Data
# #### 2.1. Inspect the number of missing values per feature?
df.isnull().sum().sort_values(ascending=True)
df.isnull().sum().sort_values(ascending=True).plot.bar(figsize=(10,5))
plt.ylabel('Number of Missing Values')
plt.xlabel('Variables')
plt.title('Quantifying Missing Data (Counts)')
# #### 2.2. Inspect the Percentage of Missing Values per Feature?
df.isnull().mean().sort_values(ascending=True)
df.isnull().mean().sort_values(ascending=True).plot.bar(figsize=(10,5))
plt.ylabel('Percentage of Missing Values')
plt.xlabel('Variables')
plt.title('Quantifying Missing Data (Percentage)')
# From these exercises we can clearly see that 5 of the 13 features contain a significant number (or percentage) of missing values: age (0.20%), home.dest (0.43%), boat (0.63%), cabin (0.77%), body (0.91%). As the percentage of missing values exceeds 50% we may ask ourselves if enough data remains in those features to impute new values using the mean, median or mode of the remaining observations. What's more, we would have to wonder if these features would be influential even if they were complete.
#
# ### 3.0. Inspect the Distribution of Values per Feature
# #### 3.1. Separate Numerical and Categorical Features
# Because numerical values often tend to be continuous while categorical values are inherently discrete, it is advantageous to separate them to make it easier to apply appropriate visualization techniques and/or feature engineering techniques.
# dtype 'O' (object) marks string/categorical columns in pandas.
numerical_cols = [col for col in df.columns if df.dtypes[col] != 'O']
categorical_cols = [col for col in df.columns if col not in numerical_cols]
# #### 3.2. Evaluate the Statistical Distribution of the Numerical Features
df[numerical_cols].describe()
df[numerical_cols].hist(figsize=(12,12), bins=15)
plt.show()
# #### 3.3. Identify Any Outliers
# An outlier is a data point which is significantly different from the remaining data. "An outlier is an observation which deviates so much from the other observations as to arouse suspicions that it was generated by a different mechanism." [<NAME>. Identification of Outliers, Chapman and Hall , 1980.]
#
# According to the IQR (inter-quantile range) proximity rule, a value is an outlier if it falls outside an upper boundary, *defined as 75th quantile + (IQR * 1.5)*, or a lower boundary, *defined as 25th quantile - (IQR * 1.5)*, where the inter-quantile range (IQR) is defined as *(75th quantile - 25th quantile)*.
#
# In the boxplots displayed below, the **IQR** is indicated by the (inner) box, the **median** is indicated by the line within the box, the top and bottom edges of the box correspond to the 75th and 25th quantile, and the whiskers mark the **proximity rule boundaries** as described above. Values that fall outside the whiskers are considered outliers; however, further research may be required to determine if these values are in fact erroneous, or if they represent the general truth.
numerical_cols.remove('survived')
for col in numerical_cols:
plt.figure(figsize=(10,3))
sns.boxplot(x=df[col])
plt.title(col)
# #### 3.4. Evaluate the Distribution of Categorical Features
# The distribution of categorical features is determined by comparing the count of each category relative to the whole. Customary visualizations for this task are the Bar Chart and the Frequency Table.
df[categorical_cols].head()
# +
cols = df[['survived','sex','cabin','embarked','boat']]
for col in cols:
plt.figure(figsize=(10,4))
sns.countplot(x=col, data=df)
plt.title(col)
plt.xlabel("Count of each {0}".format(col))
plt.ylabel(col)
plt.show()
# -
# #### 3.5. Identify any Rare Categories in Categorical Features
# While some categories (labels) appear frequently in categorical features, others may occur less often. In fact, it is quite typical for one or more categories to appear in a large percentage of the observations, while the remaining categories appear in a very small percentage. If a value appears very infrequently (e.g., less than 5%) then it may be considered *rare*. The only features that may contain *rare* categories are **cabin** and **boat**, so this exercise will focus on them.
# Per-category relative frequency for the two high-cardinality features.
# NOTE(review): the variable name ``survived`` is a misnomer here — it holds
# a frequency table of the column, not survival data.  ``display`` is the
# IPython rich-output builtin.
for col in ['cabin','boat']:
    survived = pd.crosstab(index=df[col], columns=["Percent"], colnames=[""])
    survived = survived.sort_values(by=['Percent'], ascending=True)
    display(round(survived/survived.sum(), 3))
# ##### View Rare Occurrences by Count
# In some cases it may be more intuitive to view occurrences by *count* rather than by *percentage*.
for col in ['cabin','boat']:
survived = pd.crosstab(index=df[col], columns=["Percent"], colnames=[""])
survived = survived.sort_values(by=['Percent'], ascending=True)
display(survived)
| 04-Feature-Engineering/01-Data-Profiling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tutorial: Autotune the block matrix multiplication
# ===================
#
# This tutorial describes how to define autotuning problem and an evaluating method for autotuning the block matrix multiplication.
#
# We assume that you have checked out a copy of `ytopt`. For guidelines on how to get ytopt set up, refer [Install instructions](https://github.com/ytopt-team/ytopt/blob/tutorial/README.md).
#
# This example including the source code is borrowed from [http://opentuner.org/tutorial/gettingstarted/](http://opentuner.org/tutorial/gettingstarted/).
# Identifying a problem to autotune
# -----------------------
# In this tutorial, we target to autotune the block size for matrix multiplication. Blocking is used to improve the temporal locality of inner loops such that data structures in a program are organized into chunks, i.e. blocks (ref: [https://csapp.cs.cmu.edu/public/waside/waside-blocking.pdf](https://csapp.cs.cmu.edu/public/waside/waside-blocking.pdf)). We want to find the block size that gives the minimal execution time.
#
# Save the related source files in the seprate folder: `mmm_block.cpp`. We have the files in `<https://github.com/ytopt-team/ytopt/tree/tutorial/ytopt/benchmark/mmm-block/mmm_problem/mmm_block.cpp>`.
# +
#include <stdio.h>
#include <cstdlib>
#define N 100
int main(int argc, const char** argv)
{
  // Round n down to a whole number of BLOCK_SIZE-sized blocks.
  int n = BLOCK_SIZE * (N/BLOCK_SIZE);
  int a[N][N];
  int b[N][N];
  int c[N][N];
  int sum=0;
  // Blocked matrix multiply (CSAPP-style): for each (k-block, j-block) pair,
  // add the partial dot product over the current k-block into c[i][j].
  // NOTE: the tutorial transcription contained a duplicated inner
  // `for(int k1 ...)` loop that shadowed the outer one; that made the outer
  // loop a dead repetition which re-accumulated every product n/BLOCK_SIZE
  // times. The duplicate is removed here to match the original OpenTuner
  // example.
  for(int k1=0;k1<n;k1+=BLOCK_SIZE)
  {
    for(int j1=0;j1<n;j1+=BLOCK_SIZE)
    {
      for(int i=0;i<n;i++)
      {
        for(int j=j1;j<j1+BLOCK_SIZE;j++)
        {
          sum = c[i][j];
          for(int k=k1;k<k1+BLOCK_SIZE;k++)
          {
            sum += a[i][k] * b[k][j];
          }
          c[i][j] = sum;
        }
      }
    }
  }
  return 0;
}
# -
# Defining autotuning problem
# -----------------------
# We describe how to define your search problem `<https://github.com/ytopt-team/ytopt/blob/tutorial/ytopt/benchmark/mmm-block/mmm_problem/problem.py>`
#
# --------------
# First, we define the search space using ConfigSpace, a Python library `<https://automl.github.io/ConfigSpace/master/>`.
# +
# import required library
import os, sys, time, json, math
import numpy as np
from autotune import TuningProblem
from autotune.space import *
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
from skopt.space import Real, Integer, Categorical
HERE = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(1, os.path.dirname(HERE)+ '/plopper')
from plopper import Plopper
# -
# Our search space contains one parameter; `BLOCK_SIZE`: number of blocks.
# create an object of ConfigSpace
# Search space: a single integer tunable, BLOCK_SIZE in [1, 10], default 5.
cs = CS.ConfigurationSpace(seed=1234)
# Tunable block size (the "openmp dynamic schedule" wording was inherited
# from another example; here it is the matmul blocking factor).
p0= CSH.UniformIntegerHyperparameter(name='BLOCK_SIZE', lower=1, upper=10, default_value=5)
cs.add_hyperparameters([p0])
# problem space
input_space = cs
# Output: one real-valued objective, the measured runtime.
# NOTE(review): `inf` is presumably re-exported by `autotune.space` — confirm.
output_space = Space([Real(0.0, inf, name="time")])
# --------------
# Then, we need to define the objective function `myobj` to evaluate a point in the search space.
#
# In this example, we define an evaluating method (Plopper) for code generation and compilation.
# Plopper takes the source code and an output directory and returns an execution time.
# +
# Locate the kernel source next to this script and build the evaluator.
# NOTE(review): `__file__` is undefined inside a live notebook — this cell
# assumes it runs as a script (problem.py).
dir_path = os.path.dirname(os.path.realpath(__file__))
kernel_idx = dir_path.rfind('/')
kernel = dir_path[kernel_idx+1:]  # kernel name = last path component
obj = Plopper(dir_path+'/mmm_block.cpp',dir_path)
x1=['BLOCK_SIZE']  # ordered list of tunable parameter names
def myobj(point: dict):
    """Objective for ytopt: evaluate one configuration and return its runtime.

    `point` maps parameter names (here only 'BLOCK_SIZE') to candidate values;
    the returned float is the cost minimized by the search.
    """
    def plopper_func(x):
        x = np.asarray_chkfinite(x)  # ValueError if any NaN or Inf
        value = [point[x1[0]]]
        print('CONFIG:', point)
        params = ["BLOCK_SIZE"]
        result = obj.findRuntime(value, params)
        return result

    x = np.array([point['BLOCK_SIZE']])
    results = plopper_func(x)
    # BUG FIX: the original `print('OUTPUT:%f', results)` passed the format
    # string and the value as two separate print arguments instead of
    # interpolating the value.
    print('OUTPUT:%f' % results)
    return results
# -
# The following describes our evaluating function, Plopper. You can find it `<https://github.com/ytopt-team/ytopt/blob/tutorial/ytopt/benchmark/mmm-block/plopper/plopper.py>`.
# +
import os, sys, subprocess, random
class Plopper:
    """Compile-and-run evaluator: builds the kernel with a given BLOCK_SIZE
    and reports the measured execution time as the tuning cost."""

    def __init__(self, sourcefile, outputdir):
        """Remember the kernel source path and create the temp-file directory."""
        self.sourcefile = sourcefile
        self.outputdir = outputdir + "/tmp_files"
        if not os.path.exists(self.outputdir):
            os.makedirs(self.outputdir)

    def createDict(self, x, params):
        """Pair each parameter label in `params` with its value in `x`."""
        return dict(zip(params, x))

    def findRuntime(self, x, params):
        """Compile the kernel with configuration `x` and return its runtime.

        Returns the sentinel cost 1 when compilation fails or the measured
        time is zero. (The original also declared an unused `interimfile`
        local, removed here.)
        """
        exetime = 1  # sentinel cost on compile failure / zero timing
        dictVal = self.createDict(x, params)
        # Compile the kernel with BLOCK_SIZE injected as a -D macro.
        tmpbinary = self.outputdir + '/tmp.bin'
        kernel_dir = self.sourcefile[:self.sourcefile.rfind('/')]
        gcc_cmd = 'g++ ' + kernel_dir + '/mmm_block.cpp '
        gcc_cmd += ' -D{0}={1}'.format('BLOCK_SIZE', dictVal['BLOCK_SIZE'])
        gcc_cmd += ' -o ' + tmpbinary
        run_cmd = kernel_dir + "/exe.pl " + tmpbinary  # exe.pl averages 5 runs
        # NOTE(review): shell=True with string commands; quoting would be
        # needed if any directory name contained spaces.
        compilation_status = subprocess.run(gcc_cmd, shell=True, stderr=subprocess.PIPE)
        # Only time the binary when compilation succeeded.
        if compilation_status.returncode == 0:
            execution_status = subprocess.run(run_cmd, shell=True, stdout=subprocess.PIPE)
            exetime = float(execution_status.stdout.decode('utf-8'))
            if exetime == 0:
                exetime = 1
        else:
            print(compilation_status.stderr)
            print("compile failed")
        return exetime
# -
# This file consists of several components.
#
# `__init__()` takes paths of the source file and output directory, and creates the output directory if it does not exist.
def __init__(self,sourcefile,outputdir):
    # Initializing instance attributes: kernel source path and temp dir
    self.sourcefile = sourcefile
    self.outputdir = outputdir+"/tmp_files"
    # Create the temp-file directory on first use.
    if not os.path.exists(self.outputdir):
        os.makedirs(self.outputdir)
# `createDict()` generates a dictionary for parameter labels and values.
def createDict(self, x, params):
    """Pair each parameter label in `params` with its value in `x`."""
    dictVal = {}
    for p, v in zip(params, x):
        dictVal[p] = v
    return(dictVal)
# `findRuntime()` first calls `createDict()` to obtain the configuration.
# After that, it generates the commandline `gcc_cmd` for compiling the modified source code and the commandline `run_cmd` for executing the compiled code.
# Then, it finds the compilation status using subprocess; finds the execution time of the compiled code; and returns the execution time as cost to the search module.
def findRuntime(self, x, params):
    """Compile the kernel with configuration `x` and return its measured runtime.

    Returns the sentinel cost 1 when compilation fails or the timing is zero.
    """
    interimfile = ""  # unused; kept to mirror the class definition above
    exetime = 1
    # Generate intermediate file
    dictVal = self.createDict(x, params)
    # Compile and find the execution time; BLOCK_SIZE is injected as -D macro.
    tmpbinary = self.outputdir + '/tmp.bin'
    kernel_idx = self.sourcefile.rfind('/')
    kernel_dir = self.sourcefile[:kernel_idx]
    gcc_cmd = 'g++ ' + kernel_dir +'/mmm_block.cpp '
    gcc_cmd += ' -D{0}={1}'.format('BLOCK_SIZE', dictVal['BLOCK_SIZE'])
    gcc_cmd += ' -o ' + tmpbinary
    run_cmd = kernel_dir + "/exe.pl " + tmpbinary  # exe.pl averages 5 runs
    # Find the compilation status using subprocess
    compilation_status = subprocess.run(gcc_cmd, shell=True, stderr=subprocess.PIPE)
    # Time the binary only when the compile return code is zero.
    if compilation_status.returncode == 0 :
        execution_status = subprocess.run(run_cmd, shell=True, stdout=subprocess.PIPE)
        exetime = float(execution_status.stdout.decode('utf-8'))
        if exetime == 0:
            exetime = 1
    else:
        print(compilation_status.stderr)
        print("compile failed")
    return exetime #return execution time as cost
# Note:
# - `exe.pl` computes the average execution time over 5 runs.
#
# --------------
# Last, we create an object of the autotuning problem. The problem will be called in the commandline implementation.
# Bundle the search space and objective into the problem object that the
# ytopt command line loads as `problem.Problem`.
Problem = TuningProblem(
    task_space=None,        # no task dimension for this benchmark
    input_space=input_space,
    output_space=output_space,
    objective=myobj,
    constraints=None,
    model=None)
# Running and viewing Results
# -----------------------
#
# Now, we can run the following command to autotune the program:
# --evaluator flag sets which object used to evaluate models, --problem flag sets path to the Problem instance you want to use for the search, --max-evals flag sets the maximum number of evaluations, --learner flag sets the type of learner (surrogate model).
#
# - Go to where `problem.py` such as
#
# `
# # cd ytopt/benchmark/mmm-block/mmm_problem
# `
# - Start search
#
# `python -m ytopt.search.ambs --evaluator ray --problem problem.Problem --max-evals=5 --learner RF
# `
#
# Note that use `python3` if your environment is built with python3.
# --------------
# Once autotuning kicks off, ytopt.log, results.csv, and results.json are generated.
#
# We can track the results of each run configuration from `ytopt.log` shows the following:
# ```
# 2021-07-30 15:35:14|15364|INFO|ytopt.search.search:53] Created "ray" evaluator
# 2021-07-30 15:35:14|15364|INFO|ytopt.search.search:54] Evaluator: num_workers is 1
# 2021-07-30 15:35:14|15364|INFO|ytopt.search.hps.ambs:47] Initializing AMBS
# 2021-07-30 15:35:14|15364|INFO|ytopt.search.hps.optimizer.optimizer:51] Using skopt.Optimizer with RF base_estimator
# 2021-07-30 15:35:14|15364|INFO|ytopt.search.hps.ambs:79] Generating 1 initial points...
# 2021-07-30 15:35:15|15364|INFO|ytopt.evaluator.evaluate:104] Submitted new eval of {'BLOCK_SIZE': '5'}
# 2021-07-30 15:35:17|15364|INFO|ytopt.evaluator.evaluate:206] New eval finished: {"BLOCK_SIZE": "5"} --> 0.144
# 2021-07-30 15:35:17|15364|INFO|ytopt.evaluator.evaluate:217] Requested eval x: {'BLOCK_SIZE': '5'} y: 0.144
# 2021-07-30 15:35:17|15364|INFO|ytopt.search.hps.ambs:92] Refitting model with batch of 1 evals
# 2021-07-30 15:35:17|15364|DEBUG|ytopt.search.hps.optimizer.optimizer:119] tell: {'BLOCK_SIZE': '5'} --> ('5',): evaluated objective: 0.144
# 2021-07-30 15:35:17|15364|INFO|ytopt.search.hps.ambs:94] Drawing 1 points with strategy cl_max
# 2021-07-30 15:35:18|15364|DEBUG|ytopt.search.hps.optimizer.optimizer:84] _ask: ['6'] lie: 0.144
# 2021-07-30 15:35:18|15364|INFO|ytopt.evaluator.evaluate:104] Submitted new eval of {'BLOCK_SIZE': '6'}
# 2021-07-30 15:35:19|15364|INFO|ytopt.evaluator.evaluate:206] New eval finished: {"BLOCK_SIZE": "6"} --> 0.139
# 2021-07-30 15:35:19|15364|INFO|ytopt.evaluator.evaluate:217] Requested eval x: {'BLOCK_SIZE': '6'} y: 0.139
# 2021-07-30 15:35:19|15364|INFO|ytopt.search.hps.ambs:92] Refitting model with batch of 1 evals
# 2021-07-30 15:35:19|15364|DEBUG|ytopt.search.hps.optimizer.optimizer:119] tell: {'BLOCK_SIZE': '6'} --> ('6',): evaluated objective: 0.139
# 2021-07-30 15:35:19|15364|INFO|ytopt.search.hps.ambs:94] Drawing 1 points with strategy cl_max
# 2021-07-30 15:35:19|15364|DEBUG|ytopt.search.hps.optimizer.optimizer:84] _ask: ['2'] lie: 0.144
# 2021-07-30 15:35:19|15364|INFO|ytopt.evaluator.evaluate:104] Submitted new eval of {'BLOCK_SIZE': '2'}
# 2021-07-30 15:35:21|15364|INFO|ytopt.evaluator.evaluate:206] New eval finished: {"BLOCK_SIZE": "2"} --> 0.303
# 2021-07-30 15:35:21|15364|INFO|ytopt.evaluator.evaluate:217] Requested eval x: {'BLOCK_SIZE': '2'} y: 0.303
# 2021-07-30 15:35:21|15364|INFO|ytopt.search.hps.ambs:92] Refitting model with batch of 1 evals
# 2021-07-30 15:35:21|15364|DEBUG|ytopt.search.hps.optimizer.optimizer:119] tell: {'BLOCK_SIZE': '2'} --> ('2',): evaluated objective: 0.303
# 2021-07-30 15:35:21|15364|INFO|ytopt.search.hps.ambs:94] Drawing 1 points with strategy cl_max
# 2021-07-30 15:35:21|15364|DEBUG|ytopt.search.hps.optimizer.optimizer:84] _ask: ['8'] lie: 0.303
# 2021-07-30 15:35:21|15364|INFO|ytopt.evaluator.evaluate:104] Submitted new eval of {'BLOCK_SIZE': '8'}
# 2021-07-30 15:35:23|15364|INFO|ytopt.evaluator.evaluate:206] New eval finished: {"BLOCK_SIZE": "8"} --> 0.128
# 2021-07-30 15:35:23|15364|INFO|ytopt.evaluator.evaluate:217] Requested eval x: {'BLOCK_SIZE': '8'} y: 0.128
# 2021-07-30 15:35:23|15364|INFO|ytopt.search.hps.ambs:92] Refitting model with batch of 1 evals
# 2021-07-30 15:35:23|15364|DEBUG|ytopt.search.hps.optimizer.optimizer:119] tell: {'BLOCK_SIZE': '8'} --> ('8',): evaluated objective: 0.128
# 2021-07-30 15:35:23|15364|INFO|ytopt.search.hps.ambs:94] Drawing 1 points with strategy cl_max
# 2021-07-30 15:35:23|15364|DEBUG|ytopt.search.hps.optimizer.optimizer:84] _ask: ['9'] lie: 0.303
# 2021-07-30 15:35:23|15364|INFO|ytopt.evaluator.evaluate:104] Submitted new eval of {'BLOCK_SIZE': '9'}
# 2021-07-30 15:35:25|15364|INFO|ytopt.search.hps.ambs:85] Elapsed time: 00:00:10.34
# 2021-07-30 15:35:25|15364|INFO|ytopt.evaluator.evaluate:206] New eval finished: {"BLOCK_SIZE": "9"} --> 0.125
# 2021-07-30 15:35:25|15364|INFO|ytopt.evaluator.evaluate:217] Requested eval x: {'BLOCK_SIZE': '9'} y: 0.125
# 2021-07-30 15:35:25|15364|INFO|ytopt.search.hps.ambs:101] Hyperopt driver finishing
# ```
# Look up the best configuration (found so far) and its value by inspecting the following created file: `results.csv` and `results.json`.
#
# In this run, the best configuration and its runtime is obtained:
#
# `{'BLOCK_SIZE': '9'}: 0.125`
| docs/tutorials/mmm-block/tutorial-mmm-block.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-gisenv] *
# language: python
# name: conda-env-.conda-gisenv-py
# ---
import pandas as pd
import os
"""Functions Related to Tranfering Data to Excel Sheet"""
def writeDataframe6(wbpath, df, sheetname, dfcols, xlcols, indexcol):
    """Write selected dataframe columns into an existing Excel worksheet.

    For each dataframe row, `indexcol` gives the 1-based worksheet row and
    each (dfcols[i], xlcols[i]) pair maps a dataframe column name to a
    worksheet column number. The workbook is saved back in place.
    """
    # BUG FIX: `opxl` was used without ever being imported (this file only
    # imports pandas and os), so calling this function raised NameError.
    import openpyxl as opxl
    wb = opxl.load_workbook(wbpath)
    wsheet = wb[sheetname]
    for index, row in df.iterrows():
        myrow = int(row[indexcol])
        for idf, ixl in zip(dfcols, xlcols):
            wsheet.cell(row=myrow, column=ixl).value = row[idf]
    wb.save(wbpath)
def saveDataFrame(dataframes, filepath, names):
    """Write each dataframe in `dataframes` to `filepath`, one sheet per
    entry, named by the parallel `names` list.

    Uses ExcelWriter as a context manager: the explicit `writer.save()` call
    was deprecated and removed in pandas 2.0.
    """
    with pd.ExcelWriter(filepath, engine='xlsxwriter') as writer:
        for df, sname in zip(dataframes, names):
            df.to_excel(writer, sheet_name=sname)
# +
"""Packagewise and Structurewise Summation"""
def packageWiseSum(package_df, projection_df):
    """Accumulate projected value-of-work-done into each package row, then
    refresh the balance column (col 4 = col 2 - col 3).

    Mutates `package_df` in place and returns it. Assumes a default
    RangeIndex (positions are addressed via iloc).
    """
    package_order = list(package_df['Package_No'])
    for _, proj_row in projection_df.iterrows():
        pos = package_order.index(proj_row['Package'])
        package_df.iloc[pos, 1] += proj_row['VDW_30_06_2020']
        package_df.iloc[pos, 2] += proj_row['VDW_30_06_2021']
    for label, _ in package_df.iterrows():
        package_df.iloc[label, 4] = package_df.iloc[label, 2] - package_df.iloc[label, 3]
    return package_df
def structureWiseSum(structure_df, projection_df):
    """Accumulate projected value-of-work-done into each structure row
    (matched on 'Code' vs 'Structure Code'), then refresh the balance column
    (col 6 = col 4 - col 5). Mutates and returns `structure_df`."""
    codes = list(structure_df['Code'])
    for _, proj_row in projection_df.iterrows():
        pos = codes.index(proj_row['Structure Code'])
        structure_df.iloc[pos, 3] += proj_row['VDW_30_06_2020']
        structure_df.iloc[pos, 4] += proj_row['VDW_30_06_2021']
    for label, _ in structure_df.iterrows():
        structure_df.iloc[label, 6] = structure_df.iloc[label, 4] - structure_df.iloc[label, 5]
    return structure_df
def dppWiseSum(structure_df, dpp_df):
    """Roll structure-level VWD figures up into DPP items (matched on
    'DPP_Item'), then refresh the balance column (col 4 = col 2 - col 3).
    Mutates and returns `dpp_df`."""
    dpp_items = list(dpp_df['DPP_Item'])
    for _, s_row in structure_df.iterrows():
        pos = dpp_items.index(s_row['DPP_Item'])
        dpp_df.iloc[pos, 1] += s_row['VWD_30_06_2020']
        dpp_df.iloc[pos, 2] += s_row['VWD_30_06_2021']
    for label, _ in dpp_df.iterrows():
        dpp_df.iloc[label, 4] = dpp_df.iloc[label, 2] - dpp_df.iloc[label, 3]
    return dpp_df
# +
def buildPackageWisePaymentMatrix(package_df):
    """Return a fresh per-package matrix with zeroed VWD/2021-payment columns,
    carrying over the package numbers and the 30-06-2020 payments."""
    packages = list(package_df['Package_No'])
    payments = list(package_df['Payment_30_06_2020'])
    zeros = [0] * len(packages)
    return pd.DataFrame({
        'Package_No': packages,
        'VWD_30_06_2020': zeros,
        'VWD_30_06_2021': zeros,
        'Payment_30_06_2020': payments,
        'Payment_30_06_2021': zeros,
    })
def buildStructureWisePaymentMatrix(structure_df):
    """Return a fresh per-structure matrix with zeroed VWD/2021-payment
    columns, carrying over names, codes, DPP items, and 2020 payments."""
    names = list(structure_df['Items'])
    codes = list(structure_df['Code'])
    dpp_codes = list(structure_df['DPP_Item'])
    payments = list(structure_df['Payment_30_06_2020'])
    zeros = [0] * len(names)
    return pd.DataFrame({
        'Items': names,
        'Code': codes,
        'DPP_Item': dpp_codes,
        'VWD_30_06_2020': zeros,
        'VWD_30_06_2021': zeros,
        'Payment_30_06_2020': payments,
        'Payment_30_06_2021': zeros,
    })
def buildVWDMatrix(projection_df):
    """Build the component-level value-of-work-done (VWD) matrix.

    Copies the descriptive columns from `projection_df`, then derives:
      VDW_30_06_2020  = Cost * Present_Progress     (col 8)
      VDW_30_06_2021  = Cost * Projected_progress   (col 9)
      Work_Done_20_21 = col 9 - col 8               (col 10)
    Both payment columns start at zero.
    """
    stc = list(projection_df['Structure Code'])
    packages = list(projection_df['Package'])
    components = list(projection_df['Component'])
    units = list(projection_df['Unit'])
    quantity = list(projection_df['Qnty'])
    cost = list(projection_df['Cost'])
    present_prog = list(projection_df['Present_Progress'])
    projected_prog = list(projection_df['Projected_progress'])
    zero_list = [0] * len(components)
    data = {'Package': packages, 'Structure Code': stc, 'Component': components,
            'Unit': units, 'Qnty': quantity, 'Cost': cost,
            'Present_Progress': present_prog, 'Projected_progress': projected_prog,
            'VDW_30_06_2020': zero_list, 'VDW_30_06_2021': zero_list,
            'Work_Done_20_21': zero_list,
            'Payment_30_06_2020': zero_list,
            # BUG FIX: the original dict repeated the 'Payment_30_06_2020' key,
            # so the 2021 payment column was silently missing from the output.
            'Payment_30_06_2021': zero_list}
    vwd_mat = pd.DataFrame(data)
    for index, row in vwd_mat.iterrows():
        vwd_mat.iloc[index, 8] = vwd_mat.iloc[index, 5] * vwd_mat.iloc[index, 6]
        vwd_mat.iloc[index, 9] = vwd_mat.iloc[index, 5] * vwd_mat.iloc[index, 7]
        vwd_mat.iloc[index, 10] = vwd_mat.iloc[index, 9] - vwd_mat.iloc[index, 8]
    return vwd_mat
# +
# Input/output workbook locations (absolute paths on the author's machine).
myfolder=r'E:\Website_24_11_2020\cmis6\cmis6\Civilworks cost\RADP Preparations' #Home Computer
#myfolder=r'F:\website\cmis6\Civilworks cost\RADP_20_21'
input_file_name=os.path.join(myfolder,'Projecttion_input.xlsx')
out_file_name=os.path.join(myfolder,'Projecttion_output.xlsx')
myframes=[]  # dataframes to write out, one sheet each
mynames=[]   # matching output sheet names
# Structure-type master sheet.
sheetName="Structure_Type"
structure_type_df=pd.read_excel(input_file_name,sheet_name=sheetName)
structure_type_df.fillna(0,inplace=True)
structure_type_df
myframes.append(structure_type_df)
mynames.append("Structure_Type")
"""Projection Info"""
sheetName="Kishoregnj"
projection_info_df=pd.read_excel(input_file_name,sheet_name=sheetName)
projection_info_df.fillna(0,inplace=True)
projection_info_df
myframes.append(projection_info_df)
mynames.append("Projection_Info")
# Package master sheet.
sheetName="Packages"
package_name_df=pd.read_excel(input_file_name,sheet_name=sheetName)
package_name_df.fillna(0,inplace=True)
myframes.append(package_name_df)
mynames.append("Package_Name")
# Component-level value-of-work-done matrix derived from the projections.
vwd_df=buildVWDMatrix(projection_info_df)
myframes.append(vwd_df)
mynames.append("Value_of_Work_Done")
# Package-wise rollup of the VWD matrix.
package_payment_df=buildPackageWisePaymentMatrix(package_name_df)
package_payment_df=packageWiseSum(package_payment_df,vwd_df)
myframes.append(package_payment_df)
mynames.append("Package_wise_payment")
"""Building Structurewise Cost"""
structure_payment_df=buildStructureWisePaymentMatrix(structure_type_df)
structure_payment_df=structureWiseSum(structure_payment_df,vwd_df)
myframes.append(structure_payment_df)
mynames.append("Structure_Wise_payment")
"""Building Structurewise Cost"""
sheetName="DPP_Item"
dpp_item_df=pd.read_excel(input_file_name,sheet_name=sheetName)
dpp_item_df.fillna(0,inplace=True)
dpp_item_df=dppWiseSum(structure_payment_df,dpp_item_df)
myframes.append(dpp_item_df)
mynames.append("DPP_Wise_payment")
# Write all collected frames to the output workbook.
saveDataFrame(myframes,out_file_name,mynames)
# -
projection_info_df
dpp_item_df
| Civilworks cost/RADP Preparations/RADP_Prep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# *Python Machine Learning 2nd Edition* by [<NAME>](https://sebastianraschka.com), Packt Publishing Ltd. 2017
#
# Code Repository: https://github.com/rasbt/python-machine-learning-book-2nd-edition
#
# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-2nd-edition/blob/master/LICENSE.txt)
# # Python Machine Learning - Code Examples
# # Chapter 8 - Applying Machine Learning To Sentiment Analysis
# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).
# %load_ext watermark
# %watermark -a "<NAME>" -u -d -v -p numpy,pandas,sklearn,nltk
# *The use of `watermark` is optional. You can install this IPython extension via "`pip install watermark`". For more information, please see: https://github.com/rasbt/watermark.*
# <br>
# <br>
# ### Overview
# - [Preparing the IMDb movie review data for text processing](#Preparing-the-IMDb-movie-review-data-for-text-processing)
# - [Obtaining the IMDb movie review dataset](#Obtaining-the-IMDb-movie-review-dataset)
# - [Preprocessing the movie dataset into more convenient format](#Preprocessing-the-movie-dataset-into-more-convenient-format)
# - [Introducing the bag-of-words model](#Introducing-the-bag-of-words-model)
# - [Transforming words into feature vectors](#Transforming-words-into-feature-vectors)
# - [Assessing word relevancy via term frequency-inverse document frequency](#Assessing-word-relevancy-via-term-frequency-inverse-document-frequency)
# - [Cleaning text data](#Cleaning-text-data)
# - [Processing documents into tokens](#Processing-documents-into-tokens)
# - [Training a logistic regression model for document classification](#Training-a-logistic-regression-model-for-document-classification)
# - [Working with bigger data – online algorithms and out-of-core learning](#Working-with-bigger-data-–-online-algorithms-and-out-of-core-learning)
# - [Topic modeling](#Topic-modeling)
# - [Decomposing text documents with Latent Dirichlet Allocation](#Decomposing-text-documents-with-Latent-Dirichlet-Allocation)
# - [Latent Dirichlet Allocation with scikit-learn](#Latent-Dirichlet-Allocation-with-scikit-learn)
# - [Summary](#Summary)
# <br>
# <br>
# # Preparing the IMDb movie review data for text processing
# ## Obtaining the IMDb movie review dataset
# The IMDB movie review set can be downloaded from [http://ai.stanford.edu/~amaas/data/sentiment/](http://ai.stanford.edu/~amaas/data/sentiment/).
# After downloading the dataset, decompress the files.
#
# A) If you are working with Linux or MacOS X, open a new terminal window, `cd` into the download directory and execute
#
# `tar -zxf aclImdb_v1.tar.gz`
#
# B) If you are working with Windows, download an archiver such as [7Zip](http://www.7-zip.org) to extract the files from the download archive.
# **Optional code to download and unzip the dataset via Python:**
# +
import os
import sys
import tarfile
import time
source = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
target = 'aclImdb_v1.tar.gz'
def reporthook(count, block_size, total_size):
    """urlretrieve reporthook: print download progress (percent, MB, speed).

    The first call (count == 0) only records the start time; subsequent
    calls overwrite the same console line via '\r'.
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    duration = time.time() - start_time
    progress_size = int(count * block_size)
    # Guard against a zero duration on coarse clocks / back-to-back calls,
    # which previously raised ZeroDivisionError.
    speed = progress_size / (1024.**2 * max(duration, 1e-6))
    percent = count * block_size * 100. / total_size
    sys.stdout.write("\r%d%% | %d MB | %.2f MB/s | %d sec elapsed" %
                     (percent, progress_size / (1024.**2), speed, duration))
    sys.stdout.flush()
# Download the archive only if neither the extracted folder nor the tarball
# is already present; choose the urlretrieve API by Python major version.
if not os.path.isdir('aclImdb') and not os.path.isfile('aclImdb_v1.tar.gz'):
    if (sys.version_info < (3, 0)):
        import urllib
        urllib.urlretrieve(source, target, reporthook)
    else:
        import urllib.request
        urllib.request.urlretrieve(source, target, reporthook)
# -
# Extract once. NOTE(review): tar.extractall() trusts paths inside the
# archive — fine for this known dataset, unsafe for untrusted archives.
if not os.path.isdir('aclImdb'):
    with tarfile.open(target, 'r:gz') as tar:
        tar.extractall()
# ## Preprocessing the movie dataset into more convenient format
# +
import pyprind
import pandas as pd
import os
# change the `basepath` to the directory of the
# unzipped movie dataset
basepath = 'aclImdb'

labels = {'pos': 1, 'neg': 0}
pbar = pyprind.ProgBar(50000)
# Collect rows in a plain list and build the DataFrame once at the end:
# the original `df = df.append(...)` inside the loop was O(n^2) and
# DataFrame.append was removed in pandas 2.0.
rows = []
for s in ('test', 'train'):
    for l in ('pos', 'neg'):
        path = os.path.join(basepath, s, l)
        for file in os.listdir(path):
            with open(os.path.join(path, file),
                      'r', encoding='utf-8') as infile:
                txt = infile.read()
            rows.append([txt, labels[l]])
            pbar.update()
df = pd.DataFrame(rows, columns=['review', 'sentiment'])
# -
# Shuffling the DataFrame:
# +
import numpy as np
np.random.seed(0)
# Shuffle the rows reproducibly so pos/neg reviews are interleaved before
# the sequential train/test split later in the notebook.
df = df.reindex(np.random.permutation(df.index))
# -
# Optional: Saving the assembled data as CSV file:
df.to_csv('movie_data.csv', index=False, encoding='utf-8')
# +
import pandas as pd
df = pd.read_csv('movie_data.csv', encoding='utf-8')
df.head(3)
# -
# <hr>
# ### Note
#
# If you have problems with creating the `movie_data.csv` file in the previous chapter, you can find a download a zip archive at
# https://github.com/rasbt/python-machine-learning-book-2nd-edition/tree/master/code/ch08/
# <hr>
# <br>
# <br>
# # Introducing the bag-of-words model
# ...
# ## Transforming documents into feature vectors
# By calling the fit_transform method on CountVectorizer, we just constructed the vocabulary of the bag-of-words model and transformed the following three sentences into sparse feature vectors:
# 1. The sun is shining
# 2. The weather is sweet
# 3. The sun is shining, the weather is sweet, and one and one is two
#
# +
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
# Fit CountVectorizer on three toy documents: learns the vocabulary and
# produces the sparse document-term count matrix (bag of words).
count = CountVectorizer()
docs = np.array([
    'The sun is shining',
    'The weather is sweet',
    'The sun is shining, the weather is sweet, and one and one is two'])
bag = count.fit_transform(docs)
# -
# Now let us print the contents of the vocabulary to get a better understanding of the underlying concepts:
print(count.vocabulary_)
# As we can see from executing the preceding command, the vocabulary is stored in a Python dictionary, which maps the unique words that are mapped to integer indices. Next let us print the feature vectors that we just created:
# Each index position in the feature vectors shown here corresponds to the integer values that are stored as dictionary items in the CountVectorizer vocabulary. For example, the first feature at index position 0 resembles the count of the word and, which only occurs in the last document, and the word is at index position 1 (the 2nd feature in the document vectors) occurs in all three sentences. Those values in the feature vectors are also called the raw term frequencies: *tf (t,d)*—the number of times a term t occurs in a document *d*.
print(bag.toarray())
# <br>
# ## Assessing word relevancy via term frequency-inverse document frequency
np.set_printoptions(precision=2)
# When we are analyzing text data, we often encounter words that occur across multiple documents from both classes. Those frequently occurring words typically don't contain useful or discriminatory information. In this subsection, we will learn about a useful technique called term frequency-inverse document frequency (tf-idf) that can be used to downweight those frequently occurring words in the feature vectors. The tf-idf can be defined as the product of the term frequency and the inverse document frequency:
#
# $$\text{tf-idf}(t,d)=\text{tf (t,d)}\times \text{idf}(t,d)$$
#
# Here the tf(t, d) is the term frequency that we introduced in the previous section,
# and the inverse document frequency *idf(t, d)* can be calculated as:
#
# $$\text{idf}(t,d) = \text{log}\frac{n_d}{1+\text{df}(d, t)},$$
#
# where $n_d$ is the total number of documents, and *df(d, t)* is the number of documents *d* that contain the term *t*. Note that adding the constant 1 to the denominator is optional and serves the purpose of assigning a non-zero value to terms that occur in all training samples; the log is used to ensure that low document frequencies are not given too much weight.
#
# Scikit-learn implements yet another transformer, the `TfidfTransformer`, that takes the raw term frequencies from `CountVectorizer` as input and transforms them into tf-idfs:
# +
from sklearn.feature_extraction.text import TfidfTransformer
# Convert the raw counts to L2-normalized tf-idf weights (smoothed idf).
tfidf = TfidfTransformer(use_idf=True,
                         norm='l2',
                         smooth_idf=True)
print(tfidf.fit_transform(count.fit_transform(docs))
      .toarray())
# -
# As we saw in the previous subsection, the word is had the largest term frequency in the 3rd document, being the most frequently occurring word. However, after transforming the same feature vector into tf-idfs, we see that the word is is
# now associated with a relatively small tf-idf (0.45) in document 3 since it is
# also contained in documents 1 and 2 and thus is unlikely to contain any useful, discriminatory information.
#
# However, if we'd manually calculated the tf-idfs of the individual terms in our feature vectors, we'd have noticed that the `TfidfTransformer` calculates the tf-idfs slightly differently compared to the standard textbook equations that we defined earlier. The equations for the idf and tf-idf that were implemented in scikit-learn are:
# $$\text{idf} (t,d) = log\frac{1 + n_d}{1 + \text{df}(d, t)}$$
#
# The tf-idf equation that was implemented in scikit-learn is as follows:
#
# $$\text{tf-idf}(t,d) = \text{tf}(t,d) \times (\text{idf}(t,d)+1)$$
#
# While it is also more typical to normalize the raw term frequencies before calculating the tf-idfs, the `TfidfTransformer` normalizes the tf-idfs directly.
#
# By default (`norm='l2'`), scikit-learn's TfidfTransformer applies the L2-normalization, which returns a vector of length 1 by dividing an un-normalized feature vector *v* by its L2-norm:
#
# $$v_{\text{norm}} = \frac{v}{||v||_2} = \frac{v}{\sqrt{v_{1}^{2} + v_{2}^{2} + \dots + v_{n}^{2}}} = \frac{v}{\big (\sum_{i=1}^{n} v_{i}^{2}\big)^\frac{1}{2}}$$
#
# To make sure that we understand how TfidfTransformer works, let us walk
# through an example and calculate the tf-idf of the word is in the 3rd document.
#
# The word is has a term frequency of 3 (tf = 3) in document 3, and the document frequency of this term is 3 since the term is occurs in all three documents (df = 3). Thus, we can calculate the idf as follows:
#
# $$\text{idf}("is", d3) = log \frac{1+3}{1+3} = 0$$
#
# Now in order to calculate the tf-idf, we simply need to add 1 to the inverse document frequency and multiply it by the term frequency:
#
# $$\text{tf-idf}("is",d3)= 3 \times (0+1) = 3$$
# Manual tf-idf computation for the term "is" in the third document,
# using scikit-learn's smoothed formulas.
tf_is = 3        # "is" appears three times in d3
n_docs = 3       # corpus size
df_is = 3        # "is" occurs in every document
idf_is = np.log((n_docs + 1) / (df_is + 1))  # == log(1) == 0
tfidf_is = tf_is * (idf_is + 1)              # == 3
print('tf-idf of term "is" = %.2f' % tfidf_is)
# If we repeated these calculations for all terms in the 3rd document, we'd obtain the following tf-idf vectors: [3.39, 3.0, 3.39, 1.29, 1.29, 1.29, 2.0 , 1.69, 1.29]. However, we notice that the values in this feature vector are different from the values that we obtained from the TfidfTransformer that we used previously. The final step that we are missing in this tf-idf calculation is the L2-normalization, which can be applied as follows:
# $$\text{tf-idf}_{norm} = \frac{[3.39, 3.0, 3.39, 1.29, 1.29, 1.29, 2.0 , 1.69, 1.29]}{\sqrt{[3.39^2, 3.0^2, 3.39^2, 1.29^2, 1.29^2, 1.29^2, 2.0^2 , 1.69^2, 1.29^2]}}$$
#
# $$=[0.5, 0.45, 0.5, 0.19, 0.19, 0.19, 0.3, 0.25, 0.19]$$
#
# $$\Rightarrow \text{tf-idf}_{norm}("is", d3) = 0.45$$
# As we can see, the results match the results returned by scikit-learn's `TfidfTransformer` (below). Since we now understand how tf-idfs are calculated, let us proceed to the next sections and apply those concepts to the movie review dataset.
# Recompute tf-idfs without normalization and take the last (third) document…
tfidf = TfidfTransformer(use_idf=True, norm=None, smooth_idf=True)
raw_tfidf = tfidf.fit_transform(count.fit_transform(docs)).toarray()[-1]
raw_tfidf
# …then apply the L2 normalization manually; this matches the earlier output.
l2_tfidf = raw_tfidf / np.sqrt(np.sum(raw_tfidf**2))
l2_tfidf
# <br>
# ## Cleaning text data
df.loc[0, 'review'][-50:]
import re
def preprocessor(text):
    """Clean a review: strip HTML tags, lowercase, replace non-word runs
    with single spaces, and append any emoticons (noses removed) at the end.

    FIX: the regex literals are now raw strings — `'[\\W]+'` / `'\\)'` emit
    invalid-escape-sequence warnings on modern Python.
    """
    text = re.sub(r'<[^>]*>', '', text)
    emoticons = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)', text)
    text = (re.sub(r'[\W]+', ' ', text.lower()) +
            ' '.join(emoticons).replace('-', ''))
    return text
preprocessor(df.loc[0, 'review'][-50:])
preprocessor("</a>This :) is :( a test :-)!")
df['review'] = df['review'].apply(preprocessor)
# <br>
# ## Processing documents into tokens
# +
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
def tokenizer(text):
    """Naive whitespace tokenizer (used as a GridSearch vectorizer option)."""
    return text.split()
def tokenizer_porter(text):
    """Whitespace-tokenize *text* and Porter-stem every token."""
    return [porter.stem(tok) for tok in text.split()]
# -
tokenizer('runners like running and thus they run')
tokenizer_porter('runners like running and thus they run')
# +
import nltk
nltk.download('stopwords')
# +
from nltk.corpus import stopwords
stop = stopwords.words('english')
[w for w in tokenizer_porter('a runner likes running and runs a lot')[-10:]
if w not in stop]
# -
# <br>
# <br>
# # Training a logistic regression model for document classification
# Strip HTML and punctuation to speed up the GridSearch later:
# BUG FIX: `df.loc` label slicing is end-INCLUSIVE, so the original
# `df.loc[:25000]` produced 25001 training rows and put review #25000 in
# BOTH the training and the test set. `:24999` gives a clean 25000/25000
# split with no overlap.
X_train = df.loc[:24999, 'review'].values
y_train = df.loc[:24999, 'sentiment'].values
X_test = df.loc[25000:, 'review'].values
y_test = df.loc[25000:, 'sentiment'].values
# +
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
# Lowercasing/preprocessing is disabled here because the reviews were
# already cleaned by `preprocessor` above.
tfidf = TfidfVectorizer(strip_accents=None,
                        lowercase=False,
                        preprocessor=None)
# Two sub-grids: the second one additionally switches off idf weighting and
# vector normalization (raw term frequencies).
param_grid = [{'vect__ngram_range': [(1, 1)],
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter],
               'clf__penalty': ['l1', 'l2'],
               'clf__C': [1.0, 10.0, 100.0]},
              {'vect__ngram_range': [(1, 1)],
               'vect__stop_words': [stop, None],
               'vect__tokenizer': [tokenizer, tokenizer_porter],
               'vect__use_idf':[False],
               'vect__norm':[None],
               'clf__penalty': ['l1', 'l2'],
               'clf__C': [1.0, 10.0, 100.0]},
              ]
lr_tfidf = Pipeline([('vect', tfidf),
                     ('clf', LogisticRegression(random_state=0))])
gs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid,
                           scoring='accuracy',
                           cv=5,
                           verbose=1,
                           n_jobs=-1)
# -
# **Important Note about `n_jobs`**
#
# Please note that it is highly recommended to use `n_jobs=-1` (instead of `n_jobs=1`) in the previous code example to utilize all available cores on your machine and speed up the grid search. However, some Windows users reported issues when running the previous code with the `n_jobs=-1` setting related to pickling the tokenizer and tokenizer_porter functions for multiprocessing on Windows. Another workaround would be to replace those two functions, `[tokenizer, tokenizer_porter]`, with `[str.split]`. However, note that the replacement by the simple `str.split` would not support stemming.
# **Important Note about the running time**
#
# Executing the following code cell **may take up to 30-60 min** depending on your machine, since based on the parameter grid we defined, there are 2*2*2*3*5 + 2*2*2*3*5 = 240 models to fit.
#
# If you do not wish to wait so long, you could reduce the size of the dataset by decreasing the number of training samples, for example, as follows:
#
# X_train = df.loc[:2500, 'review'].values
# y_train = df.loc[:2500, 'sentiment'].values
#
# However, note that decreasing the training set size to such a small number will likely result in poorly performing models. Alternatively, you can delete parameters from the grid above to reduce the number of models to fit -- for example, by using the following:
#
# param_grid = [{'vect__ngram_range': [(1, 1)],
# 'vect__stop_words': [stop, None],
# 'vect__tokenizer': [tokenizer],
# 'clf__penalty': ['l1', 'l2'],
# 'clf__C': [1.0, 10.0]},
# ]
# +
## @Readers: PLEASE IGNORE THIS CELL
##
## This cell is meant to generate more
## "logging" output when this notebook is run
## on the Travis Continuous Integration
## platform to test the code as well as
## speeding up the run using a smaller
## dataset for debugging
if 'TRAVIS' in os.environ:
    gs_lr_tfidf.verbose=2
    X_train = df.loc[:250, 'review'].values
    y_train = df.loc[:250, 'sentiment'].values
    X_test = df.loc[25000:25250, 'review'].values
    y_test = df.loc[25000:25250, 'sentiment'].values
# -
# Run the grid search (slow; see the running-time note above) and report
# the cross-validated and hold-out accuracies of the best model.
gs_lr_tfidf.fit(X_train, y_train)
print('Best parameter set: %s ' % gs_lr_tfidf.best_params_)
print('CV Accuracy: %.3f' % gs_lr_tfidf.best_score_)
clf = gs_lr_tfidf.best_estimator_
print('Test Accuracy: %.3f' % clf.score(X_test, y_test))
# <hr>
# <hr>
# #### Start comment:
#
# Please note that `gs_lr_tfidf.best_score_` is the average k-fold cross-validation score. I.e., if we have a `GridSearchCV` object with 5-fold cross-validation (like the one above), the `best_score_` attribute returns the average score over the 5-folds of the best model. To illustrate this with an example:
# +
from sklearn.linear_model import LogisticRegression
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
np.random.seed(0)
np.set_printoptions(precision=6)
# Random integer class labels plus a correlated 1-D feature matrix.
y = [np.random.randint(3) for i in range(25)]
X = (y + np.random.randn(25)).reshape(-1, 1)
# Materialize the 5 fold index pairs so the exact same splits can be fed
# to both cross_val_score and GridSearchCV below.
cv5_idx = list(StratifiedKFold(n_splits=5, shuffle=False, random_state=0).split(X, y))
cross_val_score(LogisticRegression(random_state=123), X, y, cv=cv5_idx)
# -
# By executing the code above, we created a simple data set of random integers that shall represent our class labels. Next, we fed the indices of 5 cross-validation folds (`cv5_idx`) to the `cross_val_score` scorer, which returned 5 accuracy scores -- these are the 5 accuracy values for the 5 test folds.
#
# Next, let us use the `GridSearchCV` object and feed it the same 5 cross-validation sets (via the pre-generated `cv5_idx` indices):
# +
from sklearn.model_selection import GridSearchCV
gs = GridSearchCV(LogisticRegression(), {}, cv=cv5_idx, verbose=3).fit(X, y)
# -
# As we can see, the scores for the 5 folds are exactly the same as the ones from `cross_val_score` earlier.
# Now, the best_score_ attribute of the `GridSearchCV` object, which becomes available after `fit`ting, returns the average accuracy score of the best model:
gs.best_score_
# As we can see, the result above is consistent with the average score computed by `cross_val_score`.
cross_val_score(LogisticRegression(), X, y, cv=cv5_idx).mean()
# #### End comment.
#
# <hr>
# <hr>
# <br>
# <br>
# # Working with bigger data - online algorithms and out-of-core learning
# +
# This cell is not contained in the book but
# added for convenience so that the notebook
# can be executed starting here, without
# executing prior code in this notebook
import os
import gzip

# Decompress movie_data.csv.gz into movie_data.csv if the CSV is missing.
# NOTE: the original used `with in_f = gzip.open(...)`, which is a
# SyntaxError -- context managers must be bound with `as`, not `=`.
if not os.path.isfile('movie_data.csv'):
    if not os.path.isfile('movie_data.csv.gz'):
        # Adjacent string literals are concatenated verbatim, so each line
        # needs a trailing space to keep the message readable (the URL line
        # intentionally has none: it continues a hyphenated URL).
        print('Please place a copy of the movie_data.csv.gz '
              'in this directory. You can obtain it by '
              'a) executing the code in the beginning of this '
              'notebook or b) by downloading it from GitHub: '
              'https://github.com/rasbt/python-machine-learning-'
              'book-2nd-edition/blob/master/code/ch08/movie_data.csv.gz')
    else:
        with gzip.open('movie_data.csv.gz', 'rb') as in_f, \
                open('movie_data.csv', 'wb') as out_f:
            out_f.write(in_f.read())
# +
import numpy as np
import re
from nltk.corpus import stopwords
# The `stop` is defined as earlier in this chapter
# Added it here for convenience, so that this section
# can be run as standalone without executing prior code
# in the directory
stop = stopwords.words('english')
def tokenizer(text):
    """Strip HTML, rescue emoticons, normalize, and return non-stopword tokens.

    Combines the earlier `preprocessor` and `tokenizer` steps into one
    function suitable for out-of-core learning.
    """
    no_markup = re.sub('<[^>]*>', '', text)
    lowered = no_markup.lower()
    emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', lowered)
    normalized = re.sub('[\W]+', ' ', lowered) + \
        ' '.join(emoticons).replace('-', '')
    tokenized = [w for w in normalized.split() if w not in stop]
    return tokenized
def stream_docs(path):
    """Lazily yield (review_text, label) pairs from the CSV at *path*.

    Relies on the fixed line layout of movie_data.csv: the last three
    characters of every data line are a comma, the one-digit label, and the
    trailing newline.
    """
    with open(path, 'r', encoding='utf-8') as csv:
        next(csv)  # skip header
        for line in csv:
            yield line[:-3], int(line[-2])
# -
# Pull the first (text, label) pair to verify the stream works.
next(stream_docs(path='movie_data.csv'))
def get_minibatch(doc_stream, size):
    """Collect up to *size* documents and labels from *doc_stream*.

    Returns (None, None) when the stream is exhausted before a full batch
    could be gathered (any partially collected batch is discarded), which
    signals the training loop to stop.
    """
    batch_texts = []
    batch_labels = []
    for _ in range(size):
        try:
            text, label = next(doc_stream)
        except StopIteration:
            return None, None
        batch_texts.append(text)
        batch_labels.append(label)
    return batch_texts, batch_labels
# +
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
# HashingVectorizer needs no fitted vocabulary, so it works out-of-core;
# 2**21 features keeps hash collisions rare.
vect = HashingVectorizer(decode_error='ignore',
                         n_features=2**21,
                         preprocessor=None,
                         tokenizer=tokenizer)
# -
# **Note**
#
# - You can replace `SGDClassifier(n_iter, ...)` by `SGDClassifier(max_iter, ...)` in scikit-learn >= 0.19 (the code below branches on the installed version).
# +
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
# loss='log' gives logistic regression trained with SGD.
if Version(sklearn_version) < '0.18':
    clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
else:
    clf = SGDClassifier(loss='log', random_state=1, max_iter=1)
doc_stream = stream_docs(path='movie_data.csv')
# +
import pyprind
pbar = pyprind.ProgBar(45)
classes = np.array([0, 1])
# 45 minibatches x 1000 docs = the first 45,000 reviews for training.
for _ in range(45):
    X_train, y_train = get_minibatch(doc_stream, size=1000)
    if not X_train:
        break
    X_train = vect.transform(X_train)
    clf.partial_fit(X_train, y_train, classes=classes)
    pbar.update()
# -
# The remaining 5,000 reviews serve as the hold-out test set.
X_test, y_test = get_minibatch(doc_stream, size=5000)
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test))
# Finally fold the test documents into the model as well (after scoring).
clf = clf.partial_fit(X_test, y_test)
# ## Topic modeling
# ### Decomposing text documents with Latent Dirichlet Allocation
# ### Latent Dirichlet Allocation with scikit-learn
# +
import pandas as pd
df = pd.read_csv('movie_data.csv', encoding='utf-8')
df.head(3)
# +
## @Readers: PLEASE IGNORE THIS CELL
##
## This cell is meant to create a smaller dataset if
## the notebook is run on the Travis Continuous Integration
## platform to test the code on a smaller dataset
## to prevent timeout errors and just serves a debugging tool
## for this notebook
if 'TRAVIS' in os.environ:
    df.loc[:500].to_csv('movie_data.csv')
    df = pd.read_csv('movie_data.csv', nrows=500)
    print('SMALL DATA SUBSET CREATED FOR TESTING')
# +
from sklearn.feature_extraction.text import CountVectorizer
# max_df=.1 drops words appearing in more than 10% of documents (too
# common to be topic-specific); keep at most 5000 terms.
count = CountVectorizer(stop_words='english',
                        max_df=.1,
                        max_features=5000)
X = count.fit_transform(df['review'].values)
# +
from sklearn.decomposition import LatentDirichletAllocation
# NOTE(review): `n_topics` was renamed to `n_components` in newer
# scikit-learn releases -- confirm the installed version accepts it.
lda = LatentDirichletAllocation(n_topics=10,
                                random_state=123,
                                learning_method='batch')
X_topics = lda.fit_transform(X)
# -
# components_ has shape (n_topics, n_vocabulary_terms).
lda.components_.shape
# +
n_top_words = 5
feature_names = count.get_feature_names()
# For each topic, print the 5 terms with the largest weights.
for topic_idx, topic in enumerate(lda.components_):
    print("Topic %d:" % (topic_idx + 1))
    print(" ".join([feature_names[i]
                    for i in topic.argsort()\
                        [:-n_top_words - 1:-1]]))
# -
# Based on reading the 5 most important words for each topic, we may guess that the LDA identified the following topics:
#
# 1. Generally bad movies (not really a topic category)
# 2. Movies about families
# 3. War movies
# 4. Art movies
# 5. Crime movies
# 6. Horror movies
# 7. Comedies
# 8. Movies somehow related to TV shows
# 9. Movies based on books
# 10. Action movies
# To confirm that the categories make sense based on the reviews, let's plot 5 movies from the horror movie category (category 6 at index position 5):
# +
# Rank all documents by their weight on topic 6 (index 5) and print the
# first 300 characters of the three strongest matches.
horror = X_topics[:, 5].argsort()[::-1]
for iter_idx, movie_idx in enumerate(horror[:3]):
    print('\nHorror movie #%d:' % (iter_idx + 1))
    print(df['review'][movie_idx][:300], '...')
# -
# Using the preceding code example, we printed the first 300 characters from the top 3 horror movies, and indeed the reviews -- even though we don't know which exact movie they belong to -- sound like reviews of horror movies. (However, one might argue that movie #2 could also belong to topic category 1.)
# <br>
# <br>
# # Summary
# ...
# ---
#
# Readers may ignore the next cell.
# ! python ../.convert_notebook_to_script.py --input ch08.ipynb --output ch08.py
| code/ch08/ch08.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="c9a88b39b94bc77f9b4d035b4a58411a102ee013"
# ## Amazon Alexa Reviews Analysis
# + [markdown] _uuid="fd943c986770a35c35c5d85b4ee97bbb791a6f74"
# ### [Dataset link](https://www.kaggle.com/sid321axn/amazon-alexa-reviews)
# + [markdown] _uuid="254de402d8b20e2e27ed811cd0c6e3aa7f7a5fa1"
# * The project analyzes reviews by users of **Amazon’s Alexa products**.
# * Using **Natural Language Processing** on the product reviews and some additional features, a machine learning model should be able to predict if the feedback is **positive (1) or negative (0).**
# + [markdown] _uuid="7965510cc9e180efc6eb872afeec61b91c455514"
# * The primary methods used are **Random Forest and Gradient Boosting** for this dataset.
# + [markdown] _uuid="384981cdb165af21e717e21238eb5b7531041962"
# ### Importing Libraries
# + _uuid="636303e1832f69824245bd283a164f4a0422101d"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# + _uuid="2c4ffc071012b7a39f3c1b3599e765fc3c00e89b"
import warnings
warnings.filterwarnings("ignore")
# + _uuid="dc24a90f00ad065fec442df131c3da43947f1ac4"
sns.set_palette("bright")
# + [markdown] _uuid="f181642a1524ea96de21bdb8c918e6302091026b"
# ### Importing Data
# + _uuid="f985a886b62dfbe4531b244476fffc7a472e4f8b"
# The Alexa reviews file is tab-separated.
data = pd.read_csv("amazon_alexa.tsv", sep="\t")
# + _uuid="f54bee06d41ea08dc726eafad8da8af2b8132723"
data.head()
# + [markdown] _uuid="d5ff9b7dde4c7abb0f773aabf1efc636ee33abe0"
# <br>
# ### Exploring the dataset
# This data set has five columns:
# * rating
# * date
# * variation
# * verified_reviews
# * feedback
#
# We will explore each column with the help of charts and how it impacts our target column **feedback**.
# + _uuid="ec4ae5218c3d8caa176badbe8bc568a851ea6622"
data.columns
# + [markdown] _uuid="f6a6893a1f2a45e47654fa5abdfb6755864ea388"
# Rating column has values:
# + _uuid="cc3514848e986eddd5bd64659902c63ebd26d977"
data['rating'].unique()
# + [markdown] _uuid="4846de8757274d178bd374337ed0190c592f848d"
# #### Converting *date* attribute from string to datetime.date datatype
# We will be using date column for feature engineering, so it would be a good idea if we convert this column from a **string** datatype to a **datetime.date** datatype.
# + _uuid="36e7d43749f66fd49e1b4a207118e6ae8fb2e149"
type(data['date'][0]) , data['date'][0]
# + _uuid="923b953ebc6daef59ad0cec7fbad77d666921d50"
# Parse the date strings into pandas timestamps.
data['date'] = pd.to_datetime(data['date'])
data['date'][0]
# + _uuid="506ebeb4205b53772319b83c71392c6f270ef016"
# Strip the time component: keep just the calendar date of each review.
dates = data['date']
only_dates = [timestamp.date() for timestamp in dates]
data['only_dates'] = only_dates
data['only_dates'][0]
# + [markdown] _uuid="97b2aba1dab9137d61113de9d1ef1101240d64e0"
# ### Feature Engineering:
# + [markdown] _uuid="c96e4cc94cb001958bad1b3f33b2514213488434"
# #### Extracting *Year, Month, Day of the Week* from date.
# * We will be using these features later in the model.
# * We will extract month, year and day of the week into separate columns.
# + _uuid="806275b518251be07ec1ceb54eedd77105b0eda1"
# Pull year, month, and ISO weekday out of each timestamp so the model can
# use them as separate numeric features.
only_year = [ts.year for ts in dates]
data['year'] = only_year

only_month = [ts.month for ts in dates]
data['month'] = only_month

# isoweekday(): 1 -> monday ... 7 -> sunday
only_weekday = [ts.isoweekday() for ts in dates]
data['day_of_week'] = only_weekday
# + [markdown] _uuid="3447080907b5083cffc241e2f22b33ff84bc37c0"
# #### Estimating length of the reviews
# * Calculating the length of text proves to be an important feature for classifying text in a Natural Language Processing problem.
# + _uuid="b3f30ce0fd4fd1c4c5e1822cac3717b0e0676fad"
# Character count of each review -- review length is often predictive in
# text-classification problems.
reviews = data['verified_reviews']
len_review = [len(text) for text in reviews]
data['len_of_reviews'] = len_review
# + _uuid="b1a59fd0f9342db344d1e0d327dc00aa385eaaa9"
data['len_of_reviews'][0], data['verified_reviews'][0]
# + [markdown] _uuid="a641e97b2b8b86620764ac497393e57eeb4e226d"
# #### Updated Column List:
# * As a result, we have added new columns in our dataset.
# + _uuid="658b791819d97db272bc67e6c4fe0b1f2002183c"
data.columns
# + [markdown] _uuid="4084eb7de9a67d7c974a9cad0e54f3596b3d9c7e"
# ### Visualizing your Exploratory Data Analysis:
#
# * With the help of this graph we can detect that the number of 5 rating review is high in this dataset. <br>
# * In other words it seems that customers are very much happy with Alexa products.
# + _uuid="d744d75797652d0ae54074d9429c03458b937c66"
# Bar chart of how many reviews fall into each star rating.
plt.figure(figsize=(15,7))
plt.bar(height = data.groupby('rating').count()['date'], x = sorted(data['rating'].unique(), reverse= False))
plt.xlabel("Ratings")
plt.ylabel("Count")
plt.title("Count of Ratings")
plt.show()
# + [markdown] _uuid="b31e15427e327f756b771d635764545c538dc6ce"
# * On applying a hue of feedback, we can detect that reviews which have a rating of more than 2, result in a positive feedback (1).
# * We will be removing this column from the training set, we would prefer the learning algorithm not to capitalize on this feature.
# + _uuid="10d935ad4a4d0b4fbb358df96fdc4e52f6c5e717"
plt.figure(figsize=(15,7))
sns.countplot(x="rating", hue="feedback", data=data)
plt.show()
# + [markdown] _uuid="2499c15bb4cea7a4cac8b84f1d00c2378e5acb65"
# * The bar plot of rating with respect to variation highlights that black dot is the most frequently ordered product and also most liked.
# + _uuid="5cd1f8b043675ee725652db2bee8809926d71b49"
# estimator=sum turns the bar lengths into rating totals per variation.
plt.figure(figsize=(15,7))
sns.barplot(x="rating", y="variation", hue="feedback", data=data, estimator= sum, ci = None)
plt.show()
# + [markdown] _uuid="f76b993cae0b91216bd778e73984e1a04dc3323f"
# * On changing the aggregation function to mean(default), average rating seems to be 4.5 for every positive feedback review.
# + _uuid="521b177009237ab6e890f0accbca3497470a0434"
plt.figure(figsize=(15,7))
sns.barplot(x="rating", y="variation", hue="feedback", data=data, ci = None)
plt.show()
# + [markdown] _uuid="d26e3e9200b61b6249628ef5348d21fbb1881450"
# * When we take month into consideration, most orders in this dataset comes from the month of July.
# + _uuid="d42197dd6eb78f3544b4274d7e6515bb46ef0182"
plt.figure(figsize=(15,7))
sns.barplot(y="rating", x="month", hue="feedback", data=data, ci = None, estimator= sum)
plt.show()
# + [markdown] _uuid="49089ca6dfd024dc2e2000773b79f255f6979244"
# * Changing the average function to mean again does not highlight anything important, just the fact that the products have high ratings.
# + _uuid="5abf4534f2584d66ff25371461300c25aba49bfa"
plt.figure(figsize=(15,7))
sns.barplot(y="rating", x="month", hue="feedback", data=data, ci = None)
plt.show()
# + [markdown] _uuid="b0a4995564e8f6d252bdd25f62a687fa52fddf89"
# * When day of the week is considered, it seems that Monday happens to be the day when most people write their reviews.
# * This can relate to prime delivery guarantee within two days, and the most frequent day of ordering being on Saturday or the weekend.
# + _uuid="ade86f8d6e3e795c138393da30dc4543a658757b"
plt.figure(figsize=(15,7))
sns.countplot(x="day_of_week", hue="feedback", data=data)
plt.show()
# + _uuid="f85f5da4329fb2fd035170b7e98ff2d2b564fa07"
plt.figure(figsize=(15,7))
sns.barplot(y="rating", x="day_of_week", hue="feedback", data=data, ci = None)
plt.show()
# + [markdown] _uuid="66d644b277806128b7f1dcbd99e934e8aa31d136"
# * Overall this dataset is imbalanced: positive reviews (feedback = 1) far outnumber negative ones.
# * Therefore the important score to look at would be the **F1 Score**, on how the model performed.
# + _uuid="12f2fc67d0a38d72c62f5c21bf57779838ba91be"
plt.figure(figsize=(15,7))
sns.countplot(x="feedback", data=data)
plt.show()
# + [markdown] _uuid="2981a0692cd9601717d4952c0d507c3aaf1ffd03"
# * Finally the length column, which depicts that customers with negative review tend to write a longer review.
# + _uuid="5ec198195c7b055ac217323c420a39183c4ee7a9"
# Overlaid length distributions for negative vs. positive feedback.
plt.figure(figsize=(15,7))
sns.distplot(data[data['feedback'] == 0]['len_of_reviews'], label = 'Feedback - 0')
sns.distplot(data[data['feedback'] == 1]['len_of_reviews'], label = 'Feedback - 1')
plt.legend()
plt.show()
# + [markdown] _uuid="353ad1817b48ef5eb6425bce6011682882c4193d"
# ### Data Preprocessing:
# + [markdown] _uuid="6634f8868564e80ce58b9ec600c2dfdc6fa44580"
# #### TfidfVectorizer:<br>
#
# * Since we cannot directly insert text data into our machine learning models, we will have to use a vectorizer.
# * The most common vectorizer for any text data happens to be the Count-Vectorizer, because it is easy to understand and relate to.
# * We will use the Term frequency-inverse document frequency (TF-IDF) vectorizer for this dataset.
# * The formula is as:
# 
# + _uuid="24922f8caaefdd388bbe8ef6df67412b0a2e4ba6"
from sklearn.feature_extraction.text import TfidfVectorizer
# + _uuid="d0eb0571584544a990926498935c9accf6b1de3d"
tdf = TfidfVectorizer(stop_words='english')
# + _uuid="20bee0816d90cd79164df80cba94b5c39cbd99f6"
# NOTE: .toarray() densifies the whole tf-idf matrix -- memory-heavy for
# large vocabularies, but convenient for concatenation with other features.
pd.DataFrame(tdf.fit_transform(data['verified_reviews']).toarray())
# + _uuid="e398565fc5245e6e82d58b55b2a1a3c1c8f74c4c"
tdf_data = pd.DataFrame(tdf.fit_transform(data['verified_reviews']).toarray())
# + [markdown] _uuid="d0e422fe65cac0778736176c3d46fd3f64b85865"
# ### One Hot Encoding: <br>
#
# * For variation we will be using one hot encoding, which can be explained by the image below.
#
# 
# <br>
# * One important thing to take care of: no matter how many dummy variables you end up with, make sure to drop one of them.
# * You can do this by setting **drop_first = True**.
# * This problem is sometimes stated as dummy variable trap.
# + _uuid="d7481b7a79d48455bfa1ed78e296abad34cd9410"
pd.get_dummies(data['variation'], drop_first= True)
# + _uuid="0306554b9e416ff5b7708fb9b147214f751d3538"
# NOTE(review): the markdown above recommends drop_first=True to avoid the
# dummy-variable trap, but this cell keeps ALL dummy columns -- confirm
# which behavior is intended.
one_hot_data = pd.get_dummies(data['variation'])
# + [markdown] _uuid="a78af03e06008f584004f727885c010efaedcdf8"
# * Now, we can just concat all the features which we intend to use into a single dataframe called **X**.
# + _uuid="718422f68f0235654b3d8b145c09cf3d3c7f13ad"
X = pd.concat([one_hot_data, tdf_data, data['month'], data['day_of_week'], data['len_of_reviews']], axis=1)
# + _uuid="ef64a221df88861e2d97e855e76c08fb5895a51e"
X.head()
# + [markdown] _uuid="5fa6223d1fe8110e45b4af48b3e86a909122cf46"
# * And the target vector **y**.
# + _uuid="79596c152563c01d83818a9b488652f34270a401"
y = data['feedback']
# + [markdown] _uuid="b69c488a8dbf5d5cd00fdf8e192d47e8a254462c"
# ### K Fold Cross Validation:
# * K Fold cross validation gives a good idea on how is our selected model performing on different chunks of data.
# * We are getting perfect scores through cross validation, as a result we would not be performing hyper parameter tuning.
# + _uuid="f1d73a87f6598f6f8fd44769a622b0e09b658d86"
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score
# 5-fold CV scored with F1 because of the class imbalance noted above.
rf = RandomForestClassifier()
k_fold = KFold(n_splits=5)
cross_val_score(rf, X, y, cv=k_fold, scoring='f1')
# -
# Naive Bayes baseline on the same folds for comparison.
from sklearn.naive_bayes import MultinomialNB
mnb = MultinomialNB()
cross_val_score(mnb, X, y, cv=k_fold, scoring='f1')
# + _uuid="27bdfc1e8dae38ef847314eaeda57c9873fe68ee"
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# + [markdown] _uuid="373537c1267597dbd4d7d774660b30291f32f947"
# #### Random Forest Classifier:
# + _uuid="3822e2282e32bb832c2dd9911dd09d8e9c5a6104"
rf = RandomForestClassifier()
fit_model = rf.fit(X_train, y_train)
# + [markdown] _uuid="531c6757e9212120f0a262a790cbbf3990e97bf9"
# * One of the most important methods of random forest classifier in scikit learn is **feature_importances_**.
# * Let us have a look at the top 10 features.
# + _uuid="8f6f16c3db6f3ae152a90cfae9a897bebc2cae33"
# Pair each importance with its column name and sort ascending, then print
# only the ten most important features in descending order. (The original
# looped over ALL features with a manual counter just to print ten.)
ranked = sorted(zip(fit_model.feature_importances_, X_train.columns),
                key=lambda pair: pair[0])
for element in ranked[:-11:-1]:
    print(element)
# + _uuid="bdb81d96e2e7e3778371e52a5db529f146f9aaec"
y_pred = rf.predict(X_test)
# -
# ### Learning Curve:
# +
from sklearn.model_selection import learning_curve
# F1-scored learning curve over 20 training-set sizes, 3-fold CV each.
train_sizes, train_scores, test_scores = learning_curve(RandomForestClassifier(), X_train, y_train, scoring='f1', train_sizes=np.linspace(0.1, 1.0, 20), cv = 3)
train_scores = np.mean(train_scores, axis = 1)
test_scores = np.mean(test_scores, axis = 1)
plt.plot(train_sizes, train_scores, 'o-', label="Training score")
plt.plot(train_sizes, test_scores, 'o-', label="Cross-validation score")
plt.legend();
# + _uuid="c0e3f2579df376ad28f3459009c0cf03712becb8"
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score
# + _uuid="6b8f03da4d482b5554c13d2978bf492236eb5db3"
# Hold-out metrics for the random forest.
print("==============================================")
print("For Random Forest Classifier:\n")
print("Accuracy Score: ",accuracy_score(y_test, y_pred))
print("Precision Score: ",precision_score(y_test, y_pred))
print("Recall Score: ",recall_score(y_test, y_pred))
print("F1 Score: ",f1_score(y_test, y_pred))
print("Confusion Matrix:\t \n",confusion_matrix(y_test, y_pred))
print("==============================================")
# + [markdown] _uuid="829015322040335c87612d331deab05b29595e55"
# #### Gradient Boosting Classifier:
# + _uuid="750740bfd3926245fb8998a7f369edd3f5d43f60"
from sklearn.ensemble import GradientBoostingClassifier
# Same train/test split and metric report as the random forest above.
gbc = GradientBoostingClassifier()
gbc.fit(X_train, y_train)
y_pred = gbc.predict(X_test)
print("==============================================")
print("For Gradient Boosting Classifier:\n")
print("Accuracy Score: ",accuracy_score(y_test, y_pred))
print("Precision Score: ",precision_score(y_test, y_pred))
print("Recall Score: ",recall_score(y_test, y_pred))
print("F1 Score: ",f1_score(y_test, y_pred))
print("Confusion Matrix:\t \n",confusion_matrix(y_test, y_pred))
print("==============================================")
# + [markdown] _uuid="6ecfc36f71b476778e3d44a96a5a12e1923e294a"
# ### Conclusions:
# * Feature Engineering is the most crucial step when it comes to Natural Language Processing.
# * Replacing the Count Vectorizer with a TF-IDF Vectorizer also made a difference on the F1 score.
#
# + _uuid="44e3483f29aa1f94cdb4ad68d02cead836a4ee29"
# Persist the test labels alongside the predictions for inspection.
results = pd.DataFrame(data = {'Y Test': y_test, 'Y Predictions': y_pred})
# + _uuid="ff750ffd92defd258ecd1f24ee4161bdc984baf0"
results.head()
# + _uuid="c08b03b7c231ead72834b01ed5402ed564df008d"
results.to_csv('Results.csv')
# + _uuid="5b72dce3bead4e287f3e6ae2fac5bc656ffa7391"
| Amazon-Alexa-Reviews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
#Importing the libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
#Visualisations
import seaborn as sns
import matplotlib.pyplot as plt
# -
# # Data Preprocessing
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler,OneHotEncoder
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.pipeline import make_pipeline
# Importing the dataset
train = pd.read_csv("input/train.csv")
test = pd.read_csv("input/test.csv")
# Stack train features (all columns except the SalePrice target, which is
# the last column of train.csv) on top of the test rows.
df = pd.concat([train.iloc[:,:-1],test],axis=0)
target = train["SalePrice"]
# Feature-only copy of the training data (SalePrice dropped).
features = train.iloc[:,:-1].copy()
# + _uuid="f1aaaa9f4a564824465e129b75a9300bed749695"
#Removing Skew from SalesPrice data
# Log-transform the target so its distribution is closer to normal.
target_log = np.log(target)
# Split on `features` (SalePrice already dropped), NOT on the full `train`
# frame -- otherwise the SalePrice column leaks into X_train/X_test and the
# downstream pipeline would train on the answer as a numeric feature.
X_train, X_test, y_train, y_test = train_test_split(features, target_log, test_size=0.2, random_state=0)
print(train.shape, target_log.shape)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
# + [markdown] _uuid="176f907970011ddd964f03ff05c627b7e334c981"
# Creating the preprocessing pipelines for both numeric and categorical data.
# + _uuid="ac8bc53c3ece7780a6cd31b7b62b970b07e124af"
def features_split(df, cardinality = 10):
    """Split a DataFrame's columns into usable categorical and numerical lists.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose columns are classified by dtype.
    cardinality : int, default 10
        Object columns with ``nunique() >= cardinality`` are excluded
        entirely (too many levels to one-hot encode sensibly).

    Returns
    -------
    tuple[list[str], list[str]]
        Low-cardinality object column names, then int/float column names.
    """
    # The original also computed the high-cardinality mask but never used
    # it; that dead computation has been removed.
    is_low_card_categorical = (df.dtypes == "object") & (df.nunique() < cardinality)
    is_numerical = (df.dtypes == "int") | (df.dtypes == "float")
    selected_categorical_features = df.columns[is_low_card_categorical].values.tolist()
    selected_numerical_features = df.columns[is_numerical].values.tolist()
    return selected_categorical_features, selected_numerical_features
# + _kg_hide-input=true _uuid="a51193a59700da0acd7441003cda5b265c97098f"
categorical_features, numerical_features = features_split(X_train)
# + _uuid="47b4de73610d2212ac2b026877d551291a06b000"
categorical_features
# + _uuid="b1c495612791a9846fb42c27b88e17cbb51b8a29"
# Categoricals: fill missing with the constant 'missing' then one-hot
# encode; numericals: median imputation only.
categorical_transformer = make_pipeline(SimpleImputer(strategy='constant', fill_value = 'missing'),
                                       OneHotEncoder(handle_unknown='ignore',sparse = False))
numerical_transformer = make_pipeline(SimpleImputer(strategy='median'))
preprocess = make_column_transformer(
    (categorical_transformer, categorical_features),
    (numerical_transformer, numerical_features))
# + [markdown] _uuid="043d40caec840f84f1361145d2f93df1102007cb"
# Combining preprocessing step based on the ColumnTransformer with a classifier
# in order to get a full prediction pipeline:
# + _uuid="355ef1aba9b1e5cebad031de648ddf31432f19ec"
from sklearn.ensemble import RandomForestRegressor
model = make_pipeline(preprocess, RandomForestRegressor(n_estimators=10))
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
model.fit(X_train,y_train)
# + _uuid="bc744284b1bb5771bc0594036139004b4ea61bd9"
model.score(X_test, y_test)
# + [markdown] _uuid="415d967bbc586e4468d7815ac64653a265d54ac1"
# # Model validation
# + _uuid="5f0b5e4b53f8e74a8523a43f2b97430111ff462f"
#model validation
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.model_selection import GridSearchCV, cross_val_score
# + _uuid="1c7cdd58c06efdd9e5f5855f3b7682ae8b20cde1"
#importing classes of models
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor
# + _uuid="e5e53b9a1e2fa45aa037c79efb7e3dfd95ad3462"
def evaluation(y_test, predicted_values):
    """Print MAE, R^2, and MSE for a set of predictions on one line."""
    mae = mean_absolute_error(y_test, predicted_values)
    r2 = r2_score(y_test, predicted_values)
    mse = mean_squared_error(y_test, predicted_values)
    print(" MAE score: " + str(mae) +
          " R^2 score: " + str(r2) +
          " MSE score: " + str(mse))
# + _uuid="8bf3635c9e0dbe6039e0debc3cb98e65bc119e17"
predicted_values = model.predict(X_test)
evaluation(y_test, predicted_values)
# + _uuid="99d0c5846eabde2bcede1068cfbd3b2e70586490"
def model_validation(estimator, parameters, X_train, y_train, X_test, y_test):
    """Grid-search *estimator* (wrapped in the module-level `preprocess`
    pipeline) over *parameters*, print hold-out metrics via `evaluation`,
    and print a 10-fold cross-validation R^2 for the best model.
    """
    model_pipe = make_pipeline(preprocess, estimator)
    model_grid = GridSearchCV(model_pipe, parameters,
                     verbose=1 , scoring = "r2", cv=5)
    model_grid.fit(X_train, y_train)
#     print("Best model: " + str(model_grid.best_estimator_))
#     print("Best score: " + str(model_grid.best_score_))
    # Refit the best pipeline on the full training split before scoring.
    predicted_values = model_grid.best_estimator_.fit(X_train, y_train).predict(X_test)
    evaluation(y_test, predicted_values)
    print(" cross validation score: " +str(np.mean(cross_val_score(model_grid.best_estimator_,X_train,y_train,cv=10, scoring="r2"))))
# -
# # Model Tuning
# + [markdown] _uuid="8bd1adda31216c44ad5e0c153caa03d783061a32"
# XGBRegressor
# + _uuid="b0362933995d99aa8b1d5b2c7403a7a131d1e408"
# XGBoost: grid over tree depth only.
parameters = {"xgbregressor__max_depth": [3,5]}
model_validation(XGBRegressor(), parameters, X_train, y_train, X_test, y_test)
# + [markdown] _uuid="17c7bb9286a09c035b438685b65c3455ae9e5192"
# Linear Regression Model
# + _uuid="75097282a15b1d6679529d9864b8c781749d83bb"
linear_regression_parameters = {'linearregression__fit_intercept':[True,False],
                               'linearregression__normalize':[True,False],
                               'linearregression__copy_X':[True, False],
                               }
model_validation(LinearRegression(), linear_regression_parameters, X_train, y_train, X_test, y_test)
# + [markdown] _uuid="961b5e15c744d8c129f6ffcbf4580626123003fa"
# Lasso Model
# + _uuid="6546fbac2df1023f876803a29241612d9b88ddef"
lasso_parameters = {"lasso__fit_intercept":[True,False],
                   "lasso__normalize":[True,False],
                   "lasso__copy_X":[True, False],
                   "lasso__precompute" : [True, False],
                   }
model_validation(Lasso(), lasso_parameters, X_train, y_train, X_test, y_test)
# + [markdown] _uuid="366d6c57ad2cf69de425ce4d254f61eeecc2c5a9"
# Ridge Model
# + _uuid="892485b727a4900f3cf7122382ed3adbe9bd8ab6"
# Ridge is tuned by hand here (not via model_validation) so that the grid
# object stays available for inspection below.
ridge_pipe = make_pipeline(preprocess, Ridge())
ridge_parameters = {"ridge__fit_intercept":[True,False],
                   "ridge__normalize":[True,False],
                   "ridge__copy_X":[True, False],
                   "ridge__solver" : ["auto"],
                   }
ridge_grid = GridSearchCV(ridge_pipe, ridge_parameters,
                 verbose=1 , scoring = "r2", cv=3)
ridge_grid.fit(X_train, y_train)
# Fixed copy/paste labels: these report the RIDGE grid, not lasso.
print("Best ridge model: " + str(ridge_grid.best_estimator_))
print("Best ridge score: " + str(ridge_grid.best_score_))
# + _uuid="752ad8d0e86aca8a4e540d8da569cb432983704d"
predicted_values = ridge_grid.best_estimator_.fit(X_train, y_train).predict(X_test)
evaluation(y_test, predicted_values)
#print(ridge_grid.get_params().keys())
# + [markdown] _uuid="cf0a418acdfb19cbb8d944b9a940f5ab3cf50388"
# Decision Tree Model
# + _uuid="3dbd2b715edded7d845aa933ee475bd63fd87f39"
decision_tree_parameters = {"decisiontreeregressor__criterion" : ["mse", "mae"],
                           "decisiontreeregressor__splitter" : ["best", "random"],
                           "decisiontreeregressor__min_samples_split" : [2, 3],
                           "decisiontreeregressor__max_features" : ["auto", "log2"],
                           "decisiontreeregressor__max_depth" : [5]
                           }
model_validation(DecisionTreeRegressor(), decision_tree_parameters, X_train, y_train, X_test, y_test)
# + [markdown] _uuid="f61a346aec1304ca528b02dfef6c0235655eb85c"
# Random Forest Model
# + _uuid="993f3b9c840b04b69bf69c78078e4c9af5216ce3"
random_forest_parameters = {"randomforestregressor__max_depth" : [3],
"randomforestregressor__min_samples_split" : [2, 3],
"randomforestregressor__max_features" : ["auto", "log2"]}
model_validation(RandomForestRegressor(n_estimators=10, n_jobs=-1),
random_forest_parameters, X_train, y_train, X_test, y_test)
# + [markdown] _uuid="bbd09277acc148aaf0821e2ef7f7835a9d7405ee"
# In conclusion, XGBoost performs the best based on the statistics shown above.
# -
# Final model: re-tune the XGBoost pipeline with 5-fold CV on all
# training data, then predict on the competition test set.
model_pipe = make_pipeline(preprocess, XGBRegressor())
model_grid = GridSearchCV(model_pipe, parameters,
                          verbose=1 , scoring = "r2", cv=5)
model_grid.fit(X_train, y_train)
log_predicted_values = model_grid.best_estimator_.fit(X_train, y_train).predict(test)
# np.exp undoes a log transform of the target (presumably applied
# upstream in the notebook — confirm) to recover sale prices.
submission_predictions = np.exp(log_predicted_values)
| ames_housing_prices.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from matplotlib import pyplot as plt
import os
import pandas as pd
import numpy as np
# Load the bestseller list; the TSV is expected to contain at least
# "Gender" and "year" columns (used by the cells below).
best = pd.read_csv('bestseller_genders.tsv', sep = '\t')
print(best.shape)
best.head(10)
# -
# Drop rows with unknown gender, then encode M/F as 0/1 so that the
# mean of the encoding is the fraction of books by women.
best = best[best.Gender != 'U']
equiv = {'M': 0, 'F': 1}
best['intgender'] = best.Gender.map(equiv)
# Bucket years into decades by floor division. The previous lookup
# table only covered 1900-1999 and silently produced NaN (dropping the
# row from the groupby) for any year outside that range.
best['decade'] = 10 * (best.year // 10)
best.shape
by_year = best.groupby('decade')
pctwomen = by_year.aggregate(np.mean)
# (Removed a garbled line "import matplkotpctwomen" — no such module
# exists, so the cell raised ImportError before plotting anything.)
import matplotlib
matplotlib.rcParams.update({'font.size': 18})
fig, ax = plt.subplots(figsize = (12, 8))
# intgender is 0/1, so its per-decade mean is the fraction by women.
ax.plot(pctwomen.index, pctwomen.intgender, linewidth = 3)
ax.set_title('Fraction of bestsellers by women')
ax.set_ylim(0.201, 0.45)
fig.savefig('bestsellers.png')
plt.show()
| bestsellergender/bestseller_gender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 1: Schema on Read
from pyspark.sql import SparkSession
import pandas as pd
import matplotlib
# Reuse an existing session if one is already running in this kernel.
spark = SparkSession.builder.getOrCreate()
# # Load the dataset
#Data Source: http://ita.ee.lbl.gov/traces/NASA_access_log_Jul95.gz
# Each raw log line becomes one row in a single "value" string column.
# (A duplicate read of the same file that ran before this section was
# removed — it did the same work twice.)
dfLog = spark.read.text("data/NASA_access_log_Jul95.gz")
# # Quick inspection of the data set
# see the schema
dfLog.printSchema()
# number of lines
dfLog.count()
#what's in there?
dfLog.show(5)
#a better show?
dfLog.show(5, truncate=False)
#pandas to the rescue
pd.set_option('max_colwidth', 200)
dfLog.limit(5).toPandas()
# # Let's try simple parsing with split
from pyspark.sql.functions import split
dfArrays = dfLog.withColumn("tokenized", split("value"," "))
dfArrays.limit(10).toPandas()
# # Second attempt, let's build a custom parsing UDF
# +
from pyspark.sql.functions import udf
@udf
def parseUDF(line):
import re
PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
match = re.search(PATTERN, line)
if match is None:
return (line, 0)
size_field = match.group(9)
if size_field == '-':
size = 0
else:
size = match.group(9)
return {
"host" : match.group(1),
"client_identd" : match.group(2),
"user_id" : match.group(3),
"date_time" : match.group(4),
"method" : match.group(5),
"endpoint" : match.group(6),
"protocol" : match.group(7),
"response_code" : int(match.group(8)),
"content_size" : size
}
# -
#Let's start from the beginning
dfParsed= dfLog.withColumn("parsed", parseUDF("value"))
dfParsed.limit(10).toPandas()
dfParsed.printSchema()
# # Third attempt, let's fix our UDF
# +
#from pyspark.sql.functions import udf # already imported
from pyspark.sql.types import MapType, StringType
@udf(MapType(StringType(),StringType()))
def parseUDFbetter(line):
import re
PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
match = re.search(PATTERN, line)
if match is None:
return (line, 0)
size_field = match.group(9)
if size_field == '-':
size = 0
else:
size = match.group(9)
return {
"host" : match.group(1),
"client_identd" : match.group(2),
"user_id" : match.group(3),
"date_time" : match.group(4),
"method" : match.group(5),
"endpoint" : match.group(6),
"protocol" : match.group(7),
"response_code" : int(match.group(8)),
"content_size" : size
}
# -
# Apply the fixed UDF; "parsed" is now a proper MapType column.
# (A duplicated cell that re-ran these exact two lines was removed.)
dfParsed= dfLog.withColumn("parsed", parseUDFbetter("value"))
dfParsed.limit(10).toPandas()
# Bingo!! we've got a column of type map with the fields parsed
dfParsed.printSchema()
dfParsed.select("parsed").limit(10).toPandas()
# # Let's build separate columns
dfParsed.selectExpr("parsed['host'] as host").limit(5).show(5)
dfParsed.selectExpr(["parsed['host']", "parsed['date_time']"]).show(5)
# Build one "parsed['field'] as field" SQL expression per log field,
# then project them all at once to flatten the map into columns.
fields = ["host", "client_identd","user_id", "date_time", "method", "endpoint", "protocol", "response_code", "content_size"]
exprs = [ "parsed['{}'] as {}".format(field,field) for field in fields]
exprs
dfClean = dfParsed.selectExpr(*exprs)
dfClean.limit(5).toPandas()
# ## Popular hosts
from pyspark.sql.functions import desc
dfClean.groupBy("host").count().orderBy(desc("count")).limit(10).toPandas()
# ## Popular content
from pyspark.sql.functions import desc
dfClean.groupBy("endpoint").count().orderBy(desc("count")).limit(10).toPandas()
# ## Large Files
dfClean.createOrReplaceTempView("cleanlog")
# NOTE: content_size is still a string here, so this ORDER BY sorts
# lexicographically — the typed version below fixes that.
spark.sql("""
select endpoint, content_size
from cleanlog
order by content_size desc
""").limit(10).toPandas()
from pyspark.sql.functions import expr
# Cast content_size to int so numeric ordering works.
dfCleanTyped = dfClean.withColumn("content_size_bytes", expr("cast(content_size as int)"))
dfCleanTyped.limit(5).toPandas()
dfCleanTyped.createOrReplaceTempView("cleantypedlog")
spark.sql("""
select endpoint, content_size
from cleantypedlog
order by content_size_bytes desc
""").limit(10).toPandas()
# +
# Left for you, clean the date column :)
# 1- Create a udf that parses that weird format,
# 2- Create a new column with a date-time string that spark would understand
# 3- Add a new date-time column properly typed
# 4- Print your schema
# -
| 03-datalakes-spark/03-introduction-to-datalakes/Exercise 1 - Schema On Read.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# # SimJulia
# ## Basic Concepts
#
# SimJulia is a discrete-event simulation library. The behavior of active components (like vehicles, customers or messages) is modeled with processes. All processes live in an environment. They interact with the environment and with each other via events.
#
# Processes are described by `@resumable` functions. You can call them process function. During their lifetime, they create events and `@yield` them in order to wait for them to be triggered.
using ResumableFunctions
# A resumable function: execution suspends at each @yield and resumes
# on the next call, producing the first n Fibonacci numbers one by one.
@resumable function fibonacci(n::Int) :: Int
    a = 0
    b = 1
    for i in 1:n
        @yield a
        a, b = b, a+b
    end
end
# Call the returned object directly to advance it one step at a time...
fib = fibonacci(5)
fib()
fib()
# ...or drive it with a for loop until it is exhausted.
for fib in fibonacci(10)
    println(fib)
end
# When a process yields an event, the process gets suspended. SimJulia resumes the process, when the event occurs (we say that the event is triggered). Multiple processes can wait for the same event. SimJulia resumes them in the same order in which they yielded that event.
#
# An important event type is the `timeout`. Events of this type are scheduled after a certain amount of (simulated) time has passed. They allow a process to sleep (or hold its state) for the given time. A `timeout` and all other events can be created by calling a constructor having the environment as first argument.
# ## Our First Process
#
# Our first example will be a car process. The car will alternately drive and park for a while. When it starts driving (or parking), it will print the current simulation time.
#
# So let’s start:
using SimJulia
# Alternate forever between 5 time units of parking and 2 of driving,
# announcing each transition with the current simulation time.
@resumable function car(env::Environment)
    while true
        println("Start parking at ", now(env))
        parking_duration = 5
        @yield timeout(env, parking_duration)
        println("Start driving at ", now(env))
        trip_duration = 2
        @yield timeout(env, trip_duration)
    end
end
# Our car process requires a reference to an `Environment` in order to create new events. The car‘s behavior is described in an infinite loop. Remember, the car function is a `@resumable function`. Though it will never terminate, it will pass the control flow back to the simulation once a `@yield` statement is reached. Once the yielded event is triggered (“it occurs”), the simulation will resume the function at this statement.
#
# As said before, our car switches between the states parking and driving. It announces its new state by printing a message and the current simulation time (as returned by the function call `now`). It then calls the constructor `timeout` to create a timeout event. This event describes the point in time the car is done parking (or driving, respectively). By yielding the event, it signals the simulation that it wants to wait for the event to occur.
#
# Now that the behavior of our car has been modeled, lets create an instance of it and see how it behaves:
# Create the environment, register the car process, run for 15 units.
sim = Simulation()
@process car(sim)
run(sim, 15)
# The first thing we need to do is to create an environment, e.g. an instance of `Simulation`. The macro `@process` having as argument a car process function call creates a process that is initialised and added to the environment automatically.
#
# Note, that at this time, none of the code of our process function is being executed. Its execution is merely scheduled at the current simulation time.
#
# The `Process` returned by the `@process` macro can be used for process interactions.
#
# Finally, we start the simulation by calling run and passing an end time to it.
# ## Process Interaction
#
# The `Process` instance that is returned by `@process` macro can be utilized for process interactions. The two most common examples for this are to wait for another process to finish and to interrupt another process while it is waiting for an event.
#
# ### Waiting for a Process
#
# As it happens, a SimJulia `Process` can be used like an event. If you yield it, you are resumed once the process has finished. Imagine a car-wash simulation where cars enter the car-wash and wait for the washing process to finish, or an airport simulation where passengers have to wait until a security check finishes.
#
# Lets assume that the car from our last example is an electric vehicle. Electric vehicles usually take a lot of time charging their batteries after a trip. They have to wait until their battery is charged before they can start driving again.
#
# We can model this with an additional charge process for our car. Therefore, we redefine our car process function and add a charge process function.
#
# A new charge process is started every time the vehicle starts parking. By yielding the `Process` instance that the `@process` macro returns, the run process starts waiting for it to finish:
# Model battery charging as a simple fixed-duration delay.
@resumable function charge(env::Environment, duration::Number)
    @yield timeout(env, duration)
end
# Park/charge, wait for the charge process to finish, then drive.
@resumable function car(env::Environment)
    while true
        println("Start parking and charging at ", now(env))
        charge_duration = 5
        # Fixed: schedule in the local `env`, not the global `sim` — the
        # process must use the environment it was started in, and the
        # global binding may be replaced by a later `Simulation()`.
        charge_process = @process charge(env, charge_duration)
        @yield charge_process
        println("Start driving at ", now(env))
        trip_duration = 2
        @yield timeout(env, trip_duration)
    end
end
# Starting the simulation is straightforward again: We create a `Simulation`, one (or more) cars and finally call `run`.
# Run the charging-car simulation for 15 time units.
sim = Simulation()
@process car(sim)
run(sim, 15)
# ### Interrupting Another Process
#
# Imagine, you don’t want to wait until your electric vehicle is fully charged but want to interrupt the charging process and just start driving instead.
#
# SimJulia allows you to interrupt a running process by calling the `interrupt` function:
# Wait 3 time units, then interrupt the car's (charging) process.
@resumable function driver(env::Environment, car_process::Process)
    @yield timeout(env, 3)
    @yield interrupt(car_process)
end
# The driver process has a reference to the car process. After waiting for 3 time steps, it interrupts that process.
#
# Interrupts are thrown into process functions as `Interrupt` exceptions that can (should) be handled by the interrupted process. The process can then decide what to do next (e.g., continuing to wait for the original event or yielding a new event):
# Like the previous car, but tolerates being interrupted mid-charge.
@resumable function car(env::Environment)
    while true
        println("Start parking and charging at ", now(env))
        charge_duration = 5
        # Fixed: use the local `env` rather than the global `sim`, so the
        # process still works after `sim` is rebound to a new Simulation.
        charge_process = @process charge(env, charge_duration)
        try
            @yield charge_process
        catch
            # The driver's interrupt is delivered here as an exception.
            println("Was interrupted. Hopefully, the battery is full enough ...")
        end
        println("Start driving at ", now(env))
        trip_duration = 2
        @yield timeout(env, trip_duration)
    end
end
# When you compare the output of this simulation with the previous example, you’ll notice that the car now starts driving at time 3 instead of 5:
# The driver interrupts charging at t=3, so driving starts at 3, not 5.
sim = Simulation()
car_process = @process car(sim)
@process driver(sim, car_process)
run(sim, 15)
# ## Shared Resources
#
# SimJulia offers three types of resources that help you modeling problems, where multiple processes want to use a resource of limited capacity (e.g., cars at a fuel station with a limited number of fuel pumps) or classical producer-consumer problems.
#
# In this section, we’ll briefly introduce SimJulia’s Resource class.
#
# ### Basic Resource Usage
#
# We’ll slightly modify our electric vehicle process car that we introduced in the last sections.
#
# The car will now drive to a battery charging station (BCS) and request one of its two charging spots. If both of these spots are currently in use, it waits until one of them becomes available again. It then starts charging its battery and leaves the station afterwards:
# Drive to the battery charging station, wait for one of its spots
# (a Resource), charge, then release the spot and leave.
@resumable function car(env::Environment, name::Int, bcs::Resource, driving_time::Number, charge_duration::Number)
    # Fixed: timeouts now use the local `env` instead of the global
    # `sim`, which may be rebound between definition and execution.
    @yield timeout(env, driving_time)
    println(name, " arriving at ", now(env))
    @yield request(bcs)
    println(name, " starting to charge at ", now(env))
    @yield timeout(env, charge_duration)
    println(name, " leaving the bcs at ", now(env))
    @yield release(bcs)
end
# The resource’s `request` function generates an event that lets you wait until the resource becomes available again. If you are resumed, you “own” the resource until you release it.
#
# You are responsible to call release once you are done using the resource. When you `release` a resource, the next waiting process is resumed and now “owns” one of the resource’s slots. The basic Resource sorts waiting processes in a FIFO (first in—first out) way.
#
# A resource needs a reference to an `Environment` and a capacity when it is created:
# A charging station with two spots, shared by four staggered cars.
sim = Simulation()
bcs = Resource(sim, 2)
# We can now create the car processes and pass a reference to our resource as well as some additional parameters to them
for i in 1:4
    @process car(sim, i, bcs, 2i, 5)
end
# Finally, we can start the simulation. Since the car processes all terminate on their own in this simulation, we don’t need to specify an until time — the simulation will automatically stop when there are no more events left:
run(sim)
# ### Priority resource
#
# As you may know from the real world, not every one is equally important. To map that to SimJulia, the methods `request(res, priority=priority)` and `release(res, priority=priority)` lets requesting and releasing processes provide a priority for each request/release. More important requests/releases will gain access to the resource earlier than less important ones. Priority is expressed by integer numbers; smaller numbers mean a higher priority:
# +
# Request/hold/release a one-slot resource; a smaller `prio` number
# means a more important request.
@resumable function resource_user(sim::Simulation, name::Int, res::Resource, wait::Float64, prio::Int)
    @yield timeout(sim, wait)
    println("$name Requesting at $(now(sim)) with priority=$prio")
    @yield request(res, priority=prio)
    println("$name got resource at $(now(sim))")
    @yield timeout(sim, 3.0)
    @yield release(res)
end
sim = Simulation()
res = Resource(sim, 1)
@process resource_user(sim, 1, res, 0.0, 0)
@process resource_user(sim, 2, res, 1.0, 0)
# Priority -1 jumps the queue ahead of the earlier priority-0 request.
@process resource_user(sim, 3, res, 2.0, -1)
run(sim)
# -
# Although the third process requested the resource later than the second, it could use it earlier because its priority was higher.
# ### Containers
#
# Containers help you modelling the production and consumption of a homogeneous, undifferentiated bulk. It may either be continuous (like water) or discrete (like apples).
#
# You can use this, for example, to model the gas / petrol tank of a gas station. Tankers increase the amount of gasoline in the tank while cars decrease it.
#
# The following example is a very simple model of a gas station with a limited number of fuel dispensers (modeled as `Resource`) and a tank modeled as `Container`:
# +
# A gas station: 2 fuel dispensers (Resource) and a shared tank
# (Container, capacity 1000.0, starting level 100.0).
struct GasStation
    fuel_dispensers :: Resource
    gas_tank :: Container{Float64}
    function GasStation(env::Environment)
        gs = new(Resource(env, 2), Container(env, 1000.0, level=100.0))
        # Start monitoring as soon as the station is constructed.
        @process monitor_tank(env, gs)
        return gs
    end
end
# Poll the tank every 15 time units; dispatch a tanker when it is low.
@resumable function monitor_tank(env::Environment, gs::GasStation)
    while true
        if gs.gas_tank.level < 100.0
            println("Calling tanker at $(now(env))")
            @process tanker(env, gs)
        end
        @yield timeout(env, 15.0)
    end
end
# A tanker takes 10 time units to arrive, then fills the tank to capacity.
@resumable function tanker(env::Environment, gs::GasStation)
    @yield timeout(env, 10.0)
    println("Tanker arriving at $(now(env))")
    amount = gs.gas_tank.capacity - gs.gas_tank.level
    @yield put(gs.gas_tank, amount)
end
# A car takes a dispenser, draws 40.0 units of fuel from the tank,
# spends 15 time units refueling, then frees the dispenser.
@resumable function car(env::Environment, name::Int, gs::GasStation)
    println("Car $name arriving at $(now(env))")
    @yield request(gs.fuel_dispensers)
    println("Car $name starts refueling at $(now(env))")
    @yield get(gs.gas_tank, 40.0)
    @yield timeout(env, 15.0)
    @yield release(gs.fuel_dispensers)
    println("Car $name done refueling at $(now(env))")
end
# Spawn 4 cars, one every 5 time units.
@resumable function car_generator(env::Environment, gs::GasStation)
    for i = 0:3
        @process car(env, i, gs)
        @yield timeout(env, 5.0)
    end
end
sim = Simulation()
gs = GasStation(sim)
@process car_generator(sim, gs)
run(sim, 55.0)
# -
# Priorities can be given to a `put` or a `get` event by setting the named argument priority.
# ### Stores
#
# Using a `Store` you can model the production and consumption of concrete objects (in contrast to the rather abstract “amount” stored in a Container). A single `Store` can even contain multiple types of objects.
#
# A custom function can also be used to filter the objects you get out of the `store`.
#
# Here is a simple example modelling a generic producer/consumer scenario:
# +
# Producer: adds one "spam" item every 2 time units; the put blocks
# while the store is at capacity.
@resumable function producer(env::Environment, sto::Store)
    for i = 1:100
        @yield timeout(env, 2.0)
        @yield put(sto, "spam $i")
        println("Produced spam at $(now(env))")
    end
end
# Consumer: requests an item every time unit; the get blocks while the
# store is empty.
@resumable function consumer(env::Environment, name::Int, sto::Store)
    while true
        @yield timeout(env, 1.0)
        println("$name requesting spam at $(now(env))")
        item = @yield get(sto)
        println("$name got $item at $(now(env))")
    end
end
sim = Simulation()
sto = Store{String}(sim, capacity=UInt(2))
@process producer(sim, sto)
consumers = [@process consumer(sim, i, sto) for i=1:2]
run(sim, 5.0)
# -
# A `Store` with a filter on the `get` event can, for example, be used to model machine shops where machines have varying attributes. This can be useful if the homogeneous slots of a` Resource` are not what you need:
# +
# Machines differ by size; users ask the store for one of a given size.
struct Machine
    size :: Int
    duration :: Float64
end
# Take a machine of the requested size (filtered get), hold it for its
# duration, then return it to the store.
@resumable function user(env::Environment, name::Int, sto::Store, size::Int)
    machine = @yield get(sto, (mach::Machine)->mach.size == size)
    println("$name got $machine at $(now(env))")
    @yield timeout(env, machine.duration)
    @yield put(sto, machine)
    println("$name released $machine at $(now(env))")
end
# Stock the shop with one machine of each size.
@resumable function machineshop(env::Environment, sto::Store)
    m1 = Machine(1, 2.0)
    m2 = Machine(2, 1.0)
    @yield put(sto, m1)
    @yield put(sto, m2)
end
sim = Simulation()
sto = Store{Machine}(sim, capacity=UInt(2))
ms = @process machineshop(sim, sto)
users = [@process user(sim, i, sto, (i % 2) + 1) for i=0:2]
run(sim)
# -
| Lectures/Lecture 13.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
from plots import *
# # Information Theory
#
# Information theory is concerned with representing data in a compact fashion (a task known as **data compression** or **source coding**), as well as with transmitting and storing it in a way that is robust to errors (a task known as **error correction** or **channel coding**)
# ## Entropy
# The **entropy** of a random variable $X$ with distribution $p$, denoted by $\mathbb{H}(X)$ or sometimes $\mathbb{H}(p)$, is a measure of its uncertainty. In particular, for a discrete variable with $K$ states, it is defined by
#
# $$
# \mathbb{H}(X)\triangleq - \sum_{k=1}^Kp(X=k)\log_2 p(X=k)
# $$
#
# For the special case of binary random variables, $X\in\{0, 1\}$, we can write $p(X=1) = \theta$ and $p(X=0) = 1-\theta$. Hence the entropy becomes
#
# $$
# \mathbb{H}(X) = -[\theta\log_2\theta + (1-\theta)\log_2(1-\theta)]
# $$
#
# This is called the **binary entropy function**, and is also written $\mathbb{H}(\theta)$. We see that the maximum value of 1 occurs when the distribution is uniform, $\theta = 0.5$.
# Plot the binary entropy function H(theta); per the text above it
# peaks at 1 when theta = 0.5.
bernoulli_entropy_fig()
# ## KL divergence
#
# One way to measure the dissimilarity of two probability distributions $p$ and $q$ is known as the **Kullback-Leibler divergence (KL divergence)** or **relative entropy**. This is defined as follows:
#
# $$
# \mathbb{K}\mathbb{L}(p||q)\triangleq\sum_{k=1}^K p_k\log \frac{p_k}{q_k}
# $$
#
# where the sum gets replaced by an integral for pdfs. Note that the KL divergence is **not** a distance, since it is asymmetric, i.e. $\mathbb{K}\mathbb{L}(p||q) \ne \mathbb{K}\mathbb{L}(q||p)$.
#
# We can rewrite this as
#
# $$
# \mathbb{K}\mathbb{L}(p||q) = \sum_k p_k\log p_k -\sum_k p_k\log q_k = \mathbb{H}(p) + \mathbb{H}(p, q)
# $$
#
# where $\mathbb{H}(p, q)$ is called the **cross entropy**,
#
# $$
# \mathbb{H}(p, q)\triangleq -\sum_k p_k\log q_k
# $$
#
# One can show that the KL divergence is the average number of *extra* bits needed to encode the data, due to the fact that we used distribution $q$ to encode the data instead of the true distribution $p$.
#
# **Important:** $\mathbb{K}\mathbb{L}(p ||q) \ge 0$, and that the KL divergence is only equal to zero iff $q = p$.
# ## Mutual Information
#
# Consider two random variables $X$ and $Y$. Suppose we want to know how much knowing one variable tells us about the other. A general approach is to determine how similar the joint distribution $p(X, Y)$ is to the factored distribution $p(X)p(Y)$. This is called the **mutual information** or **MI** and is defined as follows:
#
# $$
# \mathbb{I}(X, Y) \triangleq \mathbb{K}\mathbb{L}(p(X, Y) || p(X)p(Y)) = \sum_x\sum_y p(x, y)\log\frac{p(x, y)}{p(x)p(y)}
# $$
# We have $\mathbb{I}(X, Y) \ge 0$ with equality iff $p(X, Y) = p(X)p(Y)$. That is the MI is zero iff the variables are independent.
#
# For continuous random variables, we use the **maximal information coefficient** (MIC), defined as follows:
#
# $$
# m(x, y) = \frac{\max_{G\in G(x, y)}\mathbb{I}(X(G); Y(G))}{\log\min(x, y)}
# $$
#
# where $G(x, y)$ is the set of 2d grids of size $x\times y$, and $X(G)$, $Y(G)$ represents a discretization of the variables onto this grid. Now define MIC as
#
# $$
# \mathrm{MIC}\triangleq \max_{x, y:xy < B} m(x, y)
# $$
#
# where $B$ is some sample-size dependent bound on the number of bins we can use and still reliably estimate the distribution.
#
# Statistics such as MIC based on mutual information can be used to discover interesting relationships between variables in a way that simpler measures, such as correlation coefficients, cannot.
| distributions/Information-Theory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plain examples and tests for the Models and Model classes
# If you are looking for a quick example on how to use the `smartdoc15_ch1` package, we recommend you start by looking at the tutorials instead.
# ## Import and creation of the `Models` object
from smartdoc15_ch1 import Models
# Load the model set from a local copy; do not fetch it if absent.
m = Models(data_home="/data/competitions/2015-ICDAR-smartdoc/challenge1/99-computable-version-2017-test",
           download_if_missing=False)
type(m)
# Like `Dataset`, the `Models` class is an enhanced `list` of all the `Model` relevant objects.
# You can select which variant of model images you want to use when creating the `Models` object.
m = Models(data_home="/data/competitions/2015-ICDAR-smartdoc/challenge1/99-computable-version-2017-test",
           download_if_missing=False,
           variant=Models.VARIANT_04_CORRECTED)
# Legitimate variants codes are available as:
(Models.VARIANT_01_ORIGINAL,
 Models.VARIANT_02_EDITED,
 Models.VARIANT_03_CAPTURED,
 Models.VARIANT_04_CORRECTED,
 Models.VARIANT_05_SCALED33,)
# ## Content of `Models`
# Like `list` objects, a `Models` object can be accessed using indexes, iterated, etc.
len(m)
# Note that here, for testing purposes, we use a reduced version of the models set.
# ## `Model` objects
# Note that `Models` (plural) refers to the set of models, and `Model` refers the a single element.
m0 = m[0]
m0
type(m0)
# A `Model` object contains everything you could need to know about a model image, accessible in a `dict`-based fashion.
m0["image_path"], m0["model_cat"], m0["model_name"], m0["model_id"]
# ## Reading model images
# Like `Frame` objects, you can obtain the associated image of a model using a `read_image()` method.
m0_image = m0.read_image()
# Grayscale by default, so shape is (height, width).
m0_image.shape
# By default, the image is loaded in grayscale at its original size, but one can ask for color and/or resized version.
m0_image_color_resize = m0.read_image(color=True, scale_factor=0.5)
m0_image_color_resize.shape
# ## Other methods and properties of `Models`
# We provide other methods and properties for the `Models` class to facilitate the access to several pieces of information.
# You can retrieve the list of model ids and model type ids in one line:
m.model_ids
m.modeltype_ids
# You can also list all the possible values for the `model_id` and the `modeltype_id` fields.
#
# *(Note that here this examples uses a reduced version of the model set.)*
m.unique_model_ids
m.unique_model_names
m.unique_modeltype_ids
m.unique_modeltype_names
# Finally, the underlying Pandas Dataframe is made available directly in case you need more flexibility.
m.raw_dataframe
| example_notebooks/load_test_Models_py3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basics of Supervised Learning (mostly Classification)
#
# Recall that the field of Supervised Learning is one in which you're trying to learn a function: $$f(x)=y$$ where you have some **labeled** data: that is, you have a dataset with both the inputs $x$ and the correct outputs $y$. Here are some examples:
#
# 1. Perhaps you're trying to predict the market rate of houses, and you have a collection of data on houses (such as address, town, number of beds/baths, square footage, acreage of yard, _etc._). That's the _input_, $x$. Suppose however you also have how much each house recently sold for. That's the _output_ of your desired function, $y$.
# 2. Suppose you have a dataset of some health diagnostic tests for a person (such as their height, weight, blood pressure, blood sugar, family history of heart disease, _etc._), and you also know whether or not they've been diagnosed with diabetes. If you'd like to train a model to predict whether someone has diabetes, then their diagnostics are your $x$ and whether they've been diagnosed is your $y$.
#
# In these two cases, you're performing similar tasks: given some _labeled training data_, learn how to make a prediction on future data. These are both examples of _Supervised Learning_. The difference between them is:
#
# 1. Housing prices could be any real-valued number, realistically something in the tens of thousands to millions of dollars. This process is called **Regression**.
# 2. Whether someone has a disease is deciding on a class: there is a set, finite (usually small) number of possible outputs. This process is called **Classification**.
#
# Let's take an example dataset.
#
#
# +
import random
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('fivethirtyeight')
from sklearn.linear_model import LogisticRegression
# -
def generate_data(seed, num_samples=10, noise=0.2):
    """Build a toy labeled dataset linking blood pressure to a disease.

    Seeds the global `random` module for reproducibility, draws
    `num_samples` blood pressures in [100, 164], and labels each one:
    1 when pressure > 145, 0 otherwise — except that with probability
    `noise` the label is flipped to simulate measurement noise.
    Returns a DataFrame with columns 'blood_pressure' and 'has_disease'.
    """
    random.seed(seed)

    def label_for(pressure):
        # One random() draw per sample decides whether to flip the label.
        flip = random.random() < noise
        base = 1 if pressure > 145 else 0
        return 1 - base if flip else base

    index = list(range(num_samples))
    pressures = [random.randrange(100, 165) for _ in index]
    labels = [label_for(p) for p in pressures]
    return pd.DataFrame(
        {'blood_pressure': pressures, 'has_disease': labels},
        index=index,
    )
# Fixed seed so the notebook output is reproducible.
df = generate_data(14513)
df
# Okay, so we have some very simple data. Let's graph it.
# +
plt.scatter(df['blood_pressure'], df['has_disease'])
plt.xlabel('Blood Pressure')
plt.ylabel('Has Disease')
plt.yticks([0,1])
# Pad the y-axis so the 0/1 points are not on the plot border.
plt.ylim([-0.5, 1.5])
plt.xlim([100, 165])
plt.show()
# -
# Our data suggests that there is in fact some correlation with blood pressure and this mystery disease. This is where machine learning comes in. Suppose we are able to generate some function $f$, which produces a diagnosis given a blood pressure reading. Here's an example function that we may be able to produce using a classification algorithm known as _Logistic Regression_.
# +
DATA_MEAN = df['blood_pressure'].mean()
DATA_STD = df['blood_pressure'].std()
# Because this algorithm is high-powered and ready to be utilized in the real world, it's expecting
# more than one dimension of input. Thus, we use this silly reshape command because our input,
# blood pressure, is one-dimensional. We also clean up the data to have mean 0 and a standard
# deviation of 1 when it goes into the model.
def prepare_inputs(X):
    """Standardize blood pressures and shape them for scikit-learn.

    Centers on DATA_MEAN, scales by DATA_STD, and reshapes the 1-D array
    into the (n_samples, 1) matrix the model expects.
    """
    standardized = (X - DATA_MEAN) / DATA_STD
    return standardized.reshape(-1, 1)
X = prepare_inputs(df['blood_pressure'].values)
model = LogisticRegression()
model.fit(X, df['has_disease'])
# -
# So we have a model that has been trained on our data. So what? Let's figure out what it would predict for any given value in our range, and plot that. This will give us a curve that we can interpret.
# +
x_range = [90, 180]
bp_range = np.arange(x_range[0], x_range[1], 0.1)
outputs = model.predict_proba(prepare_inputs(bp_range))[:,1]
plt.scatter(df['blood_pressure'], df['has_disease'])
plt.xlabel('Blood Pressure')
plt.ylabel('Has Disease')
plt.yticks([0,1])
plt.ylim([-0.5, 1.5])
plt.xlim(x_range)
plt.plot(bp_range, outputs, color='blue')
plt.show()
# -
# So this _S_-curve in our graph is the output of our function. Why isn't it $0$ or $1$? We interpret it as the _probability of being $1$_, because it's a number between 0 and 1 that tends toward 0 near the cluster of 0's in the input data, and toward 1 near the cluster of 1's in the input data.
#
# So now the question is: what do we do with this? Well, one thing you can do is to make a choice for a cutoff probability. One obvious guess is 50\%: if I'm at least 50\% confident that the subject is a 1, I'll predict them to be a 1, otherwise they're a 0.
# +
# We want to figure out at which blood pressure value my model predicts 50%. This is something
# particular to Logistic Regression, you don't need to understand why this line works.
# The logistic model crosses 50% where its linear term is zero; this undoes
# the standardization to express that point in raw blood-pressure units.
cutoff_bp = -1 * model.intercept_[0] * DATA_STD / model.coef_[0] + DATA_MEAN
## Old:
plt.scatter(df['blood_pressure'], df['has_disease'])
plt.xlabel('Blood Pressure')
plt.ylabel('Has Disease')
plt.yticks([0,1])
plt.ylim([-0.5, 1.5])
plt.xlim(x_range)
## New:
# Horizontal dashed line at the 50% probability threshold.
plt.plot(x_range, [0.5, 0.5], 'r--', linewidth=2, zorder=1)
plt.text(x_range[1], 0.45, '50%', color='red')
# Vertical dashed line at the blood pressure whose predicted probability is 50%.
plt.plot([cutoff_bp, cutoff_bp], [-0.5, 1.5], 'g--', linewidth=2, zorder=1)
plt.text(cutoff_bp + 2, 1.3, 'Prediction: 1 $\longrightarrow$', color='green')
plt.text(cutoff_bp - 2, 1.3, '$\longleftarrow$ Prediction: 0', color='green', ha='right')
plt.scatter([cutoff_bp], [0.5], s=100, zorder=2, color='green')
## Old:
plt.plot(bp_range, outputs, color='blue', zorder=1)
plt.show()
# -
# What you now see is that there are four quadrants of data points.
#
# ### Correct
# * In the top right corner are patients _with_ the disease that we successfully predicted _have_ the disease. These are called **true positives**.
# * In the bottom left corner are patients _without_ the disease that we successfully predicted _do not have_ the disease. These are called **true negatives**.
#
# ### Incorrect
# * In the top left corner are patients _with_ the disease that we incorrectly predicted _do not have_ the disease. These are called **false negatives**. (_Think:_ "It's false that we predicted them to be negative")
# * In the bottom right corner are patients _without_ the disease that we incorrectly predicted _have_ the disease. These are called **false positives**. (_Think:_ "It's false that we predicted them to be positive")
# +
# Predict a 0/1 label for every training point with the fitted model.
df['prediction'] = model.predict(prepare_inputs(df['blood_pressure'].values))
### For clarity's sake, let's split the data into its four categories, and then color them accordingly
true_positives = df[(df['has_disease'] == 1) & (df['prediction'] == 1)]
true_negatives = df[(df['has_disease'] == 0) & (df['prediction'] == 0)]
false_negatives = df[(df['has_disease'] == 1) & (df['prediction'] == 0)]
false_positives = df[(df['has_disease'] == 0) & (df['prediction'] == 1)]
# Each entry: (subset, legend label, point color, [x, y] position of the text).
datasets = [
    (true_positives, 'True Positives', 'blue', [cutoff_bp + 20, 1.3]),
    (true_negatives, 'True Negatives', 'green', [x_range[0], -0.3]),
    (false_negatives, 'False Negatives', 'red', [x_range[0], 1.3]),
    (false_positives, 'False Positives', 'orange', [cutoff_bp + 20, -0.3])
]
# Plot each quadrant in its own color with an explanatory label.
for dataset, title, color, loc in datasets:
    plt.scatter(dataset['blood_pressure'], dataset['has_disease'], c=color)
    plt.text(loc[0], loc[1], title, color=color)
## Old:
plt.xlabel('Blood Pressure')
plt.ylabel('Has Disease')
plt.yticks([0,1])
plt.ylim([-0.5, 1.5])
plt.xlim(x_range)
# Threshold lines: 50% probability and the equivalent blood pressure.
plt.plot(x_range, [0.5, 0.5], 'k--', linewidth=2)
plt.plot([cutoff_bp, cutoff_bp], [-0.5, 1.5], 'k--', linewidth=2)
plt.plot(bp_range, outputs, color='blue')
plt.show()
# -
# One final caveat: Whether something _in the training data_ was predicted correctly or incorrectly is not as important as you might expect. The real goal here is to minimize incorrect predictions in data _you haven't seen before_, which is much harder, and is the focus of a great deal of tools. Nonetheless, we could calculate our **training accuracy score**:
print('Training Accuracy:', df[df['has_disease'] == df['prediction']].shape[0] / df.shape[0])
# Now, just because it's fun, let's turn all of the above code into a more interactive simulation. Below I've bundled all the code up into a function with lots of parameters. Try calling the function with various parameters to see what effect that has on your data and your model!
def create_and_test_data_and_model(
    seed=14513,
    num_samples=10,
    noise=0.2,
    standardize_data=True,
):
    """Run the whole demo end to end: simulate data, fit a logistic
    regression, plot the four prediction quadrants, and print the
    training accuracy.

    Parameters
    ----------
    seed : int or None
        Seed for the data generator (None gives a fresh random dataset).
    num_samples : int
        Number of simulated patients.
    noise : float
        Probability that any one label is flipped.
    standardize_data : bool
        If True, inputs are centered/scaled before fitting; if False,
        mean 0 and std 1 are used so the raw values pass through unchanged.
    """
    ### Generate data and calculate statistics
    df = generate_data(seed, num_samples=num_samples, noise=noise)
    # Pad the plotting range 10 units past the observed extremes.
    x_range = [df['blood_pressure'].min() - 10, df['blood_pressure'].max() + 10]
    data_mean = df['blood_pressure'].mean() if standardize_data else 0
    data_std = df['blood_pressure'].std() if standardize_data else 1
    def prepare_inputs(X):
        # Standardize and reshape to the (n_samples, 1) matrix sklearn expects.
        X = X - data_mean
        X = X / data_std
        return X.reshape(-1, 1)
    X = prepare_inputs(df['blood_pressure'].values)
    ### Train model and make predictions
    model = LogisticRegression()
    model.fit(X, df['has_disease'])
    bp_range = np.arange(x_range[0], x_range[1], 0.1)
    # Probability of class 1 across the whole blood-pressure range.
    outputs = model.predict_proba(prepare_inputs(bp_range))[:,1]
    # Blood pressure at which the model predicts exactly 50% (undoes the
    # standardization applied in prepare_inputs).
    cutoff_bp = -1 * model.intercept_[0] * data_std / model.coef_[0] + data_mean
    df['prediction'] = model.predict(prepare_inputs(df['blood_pressure'].values))
    ### Split the data into its four categories
    true_positives = df[(df['has_disease'] == 1) & (df['prediction'] == 1)]
    true_negatives = df[(df['has_disease'] == 0) & (df['prediction'] == 0)]
    false_negatives = df[(df['has_disease'] == 1) & (df['prediction'] == 0)]
    false_positives = df[(df['has_disease'] == 0) & (df['prediction'] == 1)]
    # Each entry: (subset, legend label, point color, [x, y] text position).
    datasets = [
        (true_positives, 'True Positives', 'blue', [cutoff_bp + 20, 1.3]),
        (true_negatives, 'True Negatives', 'green', [x_range[0], -0.3]),
        (false_negatives, 'False Negatives', 'red', [x_range[0], 1.3]),
        (false_positives, 'False Positives', 'orange', [cutoff_bp + 20, -0.3])
    ]
    ### Create graph
    for dataset, title, color, loc in datasets:
        plt.scatter(dataset['blood_pressure'], dataset['has_disease'], c=color)
        plt.text(loc[0], loc[1], title, color=color)
    plt.xlabel('Blood Pressure')
    plt.ylabel('Has Disease')
    plt.yticks([0,1])
    plt.ylim([-0.5, 1.5])
    plt.xlim(x_range)
    # Threshold lines: 50% probability and the equivalent blood pressure.
    plt.plot(x_range, [0.5, 0.5], 'k--', linewidth=2)
    plt.plot([cutoff_bp, cutoff_bp], [-0.5, 1.5], 'k--', linewidth=2)
    plt.plot(bp_range, outputs, color='blue')
    # Fraction of training points whose prediction matches the true label.
    print('Training Accuracy:', df[df['has_disease'] == df['prediction']].shape[0] / df.shape[0])
    plt.show()
create_and_test_data_and_model(
seed=None,
num_samples=100,
noise=0.15,
standardize_data=True,
)
| lessons/Basics of Supervised Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import json
from sklearn.utils import shuffle
import pickle
from gensim.models import Word2Vec
from utils.item2vec_recommender_utils import hit_rate_evaluate, recommend, get_similar_artists
from utils.metrics import MAPk, precision, recall
import warnings
warnings.filterwarnings("ignore")
# ### 1. Load prepared data
train_grouped = pd.read_pickle('data/train_mod_grouped.pickle')
test_grouped = pd.read_pickle('data/test_grouped.pickle')
left_out_df = pd.read_pickle('data/left_out_df.pickle')
val_df = train_grouped[train_grouped['user_id'].isin(left_out_df['user_id'])]
train_grouped.head()
left_out_df.head()
train_lst = list(train_grouped['persons_lst'])
# ### 2. Prepare artists names and left-out set dictionaries
people_df = pd.read_pickle('data/new_persons_df.pickle')
people_df
people_df['person_id'] = people_df['person_id'].astype(str)
len(people_df['person_id'].unique())
people_dict = pd.Series(people_df['person_name'].values, index=people_df['person_id']).to_dict()
people_dict['29692']
# *****************
left_out_dict = pd.Series(left_out_df['person_id'].values, index=left_out_df['user_id']).to_dict()
# ### 3. Train word2vec embeddings with different parameters
# +
params1 = dict(size = 50,
min_count = 1,
window = 5,
sg = 1,
hs = 0)
params2 = dict(size = 50,
min_count = 1,
window = 5,
sg = 1,
hs = 1)
params3 = dict(size = 100,
min_count = 1,
window = 10,
sg = 1,
hs = 0)
params4 = dict(size = 50,
min_count = 1,
window = 10,
sg = 1,
hs = 0)
params5 = dict(size = 100,
min_count = 1,
window = 5,
sg = 1,
hs = 1)
params_lst = [params1, params2, params3, params4, params5]
# -
# %%time
# Train one Word2Vec model per parameter set and persist each to disk.
# enumerate(..., start=1) replaces the original zip(range(len(...)), ...)
# pattern and yields the 1-based model number used in the save path directly.
for model_no, params in enumerate(params_lst, start=1):
    model = Word2Vec(**params)
    model.build_vocab(train_lst, progress_per=200)
    model.train(train_lst, total_examples=model.corpus_count,
                epochs=10, report_delay=1)
    model.save('item2vec_savings/model' + str(model_no) + '.sav')
model1 = Word2Vec.load('item2vec_savings/model1.sav')
model2 = Word2Vec.load('item2vec_savings/model2.sav')
model3 = Word2Vec.load('item2vec_savings/model3.sav')
model4 = Word2Vec.load('item2vec_savings/model4.sav')
model5 = Word2Vec.load('item2vec_savings/model5.sav')
print(model1)
print(model2)
print(model3)
print(model4)
print(model5)
models = [model1, model2, model3, model4, model5]
# ### 4. Hit rate evaluation
# Calculate the hit rate using the leave-one-out data in order to decide which model is better
# +
# %%time
hit_rates = []
for model in models:
hit_rate = hit_rate_evaluate(model, val_df, left_out_dict, people_dict)
hit_rates.append(hit_rate)
# -
print(hit_rates)
# Model 5 gave the best hit rate
# ### 5. Make recommendations for the test set and calculate MAPk and recall
test_grouped
def separate_pers_lst(row):
    """Split a user's unique artists into two halves for evaluation.

    The first half ('persons_lst') is fed to the recommender as listening
    history; the second half ('right_lst') is held out as ground truth.

    NOTE: the original used list(set(...)), whose ordering is not
    deterministic across interpreter runs (string hash randomization), so
    the train/holdout split — and hence the reported metrics — could vary.
    dict.fromkeys deduplicates while preserving first-seen order, making
    the split reproducible.
    """
    persons_lst = list(dict.fromkeys(row['persons_lst']))
    n = len(persons_lst)
    return {'persons_lst': persons_lst[:n // 2],
            'right_lst': persons_lst[n // 2:]}
test_df_sep = pd.DataFrame(list(test_grouped.apply(lambda row: separate_pers_lst(row), axis=1)))
test_df_sep['user_id'] = test_grouped['user_id']
# %%time
top_20_names, top_20_ids = recommend(model5, test_df_sep, people_dict)
y_true = test_df_sep['right_lst']
y_pred = top_20_ids
# +
mapk_scores = []
for t, p in zip(y_true, y_pred):
mapk_scores.append(MAPk(t,p))
# -
np.mean(mapk_scores)
# +
rec = []
for t, p in zip(y_true, y_pred):
rec.append(recall(t,p))
# -
np.mean(rec)
scores_dict = {'mapk': mapk_scores,
'recall': rec}
a_file = open("item2vec_savings/scores_dict.pkl", "wb")
pickle.dump(scores_dict, a_file)
a_file.close()
# ### 6. List top-20 most similar artists for some artists
#pop singer
people_dict['29692']
names, ids = get_similar_artists(model5, model5['29692'], people_dict)
names
# **************
#rock
people_dict['157384']
names, ids = get_similar_artists(model5, model5['157384'], people_dict)
names
# *****************
#rap
people_dict['211094']
names, ids = get_similar_artists(model5, model5['211094'], people_dict)
names
| item2vec.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Comparing the Estimation Methods
# ## Introduction
# We conducted a simulation study demonstrating the performances of Meir et al. (2022) [1] and comparing it with that of Lee et al. (2018) [2].
#
# The data was generated in the same way as in Usage Example section, i.e. $M=2$ competing events, $n=50,000$ observations, Z with 5 covariates and right censoring.
#
# Failure times were generated based on
#
# $$
# \lambda_{j}(t|Z) = \frac{\exp(\alpha_{jt}+Z^{T}\beta_{j})}{1+\exp(\alpha_{jt}+Z^{T}\beta_{j})}
# $$
#
# with
#
# $\alpha_{1t} = -1 -0.3 \log(t)$,
#
# $\alpha_{2t} = -1.75 -0.15\log(t)$, $t=1,\ldots,d$,
#
# $\beta_1 = (-\log 0.8, \log 3, \log 3, \log 2.5, \log 2)$,
#
# $\beta_{2} = (-\log 1, \log 3, \log 4, \log 3, \log 2)$.
#
# Censoring time for each observation was sampled from a discrete uniform distribution, i.e. $C_i \sim \mbox{Uniform}\{1,...,d+1\}$.
#
# We repeated this procedure for $d \in (15, 30, 45, 60, 100)$ and report the results in Meir et al. (2022) [1]. For each value of $d$, the results are based on 100 replications.
#
# We showed that both estimation methods perform very well in terms of bias and provide highly similar results in terms of point estimators and their standard errors. However, the computational running time of our approach is 1.5-3.5 times shorter depending on $d$, where the improvement factor increases as a function of $d$.
# ## Estimation Replications
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from pydts.examples_utils.generate_simulations_data import generate_quick_start_df
import warnings
pd.set_option("display.max_rows", 500)
warnings.filterwarnings('ignore')
# %matplotlib inline
# +
# True model parameters used to simulate the discrete-time competing-risks
# data: alpha[j] is event j's time-varying intercept alpha_{jt}, and
# beta[j] its covariate-effect vector beta_j (see the formulas above).
# NOTE(review): the markdown states beta_1 = (-log 0.8, log 3, ...) while
# this computes -log of *every* entry — confirm the intended sign convention.
real_coef_dict = {
    "alpha": {
        1: lambda t: -1 - 0.3 * np.log(t),
        2: lambda t: -1.75 - 0.15 * np.log(t)
    },
    "beta": {
        1: -np.log([0.8, 3, 3, 2.5, 2]),
        2: -np.log([1, 3, 4, 3, 2])
    }
}
# Simulation size: 50,000 patients, 5 covariates.
n_patients = 50000
n_cov = 5
# -
from pydts.fitters import repetitive_fitters
rep_dict, times_dict, counts_df = repetitive_fitters(rep=100, n_patients=n_patients, n_cov=n_cov, d_times=60,
j_events=2, pid_col='pid', test_size=0.25, verbose=0,
real_coef_dict=real_coef_dict, censoring_prob=0.8)
# ## Comparing Standard Error of Lee et al. (2018) and Meir et al. (2022)
# +
from pydts.examples_utils.plots import plot_reps_coef_std
new_res_dict = plot_reps_coef_std(rep_dict, True)
# -
# ## Comparison of the Estimated Coefficients
from pydts.examples_utils.plots import plot_models_coefficients
a = new_res_dict['alpha']
b = new_res_dict['beta']
times = [t+1 for t in list(a[1].reset_index().index)]
n_cov = 5
temp_c_df = counts_df.loc[[1,2]].groupby(['X']).sum().values.flatten().astype(int)
plot_models_coefficients(a, b, times, temp_c_df)
# ## Computational Time Comparison
# +
from pydts.examples_utils.plots import plot_times
plot_times(times_dict)
# -
# ## References
#
# [1] <NAME>\*, <NAME>\*, and <NAME>, "PyDTS: A Python Package for Discrete-Time Survival Analysis with Competing Risks" (2022)
#
# [2] <NAME> <NAME>. and <NAME>., "On the analysis of discrete time competing risks data", Biometrics (2018) doi: 10.1111/biom.12881
| docs/ModelsComparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 17 - Natural Language Processing
#
# by [<NAME>](http://www.albahnsen.com/) and [<NAME>](https://github.com/jesugome)
#
# version 1.5, March 2019
#
# ## Part of the class [Practical Machine Learning](https://github.com/albahnsen/PracticalMachineLearningClass)
#
# This notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US). Special thanks goes to [<NAME>](https://github.com/justmarkham)
# ### What is NLP?
#
# - Using computers to process (analyze, understand, generate) natural human languages
# - Most knowledge created by humans is unstructured text, and we need a way to make sense of it
# - Build probabilistic model using data about a language
#
# ### What are some of the higher level task areas?
#
# - **Information retrieval**: Find relevant results and similar results
# - [Google](https://www.google.com/)
# - **Information extraction**: Structured information from unstructured documents
# - [Events from Gmail](https://support.google.com/calendar/answer/6084018?hl=en)
# - **Machine translation**: One language to another
# - [Google Translate](https://translate.google.com/)
# - **Text simplification**: Preserve the meaning of text, but simplify the grammar and vocabulary
# - [Rewordify](https://rewordify.com/)
# - [Simple English Wikipedia](https://simple.wikipedia.org/wiki/Main_Page)
# - **Predictive text input**: Faster or easier typing
# - [My application](https://justmarkham.shinyapps.io/textprediction/)
# - [A much better application](https://farsite.shinyapps.io/swiftkey-cap/)
# - **Sentiment analysis**: Attitude of speaker
# - [Hater News](http://haternews.herokuapp.com/)
# - **Automatic summarization**: Extractive or abstractive summarization
# - [autotldr](https://www.reddit.com/r/technology/comments/35brc8/21_million_people_still_use_aol_dialup/cr2zzj0)
# - **Natural Language Generation**: Generate text from data
# - [How a computer describes a sports match](http://www.bbc.com/news/technology-34204052)
# - [Publishers withdraw more than 120 gibberish papers](http://www.nature.com/news/publishers-withdraw-more-than-120-gibberish-papers-1.14763)
# - **Speech recognition and generation**: Speech-to-text, text-to-speech
# - [Google's Web Speech API demo](https://www.google.com/intl/en/chrome/demos/speech.html)
# - [Vocalware Text-to-Speech demo](https://www.vocalware.com/index/demo)
# - **Question answering**: Determine the intent of the question, match query with knowledge base, evaluate hypotheses
# - [How did supercomputer Watson beat Jeopardy champion Ken Jennings?](http://blog.ted.com/how-did-supercomputer-watson-beat-jeopardy-champion-ken-jennings-experts-discuss/)
# - [IBM's Watson Trivia Challenge](http://www.nytimes.com/interactive/2010/06/16/magazine/watson-trivia-game.html)
# - [The AI Behind Watson](http://www.aaai.org/Magazine/Watson/watson.php)
#
# ### What are some of the lower level components?
#
# - **Tokenization**: breaking text into tokens (words, sentences, n-grams)
# - **Stopword removal**: a/an/the
# - **Stemming and lemmatization**: root word
# - **TF-IDF**: word importance
# - **Part-of-speech tagging**: noun/verb/adjective
# - **Named entity recognition**: person/organization/location
# - **Spelling correction**: "New Yrok City"
# - **Word sense disambiguation**: "buy a mouse"
# - **Segmentation**: "New York City subway"
# - **Language detection**: "translate this page"
# - **Machine learning**
#
# ### Why is NLP hard?
#
# - **Ambiguity**:
# - Hospitals are Sued by 7 Foot Doctors
# - Juvenile Court to Try Shooting Defendant
# - Local High School Dropouts Cut in Half
# - **Non-standard English**: text messages
# - **Idioms**: "throw in the towel"
# - **Newly coined words**: "retweet"
# - **Tricky entity names**: "Where is A Bug's Life playing?"
# - **World knowledge**: "Mary and Sue are sisters", "Mary and Sue are mothers"
#
# NLP requires an understanding of the **language** and the **world**.
# # Data
import pandas as pd
import numpy as np
import scipy as sp
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# from textblob import TextBlob, Word
from nltk.stem.snowball import SnowballStemmer
# %matplotlib inline
df = pd.read_csv('https://github.com/albahnsen/PracticalMachineLearningClass/raw/master/datasets/mashable_texts.csv', index_col=0)
df.head()
# # Tokenization
#
# - **What:** Separate text into units such as sentences or words
# - **Why:** Gives structure to previously unstructured text
# - **Notes:** Relatively easy with English language text, not easy with some languages
# ### Create the target feature (number of shares)
y = df.shares
y.describe()
y = pd.cut(y, [0, 893, 1200, 2275, 63200], labels=[0, 1, 2, 3])
y.value_counts()
df['y'] = y
# ### create document-term matrices
X = df.text
# use CountVectorizer to create document-term matrices from X
vect = CountVectorizer()
X_dtm = vect.fit_transform(X)
temp=X_dtm.todense()
vect.vocabulary_
# rows are documents, columns are terms (aka "tokens" or "features")
X_dtm.shape
# last 50 features
print(vect.get_feature_names()[-150:-100])
# show vectorizer options
vect
# - **lowercase:** boolean, True by default
# - Convert all characters to lowercase before tokenizing.
vect = CountVectorizer(lowercase=False)
X_dtm = vect.fit_transform(X)
X_dtm.shape
X_dtm.todense()[0].argmax()
vect.get_feature_names()[8097]
# - **ngram_range:** tuple (min_n, max_n)
# - The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used.
# include 1-grams and 2-grams
vect = CountVectorizer(ngram_range=(1, 4))
X_dtm = vect.fit_transform(X)
X_dtm.shape
# last 50 features
print(vect.get_feature_names()[-1000:-950])
# ### Predict shares
# +
# Default CountVectorizer
vect = CountVectorizer()
X_dtm = vect.fit_transform(X)
# use Naive Bayes to predict the star rating
nb = MultinomialNB()
pd.Series(cross_val_score(nb, X_dtm, y, cv=10)).describe()
# -
# define a function that accepts a vectorizer and calculates the accuracy
def tokenize_test(vect):
    """Fit `vect` on the corpus X, report the resulting feature count, and
    print summary statistics of 10-fold Naive Bayes cross-validation
    accuracy against the share-bucket target y."""
    doc_term = vect.fit_transform(X)
    print('Features: ', doc_term.shape[1])
    scores = cross_val_score(MultinomialNB(), doc_term, y, cv=10)
    print(pd.Series(scores).describe())
# include 1-grams and 2-grams
vect = CountVectorizer(ngram_range=(1, 2))
tokenize_test(vect)
# # Stopword Removal
#
# - **What:** Remove common words that will likely appear in any text
# - **Why:** They don't tell you much about your text
#
#
# - **stop_words:** string {'english'}, list, or None (default)
# - If 'english', a built-in stop word list for English is used.
# - If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens.
# - If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms.
# remove English stop words
vect = CountVectorizer(stop_words='english')
tokenize_test(vect)
# set of stop words
print(vect.get_stop_words())
# # Other CountVectorizer Options
#
# - **max_features:** int or None, default=None
# - If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus.
# remove English stop words and only keep 100 features
vect = CountVectorizer(stop_words='english', max_features=100)
tokenize_test(vect)
# all 100 features
print(vect.get_feature_names())
# include 1-grams and 2-grams, and limit the number of features
vect = CountVectorizer(ngram_range=(1, 2), max_features=1000)
tokenize_test(vect)
# - **min_df:** float in range [0.0, 1.0] or int, default=1
# - When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts.
# include 1-grams and 2-grams, and only include terms that appear at least 2 times
vect = CountVectorizer(ngram_range=(1, 2), min_df=2)
tokenize_test(vect)
# # Stemming and Lemmatization
#
# **Stemming:**
#
# - **What:** Reduce a word to its base/stem/root form
# - **Why:** Often makes sense to treat related words the same way
# - **Notes:**
# - Uses a "simple" and fast rule-based approach
# - Stemmed words are usually not shown to users (used for analysis/indexing)
# - Some search engines treat words with the same stem as synonyms
# +
# initialize stemmer
stemmer = SnowballStemmer('english')
# words
# -
vect = CountVectorizer()
vect.fit(X)
words = list(vect.vocabulary_.keys())[:100]
# stem each word
print([stemmer.stem(word) for word in words])
# **Lemmatization**
#
# - **What:** Derive the canonical form ('lemma') of a word
# - **Why:** Can be better than stemming
# - **Notes:** Uses a dictionary-based approach (slower than stemming)
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
import nltk
nltk.download('wordnet')
# assume every word is a noun
print([wordnet_lemmatizer.lemmatize(word) for word in words])
# assume every word is a verb
print([wordnet_lemmatizer.lemmatize(word,pos='v') for word in words])
# define a function that accepts text and returns a list of lemmas
def split_into_lemmas(text):
    """Lowercase `text`, split it on whitespace, and lemmatize each token
    (every word is treated as a noun, the lemmatizer's default)."""
    return [wordnet_lemmatizer.lemmatize(token) for token in text.lower().split()]
# use split_into_lemmas as the feature extraction function (WARNING: SLOW!)
vect = CountVectorizer(analyzer=split_into_lemmas)
tokenize_test(vect)
# # Term Frequency-Inverse Document Frequency (TF-IDF)
#
# - **What:** Computes "relative frequency" that a word appears in a document compared to its frequency across all documents
# - **Why:** More useful than "term frequency" for identifying "important" words in each document (high frequency in that document, low frequency in other documents)
# - **Notes:** Used for search engine scoring, text summarization, document clustering
# example documents
simple_train = ['call you tonight', 'Call me a cab', 'please call me... PLEASE!']
# Term Frequency
vect = CountVectorizer()
tf = pd.DataFrame(vect.fit_transform(simple_train).toarray(), columns=vect.get_feature_names())
tf
# Document Frequency
vect = CountVectorizer(binary=True)
df_ = vect.fit_transform(simple_train).toarray().sum(axis=0)
pd.DataFrame(df_.reshape(1, 6), columns=vect.get_feature_names())
# Term Frequency-Inverse Document Frequency (simple version)
tf/df_
# TfidfVectorizer
vect = TfidfVectorizer()
pd.DataFrame(vect.fit_transform(simple_train).toarray(), columns=vect.get_feature_names())
# **More details:** [TF-IDF is about what matters](http://planspace.org/20150524-tfidf_is_about_what_matters/)
# # Using TF-IDF to Summarize a text
#
# create a document-term matrix using TF-IDF
vect = TfidfVectorizer(stop_words='english')
dtm = vect.fit_transform(X)
features = vect.get_feature_names()
dtm.shape
# choose a random text
review_id = 40
review_text = X[review_id]
review_length = len(review_text)
# create a dictionary of words and their TF-IDF scores
# vect.vocabulary_ already maps each (lowercased) term to its column index,
# so we can look scores up directly. The original called features.index(word)
# inside the loop — an O(n) scan per word, O(n^2) overall — and re-lowered /
# re-checked membership for keys that are lowercase and present by construction.
word_scores = {}
for word, col in vect.vocabulary_.items():
    word_scores[word] = dtm[review_id, col]
# print words with the top 5 TF-IDF scores
print('TOP SCORING WORDS:')
top_scores = sorted(word_scores.items(), key=lambda x: x[1], reverse=True)[:5]
for word, score in top_scores:
print(word)
# print 5 random words
print('\n' + 'RANDOM WORDS:')
random_words = np.random.choice(list(word_scores.keys()), size=5, replace=False)
for word in random_words:
print(word)
# # Conclusion
#
# - NLP is a gigantic field
# - Understanding the basics broadens the types of data you can work with
# - Simple techniques go a long way
# - Use scikit-learn for NLP whenever possible
| notebooks/17-NaturalLanguageProcessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Load Libraries
import pandas as pd
import numpy as np
# ## Load total CSV after downloading from STRAVA API
# Load it into a Dataframe using pandas
path = 'python_data/strava_angelos_20170812122309.csv'
df = pd.read_csv(path)
df.head()
# Comparing with test data to spot any difference
path_test = 'python_data/strava_angelos_test.csv'
df_test = pd.read_csv(path_test)
df_test.head()
# ## Useful Functions
# Trim data per specific time threshold
def trimmer(time_df, thresh):
    """Yield indices of `time_df` downsampled to roughly one point per `thresh`.

    Walks the Series in order and yields an index whenever the value is at
    least `thresh` greater than the value at the last yielded index; the
    first index is always yielded.

    Fix: Series.iteritems() was deprecated and removed in pandas 2.0;
    Series.items() is the supported equivalent.
    """
    cur = None
    for i, item in time_df.items():
        if (cur is None) or (item - cur >= thresh):
            yield i
            cur = item
# Create a string format for the Day column
def dayConverter(s):
    """Format a period/date-like object as a 'YYYY-MM-DD' string."""
    return s.strftime("%Y-%m-%d")
# Add distance to all activities
def distanceEnhancer(added,x):
    """Shift a single distance/time reading `x` by the offset `added`.

    Used to turn per-activity cumulative readings into per-day cumulative
    readings by adding the previous activity's final value.
    """
    shifted = x + added
    return shifted
# Transforms seconds to hours
def secToHours(d):
    """Render a duration in seconds as an 'HH:MM:SS' string.

    Each component is left-padded to two digits; hours are not wrapped,
    so e.g. 90000 seconds -> '25:00:00'.
    """
    minutes, seconds = divmod(d, 60)
    hours, minutes = divmod(minutes, 60)
    # str(...).zfill(2) reproduces the original manual '0' prefixing.
    return ':'.join(str(part).zfill(2) for part in (hours, minutes, seconds))
# Transform hours to seconds
def hoursToSec(s, factors=(1, 60, 3600)):
    """[[hours:]minutes:]seconds -> seconds"""
    total = 0.0
    # Walk the fields right-to-left so seconds pair with factor 1,
    # minutes with 60, and hours with 3600.
    for factor, field in zip(factors, reversed(s.split(':'))):
        total += factor * float(field)
    return int(total)
# ## Modify the data
# +
# Convert to act_StartDate to datetime
df['act_startDate'] = pd.to_datetime(df['act_startDate'])
df['timestamp'] = pd.to_datetime(df['timestamp'])
# Keep only the date, as a period, in a separate column
df['day'] = pd.DatetimeIndex(df['act_startDate']).to_period('D')
# Convert days to strings
df['day'] = df['day'].apply(lambda x: dayConverter(x))
# Sort df by ascending day and timestamp
df = df.sort_values(by=['day','timestamp'],ascending=[True,True])
# Check main df
df.head()
# -
# ## Create a day number flag for keeping track of the trip days
# ## Create an iteration number flag for keeping track of activities per day
# +
# Create helper dataframe with unique days
df_helper = df.groupby(by=['day','act_id']).count().iloc[:,0].reset_index().filter(items=['day','act_id'])
# Create columns for iter_no(per activity) and day_no(per day)
days = list(set(df_helper['day']))
days.sort()
day_no = list()
iter_no = list()
for index,day in enumerate(days):
counter=1
for dfday in df_helper['day']:
if dfday == day:
iter_no.append(counter)
day_no.append(index+1)
counter+=1
df_helper['day_no'] = pd.Series(day_no).values
df_helper['iter_no'] = pd.Series(iter_no).values
df_helper
# +
# Merge flagger with the main dataframe
df = pd.merge(df,df_helper,on=['day','act_id'])
# Check main df
df.head()
# -
# ## Create cumulative distance/time when changing activity
# Keep the last row of each iteration of the same day and add its number to all the rest of the next day and do the same for the next activity
# +
# Transform distance to cumulative distance when changing activity (per day)
for day in days:
    act_day = list(df_helper[df_helper['day']==day]['act_id'])
    if len(act_day)>1:
        # Running end-of-activity distance/time offsets for this day
        last_dist = []
        last_time = []
        for i,act in enumerate(act_day):
            # Final reading of this activity; becomes the offset for the next one
            adding_dist = float(df[(df['day']==day) & (df['act_id']==act)]['distance'].iloc[-1:]) # +1
            adding_time = int(df[(df['day']==day) & (df['act_id']==act)]['time'].iloc[-1:]) # +1 if problem with division
            if i == 0:
                last_dist.append(adding_dist)
                last_time.append(adding_time)
            else:
                adding_dist_plus = adding_dist+last_dist[i-1]
                adding_time_plus = adding_time+last_time[i-1]
                last_dist.append(adding_dist_plus)
                last_time.append(adding_time_plus)
            # NOTE(review): distanceEnhancer is a project helper, presumably adding the
            # offset to each reading — confirm. For i == 0, last_dist[i-1] resolves to
            # last_dist[-1], i.e. this activity's own final value — verify intended.
            df.loc[(df['day']==day) & (df['act_id']==act),'distance'] = df[(df['day']==day) & (df['act_id']==act)]['distance'].apply(
                lambda x: distanceEnhancer(last_dist[i-1],x))
            df.loc[(df['day']==day) & (df['act_id']==act),'time'] = df[(df['day']==day) & (df['act_id']==act)]['time'].apply(
                lambda x: distanceEnhancer(last_time[i-1],x))
#new = df.groupby(by=['day','act_id']).count().iloc[:,0].reset_index().filter(items=['day','act_id'])
# -
# ## Create elevation gain per day
# Create elevation gain per day by summing only the positive differences between each consecutive point way before trimming.
# +
# Create total elevation per day dataframe
result = {}
for day in days:
    altitudes = df[df['day'] == day]['altitude']
    # Sum only the climbs (positive point-to-point differences)
    climbs = [later - earlier
              for earlier, later in zip(altitudes.iloc[:-1], altitudes.iloc[1:])
              if later - earlier > 0]
    result[day] = sum(climbs)
# Create dataframe from result dictionary
alt_ttl_df = pd.DataFrame(result.items(), columns=['day', 'elevation_gain'])
alt_ttl_df
# -
# ## Create total distance in KM per day
# Create total distance in km per day by picking the last value of distance column for each day
# +
# Last cumulative distance reading of each day, converted from metres to km
temp = {day: round(df[df['day'] == day]['distance'].iloc[-1] / 1000, 1) for day in days}
# Create dataframe from temp dictionary
dist_ttl_df = pd.DataFrame(temp.items(), columns=['day', 'ttl_distance'])
dist_ttl_df
# -
# ## Create a speed column km/h
# +
# Accumulator frame for the per-day speed series
speed_df = pd.DataFrame(columns=['time', 'distance', 'speed'])
# Speed = delta distance / delta time (m/s), scaled by 3.6 to km/h
for day in days:
    day_frame = df[df['day'] == day].filter(items=['time', 'distance'])
    speeds = []
    for idx in range(len(day_frame)):
        if idx == 0:
            speeds.append(0)
            continue
        d_dist = float(day_frame['distance'].iloc[idx] - day_frame['distance'].iloc[idx - 1])
        d_time = int(day_frame['time'].iloc[idx] - day_frame['time'].iloc[idx - 1])
        if d_time == 0:
            # Duplicate timestamp: carry the previous speed to avoid dividing by zero
            speeds.append(speeds[idx - 1])
        else:
            speeds.append((d_dist / d_time) * 3.6)
    # Attach the computed speeds and prepend this day's rows to the accumulator
    day_frame['speed'] = np.asarray(speeds)
    speed_df = pd.concat([day_frame, speed_df])
# -
# Check if the lengths match each other
# (Python 3: print is a function — the original used Python 2 print statements,
# which are a SyntaxError under the notebook's python3 kernel.)
print(len(speed_df))
print(len(df))
# Merge speed_df with the main dataframe on indexes
df = df.join(speed_df['speed'], how='outer')
# Make sure that the speed makes sense
df[df['speed']>100]
df.iloc[21945:21950]
# Remove those lines with extremely high speed. There must be inaccurate tracking
# (rows with NaN speed are dropped here too, since NaN comparisons are False)
df = df[df['speed']<100]
# ## Create max and average speed per day and heartbeat
# +
# Per-day mean speed
avg_speed_df = (df.groupby('day').mean().reset_index()
                  .sort_values(by='day', ascending=True)
                  .filter(items=['day', 'speed'])
                  .rename(columns={'speed': 'avg_speed'}))
# Per-day maximum speed
max_speed_df = (df.groupby('day').max().reset_index()
                  .sort_values(by='day', ascending=True)
                  .filter(items=['day', 'speed'])
                  .rename(columns={'speed': 'max_speed'}))
# Combine the two speed summaries
ttl_speed_df = pd.merge(avg_speed_df, max_speed_df, on='day')
# Per-day mean heart rate while active
avg_heartbeat_df = (df.groupby('day').mean().reset_index()
                      .sort_values(by='day', ascending=True)
                      .filter(items=['day', 'heartrate'])
                      .rename(columns={'heartrate': 'avg_active_HR'}))
ttl_speed_df = pd.merge(ttl_speed_df, avg_heartbeat_df, on='day')
ttl_speed_df
# -
# ## Create cumulative distance in string format (hours:minutes:seconds)
# Create column with converted seconds to hours:minutes:seconds format (string)
# NOTE(review): secToHours is a project helper defined/imported elsewhere in the notebook.
df['time_form'] = df['time'].apply(lambda x: secToHours(x))
# ## Keep only selected columns
df = df.filter(items=['day','act_startDate','timestamp','day_no','iter_no','altitude','distance','heartrate','time','time_form','speed','long','lat'])
df.head()
# ## Extract data to a json to check D3
# +
# Exclude this filter. It is just to test NaN functionality at D3
#df = df[(df['day']=='2017-04-11') | (df['day']=='2017-05-01')]
# -
def create_json(df):
    """Build one dict per trip day for the D3 front end.

    Each dict carries the GPS path plus the distance/elevation/speed/HR
    series and the per-day totals. Relies on module-level objects:
    days, dist_ttl_df, alt_ttl_df, ttl_speed_df.
    """
    init_list = []
    for i, day in enumerate(days):
        temp = {}
        temp['day'] = day
        temp['ttl_dist'] = dist_ttl_df[dist_ttl_df['day']==day]['ttl_distance'].iloc[0]
        temp['day_no'] = i+1
        # FIX: .ix was removed in pandas 1.0; .loc with a column list is the
        # supported label-based spelling.
        temp['path'] = df[df['day'] == day].loc[:,['lat','long']].values.tolist()
        temp['distance'] = [v for v in df[df['day'] == day]['distance']]
        temp['elevation'] = [v for v in df[df['day'] == day]['altitude']]
        temp['elev_gain'] = alt_ttl_df[alt_ttl_df['day']==day]['elevation_gain'].iloc[0]
        temp['speed'] = [v for v in df[df['day'] == day]['speed']]
        temp['avg_speed'] = ttl_speed_df[ttl_speed_df['day']==day]['avg_speed'].iloc[0]
        temp['max_speed'] = ttl_speed_df[ttl_speed_df['day']==day]['max_speed'].iloc[0]
        temp['heartrate'] = [v for v in df[df['day'] == day]['heartrate']]
        temp['avg_active_HR'] = ttl_speed_df[ttl_speed_df['day']==day]['avg_active_HR'].iloc[0]
        temp['time_form'] = [v for v in df[df['day'] == day]['time_form']]
        init_list.append(temp)
    return init_list
# Transform the list of dicts into a json file
# NOTE(review): relies on `json` being imported earlier in the notebook — verify.
with open('../app/interactive_map/original_active.json', 'w') as outfile:
    json.dump(create_json(df), outfile)
# ## Trim data points if they are too many to be handled by D3
# ### If the json from above is too big to be parsed by the browser, trim the df and run the json creation after the trim
# trimmer() is a project helper defined elsewhere; keeps roughly one point per 30 s.
df_trim = df.loc[list(trimmer(df['timestamp'], pd.to_timedelta(30, 's')))]
# Check if the trimming is correct
df_trim.head()
# Transform the list of dicts into a json file
with open('../app/interactive_map/trimmed_active.json', 'w') as outfile:
    json.dump(create_json(df_trim), outfile)
# # Corrections
# There has been an issue after transforming cumulative distances. Day#25 (29/07/2017) had a duplicate activity and the graphs mismatch. The duplicates have been manually excluded from the trimmed json file.
# ## Correct data issues with day#25
#
df_trim[df_trim['day_no']==25]
# # Totals df merge
# ## Merge totals and averages in order to create a summing up comparison among the days
# +
# Merge the totals tables
ttl_df = pd.merge(ttl_speed_df,dist_ttl_df,on='day')
ttl_df = pd.merge(ttl_df,alt_ttl_df,on='day')
# Create day_no column
ttl_df['day_no']= 1 + ttl_df.index
# -
# Add the total time spent on bike per day.
# FIX: the original wrote ttl_df['ttl_time'].iloc[i] = ... before the column
# existed (KeyError) and via chained indexing; build the whole column at once.
# Relies on ttl_df rows being in the same sorted-day order as `days`.
ttl_df['ttl_time'] = [df_trim[df_trim['day']==day]['time_form'].iloc[-1] for day in days]
ttl_df[:5]
# Calculate the totals to add them as hardcoded variables in D3
# (Python 3: print is a function — the original used Python 2 print statements.)
print("Total Distance Traveled: %.2f km" % sum(ttl_df['ttl_distance']))
print("Total Number of Days Traveled: %s days" % ttl_df['day_no'].iloc[-1])
print("Total Cycling Hours: %s hours" % secToHours(sum(ttl_df['ttl_time'].map(lambda x: hoursToSec(x)))))
print("Total Elevation Gain: %.2f m" % sum(ttl_df['elevation_gain']))
print("Average Speed of Whole Trip: %.2f km/h" % round(np.mean(ttl_df['avg_speed']),2))
# Save it to a csv for D3
ttl_df.to_csv('../app/totals_linegraphs/totals_linegraphs.csv', index=False)
# # Discussion
# The idea from now on is to have two different dataframes, one for the totals and one for the activities.
# Concerning the **totals**:
#
# * Merge ttl_speed_df and alt_ttl_df on key day in order to have the total elevation gain, speed, max speed and active HR per day
# * Rename each column of the new dataframe from above to blabla_angelos
# * Then merge Andreas and Angelos dataframes to a single CSV with all data included
#
# Concerning the **activities**:
#
# * Make sure what are the columns we actually need for the D3 and minimize it(with the trimmer above) if needed in order to have the lowest possible size for javascript to not lag.
# * Identify the columns that will be andreas and angelos different (probably active HR and speed)
| data_manging/Parsing STRAVA csv - ANGELOS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
# %matplotlib inline
# -
#
# PyTorch: nn
# -----------
#
# A fully-connected ReLU network with one hidden layer, trained to predict y from x
# by minimizing squared Euclidean distance.
#
# This implementation uses the nn package from PyTorch to build the network.
# PyTorch autograd makes it easy to define computational graphs and take gradients,
# but raw autograd can be a bit too low-level for defining complex neural networks;
# this is where the nn package can help. The nn package defines a set of Modules,
# which you can think of as a neural network layer that produces output from
# input and may have some trainable weights.
#
#
# +
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N = 64
D_in = 1000
H = 100
D_out = 10
# Create random Tensors to hold inputs and outputs
# (no manual seed, so every run trains on a different random problem)
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Two-layer fully connected net: Linear -> ReLU -> Linear
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H), torch.nn.ReLU(), torch.nn.Linear(H, D_out),
)
# Mean squared error, averaged over all elements
loss_fn = torch.nn.MSELoss()
learning_rate = 0.1
for t in range(501):
    # Forward pass on the full batch
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    if t % 100 == 0:
        print(t, loss.item())
    # Zero accumulated gradients before backprop
    model.zero_grad()
    loss.backward()
    # Manual SGD step; no_grad() keeps the in-place update out of autograd
    with torch.no_grad():
        for param in model.parameters():
            param -= learning_rate * param.grad
# + pycharm={"name": "#%%\n"}
| two_layer_net_nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ----
# ## 5. 클래스와 객체 지향 프로그래밍
#
# ### 2) 클래스와 객체
# +
class Calculator:
    """Calculator that remembers the result of its last plus() call."""

    def __init__(self):
        # Each instance keeps its own running result.
        self.result = 0

    def plus(self, a, b):
        """Store and return a + b."""
        total = a + b
        self.result = total
        return total
# Two independent instances: each keeps its own result state.
calc_a = Calculator()
calc_b = Calculator()
print(calc_a.plus(3, 5))
print(calc_b.plus(1, 3))
print(calc_b.plus(7, 9))
# +
class Calculator:
    """Accumulating calculator seeded with an initial value."""

    def __init__(self, a):
        self.result = a

    def plus(self, a):
        """Add a to the running result and return it."""
        self.result += a
        return self.result

    def minus(self, a):
        """Subtract a from the running result and return it."""
        self.result -= a
        return self.result
# Seeded instances accumulate independently.
calc_seeded3 = Calculator(3)
calc_seeded5 = Calculator(5)
print(calc_seeded3.plus(3))
print(calc_seeded5.minus(7))
print(calc_seeded5.plus(16))
# +
class Calculator:
    """Two-operand calculator; reset() swaps in a new operand pair."""

    def __init__(self, a, b):
        self.first = a
        self.second = b

    def reset(self, a, b):
        """Replace both operands."""
        self.first = a
        self.second = b

    def plus(self):
        return self.first + self.second

    def minus(self):
        return self.first - self.second

    def divide(self):
        # Raises ZeroDivisionError if second == 0, like plain division.
        return self.first / self.second

    def multiple(self):
        return self.first * self.second
# Exercise the two-operand calculator, including a reset.
pair_a = Calculator(5, 9)
pair_b = Calculator(7, 9)
print(pair_a.plus())
print(pair_b.minus())
print(pair_b.plus())
pair_a.reset(10, 5)
print(pair_a.multiple())
# -
| 01/chapter2/05_class_object.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
import warnings
import numpy as np
from tqdm import trange
from mnist_util import fetch#, plot, imshow
# Cleaner numeric printing; silence library warnings for this demo run.
# NOTE(review): plot/imshow used below come from the %pylab namespace.
np.set_printoptions(suppress=True)
warnings.filterwarnings('ignore')
# +
# data starts at offset 16
x_train_url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'
x_test_url = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'
# data starts at index 8
y_train_url = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'
y_test_url = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'
# download datasets
# fetch() is a project helper (mnist_util); images are flattened to 784 pixels
x_train = fetch(x_train_url)[16:].reshape(-1, 28*28)
y_train = fetch(y_train_url)[8:]
x_test = fetch(x_test_url)[16:].reshape(-1, 28*28)
y_test = fetch(y_test_url)[8:]
# display a sample image with its label
i = 10
img = x_train[i]
print(y_train[i])
imshow(img.reshape(28, 28))
# +
np.random.seed(1337)

# quadratic cost function
def cost(a, y):
    """Mean squared error between prediction a and target y (float32 mean)."""
    diff = y - a
    return (diff * diff).mean(dtype=np.float32)

# derivative of cost function with respect to y_pred
def d_cost(a, y):
    """Gradient of the quadratic cost with respect to the prediction."""
    return a - y
def get_batch(size):
    """Sample `size` random training rows; labels come back one-hot encoded."""
    picks = np.random.randint(0, x_train.shape[0], size=(size))
    batch_x, labels = x_train[picks], y_train[picks]
    one_hot = np.zeros((len(labels), 10))
    one_hot[range(labels.shape[0]), labels] = 1
    return batch_x, one_hot
def evaluate():
    """Accuracy of the current weights on the held-out test set."""
    *_, probs = forward(x_test)
    return (np.argmax(probs, axis=1) == y_test).mean()
def init_weights(x, y):
    """Uniform(-1, 1) weight matrix of shape (x, y), scaled by sqrt(x*y)."""
    scale = np.sqrt(x * y)
    return np.random.uniform(-1., 1., size=(x, y)) / scale
# +
def sigmoid(x):
    """Logistic activation: 1 / (1 + e^-x)."""
    denom = 1.0 + np.exp(-x)
    return 1.0 / denom

def sigmoid_prime(x):
    """Derivative of the logistic function: s(x) * (1 - s(x))."""
    s = sigmoid(x)
    return s * (1 - s)
# -
# 784-128-10 architecture; one weight matrix per consecutive layer pair
layers = (784, 128, 10)
weights = [init_weights(n_in, n_out) for n_in, n_out in zip(layers, layers[1:])]
for w in weights:
    print(w.shape)
def forward(x):
    """Forward pass through both layers.

    Returns (z1, a1, z2, a2): pre-activation and activation of the hidden
    layer, then of the output layer. Reads `weights` from module scope.
    """
    hidden_pre = x.dot(weights[0])
    hidden_act = sigmoid(hidden_pre)
    out_pre = hidden_act.dot(weights[1])
    out_act = sigmoid(out_pre)
    return hidden_pre, hidden_act, out_pre, out_act
def backward(z1, a1, z2, a2):
    """Backprop: gradients for both weight matrices.

    NOTE(review): reads the minibatch x, y and `weights` from module scope,
    so it must be called right after forward() on the same batch.
    """
    out_err = d_cost(a2, y) * sigmoid_prime(z2)
    hidden_err = out_err.dot(weights[1].T) * sigmoid_prime(z1)
    grad_w2 = a1.T.dot(out_err)
    grad_w1 = x.T.dot(hidden_err)
    return grad_w1, grad_w2
# +
# hyperparameters
epochs = 1200
lr = 0.001
batch_size = 128
losses, accuracies = [], []
# train
# (walrus keeps a handle on the tqdm bar for live status text; requires py3.8+)
for i in (t := trange(epochs)):
    # batch
    x, y = get_batch(batch_size)
    # forward pass
    z1, a1, z2, a2 = forward(x)
    # calculate loss
    loss = cost(a2, y)
    # backwards pass
    # NOTE(review): backward() reads x and y from module scope, so it must run
    # while this iteration's batch is still bound — do not reorder.
    w1_adj, w2_adj = backward(z1, a1, z2, a2)
    # predict, calculate accuracy
    predictions, actual = np.argmax(a2, axis=1), np.argmax(y, axis=1)
    accuracy = np.equal(predictions, actual).mean()
    # track loss/accuracy over time
    losses.append(loss)
    accuracies.append(accuracy)
    t.set_description("loss %.2f accuracy %.2f" % (loss, accuracy))
    # gradient descent
    weights[0] = weights[0] - lr * w1_adj
    weights[1] = weights[1] - lr * w2_adj
# -
# Loss/accuracy curves, then test-set accuracy; plot() comes from %pylab.
plt.ylim(-0.1, 1.1)
plot(losses)
plot(accuracies)
evaluate()
# Hand-drawn "4" on a 7x7 grid (1 = pen down)
m = [[0,0,0,0,0,0,0],
[0,1,0,0,1,0,0],
[0,1,0,0,1,0,0],
[0,1,1,1,1,0,0],
[0,0,0,0,1,0,0],
[0,0,0,0,1,0,0],
[0,0,0,0,0,0,0]]
# upscale to 28x28
# inner concat: widens each pixel 4x and repeats the row 4x (7 cols -> 4 rows of 28);
# outer concat: joins the 7 row-groups into 784 values
m = np.concatenate([np.concatenate([[x]*4 for x in y]*4) for y in m])
imshow(m.reshape(28,28))
# Classify the drawing with the trained network
_, _, _, x = forward(m)
print(np.argmax(x))
| mnist_numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# Solve the linear system A @ x = B
# NOTE(review): np.matrix is discouraged by NumPy in favour of plain ndarrays;
# kept here because the surrounding cells display matrix objects.
A = np.matrix('3,1,2;4,2,5;6,7,8')
A
B = np.matrix('2,-1,3')
B
# Column vector right-hand side
B = np.matrix('2,-1,3').transpose()
B
sol = np.linalg.solve(A,B)
sol
# #### File Handling in Numpy
a = np.array([[1,2,3],[4,5,6],[7,8,9]])
a
# Text round-trip (CSV)
np.savetxt('a.csv',a,delimiter=',')
np.loadtxt('a.csv',delimiter=',')
# Binary round-trip (single array)
np.save('data.npy',a)
np.load('data.npy')
arr = np.arange(10)
arr
a
# Multiple arrays in one .npz archive; positional args are keyed arr_0, arr_1, ...
np.savez('both.npz',a,arr)
d = np.load('both.npz')
d.files
d[d.files[0]]
d[d.files[1]]
# ### Pandas
import pandas as pd
# ### Using List
my_list = [100,200,300]
pd.Series(my_list)
labels = ['A','B','C']
pd.Series(my_list,labels)
# #### Using Numpy Array
arr = np.array([100,200,300])
arr
z = pd.Series(arr,index=labels,name='Method2')
z
# FIX: the bare identifier `Method2` raised NameError — it is only the
# Series' name attribute, so display it via z.name instead.
z.name
# ### Using Dictionaries
# FIX: don't shadow the builtin `dict`; use a descriptive mapping name.
grades = {'A':100,'B':200,'C':300,'D':400}
z1 = pd.Series(grades)
z1
z2 = pd.Series([10,20,30,40],'A B D E'.split())
z2
'A B D E'.split()
z1
# Index-aligned addition; labels present in only one series yield NaN
z1+z2
# ### Slicing a Series
# Label slices are inclusive of both endpoints
z1['A':'C']
type(z1)
type(z1['A':'C'])
# ### Indexing
# **.loc & .iloc**
z1
z1.iloc[2]
z1.loc['C']
# Attribute access works for simple string labels
z1.A
z1.B
# FIX: don't shadow the builtin `dict`
int_keyed = {5:100,6:200,7:300,8:400}
z3 = pd.Series(int_keyed)
z3
z1
# FIX: positional access with plain [] on a labelled Series is deprecated
# (removed in pandas 3.0); use .iloc for position-based lookup.
z1.iloc[0]
z1.iloc[1]
z3
# With an integer index, .iloc is unambiguous position-based access
z3.iloc[0]
z3.iloc[2]
# ### Operations
z1
# Boolean filtering
z1[z1 > 100]
mask = [True, False, True, False]
z1[mask]
z1
# Element-wise arithmetic broadcasts the scalar
z1 + 300
# #### Ordering on Series
z1.sort_values(ascending=False, inplace=True)
z1
# #### Aggregation on Series
z1.mean()
z1.sum()
z1.max()
z1.min()
z1.idxmax()
'ABDE'.split()
| Online Certificate Course in Data Science and Machine Learning rearranged/03 pandas/Pandas Series-15-09-2020 .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os, sys, math, time
from glob import glob
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
# Project helper libraries (profile plotting/processing for TEAR runs)
sys.path.insert(0,"/home/nico/Documents/TEAR/Codes_TEAR/PythonCodes/LibFolder")
from Lib_GeneralFunctions import *
from Lib_GeneralSignalProcNAnalysis import *
from Lib_ProfilePlotting import *
from Lib_ProfileProcessing import *

#=================== Plotting style ===================
from matplotlib.animation import FuncAnimation
from IPython.display import clear_output
# NOTE(review): 'seaborn-whitegrid' was renamed in matplotlib >= 3.6
# ('seaborn-v0_8-whitegrid') — confirm the pinned matplotlib version.
plt.style.use('seaborn-whitegrid')

from matplotlib import cm
from matplotlib.colors import ListedColormap
from matplotlib.lines import Line2D
from matplotlib.gridspec import GridSpec

#definition of colormap
from palettable.scientific.sequential import LaJolla_20
cmap = LaJolla_20.mpl_colormap
plt.register_cmap(cmap=cmap)
plt.set_cmap('LaJolla_20')
plt.rcParams['image.cmap'] = 'LaJolla_20'
# -
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12

def FontSizeControlFreak(SMALL_SIZE, MEDIUM_SIZE, BIGGER_SIZE):
    """Apply a consistent font-size scheme to every matplotlib text element."""
    rc_settings = [
        ('font',   'size',      SMALL_SIZE),   # default text
        ('axes',   'titlesize', SMALL_SIZE),   # axes title
        ('axes',   'labelsize', MEDIUM_SIZE),  # x / y labels
        ('xtick',  'labelsize', SMALL_SIZE),   # x tick labels
        ('ytick',  'labelsize', SMALL_SIZE),   # y tick labels
        ('legend', 'fontsize',  SMALL_SIZE),   # legend text
        ('figure', 'titlesize', BIGGER_SIZE),  # figure title
    ]
    for group, key, value in rc_settings:
        plt.rc(group, **{key: value})
# Timestamp variable
# Wall-clock start, for measuring the notebook's total run time.
start_time = time.time()
# Reference receiver trace (slip + slip rate) wrapped in a class
class SSCreference:
    """Reference slip / slip-rate trace for one receiver.

    `filename` must contain a `{}` placeholder, filled with "slip" and "sr"
    to locate the two CSV files of this receiver.
    """

    def __init__(self, filename, coordinates, RefSource="SEM2DPACK"):
        slip_table = pd.read_csv(filename.format("slip"), header=None)
        self.Time = slip_table[0]
        self.Slip = slip_table[1]
        rate_table = pd.read_csv(filename.format("sr"), header=None)
        self.SlipRate = rate_table[1]
        self.Coord = coordinates  # Only used for labels and printing
        self.RefSource = RefSource
    #end __init__

    # Default object printing information
    def __repr__(self):
        return "The reference object was generated from: {} and the receiver is located at {}".format(self.RefSource, self.Coord)
    #end __repr__

    def __str__(self):
        # Same text as __repr__
        return self.__repr__()
    #end __str__

    def PlotReference(self, ax, SlipSlipRate, filtering=True, **kwargs):
        """Plot this trace (dashed black) on `ax` and return `ax`.

        SlipSlipRate selects "Slip" or "SlipRate"; any other value plots
        nothing. When `filtering` is true, the trace is passed through the
        project's Butterworth(**kwargs) filter first.
        """
        if SlipSlipRate == "Slip":
            series = self.Slip
        elif SlipSlipRate == "SlipRate":
            series = self.SlipRate
        else:
            return ax
        trace = Butterworth(series, **kwargs) if filtering else series
        ax.plot(self.Time, trace, label="", c="k", ls="--", zorder=1)
        return ax
# +
path = "/home/nico/Documents/TEAR/Codes_TEAR/ProfilePicking/Output/"

# Reference saved into a list of objects (one SEM2DPACK receiver per 2 km)
RefList = [SSCreference(path + "Reference/sem2dpack/sem2d-{}-1.txt", "2km"),
           SSCreference(path + "Reference/sem2dpack/sem2d-{}-2.txt", "4km"),
           SSCreference(path + "Reference/sem2dpack/sem2d-{}-3.txt", "6km"),
           SSCreference(path + "Reference/sem2dpack/sem2d-{}-4.txt", "8km"),
          ]
# -
# Load one se2dr run's receiver output (tab-separated)
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P4/"
Data = pd.read_csv(Path+"SlipAtReceiver.txt",sep="\t")
Data.columns
def FilterDataForFloatEquality(Data, Column, Value, Tolerance = 1e-1):
    """Rows of Data whose Column lies within Tolerance of Value (strict <)."""
    deviation = (Data[Column] - Value).abs()
    return Data.loc[deviation < Tolerance]
# # Separate per faultY parameter
# # Comparing the same location at different QP
# +
# Loader / filter wrapper around a live (still-being-written) receiver file
class SlipDataObject:
    """Tab-separated slip receiver data with reload and filtering helpers."""

    def __init__(self, filename):
        self.FileName = filename
        self.Data = pd.read_csv(filename,sep="\t")
    #end __init__

    # Default object printing information
    def __repr__(self):
        return "File loaded: {}".format(self.FileName)
    #end __repr__

    def __str__(self):
        return "File loaded: {}".format(self.FileName)
    #end __str__

    def FilterDataForFloatEquality(self, Data, Column, Value, Tolerance = 1e-1):
        """Rows of `Data` whose `Column` lies within `Tolerance` of `Value`.

        FIX: the mask was previously computed on self.Data instead of the
        `Data` argument, which breaks (or silently mis-aligns) when a
        pre-filtered frame is passed in — as getDataSubDomain does on its
        second call. Mirrors the module-level function of the same name.
        """
        return Data.loc[(abs(Data[Column]-Value).lt(Tolerance))]

    def reload(self):
        """Re-read the file from disk (the solver may still be appending)."""
        self.Data = pd.read_csv(self.FileName, sep="\t")

    def getDataSubDomain(self, FaultX, FaultXValue, FaultY, FaultYValue, q):
        """Subset for one receiver: FaultX ~ FaultXValue, FaultY ~ FaultYValue, exact q."""
        FxData = self.FilterDataForFloatEquality(self.Data, FaultX, FaultXValue)
        FyData = self.FilterDataForFloatEquality(FxData, FaultY, FaultYValue)
        return FyData.loc[FyData["q"].eq(q)]
def format_axes(fig):
    """Fix common x/y limits on every axes and add a location legend."""
    for ax in fig.axes:
        ax.set_xlim(-0.5, 4)
        ax.set_ylim(-0.5, 10)
    # Label the last five lines of the last axes (reference + four receivers)
    last_lines = fig.axes[-1].get_lines()[-5:]
    legend2 = fig.axes[-1].legend(last_lines, ['Reference', '8km', '6km', '4km', '2km'], loc=1)
    fig.axes[-1].add_artist(legend2)
def GenericFigAxis():
    """One-row, two-column figure; returns (fig, [left_ax, right_ax])."""
    fig = plt.figure(constrained_layout=True, figsize=[12, 4])
    grid = GridSpec(1, 2, figure=fig)
    panels = [fig.add_subplot(grid[0, 0]), fig.add_subplot(grid[0, 1])]
    return fig, panels
# -
def Plotting(fig, axis, DataObj, FilteringSpecs):
    """Reload DataObj and plot its filtered slip / slip-rate traces.

    axis[0] gets slip reconstructed by integrating the slip rate
    (cumulative sum * dt); axis[1] gets the raw slip rate.
    FIX: removed the dead assignment `AA=0`.
    """
    DataObj.reload()
    FilteredDataframe = DataObj.getDataSubDomain(**FilteringSpecs)
    x_values = FilteredDataframe.Time
    y_values = FilteredDataframe.Slip   # kept for the commented-out direct-slip plots
    yPrime_values = FilteredDataframe.SlipRate
    # Slip via integrated slip rate; dt taken from the first two time samples
    axis[0].plot(x_values, np.cumsum(yPrime_values)*(x_values.iloc[1]-x_values.iloc[0]))
    #axis[0].plot(x_values, np.cumsum(y_values))
    #axis[0].plot(x_values, y_values)
    axis[0].set_xlabel('Time = {}'.format(FilteredDataframe.Time.iloc[-1]))
    axis[0].set_ylabel('Slip')
    axis[0].set_title('Slip')
    axis[1].plot(x_values, yPrime_values)
    axis[1].set_ylabel('Slip rate')
    axis[1].set_title('Slip rate')
    axis[1].set_xlabel('Time = {}'.format(FilteredDataframe.Time.iloc[-1]))
    fig.gca().relim()
    fig.gca().autoscale_view()
# # P6
#
# $S = \Delta V\ dt + \frac{\delta}{\mu}(c-f(t)(-\sigma^{avg}_{yy}))$
#
# $dS = S - S_{t-1}$
#
# $\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
#
# $S_{q}\ =S_{t-1} + dS$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Compare run P6 receivers (2/4/6 km) against the SEM2DPACK references
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P6/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# -
# # P1
#
# $\dot{S} = \Delta V - \frac{\delta}{\mu}(\frac{f(t)-f(t-1)}{dt})(-\sigma^{avg}_{yy})$
#
# $dS = \dot{S}\ dt $
#
# $\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
#
# $S_{q}\ += S$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Same comparison for run P1, including the receiver at the nucleation point (0 m)
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P1/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
format_axes(fig)
# -
# # P9
#
# $S = \Delta U + \frac{\delta}{\mu}(c-f(t)(-\sigma^{avg}_{yy}))$
#
# $dS = S - S_{t-1}$
#
# $\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
#
# $S_{q}\ = S$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Same comparison for run P9
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P9/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plotting(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# -
# # Looks like I didn't save the code when I commented out the slip updating scheme. Of course it was not calculating the accumulation correctly
#
# god...
# I start again and the plotting function is changed to not scroll up again
def Plottings(fig, axis, DataObj, FilteringSpecs):
    """Like Plotting(), but plots the stored Slip column directly instead of
    integrating the slip rate.

    FIX: removed the dead assignment `AA=0`.
    NOTE(review): largely duplicates Plotting(); kept separate because both
    are called throughout the notebook.
    """
    DataObj.reload()
    FilteredDataframe = DataObj.getDataSubDomain(**FilteringSpecs)
    x_values = FilteredDataframe.Time
    y_values = FilteredDataframe.Slip
    yPrime_values = FilteredDataframe.SlipRate
    #axis[0].plot(x_values, np.cumsum(yPrime_values)*(x_values.iloc[1]-x_values.iloc[0]))
    axis[0].plot(x_values, y_values)
    axis[0].set_xlabel('Time = {}'.format(FilteredDataframe.Time.iloc[-1]))
    axis[0].set_ylabel('Slip')
    axis[0].set_title('Slip')
    axis[1].plot(x_values, yPrime_values)
    axis[1].set_ylabel('Slip rate')
    axis[1].set_title('Slip rate')
    axis[1].set_xlabel('Time = {}'.format(FilteredDataframe.Time.iloc[-1]))
    fig.gca().relim()
    fig.gca().autoscale_view()
# # P2
#
# $S = \Delta U + \frac{\delta}{\mu}(c-f(t)(-\sigma^{avg}_{yy}))$
#
# $dS = S - S_{t-1}$
#
# $\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
#
# $S_{q}\ += dS$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Run P2 — references commented out while debugging the accumulation
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P2/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# -
# # ^After that change, now the slip is consistent with the slip rate
# # P3
#
# $S = \Delta U + \frac{\delta}{\mu}(c-f(t)(-\sigma^{avg}_{yy}))$
#
# $dS = S - S_{t-1}$
#
# $\gamma=\frac{S}{2\delta} P_n(\phi/2\delta)$
#
# $S_{q}\ += dS$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Run P3 — with SEM2DPACK references overlaid
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P3/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# -
# # P4
#
# $\dot{S} = \Delta V - \frac{\delta}{\mu}(\frac{f(t)-f(t-1)}{dt})(-\sigma^{avg}_{yy})$
#
# $dS = \dot{S}\ dt$
#
# $\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
#
# $S_{q}\ += dS$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Run P4 — with references; nucleation-point receiver skipped
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P4/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
#Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# -
# # P10
#
# $S = \Delta U + \frac{\delta}{\mu}\ (f_{static}\sigma_{n}-f(t)\sigma^{avg}_{yy}) $
#
# $dS = S - S_{t-1}$
#
# $\gamma=\frac{S}{2\delta} P_n(\phi/2\delta)$
#
# $S_{q}\ += dS$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Run P10 — references commented out
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P10/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# -
# # P11
#
# $\dot{S} = \Delta V - \frac{\delta}{\mu}\ \frac{(f(t)-f(t-dt))}{dt}(-\sigma^{avg}_{yy}) $
#
# $dS = \dot{S}dt$
#
# $\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
#
# --------------
#
# $\sigma_{xy} -= 2\mu\ \gamma$
#
# $S_{q}\ += dS$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Run P11 (TransectStressAvg branch) — references overlaid
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P11/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach (TransectStressAvg branch)")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
#Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# +
# Run P13 — references commented out
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/P13/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# -
# # P11
#
# $\dot{S} = \Delta V - \frac{\delta}{\mu}\ \frac{(f(t)-f(t-dt))}{dt}(-\sigma^{avg}_{yy}) $
#
# $dS = \dot{S}dt$
#
# $\gamma=\frac{dS}{2\delta} P_n(\phi/2\delta)$
#
# $S_{q}\ += dS$
#
# $\dot{S}_{q}=dS\ /\ dt$
# +
# Compare the 50 m transect run against the baseline TransectVersion run
# (note FaultYValue differs: 50 for the first run, 100 for the second)
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion50m/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":50,
                  "q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# +
# Same comparison for the 50 m / delta=100 variant
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion50mdelta100/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/TransectVersion/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
                  "FaultY":"FaultY", "FaultYValue":100,
                  "q":0}
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# +
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/PlasticMultiplier/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
"FaultY":"FaultY", "FaultYValue":100,
"q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# +
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/AvgStressPM/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
"FaultY":"FaultY", "FaultYValue":0,
"q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# +
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/AvgStressTrial2/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
"FaultY":"FaultY", "FaultYValue":0,
"q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# +
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/Runs/AvgStressTrial2/"
DataObj = SlipDataObject(Path+"SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX":"FaultX", "FaultXValue":2000,
"FaultY":"FaultY", "FaultYValue":0,
"q":0}
plt.ion()
#[item.PlotReference(axis[0], "Slip", filtering=True) for item in RefList]
#[item.PlotReference(axis[1], "SlipRate", filtering=True) for item in RefList]
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":0})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":2000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":4000})
Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue":6000})
#format_axes(fig)
# -
# # SCP connected Slip slip rate
# +
# Wolfel results: overlay slip/slip-rate curves for four along-fault positions.
Path = "/home/nico/Documents/TEAR/Codes_TEAR/se2dr/WolfelResults/"
DataObj = SlipDataObject(Path + "SlipAtReceiver.txt")
fig, axis = GenericFigAxis()
fig.suptitle("Plastic Multiplier approach")
FilteringSpecs = {"FaultX": "FaultX", "FaultXValue": 2000,
                  "FaultY": "FaultY", "FaultYValue": 100,
                  "q": 0}
plt.ion()
# One curve per receiver position; each iteration overrides the default
# "FaultXValue" carried by FilteringSpecs.
for fault_x in (0, 2000, 4000, 6000):
    Plottings(fig, axis, DataObj, {**FilteringSpecs, "FaultXValue": fault_x})
# -
| PythonCodes/PlasticMultiplier/Pandas_PlasticMultiplier_SlipPlotter_20210521.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Learning to Identify Traffic Signs
#
# ---
# ## Load The Data
# +
# Load pickled data
import pickle

training_file = './traffic-signs-data/train.p'
validation_file = './traffic-signs-data/valid.p'
testing_file = './traffic-signs-data/test.p'


def _unpickle(path):
    """Load one pickled dataset split from `path`."""
    with open(path, mode='rb') as fh:
        return pickle.load(fh)


train = _unpickle(training_file)
valid = _unpickle(validation_file)
test = _unpickle(testing_file)

# Each split is a dict; pull out the image tensors and their labels.
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
# -
# ---
#
# ## Dataset Summary & Exploration
#
# The pickled data is a dictionary with 4 key/value pairs:
#
# - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
# - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
# - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
# - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image.
# ### Basic Summary of the Data Set
# +
# Basic summary statistics of the three dataset splits.
num_train, img_height, img_width, img_depth = train['features'].shape
n_train = num_train
n_validation = len(X_valid)
n_test = len(X_test)
image_shape = (img_height, img_width, img_depth)

# Count the distinct class ids appearing anywhere across the splits.
y_train_set = set(y_train)
y_valid_set = set(y_valid)
y_test_set = set(y_test)
n_classes = len(y_train_set | y_valid_set | y_test_set)

for _label, _value in (
        ("Number of training examples =", n_train),
        ("Number of validation examples =", n_validation),
        ("Number of testing examples =", n_test),
        ("Image data shape =", image_shape),
        ("Number of classes =", n_classes)):
    print(_label, _value)
# -
# ### Sample of each traffic sign
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# %matplotlib inline

# Collect one example image per class id (0-42), searching the training,
# validation and test splits in that order.
signs = dict.fromkeys(range(0, 43))
for k in signs:
    for features, labels in ((X_train, y_train), (X_valid, y_valid), (X_test, y_test)):
        indices = np.where(labels == k)[0]
        # BUG FIX: the original tested `indices.any()`, which is False when
        # the only occurrence of class k sits at array position 0 (the index
        # value 0 is falsy). Test the number of matches instead.
        if indices.size:
            signs[k] = features[indices[0]]
            break

# Show the collected examples on a 9x5 grid, titled by class id.
plt.figure(figsize=(20, 20))
for code, image in signs.items():
    plt.subplot(9, 5, code + 1)
    plt.axis("off")
    plt.subplots_adjust(hspace=0.3)
    plt.title(str(code))
    plt.imshow(image)
# -
# ----
#
# ## Deep Learning Architecture (LeNet-5)
#
# Implementation of a deep neural network to identify traffic signs according to the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
#
# A paper titled ["Traffic Sign Recognition with Multi-Scale Convolutional Networks"](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf) was used as a baseline for the model and the [LeNet-5 architecture](http://yann.lecun.com/exdb/lenet/) by the same author was implemented here to train the model.
# ### Preprocess the Data Set (Normalization and shuffling)
# +
from sklearn.utils import shuffle


def _scale(pixels):
    """Map pixel values in [0, 255] onto floats in [-1, 1]."""
    return (pixels - 127.5) / 127.5


# Normalize the data
X_train = _scale(X_train)
X_valid = _scale(X_valid)
X_test = _scale(X_test)

# Shuffle the training pairs in unison so batches are not class-ordered.
X_train, y_train = shuffle(X_train, y_train)
# -
# ### Model Architecture
# +
import tensorflow as tf
# Training hyper-parameters.
EPOCHS = 50  # full passes over the training set
BATCH_SIZE = 200  # examples per gradient step
# +
from tensorflow.contrib.layers import flatten


def LeNet(x):
    """Build the LeNet-5 graph mapping a 32x32x3 image batch to 43-class logits.

    Weights use truncated-normal initialisation (mean `mu`, stddev `sigma`);
    biases start at zero. Variable creation order matches the original layout.
    """
    mu, sigma = 0, 0.1

    def _weights(shape):
        # Truncated-normal initialised weight variable.
        return tf.Variable(tf.truncated_normal(shape=shape, mean=mu, stddev=sigma))

    def _dense(inputs, n_in, n_out):
        # Fully connected layer: inputs @ W + b.
        return tf.matmul(inputs, _weights((n_in, n_out))) + tf.Variable(tf.zeros(n_out))

    # Layer 1: 5x5 conv, 3 -> 6 channels (32x32x3 -> 28x28x6), ReLU, 2x2 max-pool.
    conv1 = tf.nn.conv2d(x, _weights((5, 5, 3, 6)), strides=[1, 1, 1, 1],
                         padding='VALID') + tf.Variable(tf.zeros(6))
    conv1 = tf.nn.relu(conv1)
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Layer 2: 5x5 conv, 6 -> 16 channels (14x14x6 -> 10x10x16), ReLU, 2x2 max-pool.
    conv2 = tf.nn.conv2d(conv1, _weights((5, 5, 6, 16)), strides=[1, 1, 1, 1],
                         padding='VALID') + tf.Variable(tf.zeros(16))
    conv2 = tf.nn.relu(conv2)
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Flatten 5x5x16 -> 400, then three fully connected layers: 400 -> 120 -> 84 -> 43.
    flat = flatten(conv2)
    fc1 = tf.nn.relu(_dense(flat, 400, 120))
    fc2 = tf.nn.relu(_dense(fc1, 120, 84))
    return _dense(fc2, 84, 43)
# -
# Graph inputs: an image batch and its integer class labels; labels are
# one-hot encoded for the softmax cross-entropy loss.
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
# +
rate = 0.002  # Adam learning rate
logits = LeNet(x)
# Mean softmax cross-entropy over the batch, minimised with Adam.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
# +
# Accuracy graph nodes and a predicted-class node used later for inference.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
prediction = tf.argmax(logits, 1)
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()


def evaluate(X_data, y_data):
    """Return the model's accuracy over (X_data, y_data).

    Evaluated batch by batch; each batch's accuracy is weighted by its size so
    a short final batch does not skew the mean.
    """
    sess = tf.get_default_session()
    n = len(X_data)
    weighted_correct = 0
    for start in range(0, n, BATCH_SIZE):
        stop = start + BATCH_SIZE
        batch_x, batch_y = X_data[start:stop], y_data[start:stop]
        batch_accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        weighted_correct += batch_accuracy * len(batch_x)
    return weighted_correct / n
# -
# ### Train, Validate and Test the Model
# Train for EPOCHS passes over the training data, reporting validation
# accuracy after each epoch and checkpointing the weights to './lenet'.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        # Accuracy on the held-out validation split after this epoch.
        validation_accuracy = evaluate(X_valid, y_valid)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")
# Reload the latest checkpoint and score the untouched test split once.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    test_accuracy = evaluate(X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
# ---
#
# ## Test a Model on New Images
# Images were pulled from the internet and are stored in the **extra_signs** folder.
# ### Load and Output the Images
import glob


# NOTE: `scipy.misc.imread` was removed in SciPy 1.2, so the PNGs are read
# with matplotlib instead. For PNG files `plt.imread` yields float32 values
# in [0, 1] (RGB or RGBA), so convert back to the uint8 RGB layout the rest
# of the pipeline expects. Assumes the extra images are colour PNGs - TODO
# confirm (a greyscale PNG would need an explicit channel expansion).
def _read_sign(path):
    """Read a PNG traffic-sign image as a uint8 RGB array."""
    rgb = plt.imread(path)[..., :3]
    return (rgb * 255).astype(np.uint8)


images = [_read_sign(path) for path in glob.glob('extra_signs/*.png')]
images = np.asarray(images)

# Show the downloaded images on a 3x2 grid.
for i, img in enumerate(images):
    plt.subplot(3, 2, i + 1)
    plt.axis("off")
    plt.imshow(img)
plt.show()
# ### Predict the Sign Type for Each Image
# +
# Preprocess: apply the same [-1, 1] scaling the network was trained on.
X_test2 = ((images - 127.5) / 127.5)
# Hand-labelled ground-truth class ids for the five web images.
y_test2 = np.array([11, 2, 4, 14, 32])

with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    for i, img in enumerate(images):
        # BUG FIX: the original fed the raw uint8 image, but the network was
        # trained on inputs normalized to [-1, 1]; feed the preprocessed
        # sample instead (sliced to keep the leading batch dimension).
        pred = sess.run(prediction, feed_dict={x: X_test2[i:i+1]})
        # Show the raw image with its predicted class id.
        plt.subplot(3, 2, i + 1)
        plt.subplots_adjust(hspace=0.3)
        plt.axis("off")
        plt.title("Prediction: " + str(pred[0]))
        plt.imshow(img)
# -
# ### Analyze Performance
### Calculate the accuracy for these 5 new images.
# Reload the trained weights and score the web images against their
# hand-labelled classes in y_test2.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    test_accuracy = evaluate(X_test2, y_test2)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
# ### Top 5 softmax probabilities for extra images
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    # BUG FIX: feed the normalized images (the network was trained on inputs
    # scaled to [-1, 1]), not the raw uint8 pixels. The label placeholder is
    # not needed to evaluate the logits, so the spurious `y: 0` feed is gone.
    topk = sess.run(tf.nn.top_k(tf.nn.softmax(logits), k=5), feed_dict={x: X_test2})
    for i, v in zip(topk.indices, topk.values):
        print("Index ", i, " has values: ", v)
| traffic_sign_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generating Distributions
# A key activity in Monte Carlo modeling is to sample random numbers from a distribution. Numpy contains implementations for a number of distributions, include uniform (every value between 0 and 1 is equally likely) and normal (values are gaussian distributed with mean 0 and standard deviation 1).
#
# There are other types of distributions one might wish to draw from, and that is where generating distributions come from. This represents the simplest type of "generative model" we can envision - we choose it not because it is a hard problem, but rather because it is a simple way to conceive of the generative modeling problem and develop intuition on the problem.
#
# h/t to https://www.born2data.com/2017/generative_models-part1.html. I drew inspiration from this post as a good way to begin to get a handle on generative models from an intuitive point of view
# ## Theory
# Any distribution can be approximated by Inverse Transform Sampling: https://en.wikipedia.org/wiki/Inverse_transform_sampling
#
# All that is needed is a random uniform sampling function. We assume here that we have a method of generating such a uniform sample (recognizing that approximating uniform distributions is itself an algorithmic process).
#
# (From Wikipedia) Inverse transformation sampling takes uniform samples of a number $u$ between 0 and 1, interpreted as a probability, and then returns the largest number $x$ from the domain of the distribution $P(X)$ such that $P(-\infty <X<x)\leq u$. (End from Wikipedia)
#
# This requires a cumulative distribution function (CDF) and a method of calculating its inverse.
# ## Normal Distribution
# We generate a normal distribution from an input of the uniform normal distribution
#
# Scipy contains a method to calculate the Inverse CDF - $ppf$ or the "Percent Point Function"
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html
from scipy.stats import norm
import numpy as np
# Local implementation of random normal
def randn(size: int, mu: float, sigma: float) -> np.ndarray:
    """Draw `size` samples from N(mu, sigma) by inverse transform sampling.

    Uniform samples on [0, 1) are pushed through the normal distribution's
    inverse CDF (scipy's `ppf`, the percent-point function).

    Returns a 1-D array of length `size`. (The original annotation claimed
    `float`, but an ndarray is what `ppf` produces for array input.)
    """
    rnd = np.random.rand(size)  # uniform random draws in [0, 1)
    return norm(mu, sigma).ppf(rnd)
# ### Validation
# Sanity-check the inverse-CDF round trip on the standard normal.
unit_normal = norm(0, 1)
median_ppf = unit_normal.ppf(0.5)
print("PPF of 0.5 should equal 0:", median_ppf, "\n")
round_trip = unit_normal.cdf(median_ppf)
print("CDF of PPF of should return original value.\nOriginal Value is:", 0.5,
      "\nCDF of PPF is:", round_trip)
# +
# Generate 100,000 random numbers from randn, and compute mean and std
for demo_mu, demo_sigma in ((0, 1), (20, 14)):
    z = randn(100000, demo_mu, demo_sigma)
    print("Mean and Standard Deviation of our Random Normal Distribution\nMean:",
          np.mean(z), "\nStd:", np.std(z), "\n\n")
# -
| distribution-functions/generate_distribution_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mutual Information Metric
#
# The `MutualInformationImageToImageMetric` class computes the mutual information between two images, i.e. the degree to which information content in one image is dependent on the other image. This example shows how `MutualInformationImageToImageMetric` can be used to map affine transformation parameters and register two images using a gradient ascent algorithm.
# +
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from urllib.request import urlretrieve
import itk
from itkwidgets import compare, checkerboard
# -
# ### Retrieve fixed and moving images for registration
#
# We aim to register two slice images, one of which has an arbitrary offset and rotation. We seek to use an affine transform to appropriately rotate and translate the moving image to register with the fixed image.
# Download the two demo slices once; skip when already present on disk.
fixed_image_path = 'fixed.png'
moving_image_path = 'moving.png'
if not os.path.exists(fixed_image_path):
    url = 'https://data.kitware.com/api/v1/file/602c10a22fa25629b97d2896/download'
    urlretrieve(url, fixed_image_path)
if not os.path.exists(moving_image_path):
    url = 'https://data.kitware.com/api/v1/file/602c10a32fa25629b97d28a0/download'
    urlretrieve(url, moving_image_path)
# Read both images as single-precision float ITK images (itk.F).
fixed_image = itk.imread(fixed_image_path, itk.F)
moving_image = itk.imread(moving_image_path, itk.F)
# Interactive checkerboard widget for a first visual comparison.
checkerboard(fixed_image, moving_image)
# ### Prepare images for registration
# Capture the concrete (templated) ITK image type for the filters below.
ImageType = type(fixed_image)
# +
# Normalize both images, then Gaussian-smooth them (variance 2.0) before
# feeding the mutual-information metric.
fixed_normalized_image = itk.normalize_image_filter(fixed_image)
fixed_smoothed_image = itk.discrete_gaussian_image_filter(fixed_normalized_image, variance=2.0)
moving_normalized_image = itk.normalize_image_filter(moving_image)
moving_smoothed_image = itk.discrete_gaussian_image_filter(moving_normalized_image, variance=2.0)
# -
compare(fixed_smoothed_image, moving_smoothed_image)
# ## Plot the `MutualInformationImageToImageMetric` surface
#
# For this relatively simple example we seek to adjust only the x- and y-offset of the moving image with a `TranslationTransform`. We can acquire `MutualInformationImageToImageMetric` values comparing the two images at many different possible offset pairs with `ExhaustiveOptimizer` and visualize this data set as a surface with `matplotlib`.
#
# The affine transform contains six parameters representing each element in an affine matrix `A` which will dictate how the moving image is sampled. We know that the moving image has been translated so we will visualize the two translation parameters, but we could set `X_INDEX` and `Y_INDEX` to visualize any pair of parameters. See [https://en.wikipedia.org/wiki/Affine_transformation#Image_transformation](https://en.wikipedia.org/wiki/Affine_transformation#Image_transformation) for more information on affine transformations.
# Indices of the two translation entries in the 6-parameter affine vector.
X_INDEX = 4 # Translation in the X direction
Y_INDEX = 5 # Translation in the Y direction
# +
# Move at most 20 pixels away from the initial position
window_size = [0] * 6
window_size[X_INDEX] = 20 # Set lower if visualizing elements 0-3
window_size[Y_INDEX] = 20 # Set lower if visualizing elements 0-3
# Collect 50 steps of data along each axis
n_steps = [0] * 6
n_steps[X_INDEX] = 50
n_steps[Y_INDEX] = 50
# +
# Affine transform over double-precision coordinates in the image dimension.
dim = fixed_image.GetImageDimension()
TransformType = itk.AffineTransform[itk.D,dim]
transform = TransformType.New()
# -
# Linear interpolator used to sample the moving image at transformed points.
InterpolatorType = itk.LinearInterpolateImageFunction[ImageType, itk.D]
interpolator = InterpolatorType.New()
# +
# Mutual-information metric (ITK v3-style) with 100 random spatial samples
# per evaluation and a fixed RNG seed for reproducibility.
MetricType = itk.MutualInformationImageToImageMetric[ImageType, ImageType]
metric = MetricType.New()
metric.SetNumberOfSpatialSamples(100)
metric.SetFixedImageStandardDeviation(5.0)
metric.SetMovingImageStandardDeviation(5.0)
metric.ReinitializeSeed(121212)
# +
# Exhaustive optimizer: evaluates the metric on a regular grid rather than
# optimizing, which lets us map out the parametric surface.
ExhaustiveOptimizerType = itk.ExhaustiveOptimizer
optimizer = ExhaustiveOptimizerType.New()
# Map out [n_steps] in each direction
optimizer.SetNumberOfSteps(n_steps)
# Move [window_size / n_steps] units with every step
scales = optimizer.GetScales()
scales.SetSize(6)
for i in range(0,6):
    # Axes with no steps get scale 1 to avoid dividing by zero.
    scales.SetElement(i, (window_size[i] / n_steps[i]) if n_steps[i] != 0 else 1)
optimizer.SetScales(scales)
# +
# Collect data describing the parametric surface with an observer
surface = dict()
def print_iteration():
    # Record parameter-position -> metric-value at every grid evaluation.
    surface[tuple(optimizer.GetCurrentPosition())] = optimizer.GetCurrentValue()
optimizer.AddObserver(itk.IterationEvent(), print_iteration)
# +
# Wire images, transform, interpolator, metric and optimizer into the
# (ITK v3-style) registration method, then run the exhaustive sweep.
RegistrationType = itk.ImageRegistrationMethod[ImageType, ImageType]
registrar = RegistrationType.New()
registrar.SetFixedImage(fixed_smoothed_image)
registrar.SetMovingImage(moving_smoothed_image)
registrar.SetOptimizer(optimizer)
registrar.SetTransform(transform)
registrar.SetInterpolator(interpolator)
registrar.SetMetric(metric)
registrar.SetFixedImageRegion(fixed_image.GetBufferedRegion())
registrar.SetInitialTransformParameters(transform.GetParameters())
# -
registrar.Update()
# +
# Check the extreme positions within the observed window
max_position = list(optimizer.GetMaximumMetricValuePosition())
min_position = list(optimizer.GetMinimumMetricValuePosition())
max_val = optimizer.GetMaximumMetricValue()
min_val = optimizer.GetMinimumMetricValue()
print(max_position)
print(min_position)
# +
# Set up values for the plot
# Per-parameter sorted lists of the distinct values visited by the sweep.
x_vals = [list(set([x[i]
    for x in surface.keys()])) for i in range(0,transform.GetNumberOfParameters())]
for i in range(0, transform.GetNumberOfParameters()):
    x_vals[i].sort()
X, Y = np.meshgrid(x_vals[X_INDEX], x_vals[Y_INDEX])
# Metric values on the (translation-x, translation-y) slice with the affine
# matrix part fixed at identity (1, 0, 0, 1).
# NOTE(review): `x0` iterates the Y-axis values but is placed in parameter
# slot 4 (X_INDEX) - this looks transposed; it only works because the X and
# Y windows are identical. Verify against the plot orientation.
Z = np.array([[surface[(1,0,0,1,x0,x1)] for x1 in x_vals[X_INDEX]]for x0 in x_vals[Y_INDEX]])
# +
# Plot the surface as a 2D heat map
fig = plt.figure()
# Invert the y-axis to represent the image coordinate system
plt.gca().invert_yaxis()
ax = plt.gca()
surf = ax.scatter(X, Y, c=Z, cmap=cm.coolwarm)
# Mark extremes on the plot
ax.plot(max_position[X_INDEX],max_position[Y_INDEX],'k^')
ax.plot(min_position[X_INDEX],min_position[Y_INDEX],'kv')
# +
# Plot the surface as a 3D scatter plot
fig = plt.figure()
# FIX: `fig.gca(projection='3d')` was deprecated in Matplotlib 3.4 and
# removed in 3.6; request the 3-D axes explicitly instead.
ax = fig.add_subplot(projection='3d')
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
# -
# ## Follow gradient ascent
#
# Once we understand the shape of the parametric surface it is easier to visualize the gradient ascent algorithm. We see that there is some roughness to the surface, but it has a clear slope upwards. We want to maximize the mutual information between the two images in order to optimize registration. The results of gradient ascent optimization can be superimposed onto the `matplotlib` plot.
# Fresh transform and interpolator so the ascent starts from identity.
transform = TransformType.New()
interpolator = InterpolatorType.New()
# +
# Same mutual-information metric configuration (and seed) as the sweep above.
metric = MetricType.New()
metric.SetNumberOfSpatialSamples(100)
metric.SetFixedImageStandardDeviation(5.0)
metric.SetMovingImageStandardDeviation(5.0)
metric.ReinitializeSeed(121212)
# +
n_iterations = 200
# Gradient *descent* optimizer run in maximize mode, i.e. gradient ascent on
# the mutual information.
optimizer = itk.GradientDescentOptimizer.New()
optimizer.SetLearningRate(1.0)
optimizer.SetNumberOfIterations(n_iterations)
optimizer.MaximizeOn()
# Set scales so that the optimizer can take
# large steps along translation parameters,
# moderate steps along rotational parameters, and
# small steps along scale parameters
optimizer.SetScales([100,0.5,0.5,100,0.0001,0.0001])
# +
# Record the parameter trajectory; entry 0 is the identity affine start.
descent_data = dict()
descent_data[0] = (1,0,0,1,0,0)
def log_iteration():
    descent_data[optimizer.GetCurrentIteration() + 1] = tuple(optimizer.GetCurrentPosition())
optimizer.AddObserver(itk.IterationEvent(), log_iteration)
# +
# Re-wire the registration with the gradient optimizer and run it.
registrar = RegistrationType.New()
registrar.SetFixedImage(fixed_smoothed_image)
registrar.SetMovingImage(moving_smoothed_image)
registrar.SetTransform(transform)
registrar.SetInterpolator(interpolator)
registrar.SetMetric(metric)
registrar.SetOptimizer(optimizer)
registrar.SetFixedImageRegion(fixed_image.GetBufferedRegion())
registrar.SetInitialTransformParameters(transform.GetParameters())
# -
registrar.Update()
print(f'Its: {optimizer.GetCurrentIteration()}')
print(f'Final Value: {optimizer.GetValue()}')
print(f'Final Position: {list(registrar.GetLastTransformParameters())}')
# Bare expression: displays the recorded trajectory as notebook cell output.
descent_data
# Extract the two translation components of every visited position.
x_vals = [descent_data[i][X_INDEX] for i in range(0,n_iterations)]
y_vals = [descent_data[i][Y_INDEX] for i in range(0,n_iterations)]
# We see in the plot that the metric generally improves as transformation parameters are updated with each iteration, but the final position may not align with the maximum position on the plot. This is one case in which it is difficult to visualize gradient ascent over a hyperdimensional space, where the optimizer is stepping through six parameter dimensions but the 2D plot we collected with `ExhaustiveOptimizer` represents a 'slice' in space with x\[0:4\] fixed at (1,0,0,1). Here it may be more useful to directly compare the two images after registration to evaluate fitness.
# +
fig = plt.figure()
# Note: We invert the y-axis to represent the image coordinate system
plt.gca().invert_yaxis()
ax = plt.gca()
surf = ax.scatter(X, Y, c=Z, cmap=cm.coolwarm)
# White line segments trace the ascent trajectory over the heat map.
for i in range(0,n_iterations-1):
    plt.plot(x_vals[i:i+2],y_vals[i:i+2],'wx-')
# Blue dot: start; red dot: final position; triangles: sweep extremes.
plt.plot(descent_data[0][X_INDEX], descent_data[0][Y_INDEX],'bo')
plt.plot(descent_data[n_iterations-1][X_INDEX],descent_data[n_iterations-1][Y_INDEX],'ro')
plt.plot(max_position[X_INDEX], max_position[Y_INDEX], 'k^')
plt.plot(min_position[X_INDEX], min_position[Y_INDEX], 'kv')
# -
# ### Resample the moving image
#
# In order to apply the results of gradient ascent we must resample the moving image into the domain of the fixed image. The `TranslationTransform` whose parameters have been selected through gradient ascent is used to dictate how the moving image is sampled from the fixed image domain. We can compare the two images with `itkwidgets` to verify that registration is successful.
# Resample the (un-smoothed) moving image into the fixed image's grid using
# the optimized transform; out-of-domain pixels get the value 100.
ResampleFilterType = itk.ResampleImageFilter[ImageType,ImageType]
resample = ResampleFilterType.New(
    Transform=transform,
    Input=moving_image,
    Size=fixed_image.GetLargestPossibleRegion().GetSize(),
    OutputOrigin=fixed_image.GetOrigin(),
    OutputSpacing=fixed_image.GetSpacing(),
    OutputDirection=fixed_image.GetDirection(),
    DefaultPixelValue=100)
resample.Update()
# Visual check of the registered pair.
checkerboard(fixed_image, resample.GetOutput())
# The image comparison shows that the images were successfully translated to overlap, but were not fully rotated to exactly align. If we were to explore further we could use a different optimizer with the metric, such as the `LBFGSBOptimizer` class, which may be more successful in optimizing over a rough parametric surface. We can also explore different metrics such as the `MattesMutualInformationImageToImageMetricv4` class to take advantage of the ITK v4+ registration framework, in contrast with the `MutualInformationImageToImageMetric` used in this example as part of the v3 framework.
# ### Clean up
# Remove the downloaded inputs; guard against a partially-run notebook in
# which a file may never have been created (plain os.remove would raise).
for _path in (fixed_image_path, moving_image_path):
    if os.path.exists(_path):
        os.remove(_path)
| src/Core/Transform/MutualInformationAffine/MutualInformationAffine.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.3
# language: julia
# name: julia-1.6
# ---
# # Basic data structures
#
# ## Arrays
#
# An array is an ordered collection of items. It can hold one type of item or multiple. It is the most common type of data structure and can be thought of like the "lists" from Python.
#
# Homogeneous arrays: Julia infers a concrete element type per literal.
integer_array = [1, 2, 3]
println(integer_array)
println(typeof(integer_array))
float_array = [1.0, 2.0, 3.0]
println(float_array)
println(typeof(float_array))
string_array = ["1", "2", "3"]
println(string_array)
println(typeof(string_array))
bool_array = [true, false, true]
println(bool_array)
println(typeof(bool_array))
char_array = ['a', 'b', 'c']
println(char_array)
println(typeof(char_array))
# Mixing element types widens the array's element type to Any.
mixed_array = [1, "2", true, 'c']
println(mixed_array)
println(typeof(mixed_array))
# Arrays are mutable: replacing an element is allowed (1-based indexing).
mixed_array[1] = 100
println(mixed_array)
println(typeof(mixed_array))
# Nested arrays: a vector of vectors ("matrix-like" structure).
integer_matrix = [[1, 2, 3], [4, 5, 6]]
# BUG FIX: the original printed the undefined name `multi_array`; the
# variable defined above is `integer_matrix`.
println(integer_matrix)
println(typeof(integer_matrix))
# BUG FIX: the second element of the original literal carried an extra pair
# of brackets, giving the two halves different nesting depths; both halves
# are now consistent depth-3 nestings.
integer_tensor = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]
println(integer_tensor)
println(typeof(integer_tensor))
#
# ## Tuples
#
# Tuples are just like arrays, but they are immutable. They are also ordered and can take any type of item. There is also another slight difference between tuples and arrays.
# Tuples: ordered, immutable collections; element types are part of the type.
integer_tuple = (1, 2, 3)
println(integer_tuple)
println(typeof(integer_tuple))
float_tuple = (1.0, 2.0, 3.0)
println(float_tuple)
println(typeof(float_tuple))
# (This cell repeats the previous one in the original notebook.)
float_tuple = (1.0, 2.0, 3.0)
println(float_tuple)
println(typeof(float_tuple))
string_tuple = ("1.0", "2.0", "3.0")
println(string_tuple)
println(typeof(string_tuple))
char_tuple = ('a', 'b', 'c')
println(char_tuple)
println(typeof(char_tuple))
bool_tuple = (true, false, true)
println(bool_tuple)
println(typeof(bool_tuple))
# A tuple's type records each slot's type individually.
mixed_tuple = (1, "2", true, 'c')
println(mixed_tuple)
println(typeof(mixed_tuple))
# Intentionally raises a MethodError: tuples are immutable, so assignment
# into a slot is not defined. Demonstrates the array/tuple difference.
mixed_tuple[1] = 100
# A (mutable) array of tuples versus an (immutable) tuple of tuples.
integer_matrix__array_tuple = [(1, 2, 3), (4, 5, 6)]
println(integer_matrix__array_tuple)
println(typeof(integer_matrix__array_tuple))
integer_matrix__tuple_tuple = ((1, 2, 3), (4, 5, 6))
println(integer_matrix__tuple_tuple)
println(typeof(integer_matrix__tuple_tuple))
# ## Named Tuples
#
# The components of tuples can optionally be named, in which case a named tuple is constructed:
#
# Named tuple: tuple components addressed by field name.
creds = (name="Vasilis", age=29, height=1.82)
creds
creds.name
# Dictionaries: unordered key => value hash maps; values may be any type,
# including tuples.
language = Dict("US" => "English", "Germany" => "German", "Greece" => "Greek", "Switzerland" => ("German", "French", "Italian", "Romanisch"))
language["US"]
language["Switzerland"]
# Dict values are replaceable in place.
language["Greece"] = "Ελληνικά"
language
language["Switzerland"][1]
# We cannot change a value in a tuple
# Intentionally raises a MethodError: the value stored under "Switzerland"
# is a tuple, and tuples are immutable.
language["Switzerland"][1] = "Deutsch"
# Replacing the whole entry works, of course.
language["Germany"] = "Deutsch"
language
@assert language["Germany"] == "Deutsch"
@assert length(float_tuple) == 3
| Day 02/Basic data structures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit ('.venv')
# name: python3
# ---
# # Example usage
#
# In this example we will demonstrate how to use `pycounts` to count the words in a file and plot the results for the 5 most common words.
# ## Imports
from pycounts.pycounts import count_words
from pycounts.plotting import plot_words
# ## Create a text file
# +
# Write a small sample text file for the rest of the example to operate on.
quote = """Insanity is doing the same thing, over and over, and expecting different results!"""
with open("einstein.txt", "w") as f:
    f.write(quote)
# -
# ## Count words
# We can count the occurrences of words in our text file using the `count_words` function. `count_words` will convert the text to lowercase, and remove all common punctuation before counting.
word_counts = count_words("einstein.txt")
print(word_counts)
# ## Plot words
# We can plot the counts for the top five words in the text file using the `plot_words` function. By default `plot_words` will return the top 10 counts. We can override this by explicitly passing a value for `n`.
fig = plot_words(word_counts, n=5)
| docs/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.027632, "end_time": "2020-12-05T21:12:05.307914", "exception": false, "start_time": "2020-12-05T21:12:05.280282", "status": "completed"} tags=[]
# # NYC Taxi Fare Prediction
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 2.351567, "end_time": "2020-12-05T21:12:07.687733", "exception": false, "start_time": "2020-12-05T21:12:05.336166", "status": "completed"} tags=[]
# %matplotlib inline
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
from scipy import stats
from scipy.stats import norm, skew
from sklearn import preprocessing
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor, plot_importance
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
import lightgbm as lgbm
import xgboost as xgb
# List every file available under the Kaggle input directory.
for root, _, names in os.walk('/kaggle/input'):
    for name in names:
        print(os.path.join(root, name))
# + papermill={"duration": 13.985303, "end_time": "2020-12-05T21:12:21.700995", "exception": false, "start_time": "2020-12-05T21:12:07.715692", "status": "completed"} tags=[]
# Load the Kaggle training data (capped at 4M rows to bound memory) and the test set.
df = pd.read_csv('/kaggle/input/new-york-city-taxi-fare-prediction/train.csv', nrows = 4000000)
test_df = pd.read_csv('/kaggle/input/new-york-city-taxi-fare-prediction/test.csv')
df.shape, test_df.shape  # cell output: sanity-check row/column counts
# + papermill={"duration": 0.05838, "end_time": "2020-12-05T21:12:21.786794", "exception": false, "start_time": "2020-12-05T21:12:21.728414", "status": "completed"} tags=[]
df.head()  # preview the first rows of the training data
# + papermill={"duration": 0.976155, "end_time": "2020-12-05T21:12:22.791291", "exception": false, "start_time": "2020-12-05T21:12:21.815136", "status": "completed"} tags=[]
# Fraction of missing values per column (sorted by column name).
df.isnull().sum().sort_index()/len(df)
# + papermill={"duration": 0.401679, "end_time": "2020-12-05T21:12:23.221446", "exception": false, "start_time": "2020-12-05T21:12:22.819767", "status": "completed"} tags=[]
# Drop rows that are missing dropoff coordinates.
df.dropna(subset=['dropoff_latitude', 'dropoff_longitude'], inplace = True)
# + papermill={"duration": 0.986638, "end_time": "2020-12-05T21:12:24.236872", "exception": false, "start_time": "2020-12-05T21:12:23.250234", "status": "completed"} tags=[]
df.describe()  # summary statistics, used to spot outliers before cleaning
# + papermill={"duration": 1.459676, "end_time": "2020-12-05T21:12:25.731911", "exception": false, "start_time": "2020-12-05T21:12:24.272235", "status": "completed"} tags=[]
# Remove fare outliers: below $2.50 (presumably the NYC minimum fare — verify)
# and above $500 (treated as data errors).
df.drop(df[df['fare_amount'] < 2.5].index, axis=0, inplace = True)
df.drop(df[df['fare_amount'] > 500].index, axis=0, inplace = True)
# + papermill={"duration": 0.063546, "end_time": "2020-12-05T21:12:25.825867", "exception": false, "start_time": "2020-12-05T21:12:25.762321", "status": "completed"} tags=[]
test_df.describe()  # inspect test-set ranges (the test set itself is never filtered)
# + papermill={"duration": 0.109675, "end_time": "2020-12-05T21:12:25.966643", "exception": false, "start_time": "2020-12-05T21:12:25.856968", "status": "completed"} tags=[]
# Inspect trips reporting more than 5 passengers before they are dropped.
df[df['passenger_count'] > 5].sort_values('passenger_count')
# + papermill={"duration": 4.881202, "end_time": "2020-12-05T21:12:30.881153", "exception": false, "start_time": "2020-12-05T21:12:25.999951", "status": "completed"} tags=[]
# Remove rows with clearly invalid data. Coordinates equal to 0 are presumably
# unset/placeholder values rather than real locations (0°N 0°W is in the ocean).
df.drop(df[df['pickup_longitude'] == 0].index, axis=0, inplace = True)
df.drop(df[df['pickup_latitude'] == 0].index, axis=0, inplace = True)
df.drop(df[df['dropoff_longitude'] == 0].index, axis=0, inplace = True)
df.drop(df[df['dropoff_latitude'] == 0].index, axis=0, inplace = True)
df.drop(df[df['passenger_count'] == 208].index, axis=0, inplace = True)  # impossible passenger count
df.drop(df[df['passenger_count'] > 5].index, axis=0, inplace = True)     # more than a cab holds
df.drop(df[df['passenger_count'] == 0].index, axis=0, inplace = True)    # trips with no passengers
# + papermill={"duration": 688.44216, "end_time": "2020-12-05T21:23:59.355251", "exception": false, "start_time": "2020-12-05T21:12:30.913091", "status": "completed"} tags=[]
# Parse the timestamp strings into datetime columns. The raw test keys are
# saved first — they are needed later to build the submission file.
df['key'] = pd.to_datetime(df['key'])
key = test_df.key
test_df['key'] = pd.to_datetime(test_df['key'])
df['pickup_datetime'] = pd.to_datetime(df['pickup_datetime'])
test_df['pickup_datetime'] = pd.to_datetime(test_df['pickup_datetime'])
# + papermill={"duration": 2.750646, "end_time": "2020-12-05T21:24:02.139722", "exception": false, "start_time": "2020-12-05T21:23:59.389076", "status": "completed"} tags=[]
# Derive calendar/time features from the pickup timestamp for the train and
# test frames alike, then drop the raw datetime and key columns.
for frame in (df, test_df):
    pickup = frame['pickup_datetime'].dt
    frame['Year'] = pickup.year
    frame['Month'] = pickup.month
    frame['Date'] = pickup.day
    frame['Day of Week'] = pickup.dayofweek
    frame['Hour'] = pickup.hour
    frame.drop('pickup_datetime', axis=1, inplace=True)
    frame.drop('key', axis=1, inplace=True)
# + papermill={"duration": 1.422088, "end_time": "2020-12-05T21:24:03.594292", "exception": false, "start_time": "2020-12-05T21:24:02.172204", "status": "completed"} tags=[]
df.dropna(inplace=True)
# Keep only trips inside a bounding box around NYC
# (longitude -75..-72, latitude 40..42); rows outside it are discarded.
df.drop(df.index[(df.pickup_longitude < -75) |
                 (df.pickup_longitude > -72) |
                 (df.pickup_latitude < 40) |
                 (df.pickup_latitude > 42)],inplace=True)
df.drop(df.index[(df.dropoff_longitude < -75) |
                 (df.dropoff_longitude > -72) |
                 (df.dropoff_latitude < 40) |
                 (df.dropoff_latitude > 42)],inplace=True)
# + papermill={"duration": 1.524487, "end_time": "2020-12-05T21:24:05.152481", "exception": false, "start_time": "2020-12-05T21:24:03.627994", "status": "completed"} tags=[]
df.describe()  # verify the cleaning steps above took effect
# + papermill={"duration": 0.304426, "end_time": "2020-12-05T21:24:05.490663", "exception": false, "start_time": "2020-12-05T21:24:05.186237", "status": "completed"} tags=[]
import geopy.distance
def geodesic_dist(trip):
    """Geodesic (ellipsoidal) trip distance in miles between pickup and dropoff.

    ``trip`` is a row with pickup/dropoff latitude/longitude fields.
    Returns np.nan if geopy rejects the coordinates with a ValueError.
    """
    pickup_lat = trip['pickup_latitude']
    pickup_long = trip['pickup_longitude']
    dropoff_lat = trip['dropoff_latitude']
    dropoff_long = trip['dropoff_longitude']
    # Bug fix: the distance computation must be inside the try block.
    # Previously it ran *before* `try` (only the bare `return` was guarded),
    # so a ValueError from geopy was never actually caught.
    try:
        return geopy.distance.geodesic((pickup_lat, pickup_long),
                                       (dropoff_lat, dropoff_long)).miles
    except ValueError:
        return np.nan
def circle_dist(trip):
    """Great-circle (spherical) trip distance in miles between pickup and dropoff.

    ``trip`` is a row with pickup/dropoff latitude/longitude fields.
    Returns np.nan if geopy rejects the coordinates with a ValueError.
    """
    pickup_lat = trip['pickup_latitude']
    pickup_long = trip['pickup_longitude']
    dropoff_lat = trip['dropoff_latitude']
    dropoff_long = trip['dropoff_longitude']
    # Bug fix: the computation must sit inside the try block — previously the
    # distance was computed before `try`, so a ValueError was never caught.
    try:
        return geopy.distance.great_circle((pickup_lat, pickup_long),
                                           (dropoff_lat, dropoff_long)).miles
    except ValueError:
        return np.nan
# + papermill={"duration": 0.063972, "end_time": "2020-12-05T21:24:05.588484", "exception": false, "start_time": "2020-12-05T21:24:05.524512", "status": "completed"} tags=[]
def _dropoff_dist(trip, lat, long):
    """Geodesic distance in miles from the trip's dropoff point to (lat, long).

    Shared helper that removes the nine-fold duplication of the original
    landmark functions; every public *_dist function below keeps its original
    name, signature, and behavior.
    """
    dropoff_lat = trip['dropoff_latitude']
    dropoff_long = trip['dropoff_longitude']
    return geopy.distance.geodesic((dropoff_lat, dropoff_long), (lat, long)).miles

# Landmark coordinates below are taken verbatim from the original functions;
# the names are inferred from the abbreviations (verify against the source).
def jfk_dist(trip):
    """Miles from dropoff to JFK airport."""
    return _dropoff_dist(trip, 40.6413, -73.7781)

def lga_dist(trip):
    """Miles from dropoff to LaGuardia airport."""
    return _dropoff_dist(trip, 40.7769, -73.8740)

def ewr_dist(trip):
    """Miles from dropoff to Newark (EWR) airport."""
    return _dropoff_dist(trip, 40.6895, -74.1745)

def tsq_dist(trip):
    """Miles from dropoff to Times Square."""
    return _dropoff_dist(trip, 40.7580, -73.9855)

def cpk_dist(trip):
    """Miles from dropoff to Central Park."""
    return _dropoff_dist(trip, 40.7812, -73.9665)

def lib_dist(trip):
    """Miles from dropoff to the Statue of Liberty."""
    return _dropoff_dist(trip, 40.6892, -74.0445)

def gct_dist(trip):
    """Miles from dropoff to Grand Central Terminal."""
    return _dropoff_dist(trip, 40.7527, -73.9772)

def met_dist(trip):
    """Miles from dropoff to the Metropolitan Museum of Art."""
    return _dropoff_dist(trip, 40.7794, -73.9632)

def wtc_dist(trip):
    """Miles from dropoff to the World Trade Center."""
    return _dropoff_dist(trip, 40.7126, -74.0099)
# + papermill={"duration": 0.046252, "end_time": "2020-12-05T21:24:05.668770", "exception": false, "start_time": "2020-12-05T21:24:05.622518", "status": "completed"} tags=[]
def optimize_floats(df):
    """Downcast every float64 column of ``df`` to the narrowest float dtype, in place."""
    for column in df.select_dtypes(include=['float64']).columns:
        df[column] = pd.to_numeric(df[column], downcast='float')
    return df

def optimize_ints(df):
    """Downcast every int64 column of ``df`` to the narrowest integer dtype, in place."""
    for column in df.select_dtypes(include=['int64']).columns:
        df[column] = pd.to_numeric(df[column], downcast='integer')
    return df

def optimize(df):
    """Shrink ``df``'s memory footprint by downcasting integer and float columns."""
    return optimize_floats(optimize_ints(df))
# + papermill={"duration": 0.986928, "end_time": "2020-12-05T21:24:06.689588", "exception": false, "start_time": "2020-12-05T21:24:05.702660", "status": "completed"} tags=[]
# Downcast numeric columns in both frames to reduce memory usage.
df = optimize(df)
test_df = optimize(test_df)
# + papermill={"duration": 0.051555, "end_time": "2020-12-05T21:24:06.775311", "exception": false, "start_time": "2020-12-05T21:24:06.723756", "status": "completed"} tags=[]
def calc_dists(df):
    """Append all distance features: trip length (geodesic/great-circle) and
    dropoff-to-landmark distances. Returns the mutated frame."""
    feature_funcs = (
        ('geodesic', geodesic_dist),
        ('circle', circle_dist),
        ('jfk', jfk_dist),
        ('lga', lga_dist),
        ('ewr', ewr_dist),
        ('tsq', tsq_dist),
        ('cpk', cpk_dist),
        ('lib', lib_dist),
        ('gct', gct_dist),
        ('met', met_dist),
        ('wtc', wtc_dist),
    )
    # Row-wise apply, one new column per distance function, in the same
    # order the original created them.
    for column, func in feature_funcs:
        df[column] = df.apply(func, axis=1)
    return df
# + papermill={"duration": 14573.608646, "end_time": "2020-12-06T01:27:00.419028", "exception": false, "start_time": "2020-12-05T21:24:06.810382", "status": "completed"} tags=[]
# Compute all distance features. Row-wise apply over millions of rows is slow —
# the recorded papermill timing for this cell is ~14,574 s (about 4 hours).
df = calc_dists(df)
test_df = calc_dists(test_df)
# + papermill={"duration": 0.041542, "end_time": "2020-12-06T01:27:00.495855", "exception": false, "start_time": "2020-12-06T01:27:00.454313", "status": "completed"} tags=[]
# df.drop(['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude'],axis=1,inplace=True)
# test_df.drop(['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude'],axis=1,inplace=True)
# + papermill={"duration": 5.341598, "end_time": "2020-12-06T01:27:05.872552", "exception": false, "start_time": "2020-12-06T01:27:00.530954", "status": "completed"} tags=[]
# Correlation heatmap of the feature columns (target column excluded).
plt.figure(figsize=(10, 8))
sns.heatmap(df.drop('fare_amount', axis=1).corr(), square=True)
plt.suptitle('Pearson Correlation Heatmap')
plt.show();
# + papermill={"duration": 11.027235, "end_time": "2020-12-06T01:27:16.941539", "exception": false, "start_time": "2020-12-06T01:27:05.914304", "status": "completed"} tags=[]
# Fit a normal distribution to the geodesic distance and compare the empirical
# distribution against it: histogram with fitted curve plus a Q-Q plot.
(mu, sigma) = norm.fit(df['geodesic'])
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(19, 5))
ax1 = sns.distplot(df['geodesic'] , fit=norm, ax=ax1)
# Fix: raw f-string — '\m' and '\s' are invalid escape sequences in a normal
# string literal (SyntaxWarning, a future SyntaxError); rendered text unchanged.
ax1.legend([rf'Normal distribution ($\mu=$ {mu:.3f} and $\sigma=$ {sigma:.3f})'], loc='best')
ax1.set_ylabel('Frequency')
ax1.set_title('Distance Distribution')
ax2 = stats.probplot(df['geodesic'], plot=plt)
f.show();
# + papermill={"duration": 9.684636, "end_time": "2020-12-06T01:27:26.663529", "exception": false, "start_time": "2020-12-06T01:27:16.978893", "status": "completed"} tags=[]
# Same diagnostic for the target: fitted normal vs. empirical fare distribution.
(mu, sigma) = norm.fit(df['fare_amount'])
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(19, 5))
ax1 = sns.distplot(df['fare_amount'] , fit=norm, ax=ax1)
# Fix: raw f-string — '\m' and '\s' are invalid escape sequences in a normal
# string literal (SyntaxWarning, a future SyntaxError); rendered text unchanged.
ax1.legend([rf'Normal distribution ($\mu=$ {mu:.3f} and $\sigma=$ {sigma:.3f})'], loc='best')
ax1.set_ylabel('Frequency')
ax1.set_title('Fare Distribution')
ax2 = stats.probplot(df['fare_amount'], plot=plt)
f.show();
# + papermill={"duration": 2.977763, "end_time": "2020-12-06T01:27:29.680498", "exception": false, "start_time": "2020-12-06T01:27:26.702735", "status": "completed"} tags=[]
df.describe()  # summary stats including the new distance features
# + papermill={"duration": 1.048161, "end_time": "2020-12-06T01:27:30.769996", "exception": false, "start_time": "2020-12-06T01:27:29.721835", "status": "completed"} tags=[]
# Re-run downcasting: the newly added distance columns were created as float64.
df = optimize(df)
test_df = optimize(test_df)
# + papermill={"duration": 0.052067, "end_time": "2020-12-06T01:27:30.862472", "exception": false, "start_time": "2020-12-06T01:27:30.810405", "status": "completed"} tags=[]
df.dtypes  # confirm the downcast dtypes
# + papermill={"duration": 1.447538, "end_time": "2020-12-06T01:27:32.350988", "exception": false, "start_time": "2020-12-06T01:27:30.903450", "status": "completed"} tags=[]
# Separate features from the target, then make an 80/20 train/validation split
# with a fixed seed for reproducibility.
X, y = df.drop('fare_amount', axis = 1), df['fare_amount']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=12)
# + papermill={"duration": 15770.099942, "end_time": "2020-12-06T05:50:22.492659", "exception": false, "start_time": "2020-12-06T01:27:32.392717", "status": "completed"} tags=[]
# Wrap the splits in XGBoost's DMatrix format; dtest has no labels (predictions only).
dtrain = xgb.DMatrix(X_train, label=y_train)
dvalid = xgb.DMatrix(X_test, label=y_test)
dtest = xgb.DMatrix(test_df)
# Both sets are watched so train/valid RMSE are reported during training.
watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
xgb_params = {
    'min_child_weight': 1,
    'learning_rate': 0.05,
    'colsample_bytree': 0.7,
    'max_depth': 10,
    'subsample': 0.7,
    'n_estimators': 5000,  # NOTE(review): sklearn-API parameter — presumably ignored by xgb.train; the round count comes from the 700 below. Confirm.
    'n_jobs': -1,
    'booster' : 'gbtree',
    'silent': 1,
    'eval_metric': 'rmse'}
# Train for up to 700 rounds, stopping early if validation RMSE fails to
# improve for 100 rounds; progress is logged every 50 rounds.
model = xgb.train(xgb_params, dtrain, 700, watchlist, early_stopping_rounds=100, maximize=False, verbose_eval=50)
# + papermill={"duration": 531.022374, "end_time": "2020-12-06T05:59:13.565707", "exception": false, "start_time": "2020-12-06T05:50:22.543333", "status": "completed"} tags=[]
# Evaluate the model on both the training and the validation split.
y_train_pred = model.predict(dtrain)
y_pred = model.predict(dvalid)
# Bug fix: r2_score's signature is (y_true, y_pred) and R^2 is NOT symmetric;
# the train-split call had the arguments swapped, reporting a wrong score.
print('Train r2 score: ', r2_score(y_train, y_train_pred))
print('Test r2 score: ', r2_score(y_test, y_pred))
# mean_squared_error is symmetric, but pass (y_true, y_pred) for consistency.
train_rmse = np.sqrt(mean_squared_error(y_train, y_train_pred))
test_rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print(f'Train RMSE: {train_rmse:.4f}')
print(f'Test RMSE: {test_rmse:.4f}')
# + papermill={"duration": 1.421211, "end_time": "2020-12-06T05:59:15.036852", "exception": false, "start_time": "2020-12-06T05:59:13.615641", "status": "completed"} tags=[]
# Predict fares for the test set and write the Kaggle submission file, keyed
# by the raw `key` values saved before datetime parsing.
test_preds = model.predict(dtest)
# + papermill={"duration": 0.261836, "end_time": "2020-12-06T05:59:15.348737", "exception": false, "start_time": "2020-12-06T05:59:15.086901", "status": "completed"} tags=[]
# Fix: the original re-ran model.predict(dtest) here; the redundant second
# prediction is removed — the result is identical.
submission = pd.DataFrame(
    {'key': key, 'fare_amount': test_preds},
    columns = ['key', 'fare_amount'])
submission.to_csv('submission1.csv', index = False)
# + papermill={"duration": 0.149137, "end_time": "2020-12-06T05:59:15.548649", "exception": false, "start_time": "2020-12-06T05:59:15.399512", "status": "completed"} tags=[]
| nyc-taxi-fare.ipynb |