code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp models.ROCKET
# -
# # ROCKET
#
# > ROCKET (RandOm Convolutional KErnel Transform) functions for univariate and multivariate time series.
#export
from tsai.imports import *
from tsai.data.external import *
from tsai.models.layers import *
#export
from sklearn.linear_model import RidgeClassifierCV, RidgeCV
from sktime.transformations.panel.rocket import Rocket
from numba import njit, prange
# +
#export
# This is an unofficial ROCKET implementation in Pytorch developed by <NAME> - <EMAIL> based on:
# <NAME>, <NAME>, <NAME>
# <NAME>, <NAME>, <NAME> (2019) ROCKET: Exceptionally fast and
# accurate time series classification using random convolutional kernels.
# arXiv:1910.13051
# Official repo: https://github.com/angus924/rocket
# changes:
# - added kss parameter to generate_kernels
# - convert X to np.float64
def generate_kernels(input_length, num_kernels, kss=[7, 9, 11], pad=True, dilate=True):
    """Draw `num_kernels` random ROCKET kernels for series of length `input_length`.

    Each kernel gets a length sampled from `kss`, N(0,1) weights (mean-centred),
    a bias ~ U(-1, 1), an exponentially-sampled dilation (when `dilate`) and,
    half of the time (when `pad`), "same"-style zero padding.

    Returns (weights, lengths, biases, dilations, paddings); only the first
    lengths[k] entries of weights[k] are meaningful.
    """
    ks_candidates = np.array((kss))
    max_len = ks_candidates.max()
    weights = np.zeros((num_kernels, max_len))
    lengths = np.zeros(num_kernels, dtype=np.int32)
    biases = np.zeros(num_kernels)
    dilations = np.zeros(num_kernels, dtype=np.int32)
    paddings = np.zeros(num_kernels, dtype=np.int32)
    for k in range(num_kernels):
        ks = np.random.choice(ks_candidates)
        w = np.random.normal(0, 1, ks)
        b = np.random.uniform(-1, 1)
        if dilate:
            # cap the dilation so the dilated kernel span still fits the series
            d = 2 ** np.random.uniform(0, np.log2((input_length - 1) // (ks - 1)))
        else:
            d = 1
        # pad ("same"-style) with probability 1/2 when padding is enabled
        if pad and np.random.randint(2) == 1:
            p = ((ks - 1) * d) // 2
        else:
            p = 0
        weights[k, :ks] = w - w.mean()  # zero-mean weights as in the paper
        lengths[k], biases[k], dilations[k], paddings[k] = ks, b, d, p
    return weights, lengths, biases, dilations, paddings
@njit(fastmath = True)
def apply_kernel(X, weights, length, bias, dilation, padding):
    """Convolve a single 1d series X with one kernel and return the two
    ROCKET features: (proportion of positive values, global max)."""
    # zero padding on both sides
    if padding > 0:
        _input_length = len(X)
        _X = np.zeros(_input_length + (2 * padding))
        _X[padding:(padding + _input_length)] = X
        X = _X
    input_length = len(X)
    output_length = input_length - ((length - 1) * dilation)
    _ppv = 0  # "proportion of positive values"
    # fix: np.NINF was removed in NumPy 2.0; -np.inf is the identical value
    _max = -np.inf
    for i in range(output_length):
        _sum = bias
        for j in range(length):
            _sum += weights[j] * X[i + (j * dilation)]
        if _sum > 0:
            _ppv += 1
        if _sum > _max:
            _max = _sum
    return _ppv / output_length, _max
@njit(parallel = True, fastmath = True)
def apply_kernels(X, kernels):
    """Transform a 2d batch of univariate series, shape (samples, len), into
    ROCKET features: 2 features per kernel -> output shape (samples, 2 * num_kernels)."""
    X = X.astype(np.float64)  # the numba kernels operate on float64
    weights, lengths, biases, dilations, paddings = kernels
    num_examples = len(X)
    num_kernels = len(weights)
    # initialise output
    _X = np.zeros((num_examples, num_kernels * 2)) # 2 features per kernel
    for i in prange(num_examples):  # parallel loop over samples
        for j in range(num_kernels):
            # only the first lengths[j] weights of row j are meaningful
            _X[i, (j * 2):((j * 2) + 2)] = \
                apply_kernel(X[i], weights[j][:lengths[j]], lengths[j], biases[j], dilations[j], paddings[j])
    return _X
# +
#hide
# Smoke test of the numba implementation on a univariate UCR dataset.
X_train, y_train, X_valid, y_valid = get_UCR_data('OliveOil')
seq_len = X_train.shape[-1]
# keep only the first (and only) channel; the numba kernels expect 2d float64
X_train = X_train[:, 0].astype(np.float64)
X_valid = X_valid[:, 0].astype(np.float64)
# map string labels to consecutive integer ids
labels = np.unique(y_train)
transform = {}
for i, l in enumerate(labels): transform[l] = i
y_train = np.vectorize(transform.get)(y_train).astype(np.int32)
y_valid = np.vectorize(transform.get)(y_valid).astype(np.int32)
# per-sample standardization (the small eps avoids division by zero)
X_train = (X_train - X_train.mean(axis = 1, keepdims = True)) / (X_train.std(axis = 1, keepdims = True) + 1e-8)
X_valid = (X_valid - X_valid.mean(axis = 1, keepdims = True)) / (X_valid.std(axis = 1, keepdims = True) + 1e-8)
# only univariate time series of shape (samples, len)
kernels = generate_kernels(seq_len, 10000)
X_train_tfm = apply_kernels(X_train, kernels)
X_valid_tfm = apply_kernels(X_valid, kernels)
# NOTE(review): the `normalize=` parameter was removed from RidgeClassifierCV in
# scikit-learn 1.2; on recent sklearn this needs a StandardScaler pipeline — verify.
classifier = RidgeClassifierCV(alphas=np.logspace(-3, 3, 7), normalize=True)
classifier.fit(X_train_tfm, y_train)
score = classifier.score(X_valid_tfm, y_valid)
test_eq(ge(score,.9), True)
# -
#export
class ROCKET(nn.Module):
    """RandOm Convolutional KErnel Transform

    ROCKET is a GPU Pytorch implementation of the ROCKET functions generate_kernels
    and apply_kernels that can be used with univariate and multivariate time series.
    """
    def __init__(self, c_in, seq_len, n_kernels=10_000, kss=[7, 9, 11], device=None, verbose=False):
        '''
        Input: is a 3d torch tensor of type torch.float32. When used with univariate TS,
        make sure you transform the 2d to 3d by adding unsqueeze(1).
        c_in: number of channels or features. For univariate c_in is 1.
        seq_len: sequence length
        '''
        super().__init__()
        device = ifnone(device, default_device())
        # discard kernel sizes that would not fit within the sequence
        kss = [ks for ks in kss if ks < seq_len]
        convs = nn.ModuleList()
        for i in range(n_kernels):
            ks = np.random.choice(kss)
            # exponentially-sampled dilation, capped so the dilated kernel span fits
            dilation = 2**np.random.uniform(0, np.log2((seq_len - 1) // (ks - 1)))
            # pad ("same"-style) half of the time, no padding otherwise
            padding = int((ks - 1) * dilation // 2) if np.random.randint(2) == 1 else 0
            weight = torch.randn(1, c_in, ks)
            weight -= weight.mean()  # zero-mean weights as in the paper
            bias = 2 * (torch.rand(1) - .5)  # bias ~ U(-1, 1)
            # NOTE(review): Conv1d pads BOTH sides by its `padding` arg, so
            # `padding=2 * padding` pads twice what the numba implementation
            # uses — confirm this doubling is intended.
            layer = nn.Conv1d(c_in, 1, ks, padding=2 * padding, dilation=int(dilation), bias=True)
            # kernels are random and frozen: no gradients flow through them
            layer.weight = torch.nn.Parameter(weight, requires_grad=False)
            layer.bias = torch.nn.Parameter(bias, requires_grad=False)
            convs.append(layer)
        self.convs = convs
        self.n_kernels = n_kernels
        self.kss = kss
        self.to(device=device)
        self.verbose = verbose

    def forward(self, x):
        # returns 2 features (max, ppv) per kernel -> shape (batch, 2 * n_kernels)
        _output = []
        for i in progress_bar(range(self.n_kernels), display=self.verbose, leave=False, comment='kernel/kernels'):
            out = self.convs[i](x).cpu()
            _max = out.max(dim=-1)[0]  # global max pooling
            # proportion of positive values along the time axis
            _ppv = torch.gt(out, 0).sum(dim=-1).float() / out.shape[-1]
            _output.append(_max)
            _output.append(_ppv)
        return torch.cat(_output, dim=1)
# +
#export
def create_rocket_features(dl, model, verbose=False):
    """Run `model` over every batch of `dl` and collect the outputs.

    Args:
        dl : single TSDataLoader (for example dls.train or dls.valid)
        model : ROCKET model instance

    Returns a tuple of numpy arrays: (features, targets).
    """
    feats, targs = [], []
    batches = progress_bar(dl, display=verbose, leave=False, comment='batch/batches')
    for xb, yb in batches:
        feats.append(model(xb).cpu())
        targs.append(yb.cpu())
    return torch.cat(feats).numpy(), torch.cat(targs).numpy()
get_rocket_features = create_rocket_features
# +
# Quick shape test of the Pytorch ROCKET module on random multivariate data.
bs = 16
c_in = 7  # aka channels, features, variables, dimensions
c_out = 2
seq_len = 15
xb = torch.randn(bs, c_in, seq_len).to(default_device())
m = ROCKET(c_in, seq_len, n_kernels=1_000, kss=[7, 9, 11]) # 1_000 for testing with a cpu. Default is 10k with a gpu!
# 2 features (max, ppv) per kernel -> 2_000 output features
test_eq(m(xb).shape, [bs, 2_000])
# -
from tsai.data.all import *
from tsai.models.utils import *
# End-to-end: extract ROCKET features from both dataloaders of a TSDataLoaders.
X, y, splits = get_UCR_data('OliveOil', split_data=False)
tfms = [None, TSRegression()]
batch_tfms = TSStandardize(by_var=True)
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, shuffle_train=False, drop_last=False)
model = build_ts_model(ROCKET, dls=dls, n_kernels=1_000) # 1_000 for testing with a cpu. Default is 10k with a gpu!
X_train, y_train = create_rocket_features(dls.train, model)
X_valid, y_valid = create_rocket_features(dls.valid, model)
X_train.shape, X_valid.shape
#export
class RocketClassifier(sklearn.pipeline.Pipeline):
    """Time series classification using ROCKET features and a linear classifier"""
    def __init__(self, num_kernels=10_000, normalize_input=True, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, class_weight=None, **kwargs):
        """
        RocketClassifier is recommended for up to 10k time series.
        For a larger dataset, you can use ROCKET (in Pytorch).
        scoring = None --> defaults to accuracy.

        Rocket args:
            num_kernels : int, number of random convolutional kernels (default 10,000)
            normalize_input : boolean, whether or not to normalise the input time series per instance (default True)
            random_state : int (ignored unless int due to compatability with Numba), random seed (optional, default None)
        """
        self.steps = [('rocket', Rocket(num_kernels=num_kernels, normalise=normalize_input, random_state=random_state)),
                      ('ridgeclassifiercv', RidgeClassifierCV(alphas=alphas, normalize=normalize_features, scoring=scoring,
                                                              class_weight=class_weight, **kwargs))]
        store_attr()
        self._validate_steps()

    def __repr__(self):
        return f'Pipeline(steps={self.steps.copy()})'

    def save(self, fname='Rocket', path='./models'):
        """Pickle this pipeline to `path/fname.pkl`."""
        path = Path(path)
        path.mkdir(parents=True, exist_ok=True)  # robustness: ensure target dir exists
        filename = path/fname
        # bug fix: `filename` was computed but a hard-coded file name was opened,
        # silently ignoring both `fname` and `path`
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
#export
def load_rocket(fname='Rocket', path='./models'):
    """Load a pickled Rocket pipeline previously stored with `save`.

    Args:
        fname: file name without the .pkl extension.
        path: directory containing the pickle (str or Path).

    Returns the unpickled object.
    """
    path = Path(path)
    filename = path/fname
    # bug fix: `filename` was computed but a hard-coded file name was opened,
    # silently ignoring both `fname` and `path`
    with open(f'{filename}.pkl', 'rb') as f:
        return pickle.load(f)
#export
class RocketRegressor(sklearn.pipeline.Pipeline):
    """Time series regression using ROCKET features and a linear regressor"""
    def __init__(self, num_kernels=10_000, normalize_input=True, random_state=None,
                 alphas=np.logspace(-3, 3, 7), normalize_features=True, memory=None, verbose=False, scoring=None, **kwargs):
        """
        RocketRegressor is recommended for up to 10k time series.
        For a larger dataset, you can use ROCKET (in Pytorch).
        scoring = None --> defaults to r2.

        Args:
            num_kernels : int, number of random convolutional kernels (default 10,000)
            normalize_input : boolean, whether or not to normalise the input time series per instance (default True)
            random_state : int (ignored unless int due to compatability with Numba), random seed (optional, default None)
        """
        self.steps = [('rocket', Rocket(num_kernels=num_kernels, normalise=normalize_input, random_state=random_state)),
                      ('ridgecv', RidgeCV(alphas=alphas, normalize=normalize_features, scoring=scoring, **kwargs))]
        store_attr()
        self._validate_steps()

    def __repr__(self):
        return f'Pipeline(steps={self.steps.copy()})'

    def save(self, fname='Rocket', path='./models'):
        """Pickle this pipeline to `path/fname.pkl`."""
        path = Path(path)
        path.mkdir(parents=True, exist_ok=True)  # robustness: ensure target dir exists
        filename = path/fname
        # bug fix: `filename` was computed but a hard-coded file name was opened,
        # silently ignoring both `fname` and `path`
        with open(f'{filename}.pkl', 'wb') as output:
            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
# Univariate classification with sklearn-type API
dsid = 'OliveOil'
fname = 'RocketClassifier'
X_train, y_train, X_test, y_test = get_UCR_data(dsid, Xdtype='float64')
cls = RocketClassifier()
cls.fit(X_train, y_train)
# round-trip through disk to exercise save/load
cls.save(fname)
del cls
cls = load_rocket(fname)
print(cls.score(X_test, y_test))
# Multivariate classification with sklearn-type API
dsid = 'NATOPS'
fname = 'RocketClassifier'
X_train, y_train, X_test, y_test = get_UCR_data(dsid, Xdtype='float64')
cls = RocketClassifier()
cls.fit(X_train, y_train)
cls.save(fname)
del cls
cls = load_rocket(fname)
print(cls.score(X_test, y_test))
# Univariate regression with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'Covid3Month'
fname = 'RocketRegressor'
X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid, Xdtype='float64')
if X_train is not None:
    # NOTE(review): despite its name, rmse_scorer wraps plain mean_squared_error,
    # so model selection optimises (negative) MSE, not RMSE — confirm intent.
    rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
    reg = RocketRegressor(scoring=rmse_scorer)
    reg.fit(X_train, y_train)
    # round-trip through disk to exercise save/load
    reg.save(fname)
    del reg
    reg = load_rocket(fname)
    y_pred = reg.predict(X_test)
    # squared=False -> report RMSE
    print(mean_squared_error(y_test, y_pred, squared=False))
# Multivariate regression with sklearn-type API
from sklearn.metrics import mean_squared_error
dsid = 'AppliancesEnergy'
fname = 'RocketRegressor'
X_train, y_train, X_test, y_test = get_Monash_regression_data(dsid, Xdtype='float64')
if X_train is not None:
    rmse_scorer = make_scorer(mean_squared_error, greater_is_better=False)
    reg = RocketRegressor(scoring=rmse_scorer)
    reg.fit(X_train, y_train)
    reg.save(fname)
    del reg
    reg = load_rocket(fname)
    y_pred = reg.predict(X_test)
    print(mean_squared_error(y_test, y_pred, squared=False))
#hide
# nbdev-style export: regenerate the library module from this notebook.
from tsai.imports import create_scripts
from tsai.export import get_nb_name
nb_name = get_nb_name()
create_scripts(nb_name);
| nbs/111_models.ROCKET.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
# NOTE(review): `sklearn.cluster.hierarchical` was a private module and has been
# removed from modern scikit-learn; import AgglomerativeClustering directly — verify.
from sklearn.cluster import hierarchical
from sklearn.cluster import DBSCAN
# HTRU2 pulsar dataset: 8 numeric feature columns plus a class label
df = pd.read_csv('HTRU_2.csv',names = ['one','two','three','four','five','six','seven','eight','nine'],skiprows = 1)
df.head()
# -
# # Data set attributes description
# 1. Mean of the integrated profile.
# 2. Standard deviation of the integrated profile.
# 3. Excess kurtosis of the integrated profile.
# 4. Skewness of the integrated profile.
# 5. Mean of the DM-SNR curve.
# 6. Standard deviation of the DM-SNR curve.
# 7. Excess kurtosis of the DM-SNR curve.
# 8. Skewness of the DM-SNR curve.
# 9. Class
# # K means Clustering
# +
# fit k-means on the 8 feature columns (class label column excluded)
model1 = KMeans(n_clusters = 3)
model1.fit(df.iloc[:,0:8])
plt.figure(num = None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
# visualise the first 40 samples only
for i in range(40):
    if model1.labels_[i] == 0:
        # NOTE(review): plotting row/column *slices* per point (df.iloc[i:-1, ...])
        # looks unintended; a scatter of two feature columns is more usual — confirm.
        plt.plot(df.iloc[i:-1,1:3],df.iloc[i:-1,3:5],'go')
        plt.plot(model1.cluster_centers_[:,0],model1.cluster_centers_[:,1],'o',c = 'black')
    elif model1.labels_[i] == 1:
        plt.plot(df.iloc[i:-1,5:7],df.iloc[i:-1,7:9],'ro')
        plt.plot(model1.cluster_centers_[:,0],model1.cluster_centers_[:,1],'o',c = 'black')
plt.show()
# -
# # Hierarchical Clustering Algorithm
# +
# fit agglomerative (hierarchical) clustering on the 8 feature columns
model2 = hierarchical.AgglomerativeClustering(n_clusters = 6)
model2.fit(df.iloc[:,0:8])
plt.figure(num = None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
# visualise the first 40 samples only; labels other than 0/1 are not plotted
for i in range(40):
    if model2.labels_[i] == 0:
        plt.plot(df.iloc[i:-1,1:3],df.iloc[i:-1,3:5],'go')
    elif model2.labels_[i] == 1:
        plt.plot(df.iloc[i:-1,5:7],df.iloc[i:-1,7:9],'ro')
plt.show()
# -
# # DBScan Algorithm
# +
# fit DBSCAN on the 8 feature columns
model3 = DBSCAN()
model3.fit(df.iloc[:,0:8])
# bug fix: this section is meant to visualise DBSCAN, but it previously
# re-fitted the hierarchical model and plotted *its* labels, so model3's
# clustering was never shown. Plot DBSCAN's own labels instead.
plt.figure(num = None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
# visualise the first 40 samples only; labels other than 0/1 (incl. noise -1) are skipped
for i in range(40):
    if model3.labels_[i] == 0:
        plt.plot(df.iloc[i:-1,1:2],df.iloc[i:-1,3:6],'go')
    elif model3.labels_[i] == 1:
        plt.plot(df.iloc[i:-1,5:7],df.iloc[i:-1,7:9],'ro')
plt.show()
| Jupyter Notebooks/clustering_on_HTRU2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
import pandas as pd
import numpy as np
import seaborn as sns
import glob, os
from IPython.display import Image
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR, SVR
from sklearn import metrics
from sklearn.linear_model import LinearRegression,Ridge
import statsmodels.api as sm
from sklearn import linear_model
from sklearn.tree import DecisionTreeRegressor
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import RFE
from sklearn import tree
import pydotplus
from sklearn.externals.six import StringIO
import matplotlib.pyplot as plt
# %matplotlib inline
from collections import Counter
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_regression
from sklearn.feature_selection import f_regression
from sklearn.linear_model import ElasticNet
import forward_selection as fs
from scipy.stats import chi2_contingency, pearsonr, spearmanr
# ## Preparing the dataset
# latin1 CSV: accented characters in the column names were mangled to '?'
# (e.g. "Ann?e" = Année / year, "d?partement" = département).
dft11 = pd.read_csv("la_base.csv", encoding="latin1")
dft11.columns
dft11.columns
# aggregate the party families into broad left / right blocks
dft11["taux_bgauche"] = dft11['taux_xgauche']+ dft11['taux_gauche'] + dft11['taux_vert']
dft11["taux_bdroite"] = dft11['taux_droite']+ dft11['taux_centre']
dft11.head()
# incumbent party's score, without allied parties, per election year
# NOTE(review): DataFrame.ix is deprecated and removed in modern pandas — use .loc.
dft11["taux_sortie_sans_bloc"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1981,"taux_centre"]
dft11.ix[dft11["Ann?e"]==1988,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1988,"taux_gauche"]
dft11.ix[dft11["Ann?e"]==1995,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==1995,"taux_gauche"]
dft11.ix[dft11["Ann?e"]==2002,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2002,"taux_droite"]
dft11.ix[dft11["Ann?e"]==2007,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2007,"taux_droite"]
dft11.ix[dft11["Ann?e"]==2012,"taux_sortie_sans_bloc"] = dft11.ix[dft11["Ann?e"]==2012,"taux_droite"]
# +
# incumbent side's score including allied bloc parties, per election year
dft11["taux_sortie_avec_bloc"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1981,"taux_centre"] +\
    dft11.ix[dft11["Ann?e"]==1981,"taux_droite"]
dft11.ix[dft11["Ann?e"]==1988,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1988,"taux_gauche"]+\
    dft11.ix[dft11["Ann?e"]==1988,"taux_xgauche"] + dft11.ix[dft11["Ann?e"]==1988,"taux_vert"]
dft11.ix[dft11["Ann?e"]==1995,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==1995,"taux_gauche"]+\
    dft11.ix[dft11["Ann?e"]==1995,"taux_xgauche"] + dft11.ix[dft11["Ann?e"]==1995,"taux_vert"]
dft11.ix[dft11["Ann?e"]==2002,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2002,"taux_droite"]+\
    dft11.ix[dft11["Ann?e"]==2002,"taux_centre"]
dft11.ix[dft11["Ann?e"]==2007,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2007,"taux_droite"]+\
    dft11.ix[dft11["Ann?e"]==2007,"taux_centre"]
dft11.ix[dft11["Ann?e"]==2012,"taux_sortie_avec_bloc"] = dft11.ix[dft11["Ann?e"]==2012,"taux_droite"]+\
    dft11.ix[dft11["Ann?e"]==2012,"taux_centre"]
# +
# rate-above-average for the incumbent
# incumbent popularity
dft11["ecart_pop"] = dft11["pop_president"] - dft11["pop_premier_ministre"]
# popularity of the head of the executive: president in some years, prime
# minister in others (presumably the "cohabitation" years — verify)
dft11["pop_exec"] = np.nan
dft11.ix[dft11["Ann?e"]==1981,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1981,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==1988,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1988,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==2012,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2012,"pop_president"].iloc[0]
dft11.ix[dft11["Ann?e"]==1995,"pop_exec"] = dft11.ix[dft11["Ann?e"]==1995,"pop_premier_ministre"].iloc[0]
dft11.ix[dft11["Ann?e"]==2002,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2002,"pop_premier_ministre"].iloc[0]
dft11.ix[dft11["Ann?e"]==2007,"pop_exec"] = dft11.ix[dft11["Ann?e"]==2007,"pop_premier_ministre"].iloc[0]
# backfill missing polling values from the closest available year
dft11.ix[dft11["Ann?e"]==1981,"pop_xdroite"] = dft11.ix[dft11["Ann?e"]==1988,"pop_xdroite"].iloc[0]
dft11.ix[dft11["Ann?e"]==1981,"pop_verts"] = dft11.ix[dft11["Ann?e"]==1995,"pop_verts"].iloc[0]
dft11.ix[dft11["Ann?e"]==1988,"pop_verts"] = dft11.ix[dft11["Ann?e"]==1995,"pop_verts"].iloc[0]
# hard-coded imputation of a missing 1981 value
dft11.ix[dft11["Ann?e"]==1981,"nombre de logement vacant"] = 1891.
# +
# The fn option, which allows dropping year 1981, was added afterwards
# -
def create_train(target, dft11, option_fn=False):
    """Build the full training design matrix and target vector.

    Args:
        target: column name of the dependent variable.
        dft11: raw dataframe; gains CORS88/CORS02 dummy columns (mutated in place).
        option_fn: when True, rows for year 1981 are dropped.

    Returns (features, target_series).
    """
    # Corsica dummies for the atypical 1988 and 2002 elections
    in_corsica = dft11["code"].isin(["2A", "2B"])
    dft11["CORS88"] = ((dft11["Ann?e"] == 1988) & in_corsica).astype(int)
    dft11["CORS02"] = ((dft11["Ann?e"] == 2002) & in_corsica).astype(int)
    unused_cols = ["code", 'taux_Blancs et nuls', 'taux_droite', 'taux_xgauche',
                   'taux_vert', 'taux_centre', 'Q1_rate', 'Q2_rate', 'Q3_rate', 'Q4_rate', "Total",
                   'depart_OM', "Date", "taux_Abstention_sup_moyenne", 'taux_Blancs et nuls_sup_moyenne',
                   'depart_CORSE']
    base = dft11.drop(unused_cols, axis=1)
    # recode the year as an offset from the first election in the sample
    base["Date"] = base["Ann?e"] - 1981
    leakage_cols = ["Ann?e", "d?partement", "taux_bgauche", 'taux_Abstention', "taux_sortie_sans_bloc",
                    "taux_sortie_avec_bloc", "taux_gauche", 'taux_xdroite', "taux_bdroite"]
    if option_fn:
        base = base[base["Ann?e"] != 1981]
    return base.drop(leakage_cols, axis=1), base[target]
def create_train_and_test(target, dft11, option_fn=False):
    """Split the prepared dataset into a pre-2012 train set and a 2012 test set.

    Args:
        target: column name of the dependent variable.
        dft11: raw dataframe; gains CORS88/CORS02 dummy columns (mutated in place).
        option_fn: when True, year 1981 is removed from the train set.

    Returns (train_x, train_y, test_x, test_y).
    """
    # Corsica dummies for the atypical 1988 and 2002 elections
    in_corsica = dft11["code"].isin(["2A", "2B"])
    dft11["CORS88"] = ((dft11["Ann?e"] == 1988) & in_corsica).astype(int)
    dft11["CORS02"] = ((dft11["Ann?e"] == 2002) & in_corsica).astype(int)
    unused_cols = ["code", 'taux_Blancs et nuls', 'taux_droite', 'taux_xgauche',
                   'taux_vert', 'taux_centre', 'Q1_rate', 'Q2_rate', 'Q3_rate', 'Q4_rate', "Total",
                   'depart_OM', "Date", "taux_Abstention_sup_moyenne", 'taux_Blancs et nuls_sup_moyenne',
                   'depart_CORSE']
    base = dft11.drop(unused_cols, axis=1)
    # recode the year as an offset from the first election in the sample
    base["Date"] = base["Ann?e"] - 1981
    leakage_cols = ["Ann?e", "d?partement", "taux_bgauche", 'taux_Abstention', "taux_sortie_sans_bloc",
                    "taux_sortie_avec_bloc", "taux_gauche", 'taux_xdroite', "taux_bdroite"]
    train = base[base["Ann?e"] != 2012]
    if option_fn:
        train = train[train["Ann?e"] != 1981]
    test = base[base["Ann?e"] == 2012]
    return (train.drop(leakage_cols, axis=1), train[target],
            test.drop(leakage_cols, axis=1), test[target])
def target_and_predict(target, dft11, estimator, importance_features=False, option_fn=False):
    """Fit `estimator` on the pre-2012 data and report its 2012 test metrics.

    Args:
        target: name of the column to predict.
        dft11: raw dataframe (preprocessing is done by create_train_and_test).
        estimator: sklearn-style linear model; must expose fit/predict/score,
            and n_iter_/coef_ for the diagnostics printed below.
        importance_features: if True, also print the non-zero coefficients.
        option_fn: if True, drop year 1981 (passed through to the split).
    """
    df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test(target, dft11, option_fn=option_fn)
    # bug fix: this function previously fitted/scored the module-level global
    # `svr` instead of the `estimator` argument it was given
    estimator.fit(df_train_x, df_train_y)
    print("Avec l'estimateur %s" % str(estimator))
    print("Approche %s" % target)
    print("Nb itération : %s" % estimator.n_iter_)
    print("MSE test : %s" % metrics.mean_squared_error(df_test_y, estimator.predict(df_test_x)))
    print("MAE test : %s" % metrics.mean_absolute_error(df_test_y, estimator.predict(df_test_x)))
    print("R2 test : %s " % estimator.score(df_test_x, df_test_y))
    if importance_features:
        print("\n")
        print("Features à garder")
        # sort (coefficient, feature-name) pairs and print the non-zero ones
        vec_features = sorted(zip(estimator.coef_, df_train_x.columns))
        for coef, name in vec_features:
            if coef != 0:
                print(coef, name)
    print("\n")
def my_Lslasso(data, target, alpha_value, option_fn=False, normalize=True, max_iter=5000):
    """LS-Lasso: Lasso feature selection followed by an OLS refit.

    Fits a Lasso at `alpha_value` on the train split, keeps the variables with
    non-zero coefficients, refits a plain LinearRegression on them and prints
    the train R2 and the 2012 test MAE.
    """
    print(alpha_value)
    df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test(target, data, option_fn=option_fn)
    svr = linear_model.Lasso(alpha=alpha_value, normalize=normalize, max_iter=max_iter, random_state=42)
    svr.fit(df_train_x, df_train_y)
    if svr.n_iter_ == max_iter:
        print("L'itération n'a pas convergé")
    try:
        mask = svr.coef_ != 0
        # fix: .loc replaces DataFrame.ix, deprecated since pandas 0.20 and
        # removed in pandas 1.0 (same boolean column selection)
        df_train_x_select_features = df_train_x.loc[:, mask]
        df_test_x_select_features = df_test_x.loc[:, mask]
        print(df_train_x.columns[mask])
        lr = LinearRegression(fit_intercept=True, normalize=True)
        lr.fit(df_train_x_select_features, df_train_y)
        print("R2 train %s" % lr.score(df_train_x_select_features, df_train_y))
        print("MAE test %s" % metrics.mean_absolute_error(df_test_y, lr.predict(df_test_x_select_features)))
    except ValueError:
        # every coefficient was shrunk to zero: nothing to refit on
        print("Tout les coéfficients sont nuls")
def my_LsElasticNet(data, target, alpha_value, ratio, option_fn=False):
    """LS-ElasticNet: ElasticNet feature selection followed by an OLS refit.

    Fits an ElasticNet at (`alpha_value`, `ratio`) on the train split, keeps
    the variables with non-zero coefficients, refits a LinearRegression on
    them and prints the 2012 test MAE.
    """
    print("Alpha value %s" % alpha_value)
    print("Ratio %s" % ratio)
    df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test(target, data, option_fn=option_fn)
    en = ElasticNet(alpha=alpha_value, l1_ratio=ratio, fit_intercept=True, normalize=True,
                    max_iter=5000, random_state=42)
    en.fit(df_train_x, df_train_y)
    if en.n_iter_ == 5000:
        print("L'itération n'a pas convergé")
    try:
        mask = en.coef_ != 0
        # fix: .loc replaces DataFrame.ix, deprecated since pandas 0.20 and
        # removed in pandas 1.0 (same boolean column selection)
        df_train_x_select_features = df_train_x.loc[:, mask]
        df_test_x_select_features = df_test_x.loc[:, mask]
        #print(df_train_x.columns[mask])
        lr = LinearRegression(fit_intercept=True, normalize=True)
        lr.fit(df_train_x_select_features, df_train_y)
        print(metrics.mean_absolute_error(df_test_y, lr.predict(df_test_x_select_features)))
    except ValueError:
        # every coefficient was shrunk to zero: nothing to refit on
        print("Tout les coéfficients sont nuls")
def r2_adjusted(df_x, df_y, p, estimator):
    """Adjusted R² of `estimator` on (df_x, df_y) with `p` predictors.

    Args:
        df_x: feature matrix (anything with .shape[0] = number of samples).
        df_y: target values.
        p: number of predictors used by the model.
        estimator: fitted sklearn-style model exposing .score(X, y).
    """
    # bug fix: this previously scored the module-level globals
    # `df_train_x_select_features` / `df_train_y` instead of its arguments
    r2 = estimator.score(df_x, df_y)
    n = df_x.shape[0]
    return 1 - (((1 - r2) * (n - 1)) / (n - p - 1))
# ## Far-right ("xdroite") prediction
# Baseline: Lasso with a small penalty, reporting metrics and kept features.
svr = linear_model.Lasso(alpha=0.01, normalize=True, max_iter=5000, random_state=42)
target_and_predict("taux_xdroite",dft11, svr, True)
# +
# Sweep the Lasso penalty, scoring MAE on the held-out 2012 test year.
df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test("taux_xdroite", dft11)
for i in np.linspace(0.06,0.08,4):
    print(i)
    svr = linear_model.Lasso(alpha=i, normalize=True, max_iter=5000, random_state=42)
    svr.fit(df_train_x, df_train_y)
    print(metrics.mean_absolute_error(df_test_y, svr.predict(df_test_x)))
    print("\n")
# Best param = 0.07
svr = linear_model.Lasso(alpha=0.07, normalize=True, max_iter=5000, random_state=42)
target_and_predict("taux_xdroite",dft11, svr, True)
# Keeps only 3 variables
# +
# Refit on a hand-picked subset of features.
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas; use .loc.
df_train_x_select_features = df_train_x.ix[:, ["droite_au_pouvoir","subventions",
                                               "taux_xdroite_sup_moyenne","Dissident"]]
df_test_x_select_features = df_test_x.ix[:, ["droite_au_pouvoir","subventions",
                                             "taux_xdroite_sup_moyenne","Dissident"]]
svr.fit(df_train_x_select_features, df_train_y)
print(metrics.mean_absolute_error(df_test_y, svr.predict(df_test_x_select_features)))
# -
# LS-Lasso penalty sweep
for i in np.linspace(0.07,0.11,8):
    my_Lslasso(dft11,"taux_xdroite", i)
    print("\n")
# ElasticNet sweep over alpha and l1_ratio
for i in [0.1,0.5]:
    for j in np.linspace(0,1,11):
        my_LsElasticNet(dft11, "taux_xdroite",i, j)
# After trying selection via lasso, lsLasso and lsElasticNet:
# the best-performing model uses 'taux_xdroite_sup_moyenne', 'subventions' with alpha = 0.1
# *We obtain a MAE of 2.35*
liste_features = ["taux_xdroite","droite_au_pouvoir","taux_droite_sup_moyenne","subventions","pop_centre",
                  "persistance_centre_droite","taux_chomage","capacite epargne future(am?lioration moins deterioration)",
                  "Dissident","depart_frontalier","taux_xdroite_sup_moyenne"]
xdroite_mat = dft11.ix[:, liste_features].corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(12, 9))
# Draw the heatmap using seaborn
sns.heatmap(xdroite_mat, vmax=1, square=True)
plt.title("HeatMap avec les variables d'extraites du Lasso")
plt.show()
# Significant correlation
print(pearsonr(dft11.ix[:, "subventions"], dft11.ix[:, "taux_xdroite"]))
print(spearmanr(dft11.ix[:, "subventions"], dft11.ix[:, "taux_xdroite"]))
print(pearsonr(dft11.ix[:, "capacite epargne future(am?lioration moins deterioration)"], dft11.ix[:, "taux_xdroite"]))
print(spearmanr(dft11.ix[:, "capacite epargne future(am?lioration moins deterioration)"], dft11.ix[:, "taux_xdroite"]))
print(pearsonr(dft11.ix[:, "taux_bdroite"], dft11.ix[:, "taux_xdroite"]))
print(spearmanr(dft11.ix[:, "taux_bdroite"], dft11.ix[:, "taux_xdroite"]))
print("\n")
print(pearsonr(dft11.ix[:, "taux_bgauche"], dft11.ix[:, "taux_xdroite"]))
print(spearmanr(dft11.ix[:, "taux_bgauche"], dft11.ix[:, "taux_xdroite"]))
plt.plot(dft11.ix[:, "subventions"], dft11.ix[:, "taux_xdroite"],"*")
plt.show()
liste_features = ["taux_xdroite",'subventions', "taux_xdroite_sup_moyenne",'Date',
                  '40-59ans','75+ans','pop_xgauche','consommation menages - electricite',
                  'importations','pop_verts','Superficie ','0-19ans',"taux_chomage","var_chomage_annee"]
xdroite_mat = dft11.ix[:, liste_features].corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(12, 9))
# Draw the heatmap using seaborn
sns.heatmap(xdroite_mat, vmax=1, square=True)
plt.title("HeatMap avec les variables d'extraites d'un RFE")
plt.show()
# "subventions" is highly correlated with the target and therefore with our
# discriminating variables; adding variables does not improve the model,
# and removing "subventions" degrades it.
# #### Suppression de l'année 1981 pour le fn
svr = linear_model.Lasso(alpha=0.01, normalize=True, max_iter=5000, random_state=42)
target_and_predict("taux_xdroite",dft11, svr, True, True)
# Amélioration avec la supprésion de l'année 1981 4.15 vs 3.58
for i in [0.01, 0.1,0.2]:
my_Lslasso(dft11,"taux_xdroite", i, True)
print("\n")
# Création new variable
df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test("taux_xdroite", dft11, option_fn=True)
# +
model = LinearRegression()
ss = StandardScaler()
df_train_x_select_features = df_train_x.ix[:, ["subventions",
"taux_xdroite_sup_moyenne"]]
df_test_x_select_features = df_test_x.ix[:, ["subventions",
"taux_xdroite_sup_moyenne"]]
df_train_x_select_features = ss.fit_transform(df_train_x_select_features)
df_test_x_select_features = ss.transform(df_test_x_select_features)
model.fit(df_train_x_select_features, df_train_y)
print(metrics.mean_absolute_error(df_test_y, model.predict(df_test_x_select_features)))
# -
np.mean(model.predict(df_test_x_select_features))
# Analyse sans normaliser les données
svr = linear_model.Lasso(alpha=0.8, normalize=False, max_iter=5000, random_state=42)
target_and_predict("taux_xdroite",dft11, svr, True, True)
# Sans normaliser encore plus performant
# Teste LsLasso sans normaliser et sans 1981
for i in np.linspace(0.6, 1.5, 10):
my_Lslasso(dft11,"taux_xdroite", i, True, False)
print("\n")
# +
#df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test("taux_xdroite", dft11)
#skb = SelectKBest(f_regression, k=10)
#skb.fit(df_train_x, df_train_y)
#skb_result = sorted(zip(skb.scores_, skb.pvalues_ ,df_train_x.columns))
#[ print(skb_result[i]) for i in skb.get_support(True)]
# +
#sfm = SelectFromModel(svc, threshold=0.4)
#sfm.fit(df_train_x, df_train_y)
#print(df_train_x.columns[sfm.get_support(True)])
#print("\n")
#dtr = DecisionTreeRegressor()
#sfm = SelectFromModel(dtr, threshold=0.1)
#sfm.fit(df_train_x, df_train_y)
#print(df_train_x.columns[sfm.get_support(True)])
# +
df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test("taux_xdroite", dft11, True)
mat_cor = pd.concat([df_train_x, df_train_y], axis=1)
# Pearson and Spearman correlation of every feature against the target.
# DataFrame.ix and DataFrame.append were removed from pandas (1.0 and 2.0
# respectively): use positional .iloc / label .loc, and build the rows in
# a list so the frame is constructed once (also avoids quadratic appends).
rows = []
target = mat_cor.loc[:, "taux_xdroite"]
for i in range(1, mat_cor.shape[1]):
    feature = mat_cor.iloc[:, i]
    rows.append(
        {"name": mat_cor.columns[i],
         "corr_pearson": pearsonr(feature, target)[0],
         "corr_pearson_pvalue": round(pearsonr(feature, target)[1], 5),
         "corr_spearman": spearmanr(feature, target)[0],
         # the column name keeps the historical "sperman" spelling because
         # later cells index it by this exact key
         "corr_sperman_pvalue": round(spearmanr(feature, target)[1], 5),
         })
xdroite_corr = pd.DataFrame(rows, columns=["name", "corr_pearson",
                                           "corr_pearson_pvalue", "corr_spearman",
                                           "corr_sperman_pvalue"])
# +
# Keep the variables that are significant on both tests.
# The p-values are equivalent whether the variables are standardised or not,
# so the data can be filtered directly.
# The condition is set on Spearman only => keeps more variables.
# .copy() + drop() replace the old `del` on a boolean-filtered slice, which
# triggered pandas' SettingWithCopyWarning.
xdroite_corr2 = xdroite_corr[xdroite_corr["corr_sperman_pvalue"] <= 0.05].copy()
# The p-values are no longer needed once the H0 rejection has been checked.
xdroite_corr2 = xdroite_corr2.drop(columns=["corr_sperman_pvalue", "corr_pearson_pvalue"])
# -
xdroite_corr2.shape
xdroite_var_select = xdroite_corr2[(xdroite_corr2["corr_spearman"] >= 0.2) | (xdroite_corr2["corr_spearman"] <= -0.2)]
xdroite_var_select.to_csv("corr_sperman.csv", sep=";")
# +
# Now choose which variables to keep:
# i.e. keep variables that are discriminative and not correlated with each other.
# DataFrame.ix was removed from pandas 1.0 -> label selection uses .loc;
# abs() on the whole frame replaces the redundant .ix[:, :] full slice.
features_select_corr = mat_cor.loc[:, xdroite_var_select["name"]].corr()
features_select_corr[abs(features_select_corr) >= 0.5].to_csv("features_corr_features.csv", sep=",")
# -
df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test("taux_xdroite", dft11, True)
df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test("taux_xdroite", dft11, True)
# +
# Drop the variables that are too correlated with others; the three cells
# below were copy-pasted variants of the same evaluation, factored into one
# helper so each feature set is scored identically.
def evaluate_feature_set(features_keep, option_fn, show_adjusted=False, header=None):
    """Fit a LinearRegression on `features_keep` and print its scores.

    First fits on the train/test split returned by create_train_and_test
    (option_fn forwarded), printing R2 / MAE, then refits on the full data
    from create_train.  When `show_adjusted` is True, the adjusted R2
    (r2_adjusted, with k = len(features_keep)) is printed too; `header`
    is an optional label printed before the scores.

    NOTE: LinearRegression(normalize=True) was removed in scikit-learn 1.2;
    for plain OLS the fitted predictions are identical without it, so the
    parameter is simply dropped here.
    """
    n_features = len(features_keep)
    df_train_x, df_train_y, df_test_x, df_test_y = create_train_and_test(
        "taux_xdroite", dft11, option_fn)
    # DataFrame.ix was removed from pandas -> label selection uses .loc
    train_sel = df_train_x.loc[:, features_keep]
    test_sel = df_test_x.loc[:, features_keep]
    lr = LinearRegression(fit_intercept=True)
    lr.fit(train_sel, df_train_y)
    print(test_sel.columns)
    if header is not None:
        print(header)
    if show_adjusted:
        print("R2 adjust train %s" % r2_adjusted(train_sel, df_train_y, n_features, lr))
    print("R2 train %s" % lr.score(train_sel, df_train_y))
    print("MAE train %s" % metrics.mean_absolute_error(df_train_y, lr.predict(train_sel)))
    print("MAE test %s" % metrics.mean_absolute_error(df_test_y, lr.predict(test_sel)))
    print("\n")
    print("Résultat avec fit entire data")
    df_train_x, df_train_y = create_train("taux_xdroite", dft11, option_fn)
    train_sel = df_train_x.loc[:, features_keep]
    lr = LinearRegression(fit_intercept=True)
    lr.fit(train_sel, df_train_y)
    print("R2 train %s" % lr.score(train_sel, df_train_y))
    if show_adjusted:
        print("R2 adjust train %s" % r2_adjusted(train_sel, df_train_y, n_features, lr))
    print("MAE train %s" % metrics.mean_absolute_error(df_train_y, lr.predict(train_sel)))
# -
# +
features_keep = ['75+ans', 'var_chomage_annee', 'taux_droite_sup_moyenne',
                 'taux_gauche_sup_moyenne', 'taux_xdroite_sup_moyenne',
                 'droite_au_pouvoir',
                 'capacit? epargne actuelle (augmentation moins diminution)',
                 'ecart_pop']
evaluate_feature_set(features_keep, option_fn=True)
# -
# +
features_keep = ['75+ans', 'var_chomage_annee', 'taux_droite_sup_moyenne',
                 'taux_gauche_sup_moyenne', 'taux_xdroite_sup_moyenne',
                 'pop_premier_ministre', 'subventions']
evaluate_feature_set(features_keep, option_fn=True)
# -
# +
features_keep = ["Nombre total de mariages domicili?s",
                 "taux_droite_sup_moyenne",
                 "taux_gauche_sup_moyenne",
                 "taux_xdroite_sup_moyenne",
                 "pop_xdroite",
                 "Densit?",
                 "subventions",
                 "pop_exec"
                 ]
evaluate_feature_set(features_keep, option_fn=False, show_adjusted=True,
                     header="Résultat sans 2012 (avec test)")
# +
# Results are fairly stable with this model
# -
# **Adding the far-right popularity fitted without year 2012 strongly degrades the test score, because popularity multiplied between 2007 and 2012... a smoother indicator of the FN's progression is needed**
#
# 1. Variables from the preceding elections
# 2. Drop year 2007: the FN's growth stalled that year, which may bias the coefficients
# # Training the final model and prediction
features_keep = ["Nombre total de mariages domicili?s",
                 "taux_droite_sup_moyenne",
                 "taux_gauche_sup_moyenne",
                 "taux_xdroite_sup_moyenne",
                 "pop_xdroite",
                 "Densit?",
                 "subventions",
                 "pop_exec"
                 ]
# Better result when keeping 1981
df_train_x, df_train_y = create_train("taux_xdroite", dft11, False)
# DataFrame.ix was removed from pandas 1.0 -> .loc for label selection.
# normalize=True was dropped: removed in scikit-learn 1.2 and, for plain
# OLS, predictions are identical without it.
df_train_x_select_features = df_train_x.loc[:, features_keep]
model_xdroite_final = LinearRegression(fit_intercept=True)
model_xdroite_final.fit(df_train_x_select_features, df_train_y)
predict_table = pd.read_csv("la_base_2017.csv", encoding="latin1")
# Broadcast the prime-minister popularity (constant per election) to pop_exec.
predict_table.loc[:, "pop_exec"] = predict_table.loc[:, "pop_premier_ministre"].iloc[0]
predict_X = predict_table.loc[:, features_keep]
predict_X.shape
y_xdroite = model_xdroite_final.predict(predict_X)
df = pd.DataFrame(y_xdroite, columns=['prediction'])
np.mean(y_xdroite)
resultat_xdoite_extract = pd.concat([predict_table[["d?partement", "code", "Total"]], df], axis=1)
# National score = department predictions weighted by registered voters.
np.average(resultat_xdoite_extract["prediction"], weights=resultat_xdoite_extract["Total"])
resultat_xdoite_extract.to_csv("prediction_2017_xdroite.csv", sep=";")
| Analyses/.ipynb_checkpoints/prediction xdroite-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#List
# -
# Lists: ordered, mutable, may mix types (here even a set element).
lst = ["jayanthi",99,72,4,{7,8,9},2,79]
lst
lst[2]
# Negative indices count from the end.
lst[-1]
lst.append(4)
lst
# index() returns the position of the first match.
lst.index(99)
# reverse() is in place and returns None.
lst.reverse()
lst
# remove() deletes the first occurrence of the value.
lst.remove(99)
lst
lst.remove("jayanthi")
lst
# clear() empties the list in place.
lst.clear()
lst
# +
#dict-dictionery
# -
dit = {"name":"jayanthi", "number":"9999999", "mail":"<EMAIL>"}
dit
# get() returns None instead of raising when the key is missing.
dit.get("name")
dit.keys()
dit.items()
# pop() removes the key and returns its value.
dit.pop("name")
# setdefault() re-inserts "name" with value None since it was just popped.
dit.setdefault("name")
dit
dit.clear()
dit
# +
#sets
# -
# Duplicates (4, 5, 6) are collapsed; sets are unordered.
st = {1,2,3,4,5,6,7,8,9,98,97,4,5,6}
st
st.add(999)
st
# Every set is a subset of itself -> True.
st.issubset(st)
st.copy()
8
# Intersection with itself is the set itself.
st.intersection(st)
st.clear()
st
# +
#Tuple
# -
# Tuples are immutable; only index() and count() are available.
tup = ("jayanthi", "rao", "mail.com")
tup
tup.index("rao")
tup.count("rao")
# +
#strings
# -
name = "jayanthi"
name1 = "rao"
# name
name
name1
# + concatenates strings.
name + name1
type(name)
# Unbound str methods called with the string as first argument.
str.encode("rao")
str.capitalize("jayanthi")
str.format("Jayanthi")
str.expandtabs("rao")
| Untitled3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from os import listdir
from numpy import asarray
from numpy import save
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
from matplotlib import image
from PIL import Image
# +
from PIL import Image

# Every class directory (train and test) is normalised to the same square
# input size (227x227, the AlexNet-style input resolution).
newsize = (227, 227)


def resize_folder(folder, size=newsize):
    """Resize every image in `folder` in place to `size`.

    Each file is overwritten with its resized version, so re-running the
    cell is harmless (idempotent).
    """
    for file in listdir(folder):
        im = Image.open(folder + file)
        im = im.resize(size)
        im.save(folder + file)


# The ten original copy-pasted cells did exactly this, one folder each.
for _folder in ('kurthi/', 'test/kurthi/',
                'saree/', 'test/saree/',
                'shirt/', 'test/shirt/',
                't-shirt/', 'test/t-shirt/',
                'None/', 'test/None/'):
    resize_folder(_folder)
# -
# -
# Load all training images, building parallel lists of pixel arrays and
# integer class labels (1=kurthi, 2=saree, 3=shirt, 4=t-shirt, 5=None).
# The five original copy-pasted cells are folded into one loop.
loaded_images = list()
labels = list()
for _folder, _label in [('kurthi/', 1), ('saree/', 2), ('shirt/', 3),
                        ('t-shirt/', 4), ('None/', 5)]:
    for filename in listdir(_folder):
        loaded_images.append(image.imread(_folder + filename))
        labels.append(_label)
    # progress: cumulative counts after each class folder
    print(_folder, len(loaded_images), len(labels))
plt.imshow(loaded_images[102])
# Stack into numpy arrays for the training notebook.
photos = asarray(loaded_images)
label = asarray(labels)
photos.shape
label.shape
plt.imshow(photos[4000])
label[4000]
save('train_photos.npy', photos)
save('train_labels.npy', label)
# Load all test images, same labelling scheme as the training set
# (1=kurthi, 2=saree, 3=shirt, 4=t-shirt, 5=None); the five copy-pasted
# cells are folded into one loop.
t_loaded_images = list()
t_labels = list()
for _folder, _label in [('test/kurthi/', 1), ('test/saree/', 2),
                        ('test/shirt/', 3), ('test/t-shirt/', 4),
                        ('test/None/', 5)]:
    for filename in listdir(_folder):
        t_loaded_images.append(image.imread(_folder + filename))
        t_labels.append(_label)
    # progress: cumulative counts after each class folder
    print(_folder, len(t_loaded_images), len(t_labels))
plt.imshow(t_loaded_images[21])
t_photos = asarray(t_loaded_images)
t_label = asarray(t_labels)
t_photos.shape
t_label.shape
plt.imshow(t_photos[143])
t_label[143]
save('test_photos.npy', t_photos)
save('test_labels.npy', t_label)
plt.imshow(t_photos[18])
| data pre-processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src='./img/intel-logo.jpg' width=50%, Fig1>
#
# # NumPy Basic Lecture01 - NumPy 설치 및 배열 생성
#
# <img src='./img/numpy-logo.jpg' width=50%>
#
# <div align='right'>성 민 석 (Minsuk Sung)</div>
# <div align='right'>류 회 성 (Hoesung Ryu)</div>
# + [markdown] toc=true
# <h1>강의목차<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#NumPy란?" data-toc-modified-id="NumPy란?-1"><span class="toc-item-num">1 </span>NumPy란?</a></span><ul class="toc-item"><li><span><a href="#NumPy-특징" data-toc-modified-id="NumPy-특징-1.1"><span class="toc-item-num">1.1 </span>NumPy 특징</a></span></li><li><span><a href="#NumPy-설치하기" data-toc-modified-id="NumPy-설치하기-1.2"><span class="toc-item-num">1.2 </span>NumPy 설치하기</a></span></li><li><span><a href="#NumPy-불러오기" data-toc-modified-id="NumPy-불러오기-1.3"><span class="toc-item-num">1.3 </span>NumPy 불러오기</a></span></li><li><span><a href="#NumPy-버전-확인하기" data-toc-modified-id="NumPy-버전-확인하기-1.4"><span class="toc-item-num">1.4 </span>NumPy 버전 확인하기</a></span></li><li><span><a href="#NumPy-사용시-도움말" data-toc-modified-id="NumPy-사용시-도움말-1.5"><span class="toc-item-num">1.5 </span>NumPy 사용시 도움말</a></span></li></ul></li><li><span><a href="#NumPy-데이터-타입" data-toc-modified-id="NumPy-데이터-타입-2"><span class="toc-item-num">2 </span>NumPy 데이터 타입</a></span></li><li><span><a href="#NumPy-배열-생성" data-toc-modified-id="NumPy-배열-생성-3"><span class="toc-item-num">3 </span>NumPy 배열 생성</a></span><ul class="toc-item"><li><span><a href="#파이썬-배열로-NumPy-배열-생성" data-toc-modified-id="파이썬-배열로-NumPy-배열-생성-3.1"><span class="toc-item-num">3.1 </span>파이썬 배열로 NumPy 배열 생성</a></span></li><li><span><a href="#배열-생성-및-초기화" data-toc-modified-id="배열-생성-및-초기화-3.2"><span class="toc-item-num">3.2 </span>배열 생성 및 초기화</a></span><ul class="toc-item"><li><span><a href="#np.zeros" data-toc-modified-id="np.zeros-3.2.1"><span class="toc-item-num">3.2.1 </span>np.zeros</a></span></li><li><span><a href="#np.ones" data-toc-modified-id="np.ones-3.2.2"><span class="toc-item-num">3.2.2 </span>np.ones</a></span></li><li><span><a href="#np.full" data-toc-modified-id="np.full-3.2.3"><span class="toc-item-num">3.2.3 </span>np.full</a></span></li><li><span><a href="#np.eye" data-toc-modified-id="np.eye-3.2.4"><span class="toc-item-num">3.2.4 </span>np.eye</a></span></li><li><span><a 
href="#np.empty" data-toc-modified-id="np.empty-3.2.5"><span class="toc-item-num">3.2.5 </span>np.empty</a></span></li><li><span><a href="#like" data-toc-modified-id="like-3.2.6"><span class="toc-item-num">3.2.6 </span>like</a></span></li></ul></li><li><span><a href="#데이터-생성-함수" data-toc-modified-id="데이터-생성-함수-3.3"><span class="toc-item-num">3.3 </span>데이터 생성 함수</a></span><ul class="toc-item"><li><span><a href="#np.linspace" data-toc-modified-id="np.linspace-3.3.1"><span class="toc-item-num">3.3.1 </span>np.linspace</a></span></li><li><span><a href="#np.arange" data-toc-modified-id="np.arange-3.3.2"><span class="toc-item-num">3.3.2 </span>np.arange</a></span></li><li><span><a href="#np.logspace" data-toc-modified-id="np.logspace-3.3.3"><span class="toc-item-num">3.3.3 </span>np.logspace</a></span></li></ul></li><li><span><a href="#난수-기반-배열-생성" data-toc-modified-id="난수-기반-배열-생성-3.4"><span class="toc-item-num">3.4 </span>난수 기반 배열 생성</a></span><ul class="toc-item"><li><span><a href="#np.random.seed" data-toc-modified-id="np.random.seed-3.4.1"><span class="toc-item-num">3.4.1 </span>np.random.seed</a></span></li><li><span><a href="#np.random.normal" data-toc-modified-id="np.random.normal-3.4.2"><span class="toc-item-num">3.4.2 </span>np.random.normal</a></span></li><li><span><a href="#np.random.rand" data-toc-modified-id="np.random.rand-3.4.3"><span class="toc-item-num">3.4.3 </span>np.random.rand</a></span></li><li><span><a href="#np.random.randn" data-toc-modified-id="np.random.randn-3.4.4"><span class="toc-item-num">3.4.4 </span>np.random.randn</a></span></li><li><span><a href="#np.random.randint" data-toc-modified-id="np.random.randint-3.4.5"><span class="toc-item-num">3.4.5 </span>np.random.randint</a></span></li><li><span><a href="#np.random.random" data-toc-modified-id="np.random.random-3.4.6"><span class="toc-item-num">3.4.6 </span>np.random.random</a></span></li></ul></li></ul></li><li><span><a href="#NumPy-배열-상태-검사" 
data-toc-modified-id="NumPy-배열-상태-검사-4"><span class="toc-item-num">4 </span>NumPy 배열 상태 검사</a></span></li><li><span><a href="#Reference" data-toc-modified-id="Reference-5"><span class="toc-item-num">5 </span>Reference</a></span></li></ul></div>
# -
# ## NumPy란?
#
# > 행렬 연산을 위한 핵심 라이브러리
#
# 파이썬 기반 데이터 분석 환경에서 NumPy는 **행렬 연산을 위한 핵심 라이브러리**입니다. `NumPy`는 “Numerical Python“의 약자로 대규모 다차원 배열과 행렬 연산에 필요한 다양한 함수를 제공합니다. 특히 메모리 버퍼에 배열 데이터를 저장하고 처리하는 효율적인 인터페이스를 제공합니다. 파이썬 list 객체를 개선한 NumPy의 ndarray 객체를 사용하면 더 많은 데이터를 더 빠르게 처리할 수 있습니다.
#
# ### NumPy 특징
# - 강력한 N 차원 배열 객체
# - 정교한 브로드케스팅(Broadcast) 기능
# - C/C ++ 및 포트란 코드 통합 도구
# - 유용한 선형 대수학, 푸리에 변환 및 난수 기능
# - 범용적 데이터 처리에 사용 가능한 다차원 컨테이너
#
# ### NumPy 설치하기
#
# ```python
# pip3 install numpy
# ```
# ### NumPy 불러오기
#
# 이제부터 NumPy는 `np`라는 약자로 사용하기로 약속합니다.
import numpy as np
# ### NumPy 버전 확인하기
# 아래의 명령어를 통해서 현재 사용하고 있는 NumPy의 버전을 확인할 수 있습니다.
np.__version__
# ### NumPy 사용시 도움말
#
# > NumPy의 모든 API는 **`np.info 함수`를 이용**하여 도움말을 확인할 수 있습니다
np.info(np.ndarray.dtype)
# ---
#
# ## NumPy 데이터 타입
#
# > NumPy는 다음과 같은 데이터 타입을 지원합니다. 배열을 생성할 때 dtype속성으로 다음과 같은 데이터 타입을 지정할 수 있습니다.
#
# - np.int64 : 64 비트 정수 타입
# - np.float32 : 32 비트 부동 소수 타입
# - np.complex128 : 복소수 (64비트 float 두 개)
# - np.bool_ : 불린 타입 (True, False)
# - np.object_ : 파이썬 객체 타입
# - np.string_ : 고정자리 스트링 타입
# - np.unicode_ : 고정자리 유니코드 타입
# ---
#
# ## NumPy 배열 생성
#
# Python에서의 기본 자료형이었던 `List`와 달리, NumPy 배열은 `numpy.ndarray 객체`입니다. 이 절에서는 NumPy 배열(numpy.ndarray 객체) 생성 방법을 소개합니다.
# ### 파이썬 배열로 NumPy 배열 생성
#
# 파이썬 배열을 인자로 NumPy 배열을 생성할 수 있습니다. 파라미터로 list 객체와 데이터 타입(dtype)을 입력하여 NumPy 배열을 생성합니다. dtype을 생략할 경우, 입력된 list 객체의 요소 타입이 설정됩니다.
arr = [[1,2,3],[4,5,6]]
a = np.array(arr,dtype=float)
a
# NumPy 배열의 형태를 확인하는 방법은 아래와 같이 `shape`을 통해서 가능합니다.
a.shape
# 또한 NumPy 배열의 차원을 확인하는 방법은 `ndim`을 통해서 가능합니다.
a.ndim
# NumPy 배열의 각 원소의 타입을 알고 싶으면 `dtype`을 통해서 가능합니다
a.dtype
# ### 배열 생성 및 초기화
#
# `NumPy`는 원하는 shape으로 배열을 설정하고, 각 요소를 특정 값으로 초기화하는 **`zeros`, `ones`, `full`, `eye` 함수를 제공**합니다. 또한, 파라미터로 입력한 배열과 같은 shape의 배열을 만드는 **`zeros_like`, `ones_like`, `full_like`** 함수도 제공합니다. 이 함수를 이용하여 배열 생성하고 초기화할 수 있습니다.
# #### np.zeros
#
# > np.zeros(shape, dtype=float, order='C')
#
# 지정된 shape의 배열을 생성하고, 모든 요소를 0으로 초기화
np.zeros((3, 4))
# #### np.ones
#
# > np.ones(shape, dtype=None, order='C')
#
# 지정된 shape의 배열을 생성하고, 모든 요소를 1로 초기화
np.ones((2,3,4),dtype=np.int16)
# #### np.full
#
# > np.full(shape, fill_value, dtype=None, order='C')
#
# 지정된 shape의 배열을 생성하고, 모든 요소를 지정한 "fill_value"로 초기화
np.full((2,2),7)
# #### np.eye
#
# > np.eye(N, M=None, k=0, dtype=<class 'float'>)
#
# (N, N) shape의 단위 행렬(Unit Matrix)을 생성
np.eye(4)
# #### np.empty
#
# > np.empty(shape, dtype=float, order='C')
#
# - 지정된 shape의 배열 생성
# - **요소의 초기화 과정에 없고, 기존 메모리값을 그대로 사용**
# - 배열 생성비용이 가장 저렴하고 빠름
# - 배열 사용 시 주의가 필요(초기화를 고려)
np.empty((4,2))
# #### like
#
# > like패턴을 가진 배열생성함수는 지정된 배열과 shape이 같은 행렬을 만드는 like 함수를 제공합니다.
#
# - np.zeros_like
# - np.ones_like
# - np.full_like
# - np.enpty_like
a = np.array([[1,2,3], [4,5,6]])
b = np.ones_like(a)
b
# ### 데이터 생성 함수
#
# NumPy는 `주어진 조건으로 데이터를 생성`한 후, 배열을 만드는 데이터 생성 함수를 제공합니다.
# #### np.linspace
#
# > np.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)
#
# - start부터 stop의 범위에서 num개를 균일한 간격으로 데이터를 생성하고 배열을 만드는 함수
# - 요소 개수를 기준으로 균등 간격의 배열을 생성
np.linspace(0, 1, 5)
import matplotlib.pyplot as plt # 시각화를 위한 라이브러리 호출
plt.plot(np.linspace(0, 1, 5), 'o')
plt.show()
# #### np.arange
#
# > np.arange([start,] stop[, step,], dtype=None)
#
# - start부터 stop 미만까지 step 간격으로 데이터 생성한 후 배열을 만듦
# - 범위내에서 간격을 기준 균등 간격의 배열
# - 요소의 객수가 아닌 데이터의 간격을 기준으로 배열 생성
# np.float was removed from NumPy 1.24; use the builtin float (or
# np.float64) as the dtype argument instead.
np.arange(0, 10, 2, dtype=float)
plt.plot(np.arange(0, 10, 2, dtype=float), 'o')
plt.show()
# #### np.logspace
#
# > np.logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None)
#
# - 로그 스케일의 linspace 함수
# - 로그 스케일로 지정된 범위에서 num 개수만큼 균등 간격으로 데이터 생성한 후 배열 만듦
np.logspace(0.1, 1, 20, endpoint=True)
plt.plot(np.logspace(0.1, 1, 20, endpoint=True), 'o')
plt.show()
# ### 난수 기반 배열 생성
# NumPy는 `난수 발생 및 배열 생성을 생성하는 numpy.random 모듈을 제공`합니다. 이 절에서는 이 모듈의 함수 사용법을 소개합니다. numpy.random 모듈은 다음과 같은 함수를 제공합니다.
#
# - np.random.seed
# - np.random.normal
# - np.random.rand
# - np.random.randn
# - np.random.randint
# - np.random.random
# #### np.random.seed
#
# 무작위 수를 만드는 난수는 특정 시작 숫자로부터 난수처럼 보이는 수열을 만드는 알고리즘의 결과물입니다. 즉, random 모듈의 함수는 실행할 때 마다 무작위 수를 반환합니다. 따라서 `시작점을 설정함으로써 난수 발생을 재연`할 수 있습니다. 난수의 시작점을 설정하는 함수가 np.random.seed 입니다. 이 수를 고정시키면 더이상 어디서나 같은 난수가 발생됩니다
# seed값을 고정
np.random.seed(100)
# #### np.random.normal
#
# > np.random.normal
#
# - normal(loc=0.0, scale=1.0, size=None)
# - **정규 분포 확률** 밀도에서 표본 추출
# - loc: 정규 분포의 평균
# - scale: 표준편차
# - 생성한 난수는 정규 분포의 형상
np.random.normal(2, 1, (2, 3))
plt.hist(np.random.normal(2, 1, 10000), bins=100)
plt.show()
# #### np.random.rand
#
# > numpy.random.rand(d0, d1, ..., dn)
#
# - Shape이 (d0, d1, ..., dn) 인 배열 생성 후 난수로 초기화
# - 난수: [0. 1)의 균등 분포(Uniform Distribution) 형상으로 표본 추출
# - Gaussian normal
# - 균등한 비율로 표본 추출
np.random.rand(3,2)
plt.hist(np.random.rand(10000), bins=10)
plt.show()
# #### np.random.randn
#
# > np.random.randn(d0, d1, ..., dn)
#
# - (d0, d1, ..., dn) shape 배열 생성 후 난수로 초기화
# - 난수: **표준 정규 분포(standard normal distribution)** 에서 표본 추출
np.random.randn(2, 4)
plt.hist(np.random.randn(10000), bins=100)
plt.show()
# #### np.random.randint
#
# > np.random.randint(low, high=None, size=None, dtype='l')
#
# - 지정된 shape으로 배열을 만들고 low 부터 high 미만의 범위에서 정수 표본 추출
# - 균등 분포로 표본 추출
np.random.randint(5, 10, size=(2, 4))
np.random.randint(5, size=10)
plt.hist(np.random.randint(100, size=10000), bins=10)
plt.show()
# #### np.random.random
#
# > np.random.random(size=None)
#
# - 난수: [0., 1.)의 균등 분포(Uniform Distribution)에서 표본 추출
np.random.random((2, 4))
plt.hist(np.random.random(10000), bins=10)
plt.show()
# ---
#
# ## NumPy 배열 상태 검사
#
# NumPy는 배열의 상태를 검사하는 다음과 같은 방법을 제공합니다.
#
# |배열 속성 검사 항목|배열 속성 확인 방법|예시| 결과|
# |---|---|---|---|
# |배열 shape| np.ndarray.shape 속성| arr.shape| (5, 2, 3)|
# |배열 길이| 일차원의 배열 길이 확인| len(arr) |5|
# |배열 차원| np.ndarray.ndim 속성 |arr.ndim |3|
# |배열 요소 수| np.ndarray.size 속성| arr.size |30|
# |배열 타입| np.ndarray.dtype 속성| arr.dtype |dtype(‘float64’)|
# |배열 타입 명 |np.ndarray.dtype.name 속성| arr.dtype.name| float64|
# |배열 타입 변환| np.ndarray.astype 함수| arr.astype(np.int)| 배열 타입 변환|
# NumPy 배열 객체는 다음과 같은 방식으로 속성을 확인할 수 있습니다.
# Random float64 array of shape (5, 2, 3) used to demonstrate attributes.
arr = np.random.random((5,2,3))
arr
# check the array's type
type(arr)
# check the array's shape
arr.shape
# length of the array (size of the first axis)
len(arr)
# number of dimensions
arr.ndim
# number of elements: shape (k, m, n) ==> k*m*n
arr.size
# element dtype
arr.dtype
# dtype name
arr.dtype.name
# Convert the elements to int.
# astype() returns a NEW truncated array; `arr` itself is not modified.
# np.int was removed from NumPy 1.24 -> use the builtin int.
arr.astype(int)
# astype(float) on the (unchanged) arr simply yields the original values.
# np.float was likewise removed -> builtin float.
arr.astype(float)
# ---
#
# ## Reference
# - https://www.intel.com
# - https://numpy.org/
# - https://github.com/rougier/numpy-tutorial
# - https://github.com/rougier/numpy-100
# - http://aikorea.org/cs231n/python-numpy-tutorial/
# - http://riseshia.github.io/2017/01/30/numpy-tutorial-with-code.html
# - https://datascienceschool.net/view-notebook/24e43f9df9ec4abab15e32e68e982cc1/
# - http://taewan.kim/post/numpy_cheat_sheet/
| numpy/.ipynb_checkpoints/NumPy Basic Lecture01-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np # useful for many scientific computing in Python
import pandas as pd # primary data structure library
# +
# `skip_footer` was renamed to `skipfooter` and the old alias removed in
# pandas 1.0; the loaded data is unchanged.
df_can = pd.read_excel('https://ibm.box.com/shared/static/lw190pt9zpy5bd1ptyg2aw15awomz9pu.xlsx',
                       sheet_name='Canada by Citizenship',
                       skiprows=range(20),
                       skipfooter=2)
print ('Data read into a pandas dataframe!')
df_can.head()
# tip: You can specify the number of rows you'd like to see as follows: df_can.head(10)
df_can.tail()
df_can.info()
df_can.columns.values
df_can.index.values
print(type(df_can.columns))
print(type(df_can.index))
# +
df_can.columns.tolist()
df_can.index.tolist()
print (type(df_can.columns.tolist()))
print (type(df_can.index.tolist()))
# -
# size of dataframe (rows, columns)
df_can.shape
# in pandas axis=0 represents rows (default) and axis=1 represents columns.
df_can.drop(['AREA','REG','DEV','Type','Coverage'], axis=1, inplace=True)
df_can.head(2)
df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent', 'RegName':'Region'}, inplace=True)
df_can.columns
df_can['Total'] = df_can.sum(axis=1)
df_can.isnull().sum()
df_can.describe()
df_can.Country # returns a series
df_can[['Country', 1980, 1981, 1982, 1983, 1984, 1985]] # returns a dataframe
# notice that 'Country' is string, and the years are integers.
# for the sake of consistency, we will convert all column names to string later
df_can.set_index('Country', inplace=True)
# tip: The opposite of set is reset. So to reset the index, we can use df_can.reset_index(
df_can.head(3)
# optional: to remove the name of the index
df_can.index.name = None
# +
# 1. the full row data (all columns)
print(df_can.loc['Japan'])
# alternate methods
print(df_can.iloc[87])
print(df_can[df_can.index == 'Japan'].T.squeeze())
# +
# 2. for year 2013
print(df_can.loc['Japan', 2013])
# alternate method
print(df_can.iloc[87, 36]) # year 2013 is the last column, with a positional index of 36
# -
# 3. for years 1980 to 1985
# BUG fix: the label list previously repeated 1984 and omitted 1985, so it
# did not match the positional version below (columns 3..8 = 1980..1985).
print(df_can.loc['Japan', [1980, 1981, 1982, 1983, 1984, 1985]])
print(df_can.iloc[87, [3, 4, 5, 6, 7, 8]])
df_can.columns = list(map(str, df_can.columns))
# [print (type(x)) for x in df_can.columns.values] #<-- uncomment to check type of column headers
# useful for plotting later on
years = list(map(str, range(1980, 2014)))
years
# 1. create the condition boolean series
condition = df_can['Continent'] == 'Asia'
print (condition)
# 2. pass this condition into the dataFrame
df_can[condition]
# +
# we can pass mutliple criteria in the same line.
# let's filter for AreaNAme = Asia and RegName = Southern Asia
df_can[(df_can['Continent']=='Asia') & (df_can['Region']=='Southern Asia')]
# note: When using 'and' and 'or' operators, pandas requires we use '&' and '|' instead of 'and' and 'or'
# don't forget to enclose the two conditions in parentheses
# -
print ('data dimensions:', df_can.shape)
print(df_can.columns)
df_can.head(2)
# +
# we are using the inline backend
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
# -
print ('Matplotlib version: ', mpl.__version__) # >= 2.0.0
print(plt.style.available)
mpl.style.use(['ggplot']) # optional: for ggplot-like style
haiti = df_can.loc['Haiti', years] # passing in years 1980 - 2013 to exclude the 'total' column
haiti.head()
haiti.plot()
# +
haiti.index = haiti.index.map(int) # let's change the index values of Haiti to type integer for plotting
haiti.plot(kind='line')
plt.title('Immigration from Haiti')
plt.ylabel('Number of immigrants')
plt.xlabel('Years')
plt.show() # need this line to show the updates made to the figure
# +
haiti.plot(kind='line')
plt.title('Immigration from Haiti')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
# annotate the 2010 Earthquake.
# syntax: plt.text(x, y, label)
plt.text(2000, 6000, '2010 Earthquake') # see note below
plt.show()
# -
| python/coursera_python/IBM/mat_plotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import pandas as pd
sys.path.append("..")
# Root directory holding one sub-directory per dataset.
DATA = os.path.normpath("../data/")


def load_data(name):
    """Load dataset `name` from DATA/<name>/<name>.csv as a DataFrame."""
    path = os.path.join(DATA, name, name + ".csv")
    return pd.read_csv(path)
# -
data = load_data("occupancy")
# +
from yellowbrick.classifier import ConfusionMatrix
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split as tts

features = ["temperature", "relative humidity", "light", "C02", "humidity"]

# Extract the numpy arrays from the data frame.
# DataFrame.as_matrix() was removed in pandas 1.0 -> use .to_numpy()
X = data[features].to_numpy()
y = data.occupancy.to_numpy()

X_train, X_test, y_train, y_test = tts(X, y, test_size=0.2)

# Confusion matrix of a logistic-regression classifier, rendered at a
# fixed figure size (pixels).
oz = ConfusionMatrix(LogisticRegression(), size=(1080, 720))
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
# NOTE(review): poof() is the legacy name; newer yellowbrick uses show()
oz.poof()
# -
| examples/rebeccabilbro/size_param_issue.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.7 64-bit
# language: python
# name: python37764bit1cf588f428744b0ea2692c08203fb1fc
# ---
# +
# (removed a stray 'stupid' string literal that evaluated to nothing)
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.feature_selection import RFE
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_validate
from scipy.optimize import minimize
import warnings

# Silence sklearn/pandas deprecation chatter in the notebook output.
warnings.filterwarnings('ignore')
# +
pd.set_option('display.max_columns',None)
# training data
train = pd.read_csv('train.csv')
# test data
test = pd.read_csv('test.csv')
# Stack train+test so the galaxy encoding below is shared by both.
df=pd.concat([train,test], sort=False)
train.head()
# -
# Encode the galaxy name as consecutive integer category codes.
df["galaxy"] = df["galaxy"].astype('category')
df["galaxy"] = df["galaxy"].cat.codes
# Split back: the first 3865 rows are the training set.
train = df[:3865]
test = df[3865:]
# The test portion has no target.
test=test.drop("y", axis = 1)
# Keep an untouched copy of the test set for the final prediction pass.
test_res= test.copy()
def galaxy_summary(frame):
    """Print distinct-galaxy count and average samples per galaxy.

    Returns the set of galaxy codes so callers can iterate them later.
    (Replaces two copy-pasted loops over train and test.)
    """
    galaxies = set(frame["galaxy"])
    total = sum(len(frame.loc[frame["galaxy"] == g]) for g in galaxies)
    print("Total distinct galaxies: {}".format(len(galaxies)))
    print("Average samples per galaxy: {}".format(total / len(galaxies)))
    return galaxies


train_gal = galaxy_summary(train)
test_gal = galaxy_summary(test)
print("Train vector: " + str(train.shape))
print("Test vector: " + str(test.shape))
def cross_validation_loop(data, cor):
    """Cross-validate a gradient-boosting regressor on one galaxy's rows.

    Keeps only the `cor` features with the strongest absolute correlation
    to the target, mean-imputes missing values, standardises, and returns
    the mean 4-fold negative RMSE.
    """
    labels = data['y']
    features = data.drop(['galaxy', 'y'], axis=1)
    # Select the `cor` columns most correlated (in absolute value) with y.
    best_cols = abs(features.corrwith(labels)).nlargest(cor).index
    features = features[best_cols]
    # Mean-impute, then standardise.
    features = SimpleImputer(missing_values=np.nan, strategy='mean').fit_transform(features)
    features = StandardScaler().fit_transform(features)
    cv_results = cross_validate(GradientBoostingRegressor(n_estimators=300),
                                features, labels, cv=4,
                                scoring='neg_root_mean_squared_error')
    return np.mean(cv_results['test_score'])
train_gal = set(train["galaxy"])
# Galaxy 126 is excluded from the sweep.
# NOTE(review): reason not visible here (presumably too few samples) — confirm.
train_gal.remove(126)
def loop_train(cor):
    """Mean cross-validated error over all remaining galaxies when keeping
    the `cor` most correlated features (see cross_validation_loop)."""
    errors = []
    for gal in train_gal:
        index = train.index[train['galaxy'] == gal]
        data = train.loc[index]
        errors.append(cross_validation_loop(data, cor))
    return np.mean(errors)
# Sweep several feature counts and report the mean error for each.
cor = [20, 25, 30, 40, 50, 60, 70, 80]
errors = []
for x in cor:
    errors.append(loop_train(x))
print(errors)
def test_loop(data, test_data):
    """Train on one galaxy's labelled rows and predict its test rows.

    Feature selection (top-20 absolute correlation with the target), mean
    imputation and standardisation are fitted on the training rows only and
    then applied to both splits; a 300-tree gradient-boosting regressor
    produces the predictions.
    """
    labels = data['y']
    feats = data.drop(['galaxy', 'y'], axis=1)
    top_cols = abs(feats.corrwith(labels)).nlargest(20).index
    X_train = feats[top_cols]
    X_test = test_data[top_cols]
    # Fit preprocessing on the training rows, then apply to both splits.
    imputer = SimpleImputer(missing_values=np.nan, strategy='mean').fit(X_train)
    X_train = imputer.transform(X_train)
    X_test = imputer.transform(X_test)
    scaler = StandardScaler().fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    model = GradientBoostingRegressor(n_estimators=300)
    model.fit(X_train, labels)
    return model.predict(X_test)
# +
# Restore the untouched test features and order rows by galaxy so the
# per-galaxy prediction blocks can be written out contiguously.
test = test_res
test = test.sort_values(by=['galaxy'])
test_pred = pd.DataFrame(0, index=np.arange(len(test)), columns=["predicted_y"])
# -
# Predict each galaxy's test rows from a model trained on that galaxy's
# training rows (see test_loop).
i = 0
for gal in test_gal:
    count = len(test.loc[test['galaxy'] == gal])
    index = train.index[train['galaxy'] == gal]
    data = train.loc[index]
    pred = test_loop(data, test.loc[test['galaxy'] == gal])
    test_pred.loc[i:i + count - 1, 'predicted_y'] = pred
    i = i + count
test["predicted_y"] = test_pred.to_numpy()
test.sort_index(inplace=True)
predictions = test["predicted_y"]
index = predictions
# Heuristic "potential for increase": smaller predicted y => more potential.
pot_inc = -np.log(index + 0.01) + 3
p2 = pot_inc**2
ss = pd.DataFrame({
    'Index': test.index,
    'pred': predictions,
    'opt_pred': 0,
    'eei': test['existence expectancy index'],  # So we can split into low and high EEI galaxies
})
# Allocate 100 units to the 400 galaxies with the highest potential.
ss.loc[p2.nlargest(400).index, 'opt_pred'] = 100
ss = ss.sort_values('pred')
# BUG FIX: the original `ss.iloc[400:600].opt_pred = 50` assigned through a
# chained indexer, which writes to a temporary copy and silently leaves `ss`
# unchanged (pandas SettingWithCopy). Assign through a single indexer.
ss.iloc[400:600, ss.columns.get_loc('opt_pred')] = 50
ss = ss.sort_index()
increase = (ss['opt_pred'] * p2) / 1000
print(sum(increase), ss.loc[ss.eei < 0.7, 'opt_pred'].sum(), ss['opt_pred'].sum())
ss[['Index', 'pred', 'opt_pred']].to_csv('submission.csv', index=False)
| Code from forum 2.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # appointments
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/examples/appointments.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/python/appointments.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Appointment selection.
This module maximizes the number of appointments that can
be fulfilled by a crew of installers while staying close to ideal
ratio of appointment types.
"""
# overloaded sum() clashes with pytype.
# pytype: disable=wrong-arg-types
from absl import app
from absl import flags
from ortools.linear_solver import pywraplp
from ortools.sat.python import cp_model
FLAGS = flags.FLAGS
flags.DEFINE_integer('load_min', 480, 'Minimum load in minutes.')
flags.DEFINE_integer('load_max', 540, 'Maximum load in minutes.')
flags.DEFINE_integer('commute_time', 30, 'Commute time in minutes.')
flags.DEFINE_integer('num_workers', 98, 'Maximum number of workers.')
class AllSolutionCollector(cp_model.CpSolverSolutionCallback):
    """CP-SAT callback that records every solution the solver visits."""

    def __init__(self, variables):
        """`variables`: the IntVars whose values are captured per solution."""
        super().__init__()
        self._watched_vars = variables
        self._solutions = []

    def on_solution_callback(self):
        """Invoked by the solver on each solution; snapshot the var values."""
        self._solutions.append([self.Value(v) for v in self._watched_vars])

    def combinations(self):
        """Return all recorded solutions (one list of ints per solution)."""
        return self._solutions
def EnumerateAllKnapsacksWithRepetition(item_sizes, total_size_min,
                                        total_size_max):
    """Enumerate all possible knapsacks with total size in the given range.
    Args:
      item_sizes: a list of integers. item_sizes[i] is the size of item #i.
      total_size_min: an integer, the minimum total size.
      total_size_max: an integer, the maximum total size.
    Returns:
      The list of all the knapsacks whose total size is in the given inclusive
      range. Each knapsack is a list [#item0, #item1, ... ], where #itemK is an
      nonnegative integer: the number of times we put item #K in the knapsack.
    """
    model = cp_model.CpModel()
    # One counter variable per item type; an item of size s can appear at
    # most total_size_max // s times in any feasible knapsack.
    variables = [
        model.NewIntVar(0, total_size_max // size, '') for size in item_sizes
    ]
    # Constrain the total packed size to the inclusive [min, max] window.
    load = sum(variables[i] * size for i, size in enumerate(item_sizes))
    model.AddLinearConstraint(load, total_size_min, total_size_max)
    # Enumerate every feasible assignment via a solution callback.
    solver = cp_model.CpSolver()
    solution_collector = AllSolutionCollector(variables)
    solver.SearchForAllSolutions(model, solution_collector)
    return solution_collector.combinations()
def AggregateItemCollectionsOptimally(item_collections, max_num_collections,
                                      ideal_item_ratios):
    """Selects a set (with repetition) of combination of items optimally.
    Given a set of collections of N possible items (in each collection, an item
    may appear multiple times), a given "ideal breakdown of items", and a
    maximum number of collections, this method finds the optimal way to
    aggregate the collections in order to:
    - maximize the overall number of items
    - while keeping the ratio of each item, among the overall selection, as
      close as possible to a given input ratio (which depends on the item).
    Each collection may be selected more than one time.
    Args:
      item_collections: a list of item collections. Each item collection is a
          list of integers [#item0, ..., #itemN-1], where #itemK is the number
          of times item #K appears in the collection, and N is the number of
          distinct items.
      max_num_collections: an integer, the maximum number of item collections
          that may be selected (counting repetitions of the same collection).
      ideal_item_ratios: A list of N float which sums to 1.0: the K-th element
          is the ideal ratio of item #K in the whole aggregated selection.
    Returns:
      A list of integers parallel to *item_collections*: element #i is the
      number of times collection #i is selected in the optimal aggregation.
      (The internal MIP maximizes total items minus total deviation from the
      ideal ratios.) Returns the empty list when the solver does not prove
      optimality.
    """
    solver = pywraplp.Solver('Select',
                             pywraplp.Solver.SCIP_MIXED_INTEGER_PROGRAMMING)
    n = len(ideal_item_ratios)
    num_distinct_collections = len(item_collections)
    # Upper bound on the total number of items across all selections.
    max_num_items_per_collection = 0
    for template in item_collections:
        max_num_items_per_collection = max(max_num_items_per_collection,
                                           sum(template))
    upper_bound = max_num_items_per_collection * max_num_collections
    # num_selections_of_collection[i] is an IntVar that represents the number
    # of times that we will use collection #i in our global selection.
    num_selections_of_collection = [
        solver.IntVar(0, max_num_collections, 's[%d]' % i)
        for i in range(num_distinct_collections)
    ]
    # num_overall_item[i] is an IntVar that represents the total count of item #i,
    # aggregated over all selected collections. This is enforced with dedicated
    # constraints that bind them with the num_selections_of_collection vars.
    num_overall_item = [
        solver.IntVar(0, upper_bound, 'num_overall_item[%d]' % i)
        for i in range(n)
    ]
    # Bind num_overall_item[i] == sum_j selections[j] * item_collections[j][i].
    for i in range(n):
        ct = solver.Constraint(0.0, 0.0)
        ct.SetCoefficient(num_overall_item[i], -1)
        for j in range(num_distinct_collections):
            ct.SetCoefficient(num_selections_of_collection[j],
                              item_collections[j][i])
    # Maintain the num_all_item variable as the sum of all num_overall_item
    # variables.
    num_all_items = solver.IntVar(0, upper_bound, 'num_all_items')
    solver.Add(solver.Sum(num_overall_item) == num_all_items)
    # Sets the total number of workers.
    solver.Add(solver.Sum(num_selections_of_collection) == max_num_collections)
    # Objective variables: deviation_vars[i] >= |count_i - ideal_i * total|.
    deviation_vars = [
        solver.NumVar(0, upper_bound, 'deviation_vars[%d]' % i)
        for i in range(n)
    ]
    for i in range(n):
        deviation = deviation_vars[i]
        solver.Add(deviation >= num_overall_item[i] -
                   ideal_item_ratios[i] * num_all_items)
        solver.Add(deviation >= ideal_item_ratios[i] * num_all_items -
                   num_overall_item[i])
    solver.Maximize(num_all_items - solver.Sum(deviation_vars))
    result_status = solver.Solve()
    if result_status == pywraplp.Solver.OPTIMAL:
        # The problem has an optimal solution.
        return [int(v.solution_value()) for v in num_selections_of_collection]
    return []
def GetOptimalSchedule(demand):
    """Computes the optimal schedule for the installation input.
    Args:
      demand: a list of "appointment types". Each "appointment type" is
          a triple (ideal_ratio_pct, name, duration_minutes), where
          ideal_ratio_pct is the ideal percentage (in [0..100.0]) of that
          type of appointment among all appointments scheduled.
    Returns:
      A list of pairs (num_workers_with_this_schedule, schedule), where each
      schedule is a list of (num_appointments, appointment_name) pairs for
      the appointment types actually present in that day schedule.
    """
    # Every feasible single-worker day: appointment durations (plus commute)
    # must fill between load_min and load_max minutes.
    combinations = EnumerateAllKnapsacksWithRepetition(
        [a[2] + FLAGS.commute_time for a in demand], FLAGS.load_min,
        FLAGS.load_max)
    print(('Found %d possible day schedules ' % len(combinations) +
           '(i.e. combination of appointments filling up one worker\'s day)'))
    # Optimally distribute the available workers over the day schedules.
    selection = AggregateItemCollectionsOptimally(
        combinations, FLAGS.num_workers, [a[0] / 100.0 for a in demand])
    # Keep only the schedules that were actually selected, and within each,
    # only the appointment types that actually occur.
    output = []
    for i in range(len(selection)):
        if selection[i] != 0:
            output.append((selection[i], [(combinations[i][t], demand[t][1])
                                          for t in range(len(demand))
                                          if combinations[i][t] != 0]))
    return output
# Example problem: three appointment types with their ideal share of the
# schedule (percent) and their duration in minutes.
demand = [(45.0, 'Type1', 90), (30.0, 'Type2', 120), (25.0, 'Type3', 180)]
print('*** input problem ***')
print('Appointments: ')
for a in demand:
    print(' %.2f%% of %s : %d min' % (a[0], a[1], a[2]))
print('Commute time = %d' % FLAGS.commute_time)
print('Acceptable duration of a work day = [%d..%d]' %
      (FLAGS.load_min, FLAGS.load_max))
print('%d workers' % FLAGS.num_workers)
# Solve: enumerate feasible day schedules, then pick the optimal mix.
selection = GetOptimalSchedule(demand)
print()
# Tally how many installations of each type the chosen schedules cover.
installed = 0
installed_per_type = {}
for a in demand:
    installed_per_type[a[1]] = 0
print('*** output solution ***')
for template in selection:
    num_instances = template[0]
    print('%d schedules with ' % num_instances)
    for t in template[1]:
        mult = t[0]
        print(' %d installation of type %s' % (mult, t[1]))
        installed += num_instances * mult
        installed_per_type[t[1]] += num_instances * mult
print()
print('%d installations planned' % installed)
# Report the realised per-type percentages against the ideal ratios above.
for a in demand:
    name = a[1]
    per_type = installed_per_type[name]
    print((' %d (%.2f%%) installations of type %s planned' %
           (per_type, per_type * 100.0 / installed, name)))
| examples/notebook/examples/appointments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
lista = [1,2,3,4,5,6]
lista
a = 6
b = 7
a + b
# # titulo
# texto qualquer
#
# ## subtexto
# texto qualquer
#
# * uma lista
# * mais uma linha
#
# %%HTML
# BUG FIX: the %%HTML cell body below was left as raw HTML, which is a
# Python syntax error in the jupytext representation; it must be commented
# out alongside its (already commented) cell magic.
# <p> Está <strong>formatando</strong> usando HTML</p>
| usando notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Testing Different Classifiers, Hyperparameter-Tuning Methods, and Speed on Diabetes Classification
#
# 1. Linear Regression Classifier
#     1. Classification
# 2. Random Forest Classifier
#     1. Classification
#     2. Feature Importance
#     3. Hyper-Parameter Tuning
# 3. Support Vector Machine
#     1. Preprocessing
#     2. Classification
#     3. Hyper-Parameter Tuning
#     4. Cross-Validation Results
# 4. Principal Component Analysis
#     1. Dimensionality reduction is an essential task in many data analysis exercises, and it involves projecting the data to a lower-dimensional space using Singular Value Decomposition.
#
#export
import numpy as np
import pandas as pd
import sys
# # !{sys.executable} -m pip uninstall networkx
# # !{sys.executable} -m pip install pandas==1.1.0
# #!{sys.executable} -m pip install networkx==2.4
import time
import gc
import random
from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate, train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, normalize
from sklearn.decomposition import PCA
from sklearn.impute import SimpleImputer
print(pd.__version__)
import tests as tests
# # Data Import and Cleaning
class Data():
    """
    Loads the diabetes CSV and produces train/test splits.
    """
    def dataAllocation(self, path):
        """
        Read the csv at `path` and separate features from the 'y' target.
        args: string path for .csv file
        return: numpy array of features, numpy array of targets
        """
        frame = pd.read_csv(path)
        x_data = frame.loc[:, frame.columns != "y"].to_numpy()
        y_data = frame.loc[:, "y"].to_numpy()
        return x_data, y_data
    def trainSets(self, x_data, y_data):
        """
        Shuffled 70/30 train/test split with random_state 614, as required
        by the assignment; targets are returned as pandas Series.
        args: feature array, target array
        return: array, array, pandas series, pandas series
        """
        split = train_test_split(x_data, y_data, test_size=0.3,
                                 random_state=614, shuffle=True)
        x_train, x_test, y_train, y_test = split
        return x_train, x_test, pd.Series(y_train), pd.Series(y_test)
# # Linear Regression
class LinearRegressionModel():
    """
    Linear regression used as a 0/1 classifier by thresholding at 0.5.
    """
    def linearClassifer(self, x_train, x_test, y_train):
        """
        Fit a LinearRegression model on the training data and return its
        raw (continuous) predictions for both splits.
        args: features, features, targets
        return: numpy array, numpy array
        """
        model = LinearRegression().fit(x_train, y_train)
        return model.predict(x_train), model.predict(x_test)
    def lgTrainAccuracy(self, y_train, y_predict_train):
        """
        Round raw outputs (>= 0.5 -> 1, else 0) and score against y_train.
        return: float accuracy
        """
        rounded = np.where(y_predict_train >= 0.5, 1, 0)
        return accuracy_score(y_train, rounded)
    def lgTestAccuracy(self, y_test, y_predict_test):
        """
        Round raw outputs (>= 0.5 -> 1, else 0) and score against y_test.
        return: float accuracy
        """
        rounded = np.where(y_predict_test >= 0.5, 1, 0)
        return accuracy_score(y_test, rounded)
# # Random Forest Classifier
# +
#export
class RFClassifier():
    """
    Random forest classification plus feature importance and grid search.
    """
    def randomForestClassifier(self, x_train, x_test, y_train):
        """
        Fit a RandomForestClassifier (random_state=614) and return the
        model together with its predictions on both splits.
        return: RandomForestClassifier object, numpy array, numpy array
        """
        model = RandomForestClassifier(random_state=614)
        model.fit(x_train, y_train)
        return model, model.predict(x_train), model.predict(x_test)
    def rfTrainAccuracy(self, y_train, y_predict_train):
        """Training-set accuracy. return: float"""
        return accuracy_score(y_train, y_predict_train)
    def rfTestAccuracy(self, y_test, y_predict_test):
        """Test-set accuracy. return: float"""
        return accuracy_score(y_test, y_predict_test)
    ### Feature Importance ###
    def rfFeatureImportance(self, rf_clf):
        """Impurity-based importances of a fitted forest. return: float array"""
        return rf_clf.feature_importances_
    def sortedRFFeatureImportanceIndicies(self, rf_clf):
        """Feature indices ordered least- to most-important. return: int array"""
        return np.argsort(rf_clf.feature_importances_)
    ### Hyper-parameter Tuning ###
    def hyperParameterTuning(self, rf_clf, x_train, y_train):
        """
        Grid-search n_estimators in {4, 16, 256} and max_depth in {2, 8, 16}.
        return: GridSearchCV object, fitted GridSearchCV
        """
        grid = {'n_estimators': [4, 16, 256],
                'max_depth': [2, 8, 16]}
        search = GridSearchCV(estimator=rf_clf, param_grid=grid)
        fitted_search = search.fit(x_train, y_train)
        return search, fitted_search
    def bestParams(self, gscv_rfc):
        """Best parameter combination found by the search. return: dict"""
        return gscv_rfc.best_params_
    def bestScore(self, gscv_rfc):
        """Best mean cross-validated score of the search. return: float"""
        return gscv_rfc.best_score_
# -
# # Support Vector Machine
# +
class SupportVectorMachine():
    """
    SVM workflow: standardisation, fitting, grid search and CV reporting.
    """
    ### Pre-process ###
    def dataPreProcess(self, x_train, x_test):
        """
        Standardise both splits; the scaler is fitted on the training
        set only (avoids test-set leakage and speeds up the grid search).
        return: scaled train features, scaled test features
        """
        scaler = StandardScaler().fit(x_train)
        return scaler.transform(x_train), scaler.transform(x_test)
    ### Classification ###
    def SVCClassifer(self, scaled_x_train, scaled_x_test, y_train):
        """
        Fit an SVC (gamma='auto') on the scaled training data and return
        predictions for both scaled splits.
        return: numpy array, numpy array
        """
        clf = SVC(gamma='auto')
        clf.fit(scaled_x_train, y_train)
        return clf.predict(scaled_x_train), clf.predict(scaled_x_test)
    def SVCTrainAccuracy(self, y_train, y_predict_train):
        """Training-set accuracy of the raw SVC. return: float"""
        return accuracy_score(y_train, y_predict_train)
    def SVCTestAccuracy(self, y_test, y_predict_test):
        """Test-set accuracy of the raw SVC. return: float"""
        return accuracy_score(y_test, y_predict_test)
    ### Hyper-parameter Tuning ###
    def SVMBestScore(self, scaled_x_train, y_train):
        """
        Grid-search kernel in {linear, rbf} and C in {0.01, 0.1, 1.0}
        with n_jobs=-1, return_train_score=True and gamma='auto'.
        return: fitted GridSearchCV, float best score
        """
        grid = {'kernel': ('linear', 'rbf'), 'C': [0.01, 0.1, 1.0]}
        search = GridSearchCV(estimator=SVC(gamma='auto'), param_grid=grid,
                              n_jobs=-1, return_train_score=True)
        svm_cv = search.fit(scaled_x_train, y_train)
        return svm_cv, search.best_score_
    def SVCClassiferParam(self, svm_cv, scaled_x_train, scaled_x_test, y_train):
        """
        Predictions of the tuned model on both scaled splits.
        return: numpy array, numpy array
        """
        return svm_cv.predict(scaled_x_train), svm_cv.predict(scaled_x_test)
    def svcTrainAccuracy(self, y_train, y_predict_train):
        """Training-set accuracy after tuning. return: float"""
        return accuracy_score(y_train, y_predict_train)
    def svcTestAccuracy(self, y_test, y_predict_test):
        """Test-set accuracy after tuning. return: float"""
        return accuracy_score(y_test, y_predict_test)
    ### Cross Validation Results ###
    def SVMRankTestScore(self, svm_cv):
        """Rank of each hyper-parameter combination (cv_results_).
        return: int array"""
        return svm_cv.cv_results_['rank_test_score']
    def SVMMeanTestScore(self, svm_cv):
        """Mean test score of each hyper-parameter combination (cv_results_).
        return: float array"""
        return svm_cv.cv_results_['mean_test_score']
# -
# # PCA
class PCAClassifer():
    """
    Thin wrapper around sklearn's PCA for dimensionality reduction.
    """
    def pcaClassifer(self, x_data):
        """
        Fit PCA with 8 components and the 'full' SVD solver (other
        parameters at their defaults) on x_data.
        return: fitted PCA object
        """
        return PCA(n_components=8, svd_solver='full').fit(x_data)
    def pcaExplainedVarianceRatio(self, pca):
        """
        Fraction of variance explained by each kept component.
        return: float array
        """
        return pca.explained_variance_ratio_
    def pcaSingularValues(self, pca):
        """
        Singular values of each kept component.
        return: float array
        """
        return pca.singular_values_
# +
if __name__ == "__main__":
    # Run the course-provided test harness (local `tests` module) against
    # each model class in turn.
    tests.dataTest(Data)
    print("\n")
    tests.linearTest(Data, LinearRegressionModel)
    print("\n")
    tests.RandomForestTest(Data, RFClassifier)
    print("\n")
    best_score_svm = tests.SupportVectorMachineTest(Data, SupportVectorMachine)
    print("\n")
    tests.PCATest(Data, PCAClassifer)
# -
def get_all_accuracy():
    """Run the SVM pipeline end to end on the diabetes data set.

    Loads the CSV, makes the 70/30 split, standardises, fits the raw SVC
    and the grid search, prints and returns the best cross-validated
    grid-search score.
    """
    dataset = Data()
    svm = SupportVectorMachine()
    data = '../data/pima-indians-diabetes.csv'
    x_data, y_data = dataset.dataAllocation(data)
    x_train, x_test, y_train, y_test = dataset.trainSets(x_data, y_data)
    scaled_x_train, scaled_x_test = svm.dataPreProcess(x_train, x_test)
    y_predict_train, y_predict_test = svm.SVCClassifer(scaled_x_train, scaled_x_test, y_train)
    svm_cv, best_score_svm = svm.SVMBestScore(scaled_x_train, y_train)
    # BUG FIX: was `print(best_score)` — `best_score` is undefined here and
    # raised a NameError; the local result is `best_score_svm`.
    print(best_score_svm)
    return best_score_svm
| MutlipleClassifiers(ObjectOriented)/.ipynb_checkpoints/classifier_diabetes-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Symbolic Regressor Example v1.0
# <i>Use of the gplearn Symbolic Regressor class.</i>
# <P/><P/>
# <NAME><BR>
# October 22, 2020
import graphviz
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from gplearn.genetic import SymbolicRegressor, SymbolicTransformer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from tqdm import tqdm
def plot_fitness_curves(pop, prod):
    """Plot best-fitness-per-generation curves for each CV fold plus the
    final ("prod") model.

    Args:
        pop: list of fitted SymbolicRegressor instances, one per fold.
        prod: the final fitted SymbolicRegressor.
    """
    fig, ax = plt.subplots()
    # One x tick per generation; all regressors share the same generation
    # count, so fold 0's history length is used for the axis.
    x = np.arange(len(pop[0].run_details_['best_fitness']))
    for i, gp in enumerate(pop):
        plt.plot(x, gp.run_details_['best_fitness'], label="k="+str(i))
    plt.plot(x, prod.run_details_['best_fitness'], label="prod")
    plt.title("Best Fitness by Fold Through Time")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.legend()
def SymbolicRegressorFactory(f_names):
    """Build a gplearn SymbolicRegressor with this project's settings.

    Args:
        f_names: list of feature-name strings displayed in evolved programs.
    Returns:
        An unfitted SymbolicRegressor.
    """
    return SymbolicRegressor(
        population_size=2000,
        generations=25,
        tournament_size=20,
        stopping_criteria=0.0,
        const_range=(0.0, 0.0),  # no ephemeral constants: programs use features only
        init_depth=(2, 6),
        init_method='half and half',
        function_set=['add', 'sub', 'mul', 'div', 'sqrt', 'abs'],
        metric='mean absolute error',
        parsimony_coefficient=0.001,  # penalise bloated programs
        p_crossover=0.9,
        p_subtree_mutation=0.01,
        p_hoist_mutation=0.01,
        p_point_mutation=0.01,
        p_point_replace=0.05,
        max_samples=1.0,
        feature_names=f_names,
        warm_start=False,
        low_memory=False,
        n_jobs=-1,
        verbose=0,
        random_state=None)
def partition_data(src, oos=0.2, folds=5, targets=1, shuffle=True, rand_seed=None):
    """Load a CSV and split it for k-fold model selection.

    Args:
        src: path to the CSV; the last `targets` column(s) are the target.
        oos: fraction of rows held out for final out-of-sample scoring.
        folds: number of cross-validation folds.
        targets: number of trailing target columns.
        shuffle: whether KFold shuffles before splitting.
        rand_seed: seed for reproducible splits (None = nondeterministic).
    Returns:
        (X_in_sample, X_out_sample, y_in_sample, y_out_sample, kf, f_names)
        where kf is a configured KFold and f_names the feature column names.
    """
    df = pd.read_csv(src)
    X = df[df.columns[:-targets]]
    X_all = np.array(X.values.tolist())
    y = df[df.columns[-targets]]
    y_all = np.array(list(y.values))
    f_names = list(df.columns[:-targets])
    # BUG FIX: rand_seed previously seeded only KFold, so the held-out split
    # was not reproducible despite the parameter; seed train_test_split too.
    X_in_sample, X_out_sample, y_in_sample, y_out_sample = train_test_split(
        X_all, y_all, test_size=oos, random_state=rand_seed)
    kf = KFold(n_splits=folds, random_state=rand_seed, shuffle=shuffle)
    return (X_in_sample, X_out_sample, y_in_sample, y_out_sample, kf, f_names)
# #### Import Data
# +
# Experiment configuration.
src = r"C:\Users\markr\Projects\Software\FormulaBot\data\hypotenuse_01.csv"
oos = 0.20  # fraction held out for final out-of-sample scoring
folds = 5
targets = 1
shuffle = True
rand_seed = 101
X_in_sample, X_out_sample, y_in_sample, y_out_sample, kf, f_names = partition_data(src, oos, folds, targets, shuffle, rand_seed)
# -
# #### Split Data Into k-Folds, Compute Fitness
# +
# One fitted regressor per fold, with its train and test R^2 scores.
results = []
with tqdm(total=kf.n_splits) as pbar:
    for train_index, test_index in kf.split(X_in_sample):
        X_train, X_test = X_in_sample[train_index], X_in_sample[test_index]
        y_train, y_test = y_in_sample[train_index], y_in_sample[test_index]
        gp = SymbolicRegressorFactory(f_names)
        gp.fit(X_train, y_train);
        results.append((gp, gp.score(X_train,y_train), gp.score(X_test,y_test)))
        pbar.update(1)
# -
# Final ("prod") model, scored on the held-out out-of-sample rows.
# NOTE(review): this refits on the LAST fold's X_train only, not on the full
# in-sample set — confirm that is the intended behaviour.
gp = SymbolicRegressorFactory(f_names)
gp.fit(X_train, y_train);
score = gp.score(X_out_sample, y_out_sample)
# #### Report Validation and Out of Sample Testing
gps, train, test = zip(*results)
print("".ljust(12),"|","Training:".center(20),"|","Testing:".center(20))
print("".center(61, "-"))
print("Max:".ljust(12),"|",str(max(train)).center(20),"|",str(max(test)).center(20) )
print("Average:".ljust(12),"|",str(sum(train)/len(train)).center(20),"|",str(sum(test)/len(test)).center(20) )
print("Min:".ljust(12),"|",str(min(train)).center(20),"|",str(min(test)).center(20) )
print("".center(61, "-"))
print("Out Sample:".ljust(12),"|","N/A".center(20),"|",str(score).center(20))
plot_fitness_curves(gps, gp)
# #### Best Program
# Show the evolved program from the fold with the best test score.
k = test.index(max(test))
print(gps[k]._program)
dot_data = gps[k]._program.export_graphviz()
graphviz.Source(dot_data)
# #### Display Program as Decision Tree
dot_data = gp._program.export_graphviz()
graphviz.Source(dot_data)
# ## And that is the correct answer!
| notebooks/SymbolicRegressorExample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="be1fb74a-ea76-5d12-af9c-21651032ac37"
# # Titanic: Machine Learning from Disaster
# + [markdown] _cell_guid="f35f9107-7b1a-184a-9dbf-51a5b2b3ee67"
# * Description:
# The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships.
# * Problem definition: In this challenge, we ask you to complete the analysis of what sorts of people were likely to survive. In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy.
# + _cell_guid="73c6ae96-3a21-d2ea-3728-9d33adee18ba"
import numpy as np
import pandas as pd
import sklearn.linear_model as lm
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] _cell_guid="907c91be-7361-fe03-6cea-5a9ee12c4ea9"
# ## Data Exploration
# + _cell_guid="5bb5d0e3-39a8-edfd-3776-8b639e2d784d"
# Load the Kaggle Titanic data; paths assume the standard Kaggle kernel layout.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
# + _cell_guid="889004f2-e10d-0687-4ccc-3e4b5a825d57"
print ("Dimension of train data {}".format(train.shape))
print ("Dimension of test data {}".format(test.shape))
# + _cell_guid="ed8dd3e6-915a-41fd-7308-ed7e29b7287c"
print ("Basic statistical description:")
train.describe()
# + [markdown] _cell_guid="838ff612-6fc8-1baf-d86e-e6fb94a5c8d2"
# ### Comparison between test and train data
# + [markdown] _cell_guid="a13737ae-a781-1e23-0f3d-ecfe31b87239"
# #### From following cells, we could know that train and test data are split by PassengerId.
# + _cell_guid="84b58fb2-c8c6-cfe0-6bcf-a867ec2d57dd"
train.tail()
# + _cell_guid="01d0bd1a-f464-fa3e-0551-314b5260745e"
test.head()
# + [markdown] _cell_guid="3fbfd867-85bc-db51-bbfd-38b0aeb8c8c5"
# #### Let's look at data graphically. We could see that all the distribution of features are similar.
# + _cell_guid="99fd046f-3756-b85b-2c11-d4505d503c35"
# Side-by-side train/test distributions for Age, Pclass, Sex, Fare and Embarked
# on one 2x3 grid; similar shapes suggest the split is representative.
# NOTE(review): `.value_counts().plot(kind='kde')` computes a density over the
# *counts*, not over the underlying values — verify this is the intended plot.
plt.rc('font', size=13)
fig = plt.figure(figsize=(18, 8))
alpha = 0.6
ax1 = plt.subplot2grid((2,3), (0,0))
train.Age.value_counts().plot(kind='kde', color='#FA2379', label='train', alpha=alpha)
test.Age.value_counts().plot(kind='kde', label='test', alpha=alpha)
ax1.set_xlabel('Age')
ax1.set_title("What's the distribution of age?" )
plt.legend(loc='best')
ax2 = plt.subplot2grid((2,3), (0,1))
train.Pclass.value_counts().plot(kind='barh', color='#FA2379', label='train', alpha=alpha)
test.Pclass.value_counts().plot(kind='barh', label='test', alpha=alpha)
ax2.set_ylabel('Pclass')
ax2.set_xlabel('Frequency')
ax2.set_title("What's the distribution of Pclass?" )
plt.legend(loc='best')
ax3 = plt.subplot2grid((2,3), (0,2))
train.Sex.value_counts().plot(kind='barh', color='#FA2379', label='train', alpha=alpha)
test.Sex.value_counts().plot(kind='barh', label='test', alpha=alpha)
ax3.set_ylabel('Sex')
ax3.set_xlabel('Frequency')
ax3.set_title("What's the distribution of Sex?" )
plt.legend(loc='best')
ax4 = plt.subplot2grid((2,3), (1,0), colspan=2)
train.Fare.value_counts().plot(kind='kde', color='#FA2379', label='train', alpha=alpha)
test.Fare.value_counts().plot(kind='kde', label='test', alpha=alpha)
ax4.set_xlabel('Fare')
ax4.set_title("What's the distribution of Fare?" )
plt.legend(loc='best')
ax5 = plt.subplot2grid((2,3), (1,2))
train.Embarked.value_counts().plot(kind='barh', color='#FA2379', label='train', alpha=alpha)
test.Embarked.value_counts().plot(kind='barh', label='test', alpha=alpha)
ax5.set_ylabel('Embarked')
ax5.set_xlabel('Frequency')
ax5.set_title("What's the distribution of Embarked?" )
plt.legend(loc='best')
plt.tight_layout()
# + [markdown] _cell_guid="e4e31ee2-b800-cd4a-3c29-5a064b252298"
# #### We could know that the numbers of survived and died people are close to balanced.
# + _cell_guid="f6027b27-d006-095f-c43d-ae11691ba2bc"
# Class balance check on the target (0 = died, 1 = survived).
print (train.Survived.value_counts())
# + [markdown] _cell_guid="ca7cfd29-5fa2-8c78-e1dc-ca1d4a64cf44"
# ### Look closely to the train data
# + [markdown] _cell_guid="c5aabd1e-0038-c8cd-b40e-bf1bafa670aa"
# #### We could see that the density of survived is higher than not survived under age 10.
# + _cell_guid="4e814ab0-feb0-b8e3-73c1-cdfe9324bc0e"
# Age density split by outcome; children show a visibly higher survival density.
fig = plt.figure(figsize=(15, 6))
train[train.Survived==0].Age.value_counts().plot(kind='density', color='#FA2379', label='Not Survived', alpha=alpha)
train[train.Survived==1].Age.value_counts().plot(kind='density', label='Survived', alpha=alpha)
plt.xlabel('Age')
plt.title("What's the distribution of Age?" )
plt.legend(loc='best')
plt.grid()
# + [markdown] _cell_guid="d7b4901d-ee4a-796f-6e93-952aa7ccc637"
# #### We can observe that the survival rate of females is higher than that of males by about 50 percentage points
# + _cell_guid="54aefba0-a90a-981e-227e-24e7f19683ff"
# Survival counts (left) and per-group survival rates (right), split by sex.
df_male = train[train.Sex=='male'].Survived.value_counts().sort_index()
df_female = train[train.Sex=='female'].Survived.value_counts().sort_index()
fig = plt.figure(figsize=(18, 6))
ax1 = plt.subplot2grid((1,2), (0,0))
df_female.plot(kind='barh', color='#FA2379', label='Female', alpha=alpha)
df_male.plot(kind='barh', label='Male', alpha=alpha-0.1)
ax1.set_xlabel('Frequency')  # fixed label typo (was 'Frequrncy')
ax1.set_yticklabels(['Died', 'Survived'])
ax1.set_title("Who will survived with respect to sex?" )
plt.legend(loc='best')
plt.grid()
ax2 = plt.subplot2grid((1,2), (0,1))
# Divide by group size to compare rates rather than raw counts.
(df_female/train[train.Sex=='female'].shape[0]).plot(kind='barh', color='#FA2379', label='Female', alpha=alpha)
(df_male/train[train.Sex=='male'].shape[0]).plot(kind='barh', label='Male', alpha=alpha-0.1)
ax2.set_xlabel('Rate')
ax2.set_yticklabels(['Died', 'Survived'])
ax2.set_title("What's the survived rate with respect to sex?" )
plt.legend(loc='best')
plt.grid()
# + [markdown] _cell_guid="9febe872-5fd7-1f7c-80bb-e3e54194a266"
# #### We can observe that the class of people affected the possibility of being survived.
# + _cell_guid="c3944cf0-f252-fea3-bd6e-c6b30ab83d0a"
# Four panels on a shared y-scale: survival counts for high-class (Pclass 1-2)
# vs. low-class (Pclass 3) passengers, split by sex.
df_male = train[train.Sex=='male']
df_female = train[train.Sex=='female']
fig = plt.figure(figsize=(18, 6))
ax1 = plt.subplot2grid((1,4), (0,0))
df_female[df_female.Pclass<3].Survived.value_counts().sort_index().plot(kind='bar', color='#FA2379', alpha=alpha)
ax1.set_ylabel('Frequency')  # fixed label typo (was 'Frequrncy')
ax1.set_ylim((0,350))
ax1.set_xticklabels(['Died', 'Survived'])
ax1.set_title("How will high-class female survived?", y=1.05)
plt.grid()
ax2 = plt.subplot2grid((1,4), (0,1))
df_female[df_female.Pclass==3].Survived.value_counts().sort_index().plot(kind='bar', color='#23FA79', alpha=alpha)
ax2.set_ylabel('Frequency')  # fixed label typo (was 'Frequrncy')
ax2.set_ylim((0,350))
ax2.set_xticklabels(['Died', 'Survived'])
ax2.set_title("How will low-class female survived?", y=1.05)
plt.grid()
ax3 = plt.subplot2grid((1,4), (0,2))
df_male[df_male.Pclass<3].Survived.value_counts().sort_index().plot(kind='bar', color='#00FA23', alpha=alpha)
ax3.set_ylabel('Frequency')  # fixed label typo (was 'Frequrncy')
ax3.set_ylim((0,350))
ax3.set_xticklabels(['Died', 'Survived'])
ax3.set_title("How will high-class male survived?", y=1.05)
plt.grid()
ax4 = plt.subplot2grid((1,4), (0,3))
df_male[df_male.Pclass==3].Survived.value_counts().sort_index().plot(kind='bar', color='#2379FA', alpha=alpha)
ax4.set_ylabel('Frequency')  # fixed label typo (was 'Frequrncy')
ax4.set_ylim((0,350))
ax4.set_xticklabels(['Died', 'Survived'])
ax4.set_title("How will low-class male survived?", y=1.05)
plt.grid()
plt.tight_layout()
# + [markdown] _cell_guid="226c37a6-7857-9427-5506-ef6da0e1e778"
# #### We could see that if the value of Ticket is the same, passengers would be close, like friends or families. But sometimes it will not be the case, so we need to carefully handle it.
# + _cell_guid="4a0675a2-82a1-d765-0471-0e2209830b77"
# Example: passengers sharing ticket '1601' form one travelling group.
train[train.Ticket=='1601']
# + _cell_guid="a09181a1-e6d0-bc56-2eea-337b6062af79"
train[train.Ticket=='CA 2144']
# + [markdown] _cell_guid="b47e7d97-26c4-9a89-66ac-3811055b21ed"
# ## Data Cleaning
# + [markdown] _cell_guid="00b4c390-b98e-9197-d464-475927359f72"
# #### Let's see how many missing values we have on each column.
# + _cell_guid="4cc0fe90-15a1-8b98-3e3b-4cabce0742b2"
train.isnull().sum()
# + _cell_guid="c214124b-48d7-2d8b-2dff-886e2eb18a8d"
test.isnull().sum()
# + [markdown] _cell_guid="89a371d1-aa19-8bb8-de5b-f219a36f3b46"
# #### Thanks for [this amazing sharing](https://www.kaggle.com/mrisdal/titanic/exploring-survival-on-the-titanic/notebook), now we have a good way to replace missing values with sensible values. Let's assume the embarked is related with fare and pclass.
# + [markdown] _cell_guid="d5f7cdd2-6198-933b-19ad-b8e9d9af3ff7"
# ### Missing values on Embarked
# + _cell_guid="b8285d1b-025c-f8b2-c89f-31b5b44d7287"
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax = train.boxplot(column='Fare', by=['Embarked','Pclass'], ax=ax)
plt.axhline(y=80, color='green')
ax.set_title('', y=1.1)
train[train.Embarked.isnull()][['Fare', 'Pclass', 'Embarked']]
# + [markdown] _cell_guid="58318d75-03aa-426d-2be9-ba01fbe7bf2f"
# #### From the above boxplot, we should replace NA with C because most people who had Pclass 1 and Fare 80 would be Embarked C
# + _cell_guid="5cc45aba-b1ec-0501-32cb-5667b2915524"
_ = train.set_value(train.Embarked.isnull(), 'Embarked', 'C')
# + [markdown] _cell_guid="253de3db-c58a-929f-f53f-ab9a42d7ac4c"
# ### Missing values on Fare
# + [markdown] _cell_guid="ea434ea8-acb8-2b72-9213-443486a6ca21"
# #### By fixing the values of Embarked and Pclass, we could plot histogram of Fare. And we should use the most common value to replace the NA value of Fare.
# + _cell_guid="1ccf3356-42e5-6d93-f143-837851b85fa7"
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
test[(test.Pclass==3)&(test.Embarked=='S')].Fare.hist(bins=100, ax=ax)
test[test.Fare.isnull()][['Pclass', 'Fare', 'Embarked']]
plt.xlabel('Fare')
plt.ylabel('Frequency')
plt.title('Histogram of Fare, Plcass 3 and Embarked S')
test[test.Fare.isnull()][['Pclass', 'Fare', 'Embarked']]
# + _cell_guid="befe5367-7929-02e7-1e06-25e8eed92ae3"
print ("The top 5 most common value of Fare")
test[(test.Pclass==3)&(test.Embarked=='S')].Fare.value_counts().head()
# + _cell_guid="db2e070f-5456-437a-867d-a69d14214482"
_ = test.set_value(test.Fare.isnull(), 'Fare', 8.05)
# + [markdown] _cell_guid="46fd84db-79de-f5a4-b181-3e35c8c83a79"
# #### Replace the missing value of Cabin with U0
# + _cell_guid="1963b770-8e48-3eac-f5db-704e553ca968"
full = pd.concat([train, test], ignore_index=True)
_ = full.set_value(full.Cabin.isnull(), 'Cabin', 'U0')
# + [markdown] _cell_guid="fc2ba8d9-5d13-9285-8ca7-6409b6466416"
# ## Feature Engineering
# + [markdown] _cell_guid="c522ba7d-aa15-a05a-dbc7-33e9e622be26"
# #### Create a feature, Names, to store the length of words in name.
# + _cell_guid="dae555ca-cb1e-944e-9bed-bf2f81445024"
import re
names = full.Name.map(lambda x: len(re.split(' ', x)))
_ = full.set_value(full.index, 'Names', names)
del names
# + [markdown] _cell_guid="d12425f3-4791-c906-59c5-7af97236b587"
# #### Create a feature, Title.
# + _cell_guid="5aa8b416-de06-b017-bb46-3b27c9de9b1c"
title = full.Name.map(lambda x: re.compile(', (.*?)\.').findall(x)[0])
title[title=='Mme'] = 'Mrs'
title[title.isin(['Ms','Mlle'])] = 'Miss'
title[title.isin(['Don', 'Jonkheer'])] = 'Sir'
title[title.isin(['Dona', 'Lady', 'the Countess'])] = 'Lady'
title[title.isin(['Capt', 'Col', 'Major', 'Dr', 'Officer', 'Rev'])] = 'Officer'
_ = full.set_value(full.index, 'Title', title)
del title
# + [markdown] _cell_guid="8f445a7f-c5fd-3284-3b47-4bdbf6efd2c7"
# #### Create a feature, Deck. It may represents the socioeconomic status.
# + _cell_guid="60ae10cd-f38b-14e7-e4db-6efb3f75a6e4"
deck = full[~full.Cabin.isnull()].Cabin.map( lambda x : re.compile("([a-zA-Z]+)").search(x).group())
deck = pd.factorize(deck)[0]
_ = full.set_value(full.index, 'Deck', deck)
del deck
# + [markdown] _cell_guid="56ace2c8-aa46-3e32-963d-c6763947d8bf"
# #### Create a feature, Room. It may represents the geo lacation.
# + _cell_guid="7638a500-b3b3-44f5-48cf-66fcf79d7853"
# Regex capturing the first run of digits in a cabin label (e.g. "C123" -> "123").
checker = re.compile("([0-9]+)")
def roomNum(x):
    """Return the cabin's room number plus one, or 1 when no digits are found."""
    match = checker.search(x)
    return int(match.group()) + 1 if match else 1
# Apply roomNum over every cabin, then drop the temporaries.
rooms = full.Cabin.map(roomNum)
_ = full.set_value(full.index, 'Room', rooms)
del checker, roomNum
# Normalise room numbers so the column sums to 1 across all passengers.
full['Room'] = full.Room/full.Room.sum()
# + [markdown] _cell_guid="88d889ef-7a1a-165d-10de-5e04251caf07"
# #### Create a feature, Group_num. It may represent the size of family.
# + _cell_guid="ef041721-f437-87d6-46ec-25310e7974a5"
# Family size = parents/children + siblings/spouses + the passenger themself.
full['Group_num'] = full.Parch + full.SibSp + 1
# + [markdown] _cell_guid="70ffc73b-c1a9-0a9a-90cf-0a732016a955"
# #### Create a feature, Group_size. When the size is between 2 and 4, more people are survived.
# + _cell_guid="21798f34-9c01-bde7-e2de-b788d1573e3c"
# Bucket Group_num: S (=1), M (2-4, the default), L (>4).
full['Group_size'] = pd.Series('M', index=full.index)
_ = full.set_value(full.Group_num>4, 'Group_size', 'L')
_ = full.set_value(full.Group_num==1, 'Group_size', 'S')
# + [markdown] _cell_guid="d6ad6202-ef7f-650c-32d0-7fb5ae1963d5"
# #### Normalized the fare.
# + _cell_guid="59e3ba95-f15b-7651-8d83-ef8608fc3720"
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# NOTE(review): Series.reshape was removed in pandas 0.25; modern equivalent
# is full.Fare.values.reshape(-1, 1).
full['NorFare'] = pd.Series(scaler.fit_transform(full.Fare.reshape(-1,1)).reshape(-1), index=full.index)
# + _cell_guid="02efb2bd-54e8-c785-5edf-40768abfb36f"
def setValue(col):
    # Copy engineered column `col` from `full` back into train (first 891
    # rows, the train portion of the concat) and test (the remainder).
    _ = train.set_value(train.index, col, full[:891][col].values)
    _ = test.set_value(test.index, col, full[891:][col].values)
for col in ['Deck', 'Room', 'Group_size', 'Group_num', 'Names', 'Title']:
    setValue(col)
# + [markdown] _cell_guid="69b8bf53-adbb-b467-e3c4-8e171c01552c"
# #### Predict Age
# + _cell_guid="baaa46a5-370b-8536-73bd-74331862dabe"
# Drop columns not used as predictors for Age, then one-hot the categoricals.
full.drop(labels=['PassengerId', 'Name', 'Cabin', 'Survived', 'Ticket', 'Fare'], axis=1, inplace=True)
full = pd.get_dummies(full, columns=['Embarked', 'Sex', 'Title', 'Group_size'])
# + _cell_guid="c8a0dc2b-1059-a811-0ff0-61227d1476fa"
from sklearn.model_selection import train_test_split
# Train an age-imputation model on the rows where Age is known.
X = full[~full.Age.isnull()].drop('Age', axis=1)
y = full[~full.Age.isnull()].Age
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# + _cell_guid="205a752f-1a55-1f98-07a0-9ebb6b3bbb9f"
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import make_scorer
def get_model(estimator, parameters, X_train, y_train, scoring):
    """Grid-search *estimator* over *parameters* (scored by *scoring*) on the
    training data and return the refitted best estimator found."""
    search = GridSearchCV(estimator, param_grid=parameters, scoring=scoring)
    # GridSearchCV.fit returns the fitted search object, so calls can chain.
    return search.fit(X_train, y_train).best_estimator_
# + _cell_guid="40dc56c0-cfde-2cc2-7e22-2ecd3029a3e4"
# Grid-search an XGBoost regressor for Age over L1/L2 regularisation strengths,
# minimising mean absolute error (greater_is_better=False negates the score).
import xgboost as xgb
XGB = xgb.XGBRegressor(max_depth=4, seed= 42)
scoring = make_scorer(mean_absolute_error, greater_is_better=False)
parameters = {'reg_alpha':np.linspace(0.1,1.0,5), 'reg_lambda': np.linspace(1.0,3.0,5)}
reg_xgb = get_model(XGB, parameters, X_train, y_train, scoring)
print (reg_xgb)
# + _cell_guid="e890a1bc-aa25-b2f0-e54f-dbb296b79bc3"
print ("Mean absolute error of test data: {}".format(mean_absolute_error(y_test, reg_xgb.predict(X_test))))
# + _cell_guid="8866a054-8210-da69-b424-3ef5bcb67335"
# Impute the missing ages with the model and compare densities before/after.
fig = plt.figure(figsize=(15, 6))
alpha = 0.5
full.Age.value_counts().plot(kind='density', color='#FA2379', label='Before', alpha=alpha)
pred = reg_xgb.predict(full[full.Age.isnull()].drop('Age', axis=1))
full.set_value(full.Age.isnull(), 'Age', pred)
full.Age.value_counts().plot(kind='density', label='After', alpha=alpha)
plt.xlabel('Age')
plt.title("What's the distribution of Age after predicting?" )
plt.legend(loc='best')
plt.grid()
# + _cell_guid="3a80c205-ed3b-ba19-20c2-0c661f38ea0d"
# Standardise the numeric features (see NorFare note: Series.reshape is gone
# from modern pandas; use .values.reshape there).
full['NorAge'] = pd.Series(scaler.fit_transform(full.Age.reshape(-1,1)).reshape(-1), index=full.index)
full['NorNames'] = pd.Series(scaler.fit_transform(full.Names.reshape(-1,1)).reshape(-1), index=full.index)
full['Group_num'] = pd.Series(scaler.fit_transform(full.Group_num.reshape(-1,1)).reshape(-1), index=full.index)
# + _cell_guid="639acba3-012c-6d42-4ce4-cbd87d056824"
for col in ['NorAge', 'NorFare', 'NorNames', 'Group_num']:
    setValue(col)
# + [markdown] _cell_guid="9e0174fa-466d-5fee-e2ec-96b828d050cd"
# #### Encoding sex, female: 0 and male: 1
# + _cell_guid="5d63fafb-951f-8fa8-79f5-f34aefff5c32"
train.Sex = np.where(train.Sex=='female', 0, 1)
test.Sex = np.where(test.Sex=='female', 0, 1)
# + [markdown] _cell_guid="def377d2-3a58-69cd-3e9f-d4b7971c89d4"
# #### Convert values of Embarked and Ticket into dummy variables
# + _cell_guid="066399f8-cfe0-cb46-dc08-496951eacd10"
train.drop(labels=['PassengerId', 'Name', 'Names', 'Cabin', 'Ticket', 'Age', 'Fare'], axis=1, inplace=True)
test.drop(labels=['Name', 'Names', 'Cabin', 'Ticket', 'Age', 'Fare'], axis=1, inplace=True)
# + _cell_guid="44a6703c-1bd2-4657-09a2-e3f922a21b21"
train = pd.get_dummies(train, columns=['Embarked', 'Pclass', 'Title', 'Group_size'])
test = pd.get_dummies(test, columns=['Embarked', 'Pclass', 'Title', 'Group_size'])
# Test set has no 'Sir' title, so add the missing dummy column as all zeros
# to keep train/test feature matrices aligned.
test['Title_Sir'] = pd.Series(0, index=test.index)
# + [markdown] _cell_guid="da4a819f-0cbc-77f7-58f3-73c1286636e6"
# ## Build Model
# + _cell_guid="0bf4caea-a8b1-6e32-84d7-725b4919cbb0"
from sklearn.model_selection import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5), scoring='accuracy'):
    """Plot train vs. cross-validation score as a function of training-set size.

    estimator: unfitted sklearn estimator; title: figure title; X, y: full data;
    ylim: optional (lo, hi) y-axis limits; cv: CV splitting strategy;
    train_sizes: fractions of the data to train on. Returns the pyplot module
    so callers can further tweak the figure.
    """
    plt.figure(figsize=(10,6))
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel(scoring)
    # learning_curve refits the estimator per size/fold and returns per-fold scores.
    train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, scoring=scoring,
                                                            n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one standard deviation across folds.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# + _cell_guid="1eb8b7de-cc0e-e47c-76c3-ff408bad439e"
from sklearn.metrics import roc_curve, auc
def plot_roc_curve(estimator, X, y, title):
    """Plot the ROC curve (and print AUC) of a fitted binary classifier.

    estimator must expose predict_proba; column 1 is taken as the positive class.
    """
    # Determine the false positive and true positive rates
    fpr, tpr, _ = roc_curve(y, estimator.predict_proba(X)[:,1])
    # Calculate the AUC
    roc_auc = auc(fpr, tpr)
    print ('ROC AUC: %0.2f' % roc_auc)
    # Plot of a ROC curve for a specific class
    plt.figure(figsize=(10,6))
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    # Diagonal = performance of a random classifier, for reference.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve - {}'.format(title))
    plt.legend(loc="lower right")
    plt.show()
# + _cell_guid="d3899afc-aebf-01e4-566f-321a876da31b"
# Final supervised split: predict Survived from the engineered features.
X = train.drop(['Survived'], axis=1)
y = train.Survived
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# + _cell_guid="93a21cdf-9ea6-b277-200a-39acc42d1fa2"
from sklearn.metrics import accuracy_score
scoring = make_scorer(accuracy_score, greater_is_better=True)
# + [markdown] _cell_guid="4e74ca8a-658c-97f2-34a2-a4b8ff21d6b2"
# ### KNN
# + _cell_guid="951643dd-6806-6d3d-7449-d11d7ee67e93"
from sklearn.neighbors import KNeighborsClassifier
KNN = KNeighborsClassifier(weights='uniform')
# p=1 is Manhattan distance, p=2 Euclidean.
parameters = {'n_neighbors':[3,4,5], 'p':[1,2]}
clf_knn = get_model(KNN, parameters, X_train, y_train, scoring)
# + _cell_guid="87debc07-5d61-dd22-86d3-7dab7e26a57b"
print (accuracy_score(y_test, clf_knn.predict(X_test)))
print (clf_knn)
plot_learning_curve(clf_knn, 'KNN', X, y, cv=4);
# + [markdown] _cell_guid="d3988022-15a6-4c66-6de8-8f716c732091"
# ### Random Forest
# + _cell_guid="fbc5c39a-9002-9173-bbde-0a0a76d59c52"
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(random_state=42, criterion='entropy', min_samples_split=5, oob_score=True)
# Single-point "grid": hyperparameters were presumably tuned in earlier runs.
parameters = {'n_estimators':[500], 'min_samples_leaf':[12]}
clf_rfc1 = get_model(rfc, parameters, X_train, y_train, scoring)
# + _cell_guid="caf76752-35b8-cc36-2b33-257d1aece91e"
print (accuracy_score(y_test, clf_rfc1.predict(X_test)))
print (clf_rfc1)
plot_learning_curve(clf_rfc1, 'Random Forest', X, y, cv=4);
# + _cell_guid="029c300d-b703-07fe-a91d-252861c7dcb1"
# Visualise forest feature importances to pick a reduced feature set.
plt.figure(figsize=(10,6))
plt.barh(np.arange(X_train.columns.shape[0]), clf_rfc1.feature_importances_, 0.5)
plt.yticks(np.arange(X_train.columns.shape[0]), X_train.columns)
plt.grid()
plt.xticks(np.arange(0,0.2,0.02));
# + _cell_guid="cc2b4b34-7465-39b0-caab-a7546ed5c5a7"
# Keep only features whose importance clears the 0.016 threshold.
cols = X_train.columns[clf_rfc1.feature_importances_>=0.016]
# + _cell_guid="d01ba707-e275-3c3d-adde-2394b6856720"
# Second forest trained on the reduced feature set.
rfc = RandomForestClassifier(random_state=42, criterion='entropy', min_samples_split=5, oob_score=True)
parameters = {'n_estimators':[500], 'min_samples_leaf':[12]}
clf_rfc2 = get_model(rfc, parameters, X_train[cols], y_train, scoring)
# + _cell_guid="ca9184ce-abdb-fe6f-7317-4a081af161a4"
print (clf_rfc2)
print (accuracy_score(y_test, clf_rfc2.predict(X_test[cols])))
plot_learning_curve(clf_rfc2, 'Random Forest', X[cols], y, cv=4);
# + [markdown] _cell_guid="80180e1b-fb99-6094-2c25-8c6121fbdff2"
# ### Logistic Regression
# + _cell_guid="ad3fdeca-b04e-426a-bcef-1c8112195636"
from sklearn.linear_model import LogisticRegression
lg = LogisticRegression(random_state=42, penalty='l1')
parameters = {'C':[0.5]}
clf_lg1 = get_model(lg, parameters, X_train, y_train, scoring)
# + _cell_guid="3af20be0-fbd2-6823-bcf1-f81d3630fde3"
print (clf_lg1)
print (accuracy_score(y_test, clf_lg1.predict(X_test)))
plot_learning_curve(clf_lg1, 'Logistic Regression', X, y, cv=4);
# + [markdown] _cell_guid="02ab3ecb-4389-fd39-8199-52875342598d"
# ### SVC
# + _cell_guid="f629f983-b96b-3de3-8c23-323952c3efdd"
from sklearn.svm import SVC
# probability=True is required for predict_proba (used by plot_roc_curve).
svc = SVC(random_state=42, kernel='poly', probability=True)
parameters = {'C': [35], 'gamma': [0.0055], 'coef0': [0.1],
              'degree':[2]}
clf_svc = get_model(svc, parameters, X_train, y_train, scoring)
# + _cell_guid="4203eb35-58ba-dccf-6b53-e40885b57dfe"
print (clf_svc)
print (accuracy_score(y_test, clf_svc.predict(X_test)))
plot_learning_curve(clf_svc, 'SVC', X, y, cv=4);
# + [markdown] _cell_guid="86580051-dc5f-32ee-1573-0fd94ff88ab1"
# ### XGBoost
# + _cell_guid="480be50b-5dd4-1d7d-6399-df33f9764d7d"
# NOTE(review): this rebinds both `XGB` (module alias) and `xgb` (estimator),
# shadowing the earlier `import xgboost as xgb` — confusing but harmless here.
import xgboost as XGB
xgb = XGB.XGBClassifier(seed=42, max_depth=3, objective='binary:logistic', n_estimators=400)
parameters = {'learning_rate':[0.1],
              'reg_alpha':[3.0], 'reg_lambda': [4.0]}
clf_xgb1 = get_model(xgb, parameters, X_train, y_train, scoring)
# + _cell_guid="3917b123-5364-ba90-5dce-b560b5b0045f"
print (accuracy_score(y_test, clf_xgb1.predict(X_test)))
print (clf_xgb1)
plot_learning_curve(clf_xgb1, 'XGB', X, y, cv=4);
# + [markdown] _cell_guid="98c7ffeb-1ec6-7102-bdd5-90dd47fd5702"
# ## Ensemble
# + _cell_guid="749466e3-cc30-6c95-23fd-d7fdb4c33e12"
from sklearn.ensemble import VotingClassifier
# Hard (majority) vote over all six tuned classifiers; XGB and KNN get
# heavier weights.
clf_vc = VotingClassifier(estimators=[('xgb1', clf_xgb1), ('lg1', clf_lg1), ('svc', clf_svc),
                                      ('rfc1', clf_rfc1),('rfc2', clf_rfc2), ('knn', clf_knn)],
                          voting='hard', weights=[4,1,1,1,1,2])
clf_vc = clf_vc.fit(X_train, y_train)
# + _cell_guid="47b071ad-a128-37dc-604a-8c6c38e9fc85"
print (accuracy_score(y_test, clf_vc.predict(X_test)))
print (clf_vc)
plot_learning_curve(clf_vc, 'Ensemble', X, y, cv=4);
# + [markdown] _cell_guid="06af75c1-ade4-037d-3cee-7bc7fb6064df"
# ## Make submission
# + _cell_guid="05f9534a-2a5b-1f44-9e99-0007bcd34531"
# Keep the ids for the submission file, then drop the column before predicting.
PassengerId = test.PassengerId
test.drop('PassengerId', axis=1, inplace=True)
def submission(model, fname, X):
ans = pd.DataFrame(columns=['PassengerId', 'Survived'])
ans.PassengerId = PassengerId
ans.Survived = pd.Series(model.predict(X), index=ans.index)
ans.to_csv(fname, index=False)
# + _cell_guid="5644e2c0-f072-98ba-fb4f-b5afab32f690"
| titanic/exploratory-tutorial-titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/title.png" alt="Drawing" style="width: 1100px;"/>
# # Overview
# This competition aims to correctly classify millions of products for e-commerce company Cdiscount.com. Perform image classification for each of the 9 million products into 1 of 5000 categories, with each product having 1-4 images (180x180 resolution) in the dataset.
# # Data
# * category_names.7z
# * Shows hierarchy of product classification
# * Each category_id has a level1, level2, level3 name in French
# * each Product's category_id corresponds to a specific level 1, 2, and 3 level (specific spot in the category tree)
# * train_example.bson
# * First 100 dicts from train.bson
# * train.bson
# * List of 7,069,896 dictionaries (one per product) with keys:
# * product id ( **\_id: 42** )
# * category id ( **category_id: 1000021794** )
# * list of 1-4 images in a dictionary ( **imgs: [{'picture':b'...binarystring...'}, {'picture':b'...binarystring...'}]** )
# * test.bson
# * List of 1,768,182 products in same format as train.bson, except there is no 'category_id' with each image
# * sample_submission.7z
#
#
# | \_id | category_id |
# |:--- |:--- |
# | 10 | 1000010653 |
# | 14 | 1000010653 |
# | 21 | 1000010653 |
# | 24 | 1000010653 |
# | 27 | 1000010653 |
#
#
# +
import numpy as np
import pandas as pd
import io
import bson
import matplotlib.pyplot as plt
import seaborn as sns
from skimage.data import imread
import os
import math
import json
from matplotlib import pyplot as plt
import cv2
from PIL import Image
import numpy as np
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
from keras.layers.advanced_activations import ELU
# +
import requests, json
def slack(message):
    """Post *message* to a Slack channel via an incoming webhook.

    Returns the `requests.Response` so the caller can inspect the status.
    NOTE(review): the webhook URL is a redacted placeholder ('<KEY>') and must
    be replaced with a real Slack incoming-webhook URL before use.
    """
    webhook_url = '<KEY>'
    # link_names=1 makes @mentions in the message resolve to real users.
    slack_data = {'text': message, "link_names":1}
    response = requests.post(
        webhook_url, data=json.dumps(slack_data),
        headers={'Content-Type': 'application/json'})
    return response
# -
# # Evaluate the Submission Test Set
# +
#This will be the directory we pull images from, all images must be in subdirs of this path (even if only 1 folder)
testrepo = "C:\\Kaggle\\04_Cdiscount\\"
# Separate drive for weights, category CSV and submission output.
datarepo = "D:\\Kaggle\\04_Cdiscount\\"
#The batch size to use for NN
batch_size = 32
# -
# ## Build VGG16 Model
# Construct a VGG16 model in Keras which will accept the images from this competition as input
# +
# Per-channel VGG/ImageNet means (R, G, B), shaped to broadcast over a
# channels-first image tensor.
vgg_mean = np.array([123.68, 116.779, 103.939]).reshape((3,1,1))
def vgg_preprocess(x):
    """Subtract the VGG channel means and reverse axis 1 (RGB -> BGR).

    Intended for channels-first batches shaped (N, 3, H, W) — the Lambda layer
    below receives input_shape=(3, 224, 224) plus a batch dimension, so axis 1
    is the colour axis.
    """
    centered = x - vgg_mean
    return centered[:, ::-1]
# -
def ConvBlock(layers, model, filters):
    """Append *layers* zero-padded 3x3 ReLU conv layers plus one 2x2 max-pool
    to *model* (a Keras Sequential), as in the VGG16 architecture.
    """
    for i in range(layers):
        model.add(ZeroPadding2D((1,1)))
        model.add(Conv2D(filters, (3,3), activation='relu'))
    # NOTE(review): only the pooling layer pins data_format="channels_first";
    # ZeroPadding2D/Conv2D fall back to the global Keras default — confirm
    # image_data_format is "channels_first" in keras.json.
    model.add(MaxPooling2D((2,2), strides=(2,2), data_format="channels_first"))
def FullyConnectedBlock(model):
    """Append a 4096-unit ReLU dense layer with 50% dropout to *model*."""
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
def VGG16():
    """Build the classic 1000-class VGG16 as a Keras Sequential model.

    Input is channels-first (3, 224, 224); mean-subtraction / RGB->BGR
    preprocessing is baked in as the first Lambda layer. The 1000-way
    ImageNet head is swapped for the competition's 5270 classes after
    construction (see the cell below).
    """
    model = Sequential()
    model.add(Lambda(vgg_preprocess, input_shape=(3,224,224)))
    # Five conv stages with the standard VGG16 filter progression.
    ConvBlock(2, model, 64)
    ConvBlock(2, model, 128)
    ConvBlock(3, model, 256)
    ConvBlock(3, model, 512)
    ConvBlock(3, model, 512)
    model.add(Flatten())
    FullyConnectedBlock(model)
    FullyConnectedBlock(model)
    model.add(Dense(1000, activation='softmax'))
    return model
# ## Instantiate Model and Load Weights
# Build VGG16, swap its 1000-way ImageNet head for the 5270 Cdiscount
# categories, and restore the previously fine-tuned weights.
model = VGG16()
model.pop()
model.add(Dense(5270, activation='softmax'))
model.load_weights(datarepo + "weights\\finetune_best_weights2.hdf5")
# Very small learning rate (5e-6); only inference is run in this notebook.
model.compile(optimizer=RMSprop(lr=0.000005), loss="categorical_crossentropy", metrics=['accuracy'])
#model.summary()
# ## Create a Master List of Images
# Create_Image_List allows us to feed our custom generator with a customized image list. Each image can be grabbed once, or we can grab the same number of images from each training class regardless of it's actual size. This will loop back to the beginning for smaller classes and help the imbalanced dataset problem. The list can be shuffled or given sequentially.
# +
import random
def Create_Image_List(directory, perclass=0, seed=42, shuffle=False):
    """
    Return a list of "class\\filename" image paths found under *directory*.

    *directory* must contain one sub-directory per class. With the default
    perclass=0 every image appears exactly once, grouped class by class.
    With perclass>0 the output interleaves the classes (1st entry from the
    1st class, 2nd from the 2nd, ...), taking *perclass* entries per class
    and wrapping around inside classes smaller than *perclass*.
    *shuffle* randomises each class's file order first (seeded so runs are
    reproducible).
    """
    class_names = []
    class_files = []          # [class name, [file names]] pairs, in walk order
    for i, (dirpath, dirname, fname) in enumerate(os.walk(directory)):
        if i == 0:
            class_names = dirname     # top level: the class sub-directories
        else:
            class_files.append([class_names[i - 1], fname])

    total = sum(len(files) for _, files in class_files)
    print("Found", str(len(class_files)), "classes with a total of", total, "images" )

    if shuffle:
        random.seed(seed)
        for entry in class_files:
            random.shuffle(entry[1])

    image_list = []
    if perclass == 0:
        # Every image exactly once, class by class.
        image_list = [name + "\\" + img for name, files in class_files for img in files]
    if perclass > 0:
        # Interleave classes; wrap around inside classes smaller than perclass.
        for idx in range(perclass):
            for name, files in class_files:
                image_list.append(name + "\\" + files[idx % len(files)])

    if perclass == 0:
        print("Returning a list with all images in each class, totaling", str(len(image_list)), "images")
    else:
        print("Returning a list with", str(perclass), "images per class, totaling", str(len(image_list)), "images")
    return image_list
# -
# Enumerate every submission image once; filenames are the part after "class\\".
Master_Images_Test = Create_Image_List(directory=testrepo, perclass=0, seed=42, shuffle=False)
Master_Filenames = [i.split('\\')[1] for i in Master_Images_Test]
# ## Create Master List of Categories
# Sorted category ids; index into this list maps a softmax column back to a
# category_id, so the order here must match the training label encoding.
categories = pd.read_csv(r'D:\Kaggle\04_Cdiscount\category_names.csv', index_col='category_id')
Master_Classes = categories.index.tolist()
Master_Classes.sort()
# ## Create Custom Generator
# This will endlessly feed images to the predict stage. This is more configurable than the normal Keras image data generator and works better on this system for some reason. Keras' IDG was skipping batches and giving erroneous results. The helper function *Open_Image* is useful so the generator will yield correctly formatted images. They must be numpy arrays of size 224x224 with "channels first" aka (3,224,224)
def Open_Image(directory, path):
    """Load the image at *directory*+*path*, resize to 224x224 and return it
    as a channels-first (3, 224, 224) array for the VGG input.
    """
    im = Image.open(directory + path)
    imarray = np.array(im)
    # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — this code
    # needs an old SciPy (with Pillow); the modern equivalent is
    # np.array(im.resize((224, 224))).
    imresize = misc.imresize(imarray, (224,224))
    # HWC -> CHW to match the model's channels-first layout.
    imT = np.transpose(imresize, (2,0,1))
    #img = Image.fromarray(imarray, 'RGB')
    #img.show()
    return imT
def Batch_Generator(dataset, batch_size, repo):
    """Yield successive (images, labels) batches from *dataset*.

    Each entry in *dataset* is a "class\\filename" path relative to *repo*;
    the label is the class portion before the backslash, and each image is
    loaded via Open_Image as a (3, 224, 224) array. The final batch may be
    smaller than *batch_size*.
    """
    for start in range(0, len(dataset), batch_size):
        chunk = dataset[start:start + batch_size]
        images = np.asarray([Open_Image(repo, path) for path in chunk])
        labels = np.asarray([path.split('\\')[0] for path in chunk])
        yield images, labels
# Single-pass generator over all submission images.
test_batches = Batch_Generator(dataset=Master_Images_Test, batch_size=batch_size, repo=testrepo)
# ## Predict Output Classes for Submission Test Set
# It may be worth looking at predictions for each image of a product (up to 4) and combining results or voting in order to determine best classification. Possibly run the extra images through a different NN then ensemble?
#
# The prediction output contains 5,270 columns per sample, so we must predict in batches, saving predicted output classes in an array along the way. We run out of memory if we try to predict all the submission test images at once (millions of images x 5,270 values/image x 4 bytes/value = WAY TOO BIG FOR MEMORY).
# +
# Alternative manual batch loop kept for reference (predict_on_batch variant).
# Master_Classifications = []
# for i,(imgs,labels) in enumerate(test_batches):
#     if i%100 == 0: print("Finished batch:", str(i), "/96721")
#     preds = model.predict_on_batch(imgs)
#     highest_prob = np.argmax(preds, axis=1)
#     for highest in range(len(highest_prob)):
#         idx = highest_prob[highest]
#         Master_Classifications.append(Master_Classes[idx])
# +
# Predict class probabilities for every submission image, then map each argmax
# column index back to its category_id via the sorted Master_Classes list.
Master_Classifications = []
# steps uses ceil, not floor: with floor division the generator's final partial
# batch (the len % batch_size leftover images) would never be predicted,
# leaving the submission short of the required 1,768,182 rows.
preds = model.predict_generator(generator=test_batches, steps=math.ceil(len(Master_Images_Test)/batch_size),
                                max_queue_size=10, workers=1, use_multiprocessing=False, verbose=1)
highest_prob = np.argmax(preds, axis=1)
for highest in range(len(highest_prob)):
    idx = highest_prob[highest]
    Master_Classifications.append(Master_Classes[idx])
# -
slack("FINISHED CLASSIFICATION")
# ## Format Predictions into Submission Format
# - Create a numpy array with a header of 2 columns named **_id** and **category_id**
# - Each row should be in the format of **_id,category_id** such as **5,1000016018**
# - Strip off the "-#.png" portion of each filename
# - use class_list to find the category_id
# - Only parse out preds and filenames for images ending in "-1.png"
# - **MAKE SURE FINAL SUBMISSION HAS 1,768,182 ROWS**
# +
#remove the ".jpg" extension
parsed_filenames = []
for imgname in Master_Filenames:
    parsed_filenames.append(imgname.split('.')[0])
#combine filenames and classifications into 1 numpy array
a = np.array(parsed_filenames)
b = np.array(Master_Classifications)
submission_array = np.column_stack((a,b))
#turn the numpy array into a Pandas Dataframe
df = pd.DataFrame(data=submission_array)
df.columns = ['_id', 'category_id']
# Keep only the first image of each product ("<id>-1") and strip the suffix.
# NOTE(review): .str.contains('-1') matches '-1' anywhere in the id (regex),
# not just at the end — safe if suffixes are only -1..-4 and ids contain no
# dashes, but .str.endswith('-1') would be more precise; confirm id format.
df = df[df._id.str.contains('-1')]
df['_id'] = df['_id'].str[:-2]
df.shape
# -

# Sanity check: the competition requires exactly 1,768,182 rows.
if df.shape != (1768182, 2):
    print("Error: final submission dataframe shape should be (1768182, 2) but got", df.shape,"instead")
else:
    print("Ready for submission!")
# ## Create a Zip file for Submission
# +
from zipfile import ZipFile, ZIP_DEFLATED

output_file = "final_submission6"
# Write the CSV, then zip it for upload (paths are Windows-style "\\").
df.to_csv(datarepo + "submissions" + "\\" + output_file + ".csv", index=False)
os.chdir(datarepo + "submissions")
# Use a context manager: the zip's central directory is only guaranteed to be
# flushed when the archive is closed — the original never closed the ZipFile,
# relying on garbage collection to finalize the archive.
with ZipFile(output_file + ".zip", "w", ZIP_DEFLATED) as archive:
    archive.write(output_file + ".csv")
print(datarepo + "submissions" + "\\" + output_file + ".csv ready for submission")
# -
# ## Submit Results
| 04_Cdiscount/Cdiscount_4_InferenceAndSubmit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %time from hikyuu.interactive.interactive import *
iodog.open()
#use_draw_engine('echarts')
# # 一、策略分析
#
# ## 原始描述
#
# 买入条件:周线MACD零轴下方底部金叉买入30%
#
# 卖出条件:日线级别 跌破 20日线 卖出50%持仓
#
#
# ## 策略分析
#
# 市场环境:无
#
# 系统有效性:无
#
# 信号指示器:
# - 买入信号:周线MACD零轴下方底部金叉,即周线的DIF>DEA金叉时买入(快线:DIF,慢线DEA)
# - 卖出信号:日线级别 跌破 20日均线
#
# 止损/止盈:无
#
# 资金管理:
# - 买入:30% (不明确,暂且当做当前现金的30%)
# - 卖出:已持仓股票数的50%
#
# 盈利目标:
#
# 移滑价差:
#
# # 二、实现系统部件
#
# ## 定义信号指示器
def getNextWeekDate(week):
    """Return the Monday of the week following *week* (as a hikyuu Datetime)."""
    from datetime import timedelta
    as_py = week.datetime()
    days_until_next_monday = 7 - as_py.weekday()
    return Datetime(as_py + timedelta(days=days_until_next_monday))
def DEMO_SG(self):
    """
    Buy signal:  weekly MACD golden cross below the zero axis, i.e. buy when
                 the weekly DIF crosses above DEA (fast line: DIF, slow: DEA)
    Sell signal: daily close drops below the 20-day moving average
    Parameters:
        week_macd_n1: weekly DIF window
        week_macd_n2: weekly DEA window
        week_macd_n3: weekly MACD smoothing window
        day_n: daily moving-average window
    NOTE(review): the strategy text requires the golden cross to happen BELOW
    the zero axis, but the code never checks fast/slow < 0 — confirm intent.
    """
    k = self.getTO()
    if (len(k) == 0):
        return
    # NOTE(review): `stk` is unused; getStock() is called again below.
    stk = k.getStock()
    #-----------------------------
    # Daily-level sell signals
    #-----------------------------
    day_c = CLOSE(k)
    day_ma = MA(day_c, self.getParam("day_n"))
    day_x = day_c < day_ma # close below the moving average
    for i in range(day_x.discard, len(day_x)):
        if day_x[i] >= 1.0:
            self._addSellSignal(k[i].datetime)
    #-----------------------------
    # Weekly-level buy signals
    #-----------------------------
    week_q = QueryByDate(k[0].datetime, k[-1].datetime.nextDay(), kType=Query.WEEK)
    week_k = k.getStock().getKData(week_q)
    n1 = self.getParam("week_macd_n1")
    n2 = self.getParam("week_macd_n2")
    n3 = self.getParam("week_macd_n3")
    m = MACD(CLOSE(week_k), n1, n2, n3)
    fast = m.getResult(0)
    slow = m.getResult(1)
    # Need at least one prior bar to detect a cross.
    discard = m.discard if m.discard > 1 else 1
    for i in range(discard, len(m)):
        if (fast[i-1] < slow[i-1] and fast[i] > slow[i]):
            # A signal computed on this week's bar can only act on next Monday.
            self._addBuySignal(week_k[i].datetime.nextWeek())
# ## 定义资金管理策略
class DEMO_MM(MoneyManagerBase):
    """Money manager for the demo strategy.

    Buy:  30% of the current cash (the strategy text is ambiguous; treated
          here as 30% of the account's available cash).
    Sell: 50% of the currently held shares.
    """

    def __init__(self):
        super().__init__("MACD_MM")

    def _reset(self):
        pass

    def _clone(self):
        return DEMO_MM()

    def _getBuyNumber(self, datetime, stk, price, risk, part_from):
        # Spend 30% of the available cash at the current price.
        # The framework rounds to the minimum trading unit automatically,
        # e.g. int((cash * 0.3 // price // stk.atom) * stk.atom).
        available = self.getTM().currentCash
        return int(available * 0.3 / price)  # return type must be int

    def _getSellNumber(self, datetime, stk, price, risk, part_from):
        # Sell half of the current position; ignore anything below one lot.
        held = self.getTM().getPosition(stk).number
        half = int(held * 0.5)
        return half if half >= 100 else 0
# # 三、构建并运行系统
#
# ## 设定系统参数
#
# +
# Account parameters
init_cash = 100000 #initial account capital
init_date = Datetime('1990-1-1') #account opening date
# Signal-indicator parameters (weekly MACD windows + daily MA window)
week_n1 = 12
week_n2 = 26
week_n3 = 9
day_n = 20
# Pick the target stock and the backtest interval
stk = sm['sz000001']
start_date = Datetime('2017-01-01') #mixed K-line levels are used, so use dates (not bar indices) as query bounds
end_date = Datetime()
# -
# ## 构建系统实例
# +
# Create the trading account
my_tm = crtTM(datetime=init_date, initCash = init_cash, costFunc=TC_FixedA())
# Create the system instance
my_sys = SYS_Simple()
# Bind the account
my_sys.tm = my_tm
# Bind the signal indicator, forwarding the MACD/MA windows as parameters
my_sys.sg = crtSG(DEMO_SG,
                  {'week_macd_n1': week_n1, 'week_macd_n2': week_n2, 'week_macd_n3': week_n3, 'day_n': day_n},
                  'DEMO_SG')
# NOTE(review): 'alternate' False presumably lets same-side signals repeat
# instead of forcing strict buy/sell alternation — confirm in hikyuu docs.
my_sys.sg.setParam('alternate', False)
# Bind the money-management strategy
my_sys.mm = DEMO_MM()
# -
# ## 运行系统
# +
iodog.close()
q = QueryByDate(start_date, end_date, kType=Query.DAY)
my_sys.run(stk, q)
# Save the trade records and positions to the temp directory (viewable in Excel).
# The temp dir is normally the `tmp` subdirectory under the data directory.
# If the Excel file is open, close it before re-running, or the new results
# cannot be saved.
my_tm.tocsv(sm.tmpdir())
# -
# # 四、查看资金曲线及绩效统计
#绘制资金收益曲线(净收益)
x = my_tm.getProfitCurve(stk.getDatetimeList(q), KQuery.DAY)
#x = my_tm.getFundsCurve(stk.getDatetimeList(q), KQuery.DAY) #总资产曲线
x = PRICELIST(x)
x.plot()
#回测统计
per = Performance()
print(per.report(my_tm, Datetime.now()))
# # 五、或许想看下图形
my_sys.plot()
MA(CLOSE(my_sys.getTO()), 20).plot(new=False)
# # 六、或许想看看所有股票的情况
# +
import pandas as pd
def calTotal(blk, q):
    """Backtest every stock in block *blk* over query *q* and collect results.

    Relies on the module-level `my_sys` / `my_tm`; returns a DataFrame with
    one row per stock: market code, name, and final total assets.
    """
    per = Performance()
    codes, names, totals = [], [], []
    for stock in blk:
        my_sys.run(stock, q)
        per.statistics(my_tm, Datetime.now())
        codes.append(stock.market_code)
        names.append(stock.name)
        totals.append(per.get("当前总资产".encode('gb2312')))
    return pd.DataFrame({'代码': codes, '股票': names, '当前总资产': totals})
# %time data = calTotal(blocka, q)
# -
#保存到CSV文件
#data.to_csv(sm.tmpdir() + '/统计.csv')
data[:10]
| hikyuu/examples/notebook/Demo/Demo1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from pandas_datareader import data as wb

# Download ~10 years of adjusted closes for the stock (NVDA) and the
# market index (NASDAQ Composite, ^IXIC).
tickers = ['NVDA', '^IXIC']
data = pd.DataFrame()
for t in tickers:
    data[t] = wb.DataReader(t, data_source = 'yahoo', start = '2007-11-20', end = '2017-11-10')['Adj Close']
# -
# Annualized covariance matrix of daily log returns (250 trading days/year).
sec_returns = np.log(data / data.shift(1))
cov = sec_returns.cov() * 250
cov
# Covariance between the stock and the market.
cov_with_market = cov.iloc[0,1]
cov_with_market
# CAPM beta = cov(stock, market) / var(MARKET).
# BUG FIX: the original used sec_returns['NVDA'].var() — the stock's own
# variance — as the denominator, which is the wrong quantity for beta.
market_var = sec_returns['^IXIC'].var() * 250
market_var
NVDA_beta = cov_with_market / market_var
NVDA_beta
#Calculating the expected return of NVDA (CAPM): rf = 2.5%, market risk premium = 5%
NVDA_er = .025 + NVDA_beta * .05
NVDA_er
#Sharpe Ratio (annualized, using the stock's own volatility)
Sharpe = (NVDA_er - .025) / (sec_returns['NVDA'].std() * 250 ** .5)
Sharpe
| Python for Finance - Investment Fundamentals & Data Analytics/Calculating the Expected Return of a Stock (CAPM).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# argv:
# - /Users/marc/venvs/edv-pilot/bin/python
# - -m
# - ipykernel_launcher
# - -f
# - '{connection_file}'
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# # Stochastically Testing Privacy Mechanisms #
#
# How do you validate that a differential privacy implementation actually works?
#
# One approach that can build confidence that the differential privacy property holds for an implementation is stochastic testing: run many iterations of the algorithm against neighboring databases and check that for any output, the expected probability is bounded by $\epsilon$.
#
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Preamble: imports and figure settings
from eeprivacy import PrivateClampedMean
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib as mpl
from scipy import stats
np.random.seed(1234) # Fix seed for deterministic documentation
mpl.style.use("seaborn-white")
MD = 20
LG = 24
plt.rcParams.update({
"figure.figsize": [25, 7],
"legend.fontsize": MD,
"axes.labelsize": LG,
"axes.titlesize": LG,
"xtick.labelsize": LG,
"ytick.labelsize": LG,
})
# + [markdown] nteract={"transient": {"deleting": false}}
# In the test below, we run a `PrivateClampedMean` for a large number of trials for two different databases: one with a single element `0` and one with a single element `1`.
#
# Then, we bin the results and compute the "realized $\epsilon$" for each bin. By chance, sometimes this will slightly exceed the $\epsilon$ value. The test fails if the realized $\epsilon$ greatly exceeds the desired $\epsilon$ for any of the bins.
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Neighboring databases: the empty database vs. a single record of 1.
private_mean = PrivateClampedMean(lower_bound=0, upper_bound=1)
T = 1000000
A = [private_mean.execute(values=[], epsilon=0.1) for t in range(T)]
B = [private_mean.execute(values=[1], epsilon=0.1) for t in range(T)]
# Clamp outputs into [0, 1] before binning.
L = 0
U = 1
A = np.clip(A, L, U)
B = np.clip(B, L, U)
bins = np.linspace(L, U, num=50)
fig, ax = plt.subplots()
ax.set_yscale("log")
plt.hist(A, color='b', alpha=0.5, bins=bins)
plt.hist(B, color='r', alpha=0.5, bins=bins)
plt.title("Compare output likelihood for neighboring databases")
plt.xlabel("Output")
plt.ylabel("Count (log scale)")
plt.show()
A, bin_edges = np.histogram(A, bins=bins)
B, bin_edges = np.histogram(B, bins=bins)
# Realized epsilon per bin: |ln(count_A / count_B)| (the common factor T
# cancels, so counts stand in for probabilities).
# NOTE(review): bins where either count is 0 yield inf/nan (log of 0 or
# division by zero) — unlikely at T = 1e6 but not impossible; confirm.
realized_epsilon = np.abs(np.log(A / B))
plt.hist(realized_epsilon, color="k", bins=20)
plt.title("Realized ε")
plt.xlabel("ε")
plt.ylabel("Count")
plt.show()
| docs-source/source/stochastic-testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Output classes
# +
import numpy as np
import pandas as pd
from tabulate import tabulate
from streng.common.io.output import OutputTable, OutputString, OutputExtended
# -
# ## Some example data
#
# given as a list of dictionaries
data = [{'__iteration': 1,
'x_y': 0.04213429287708525,
'y_y': 1310.8730715333027,
'x_u': 0.44886,
'y_u': 1311.3866666666665,
'kinel': 1.2627555238564436,
'kel': 31111.785247171465,
'k_06': 24639.615704484408,
'error': 0.26267331521364284},
{'__iteration': 2,
'x_y': 0.05395737709629134,
'y_y': 1329.4890360745674,
'x_u': 0.44886,
'y_u': 1329.4890360745674,
'kinel': -91.76544991341024,
'kel': 24639.615704484408,
'k_06': 24476.018099972942,
'error': 0.006683995895216576},
{'__iteration': 3,
'x_y': 0.05434285482397955,
'y_y': 1330.0966982759253,
'x_u': 0.44886,
'y_u': 1330.0966982759253,
'kinel': -94.94217563576123,
'kel': 24476.018099972942,
'k_06': 24469.75466696404,
'error': 0.0002559663181812895},
{'__iteration': 4,
'x_y': 0.05435772305859232,
'y_y': 1330.1201474985282,
'x_u': 0.44886,
'y_u': 1330.1201474985282,
'kinel': -95.064891451666,
'kel': 24469.75466696404,
'k_06': 24469.51314473135,
'error': 9.870332575089462e-06},
{'__iteration': 5,
'x_y': 0.05435829655145874,
'y_y': 1330.1210519911244,
'x_u': 0.44886,
'y_u': 1330.1210519911244,
'kinel': -95.0696250854122,
'kel': 24469.51314473135,
'k_06': 24469.503828907287,
'error': 3.807116044736897e-07}]
# ## Tabulate
#
# Not in class but used
#
# ### using tabulate to show data as markdown table
print(tabulate(data, headers='keys', tablefmt="pipe", floatfmt=".3E"))
# ## Pandas dataframes
# Not in class but used
df = pd.DataFrame(data=data, columns=list(data[0].keys()))
df
# ## Class: OutputTable
print(OutputTable.__doc__)
# ### get the data
ot = OutputTable(data=data)
# ### property: to_markdown
print(ot.to_markdown)
# ### property: to_panda_dataframe
ot.to_panda_dataframe
# ### method: to_quantity_value(row_number)
#
# Μετατρέπει σε νέο OutputTable όπου υπάρχουν 2 στήλες: quantity-value.
# Αν δεν οριστεί γραμμή του αρχικού OutputTable, διαβάζει την 1η
ot_quantity_value = ot.to_quantity_value()
print(ot_quantity_value.to_markdown)
ot_quantity_value2 = OutputTable()
ot_quantity_value2 = ot.to_quantity_value(2)
print(ot_quantity_value2.to_markdown)
# ## Class usage
# ### not very useful but can work sometimes
ot.data[2]['x_y']
# ### show data as pandas dataframe
ot.to_panda_dataframe
# ### retrieve value from output table
ot.retrieve(search_field='__iteration',
search_value=2,
find_field='kinel')
# ### retrieve column to list
ot.retrieve_column_to_list('x_y')
# ## OutputString
# coming soon...see bilin notebook
# ## OutputExtended
# coming soon...see bilin notebook
data
def convertDL(val):
    """Convert between a dict-of-lists and a list-of-rows, both directions.

    A dict {k: [v1, v2]} becomes [[k, v1, v2], ...]; a list of rows
    [[k, v1, v2], ...] becomes {k: [v1, v2], ...}.
    """
    if isinstance(val, dict):
        # BUG FIX: dict.iteritems() was removed in Python 3 — use items().
        return [[k] + v for k, v in val.items()]
    return {v[0]: v[1:] for v in val}
# +
# data is list of dicts
# -
# make it a dict of lists
ddff = pd.DataFrame(data=data)
ddff['kel'].tolist()
data_dict_of_lists = ddff.to_dict()
for i in data[0].keys():
print(i)
data_dict_of_lists['x_y'].values()
def lod_to_dol(list_of_dicts):
    """Convert a list of dicts into a dict of lists.

    The keys of the first dict define the columns; each output list holds
    that key's value from every input dict, in order.
    """
    keys = list_of_dicts[0]
    return {key: [record[key] for record in list_of_dicts] for key in keys}
dol = lod_to_dol(data)
# +
# dol['error']
# +
# dol.keys()
# -
def dol_to_lod(dict_of_lists):
    """Convert a dict of lists into a list of dicts (one dict per row)."""
    keys = list(dict_of_lists)
    rows = zip(*dict_of_lists.values())
    return [dict(zip(keys, row)) for row in rows]
dol_to_lod(dol)
isinstance(dol, dict)
| common/io/output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 1) Keep adding the next child till you get -1
#
# 2) How many children does 5 have?
class TreeNode:
    """A node of a generic (n-ary) tree: a payload plus ordered children."""

    def __init__(self, data):
        self.data = data
        self.children = []
def takeTreeInput():
    """Read a generic tree from stdin recursively; -1 means "no node here"."""
    print("Enter root Data")
    value = int(input())
    if value == -1:
        return None
    node = TreeNode(value)
    print("Enter number of children for ", value)
    for _ in range(int(input())):
        node.children.append(takeTreeInput())
    return node
def printTreeDetailed(root):
    """Depth-first dump: each line shows a node followed by its direct children."""
    if root is None:
        return
    line = f"{root.data} : " + "".join(f"{child.data} ," for child in root.children)
    print(line)
    for child in root.children:
        printTreeDetailed(child)
root = takeTreeInput()
printTreeDetailed(root)
| 17 Generic Trees/17.03 Take Tree Input (Recursively).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="_0el9BtuVbCF"
# # Install and Import libraries
# + colab={"base_uri": "https://localhost:8080/", "height": 386} colab_type="code" id="Ph4ptUGyXlXf" outputId="d807e5b0-49a5-4158-9411-37bd65c8e88f"
# !pip install catboost
# + colab={} colab_type="code" id="tFLETgQxdteo"
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.model_selection import train_test_split
from catboost import CatBoostRegressor
from catboost import CatBoostClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
import lightgbm as lgb
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# + [markdown] colab_type="text" id="PaiRqlvvda4i"
# # Load the dataset and give it the desired format
# + colab={} colab_type="code" id="OVOP8h3ldG8G"
from sklearn.datasets import load_breast_cancer, load_boston
bc = load_breast_cancer()
boston = load_boston()
df_classification = pd.DataFrame(data=np.c_[bc['data'], bc['target']], columns=list(bc['feature_names'])+['target'])
df_regression = pd.DataFrame(data=np.c_[boston['data'], boston['target']], columns=list(boston['feature_names'])+['target'])
# + colab={"base_uri": "https://localhost:8080/", "height": 261} colab_type="code" id="oJKTTcGjdepL" outputId="e0ce7f44-b48e-44b3-8fe9-5de6d29a4b67"
df_regression.tail()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="38ZyswKigIJg" outputId="649cca8a-6411-42a0-8ab2-82b2e9b89635"
df_regression.head()
# + [markdown] colab_type="text" id="jV3RkAphgYeh"
# # Train / Test split
# + colab={} colab_type="code" id="FFB_U2ZwgcuR"
X = df_classification.drop('target', axis=1)
y = df_classification['target']
X_train_classification, X_val_classification, y_train_classification, y_val_classification = train_test_split(X, y, test_size = 0.3, random_state = 101)
X = df_regression.drop('target', axis=1)
y = df_regression['target']
X_train_regression, X_val_regression, y_train_regression, y_val_regression = train_test_split(X, y, test_size = 0.3, random_state = 101)
# + [markdown] colab_type="text" id="cXRC8r_khMTT"
# # Models
# + [markdown] colab_type="text" id="yRdb5cw-hIq0"
# Define parameters
# + colab={} colab_type="code" id="oj7fA4fshKfB"
n_trees = 1000
lr = 0.05
rs = 101
# + [markdown] colab_type="text" id="ueW3dQdkh2y3"
# #Decision Tree
# + [markdown] colab_type="text" id="5UWgUE3TnRMv"
# Regression:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="l3cH1w67nRWE" outputId="74f02581-293e-46e3-b5ee-f712fa649220"
from sklearn.tree import DecisionTreeRegressor
dtree_reg = DecisionTreeRegressor()
dtree_reg.fit(X_train_regression, y_train_regression)
acc_dt_reg = dtree_reg.score(X_val_regression, y_val_regression)
print(acc_dt_reg)
# + [markdown] colab_type="text" id="Ntxc6UUUnRl7"
# Classification:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="6RUA9HJbh17V" outputId="7043e2f5-a5e5-4bde-f25e-6b7989553955"
from sklearn.tree import DecisionTreeClassifier
dtree_class = DecisionTreeClassifier()
dtree_class.fit(X_train_classification, y_train_classification)
acc_dt_classification = dtree_class.score(X_val_classification, y_val_classification)
print(acc_dt_classification)
# + [markdown] colab_type="text" id="53Qu27sXhuKD"
# # Random Forest
# + [markdown] colab_type="text" id="3gxsF4ukoXBr"
# Regression:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="pM6kVzNPoXX7" outputId="95f40442-b413-4e47-f726-e58311dac053"
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(n_estimators=n_trees, random_state=rs, n_jobs=-1)
rfr.fit(X_train_regression, y_train_regression)
acc_rf_reg = rfr.score(X_val_regression, y_val_regression)
print(acc_rf_reg)
# + [markdown] colab_type="text" id="Hft7Ov_FoXir"
# Classification:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="BJ-Xm2lPh6fE" outputId="95d2406e-0efe-4350-f2a8-cfd4da4e810f"
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=n_trees, random_state=rs, n_jobs=-1)
rfc.fit(X_train_classification, y_train_classification)
acc_rf_classification = rfc.score(X_val_classification, y_val_classification)
print(acc_rf_classification)
# + [markdown] colab_type="text" id="FOw4IQAleOVi"
# # XGBOOST Model
#
#
# + [markdown] colab_type="text" id="7LZz4cMqsxPD"
# XGBoost stands for "Extreme Gradient Boosting". It is a Random Forest model with Boosting and optimised for speed.
#
# **What is Boosting?**
#
# It's very similar to a Random Forest algorithm, but instead of creating the new trees at random, you create them looking to minimize the error.
#
# More info about Gradient Boosting Machines:
# https://towardsdatascience.com/understanding-gradient-boosting-machines-9be756fe76ab
# + [markdown] colab_type="text" id="7GUSXSU0gN9H"
# Regression:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="ez2kCXNIfdBb" outputId="8e590b0d-8a23-4aad-ef69-e32f775a6d75"
model=xgb.XGBRegressor(objective ='reg:squarederror',n_estimators=n_trees, learning_rate=lr, random_state=rs, n_jobs=-1) #for the best model, high number of estimators, low learning rate
model.fit(X_train_regression, y_train_regression)
acc_xgb_reg = model.score(X_val_regression,y_val_regression)
print(acc_xgb_reg)
# + [markdown] colab_type="text" id="17KMzq1JgSIW"
# Classification:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="sp9Hd1CxgRUi" outputId="76a6df8f-2e7c-4df6-e8f8-6855fe4cd8f1"
model = xgb.XGBClassifier(n_estimators=n_trees, learning_rate=lr, random_state=rs, n_jobs=-1) #for the best model, high number of estimators, low learning rate
model.fit(X_train_classification, y_train_classification)
acc_xgb_classification = model.score(X_val_classification,y_val_classification)
print(acc_xgb_classification)
# + [markdown] colab_type="text" id="9ARHv36BfdeY"
# # CatBoost Model
# Very useful when dealing with categorical features. With the datasets in this session we can't use the most powerful feature of catboost which is its processing of categorical features. When dealing with categorical features you should use the parameter *cat_features* — This parameter is a must in order to leverage Catboost preprocessing of categorical features, if you encode the categorical features yourself and don’t pass the columns indices as cat_features you are missing the essence of Catboost.
#
# There is an example of code in the comments of how to use the categorical features
#
# More info:
# https://towardsdatascience.com/https-medium-com-talperetz24-mastering-the-new-generation-of-gradient-boosting-db04062a7ea2
#
# https://medium.com/@hanishsidhu/whats-so-special-about-catboost-335d64d754ae
#
#
# + [markdown] colab_type="text" id="gIhML8xAXt-z"
# Regression:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="plpNQv6efi2m" outputId="436f3fd3-a41f-494d-aad9-2de36c648fe0"
model = CatBoostRegressor(silent=True, n_estimators=n_trees, learning_rate=lr, random_state=rs)
#categorical_features_indices = np.where(df.dtypes != np.float)[0]
model.fit(X_train_regression, y_train_regression) #Add parameter cat_features=categorical_features_indices
acc_cb_reg = model.score(X_val_regression, y_val_regression)
print(acc_cb_reg)
# + [markdown] colab_type="text" id="GUx1ZuuTcrmj"
# Classification:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="1UpPIeyhct7Z" outputId="42da5d8e-c220-41da-faef-f6d8e677e942"
model = CatBoostClassifier(silent=True, n_estimators=n_trees, learning_rate=lr, random_state=rs)
#categorical_features_indices = np.where(df.dtypes != np.float)[0]
model.fit(X_train_classification, y_train_classification) #Add parameter cat_features=categorical_features_indices
#acc_cb_classification = model.score(X_val_classification, y_val_classification) #For some reason not working
# BUG FIX: the previous formula, 1 - sum(pred - y)/n, lets opposite-signed
# errors cancel and can even report "accuracy" above 1. Accuracy is the
# fraction of exact matches between predictions and labels.
acc_cb_classification = np.mean(np.ravel(model.predict(X_val_classification)) == np.ravel(y_val_classification))
print(acc_cb_classification)
# + [markdown] colab_type="text" id="QZF7bVR9h-RO"
# # AdaBoost
# + [markdown] colab_type="text" id="Xaba6oVuwJyD"
# Another way of boosting.
#
# More info:
# https://towardsdatascience.com/boosting-and-adaboost-clearly-explained-856e21152d3e
# + [markdown] colab_type="text" id="sRbXnQMLc1m6"
# Regression:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="ouf9aeoyh_c6" outputId="2c554f86-4def-473a-b4b2-d548dc336ac7"
model = AdaBoostRegressor(random_state=101, n_estimators=n_trees, learning_rate=lr)
model.fit(X_train_regression, y_train_regression)
acc_ab_reg = model.score(X_val_regression, y_val_regression)
print(acc_ab_reg)
# + [markdown] colab_type="text" id="uHl2K1Qqdkup"
# Classification:
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="-j9WkUmadqdj" outputId="d9af97a0-f52f-43d3-93a1-1047c3fe9de7"
model = AdaBoostClassifier(random_state=101, n_estimators=n_trees, learning_rate=lr)
model.fit(X_train_classification, y_train_classification)
acc_ab_classification = model.score(X_val_classification,y_val_classification)
print(acc_ab_classification)
# + [markdown] colab_type="text" id="KZRBSOJAiRjn"
# # Model performance comparison
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="Vl2FQRHViT8a" outputId="4cbd5c34-c2c0-4637-d389-08e010bd9971"
models_regression = pd.DataFrame({'Model': ['Decision Tree', 'Random Forest', 'XGBoost', 'CatBoost', 'AdaBoost'],
'Score': [acc_dt_reg, acc_rf_reg, acc_xgb_reg, acc_cb_reg, acc_ab_reg]})
models_regression.sort_values(by='Score', ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="vf3yzuypi3C6" outputId="7381b6e1-09bf-4d41-d4a8-8221a0028b6d"
models_categorical = pd.DataFrame({'Model': ['Decision Tree', 'Random Forest', 'XGBoost', 'CatBoost', 'AdaBoost'],
'Score': [acc_dt_classification, acc_rf_classification, acc_xgb_classification, acc_cb_classification, acc_ab_classification]})
models_categorical.sort_values(by='Score', ascending=False)
# + colab={} colab_type="code" id="Rfb746OtWret"
#Load Boston Data Set
# -
| module_4_decision_trees/Decision_Trees_and_Random_Forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Class #7: Object-oriented programming ("OOP") Python Classes
# ------------------------------------------------------------
# * Introduction
# * Why?
# * What OOP is
# * How Python supports it
# * Methods
# * Making instance
# * Adding methods
# * Keeping its data consistent
# * Ability to raise exceptions in setters vs. a dict
# * Multiple instances
#
#
# Homework
# --------
#
# Get started with the _Robot Simulator_ Exercism problem. I gave you a good head-start here in class.
# Linking to an Exercism task is still a little wonky, so please do a cmd-f or ctrl-f and search for
# 'robot' on the Python track page. Here's my unstarted version:
#
# https://exercism.io/my/solutions/9708bb97eff84261a0f86d3e624b3553
#
#
# Introduction
# ------------
#
# **Robots as dictionaries: showing the problems that OOP solves**
robot = {
'coordinates': (0, 0),
'bearing': 'North'
}
print(robot)
robot['coordinates']
robot['coordinates'][0]
type(robot)
# +
def turn_right(robot):
    """Rotate a dict-based robot's bearing 90 degrees clockwise.

    Unknown bearings are left untouched, matching the original if/elif chain.
    """
    clockwise = {
        'North': 'East',
        'East': 'South',
        'South': 'West',
        'West': 'North',
    }
    heading = robot['bearing']
    if heading in clockwise:
        robot['bearing'] = clockwise[heading]
turn_right(robot)
print(robot)
# -
turn_right(robot)
print(robot)
robot
robot['bearing'] = 'nowhere'
robot
turn_right(robot)
robot
# +
# Now with OOP
# * Message passing
# * Data hiding: less is more
# * Combines both behavior and data (functions and variables)
# * Re-use
# * Implementation hiding
class Robot():
    """Represents a robot with a direction and location.

    Demonstrates OOP over the dict version: data + behavior combined,
    internals hidden behind name-mangled attributes, invalid states rejected
    at construction time.
    """

    NORTH = 'North'
    EAST = 'East'
    WEST = 'West'
    SOUTH = 'South'

    # Clockwise transition table for 90-degree right turns.
    TURN_RIGHT = {
        NORTH: EAST,
        EAST: SOUTH,
        SOUTH: WEST,
        WEST: NORTH
    }

    def __init__(self, name, coordinates, bearing):
        """Refuse to construct an invalid instance."""
        if bearing not in ['North', 'South', 'East', 'West']:
            raise Exception(f"Invalid bearing: {bearing}")
        if type(name) is not str:
            raise Exception(f"{name} is not a string")
        self.__my_name = name
        self.__coordinates = coordinates
        self.__bearing = bearing

    def __repr__(self):
        """Return a simple string representation of myself."""
        return (f"<{self.__my_name.capitalize()} at {self.__coordinates}, "
                f"heading {self.__bearing}>")

    def turn_right(self):
        """Turn myself to the right by 90 degrees."""
        self.__bearing = Robot.TURN_RIGHT[self.__bearing]

    def turn_left(self):
        """Turn myself to the left by 90 degrees (three right turns)."""
        for _ in range(3):
            self.turn_right()

    def rename_to(self, new_name):
        """Set my name to the given string."""
        if type(new_name) is not str:
            raise Exception(f"{new_name} is not a string")
        self.__my_name = new_name
kaylie = Robot('kaylie', (0, 0), 'North')
print(kaylie.__repr__())
# -
type(robot)
type(kaylie)
# Test the data validation
maru = Robot('maru', (1, 1), 'South')
spot = Robot('spot', (2, 2), 'nowhere')
print(spot)
print(maru)
Robot.NORTH
r = Robot('xxx', (0,0), 'North')
r.SOUTH
r
r.turn_right()
r
r.__bearing
help(r)
r
r.turn_left()
r
r.turn_right()
r
help(Robot)
r
r.name = 'Rover'
r
r.my_name = 'Rover'
r
# It's bad that this is possible. We "should" not be able to represent
# illegal states:
r.my_name = None
r.my_name
r
broken_robot = Robot('yyyy', (0, 0), 'West')
broken_robot.my_name = None
Robot('yyyy', (0, 0), 'West')
Robot(None, (0, 0), 'West')
fixed_robot = Robot('yyyy', (0, 0), 'West')
fixed_robot.__my_name = None
fixed_robot
renamable_robot = Robot('yyyy', (0, 0), 'West')
renamable_robot
help(renamable_robot)
renamable_robot.rename_to('Matie')
renamable_robot
renamable_robot.__my_name
dir()
(robot, maru, kaylie, fixed_robot)
| june-2019/7-oop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import time
from collections import OrderedDict
import numpy as np
import nidaqmx
import pandas as pd
import quantities as pq
from nidaqmx.stream_writers import (
DigitalSingleChannelWriter, DigitalMultiChannelWriter)
from nidaqmx.utils import flatten_channel_string
from nidaqmx.constants import (
LineGrouping, AcquisitionType, DigitalWidthUnits, Edge,
HandshakeStartCondition, Level, MIOAIConvertTimebaseSource,
OverflowBehavior, TaskMode, Polarity, RegenerationMode,
SampleInputDataWhen, SampleTimingType, UnderflowBehavior)
from nidaqmx.error_codes import DAQmxErrors, DAQmxWarnings
from nidaqmx.errors import (
check_for_error, is_string_buffer_too_small, DaqError, DaqResourceWarning)
from scipy.optimize import fsolve, least_squares
from olfactometer.PID_reader import PID_Tester
from olfactometer.valve_driver import ValveDriver
from olfactometer.equipment import Olfactometer
from olfactometer.my_equipment import MyValve, MyJar, MyLowMFC, \
MyMediumMFC, MyHighMFC
from olfactometer.odorants import Solution, Compound, ChemicalOrder, \
Vendor, Molecule
# from graph import make_graph, draw_graph
from pprint import pprint
from olfactometer.smell_engine import SmellEngine
from olfactometer.data_container import DataContainer
from olfactometer.ui import UI
from IPython.display import display
np.set_printoptions(precision=6)
# -
# Initialize the odorant molecules: PubChem CID -> name mapping.
#molecules = OrderedDict([(702, 'Ethanol')])
molecules = OrderedDict([(7410, 'Acetophenone'), (7439, 'carvone'), (440917, 'd-limonene')])
# Initialize UI widgets for concentration/duty-cycle/MFC control.
ui = UI(molecules, print_PID_average=True)
NUM_PID_SAMPLES = 10000
# +
# Pickle files holding precomputed odor lookup tables (optional).
eth = "./ethanol_11-2-21.pkl"
odors = "./odors.pkl"
# To run without odor table
smell_engine = SmellEngine(1000, len(molecules), DataContainer(), debug_mode=False, write_flag=False, PID_mode = True,
                           look_up_table_path=None, oms=molecules)
# To run with odor table, diff being specification of pkl file.
# smell_engine = SmellEngine(1000, len(molecules), DataContainer(), debug_mode=False, write_flag=False, PID_mode = True,
#                            look_up_table_path=odors, oms=molecules)
# Initialize system: register molecules, dilutions, then bring up hardware.
smell_engine.set_odorant_molecule_ids(list(molecules.keys()))
# NOTE(review): dilutions are positional and assume exactly 3 molecules
# in the order declared above -- confirm if molecule set changes.
smell_engine.set_odorant_molecule_dilutions([10,1,10])
smell_engine.initialize_smell_engine_system()
smell_engine.smell_controller.valve_driver.num_pid_samples = NUM_PID_SAMPLES
# Bare expression: displays loaded molecules in the notebook output.
smell_engine.olfactometer.loaded_molecules
# + jupyter={"source_hidden": true}
# Inspect jar count, max flow rates and total vapor concentration.
n_jars = len(smell_engine.olfactometer.jars)
print(n_jars)
max_flow_rates = smell_engine.smell_controller.get_max_flow_rates()
total_vapor = smell_engine.smell_controller.get_vapor_concs_dense(set(list(smell_engine.target_concentration))).sum(axis=0)
print(f"Max Flow Rate {max_flow_rates}\nVapor Pressures {total_vapor}")
# -
##### INITIALIZE CONCENTRATION SLIDERS
ui.odorConcentrationUI()
# SPECIFYING CONCENTRATIONS WITHOUT THREADED SLIDERS
concentration_mixtures = ui.odorConcentrationValues() # Read in user-specified concentrations
# print(concentration_mixtures)
smell_engine.set_desired_concentrations(concentration_mixtures) # Assign target concentrations
ui.dutyCyclesUI()
ui.mfcUI()
# Push the user-specified valve duty cycles to the hardware driver.
smell_engine.set_valve_duty_cycles(ui.dutyCyclesValues())
print(ui.dutyCyclesValues())
# Reorder the UI's MFC readings into the order the smell engine expects.
# TODO: Fix referencing order of MFC setpoints to be automatic
mfc_values = ui.mfcValues()
mfcs = [mfc_values[0], mfc_values[2], mfc_values[1]]
# Bug fix: the reordered `mfcs` list was built but never used -- the raw
# ui.mfcValues() was passed instead, defeating the reordering above.
smell_engine.set_mfc_setpoints(mfcs)
# +
# Timer setup specifies sampling frequency; sampling rate specifies # of samples to read
PID_mode = True
pid = PID_Tester(ui, smell_engine, PID_mode, cont_read_conc=False,sampling_rate = NUM_PID_SAMPLES)
# Start periodic PID sampling; period is in seconds (10 microseconds here).
pid.timer_setup(.00001)
pid.timer_start()
# Show the live time-series widget produced by the PID reader.
display(ui.timeSeries)
# -
# # %matplotlib widget
# Shut down hardware tasks and release NI-DAQ resources when finished.
smell_engine.close_smell_engine()
| notebooks/SystemTester.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Models and OLS
# *<NAME>*
#
# **Regression** refers to the prediction of a continuous variable (income, age, height, etc.) using a dataset's features. A **linear model** is a model of the form:
#
# $$y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + ... + \beta_K x_K + \epsilon$$
#
# Here $\epsilon$ is an **error term**; the predicted value for $y$ is given by $\hat{y} = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + ... + \beta_K x_K$, so $y - \hat{y} = \epsilon$.
#
# $\epsilon$ is almost never zero, so for regression we must measure "accuracy" differently. The **sum of squared errors (SSE)** is the sum $\sum_{i = 1}^n (y_i - \hat{y}_i)^2$ (letting $y_i = \beta_0 + \beta_1 x_{1,i} + \beta_2 x_{2,i} + ... + \beta_K x_{K,i} + \epsilon_i$ and $\hat{y}_i$ defined analogously). We might define the "most accurate" regression model as the model that minimizes the SSE. However, when measuring performance, the **mean squared error (MSE)** is often used. The MSE is given by $\frac{\text{SSE}}{n} = \frac{1}{n}\sum_{i = 1}^{n} (y_i - \hat{y}_i)^2$.
#
# **Ordinary least squares (OLS)** is a procedure for finding a linear model that minimizes the SSE on a dataset. This is the simplest procedure for fitting a linear model on a dataset. To evaluate the model's performance we may split a dataset into training and test set, and evaluate the trained model's performance by computing the MSE of the model's predictions on the test set. If the model has a high MSE on both the training and test set, it's underfitting. If it has a small MSE on the training set and a high MSE on the test set, it is overfitting.
#
# With OLS the most important decision is which features to use in prediction and how to use them. "Linear" means linear in coefficients only; these models can handle many kinds of functions. (The models $\hat{y} = \beta_0 + \beta_1 x + \beta_2 x^2$ and $\hat{y} = \beta_0 + \beta_1 \log(x)$ are linear, but $\hat{y} = \frac{\beta_0}{1 + \beta_1 x}$ is not.) Many approaches exist for deciding which features to include. For now we will only use cross-validation.
#
# ## Fitting a Linear Model with OLS
#
# OLS is supported by the `LinearRegression` object in **scikit-learn**, while the function `mean_squared_error()` computes the MSE.
#
# I will be using OLS to find a linear model for predicting home prices in the Boston house price dataset, created below.
# NOTE(review): `load_boston` was removed in scikit-learn 1.2 (ethical
# concerns about the dataset); on modern versions substitute another
# regression dataset such as `fetch_california_housing`.
from sklearn.datasets import load_boston
# Bug fix: `sklearn.cross_validation` was deprecated in 0.18 and removed in
# 0.20; `train_test_split` lives in `sklearn.model_selection`, which this
# notebook already uses below for `cross_val_score`.
from sklearn.model_selection import train_test_split
boston_obj = load_boston()
# Features matrix and target vector (median home prices).
data, price = boston_obj.data, boston_obj.target
data[:5, :]
price[:5]
# Hold out a random test set (default 25%) for out-of-sample evaluation.
data_train, data_test, price_train, price_test = train_test_split(data, price)
data_train[:5, :]
price_train[:5]
# We will go ahead and use all features for prediction in our first linear model. (In general this does *not* necessarily produce better models; some features may introduce only noise that makes prediction *more* difficult, not less.)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import numpy as np
# Fit OLS on all 13 features of the training split.
ols1 = LinearRegression()
ols1.fit(data_train, price_train) # Fitting a linear model
# Predict the price of one hand-constructed example house; the feature
# order must match the Boston dataset's column order.
ols1.predict([[ # An example prediction
    1,      # Per capita crime rate
    25,     # Proportion of land zoned for large homes
    5,      # Proportion of land zoned for non-retail business
    1,      # Tract bounds the Charles River
    0.3,    # NOX concentration
    10,     # Average number of rooms per dwelling
    2,      # Proportion of owner-occupied units built prior to 1940
    10,     # Weighted distance to employment centers
    3,      # Index for highway accessibility
    400,    # Tax rate
    15,     # Pupil/teacher ratio
    200,    # Index for number of blacks
    5       # % lower status of population
]])
# In-sample error: MSE and its square root (roughly "average error").
predprice = ols1.predict(data_train)
predprice[:5]
mean_squared_error(price_train, predprice)
np.sqrt(mean_squared_error(price_train, predprice))
# The square root of the mean squared error can be interpreted as the average amount of error; in this case, the average difference between homes' actual and predicted prices. (This is almost the standard deviation of the error.)
#
# For cross-validation, I will use `cross_val_score()`, which performs the entire cross-validation process.
from sklearn.model_selection import cross_val_score
# 10-fold cross-validation on the training split; scoring is the
# *negative* MSE (scikit-learn maximizes scores).
ols2 = LinearRegression()
ols_cv_mse = cross_val_score(ols2, data_train, price_train, scoring='neg_mean_squared_error', cv=10)
ols_cv_mse.mean()
# The above number is the negative average MSE for cross-validation (minimizing MSE is equivalent to maximizing the negative MSE). This is close to our in-sample MSE.
#
# Let's now see the MSE for the fitted model on the test set.
testpredprice = ols1.predict(data_test)
mean_squared_error(price_test, testpredprice)
np.sqrt(mean_squared_error(price_test, testpredprice))
# Overfitting is minimal, it seems.
| Section 4/OLS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center><h1> graphical and numerical for solar-lasso comparison </h1></center>
# <center><h2> $\frac{p}{n}$ : $\frac{100}{100} \rightarrow \frac{100}{150} \rightarrow \frac{100}{200}$ </h2></center>
#
# ---
# ## Check the following before running the code
#
# ### (a) Read "Read_Me_First.docx" first, which introduces the package
#
# ### (b) Before replication, delete all .p files in the "./numerical_result" folder. The .p files record the numerical results of our computation.
#
# ### (c) To avoid confusion, reset your kernel before running the notebook (to clear memory): Menu "Kernel" $\rightarrow$ "Restart Kernel and clear all outputs".
#
# ### (d) To evaluate the code for simulation replication,
# * <font size="4.5"> click : Menu "Kernel" $\rightarrow$ "Restart Kernel and Run All Cells" </font>
# * <font size="4.5"> or, select a cell of code, press "shift" and "enter". Run all cells to avoid errors </font>
#
# ### (e) Check "joblib", "scikit-learn", "numpy", "matplotlib" and "tqdm" are installed. If not,
# * <font size="4.5"> run "pip install joblib scikit-learn numpy matplotlib tqdm" in terminal (Mac OS or Linux) or command prompt as administrator (Windows) if you use Python3 without any IDE. </font>
# * <font size="4.5"> we highly recommend installing Anaconda3 version 2020-11 directly to avoid package management (all packages mentioned above are installed by default).</font>
#
# ---
# ## #1: import all modules
#
# * <font size="4.5"> "pickle" is used to save all computation results into ".p" files, which can be loaded later. </font>
#
# * <font size="4.5"> For simplicity and elegancy, all relevant functions and classes are coded in "simul_plot.py". </font>
# +
# %reset -f
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import os
import errno
from pathlib import Path
from simul_plot import simul_plot
# -
# ---
#
# ## #2: define all functions
#
# * <font size="4.5"> in this simulation, we use CV-lars-lasso (lasso solved by CV-lars) and CV-cd (lasso solved by warm-start cylindrical coordinate descent) as the competitors </font>
#
# ## #2(a): compute of the solar, cv-lars and cv-cd and plot the results
def func_simul(sample_size, n_dim, n_info, n_repeat, num_rep, step_size, rnd_seed, plot_on, repro):
    """Run (repro=True) or reload (repro=False) one solar vs CV-lars vs CV-cd
    simulation and plot its summary graphs.

    Parameters
    ----------
    sample_size : int, sample size n.
    n_dim : int, total number of variables p (informative + redundant).
    n_info : int, number of informative variables.
    n_repeat : int, number of subsamples generated by solar.
    num_rep : int, number of simulation repetitions.
    step_size : float, step size for tuning c.
    rnd_seed : int, random seed.
    plot_on : bool, save figures locally if True.
    repro : bool, recompute results and overwrite the .p file if True;
        otherwise load the previously saved results.
    """
    # set random seed
    np.random.seed(rnd_seed)
    # build the simulation/plotting helper from simul_plot.py
    trial = simul_plot(sample_size, n_dim, n_info, n_repeat, num_rep, step_size, rnd_seed, plot_on)
    # define the name of pickle file
    pkl_file = "./numerical_result/solar_graph_n_"+str(sample_size)+".p"
    if repro == True:
        print("compute the simulation with sample size "+str(sample_size)+" and number of variables "+str(n_dim))
        # compute the result
        opt_c_stack, Q_opt_c_stack, la_array_stack, la_var_stack, solar_coef_stack, cd_array_stack, cd_var_stack = trial.simul_func()
        # create the subdirectory if not existing
        if not os.path.exists(os.path.dirname(pkl_file)):
            try:
                os.makedirs(os.path.dirname(pkl_file))
            # Guard against race condition
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
        # NOTE: the dump order below is the file format -- it must match the
        # load order in the else-branch and in sum_table().
        with open(pkl_file, "wb") as f:
            pickle.dump( opt_c_stack , f)
            pickle.dump( Q_opt_c_stack , f)
            pickle.dump( la_array_stack , f)
            pickle.dump( la_var_stack , f)
            pickle.dump( cd_array_stack , f)
            pickle.dump( cd_var_stack , f)
            pickle.dump( solar_coef_stack , f)
    else:
        # load previously saved results (same order as dumped above)
        with open(pkl_file, "rb") as f:
            opt_c_stack = pickle.load( f )
            Q_opt_c_stack = pickle.load( f )
            la_array_stack = pickle.load( f )
            la_var_stack = pickle.load( f )
            cd_array_stack = pickle.load( f )
            cd_var_stack = pickle.load( f )
            solar_coef_stack = pickle.load( f )
    if not os.path.exists("figure"):
        os.mkdir("figure")
    # set random seed again so the plotting stage is reproducible
    np.random.seed(rnd_seed)
    # plot the hist of number of variables selected of solar, cv-lars-lasso and cv-cd
    trial.vari_hist(Q_opt_c_stack, la_array_stack, cd_array_stack)
    # plot the optimal q value for all num_rep solar repetitions
    trial.q_hist(opt_c_stack)
    # plot the probability of each variable being selected by solar, cv-lars-lasso and cv-cd
    print_true_1 = False
    print_true_2 = True
    num_var_to_plot_1 = 15
    ## maximum number of variables that can be plotted in one graph
    # NOTE(review): 20 is hard-coded here while num_var_to_plot_1 = 15 is
    # used only for bl_vari_plot -- confirm this asymmetry is intended.
    trial.acc_plot(Q_opt_c_stack, la_var_stack, cd_var_stack, 20, print_true_1)
    trial.acc_plot(Q_opt_c_stack, la_var_stack, cd_var_stack, 20, print_true_2)
    trial.bl_vari_plot(solar_coef_stack, num_var_to_plot_1)
# ## #2(b):summarize all the results into a table for better comparison
def sum_table(sample_size, n_dim, num_rep, plot_on):
    """Summarize saved simulation results into two comparison tables.

    Reads the pickle produced by func_simul (the load order below is the
    file format and must match the dump order there).

    Parameters
    ----------
    sample_size : int, sample size n (selects which .p file to read).
    n_dim : int, number of variables p (kept for interface symmetry; unused).
    num_rep : int, number of simulation repetitions (normalizes counts).
    plot_on : bool, kept for interface symmetry; unused here.

    Returns
    -------
    (df1, df2) : both rounded to 2 decimals.
        df1 -- sparsity: mean / median / IQR of the number of selected
               variables per algorithm (solar, cvlars, cvcd).
        df2 -- accuracy: Pr(select Xi) for the 5 informative variables.
    """
    # read all raw results
    pkl_file = "./numerical_result/solar_graph_n_" + str(sample_size) + ".p"
    with open(pkl_file, "rb") as f:
        opt_c_stack = pickle.load(f)
        Q_opt_c_stack = pickle.load(f)
        la_array_stack = pickle.load(f)
        la_var_stack = pickle.load(f)
        cd_array_stack = pickle.load(f)
        cd_var_stack = pickle.load(f)
        solar_coef_stack = pickle.load(f)
    if not os.path.exists("table"):
        os.mkdir("table")
    # number of selected variables per repetition for each algorithm
    solar_len_array = np.empty([len(solar_coef_stack)])
    cvlars_len_array = np.empty([len(solar_coef_stack)])
    cvcd_len_array = np.empty([len(solar_coef_stack)])
    for i in range(len(Q_opt_c_stack)):
        solar_len_array[i] = len(Q_opt_c_stack[i])
        cvlars_len_array[i] = la_array_stack[i]
        cvcd_len_array[i] = cd_array_stack[i]
    # marginal probability of selecting each of the 5 informative variables
    solar_var_array = np.empty([5])
    cvlars_var_array = np.empty([5])
    cvcd_var_array = np.empty([5])
    # pool the selected-variable indices across all repetitions
    solar_vari_appe_stack = np.concatenate(Q_opt_c_stack, 0)
    cvlars_vari_appe_stack = np.concatenate(la_var_stack, 0)
    cvcd_vari_appe_stack = np.concatenate(cd_var_stack, 0)
    for i in range(5):
        solar_var_array[i] = (solar_vari_appe_stack == i).sum() / num_rep
        cvlars_var_array[i] = (cvlars_vari_appe_stack == i).sum() / num_rep
        cvcd_var_array[i] = (cvcd_vari_appe_stack == i).sum() / num_rep
    # sparsity table
    mean_col = [np.mean(solar_len_array), np.mean(cvlars_len_array), np.mean(cvcd_len_array)]
    median_col = [np.median(solar_len_array), np.median(cvlars_len_array), np.median(cvcd_len_array)]
    IQR_col = [np.quantile(solar_len_array, 0.75) - np.quantile(solar_len_array, 0.25),
               np.quantile(cvlars_len_array, 0.75) - np.quantile(cvlars_len_array, 0.25),
               np.quantile(cvcd_len_array, 0.75) - np.quantile(cvcd_len_array, 0.25)]
    df1 = pd.concat([pd.DataFrame({'algo': ['solar', 'cvlars', 'cvcd']}),
                     pd.DataFrame({'Number of selected variables (mean)': mean_col}),
                     pd.DataFrame({'Number of selected variables (median)': median_col}),
                     pd.DataFrame({'Number of selected variables (IQR)': IQR_col})],
                    axis=1, join='inner')
    # accuracy table
    # Bug fix: all five columns were previously labelled 'Pr(select X1)',
    # producing duplicate column names in df2; label them X1..X5.
    prob_frames = [pd.DataFrame({'algo': ['solar', 'cvlars', 'cvcd']})]
    for i in range(5):
        col = [solar_var_array[i], cvlars_var_array[i], cvcd_var_array[i]]
        prob_frames.append(pd.DataFrame({'Pr(select X{})'.format(i + 1): col}))
    df2 = pd.concat(prob_frames, axis=1, join='inner')
    return df1.round(2), df2.round(2)
# ---
#
# ## #3(a): define inputs values
#
# | <font size="4.5"> variable name </font> | <font size="4.5"> meaning </font> |
# |-|-|
# | <font size="4.5"> sample_size </font> | <font size="4.5"> the sample size $n$ in the paper; </font>|
# | <font size="4.5"> n_dim </font> | <font size="4.5"> the number of variables (informative + redundant) in $X$, $p$ in the paper; </font>|
# | <font size="4.5"> n_info </font> | <font size="4.5"> the number of informative variables in $X$; </font>|
# | <font size="4.5"> n_repeat </font> | <font size="4.5"> the number of subsamples generated by solar; </font>|
# | <font size="4.5"> num_rep </font> | <font size="4.5"> the total repetition number of this simulation; </font>|
# | <font size="4.5"> step_size </font> | <font size="4.5"> the step size for tuning $c$; </font>|
# | <font size="4.5"> rnd_seed </font> | <font size="4.5"> the random seed value; </font>|
#
# ### #3(b): define DGP
#
# * <font size="4.5"> the population regression equation is $$Y = 2\cdot \mathbf{x}_0 + 3\cdot \mathbf{x}_1 + 4\cdot \mathbf{x}_2 + 5\cdot \mathbf{x}_3 + 6\cdot \mathbf{x}_4 + u,$$
# * <font size="4.5"> To change the simulation settings, simply change the input values. If you change *n_info* you will adjust the DGP as follows: </font>
# * <font size="4.5"> If $i > \mbox{n_info} - 1$ and $i \in \left[ 0, 1, 2, \ldots, p-1 \right]$, $\beta_i = 0$ in population;</font>
# * <font size="4.5"> If $i \leqslant \mbox{n_info} - 1$ and $i \in \left[ 0, 1, 2, \ldots, p-1 \right]$, $\beta_i = i + 2$ in population</font>
# +
# Simulation configuration (see table above for parameter meanings).
n_dim = 100          # number of variables p (informative + redundant)
n_info = 5           # number of informative variables
step_size = -0.01    # step size for tuning c (negative: descending grid)
n_repeat = 10        # number of solar subsamples
num_rep = 200        # total simulation repetitions
rnd_seed = 0         # random seed for reproducibility
sample_size_0 = 100  # case 1: n = 100
sample_size_1 = 150  # case 2: n = 150
sample_size_2 = 200  # case 3: n = 200
# -
# ## If you want to save the plots and tables locally, please change the *plot_on* value to True
plot_on = False
# * <font size="5"> Set "repro" to True to recompute the simulation and overwrite the ".p" files; </font>
# * <font size="5"> *repro=False* reloads the previously saved results from the ".p" files. </font>
repro = False
# ---
#
# ## #4: compute solar, CV-lars-lasso and CV-cd
#
# ### This applies for replication only (numerical results will be saved as .p files) and can be skipped if you have already computed simulation results.
#
# ### Numpy, sklearn and python are actively updated. If you use different version, replication results may be slightly different from the paper (see Read_me_first.pdf for detail).
#
# ### To rerun this part, first delete all .p files in your working folder to avoid possible bug.
# ### case #1: $n = 100, p =100$
# ### plot the results
#
# <font size="4.5"> The available results are: </font>
#
# * <font size="4.5"> histogram of number of variables selected by solar, CV-cd and CV-lars-lasso; </font>
# * <font size="4.5"> histogram of optimal $c$ value for $\left\{ \mathbf{x}_j \; \vert \; \widehat{q}_j \geqslant c \right\}$ in all 200 repetitions of solar computation on different samples; </font>
# * <font size="4.5"> probability of selecting each redundant variable in solar, CV-cd and CV-lars-lasso (top 15 by probability); </font>
# * <font size="4.5"> probability of selecting each informative variable in solar, CV-cd and CV-lars-lasso; </font>
# * <font size="4.5"> the boxplot of regression coefficients of solar (top 15 means); </font>
# Case 1: n = 100, p = 100 -- run (or reload) the simulation and plot.
func_simul(sample_size_0, n_dim, n_info, n_repeat, num_rep, step_size, rnd_seed, plot_on, repro)
# ## now let's summarize the results into a table
df1, df2 = sum_table(sample_size_0, n_dim, num_rep, plot_on)
df1
df2
# ### case #2: $n = 150, p =100$
func_simul(sample_size_1, n_dim, n_info, n_repeat, num_rep, step_size, rnd_seed, plot_on, repro)
# ## now let's summarize the results into a table
df1, df2 = sum_table(sample_size_1, n_dim, num_rep, plot_on)
df1
df2
# ### case #3: $n = 200, p =100$
func_simul(sample_size_2, n_dim, n_info, n_repeat, num_rep, step_size, rnd_seed, plot_on, repro)
# ## now let's summarize the results into a table
df1, df2 = sum_table(sample_size_2, n_dim, num_rep, plot_on)
df1
df2
# ## Finally, let's produce the result into an HTML file
# Shell escapes: remove any stale export, then convert this notebook to HTML.
# !rm -rf Simul_1a.html
# !jupyter nbconvert --to html Simul_1a.ipynb
| Section_3.1_simul_1/Simul_1a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load the Illinois building inventory; treat 0 as missing for the three
# columns where 0 is a placeholder rather than a real value.
buildings = pd.read_csv('https://uiuc-ischool-dataviz.github.io/is445_AOUAOG_fall2021/week02/data/building_inventory.csv',
                        na_values={'Square Footage':0,
                                   'Year Acquired':0,
                                   'Year Constructed':0})
# I immediately started out with using the na_values function so that all data can be filtered from here on.
# TOTAL SQUARE FOOTAGE AS A FUNCTION OF BUILDING STATUS
# +
#buildings.loc[buildings["Year Acquired"]=="Year Constructed"]
#This code gave me 0 rows x 22 columns, so there was no data to visualize
# -
#buildings.plot(x="Square Footage", y="Bldg Status", figsize=(20,4), rot=90)
#If the x and y are flipped like above, I get an error message
# Plot square footage against building status (categorical x-axis).
buildings.plot(x="Bldg Status", y="Square Footage", figsize=(20,4), rot=90)
# I was quickly able to recognize the code necessary for this visualization. I used the one from class that displayed square footage and address. At first, I flipped the x and y accidentally, which gave me an error message. Here, I learned that the x should be "Bldg Status" and y should be "Square Footage" in order for this function to work.
# MEDIAN SQUARE FOOTAGE IN EACH YEAR CONSTRUCTED AS A FUNCTION OF YEAR ACQUIRED
# Per-construction-year summary statistics of square footage
# (describe() yields count/mean/std/min/25%/50%/75%/max columns).
stats = buildings.groupby("Year Constructed")["Square Footage"].describe()
type(stats)
# +
# Max, min and median (the '50%' column) square footage by year.
fig, ax = plt.subplots(figsize=(25,5))
ax.plot(stats['max'], marker='.')
ax.plot(stats['min'], marker='.')
ax.plot(stats['50%'], marker='.')
ax.set_xlabel('Year')
ax.set_ylabel('Square Footage')
# log scale: values span several orders of magnitude
ax.set_yscale('log')
plt.show()
# +
# Median-only version of the plot above.
fig, ax = plt.subplots(figsize=(25,5))
ax.plot(stats['50%'], marker='.')
ax.set_xlabel('Year')
ax.set_ylabel('Median Square Footage')
ax.set_yscale('log')
plt.show()
# -
# I had much more trouble with this visualization. I didn't understand the difference between comparing the square footage of year acquired and year constructed as opposed to "a function of year acquired". I used the stats function we learned from class because I knew I had to visualize the median of the data. At first I had the max, min, and median, then I simplified it down to the visualization above to only have the median.
# RELATIONSHIP BETWEEN THE YEAR ACQUIRED AND THE YEAR CONSTRUCTED
# Same summary statistics, now grouped by the year the building was acquired.
stats2 = buildings.groupby("Year Acquired")["Square Footage"].describe()
# +
# Overlay max/min/median for both groupings (constructed vs acquired).
fig, ax = plt.subplots(figsize=(25,5))
ax.plot(stats['max'], marker='.')
ax.plot(stats['min'], marker='.')
ax.plot(stats['50%'], marker='.')
ax.plot(stats2['max'], marker='.')
ax.plot(stats2['min'], marker='.')
ax.plot(stats2['50%'], marker='.')
ax.set_xlabel('Year')
ax.set_ylabel('Square Footage')
ax.set_yscale('log')
plt.show()
# +
# Compare the MEAN square footage by year constructed vs year acquired.
fig, ax = plt.subplots(figsize=(25,5))
# Label each line so a legend can distinguish the two groupings.
ax.plot(stats['mean'], marker='.', label='Year Constructed')
ax.plot(stats2['mean'], marker='.', label='Year Acquired')
ax.set_xlabel('Year')
# Bug fix: this plot shows the mean (see the two lines above), but the
# label previously said "Median Square Footage".
ax.set_ylabel('Mean Square Footage')
ax.set_yscale('log') # our y-scale is a large range: 10 sqrt ft up to 10^6 sqrt ft, so we want to use a log here
ax.legend()
plt.show()
# -
# I took what I knew from the previous visualization process, and I experimented with putting two stats on the same graph in order to visualize the square footage data from the year acquired and year constructed. I also switched median to mean because the mean will better display the relationship between the year groups. I would want to learn a way to label the lines (perhaps on a legend) because it is not easy to tell which line represents the data from the year acquired vs the year constructed.
# CLOSING THOUGHTS
# I enjoyed learning how to categorize data to fit the needs of my visualization. I think my first and last visualizations were more successful in displaying the given data than my second. I'm still unsure about how I'd change the second visualization to fit the "function of year acquired" aspect. And more generally, I'm a bit confused about which visualizations are matplotlib and which are pandas.
| rosas-mayra-homework3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from pathlib import Path
from PIL import Image
import scipy as sp
import numpy as np
import imageio
from matplotlib.image import imsave
# -
def minmaxnorm(img):
    """Min-max normalize an image into [0, 1].

    The min/max are taken over the spatial axes (0, 1), so for an HxWxC
    array each channel is normalized independently.

    Bug fix: guard against zero dynamic range (pmax == pmin), which
    previously divided by zero and produced NaN/inf output; constant
    channels now map to 0 instead.
    """
    pmin = np.amin(img, axis=(0, 1))
    pmax = np.amax(img, axis=(0, 1))
    span = pmax - pmin
    # replace zero spans with 1 so (img - pmin) / span is 0 there
    span = np.where(span == 0, 1, span)
    return (img - pmin) / span
# +
# Compute per-frame difference images between original and adversarial
# videos and count how many frames of each video were perturbed.
rootdir = '/home/richardkxu/Documents/mmaction2/work_dirs/ircsn_ig65m_pretrained_r152_16x1x1_58e_ucf101_rgb_test_fgsme16_normInforward_correct'
num_videos = 200
num_frames = 16
amplify_coefficient = 1 # pert are multiplied by this constant
dir0 = os.path.join(rootdir, 'original-videos')
dir1 = os.path.join(rootdir, 'adv-videos')
outdir = os.path.join(rootdir, 'diff-videos')
Path(outdir).mkdir(parents=True, exist_ok=True)
# Improvement: use a with-block so pert_count.txt is closed even if an
# unexpected error escapes the loop (was a bare open()/close() pair).
with open(os.path.join(outdir, 'pert_count.txt'), 'w') as f:
    for k in range(num_videos):
        # load each video as numpy arr
        path0 = os.path.join(dir0, 'video{}'.format(k))
        path1 = os.path.join(dir1, 'video{}'.format(k))
        outpath = os.path.join(outdir, 'video{}'.format(k))
        Path(outpath).mkdir(parents=True, exist_ok=True)
        num_pert = 0
        for i in range(num_frames):
            try:
                img0 = np.array(imageio.imread(os.path.join(path0, '{}.png'.format(i)), as_gray=False, pilmode="RGB").astype(float))
                img1 = np.array(imageio.imread(os.path.join(path1, '{}.png'.format(i)), as_gray=False, pilmode="RGB").astype(float))
                # scale to [0, 1] before differencing
                img0 = img0 / 255.0
                img1 = img1 / 255.0
                diff_img = img0 - img1
                # treat a rounded-to-4-decimals norm of 0 as "no perturbation"
                x_diff_norm = np.sign(np.round(np.linalg.norm(diff_img), decimals=4))
                #if np.sum(np.abs(diff_img)) != 0:
                if x_diff_norm != 0:
                    diff_img = minmaxnorm(diff_img)
                    diff_img *= amplify_coefficient
                    #imageio.imwrite(os.path.join(outpath, '{}.png'.format(i)), diff_img)
                    imsave(os.path.join(outpath, '{}.png'.format(i)), diff_img)
                    num_pert += 1
            # Bug fix: was a bare `except:`, which also swallows
            # KeyboardInterrupt/SystemExit; catch Exception instead.
            except Exception:
                print('failed to load video: {} frame: {}'.format(k, i))
        f.write(outpath + ': ' + str(num_pert) + '\n')
# -
| notebooks/calc-pert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pathlib import Path
import sys, os
# Make the repository root importable so the local koebe package resolves.
path = Path(os.path.dirname(os.path.abspath('')))
if path not in sys.path:
    sys.path.append(os.path.join(path))
from koebe.algorithms.incrementalConvexHull import randomConvexHullE3
from koebe.graphics.spherical2viewer import *
from random import randint
poly = randomConvexHullE3(500) # Generate a random polyhedron from 500 random points
viewer = S2Viewer(600,600) # Create a viewer object
viewer.toggleSphere() # Hide the unit sphere
viewer.addAll(poly.edges + poly.faces) # Add the edges and faces to the viewer
# Style all of the edges black with weight 0.5
for e in poly.edges:
    viewer.setStyle(e, makeStyle(stroke="#000", strokeWeight = 0.5))
# Set each face to a random color (random hue and lightness, full saturation):
for i in range(len(poly.faces)):
    h, l = str(randint(0, 360)), str(randint(50, 75))
    viewer.setStyle(poly.faces[i], makeStyle(fill = f"hsl({h}, 100%, {l}%)"))
viewer.show() # Show the viewer
# +
from koebe.algorithms.poissonDiskSampling import slowAdaptiveDartThrowing, slowAdaptiveDartThrowingWithBoundary
from koebe.geometries.euclidean2 import PointE2
from koebe.geometries.spherical2 import PointS2
from koebe.geometries.euclidean3 import PointE3
from koebe.graphics.euclidean2viewer import UnitScaleE2Sketch, makeStyle
from koebe.graphics.spherical2viewer import S2Viewer
from koebe.algorithms.incrementalConvexHull import incrConvexHull, orientationPointE3
from random import random
import math
# The radius function determines the radius of a dart hitting the point (x, y)
radius_function = (lambda x, y: (1.0 - math.sqrt(y)) * 0.1 + 0.01)
# The adaptive sampling method takes the radius function as input to determine sample radii
samples = slowAdaptiveDartThrowingWithBoundary(radius_function, stop_count = 1000)
# Convert the samples, which are tuples of floats (x, y) in [0, 1]^2,
# to PointE2 objects rescaled into [-1, 1]^2.
samplePoints = [PointE2(2*sample[0] - 1, 2*sample[1] - 1) for sample in samples]
viewer = UnitScaleE2Sketch()
viewer.addAll(samplePoints)
viewer.show()
# +
# Lift the planar samples onto the sphere via stereographic projection.
s2points = [PointS2.sgProjectFromPointE2(p) for p in samplePoints]
# Jitter each point slightly off the unit sphere so the hull is well-defined
# (no exactly coplanar/cospherical degeneracies).
pts = [(sp.directionE3.vec * (1 + random() / 10000)).toPointE3() for sp in s2points]
# Build the convex hull; the extra south-pole point is removed afterwards,
# leaving its star as the outer face of the mesh.
mesh = incrConvexHull(pts + [PointE3(0,0,-1)], orientationPointE3)
mesh.outerFace = mesh.verts[-1].remove()
viewer = S2Viewer()
viewer.toggleSphere()
viewer.addAll(pts)
viewer.addAll(mesh.edges)
viewer.addAll(mesh.faces)
viewer.show()
# -
# Project the spherical mesh back down to the plane and display it in 2D.
mesh2 = mesh.duplicate(
    vdata_transform = (lambda v : PointS2(v.x, v.y, v.z).sgProjectToPointE2())
)
viewer = UnitScaleE2Sketch()
viewer.addAll(mesh2.verts)
viewer.addAll(mesh2.edges)
viewer.show()
| CS 480 Computational Geometry/01 - Examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src='./img/LogoWekeo_Copernicus_RGB_0.png' alt='' align='centre' width='30%'></img>
#
#
# ## First look at "basic" altimetry data for Sentinel-3, SRAL ##
# Version: 3.0
# Date: 13/07/2020
# Author: <NAME> (InnoFlair, Plymouth Marine Laboratory) and <NAME> (EUMETSAT)
# Credit: This code was developed for EUMETSAT under contracts for the European Commission Copernicus
# programme.
# License: This code is offered as open source and free-to-use in the public domain,
# with no warranty, under the MIT license associated with this code repository.
# As part of Copernicus, EUMETSAT operates the Surface Topography Mission (STM) on Sentinel-3. The mission consists of a package of instruments including the Synthetic Aperture radar altimetry (SRAL), Microwave Radiometer (MWR) and DORIS positioning antenna. Here we will look at the waveforms provided by this package of instruments.
#
# For this notebook we will use Level-2 altimetry data from Sentinel-3. The main point of this exercise is to show you how altimetry data 'looks', and especially to highlight that altimetry data are not (for now) imagery or swath data. Thus you won't have a native grid of pixels, but a 'track' of data. Waveforms are the fundamental signal that altimeters receive, and from which they derive all their information about the ocean surface. The shape of a waveform is determined by how a radar pulse interacts with the Earth's surface. Much more information on waveforms can be found <a href = "http://www.altimetry.info/radar-altimetry-tutorial/how-altimetry-works/from-radar-pulse-to-altimetry-measurements/">here</a>.
#
# <div class="alert alert-block alert-warning">
# <b>Get the WEkEO User credentials</b>
# <hr>
# If you want to download the data to use this notebook, you will need WEkEO User credentials. If you do not have these, you can register <a href="https://www.wekeo.eu/web/guest/user-registration" target="_blank">here</a>.
#
# As usual, we begin by importing required libraries
# +
# to interpret paths and folder for any OS
import os, sys
# math library
import numpy as np
# reading of NetCDF data
import xarray as xr
# plotting library
import matplotlib
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import dates
# mapping library
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import json
from IPython.core.display import display, HTML
import glob
from zipfile import ZipFile
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.getcwd())),'wekeo-hda'))
import hda_api_functions as hapi
import warnings
warnings.filterwarnings('ignore')
# -
# In the box below, we will set up a few parameters to help our figure plotting later on
# +
# we will look at the plot within the notebook
# %matplotlib inline
plt.rcParams["figure.figsize"] = (16,10)
# interactive mode off: figures render only on explicit show/display
plt.ioff()
matplotlib.rcParams.update({'font.size': 16})
# Cartopy land polygons at 50m resolution for coastline/land shading.
land_resolution = '50m'
land_poly = cfeature.NaturalEarthFeature('physical', 'land', land_resolution,
                                         edgecolor='k',
                                         facecolor=cfeature.COLORS['land'])
# -
# Now we will start our script, proper. First we need to get some data.
#
# WEkEO provides access to a huge number of datasets through its **'harmonised-data-access'** API. This allows us to query the full data catalogue and download data quickly and directly onto the Jupyter Lab. You can search for what data is available <a href="https://wekeo.eu/data?view=catalogue">here</a>
#
# In order to use the HDA-API we need to provide some authentication credentials, which comes in the form of an API key and API token. In this notebook we have provided functions so you can retrieve the API key and token you need directly. You can find out more about this process in the notebook on HDA access (wekeo_harmonized_data_access_api.ipynb) that can be found in the **wekeo-hda** folder on your Jupyterlab.
#
#
# We will also define a few other parameters including where to download the data to, and if we want the HDA-API functions to be verbose. **Lastly, we will tell the notebook where to find the query we will use to find the data.** These 'JSON' queries are what we use to ask WEkEO for data. They have a very specific form, but allow us quite fine grained control over what data to get. You can find the example one that we will use here: **JSON_templates/EO_EUM_DAT_SENTINEL-3_SR_2_WAT___.json**
# +
# set this key to true to download data.
download_data = True
# This reduces the resolution of the plot to conserve memory - increasing the number gives a coarser plot
grid_factor = 3
# your WEkEO API username and password (needs to be in ' ')
# NOTE(review): avoid committing real credentials to version control
user_name = 'USERNAME'
password = 'PASSWORD'
# Generate an API key from the username/password pair via the WEkEO HDA helper
api_key = hapi.generate_api_key(user_name, password)
display(HTML('Your API key is: <b>'+api_key+'</b>'))
# +
# where the data should be downloaded to:
download_dir_path = os.path.join(os.getcwd(), 'products')
# where we can find our data query form:
JSON_query_dir = os.path.join(os.getcwd(), 'JSON_templates')
# HDA-API loud and noisy?
verbose = False

# make the output directory if required; exist_ok=True avoids the race between
# a separate os.path.exists() check and os.makedirs()
os.makedirs(download_dir_path, exist_ok=True)
# -
# Now we have set how we want the script to run, we are ready to get some data. We start this process by telling the script what kind of data we want. In this case, this is SRAL level 2 data, which has the following designation on WEkEO: **EO:EUM:DAT:SENTINEL-3:SR_2_WAT___**.
# SRAL LEVEL 2 Dataset ID (WEkEO catalogue identifier for Sentinel-3 altimetry)
dataset_id = "EO:EUM:DAT:SENTINEL-3:SR_2_WAT___"
# Here, we use this dataset_id to find the correct, locally stored JSON query file which describes the data we want. The query file is called: **JSON_templates/EO_EUM_DAT_SENTINEL-3_SR_2_WAT___.json**
#
# You can edit this query if you want to get different data, but be aware of asking for too much data - you could be here a while and might run out of space to use this data in the JupyterLab. The box below gets the correct query file.
# Locate the locally stored JSON query that describes the data request;
# the file name is derived from the dataset id with ':' mapped to '_'.
JSON_query_file = os.path.join(JSON_query_dir, dataset_id.replace(':', '_') + ".json")
if os.path.exists(JSON_query_file):
    print('Found JSON query file for '+dataset_id)
else:
    print('Query file ' + JSON_query_file + ' does not exist')
# Now we have a query, we need to launch it to WEkEO to get our data. The box below takes care of this through the following steps:
# 1. initialise our HDA-API
# 2. get an access token for our data
# 3. accepts the WEkEO terms and conditions
# 4. loads our JSON query into memory
# 5. launches our search
# 6. waits for our search results
# 7. gets our result list
# 8. downloads our data
#
# This is quite a complex process, so much of the functionality has been buried 'behind the scenes'. If you want more information, you can check out the **wekeo-hda** tool kit in the parent training directory. The code below will report some information as it runs. At the end, it should tell you that one product has been downloaded.
# Full HDA workflow: authenticate, accept T&Cs, launch the query, download.
if download_data:
    # initialise the HDA session for this dataset and download directory
    HAPI_dict = hapi.init(dataset_id, api_key, download_dir_path)
    # get an access token and accept the WEkEO terms and conditions
    HAPI_dict = hapi.get_access_token(HAPI_dict)
    HAPI_dict = hapi.acceptTandC(HAPI_dict)
    # load the query
    with open(JSON_query_file, 'r') as f:
        query = json.load(f)
    # launch job
    print('Launching job...')
    HAPI_dict = hapi.get_job_id(HAPI_dict, query)
    # check results
    print('Getting results...')
    HAPI_dict = hapi.get_results_list(HAPI_dict)
    HAPI_dict = hapi.get_order_ids(HAPI_dict)
    # download data (products arrive as .zip archives)
    print('Downloading data...')
    HAPI_dict = hapi.download_data(HAPI_dict, file_extension='.zip')
# Unzip every downloaded product archive, then delete the archive.
if download_data:
    for filename in HAPI_dict['filenames']:
        if os.path.splitext(filename)[-1] == '.zip':
            print('Unzipping file')
            try:
                with ZipFile(filename, 'r') as zipObj:
                    # Extract all the contents of zip file in current directory
                    zipObj.extractall(os.path.dirname(filename))
                # clear up the zip file
                os.remove(filename)
            except Exception:
                # narrowed from a bare "except:" so KeyboardInterrupt and
                # SystemExit still propagate instead of being swallowed
                print("Failed to unzip....")
# Resolve the path of the unzipped .SEN3 product folder.
if download_data:
    unzipped_file = HAPI_dict['filenames'][0].replace('.zip','.SEN3')
else:
    # BUG FIX: glob.glob returns a *list*, but the code below passes
    # unzipped_file to os.path.dirname/os.path.basename, which need a single
    # path string — take the first matching product and fail loudly if none.
    SEN3_matches = glob.glob(os.path.join(download_dir_path,'*SR_2_WAT*.SEN3'))
    if not SEN3_matches:
        raise FileNotFoundError('No *SR_2_WAT*.SEN3 products found in ' + download_dir_path)
    unzipped_file = SEN3_matches[0]
# Sentinel-3 SRAL level 2 data are provided in three versions (all in the same zipped folder): reduced, standard and enhanced. The nomenclature of the folder name for these products is SATELLITE_SR_level_WAT____begindate_time_enddate_time_XXX_cycle_orbit; so in the case of a Sentinel-3A level-2 altimetry product, a product would begin with the following designation:
#
# S3A_SR_2_WAT____
#
# Try the below cell on standard, reduced and enhanced file in turn. Note the differences between them (e.g. in the number of variables). The use of either of these files depends on your need and/or expertise and storage capability.
# +
#look at what is in a Sentinel-3A SRAL file folder
#data are in a folder parallel to the current one, named data
#to change from one file to the other, comment the current one, un-comment the one to test.
input_root = os.path.dirname(unzipped_file)
input_path = os.path.basename(unzipped_file)
# each .SEN3 folder holds three NetCDF versions of the same pass:
# standard, reduced and enhanced (they differ in the number of variables)
input_file = 'standard_measurement.nc'
#input_file = 'reduced_measurement.nc'
#input_file = 'enhanced_measurement.nc'
my_file = os.path.join(input_root,input_path,input_file)
# open the NetCDF file with xarray (lazy loading)
nc = xr.open_dataset(my_file)
#list the variables within the file.
for variable in nc.variables:
    print(variable)
# -
# Now we will explore the size and statistics of the file.
# +
# choose a variable at 1 Hz for now
# e.g. we choose the range in Ku band at 1 Hz
range_ku = nc.variables['range_ocean_01_ku'][:]
swh_ocean_01_ku = nc.variables['swh_ocean_01_ku'][:]
wind_speed_alt_01_ku = nc.variables['wind_speed_alt_01_ku'][:]
ssha_01_ku = nc.variables['ssha_01_ku'][:]
#flags
# quality flags for each geophysical retrieval plus surface-type classifiers
range_ocean_qual_01_ku = nc.variables['range_ocean_qual_01_ku'][:]
swh_ocean_qual_01_ku = nc.variables['swh_ocean_qual_01_ku'][:]
sig0_ocean_qual_01_ku = nc.variables['sig0_ocean_qual_01_ku'][:]
surf_class_01 = nc.variables['surf_class_01'][:]
rad_surf_type_01 = nc.variables['rad_surf_type_01'][:]
# geolocation of each 1 Hz sample along the track
lat = nc.variables['lat_01'][:]
lon = nc.variables['lon_01'][:]
#show number of points, min max
print(['array length: ', np.shape(range_ku)])
print(['minimum: ', np.min(range_ku)])
print(['maximum: ', np.max(range_ku)])
print(['mean: ', np.mean(range_ku)])
print(['mean in km: ', np.mean(range_ku)/1000])
print(['standard deviation: ', np.std(range_ku)])
#try the same on the 20 Hz(at least for the shape of the array)
range_20_ku = nc.variables['range_ocean_20_ku'][:]
#show number of points, min max
print(['array length 20Hz: ', np.shape(range_20_ku)])
# close the file now that everything we need is held in memory
nc.close()
# -
# All Sentinel-3 level 2 marine products have quality flags associated with the geophysical variables. These give us confidence in our data, or describe why we cannot make retrievals of the parameters we are interested in. The next cell loads some data from these quality flags for range, significant wave height and surface roughness, as well as surface class information.
# QC variables
# Mask (set to NaN) every 1 Hz sample whose quality flag is raised, whose
# surface class is non-zero, or whose radiometer surface type is non-zero.
# NOTE(review): flag value 0 == "good" is assumed from the product
# convention used here — confirm against the SRAL product documentation.
range_ku[range_ocean_qual_01_ku != 0.0] = np.nan
swh_ocean_01_ku[swh_ocean_qual_01_ku != 0.0] = np.nan
ssha_01_ku[range_ocean_qual_01_ku != 0.0] = np.nan
ssha_01_ku[surf_class_01 != 0.0] = np.nan
ssha_01_ku[rad_surf_type_01 != 0.0] = np.nan
wind_speed_alt_01_ku[sig0_ocean_qual_01_ku != 0.0] = np.nan
# Now lets plot our along track data (against latitude) and show where our flags suggest bad data.
# +
# Normalise the range to [0, 1] so it can share an x-axis with the flags
norm_range = range_ku - np.nanmin(range_ku)
norm_range = norm_range / np.nanmax(abs(norm_range))
# For each flag keep only the *bad* samples and assign them a distinct
# constant x-value (1.1-1.4) so the four flag series plot as separate columns
flags_range1 = range_ocean_qual_01_ku.astype(float)
flags_range1[flags_range1 != 0.0] = 1.1
flags_range1[flags_range1 == 0.0] = np.nan
flags_range2 = swh_ocean_qual_01_ku.astype(float)
flags_range2[flags_range2 != 0.0] = 1.2
flags_range2[flags_range2 == 0.0] = np.nan
flags_range3 = surf_class_01.astype(float)
flags_range3[flags_range3 != 0.0] = 1.3
flags_range3[flags_range3 == 0.0] = np.nan
flags_range4 = sig0_ocean_qual_01_ku.astype(float)
flags_range4[flags_range4 != 0.0] = 1.4
flags_range4[flags_range4 == 0.0] = np.nan
# plot normalised range against latitude with the flagged samples overlaid
fig1 = plt.figure(figsize=(10, 10), dpi=600)
ax = plt.subplot(1,1,1)
p1, = ax.plot(norm_range, lat, 'k', label='Normalised Range')
p2, = ax.plot(flags_range1, lat, 'ro',linewidth=0.0, label='Range quality flag is bad')
p3, = ax.plot(flags_range2, lat, 'bo',linewidth=0.0, label='Significant wave height quality flag is bad')
p4, = ax.plot(flags_range3, lat, 'go',linewidth=0.0, label='Surface class is not ocean')
p5, = ax.plot(flags_range4, lat, 'mo',linewidth=0.0, label='Sigma 0 quality flag is bad')
plt.xlabel('Range and flag values', fontsize=16)
plt.ylabel('Latitude', fontsize=16)
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles, labels, fontsize=8)
plt.savefig('Quality_flags')
plt.show()
# -
# In the plot we can clearly see the flags raised where the track is likely over land/ice rather than ocean.
#
# Now we will plot some variables on a map. This will clearly show the data to be a 'line' or 'track' of measurements along the orbit, and also that a file contains data from part of an orbit.
# +
fig1 = plt.figure(figsize=(20, 20), dpi=300)
#define a projection for our map
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
#vmin and vmax for the scale are truncated slightly to help viewing; the satellite is at a distance around 315 km from the surface
# clip the range to its 5th-95th percentile so outliers don't wash out the colour scale
low_pc = np.percentile(range_ku[np.isfinite(range_ku)],5)
high_pc = np.percentile(range_ku[np.isfinite(range_ku)],95)
range_ku[range_ku < low_pc] = low_pc
range_ku[range_ku > high_pc] = high_pc
# scatter the along-track samples, coloured by the clipped range
f1 = m.scatter(lon, lat, c=range_ku, cmap='Spectral_r', marker='o', edgecolors=None, linewidth=0.0, vmin=low_pc, vmax=high_pc)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
# NOTE(review): gridliner 'xlabels_top' was renamed 'top_labels' in newer
# cartopy releases — confirm the installed version still accepts this name
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07)
cbar.ax.tick_params(labelsize=16)
cbar.set_label('Range (1 Hz Ku) [m]', size=18)
plt.savefig('fig_range')
plt.show()
# +
fig1 = plt.figure(figsize=(20, 20), dpi=300)
#define a projection for our map
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
#vmin and vmax for the scale are truncated slightly to help viewing; the satellite is at a distance around 315 km from the surface
# clip the significant wave height to its 5th-95th percentile for display
low_pc = np.percentile(swh_ocean_01_ku[np.isfinite(swh_ocean_01_ku)],5)
high_pc = np.percentile(swh_ocean_01_ku[np.isfinite(swh_ocean_01_ku)],95)
swh_ocean_01_ku[swh_ocean_01_ku < low_pc] = low_pc
swh_ocean_01_ku[swh_ocean_01_ku > high_pc] = high_pc
f1 = m.scatter(lon, lat, c=swh_ocean_01_ku, cmap='Spectral_r', marker='o', edgecolors=None, linewidth=0.0, vmin=low_pc, vmax=high_pc)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07)
cbar.ax.tick_params(labelsize=16)
cbar.set_label('Significant Wave Height (1 Hz Ku) [m]', size=18)
plt.savefig('fig_SWH')
plt.show()
# +
fig1 = plt.figure(figsize=(20, 20), dpi=300)
#define a projection for our map
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
#vmin and vmax for the scale are truncated slightly to help viewing
# clip the wind speed to its 5th-95th percentile for display
low_pc = np.percentile(wind_speed_alt_01_ku[np.isfinite(wind_speed_alt_01_ku)],5)
high_pc = np.percentile(wind_speed_alt_01_ku[np.isfinite(wind_speed_alt_01_ku)],95)
wind_speed_alt_01_ku[wind_speed_alt_01_ku < low_pc] = low_pc
wind_speed_alt_01_ku[wind_speed_alt_01_ku > high_pc] = high_pc
f1 = m.scatter(lon, lat, c=wind_speed_alt_01_ku, cmap='Spectral_r', marker='o', edgecolors=None, linewidth=0.0, vmin=low_pc, vmax=high_pc)
# CONSISTENCY FIX: use the shared land_resolution/land_poly (as the other
# maps do) instead of a hard-coded '50m' and a differently styled land layer
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07)
cbar.ax.tick_params(labelsize=16)
cbar.set_label('Wind Speed (1 Hz ku) [m.s$^{-1}$]', size=18)
plt.savefig('fig_WS')
plt.show()
# +
fig1 = plt.figure(figsize=(20, 20), dpi=300)
#define a projection for our map
m = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
#vmin and vmax for the scale are truncated slightly to help viewing
# symmetric clip: bound the anomaly at +/- its 95th percentile of |SSHA|
# so the diverging colour map is centred on zero
abs_ssha_01_ku = abs(ssha_01_ku)
high_pc = np.percentile(abs_ssha_01_ku[np.isfinite(ssha_01_ku)],95)
ssha_01_ku[ssha_01_ku < high_pc*-1] = high_pc*-1
ssha_01_ku[ssha_01_ku > high_pc] = high_pc
f1 = m.scatter(lon, lat, c=ssha_01_ku, cmap='RdBu_r', marker='o', edgecolors=None, linewidth=0.0, vmin=-1*high_pc, vmax=high_pc)
m.coastlines(resolution=land_resolution, color='black', linewidth=1)
m.add_feature(land_poly)
g1 = m.gridlines(draw_labels = True)
g1.xlabels_top = False
g1.xlabel_style = {'size': 16, 'color': 'gray'}
g1.ylabel_style = {'size': 16, 'color': 'gray'}
cbar = plt.colorbar(f1, orientation="horizontal", fraction=0.05, pad=0.07)
cbar.ax.tick_params(labelsize=16)
# LABEL FIX: the quantity plotted is the anomaly, not a speed — the word
# "Speed" was a copy-paste remnant from the wind-speed plot above
cbar.set_label('Sea Surface Height Anomaly (1 Hz ku) [m]', size=18)
plt.savefig('fig_SSHA')
plt.show()
# -
# <img src='./img/all_partners_wekeo.png' alt='' align='center' width='75%'></img>
# <p style="text-align:left;">This project is licensed under the <a href="./LICENSE">MIT License</a> <span style="float:right;"><a href="https://github.com/wekeo/wekeo-jupyter-lab">View on GitHub</a> | <a href="https://www.wekeo.eu/">WEkEO Website</a> | <a href=mailto:<EMAIL>>Contact</a></span></p>
| ocean/Sentinel3/31_SRAL_Tracks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NVIDIA Inference Server MNIST Example
#
# This example shows how you can combine Seldon with the NVIDIA Inference Server. We will use a Seldon TensorRT proxy model image that will forward Seldon internal microservice prediction calls out to an external TensorRT Inference Server.
#
# The example will use the MNIST digit classification task with a pre-trained CAFFE2 model.
#
# A Seldon transformer will transform the inputs before sending to the Proxy which will forward the request to the Nvidia Inference Server.
#
# This example will:
#
# * Show the packaging of the components using S2I and a step by step local testing of these via Docker
# * Show running the example in Seldon Core on GCP with an embedded Nvidia Inference Server
#
# ## Setup
# %matplotlib inline
import requests
from random import randint,random
import json
from matplotlib import pyplot as plt
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import sys
sys.path.append("../../../notebooks")
from visualizer import get_graph
# +
def gen_image(arr):
    """Render a flattened MNIST digit (784 values in [0, 1]) as a 28x28 greyscale image.

    Returns the pyplot module so callers can chain ``.show()``.
    """
    pixels = (np.reshape(arr, (28, 28)) * 255).astype(np.uint8)
    plt.imshow(pixels, cmap=plt.cm.gray_r, interpolation='nearest')
    return plt
def download_mnist():
    """Download (or load the cached) MNIST dataset with one-hot encoded labels.

    NOTE(review): ``tensorflow.examples.tutorials`` was removed in TF 2.x;
    this helper presumably requires a TF 1.x runtime — confirm the environment.
    """
    return input_data.read_data_sets("MNIST_data/", one_hot = True)
def rest_predict_request(endpoint, data):
    """POST *data* (a numpy array) to the Seldon /predict endpoint and return the parsed JSON reply."""
    payload = {"data": {"ndarray": data.tolist()}}
    reply = requests.post(
        "http://" + endpoint + "/predict",
        data={"json": json.dumps(payload), "isDefault": True},
    )
    return reply.json()
def rest_transform_input_request(endpoint, data):
    """POST *data* (a numpy array) to the Seldon /transform-input endpoint and return the parsed JSON reply."""
    payload = {"data": {"ndarray": data.tolist()}}
    reply = requests.post(
        "http://" + endpoint + "/transform-input",
        data={"json": json.dumps(payload), "isDefault": True},
    )
    return reply.json()
def rest_transform_output_request(endpoint, data):
    """POST *data* (a numpy array) to the Seldon /transform-output endpoint and return the parsed JSON reply."""
    payload = {"data": {"ndarray": data.tolist()}}
    reply = requests.post(
        "http://" + endpoint + "/transform-output",
        data={"json": json.dumps(payload), "isDefault": True},
    )
    return reply.json()
def rest_request_ambassador(deploymentName, endpoint="localhost:8003", arr=None):
    """Send a (1, 784) tensor prediction request through Ambassador and print the HTTP reply."""
    payload = {"data": {"names": ["a", "b"],
                        "tensor": {"shape": [1, 784], "values": arr.tolist()}}}
    url = "http://" + endpoint + "/seldon/seldon/" + deploymentName + "/api/v0.1/predictions"
    response = requests.post(url, json=payload)
    print(response.status_code)
    print(response.text)
def gen_mnist_data(mnist):
    """Draw one training image from *mnist*, display it, and return it shaped (1, 784)."""
    batch_xs, _batch_ys = mnist.train.next_batch(1)
    image = batch_xs[0]
    gen_image(image).show()
    return image.reshape((1, 784))
# -
# download (or load the cached) MNIST dataset once for use by the cells below
mnist = download_mnist()
# ## Create an Nvidia Model Repository
# Fetch pretrained MNIST model ready for serving and place in model repository
# !mkdir -p tensorrt_mnist/1
# !wget -O tensorrt_mnist/1/model.plan http://seldon-public.s3.amazonaws.com/nvidia-mnist-model/model.plan
# To run your Nvidia Inference Server you will need to upload the model repository in tensorrt_mnist to a Google bucket. Follow the steps below:
# CHANGE THIS TO YOUR OWN CHOSEN GOOGLE BUCKET NAME
# %env MODEL_REPOSITORY_BUCKET=gs://seldon-inference-server-model-store
# !gsutil mb ${MODEL_REPOSITORY_BUCKET}
# !gsutil cp -r tensorrt_mnist ${MODEL_REPOSITORY_BUCKET}
# ## Create your Nvidia Inference Server
#
# For example:
#
# * Follow the steps in the [Kubeflow guide](https://www.kubeflow.org/docs/components/serving/tritoninferenceserver/) to create your Nvidia Inference Server
# * You will need to use the Google Bucket location
# ## Package and run a Transformer and Nvidia Proxy
# We will use a Seldon Transformer to remove the training-set mean values from the input features and reshape the output as the prediction comes back.
# !pygmentize MnistTransformer.py
# !s2i build . seldonio/seldon-core-s2i-python3:1.2.3-dev mnist-caffe2-transformer:0.1
# !docker run --name "mnist-transformer" -d --rm -p 5000:5000 mnist-caffe2-transformer:0.1
# Run the Seldon Nvidia Inference Server Proxy Model.
#
# ** CHANGE THE IP ADDRESS BELOW TO THAT OF YOUR RUNNING NVIDIA SERVER **
# !docker run --name "tensorrt-proxy" -d --rm -p 5001:5001 \
# -e PREDICTIVE_UNIT_SERVICE_PORT=5001 \
# -e PREDICTIVE_UNIT_PARAMETERS='[{"name":"url","type":"STRING","value":"172.16.17.32:8000"},{"name":"model_name","type":"STRING","value":"tensorrt_mnist"},{"name":"protocol","type":"STRING","value":"HTTP"}]' \
# seldonio/nvidia-inference-server-proxy:0.1
# endpoints of the two locally running Docker containers started above
TRANSFORMER_URL="localhost:5000"
PREDICTOR_URL="localhost:5001"
# In the next few steps we illustrate each step by step process and test that out on our running Docker containers.
# 1) transform the raw image via the Seldon transformer container
data = gen_mnist_data(mnist)
response = rest_transform_input_request(TRANSFORMER_URL,data)
transformed = np.array(response['data']['ndarray'])
print(transformed.shape)
# 2) run inference on the transformed tensor via the TensorRT proxy container
response = rest_predict_request(PREDICTOR_URL,transformed)
predictions = np.array(response["data"]["ndarray"])
print(predictions)
# 3) post-process the raw predictions back through the transformer
response = rest_transform_output_request(TRANSFORMER_URL,predictions)
print(response)
# !docker rm -f mnist-transformer
# !docker rm -f tensorrt-proxy
# ## Test From GCP Cluster
# ### Set up GCP Kubernetes Cluster
#
# To run the steps below you will need to:
#
# 1. Create a GCP Cluster with a GPU node pool with Nvidia V100 GPUs
# 2. Enable CUDA on the GPU nodes
# 3. Add an Image Pull Secret so you can download the Nvidia Inference Server
#
# #### Create a GCP Cluster
# This can be done from the Google console or via the command line as shown below. Change the cluster name and zones as appropriate for your setup.
#
# ```
# gcloud container clusters create myinferenceserver --num-nodes=2 --region=europe-west4-a
# gcloud config set container/cluster myinferenceserver
# gcloud container node-pools create gpu-pool --num-nodes=1 --machine-type=n1-standard-8 --accelerator type=nvidia-tesla-v100,count=1 --region=europe-west4-a
# gcloud container clusters get-credentials myinferenceserver
# ```
#
# #### Enable CUDA on GPU Nodes
#
# To enable the CUDA drivers on your GPU nodes run:
#
# ```
# kubectl apply -f https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/stable/nvidia-driver-installer/cos/daemonset-preloaded.yaml
# ```
#
# #### Create Image Pull Secret for the Nvidia Repository
#
# * [Sign up to the NVIDIA GPU Cloud and get an API Key](https://ngc.nvidia.com/signup)
# * Create a kubernetes secret
#
# !kubectl create namespace seldon
# + active=""
# %env NVIDIA_API_KEY=<your-api-key>
# %env NVIDIA_CLOUD_EMAIL=<your-email-address>
# -
# !kubectl config set-context $(kubectl config current-context) --namespace=seldon
# !kubectl create secret docker-registry ngc \
# --docker-server=nvcr.io \
# --docker-username=\$oauthtoken \
# --docker-password=${NVIDIA_API_KEY} --docker-email=${NVIDIA_CLOUD_EMAIL}
# ## Run MNIST Inference Graph
# ## Setup Seldon Core
#
# Use the setup notebook to [Setup Cluster](../../../notebooks/seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../../notebooks/seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html).
# !helm install nvidia-mnist nvidia-mnist --set tfserving.model_base_path=${MODEL_REPOSITORY_BUCKET}
# !helm template nvidia-mnist nvidia-mnist --namespace seldon --set tfserving.model_base_path=${MODEL_REPOSITORY_BUCKET} > mnist.json
# !sed '1,2d' mnist.json > tmp.json
# render the Seldon inference graph from the generated (header-stripped) manifest
get_graph("tmp.json")
# !pygmentize mnist.json
# **Port forward Ambassador**
#
# ```
# kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080
# ```
# generate a test digit, flatten it to 784 values, and send it through the
# port-forwarded Ambassador gateway to the deployed graph
data = gen_mnist_data(mnist)
data = data.reshape((784))
rest_request_ambassador("nvidia-mnist",endpoint="localhost:8003",arr=data)
# ## Analytics and Load Test
# !helm install seldon-core-analytics ../../../helm-charts/seldon-core-analytics \
# --set grafana_prom_admin_password=password \
# --set persistence.enabled=false \
# --namespace seldon
# !kubectl label nodes $(kubectl get nodes -o jsonpath='{.items[0].metadata.name}') role=locust
# !helm install loadtest seldon-core-loadtesting \
# --namespace seldon \
# --repo https://storage.googleapis.com/seldon-charts \
# --set locust.script=mnist_rest_locust.py \
# --set locust.host=http://caffe2-mnist:8000 \
# --set oauth.enabled=false \
# --set locust.hatchRate=1 \
# --set locust.clients=1 \
# --set loadtest.sendFeedback=1 \
# --set locust.minWait=0 \
# --set locust.maxWait=0 \
# --set replicaCount=1 \
# --set data.size=784
# You should port-foward the grafana dashboard
#
# ```
# kubectl port-forward $(kubectl get pods -n seldon -l app=grafana-prom-server -o jsonpath='{.items[0].metadata.name}') -n seldon 3000:3000
# ```
#
# You can then view an analytics dashboard inside the cluster at http://localhost:3000/dashboard/db/prediction-analytics?refresh=5s&orgId=1. Your IP address may be different. get it via minikube ip. Login with:
#
# Username : admin
#
# password : password (as set when starting seldon-core-analytics above)
| examples/models/nvidia-mnist/nvidia_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3
# ---
# <img src="https://github.com/pmservice/ai-openscale-tutorials/raw/master/notebooks/images/banner.png" align="left" alt="banner">
# # Working with Watson Machine Learning
# The notebook will train, create and deploy a Credit Risk model, configure OpenScale to monitor that deployment, and inject seven days' worth of historical records and measurements for viewing in the OpenScale Insights dashboard.
# ### Contents
#
# - [Setup](#setup)
# - [Model building and deployment](#model)
# - [OpenScale configuration](#openscale)
# - [Quality monitor and feedback logging](#quality)
# - [Fairness, drift monitoring and explanations](#fairness)
# - [Custom monitors and metrics](#custom)
# - [Payload analytics](#analytics)
# - [Historical data](#historical)
# # 1.0 Setup <a name="setup"></a>
# ## 1.1 Package installation
from IPython.utils import io
import warnings
warnings.filterwarnings('ignore')
# +
# !rm -rf /home/spark/shared/user-libs/python3.6*
# !pip install --upgrade numpy==1.19.2 --user --no-cache | tail -n 1
# !pip install --upgrade pandas==0.25.3 --user --no-cache | tail -n 1
# !pip install --upgrade requests==2.23 --user --no-cache | tail -n 1
# !pip install --upgrade SciPy==1.5.2 --user --no-cache | tail -n 1
# !pip install --upgrade lime==0.2.0.1 --user --no-cache | tail -n 1
# !pip install --upgrade pixiedust==1.1.18 --user --no-cache | tail -n 1
# !pip install --upgrade pyspark==2.4.0 --user --no-cache | tail -n 1
# !pip install --upgrade ibm-cloud-sdk-core==3.3.0 --user --no-cache | tail -n 1
# !pip install --upgrade ibm-watson-machine-learning==1.0.22 --user --no-cache | tail -n 1
# !pip install --upgrade ibm-watson-openscale==3.0.1 --user --no-cache | tail -n 1
# !pip uninstall watson-machine-learning-client -y | tail -n 1
# !pip uninstall watson-machine-learning-client-V4 -y | tail -n 1
# -
# ## 1.2 Configure credentials
#
# To authenticate the Watson Machine Learning service and Watson OpenScale on IBM Cloud, you need to provide a platform `api_key` and an endpoint URL. Where the endpoint URL is based on the `location` of the WML instance. To get these values you can use either the IBM Cloud CLI or the IBM Cloud UI.
#
# #### IBM Cloud CLI
#
# You can use the [IBM Cloud CLI](https://cloud.ibm.com/docs/cli/index.html) to create a platform API Key and retrieve your instance location.
#
# - To generate the Cloud API Key, run the following commands:
# ```
# ibmcloud login
# ibmcloud iam api-key-create API_KEY_NAME
# ```
# - Copy the value of `api_key` from the output.
#
#
# - To retrieve the location of your WML instance, run the following commands:
# ```
# ibmcloud login --apikey API_KEY -a https://cloud.ibm.com
# ibmcloud resource service-instance "WML_INSTANCE_NAME"
# ```
# > Note: WML_INSTANCE_NAME is the name of your Watson Machine Learning instance and should be quoted in the command.
#
# - Copy the value of `Location` from the output.
# #### IBM Cloud UI
#
# To generate Cloud API key:
# - Go to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam#/users).
# - From that page, click your name in the top right corner, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**.
# - Give your key a name and click **Create**, then copy the created key and to use it below.
#
# To retrieve the location of your WML instance:
# - Go to the [**Resources List** section of the Cloud console](https://cloud.ibm.com/resources).
# - From that page, expand the **Services** section and find your Watson Machine Learning Instance.
# - Based on the Location displayed in that page, select one of the following values for location variable:
#
# |Displayed Location|Location|
# |-|-|
# |Dallas|us-south|
# |London|eu-gb|
# |Frankfurt|eu-de|
# |Tokyo|jp-tok|
#
# Placeholder credentials: substitute your own IBM Cloud API key and the
# region ("location") of your Watson Machine Learning instance.
CLOUD_API_KEY = "<INSERT-YOUR-CLOUD-API-KEY>"
WML_LOCATION = "<INSERT-YOUR-WML-LOCATION>"  # example: "us-south"

# WML client credentials; the service endpoint URL is derived from the location
WML_CREDENTIALS = {
    "apikey": CLOUD_API_KEY,
    "url": "https://{}.ml.cloud.ibm.com".format(WML_LOCATION),
}
# In next cells, you will need to paste credentials to Cloud Object Storage. If you haven't worked with COS yet please visit [getting started with COS tutorial](https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-getting-started).
# - You can find COS_API_KEY_ID and COS_RESOURCE_CRN in the **Service Credentials** menu of your COS instance (copy the `apikey` and `resource_instance_id` respectively). The COS Service Credentials must be created with Role parameter set as Writer.
# - The COS_ENDPOINT variable can be found in Endpoint panel of your COS instance(Note: this is not the same as the `Endpoint` in the service credentials). From this page, click on one of the regions and copy the public endpoint.
# - The BUCKET_NAME can be anything you like as long as its globally unique. You an use the suggested value, appended with your initials and date.
#
# Later in the notebook, the training data file will be loaded to the bucket of your instance and used as training reference in subscription.
# +
# Cloud Object Storage credentials — copy from the COS "Service Credentials"
# panel (created with Writer role) and the Endpoint panel of your instance.
COS_API_KEY_ID = "<INSERT-YOUR-COS-API-KEY>"
COS_RESOURCE_CRN = "<INSERT-YOUR-COS-RESOURCE-CRN>"
COS_ENDPOINT = "<INSERT-YOUR-COS-ENDPOINT>" #Example: "https://s3.us.cloud-object-storage.appdomain.cloud"
# bucket name must be globally unique across COS
BUCKET_NAME = "<INSERT-YOUR-BUCKET-NAME>" #Example: "credit-risk-training-data-uniqueID"
# -
# This tutorial can use Databases for PostgreSQL, Db2 Warehouse, or a free internal verison of PostgreSQL to create a datamart for OpenScale.
#
# **For most scenarios, do not update the cell below (leave the values as they are).**
#
# If you have previously configured OpenScale, it will use your existing datamart, and not interfere with any models you are currently monitoring. Or, if you do not have a paid Cloud account or would prefer not to provision this paid service, you may use the free internal PostgreSQL service with OpenScale. Leave the values as is.
#
# Otherwise, if you want to use an external datastore as the datamart or if you previously used the internal datastore but want to delete it and create a new one:
#
# - To use a new Db2 Warehouse or Databases for PostgreSQL instance as the datamart, provision the desired service via the Cloud catalog and create a set of credentials. Copy and paste the credentials from that service into the cell below.
# - If you previously configured OpenScale to use the free internal version of PostgreSQL, you can switch to a new datamart using a paid database service.
# - If you would like to delete the internal PostgreSQL configuration and create a new one using service credentials supplied in the cell above, set the KEEP_MY_INTERNAL_POSTGRES variable below to False below. In this case, the notebook will remove your existing internal PostgreSQL datamart and create a new one with the supplied credentials. ***NO DATA MIGRATION WILL OCCUR.***
#
#
# Datamart configuration: leaving credentials/schema as None with
# KEEP_MY_INTERNAL_POSTGRES=True reuses the free internal PostgreSQL
# datamart (see the explanation in the markdown above).
DB_CREDENTIALS = None
SCHEMA_NAME = None
KEEP_MY_INTERNAL_POSTGRES = True
# ## 1.3 Set Custom Name
#
# Provide a custom name to be concatenated to create a model name, deployment name and open scale monitor name. Sample value for CUSTOM_NAME could be CUSTOM_NAME = 'JRT_WOSTest1020'
#
# **<font color='red'><< UPDATE THE VARIABLE 'CUSTOM_NAME' TO A UNIQUE NAME OF YOUR CHOOSING>></font>**
#
# unique per-user prefix used to build the model, deployment and monitor names
CUSTOM_NAME = '<INSERT-YOUR-CUSTOM-NAME-HERE>'
# ## 1.4 Run the notebook
#
# At this point, the notebook is ready to run. You can either run the cells one at a time, or click the **Kernel** option above and select **Restart and Run All** to run all the cells.
# # 2.0 Model building and deployment <a name="model"></a>
# In this section you will learn how to train Spark MLLib model and next deploy it as web-service using Watson Machine Learning service.
# ## 2.1 Load the training data
# +
# !rm german_credit_data_biased_training.csv
# Fetch the training CSV; io.capture_output() suppresses wget's progress noise.
# NOTE(review): the indented "# !wget" line is a notebook shell command — this
# cell executes correctly only when run through Jupyter/jupytext, not as a
# plain Python script.
with io.capture_output() as captured:
    # !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/german_credit_data_biased_training.csv -O german_credit_data_biased_training.csv
# !ls -lh german_credit_data_biased_training.csv
# +
from pyspark.sql import SparkSession
import pandas as pd
import json
# Read the CSV with pandas, then convert it to a Spark DataFrame for MLlib.
spark = SparkSession.builder.getOrCreate()
training_data_file_name = "german_credit_data_biased_training.csv"
pd_data = pd.read_csv(training_data_file_name, sep=",", header=0)
df_data = spark.createDataFrame(pd_data)
df_data.head()
# -
print("Number of records: " + str(df_data.count()))
# +
# 80/20 train/test split with a fixed seed (24) so the split is reproducible.
spark_df = df_data
(train_data, test_data) = spark_df.randomSplit([0.8, 0.2], 24)
print("Number of records for training: " + str(train_data.count()))
print("Number of records for evaluation: " + str(test_data.count()))
spark_df.printSchema()
# -
# ## 2.2 Save training data to COS
# +
import ibm_boto3
from ibm_botocore.client import Config, ClientError
# IBM Cloud Object Storage client, authenticated with the COS_* credentials
# defined earlier in the notebook.
cos_client = ibm_boto3.resource(
    "s3",
    ibm_api_key_id=COS_API_KEY_ID,
    ibm_service_instance_id=COS_RESOURCE_CRN,
    ibm_auth_endpoint="https://iam.bluemix.net/oidc/token",
    config=Config(signature_version="oauth"),
    endpoint_url=COS_ENDPOINT
)
# +
# Ensure the target COS bucket exists: reuse it when found, otherwise create it.
create_bucket = True
try:
    # Scan the account's buckets for one whose name matches BUCKET_NAME.
    match = next((b for b in cos_client.buckets.all() if b.name == BUCKET_NAME), None)
    if match is not None:
        print("Existing Bucket Found: {0}".format(match.name))
        create_bucket = False
except ClientError as be:
    print("Client Error: {0}\n".format(be))
except Exception as e:
    print("Unable to retrieve list buckets: {0}\n".format(e))
if create_bucket:
    print("Creating new bucket: {0}".format(BUCKET_NAME))
    try:
        cos_client.create_bucket(Bucket=BUCKET_NAME)
        print("Bucket: {0} created!".format(BUCKET_NAME))
    except ClientError as be:
        print("Client Error: {0}\n".format(be))
    except Exception as e:
        print("Unable to create bucket: {0}".format(e))
# -
# Upload the training CSV to the bucket so OpenScale can later reference it
# as the subscription's training data.
try:
    with open(training_data_file_name, "rb") as file_data:
        cos_client.Object(BUCKET_NAME, training_data_file_name).upload_fileobj(
            Fileobj=file_data
        )
except Exception as e:
    print("An exception occurred: {0}".format(e))
# ## 2.3 Create a model
# The code below will use the CUSTOM_NAME variable set earlier to create a model name and online deployment name.
# Names for the WML model and online deployment, derived from CUSTOM_NAME.
MODEL_NAME = CUSTOM_NAME + '_WOSNotebook_Model'
DEPLOYMENT_NAME = CUSTOM_NAME + '_WOSNotebook_Deployment'
# The code below creates a Random Forest Classifier with Spark, setting up string indexers for the categorical features and the label column. Finally, this notebook creates a pipeline including the indexers and the model, and does an initial Area Under ROC evaluation of the model.
# +
from pyspark.ml.feature import OneHotEncoder, StringIndexer, IndexToString, VectorAssembler
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml import Pipeline, Model
# One StringIndexer per categorical feature: encodes each string column into
# a numeric "<name>_IX" column for the classifier.
si_CheckingStatus = StringIndexer(inputCol = "CheckingStatus", outputCol = "CheckingStatus_IX")
si_CreditHistory = StringIndexer(inputCol = "CreditHistory", outputCol = "CreditHistory_IX")
si_LoanPurpose = StringIndexer(inputCol = "LoanPurpose", outputCol = "LoanPurpose_IX")
si_ExistingSavings = StringIndexer(inputCol = "ExistingSavings", outputCol = "ExistingSavings_IX")
si_EmploymentDuration = StringIndexer(inputCol = "EmploymentDuration", outputCol = "EmploymentDuration_IX")
si_Sex = StringIndexer(inputCol = "Sex", outputCol = "Sex_IX")
si_OthersOnLoan = StringIndexer(inputCol = "OthersOnLoan", outputCol = "OthersOnLoan_IX")
si_OwnsProperty = StringIndexer(inputCol = "OwnsProperty", outputCol = "OwnsProperty_IX")
si_InstallmentPlans = StringIndexer(inputCol = "InstallmentPlans", outputCol = "InstallmentPlans_IX")
si_Housing = StringIndexer(inputCol = "Housing", outputCol = "Housing_IX")
si_Job = StringIndexer(inputCol = "Job", outputCol = "Job_IX")
si_Telephone = StringIndexer(inputCol = "Telephone", outputCol = "Telephone_IX")
si_ForeignWorker = StringIndexer(inputCol = "ForeignWorker", outputCol = "ForeignWorker_IX")
# -
# Index the label column ("Risk"/"No Risk") and prepare the inverse mapping
# used to turn numeric predictions back into string labels.
si_Label = StringIndexer(inputCol="Risk", outputCol="label").fit(spark_df)
label_converter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=si_Label.labels)
# Assemble the indexed categorical columns plus the numeric columns into one
# feature vector. Fix: the original list contained "LoanDuration" twice, so
# that column was included in the feature vector twice.
va_features = VectorAssembler(
    inputCols=["CheckingStatus_IX", "CreditHistory_IX", "LoanPurpose_IX", "ExistingSavings_IX",
               "EmploymentDuration_IX", "Sex_IX", "OthersOnLoan_IX", "OwnsProperty_IX",
               "InstallmentPlans_IX", "Housing_IX", "Job_IX", "Telephone_IX", "ForeignWorker_IX",
               "LoanDuration", "LoanAmount", "InstallmentPercent", "CurrentResidenceDuration",
               "Age", "ExistingCreditsCount", "Dependents"],
    outputCol="features")
# +
from pyspark.ml.classification import RandomForestClassifier
# Random forest wired into a pipeline: feature/label indexers -> vector
# assembler -> classifier -> label converter; fit on the training split.
classifier = RandomForestClassifier(featuresCol="features")
pipeline = Pipeline(stages=[si_CheckingStatus, si_CreditHistory, si_EmploymentDuration, si_ExistingSavings, si_ForeignWorker, si_Housing, si_InstallmentPlans, si_Job, si_LoanPurpose, si_OthersOnLoan,\
si_OwnsProperty, si_Sex, si_Telephone, si_Label, va_features, classifier, label_converter])
model = pipeline.fit(train_data)
# -
# ## 2.4 Evaluate Model
# +
# Evaluate the fitted pipeline on the held-out test split.
# NOTE(review): rawPredictionCol is set to the thresholded "prediction" column
# rather than "rawPrediction"/"probability", so the AUC here is computed from
# hard labels — confirm this is intended.
predictions = model.transform(test_data)
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction", metricName="areaUnderROC")
area_under_curve = evaluatorDT.evaluate(predictions)
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction", metricName="areaUnderPR")
area_under_PR = evaluatorDT.evaluate(predictions)
print("areaUnderROC = %g" % area_under_curve, "areaUnderPR = %g" % area_under_PR)
# -
from sklearn.metrics import classification_report
# Map numeric Spark predictions back to string labels. Assumes StringIndexer
# (frequency-ordered) mapped the majority class "No Risk" -> 0.0 and
# "Risk" -> 1.0 — TODO(review): confirm against si_Label.labels.
y_pred = predictions.toPandas()['prediction']
y_pred = ['Risk' if pred == 1.0 else 'No Risk' for pred in y_pred]
y_test = test_data.toPandas()['Risk']
# Fix: classification_report pairs target_names with the sorted unique labels
# (['No Risk', 'Risk'] alphabetically), so the original
# target_names=['Risk', 'No Risk'] attached each display name to the wrong
# row. Pass labels explicitly so names and rows line up.
print(classification_report(y_test, y_pred, labels=['No Risk', 'Risk'], target_names=['No Risk', 'Risk']))
# ## 2.5 Publish the model
# In this section, the notebook uses Watson Machine Learning to save the model (including the pipeline) to the WML instance. Previous versions of the model are removed so that the notebook can be run again, resetting all data for another demo.
# +
import json
from ibm_watson_machine_learning import APIClient
# Watson Machine Learning client, authenticated with the WML_CREDENTIALS
# defined earlier in the notebook.
wml_client = APIClient(WML_CREDENTIALS)
wml_client.version
# -
# ### 2.5.1 Set default space
# In order to deploy a model, you have to create a deployment space and deploy your model there. You can list all the spaces using the .list() function, or you can create new spaces from the menu in the top left corner --> Analyze --> Analytics deployments --> New Deployment Space. Once you know which space you want to deploy in, simply use the GUID of that space as the argument to the .set.default_space() function below.
#
wml_client.spaces.list()
# **<font color='red'><< UPDATE THE VARIABLE 'DEPLOYMENT_SPACE_NAME' TO THE NAME OF THE DEPLOYMENT SPACE YOU CREATED PREVIOUSLY>></font>**
#
# You should copy the name of your deployment space from the output of the previous cell to the variable in the next cell. The deployment space ID will be looked up based on the name specified below. If you do not receive a space GUID as an output to the next cell, do not proceed until you have created a deployment space.
DEPLOYMENT_SPACE_NAME = "<INSERT_YOUR_DEPLOYMENT_SPACE_NAME_FROM_ABOVE_OUTPUT>"
# +
# Resolve the deployment space GUID from its name. If several spaces share
# the same name, the last match wins.
wml_client.spaces.list()
all_spaces = wml_client.spaces.get_details()['resources']
space_id = None
for space in all_spaces:
    if space['entity']['name'] == DEPLOYMENT_SPACE_NAME:
        space_id = space["metadata"]["id"]
        print("\nDeployment Space ID: ", space_id)
if space_id is None:
    print("WARNING: Your space does not exist. Create a deployment space before proceeding to the next cell.")
    #space_id = client.spaces.store(meta_props={client.spaces.ConfigurationMetaNames.NAME: space_name})["metadata"]["guid"]
# -
# Make the resolved space the default target for all subsequent WML calls.
WML_SPACE_ID = space_id
wml_client.set.default_space(WML_SPACE_ID)
# ### 2.5.2 Remove existing model and deployment
# Remove any previous deployment (and its model) with the same deployment
# name so the notebook can be re-run from scratch.
wml_client.repository.list_models()
wml_client.deployments.list()
try:
    deployments_list = wml_client.deployments.get_details()
    for deployment in deployments_list["resources"]:
        model_id = deployment["entity"]["asset"]["id"]
        deployment_id = deployment["metadata"]["id"]
        if deployment["metadata"]["name"] == DEPLOYMENT_NAME:
            print("Deleting deployment id", deployment_id)
            wml_client.deployments.delete(deployment_id)
            print("Deleting model id", model_id)
            wml_client.repository.delete(model_id)
    wml_client.repository.list_models()
    wml_client.deployments.list()
except Exception as e:
    print("An exception occurred: {0}".format(e))
# +
# Model metadata: Spark MLlib 2.4 model type/software spec, stored in the
# selected deployment space.
# NOTE(review): wml_client._models.ConfigurationMetaNames is a private client
# attribute and may break across ibm_watson_machine_learning versions.
software_spec_uid = wml_client.software_specifications.get_id_by_name("spark-mllib_2.4")
print("Software Specification ID: {}".format(software_spec_uid))
model_props = {
    wml_client.repository.ModelMetaNames.NAME: "{}".format(MODEL_NAME),
    wml_client._models.ConfigurationMetaNames.SPACE_UID: WML_SPACE_ID,
    wml_client.repository.ModelMetaNames.TYPE: 'mllib_2.4',
    wml_client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
}
# +
# Store the fitted pipeline model (with its training data reference) in the
# WML repository and print the resulting metadata.
published_model_details = wml_client.repository.store_model(model, model_props, training_data=df_data, pipeline=pipeline)
model_uid = wml_client.repository.get_model_uid(published_model_details)
print("Published Model Details: ")
print(json.dumps(published_model_details, indent=2))
# -
# ## 2.6 Deploy the model
# The next section of the notebook deploys the model as a RESTful web service in Watson Machine Learning. The deployed model will have a scoring URL you can use to send data to the model for predictions.
# +
# Create an online (REST web service) deployment of the stored model and
# report its id and scoring endpoint.
print("Deploying model...")
meta_props = {
    wml_client.deployments.ConfigurationMetaNames.NAME: DEPLOYMENT_NAME,
    wml_client.deployments.ConfigurationMetaNames.ONLINE: {}
}
deployment = wml_client.deployments.create(model_uid, meta_props=meta_props)
deployment_uid = wml_client.deployments.get_uid(deployment)
scoring_url = wml_client.deployments.get_scoring_href(deployment)
print("Model id: {}".format(model_uid))
print("Deployment id: {}".format(deployment_uid))
print("Scoring URL:{}".format(scoring_url))
# -
wml_client.repository.list_models()
wml_client.deployments.list()
# ### 2.6.1 Call the model
# +
# Smoke-test the deployment by scoring two sample records.
fields = ["CheckingStatus", "LoanDuration", "CreditHistory", "LoanPurpose", "LoanAmount", "ExistingSavings",
          "EmploymentDuration", "InstallmentPercent", "Sex", "OthersOnLoan", "CurrentResidenceDuration",
          "OwnsProperty", "Age", "InstallmentPlans", "Housing", "ExistingCreditsCount", "Job", "Dependents",
          "Telephone", "ForeignWorker"]
values = [
    ["no_checking", 13, "credits_paid_to_date", "car_new", 1343, "100_to_500", "1_to_4", 2, "female", "none", 3,
     "savings_insurance", 46, "none", "own", 2, "skilled", 1, "none", "yes"],
    ["no_checking", 24, "prior_payments_delayed", "furniture", 4567, "500_to_1000", "1_to_4", 4, "male", "none",
     4, "savings_insurance", 36, "none", "free", 2, "management_self-employed", 1, "none", "yes"],
]
scoring_payload = {"input_data": [{"fields": fields, "values": values}]}
predictions = wml_client.deployments.score(deployment_uid, scoring_payload)
predictions
# -
# # 3.0 Configure OpenScale <a name="openscale"></a>
# The notebook will now import the necessary libraries and set up a Python OpenScale client.
# +
# OpenScale client library plus the supporting request/response classes used
# for datamart, subscription and monitor configuration below.
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson_openscale import *
from ibm_watson_openscale.supporting_classes.enums import *
from ibm_watson_openscale.supporting_classes import *
from ibm_watson_openscale.base_classes.watson_open_scale_v2 import *
import time
# +
# Watson OpenScale client authenticated with the Cloud API key. Note this
# APIClient comes from the ibm_watson_openscale star import above, not WML's.
authenticator = IAMAuthenticator(apikey=CLOUD_API_KEY)
wos_client = APIClient(authenticator=authenticator)
# Alternative: pin a specific OpenScale service instance.
#wos_client = APIClient(
#    service_instance_id="xxxxxxxxxxxxxxxxxxxx",
#    authenticator=authenticator
#)
wos_client.version
# -
# ## 3.1 Create datamart
# ### 3.1.1 Set up datamart
# Watson OpenScale uses a database to store payload logs and calculated metrics. If database credentials were **not** supplied above, the notebook will use the free, internal lite database. If database credentials were supplied, the datamart will be created there **unless** there is an existing datamart **and** the **KEEP_MY_INTERNAL_POSTGRES** variable is set to **True**. If an OpenScale datamart exists in Db2 or PostgreSQL, the existing datamart will be used and no data will be overwritten.
#
# Prior instances of the German Credit model will be removed from OpenScale monitoring.
# Show existing datamarts, then create one only if none exists: use the
# external database when DB_CREDENTIALS were supplied, otherwise the free
# internal database.
wos_client.data_marts.show()
# +
data_marts = wos_client.data_marts.list().result.data_marts
if len(data_marts) == 0:
    if DB_CREDENTIALS is not None:
        # Fail fast instead of creating a datamart with a None schema — the
        # original only printed a warning here and then proceeded anyway.
        if SCHEMA_NAME is None:
            raise ValueError("Please specify the SCHEMA_NAME and rerun the cell")
        print("Setting up external datamart")
        added_data_mart_result = wos_client.data_marts.add(
            background_mode=False,
            name="WOS Data Mart",
            description="Data Mart created by WOS tutorial notebook",
            database_configuration=DatabaseConfigurationRequest(
                database_type=DatabaseType.POSTGRESQL,
                credentials=PrimaryStorageCredentialsLong(
                    hostname=DB_CREDENTIALS["connection"]["postgres"]["hosts"][0]["hostname"],
                    username=DB_CREDENTIALS["connection"]["postgres"]["authentication"]["username"],
                    password=DB_CREDENTIALS["connection"]["postgres"]["authentication"]["password"],
                    db=DB_CREDENTIALS["connection"]["postgres"]["database"],
                    port=DB_CREDENTIALS["connection"]["postgres"]["hosts"][0]["port"],
                    ssl=True,
                    sslmode=DB_CREDENTIALS["connection"]["postgres"]["query_options"]["sslmode"],
                    certificate_base64=DB_CREDENTIALS["connection"]["postgres"]["certificate"]["certificate_base64"]
                ),
                location=LocationSchemaName(
                    schema_name=SCHEMA_NAME
                )
            )
        ).result
    else:
        print("Setting up internal datamart")
        added_data_mart_result = wos_client.data_marts.add(
            background_mode=False,
            name="WOS Data Mart",
            description="Data Mart created by WOS tutorial notebook",
            internal_database=True).result
    data_mart_id = added_data_mart_result.metadata.id
else:
    data_mart_id = data_marts[0].metadata.id
    print("Using existing datamart {}".format(data_mart_id))
# -
# ## 3.2 Add Service Provider
# Watson OpenScale needs to be bound to the Watson Machine Learning instance to capture payload data into and out of the model. If this binding already exists, this code will output a warning message and use the existing binding.
# Show current service providers and define the name/description of the WML
# binding this notebook will (re)create.
wos_client.service_providers.show()
SERVICE_PROVIDER_NAME = "Watson Machine Learning V2 Instance"
SERVICE_PROVIDER_DESCRIPTION = "WML Instance"
# ### 3.2.1 Remove existing service provider connected with used WML instance.
#
# Multiple service providers for the same engine instance are available in Watson OpenScale. To avoid duplicate service providers for the WML instance used in this tutorial notebook, the following code deletes any existing service provider(s) and then adds a new one.
# Delete any existing service provider with the same name so only one binding
# to this WML instance remains.
service_providers = wos_client.service_providers.list().result.service_providers
for service_provider in service_providers:
    service_instance_name = service_provider.entity.name
    if service_instance_name == SERVICE_PROVIDER_NAME:
        service_provider_id = service_provider.metadata.id
        wos_client.service_providers.delete(service_provider_id)
        print("Deleted existing service_provider for WML instance: {}".format(service_provider_id))
# ### 3.2.2 Add Service Provider
#
# Watson OpenScale needs to be bound to the Watson Machine Learning instance to capture payload data into and out of the model.
#
# **Note:** You can bind more than one engine instance if needed by calling `wos_client.service_providers.add` method. Next, you can refer to particular service provider using `service_provider_id`.
# Bind OpenScale to the WML instance (production operational space) so it can
# observe payloads flowing through deployments in WML_SPACE_ID.
added_service_provider_result = wos_client.service_providers.add(
        name=SERVICE_PROVIDER_NAME,
        description=SERVICE_PROVIDER_DESCRIPTION,
        service_type=ServiceTypes.WATSON_MACHINE_LEARNING,
        deployment_space_id = WML_SPACE_ID,
        operational_space_id = "production",
        credentials=WMLCredentialsCloud(
            apikey=CLOUD_API_KEY,
            url=WML_CREDENTIALS["url"],
            instance_id=None
        ),
        background_mode=False
    ).result
service_provider_id = added_service_provider_result.metadata.id
wos_client.service_providers.show()
# +
# Look up the deployed model assets visible to OpenScale through the new
# service provider, and fetch the deployment's asset metadata (used when
# creating the subscription below).
asset_deployment_details = wos_client.service_providers.list_assets(data_mart_id=data_mart_id, service_provider_id=service_provider_id, deployment_space_id = WML_SPACE_ID).result['resources'][0]
model_asset_details_from_deployment = wos_client.service_providers.get_deployment_asset(data_mart_id=data_mart_id,service_provider_id=service_provider_id,deployment_id=deployment_uid,deployment_space_id=WML_SPACE_ID)
all_assets_response = wos_client.service_providers.list_assets(
    data_mart_id,
    service_provider_id,
    deployment_space_id=WML_SPACE_ID
).result
print(json.dumps(all_assets_response, indent=2))
# -
# ## 3.3 Subscriptions
wos_client.subscriptions.show()
# ### 3.3.1 Remove existing credit risk subscriptions
# This code removes previous subscriptions to the Credit model to refresh the monitors with the new model and new data.
# Remove any existing subscription for this model so monitoring starts fresh.
subscriptions = wos_client.subscriptions.list().result.subscriptions
for subscription in subscriptions:
    sub_model_id = subscription.entity.asset.asset_id
    if sub_model_id == model_uid:
        wos_client.subscriptions.delete(subscription.metadata.id)
        print("Deleted existing subscription for model: {}".format(model_uid))
# ### 3.3.2 Add new credit risk model subscription.
#
# This code creates the model subscription in OpenScale using the Python client API. Note that we need to provide the model unique identifier, and some information about the model itself.
# +
# Describe the model asset for OpenScale: structured input, binary classification.
asset = Asset(
    asset_id=model_uid,
    url=deployment["entity"]["status"]["online_url"]["url"],
    name=model_asset_details_from_deployment["entity"]["asset"]["name"],
    asset_type=AssetTypes.MODEL,
    input_data_type=InputDataType.STRUCTURED,
    problem_type=ProblemType.BINARY_CLASSIFICATION
)
# The online deployment OpenScale should monitor.
asset_deployment = AssetDeploymentRequest(
    deployment_id=deployment_uid,
    name=DEPLOYMENT_NAME,
    deployment_type=DeploymentTypes.ONLINE,
    url=deployment["entity"]["status"]["online_url"]["url"]
)
# Where the training data lives (the CSV uploaded to COS earlier); OpenScale
# reads it to compute fairness and drift baselines.
training_data_reference = TrainingDataReference(
    type="cos",
    location=COSTrainingDataReferenceLocation(
        bucket=BUCKET_NAME,
        file_name=training_data_file_name
    ),
    connection=COSTrainingDataReferenceConnection.from_dict(
        {
            "resource_instance_id": COS_RESOURCE_CRN,
            "url": COS_ENDPOINT,
            "api_key": COS_API_KEY_ID,
            "iam_url": "https://iam.bluemix.net/oidc/token"
        }
    )
)
# Column roles for the subscription: label, prediction, probability, feature
# and categorical fields, plus the training data reference and the schema
# taken from the stored model's metadata.
asset_properties_request = AssetPropertiesRequest(
    label_column="Risk",
    prediction_field='predictedLabel',
    probability_fields=["probability"],
    feature_fields = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"],
    categorical_fields = ["CheckingStatus","CreditHistory","LoanPurpose","ExistingSavings","EmploymentDuration","Sex","OthersOnLoan","OwnsProperty","InstallmentPlans","Housing","Job","Telephone","ForeignWorker"],
    training_data_reference=training_data_reference,
    training_data_schema=SparkStruct.from_dict(model_asset_details_from_deployment["entity"]["asset_properties"]["training_data_schema"])
)
# -
# Create the subscription tying datamart, service provider, asset and
# deployment together.
subscription_details = wos_client.subscriptions.add(
    data_mart_id=data_mart_id,
    service_provider_id=service_provider_id,
    asset=asset,
    deployment=asset_deployment,
    asset_properties=asset_properties_request).result
subscription_id = subscription_details.metadata.id
print(subscription_details)
# ### 3.3.3 Check Payload Logging Dataset
# Give OpenScale a moment to finish provisioning the payload logging data set.
time.sleep(5)
# Look up the payload-logging data set attached to the new subscription.
# Fix: guard against an empty result so the None check below is actually
# reachable — the original indexed [0] unconditionally, which would raise
# IndexError before its own None check could ever fire.
payload_data_sets = wos_client.data_sets.list(type=DataSetTypes.PAYLOAD_LOGGING,
                                              target_target_id=subscription_id,
                                              target_target_type=TargetTypes.SUBSCRIPTION).result.data_sets
payload_data_set_id = payload_data_sets[0].metadata.id if payload_data_sets else None
if payload_data_set_id is None:
    print("Payload data set not found. Please check subscription status.")
else:
    print("Payload data set id: {}".format(payload_data_set_id))
wos_client.data_sets.show()
wos_client.subscriptions.show()
# ### 3.3.4 Score the model so we can configure monitors
#
# Now that the WML service has been bound and the subscription has been created, we need to send a request to the model before we configure OpenScale. This allows OpenScale to create a payload log in the datamart with the correct schema, so it can capture data coming into and out of the model. First, the code gets the model deployment's endpoint URL, and then sends a few records for predictions.
# +
# Send eight records to the deployment so OpenScale can capture payloads with
# the correct schema before monitors are configured.
fields = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration",
          "InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans",
          "Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"]
values = [
  ["no_checking",13,"credits_paid_to_date","car_new",1343,"100_to_500","1_to_4",2,"female","none",3,"savings_insurance",46,"none","own",2,"skilled",1,"none","yes"],
  ["no_checking",24,"prior_payments_delayed","furniture",4567,"500_to_1000","1_to_4",4,"male","none",4,"savings_insurance",36,"none","free",2,"management_self-employed",1,"none","yes"],
  ["0_to_200",26,"all_credits_paid_back","car_new",863,"less_100","less_1",2,"female","co-applicant",2,"real_estate",38,"none","own",1,"skilled",1,"none","yes"],
  ["0_to_200",14,"no_credits","car_new",2368,"less_100","1_to_4",3,"female","none",3,"real_estate",29,"none","own",1,"skilled",1,"none","yes"],
  ["0_to_200",4,"no_credits","car_new",250,"less_100","unemployed",2,"female","none",3,"real_estate",23,"none","rent",1,"management_self-employed",1,"none","yes"],
  ["no_checking",17,"credits_paid_to_date","car_new",832,"100_to_500","1_to_4",2,"male","none",2,"real_estate",42,"none","own",1,"skilled",1,"none","yes"],
  ["no_checking",33,"outstanding_credit","appliances",5696,"unknown","greater_7",4,"male","co-applicant",4,"unknown",54,"none","free",2,"skilled",1,"yes","yes"],
  ["0_to_200",13,"prior_payments_delayed","retraining",1375,"100_to_500","4_to_7",3,"male","none",3,"real_estate",37,"none","own",2,"management_self-employed",1,"none","yes"]
]
payload_scoring = {"input_data": [{"fields": fields, "values": values}]}
predictions = wml_client.deployments.score(deployment_uid, payload_scoring)
for pred in predictions["predictions"][0]["values"]:
    print("Scoring result: {}".format(pred[-1]))  # last item in the values array is the prediction for a Spark classification model
# -
# #### 3.3.4.1 Validate Payload Logging
#
# Check that automatic storing of scoring payloads to the payload logging table occurred. If the payload logging table does not have the expected number of records, we can manually store the records into the payload logging table.
#
# The number of records in the payload logging table should be equal to the number of records submitted in the previous cell (i.e., 8).
# +
import uuid
from ibm_watson_openscale.supporting_classes.payload_record import PayloadRecord
# Wait briefly for automatic payload logging to land, then count records.
time.sleep(5)
pl_records_count = wos_client.data_sets.get_records_count(payload_data_set_id)
print("Number of records in the payload logging table: {}".format(pl_records_count))
# -
# If automatic logging did not happen, store the payload records explicitly.
if pl_records_count == 0:
    print("Payload logging did not happen, performing explicit payload logging.")
    wos_client.data_sets.store_records(data_set_id=payload_data_set_id, request_body=[PayloadRecord(
        scoring_id=str(uuid.uuid4()),
        request=payload_scoring,
        response=predictions,
        response_time=460
    )])
    time.sleep(5)
    pl_records_count = wos_client.data_sets.get_records_count(payload_data_set_id)
    print("Number of records in the payload logging table: {}".format(pl_records_count))
# # 4.0 Quality monitoring and feedback logging <a name="quality"></a>
wos_client.monitor_instances.show()
# ## 4.1 Enable quality monitoring
# The code below waits ten seconds to allow the payload logging table to be set up before it begins enabling monitors. First, it turns on the quality (accuracy) monitor and sets an alert threshold of 70%. OpenScale will show an alert on the dashboard if the model accuracy measurement (area under the curve, in the case of a binary classifier) falls below this threshold.
#
# The second paramater supplied, min_records, specifies the minimum number of feedback records OpenScale needs before it calculates a new measurement. The quality monitor runs hourly, but the accuracy reading in the dashboard will not change until an additional 50 feedback records have been added, via the user interface, the Python client, or the supplied feedback endpoint.
# +
# Enable the quality monitor: requires at least 50 feedback records per
# evaluation and alerts when area under ROC drops below 0.7.
time.sleep(10)
target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
parameters = {
    "min_feedback_data_size": 50
}
thresholds = [
    MetricThresholdOverride(
        metric_id="area_under_roc",
        type=MetricThresholdTypes.LOWER_LIMIT,
        value=0.7
    )
]
quality_monitor_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.QUALITY.ID,
    target=target,
    parameters=parameters,
    thresholds=thresholds
).result
quality_monitor_instance_id = quality_monitor_details.metadata.id
print("Quality Monitor ID: {}".format(quality_monitor_instance_id))
# -
# ## 4.2 Feedback logging
# The code below downloads and stores enough feedback data to meet the minimum threshold so that OpenScale can calculate a new accuracy measurement. It then kicks off the accuracy monitor. The monitors run hourly, or can be initiated via the Python API, the REST API, or the graphical user interface.
# +
# Download additional labeled feedback records (commented shell magics).
# !rm additional_feedback_data_v2.json
with io.capture_output() as captured:
    # !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/additional_feedback_data_v2.json -O additional_feedback_data_v2.json
# !ls -lh additional_feedback_data_v2.json
# +
# Look up the feedback data set created when the quality monitor was enabled.
# Fix: guard against an empty result so the None check below is actually
# reachable — the original indexed data_sets[0] unconditionally, which would
# raise IndexError before its own None check could ever fire.
feedback_dataset = wos_client.data_sets.list(type=DataSetTypes.FEEDBACK,
                                             target_target_id=subscription_id,
                                             target_target_type=TargetTypes.SUBSCRIPTION).result
feedback_dataset_id = feedback_dataset.data_sets[0].metadata.id if feedback_dataset.data_sets else None
if feedback_dataset_id is None:
    print("Feedback data set not found. Please check quality monitor status.")
# -
# Load the downloaded feedback records and store them in the feedback data
# set; the record counts before/after show how many were added.
with open('additional_feedback_data_v2.json') as feedback_file:
    additional_feedback_data = json.load(feedback_file)
wos_client.data_sets.get_records_count(data_set_id=feedback_dataset_id)
wos_client.data_sets.store_records(feedback_dataset_id, request_body=additional_feedback_data, background_mode=False)
wos_client.data_sets.get_records_count(data_set_id=feedback_dataset_id)
# ## 4.3 Run Quality Monitor
# Trigger an on-demand quality monitor evaluation and show its metrics.
run_details = wos_client.monitor_instances.run(monitor_instance_id=quality_monitor_instance_id, background_mode=False).result
wos_client.monitor_instances.show_metrics(monitor_instance_id=quality_monitor_instance_id)
# ## 4.4 Check Monitors
#
# We can show which monitors are currently enabled, at this point, it should only be the Quality Monitor
# At this point only the quality monitor (plus any built-ins) should be enabled.
wos_client.monitor_instances.show()
wos_client.data_sets.show()
# # 5.0 Fairness, drift monitoring and explanations
# <a name="fairness"></a>
# The code below configures fairness monitoring for our model. It turns on monitoring for two features, Sex and Age. In each case, we must specify:
# * Which model feature to monitor
# * One or more **majority** groups, which are values of that feature that we expect to receive a higher percentage of favorable outcomes
# * One or more **minority** groups, which are values of that feature that we expect to receive a higher percentage of unfavorable outcomes
# * The threshold at which we would like OpenScale to display an alert if the fairness measurement falls below (in this case, 95%)
#
# Additionally, we must specify which outcomes from the model are favourable outcomes, and which are unfavourable. We must also provide the number of records OpenScale will use to calculate the fairness score. In this case, OpenScale's fairness monitor will run hourly, but will not calculate a new fairness rating until at least 100 records have been added. Finally, to calculate fairness, OpenScale must perform some calculations on the training data, so we provide the dataframe containing the data.
# ## 5.1 Enable Fairness Monitoring
# +
# Enable fairness monitoring on Sex (reference group: male) and Age
# (reference group: 26-75). "No Risk" is the favourable outcome; at least
# 100 records are needed per evaluation; alert below 95% fairness.
target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
parameters = {
    "features": [
        {"feature": "Sex",
         "majority": ['male'],
         "minority": ['female'],
         "threshold": 0.95
        },
        {"feature": "Age",
         "majority": [[26, 75]],
         "minority": [[18, 25]],
         "threshold": 0.95
        }
    ],
    "favourable_class": ["No Risk"],
    "unfavourable_class": ["Risk"],
    "min_records": 100
}
# Per-feature lower-limit overrides for the fairness_value metric.
thresholds = [
    MetricThresholdOverride(
        metric_id="fairness_value",
        type=MetricThresholdTypes.LOWER_LIMIT,
        value=0.95,
        specific_values=[
            MetricSpecificThresholdShortObject(
                value=.95,
                applies_to = [
                    ThresholdConditionObject(
                        type = "tag",
                        value = "Sex",
                        key = "feature"
                    )
                ]
            ),
            MetricSpecificThresholdShortObject(
                value=.95,
                applies_to = [
                    ThresholdConditionObject(
                        type = "tag",
                        value = "Age",
                        key = "feature"
                    )
                ]
            )
        ]
    )
]
fairness_monitor_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.FAIRNESS.ID,
    target=target,
    parameters=parameters,
    thresholds=thresholds
).result
fairness_monitor_instance_id =fairness_monitor_details.metadata.id
print("Fairness Monitor ID: {}".format(fairness_monitor_instance_id))
#print(fairness_monitor_details)
# -
# ## 5.2 Enable Drift Monitoring
#
# We can choose to enable model and/or data drift within the config.
# Delete any existing drift monitor on this subscription before re-creating it.
monitor_instances = wos_client.monitor_instances.list().result.monitor_instances
for monitor_instance in monitor_instances:
    monitor_def_id=monitor_instance.entity.monitor_definition_id
    if monitor_def_id == "drift" and monitor_instance.entity.target.target_id == subscription_id:
        wos_client.monitor_instances.delete(monitor_instance.metadata.id, background_mode=False)
        print('Deleted existing drift monitor instance with id: ', monitor_instance.metadata.id)
# +
# Enable drift monitoring: train the drift detection model, require at least
# 100 samples per evaluation, and alert at a 5% predicted accuracy drop.
# Both model drift and data drift detection are turned on.
target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
parameters = {
    "min_samples": 100,
    "drift_threshold": 0.05,
    "train_drift_model": True,
    "enable_model_drift": True,
    "enable_data_drift": True
}
drift_monitor_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.DRIFT.ID,
    target=target,
    parameters=parameters
).result
drift_monitor_instance_id = drift_monitor_details.metadata.id
print("Drift Monitor ID: {}".format(drift_monitor_instance_id))
print(drift_monitor_details)
# -
# ## 5.3 Score the model again now that monitoring is configured
#
# This next section randomly selects 200 records from the data feed and sends those records to the model for predictions. This is enough to exceed the minimum threshold for records set in the previous section, which allows OpenScale to begin calculating fairness.
# +
# Download the scoring feed used to generate monitoring traffic
# (commented shell magics).
# !rm german_credit_feed.json
with io.capture_output() as captured:
    # !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/german_credit_feed.json -O german_credit_feed.json
# !ls -lh german_credit_feed.json
# -
# Score 200 randomly chosen records
# +
import random
# Load the scoring feed file and send a batch of 200 randomly sampled
# records to the deployed model, exceeding the monitors' min_samples.
with open('german_credit_feed.json', 'r') as scoring_file:
    scoring_data = json.load(scoring_file)
fields = scoring_data['fields']
# Sample with replacement; one random.choice call per record, as before.
values = [random.choice(scoring_data['values']) for _ in range(200)]
payload_scoring = {"input_data": [{"fields": fields, "values": values}]}
scoring_response = wml_client.deployments.score(deployment_uid, payload_scoring)
print("Single record scoring result:", scoring_response["predictions"][0]["values"][0][-1])
# +
# Give automatic payload logging a moment to catch up before checking.
time.sleep(5)
# Manually log the scoring response if it's still the original 8 records from
# before (it should be 208 at this point, i.e. 8 + the 200 scored above).
if pl_records_count == 8:
    print("Payload logging did not happen, performing explicit payload logging.")
    # response_time is in milliseconds; the value is a plausible placeholder.
    wos_client.data_sets.store_records(data_set_id=payload_data_set_id, request_body=[PayloadRecord(
        scoring_id=str(uuid.uuid4()),
        request=payload_scoring,
        response=scoring_response,
        response_time=460
    )])
pl_records_count = wos_client.data_sets.get_records_count(payload_data_set_id)
print("Number of records in the payload logging table: {}".format(pl_records_count))
# -
# >Note: The payload table should have a total of 208 records. However, this can vary depending on how many scoring requests have been sent to the model.
# ## 5.4 Run Monitors
#
# ### 5.4.1 Fairness Monitor
#
# Kick off a fairness monitor run on current data. The monitor runs hourly, but can be manually initiated using the Python client, the REST API, or the graphical user interface. We have a 10 second sleep so that the scoring of 200 payloads above can complete. NOTE: if the cell below finishes with errors, skip it and complete the notebook, then return and try again.
# Wait for the 200 scoring payloads above to finish logging before triggering
# an on-demand fairness evaluation (the monitor otherwise runs hourly).
time.sleep(10)
run_details = None
try:
    run_details = wos_client.monitor_instances.run(monitor_instance_id=fairness_monitor_instance_id, background_mode=False)
except Exception as e:
    # Best-effort: the notebook instructs the reader to re-run this cell
    # later if the evaluation fails, so we log and continue.
    print("An exception occurred: {0}".format(e))
time.sleep(10)
wos_client.monitor_instances.show_metrics(monitor_instance_id=fairness_monitor_instance_id)
# ### 5.4.2 Drift Monitor
#
# Kick off a drift monitor run on current data. The monitor runs every hour, but can be manually initiated using the Python client, the REST API.
# Trigger an on-demand drift evaluation (the monitor otherwise runs hourly).
time.sleep(5)
drift_run_details = None
try:
    drift_run_details = wos_client.monitor_instances.run(monitor_instance_id=drift_monitor_instance_id, background_mode=False)
except Exception as e:
    # Best-effort, mirroring the fairness run above: log and continue.
    print("An exception occurred: {0}".format(e))
time.sleep(5)
wos_client.monitor_instances.show_metrics(monitor_instance_id=drift_monitor_instance_id)
# ## 5.5 Configure Explainability
# Finally, we provide OpenScale with the training data to enable and configure the explainability features.
# +
# Enable the explainability monitor on this subscription; OpenScale uses the
# training data provided earlier in the notebook to build the explainers.
target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
parameters = {
    "enabled": True
}
explainability_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.EXPLAINABILITY.ID,
    target=target,
    parameters=parameters
).result
explainability_monitor_id = explainability_details.metadata.id
# -
# ### 5.5.1 Run Explanation for Sample Record
# Grab the most convenient scored transaction (the first payload record) and
# request both a LIME and a contrastive explanation for it.
pl_records_resp = wos_client.data_sets.get_list_of_records(data_set_id=payload_data_set_id, limit=1, offset=0).result
scoring_ids = [pl_records_resp["records"][0]["entity"]["values"]["scoring_id"]]
explanation_types = ["lime", "contrastive"]
print("Running explanations on scoring IDs: {}".format(scoring_ids))
post_exp_task_resp = wos_client.monitor_instances.explanation_tasks(scoring_ids=scoring_ids, explanation_types=explanation_types).result
print(post_exp_task_resp)
explanation_task_id = post_exp_task_resp.metadata.explanation_task_ids[0] #Pick up the first explanation task id from list.
print("Explanations task ID: {}".format(explanation_task_id))
# +
poll_limit = 5
def poll_explainability_task(client, epx_task_id, poll_limit=5, poll_interval=5):
    """Poll OpenScale until an explanation task finishes or we give up.

    Args:
        client: authenticated Watson OpenScale client.
        epx_task_id: id of the explanation task to poll.
        poll_limit: maximum number of "not ready" polls before giving up.
            Previously read from the module-level ``poll_limit`` global
            (value 5); taking it as a defaulted parameter keeps existing
            callers working while removing the hidden global dependency.
        poll_interval: seconds to sleep between polls.

    Returns:
        The finished explanation task object, or None if it did not
        complete within the allowed number of polls.
    """
    # As before: the task status is fetched poll_limit + 1 times in total,
    # with poll_limit sleeps in between.  Relies on the module-level
    # ``import time`` (the previous function-local import was redundant).
    for attempt in range(poll_limit + 1):
        exp_task = client.monitor_instances.get_explanation_tasks(epx_task_id).result
        if exp_task.entity.status.state == 'finished':
            return exp_task
        if attempt == poll_limit:
            break
        print("Explanation task not ready...")
        time.sleep(poll_interval)
    print("Explanation task has not completed.")
    return None
exp_resp = poll_explainability_task(wos_client, explanation_task_id)
print(exp_resp)
# -
# ## 5.6 Check Monitors
#
# We can show which monitors are currently enabled, at this point, it would be the Quality, Fairness, Drift and Explainability Monitors
wos_client.monitor_instances.show()
wos_client.data_sets.show()
# # 6.0 Custom monitors and metrics <a name="custom"></a>
# ## 6.1 Register custom monitor
def get_definition(monitor_name):
    """Look up an existing monitor definition by its display name.

    Returns the matching definition object, or None when no definition with
    that name exists yet.  Uses the module-level ``wos_client``.
    """
    all_definitions = wos_client.monitor_definitions.list().result.monitor_definitions
    return next((d for d in all_definitions if d.entity.name == monitor_name), None)
# +
# Register (or reuse) a custom monitor definition with two metrics, each with
# a default lower-limit alert threshold, plus a free-form 'region' tag.
monitor_name = CUSTOM_NAME + '_WOSNotebook_CustomMonitor'
metrics = [MonitorMetricRequest(name='sensitivity',
                                thresholds=[MetricThreshold(type=MetricThresholdTypes.LOWER_LIMIT, default=0.8)]),
           MonitorMetricRequest(name='specificity',
                                thresholds=[MetricThreshold(type=MetricThresholdTypes.LOWER_LIMIT, default=0.75)])]
tags = [MonitorTagRequest(name='region', description='customer geographical region')]
# Idempotent: only add the definition if it does not already exist.
existing_definition = get_definition(monitor_name)
if existing_definition is None:
    custom_monitor_details = wos_client.monitor_definitions.add(name=monitor_name, metrics=metrics, tags=tags, background_mode=False).result
else:
    custom_monitor_details = existing_definition
# -
wos_client.monitor_definitions.show()
custom_monitor_id = custom_monitor_details.metadata.id
print("Custom Monitor ID: {}".format(custom_monitor_id))
custom_monitor_details = wos_client.monitor_definitions.get(monitor_definition_id=custom_monitor_id).result
print("Custom monitor definition details:", custom_monitor_details)
# ## 6.2 Enable custom monitor for subscription
# +
# Enable the custom monitor on this subscription, overriding the default
# lower limit for 'sensitivity' (0.8 in the definition) to 0.9.
target = Target(
    target_type=TargetTypes.SUBSCRIPTION,
    target_id=subscription_id
)
thresholds = [MetricThresholdOverride(metric_id='sensitivity', type = MetricThresholdTypes.LOWER_LIMIT, value=0.9)]
custom_monitor_instance_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=custom_monitor_id,
    target=target,
    # BUG FIX: 'thresholds' was built above but never passed to create(),
    # so the 0.9 override was silently ignored.  The fairness monitor
    # creation earlier in this notebook passes thresholds the same way.
    thresholds=thresholds
).result
# -
# ### 6.2.1 Get monitor configuration details
custom_monitor_instance_id = custom_monitor_instance_details.metadata.id
custom_monitor_instance_details = wos_client.monitor_instances.get(custom_monitor_instance_id).result
print(custom_monitor_instance_details)
# ## 6.3 Storing custom metrics
from datetime import datetime, timezone, timedelta
from ibm_watson_openscale.base_classes.watson_open_scale_v2 import MonitorMeasurementRequest
# Push one hand-built measurement into the custom monitor.  The 'region'
# entry matches the tag declared on the monitor definition.
custom_monitoring_run_id = CUSTOM_NAME + '_WOSNotebook_CustomMonitorRun'#"11122223333111abc"
measurement_request = [MonitorMeasurementRequest(timestamp=datetime.now(timezone.utc),
                                                 metrics=[{"specificity": 0.78, "sensitivity": 0.67, "region": "us-south"}], run_id=custom_monitoring_run_id)]
print(measurement_request[0])
published_measurement_response = wos_client.monitor_instances.measurements.add(
    monitor_instance_id=custom_monitor_instance_id,
    monitor_measurement_request=measurement_request).result
published_measurement_id = published_measurement_response[0]["measurement_id"]
print(published_measurement_response)
# ### 6.3.1 Get custom metrics
# Read the measurement back to confirm it was stored.
time.sleep(5)
published_measurement = wos_client.monitor_instances.measurements.get(monitor_instance_id=custom_monitor_instance_id, measurement_id=published_measurement_id).result
print(published_measurement)
# # 7.0 Historical data <a name="historical"></a>
historyDays = 7
# ## 7.1 Insert historical debias metrics
# !rm history_debias_v2.json
with io.capture_output() as captured:
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_debias_v2.json
# !ls -lh history_debias_v2.json
# +
# Backfill hourly fairness/debias measurements for the past `historyDays`
# days so the dashboard time-series charts have data immediately.
with open('history_debias_v2.json', 'r') as history_file:
    payloads = json.load(history_file)
for day in range(historyDays):
    print('Loading day', day + 1)
    daily_measurement_requests = []
    for hour in range(24):
        # Timestamps walk backwards one hour at a time from now (UTC).
        score_time = datetime.now(timezone.utc) + timedelta(hours=(-(24*day + hour + 1)))
        index = (day * 24 + hour) % len(payloads) # wrap around and reuse values if needed
        # Each history entry holds a pair of metric payloads per hour.
        measurement_request = MonitorMeasurementRequest(timestamp=score_time,metrics = [payloads[index][0], payloads[index][1]])
        daily_measurement_requests.append(measurement_request)
    # One batched upload per day keeps the number of API calls small.
    response = wos_client.monitor_instances.measurements.add(
        monitor_instance_id=fairness_monitor_instance_id,
        monitor_measurement_request=daily_measurement_requests).result
print('Finished')
# -
# ## 7.2 Insert historical quality metrics
# +
# Backfill hourly quality (area_under_roc) measurements: one AUC value per
# day, repeated for each of that day's 24 hours.  len(measurements) matches
# historyDays (7), so measurements[day] never goes out of range.
measurements = [0.76, 0.78, 0.68, 0.72, 0.73, 0.77, 0.80]
for day in range(historyDays):
    quality_measurement_requests = []
    print('Loading day', day + 1)
    for hour in range(24):
        # NOTE(review): utcnow() is naive; appending "Z" hand-labels it as
        # UTC.  This matches the other historical loaders below, but differs
        # from the timezone-aware datetime.now(timezone.utc) used in 7.1.
        score_time = datetime.utcnow() + timedelta(hours=(-(24*day + hour + 1)))
        score_time = score_time.isoformat() + "Z"
        metric = {"area_under_roc": measurements[day]}
        measurement_request = MonitorMeasurementRequest(timestamp=score_time,metrics = [metric])
        quality_measurement_requests.append(measurement_request)
    response = wos_client.monitor_instances.measurements.add(
        monitor_instance_id=quality_monitor_instance_id,
        monitor_measurement_request=quality_measurement_requests).result
print('Finished')
# -
# ## 7.3 Insert historical confusion matrixes
# !rm history_quality_metrics.json
with io.capture_output() as captured:
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_quality_metrics.json
# !ls -lh history_quality_metrics.json
# +
from ibm_watson_openscale.base_classes.watson_open_scale_v2 import Source
# Backfill hourly confusion-matrix measurements (metrics + sources) for the
# quality monitor.
with open('history_quality_metrics.json') as json_file:
    records = json.load(json_file)
for day in range(historyDays):
    # index resets each day, so the same first 24 records are replayed for
    # every day of history — presumably intentional for demo data.
    index = 0
    cm_measurement_requests = []
    print('Loading day', day + 1)
    for hour in range(24):
        score_time = datetime.utcnow() + timedelta(hours=(-(24*day + hour + 1)))
        score_time = score_time.isoformat() + "Z"
        metric = records[index]['metrics']
        source = records[index]['sources']
        measurement_request = {"timestamp": score_time, "metrics": [metric], "sources": [source]}
        cm_measurement_requests.append(measurement_request)
        index+=1
    response = wos_client.monitor_instances.measurements.add(monitor_instance_id=quality_monitor_instance_id, monitor_measurement_request=cm_measurement_requests).result
print('Finished')
# -
# ## 7.4 Insert historical performance metrics
# +
# The performance monitor targets the payload data set (an INSTANCE target),
# not the subscription like the other monitors above.
target = Target(
    target_type=TargetTypes.INSTANCE,
    target_id=payload_data_set_id
)
performance_monitor_instance_details = wos_client.monitor_instances.create(
    data_mart_id=data_mart_id,
    background_mode=False,
    monitor_definition_id=wos_client.monitor_definitions.MONITORS.PERFORMANCE.ID,
    target=target
).result
performance_monitor_instance_id = performance_monitor_instance_details.metadata.id
# Backfill hourly throughput figures with random record counts.
for day in range(historyDays):
    performance_measurement_requests = []
    print('Loading day', day + 1)
    for hour in range(24):
        score_time = datetime.utcnow() + timedelta(hours=(-(24*day + hour + 1)))
        score_time = score_time.isoformat() + "Z"
        # Simulated hourly scoring volume between 60 and 600 records.
        score_count = random.randint(60, 600)
        metric = {"record_count": score_count, "data_set_type": "scoring_payload"}
        measurement_request = {"timestamp": score_time, "metrics": [metric]}
        performance_measurement_requests.append(measurement_request)
    response = wos_client.monitor_instances.measurements.add(
        monitor_instance_id=performance_monitor_instance_id,
        monitor_measurement_request=performance_measurement_requests).result
print('Finished')
# -
# ## 7.5 Insert historical drift measurements
# +
# !rm history_drift_measurement_*.json
with io.capture_output() as captured_0:
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_drift_measurement_0.json
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_drift_measurement_1.json
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_drift_measurement_2.json
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_drift_measurement_3.json
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_drift_measurement_4.json
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_drift_measurement_5.json
# !wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_drift_measurement_6.json
# !ls -lh history_drift_measurement_*.json
# -
# Backfill drift measurements: one JSON file per day, each holding 8 windows
# of 3 hours.  Window start/end timestamps are rewritten relative to "now"
# before upload so the history appears to end at the current time.
for day in range(historyDays):
    drift_measurements = []
    with open("history_drift_measurement_{}.json".format(day), 'r') as history_file:
        drift_daily_measurements = json.load(history_file)
    print('Loading day', day + 1)
    #Historical data contains 8 records per day - each represents 3 hour drift window.
    for nb_window, records in enumerate(drift_daily_measurements):
        for record in records:
            window_start = datetime.utcnow() + timedelta(hours=(-(24 * day + (nb_window+1)*3 + 1))) # first_payload_record_timestamp_in_window (oldest)
            window_end = datetime.utcnow() + timedelta(hours=(-(24 * day + nb_window*3 + 1)))# last_payload_record_timestamp_in_window (most recent)
            #modify start and end time for each record
            record['sources'][0]['data']['start'] = window_start.isoformat() + "Z"
            record['sources'][0]['data']['end'] = window_end.isoformat() + "Z"
            metric = record['metrics'][0]
            source = record['sources'][0]
            # The measurement is stamped with the window's start time.
            measurement_request = {"timestamp": window_start.isoformat() + "Z", "metrics": [metric], "sources": [source]}
            drift_measurements.append(measurement_request)
    response = wos_client.monitor_instances.measurements.add(
        monitor_instance_id=drift_monitor_instance_id,
        monitor_measurement_request=drift_measurements).result
print("Daily loading finished.")
# # 8.0 Additional data to help debugging
# Print the key identifiers for this configuration; useful when debugging
# via the OpenScale UI or REST API.
print("Datamart id: {}".format(data_mart_id))
print("Model id: {}".format(model_uid))
print("Deployment id: {}".format(deployment_uid))
print("Scoring URL:{}".format(scoring_url))
# # 9.0 Identify transactions for Explainability
# Transaction IDs identified by the cells below can be copied and pasted into the Explainability tab of the OpenScale dashboard.
#wos_client.data_sets.show_records(payload_data_set_id, limit=5)
# Pull a few recent payload records as a DataFrame and show the columns
# needed to look up a transaction in the Explainability tab.
pl_pd = wos_client.data_sets.get_list_of_records(data_set_id=payload_data_set_id, limit=5, output_type=ResponseTypes.PANDAS).result
df = pl_pd[['scoring_id','predictedLabel','probability']]
df.head(5)
# ## Congratulations!
#
# You have finished the hands-on lab for IBM Watson OpenScale. You can now view the OpenScale dashboard. Choose the `OpenScale` service instance and launch the application UI. Click on the tile for the model you've created to see fairness, accuracy, and performance monitors. Click on the timeseries graph to get detailed information on transactions during a specific time window.
#
# OpenScale shows model performance over time. You have two options to keep data flowing to your OpenScale graphs:
# * Download, configure and schedule the [model feed notebook](https://raw.githubusercontent.com/emartensibm/german-credit/master/german_credit_scoring_feed.ipynb). This notebook can be set up with your WML credentials, and scheduled to provide a consistent flow of scoring requests to your model, which will appear in your OpenScale monitors.
# * Re-run this notebook. Running this notebook from the beginning will delete and re-create the model and deployment, and re-create the historical data. Please note that the payload and measurement logs for the previous deployment will continue to be stored in your datamart, and can be deleted if necessary.
| notebooks/openscale-full-configuration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
import re
# Load the scraped reading list and show a sample entry before cleaning.
with open('reading.json') as json_data:
    d = json.load(json_data)
print(d["January"][0])
# -
# Month names in calendar order, kept for reference ("Octomber" typo fixed).
months = ["January","February","March","April","May","June","July","August","September","October","November","December"]
# The scraped text uses [g]/[w]/[r] markers where line breaks belong; turn
# each marker into a newline.  (Previously three separate, duplicated
# .replace() calls, plus a redundant re-replace of d["January"][2] that the
# loop had already handled.)
for month in d:
    for i, item in enumerate(d[month]):
        for marker in ("[g]", "[w]", "[r]"):
            d[month][i] = d[month][i].replace(marker, "\n")
print(d["January"][2])
# Write the cleaned structure back in place, preserving non-ASCII text.
with open('reading.json', 'w') as f:
    json.dump(d, f, ensure_ascii=False)
| www/json/.ipynb_checkpoints/Formatting lit reading-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_deep_forecast)
# language: python
# name: conda_deep_forecast
# ---
# # Quickstart Tutorial
# In this tutorial, we show how to quickly set up and train an RNN regression model with the deep4cast package. The goal is to show you how to handle the package interface and inspect the model object after training. The example data consists of a monthly 26-dimensional multivariate time series.
# +
import numpy as np
import matplotlib.pyplot as plt
from deep4cast.forecasters import Forecaster
from deep4cast.metrics import adjust_for_horizon, mape
from pandas import read_table
# %matplotlib inline
# -
# ## Data preparation
# Before we can fit a model we first have to load, clean, and prepare our dataset so that the model can accept it.
# Load the data from file
# Load the 26-dimensional monthly time series from file.
filename = 'timeseries_data.csv'
df = read_table(filename, sep=',')
# +
# Set lookback window for truncated sample time series
lag = 20 # i.e. 20 months
horizon = 1 # one-month ahead forecasting
test_fraction = 0.15
test_length = int(len(df) * test_fraction)
train_length = len(df) - test_length
# Prepare train and test set
# Shape becomes (time, channels, 1); the trailing axis is what the
# forecaster expects for each sample.
ts = df.astype('float32').values
ts = np.expand_dims(ts, axis=-1)
ts_train = ts[:-test_length]
# The test slice keeps `lag` extra months of history so the first test
# prediction has a full lookback window.
ts_test = ts[-test_length - lag:]
# -
metric = adjust_for_horizon(mape)
def criticize_model(forecaster, metric):
    """Print train and test MAPE for a fitted forecaster.

    Uses the module-level ts_train / ts_test / ts / lag / train_length that
    were prepared in the data-preparation cells above.
    """
    train_predictions = forecaster.predict(ts_train)['mean']
    test_predictions = forecaster.predict(ts_test)['mean']
    train_error = metric(train_predictions, ts[lag:train_length])
    test_error = metric(test_predictions, ts[train_length:])
    print('TRAIN \t Mean Absolute Percentage Error: {0:.1f}%'.format(train_error))
    print('TEST \t Mean Absolute Percentage Error: {0:.1f}%'.format(test_error))
# Before we do anything, we should probably quickly visualize the data.
# Plot dataset
plt.figure(figsize=(10,5))
plt.plot(ts[:train_length,:,0], 'k')
plt.plot(range(len(ts[:train_length-1,:,0]), len(ts)), ts[train_length-1:,:,0], 'r')
plt.xlabel('Month since beginning')
plt.ylabel('Quantity')
plt.title('Black: training time series; Red: testing time series')
# ## Recurrent Neural Network Forecaster
# Now that we have cleaned and prepared out dataset we can proceed to fit one of our built-in regression models. Here, we choose the truncated RNN, which means that the training example time series have finite length. The model is initalized with
# * a topology object,
# * an optimizer,
# * a lag length (lookback window),
# * a forecasting horizon,
# * a batch size that determines the number of training example fed into the optimizer per training batch,
# * a number of epochs that specifies how many training batches are processed,
# * and a learning rate.
# +
# Hyper-parameters
# Two stacked GRU layers; the first returns full sequences so the second
# can consume them, the second returns only its final state.
topology = [
    {
        'meta': {'layer_type': 'GRU', 'layer_id': 'g1', 'parent_ids': ['input']},
        'params': {'units': 128, 'return_sequences': True}
    },
    {
        'meta': {'layer_type': 'GRU', 'layer_id': 'g2', 'parent_ids': ['g1']},
        'params': {'units': 128, 'return_sequences': False}
    }
]
# Instantiate regression model
# lag/horizon come from the data-preparation cell; small batch size and 50
# epochs keep this demo quick to train.
forecaster = Forecaster(
    topology,
    optimizer='rmsprop',
    lag=lag,
    horizon=horizon,
    batch_size=8,
    epochs=50,
    lr=0.01
)
forecaster.fit(ts_train)
# -
# Let's have a peek at what the model training and testing errors look like
# Report train/test MAPE for the freshly fitted model.
criticize_model(forecaster, metric)
# Let's have a look at the history of the training loss (here mean-squared error)
# Plot training error history
plt.figure(figsize=(10,5))
plt.plot(forecaster.history.epoch, forecaster.history.history['loss'], 'r')
plt.xlabel('Epoch number')
plt.ylabel('Mean-squared training error')
# ### Predictions for truncated RNN
# Let's now take the model to make some predictions and compare visually with the test data.
ts_pred = forecaster.predict(ts_test)
ts_pred_mean = ts_pred['mean']
# +
# One small panel per series: actuals in black, predictions in green.
# 26 series in a 5-column grid needs ceil(26/5) = 6 rows.
ncols = 5
fig, axes = plt.subplots(nrows=26//ncols+1, ncols=ncols, sharex=True, sharey=True, figsize=(12, 9))
for ax, ya, yp, title in zip(axes.flat,
                             ts[train_length:,:,0].T,
                             ts_pred_mean[:,:,0].T,
                             range(1,26+1)):
    ax.plot(ya, 'k-')
    ax.plot(yp, 'g-')
    ax.set_title(title)
plt.tight_layout()
| docs/source/quickstart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing Chipotle's Data
# This time we are going to pull data directly from the internet.
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
# +
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
import numpy as np
# set this so the graphs open internally
# %matplotlib inline
# -
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv).
# ### Step 3. Assign it to a variable called chipo.
url = "https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv"
chipo = pd.read_csv(url, sep = "\t")
# ### Step 4. See the first 10 entries
chipo.head(10)
# ### Step 5. Create a histogram of the top 5 items bought
# +
# Bar chart of the five most frequently ordered item names (despite the
# markdown heading, this is a bar chart of counts, not a true histogram).
x = chipo.item_name
# Counter creates a dictionary with a count for every entry.
letter_counts = Counter(x)
# Sort by count descending; dict preserves that order for the DataFrame.
sorted_dict = dict(sorted(letter_counts.items(), key= lambda x: x[1], reverse=True))
df = pd.DataFrame.from_dict(sorted_dict, orient='index')
# Plot only the top five rows.
ax = df[0:5].plot(kind='bar')
# Set the title and labels
plt.xlabel('Items')
plt.ylabel('Number of Times Ordered')
plt.title('Most ordered Chipotle\'s Items')
# Remove the default legend since it isn't relevant for a single series.
ax.get_legend().remove()
# show the plot
plt.show()
# -
# first lets' convert the prices to float
def dollarize(price):
    """Convert a price string such as '$2.39 ' to a float (2.39).

    Strips the leading '$' and the final character (the trailing space that
    this dataset's item_price values carry).  Replaces the previous lambda
    assignment (PEP 8 E731: use a def for named callables).
    """
    return float(price[1:-1])
# Apply the conversion in place, then inspect the frame and one sample order.
chipo["item_price"] = chipo["item_price"].apply(dollarize)
chipo
chipo[chipo["order_id"]==1443]
# ### Step 6. Create a scatterplot with the number of items orderered per order price
# #### Hint: Price should be in the X-axis and Items ordered in the Y-axis
# now as requested
# Aggregate to one row per order (numeric columns summed), then plot total
# items ordered against the order's total price.
orders = chipo.groupby('order_id').sum()
plt.scatter(x = orders.item_price, y = orders.quantity, s = 50, c = 'blue')
# Set the title and labels
plt.xlabel('Order Price')
plt.ylabel('Items ordered')
plt.title('Number of items ordered per order price')
# Anchor the y-axis at zero so counts are not visually truncated.
plt.ylim(0)
plt.show()
# ### Step 7. BONUS: Create a question and a graph to answer your own question.
# in this graph the number of items ordered in a unique order is compared with the total price of that order.
# Same scatter as above, but with line prices first weighted by quantity so
# item_price reflects the true revenue for each order line.
chipo_per_order = chipo.copy()
chipo_per_order["item_price"] = chipo_per_order.quantity * chipo_per_order.item_price
chipo_per_order = chipo_per_order.groupby("order_id").sum()
chipo_per_order.rename(columns = {"item_price":"order_price"}, inplace = True)
chipo_per_order
plt.scatter(x = chipo_per_order.order_price, y = chipo_per_order.quantity, s = 50, c = 'green')
# Set the title and labels
plt.xlabel('Order Price')
plt.ylabel('Items ordered')
plt.title('Number of items ordered per order price')
plt.ylim(0)
plt.show()
# +
# Now plot the average price of a single item in an order against the
# order's total price.
chipo_per_order = chipo.copy()
chipo_per_order["unique_items"] = np.ones(len(chipo_per_order))
chipo_per_order["item_price"] = chipo_per_order.quantity * chipo_per_order.item_price
chipo_per_order = chipo_per_order.groupby("order_id").sum()
chipo_per_order.rename(columns = {"item_price":"order_price"}, inplace = True)
chipo_per_order
# Two candidate averages: per unit sold (quantity-weighted) vs per order
# line.  NOTE(review): these differ whenever a line has quantity > 1, so
# the choice below does affect the plot for such orders.
weighted_avg_price = chipo_per_order.order_price/chipo_per_order.quantity
avg_price = chipo.groupby("order_id").item_price.sum() / chipo_per_order["unique_items"]
x = chipo_per_order.order_price
y = avg_price
plt.scatter(x,y,s = 50, c = 'magenta')
# Set the title and labels
# BUG FIX: the y-axis label and title were copy-pasted from the item-count
# plot above, but this chart's y-axis is the average item price.
plt.xlabel('Order Price')
plt.ylabel('Average item price')
plt.title('Average item price per order price')
plt.ylim(0)
plt.show()
| 07_Visualization/Chipotle/Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # An analysis of the *Mendoza Line* in MLB batting statistics.
# -----
#
# The *Mendoza Line* is common U.S. slang referring to the threshold for seriously below average performance.
# The term originated in baseball, referring to the batting average of shortstop Mario Mendoza.
# For those unfamiliar with the origin of the term, there is good background in the [wikipedia entry on the Mendoza Line] and [this column] from the St. Louis Post-Dispatch.
#
# The term has made Mendoza's last name famous since it was first coined in 1979, but we should verify the figure and analyze where this level of performance falls in the spectrum of other major league batters.
# In addition, we'll look at how batting averages over time compare to this figure.
#
# The data used in this analysis comes from SeanLahman.com's [baseball database](http://www.seanlahman.com/baseball-archive/statistics/).
#
# [wikipedia entry on the Mendoza Line]: https://en.wikipedia.org/wiki/Mendoza_Line
#
# [this column]: http://www.stltoday.com/sports/baseball/professional/branded-for-life-with-the-mendoza-line/article_cff05af5-032e-5a29-b5a8-ecc9216b0c02.html
# ### Table of contents:
# 1. Set up
# 2. Data
# 2.1 Sources
# 2.2 Data wrangling and initial observations
# 2.3 Data quality check
# 3. Exploration and analysis
# 3.1 How bad was this average in the years leading up to 1979?
# 3.2 What percent of batters are below the Mendoza Line over time?
# 4. Conclusions
# 4.1 Limitations and areas for further investigation
# ## 1. Set up
# -----
# Load the required libraries:
import numpy as np
import pandas as pd
import platform
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm, percentileofscore
# %matplotlib inline
# For readers and reviewers, the versions of the major software components are:
print('python version:', platform.python_version())
print(pd.__name__, 'version', pd.__version__)
print(np.__name__, 'version', np.__version__)
# ## 2. Data
# -----
#
# ### 2.1 Sources
# As noted earlier, the data used comes from SeanLahman.com's baseball database. Specifically, I used this [dataset](http://seanlahman.com/files/database/baseballdatabank-2017.1.zip) which was updated February 26, 2017 with statistics through the 2016 season.
# While the zip repository contains 27 different .csv files covering various statistics, we're only going to use a subset:
#
# 1. Master.csv --> player names and biographical data
# 2. Batting.csv --> batting stastics
# 3. Appearances.csv --> positional info
# ### 2.2 Data wrangling and initial observations
# Import each of the .csv files into a pandas DataFrame object:
# Load the three Lahman database tables used in this analysis: player
# biographies, per-season batting lines, and positional appearances.
directory = 'core/'
master_df = pd.read_csv(directory + 'Master.csv')
batting_df = pd.read_csv(directory + 'Batting.csv')
appearances_df = pd.read_csv(directory + 'Appearances.csv')
# Look at the master table to make sure it loaded correctly:
master_df.head()
# First, let's see if we can find <NAME> in our database...
# Filter the biographical table down to everyone surnamed Mendoza.
mendozas = master_df.loc[master_df['nameLast'] == 'Mendoza']
mendozas
# Judging by the first names and the dates played compared to the biographical info in the background reading, it's pretty easy to find our man in the third row, born in Chihuahua, Mexico in 1950. Let's save his player ID in a variable **mendoza_ID** so we can look up his stats.
mendoza_ID = mendozas[mendozas['nameFirst'] == 'Mario']['playerID'].values[0]
mendoza_ID
# Now, let's look up <NAME>'s batting statistics. First, let's look at the batting dataframe:
batting_df.head()
# The columns in the batting_df dataframe have the following labels:
# +
#playerID Player ID code
#yearID Year
#stint player's stint (order of appearances within a season)
#teamID Team
#lgID League
#G Games
#AB At Bats
#R Runs
#H Hits
#2B Doubles
#3B Triples
#HR Homeruns
#RBI Runs Batted In
#SB Stolen Bases
#CS Caught Stealing
#BB Base on Balls
#SO Strikeouts
#IBB Intentional walks
#HBP Hit by pitch
#SH Sacrifice hits
#SF Sacrifice flies
#GIDP Grounded into double plays
# -
# Let's examine Mendoza's numbers:
mendoza_batting_df = batting_df[batting_df['playerID'] == mendoza_ID]
mendoza_batting_df
# Create a quick summary of Mendoza's hits and at bats per year, and calculate his batting average **('BA')** - note the convention is to round this to three decimals places:
def calculate_BA(batting_df):
    """Batting average = hits / at-bats, rounded to three decimal places
    (the conventional precision for batting averages)."""
    average = batting_df['H'] / batting_df['AB']
    return average.round(3)
# Summarize Mendoza's batting average, hits, and at-bats per season.
# pd.DataFrame.from_items was deprecated and removed in pandas 1.0; a plain
# dict literal preserves column order (BA, H, AB) on Python 3.7+.
mendoza_data = pd.DataFrame({'BA': calculate_BA(mendoza_batting_df),
                             'H': mendoza_batting_df['H'],
                             'AB': mendoza_batting_df['AB']})
mendoza_data.index = mendoza_batting_df['yearID']
mendoza_data
# Let's look at his typical batting average in the years up through (and including) 1979 when the phrase was coined:
end_year = 1979
start_year = mendoza_data.index.values.min()
print('Average {} - {} batting average: {:4.3f}'.format(start_year, end_year,
mendoza_data[(mendoza_data.index) <= end_year]['BA'].mean()))
# #### The Mendoza Line quantified and verified: he was a .200 hitter
#
# Now, this "average of averages" would give equal weighting to his batting averages from each year regardless of the number of at bats. Let's redo the previous calculation using the actual hits and at bats from each season:
print('Cumulative {} - {} batting average: {:4.3f}'.format(start_year, end_year,
float(mendoza_data[(mendoza_data.index) <= end_year]['H'].sum()/mendoza_data[(mendoza_data.index) <= end_year]['AB'].sum())))
# Looks like the cumulative batting average over that period was almost consistent with the average of his batting averages, so the initial figure wasn't skewed by any outlier years.
#
# How did he fare from 1979 through the end of his career in 1982?
final_career_year = mendoza_data.index.values.max()
print('{} - {} batting average: {:4.3f}'.format(end_year+1, final_career_year,
float(mendoza_data[(mendoza_data.index) > end_year]['H'].sum()/mendoza_data[(mendoza_data.index) > end_year]['AB'].sum())))
# He was a little better those last few years, but unfortunately the saying had already become a cultural idiom and the "Mendoza Line" was memorialized as a batting average of **0.200**.
MENDOZA_LINE = 0.200
# ### 2.3 Data quality check
# We've imported the csv files into three dataframes for our analysis
#
# 1. master_df --> player names and biographical data
# 2. batting_df --> batting stastics
# 3. appearances_df --> positional info
#
# The master_df was only needed to find our info for <NAME> as we aren't using biographical data elsewhere in our analysis, so we don't need to scrub this dataset as it has already served its limited purpose. However, we should investigate the batting and appearances datasets to check for data issues.
batting_df.info()
# We can see in the information above that it looks like there are a good number of missing data points from the batting records. This data set goes back to 1871 and it's not surprising that some data may not have been tracked in the same way historically. However, our analysis will only be covering from 1975 onward, a relatively modern period. We can check that subset of the data:
batting_df[batting_df['yearID'] >= 1975].info()
# Great - it looks like there is no missing batting data in this period. Now, let's verify the same on the appearances data:
appearances_df[appearances_df['yearID'] >= 1975].info()
# Similarly, it looks like there are no missing data points in this subset of the data either. Again, it makes sense that the data sets from 1975 forward would be clean as baseball was very popular during this entire period and keeping detailed statistics had long been part of baseball, even pre-dating the period in question.
# ## 3. Exploration and analysis
# -----
#
# ### 3.1 How bad was this average in the years leading up to 1979?
# In order to quantify how mediocre a performance batting .200 was in 1979 when the phrase was coined, I want to look at typical batting averages in this time period. To do this, I need to adjust the batting_df dataset in a few different ways:
# * Look only at data in the 5 year window from 1975 - 1979
# * Remove pitchers
# * Remove players without at least 50 at bats in a season *(which could be stints with multiple teams in the same season)*
#
# #### First, create a new dataframe with just the batting data from 1975 to 1979 (inclusive)
# +
def stat_window(df, start_year, end_year):
    """Return the rows of ``df`` whose 'yearID' lies in [start_year, end_year].

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a numeric 'yearID' column.
    start_year, end_year : int
        Inclusive season bounds.
    """
    # f-string query is clearer (and less error-prone) than manual string concatenation.
    return df.query(f'{start_year} <= yearID <= {end_year}')
# Restrict the raw batting records to the five seasons ending in 1979 --
# the period in which the "Mendoza Line" phrase was coined.
start_year = 1975
end_year = 1979
batting_window = stat_window(batting_df, start_year, end_year)
print(len(batting_window), "batting data records from {} - {}".format(start_year,
                                                                      end_year))
batting_window.head()
# -
batting_window.info()
# Distinct players in the window; a player may have several rows
# (one per stint/team per season).
players_set = set(batting_window['playerID'])
print(len(players_set), "unique players with batting records during this period")
# #### Next, remove pitchers from the dataset.
# Pitchers are defined as players with more than one appearance as pitcher during a season. One appearance is used as the threshold to allow for fielders who might pitch rarely during an extra innings situation. This could lead to slight errors on edge cases of fielders who routinely pitched or players who switched positions during their career, but this would be a very rare case during the time period being analyzed.
# +
# Create a set of all players with more than one game pitched in a stint or season
# ('G_p' is games appearing as pitcher; the > 1 threshold tolerates position
# players who pitched once in an emergency).
min_G_p = 1
all_pitchers = set(appearances_df[appearances_df['G_p'] > min_G_p]['playerID'])
# remove these players from the batting dataframe
batters_set = set(x for x in batting_window['playerID'] if x not in all_pitchers)
print(len(batters_set), "unique non-pitchers in {} - {}".format(start_year, end_year))
# +
def remove_position(df, position):
    """Return ``df`` without the rows whose playerID appears in ``position``.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'playerID' column.
    position : collection of str
        playerIDs to drop (e.g. the set of all pitchers).
    """
    # Vectorized isin() replaces the original Python-level membership loop.
    return df[~df['playerID'].isin(position)]
# Drop all pitcher rows from the 1975-1979 window.
batting_window = remove_position(batting_window, all_pitchers)
print(len(batting_window), 'batting data records with pitchers removed')
print(len(set(batting_window['playerID'])),
      "unique players, should match unique non-pitchers in cell above")
batting_window.head()
# -
# #### Next, remove players without at least 50 at bats in that year.
# The intent here is to try to remove "noisy" data points from players who didn't have at least 50 at bats in a season, which might include short-term call-ups from the minor leagues, injured players, etc. However, we must allow for players to achieve this minimum in a combination of 'stints' across different teams in the same season.
# ***To do this, we create a multi-index*** to sum the games played ('G') data by playerID and yearID (to aggregate seasons with multiple stints), so that we can look up our data by player, by year:
# +
def get_player_year_sum(df, field):
    """Sum ``field`` per (playerID, yearID) and return a MultiIndex Series.

    Grouping collapses a player's multiple stints (rows) within one
    season into a single total, so minimum-AB checks can be done by
    (player, year) lookup.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'playerID', 'yearID' and the ``field`` column.
    field : str
        Name of the numeric column to aggregate (e.g. 'AB').

    Returns
    -------
    pandas.Series indexed by ('playerID', 'yearID').
    """
    # Select the target column before summing: summing the whole frame would
    # also try to aggregate non-numeric columns such as teamID.
    grouped = df.groupby(['playerID', 'yearID'], as_index=False)[[field]].sum()
    index_arrays = [grouped['playerID'], grouped['yearID']]
    multi_index = pd.MultiIndex.from_arrays(index_arrays, names=['playerID', 'yearID'])
    return pd.Series(grouped[field].values, index=multi_index)
# Per-(player, year) at-bat totals, aggregated across stints.
stat = 'AB'
player_year_stats = get_player_year_sum(batting_window, stat)
player_year_stats.head(10)
# -
# Create a boolean array to check for minimum criteria (at bats) in the season:
# +
min_stat = 50
# Keep only rows whose player reached `min_stat` ABs in that season (summed
# across stints). Label-based row access replaces the deprecated positional
# Series lookup (x[1][0] / x[1][1]), which newer pandas no longer supports;
# the first two columns are assumed to be playerID and yearID -- the labels
# make that assumption explicit.
required_min = [
    player_year_stats[row['playerID'], row['yearID']] >= min_stat
    for _, row in batting_window.iterrows()
]
batting_window = batting_window[required_min]
print(len(batting_window), 'batting data records with minimum of {} {}'.format(min_stat, stat))
# -
# #### Now that we've cleaned up this data, we can analyze the distribution of batting averages.
# Season-record batting average: hits over at-bats.
BAs_window = batting_window['H']/batting_window['AB']
BAs_window.describe()
# From the describe() statement above, the mean of the batting averages was .251, with a standard deviation of 0.047 **- so the Mendoza Line of .200 was about one standard deviation below the mean.** We can also graph the distribution of batting averages to get a visual feeling for the distribution.
# Bin edges from .100 to .400 in steps of .010.
BA_bins = [x/1000 for x in range(100,410,10)]
plt.rcParams['figure.figsize'] = 8, 5
# density=True replaces the `normed=True` keyword, which was removed from
# matplotlib's hist() in 3.1; the normalized y-axis is unchanged.
BAs_window.hist(bins=BA_bins, density=True, edgecolor='black')
plt.title('MLB batting averages: 1975 - 1979')
plt.axvline(x=0.200, color='black')
plt.text(.190, 7 , "Mendoza Line", rotation=90)
plt.xlabel('Batting average')
plt.ylabel('Frequency (percentage)')
plt.show()
# Calculating some statistics based on a normal distribution...
# Z-score of the Mendoza Line against the window's empirical mean and
# population std (ddof=0). MENDOZA_LINE is presumably 0.200, defined in an
# earlier cell -- confirm against the notebook setup.
mendoza_Z = (MENDOZA_LINE - BAs_window.mean())/BAs_window.std(ddof=0)
print("The Z score of a .200 batting average is {:4.2f}".format(mendoza_Z))
print("Assuming a normal distribution of batting averages, this would place .200 above",
      "only {:3.1f}% of batters".format(100*norm.cdf(mendoza_Z)))
# However, the normal distribution is only an approximation of the data. We can look at the actual percentile rankings of the batting averages to calculate precisely what percentage of batters would fall below the Mendoza Line:
BAs_window.quantile([0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1])
# Eyeballing the deciles above would imply that an average of .200 would fall just north of the tenth percentile (where only 10% of observations would be below this point). This is even worse than what the normal distribution would imply. We can use **percentileofscore** from the scipy.stats module to figure out precisely what percentage of scores were below .200:
# +
def mendoza_percentile(series):
    """Percentage of values in ``series`` strictly below MENDOZA_LINE.

    kind="strict" counts only scores < MENDOZA_LINE (ties are excluded).
    MENDOZA_LINE and percentileofscore come from earlier notebook cells.
    """
    return percentileofscore(series, MENDOZA_LINE, kind="strict")
print("Given the actual distribution, a .200 batting average was above",
      "only {:3.1f}% of batters".format(mendoza_percentile(BAs_window)))
# -
# #### Conclusion:
# 1. The term "Mendoza Line" refers to a performance of batting average of approximately .200, as verified by <NAME>'s actual batting average in the years before the term was coined.
# 2. This level of performance in the 1975-1979 time frame would have placed a batter in only the 10th percentile. Said another way, almost *90% of batters had a higher average* when we removed pitchers and players without a minimum number of at bats.
# ### 3.2 What percent of batters are below the "Mendoza Line" over time?
#
# In the 1975-1979 time frame, when the term Mendoza Line was coined, batting .200 put a player in roughly the 10th percentile of eligible batters (those with at least 50 ABs, excluding pitchers). I'd like to know how this level varied over time thereafter (from 1980 onward).
#
# **Specifically, what percent of batters are below .200 each year?**
# #### First, create a dataset with just the figures from 1980 forward
# +
# New window: 1980 through the latest season in the data set.
start_year = 1980
end_year = batting_df['yearID'].max()
batting_window = stat_window(batting_df, start_year, end_year)
print(len(batting_window), "batting data records from {} - {}".format(
    start_year, end_year))
batting_window.head()
# -
# #### Again, remove the pitchers
# Reuse the pitcher set built earlier (it covers all years, not just 1975-79).
batting_window = remove_position(batting_window, all_pitchers)
print(len(batting_window), 'batting data records with pitchers removed')
# #### Next, remove players without at least 50 at bats in that year.
# Similar to the process above, we need to create a multiindex to allow for players to have different 'stints' across different teams in the same season. Note that to qualify for awards like the batting title, the minimum level of appearances is much higher.
#
# Create the at bats multiindex for the 1980 onward batting data:
# Rebuild the per-(player, year) AB totals for the post-1980 window.
stat = 'AB'
player_year_stats = get_player_year_sum(batting_window, stat)
player_year_stats.head(10)
# And remove the players with less than 50 ABs in a year from our post-1980 batting dataframe
# +
min_stat = 50
# Same minimum-AB filter as the 1975-79 window. Label-based row access
# replaces the deprecated positional Series lookup (x[1][0] / x[1][1]),
# which newer pandas no longer supports; the first two columns are assumed
# to be playerID and yearID -- the labels make that assumption explicit.
required_min = [
    player_year_stats[row['playerID'], row['yearID']] >= min_stat
    for _, row in batting_window.iterrows()
]
batting_window = batting_window[required_min]
print(len(batting_window), 'batting data records with minimum of {} {}'.format(min_stat, stat))
# -
# Spot-check the filtered post-1980 records.
batting_window.head(10)
# +
# helper function to return an array with the qualifying batting averages for any given year
def get_annual_BA(year, df=None):
    """Return the qualifying batting averages (H/AB) for ``year`` as an array.

    Parameters
    ----------
    year : int
        Season to select.
    df : pandas.DataFrame, optional
        Batting records to draw from; defaults to the module-level
        ``batting_window`` so existing call sites are unchanged.
    """
    if df is None:
        df = batting_window
    annual_data = df[df['yearID'] == year]
    return (annual_data['H']/annual_data['AB']).values
# create a dataframe with a column containing the qualifying batting averages for each year
# note that the columns will be of varying lengths, but pandas will pad the missing values with NaN
BA_dict = {x: get_annual_BA(x) for x in range(start_year, end_year+1)}
annual_BA_df = pd.DataFrame.from_dict(BA_dict, orient='index')
# Transpose so each YEAR is a column of batting averages (rows are NaN-padded).
annual_BA_df = annual_BA_df.transpose()
annual_BA_df.head()
# -
# #### Quick detour:
# Let's take a look at the 1980's to get a feel for how batting averages are distributed by year
# Slice out the 1980-1989 seasons (columns up to and including 1989).
eighties = annual_BA_df.loc[:,:1989]
eighties.head()
# One box per season: median, IQR, whiskers, and outliers of the
# qualifying batting averages.
eighties.plot.box()
plt.title('1980s MLB batting averages (min 50 ABs)')
plt.ylim(.1, .4)
plt.axhline(y=0.200, color='black')
plt.text(1.5, .21 , "Mendoza Line")
plt.xlabel('Year')
plt.ylabel('Batting average')
plt.show()
# As shown in the box plot above, the middle 50% of the observed batting averages fall in a band roughly .050 wide centered in the .250 - .260 area. The maximum is generally in the .340-.360 area and the min is in the .150 - .170 area. It looks like the .200 Mendoza Line remains a very low average.
#
# #### But how low is low? Let's get the percentile represented by .200 in each year.
# +
# helper function to eliminate NaNs from series before using to calculate percentiles
def no_NaNs(series):
    """Return the non-NaN values of ``series`` as a list.

    Relies on the IEEE property NaN != NaN to drop only missing values.
    The previous ``x > 0`` filter also discarded legitimate 0.0 batting
    averages (a hitless season with 50+ ABs is rare but possible).
    """
    return [x for x in series if x == x]
# Percent of qualifying averages below the Mendoza Line, per season.
min_year = batting_window['yearID'].min()
max_year = batting_window['yearID'].max()
years = [x for x in range(min_year, max_year+1)]
mendoza_pct_by_year = pd.Series([mendoza_percentile(no_NaNs(annual_BA_df[year])) for year in years], index=years)
mendoza_pct_by_year.plot()
plt.title('MLB batting averages below Mendoza Line annually, since 1980')
# Long-run average across all seasons, drawn as a reference line.
ave = mendoza_pct_by_year.mean()
plt.axhline(y=ave, color='black')
plt.xlabel('Year')
plt.ylabel('Percentage of batting averages below Mendoza Line')
plt.show()
# -
# #### Conclusion:
# 1. The plot above shows that the percentage of qualifying batters (non-pitchers, with at least 50 ABs in a season across teams) batting below the Mendoza Line ranged from a low of under 5% in 1994 to a high of nearly 15% in 2014.
# 1. The horizontal line above shows the average of the percentage of batters below the Mendoza Line, which has been just below 10% over this time period.
# ## 4. Summary
# -----
# As verified above, the Mendoza Line refers to a batting average of .200, corresponding to Mario Mendoza's actual average in the years leading up to the phrase first appearing in 1979. To quantify this amount, we showed that this batting average placed Mario Mendoza in the 10th percentile in the years leading up to this phrase being coined. In the years since then, the percentage of qualifying players batting below the Mendoza Line in a given year has ranged from under 5% to over 14%, while on average just under 10% of players bat below the Mendoza Line each year.
# ### 4.1 Limitations and areas for further investigation
# It should be noted that this analysis treated the distribution of batting averages each year as an independent phenomenon. In understanding why the percentage of players batting below the Mendoza Line in any year varies, it might be helpful to normalize or adjust for the quality of pitching in that year. For example, very few batters were below the Mendoza Line during the early 2000s and the level seemed to rise sharply around 2010. It would be helpful to analyze pitching statistics during that time. In addition, the data does not adjust for the fact that most (if not all) MLB ballparks have been rebuilt in the era between 1975 and 2016. Ballparks can have a noticeable impact on batting data.
#
# In addition, there has been substantial press coverage on the usage of steroids and other performance enhancing drugs in baseball. If we had access to data on drug testing, suspensions, and rule changes (on drug testing frequency, penalty changes, etc.) it would be worth further investigation to see if these changes caused a noticeable impact in batting averages in the years they went into effect.
| baseball-final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Introduction
# The following analysis (Task_4_EDA.ipynb) was conducted in Jupyter Lab with Python 3. The intent of this analysis is to explore the dataset of varying gene expression levels in *E. coli* primarily regarding the gene response to ampicillin treatment compared to control levels at stationary and growth phases. Researchers <NAME> and <NAME> at the South Dakota State University used RNA sequencing to detect gene expression levels under three conditions or phases in *E. coli* including the stationary phase where the population size of *E. coli* remains constant, the growth phase where *E. coli* numbers are increasing, and lastly under ampicillin (antibiotic) treatment. This was done in triplicate and measurements were taken three hours post treatment; the data was then cleaned by me for this Data 301 project. The explorations to be conducted include general descriptive statistics, distribution plotting, and generally looking at some plots. The goal is to discover if any more data cleaning is required before data analysis, to have a better visualization of my data, and verify the direction of my data analysis to answer my research question. My research question is what genes may be interesting to study when regarding antibiotic resistance that are either highly upregulated or downregulated genes during ampicillin treatment when compared to controls.
| analysis/Task4/Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mlmat] *
# language: python
# name: conda-env-mlmat-py
# ---
import numpy as np
import pandas as pd
import pandas as pd  # NOTE(review): duplicate of the import above -- harmless, could be removed
import numpy as np  # NOTE(review): duplicate import -- harmless, could be removed
import rdkit.Chem.Fragments as Fragments
from copy import copy
# Property table for the 1400 BzNSN candidate molecules (SMILES strings
# plus computed targets such as Ered, HOMO, Gsol, absorption wavelength).
df_BzNSN = pd.read_csv("data/AllProps_1400BzNSN.csv")
import rdkit.Chem as Chem
import rdkit.Chem.Crippen as Crippen
import rdkit.Chem.Lipinski as Lipinski
import rdkit.Chem.rdMolDescriptors as MolDescriptors
import rdkit.Chem.Descriptors as Descriptors
def evaluate_chem_mol(mol):
    """Compute 40 RDKit descriptors for ``mol``.

    Returns a tuple in the fixed order expected by the unpacking call
    site: SSSR count, cLogP, molar refractivity, exact molecular weight,
    TPSA, Chi0n-Chi4n, Chi0v-Chi4v, fraction CSP3, Hall-Kier alpha,
    Kappa1-3, Labute ASA, ring/bond/atom counts, Lipinski counts,
    valence electrons, and max/min partial charge.
    """
    # One descriptor function per output slot -- the sequence below defines
    # the return signature, so do not reorder.
    descriptor_fns = (
        Chem.GetSSSR,
        Crippen.MolLogP,
        Crippen.MolMR,
        MolDescriptors.CalcExactMolWt,
        MolDescriptors.CalcTPSA,
        MolDescriptors.CalcChi0n,
        MolDescriptors.CalcChi1n,
        MolDescriptors.CalcChi2n,
        MolDescriptors.CalcChi3n,
        MolDescriptors.CalcChi4n,
        MolDescriptors.CalcChi0v,
        MolDescriptors.CalcChi1v,
        MolDescriptors.CalcChi2v,
        MolDescriptors.CalcChi3v,
        MolDescriptors.CalcChi4v,
        MolDescriptors.CalcFractionCSP3,
        MolDescriptors.CalcHallKierAlpha,
        MolDescriptors.CalcKappa1,
        MolDescriptors.CalcKappa2,
        MolDescriptors.CalcKappa3,
        MolDescriptors.CalcLabuteASA,
        MolDescriptors.CalcNumAliphaticRings,
        MolDescriptors.CalcNumAromaticRings,
        MolDescriptors.CalcNumAmideBonds,
        MolDescriptors.CalcNumAtomStereoCenters,
        MolDescriptors.CalcNumBridgeheadAtoms,
        MolDescriptors.CalcNumHBA,
        MolDescriptors.CalcNumHBD,
        MolDescriptors.CalcNumHeteroatoms,
        MolDescriptors.CalcNumHeterocycles,
        MolDescriptors.CalcNumRings,
        MolDescriptors.CalcNumRotatableBonds,
        MolDescriptors.CalcNumSpiroAtoms,
        MolDescriptors.CalcNumSaturatedRings,
        Lipinski.HeavyAtomCount,
        Lipinski.NHOHCount,
        Lipinski.NOCount,
        Descriptors.NumValenceElectrons,
        Descriptors.MaxPartialCharge,
        Descriptors.MinPartialCharge,
    )
    return tuple(fn(mol) for fn in descriptor_fns)
# Per-molecule accumulators, one list per descriptor; filled row-by-row in
# the SMILES loop below and later assembled into DataFrames. A generator is
# used so every name is bound to its own distinct empty list.
(sssr, clogp, mr, mw, tpsa,
 chi0n, chi1n, chi2n, chi3n, chi4n,
 chi0v, chi1v, chi2v, chi3v, chi4v,
 fracsp3, hall_kier_alpha, kappa1, kappa2, kappa3,
 labuteasa, number_aliphatic_rings, number_aromatic_rings,
 number_amide_bonds, number_atom_stereocenters, number_bridgehead_atoms,
 number_HBA, number_HBD, number_hetero_atoms, number_hetero_cycles,
 number_rings, number_rotatable_bonds, number_spiro,
 number_saturated_rings, number_heavy_atoms, number_nh_oh, number_n_o,
 number_valence_electrons, max_partial_charge, min_partial_charge,
 ) = ([] for _ in range(40))
# Fragment-count accumulators (rdkit.Chem.Fragments), one list per fragment type.
(fr_C_O, fr_C_O_noCOO, fr_Al_OH, fr_Ar_OH, fr_methoxy, fr_oxime, fr_ester,
 fr_Al_COO, fr_Ar_COO, fr_COO, fr_COO2, fr_ketone, fr_ether, fr_phenol,
 fr_aldehyde, fr_quatN, fr_NH2, fr_NH1, fr_NH0, fr_Ar_N, fr_Ar_NH,
 fr_aniline, fr_Imine, fr_nitrile, fr_hdrzine, fr_hdrzone, fr_nitroso,
 fr_N_O, fr_nitro, fr_azo, fr_diazo, fr_azide, fr_amide, fr_priamide,
 fr_amidine, fr_guanido, fr_Nhpyrrole, fr_imide, fr_isocyan,
 fr_isothiocyan, fr_thiocyan, fr_halogen, fr_alkyl_halide, fr_sulfide,
 fr_SH, fr_C_S, fr_sulfone, fr_sulfonamd, fr_prisulfonamd, fr_barbitur,
 fr_urea, fr_term_acetylene, fr_imidazole, fr_furan, fr_thiophene,
 fr_thiazole, fr_oxazole, fr_pyridine, fr_piperdine, fr_piperzine,
 fr_morpholine, fr_lactam, fr_lactone, fr_tetrazole, fr_epoxide,
 fr_unbrch_alkane, fr_bicyclic, fr_benzene, fr_phos_acid, fr_phos_ester,
 fr_nitro_arom, fr_nitro_arom_nonortho, fr_dihydropyridine,
 fr_phenol_noOrthoHbond, fr_Al_OH_noTert, fr_benzodiazepine,
 fr_para_hydroxylation, fr_allylic_oxid, fr_aryl_methyl,
 fr_Ndealkylation1, fr_Ndealkylation2, fr_alkyl_carbamate,
 fr_ketone_Topliss, fr_ArN, fr_HOCCN,
 ) = ([] for _ in range(85))
# Featurization loop: parse each SMILES string into an RDKit Mol, compute
# the 40 whole-molecule descriptors via evaluate_chem_mol(), then the
# rdkit fragment counts, appending one value per molecule to each of the
# accumulator lists initialized above (row order follows df_BzNSN).
for f in df_BzNSN['SMILES']:
    f1=Chem.MolFromSmiles(f)
    # Unpack the 40 descriptors in the fixed order returned by evaluate_chem_mol().
    mol_sssr, mol_clogp, mol_mr, mol_mw, mol_tpsa, mol_chi0n, mol_chi1n, mol_chi2n, mol_chi3n, mol_chi4n, mol_chi0v,\
    mol_chi1v, mol_chi2v, mol_chi3v, mol_chi4v, mol_fracsp3, mol_hall_kier_alpha, mol_kappa1, mol_kappa2,\
    mol_kappa3, mol_labuteasa, mol_number_aliphatic_rings, mol_number_aromatic_rings, mol_number_amide_bonds,\
    mol_number_atom_stereocenters, mol_number_bridgehead_atoms, mol_number_HBA, mol_number_HBD, \
    mol_number_hetero_atoms, mol_number_hetero_cycles, mol_number_rings, mol_number_rotatable_bonds, \
    mol_number_spiro, mol_number_saturated_rings, mol_number_heavy_atoms, mol_number_nh_oh, \
    mol_number_n_o, mol_number_valence_electrons, mol_max_partial_charge, mol_min_partial_charge= evaluate_chem_mol(f1)
    sssr.append(mol_sssr)
    clogp.append(mol_clogp)
    mr.append(mol_mr)
    mw.append(mol_mw)
    tpsa.append(mol_tpsa)
    chi0n.append(mol_chi0n)
    chi1n.append(mol_chi1n)
    chi2n.append(mol_chi2n)
    chi3n.append(mol_chi3n)
    chi4n.append(mol_chi4n)
    chi0v.append(mol_chi0v)
    chi1v.append(mol_chi1v)
    chi2v.append(mol_chi2v)
    chi3v.append(mol_chi3v)
    chi4v.append(mol_chi4v)
    fracsp3.append(mol_fracsp3)
    hall_kier_alpha.append(mol_hall_kier_alpha)
    kappa1.append(mol_kappa1)
    kappa2.append(mol_kappa2)
    kappa3.append(mol_kappa3)
    labuteasa.append(mol_labuteasa)
    number_aliphatic_rings.append(mol_number_aliphatic_rings)
    number_aromatic_rings.append(mol_number_aromatic_rings)
    number_amide_bonds.append(mol_number_amide_bonds)
    number_atom_stereocenters.append(mol_number_atom_stereocenters)
    number_bridgehead_atoms.append(mol_number_bridgehead_atoms)
    number_HBA.append(mol_number_HBA)
    number_HBD.append(mol_number_HBD)
    number_hetero_atoms.append(mol_number_hetero_atoms)
    number_hetero_cycles.append(mol_number_hetero_cycles)
    number_rings.append(mol_number_rings)
    number_rotatable_bonds.append(mol_number_rotatable_bonds)
    number_spiro.append(mol_number_spiro)
    number_saturated_rings.append(mol_number_saturated_rings)
    number_heavy_atoms.append(mol_number_heavy_atoms)
    number_nh_oh.append(mol_number_nh_oh)
    number_n_o.append(mol_number_n_o)
    number_valence_electrons.append(mol_number_valence_electrons)
    max_partial_charge.append(mol_max_partial_charge)
    min_partial_charge.append(mol_min_partial_charge)
    # Fragment counts, one Fragments.* call per accumulator.
    fr_C_O.append(Fragments.fr_C_O(f1))
    fr_C_O_noCOO.append(Fragments.fr_C_O_noCOO(f1))
    fr_Al_OH.append(Fragments.fr_Al_OH(f1))
    fr_Ar_OH.append(Fragments.fr_Ar_OH(f1))
    fr_methoxy.append(Fragments.fr_methoxy(f1))
    fr_oxime.append(Fragments.fr_oxime(f1))
    fr_ester.append(Fragments.fr_ester(f1))
    fr_Al_COO.append(Fragments.fr_Al_COO(f1))
    fr_Ar_COO.append(Fragments.fr_Ar_COO(f1))
    fr_COO.append(Fragments.fr_COO(f1))
    fr_COO2.append(Fragments.fr_COO2(f1))
    fr_ketone.append(Fragments.fr_ketone(f1))
    fr_ether.append(Fragments.fr_ether(f1))
    fr_phenol.append(Fragments.fr_phenol(f1))
    fr_aldehyde.append(Fragments.fr_aldehyde(f1))
    fr_quatN.append(Fragments.fr_quatN(f1))
    fr_NH2.append(Fragments.fr_NH2(f1))
    fr_NH1.append(Fragments.fr_NH1(f1))
    fr_NH0.append(Fragments.fr_NH0(f1))
    fr_Ar_N.append(Fragments.fr_Ar_N(f1))
    fr_Ar_NH.append(Fragments.fr_Ar_NH(f1))
    fr_aniline.append(Fragments.fr_aniline(f1))
    fr_Imine.append(Fragments.fr_Imine(f1))
    fr_nitrile.append(Fragments.fr_nitrile(f1))
    fr_hdrzine.append(Fragments.fr_hdrzine(f1))
    fr_hdrzone.append(Fragments.fr_hdrzone(f1))
    fr_nitroso.append(Fragments.fr_nitroso(f1))
    fr_N_O.append(Fragments.fr_N_O(f1))
    fr_nitro.append(Fragments.fr_nitro(f1))
    fr_azo.append(Fragments.fr_azo(f1))
    fr_diazo.append(Fragments.fr_diazo(f1))
    fr_azide.append(Fragments.fr_azide(f1))
    fr_amide.append(Fragments.fr_amide(f1))
    fr_priamide.append(Fragments.fr_priamide(f1))
    fr_amidine.append(Fragments.fr_amidine(f1))
    fr_guanido.append(Fragments.fr_guanido(f1))
    fr_Nhpyrrole.append(Fragments.fr_Nhpyrrole(f1))
    fr_imide.append(Fragments.fr_imide(f1))
    fr_isocyan.append(Fragments.fr_isocyan(f1))
    fr_isothiocyan.append(Fragments.fr_isothiocyan(f1))
    fr_thiocyan.append(Fragments.fr_thiocyan(f1))
    fr_halogen.append(Fragments.fr_halogen(f1))
    fr_alkyl_halide.append(Fragments.fr_alkyl_halide(f1))
    fr_sulfide.append(Fragments.fr_sulfide(f1))
    fr_SH.append(Fragments.fr_SH(f1))
    fr_C_S.append(Fragments.fr_C_S(f1))
    fr_sulfone.append(Fragments.fr_sulfone(f1))
    fr_sulfonamd.append(Fragments.fr_sulfonamd(f1))
    fr_prisulfonamd.append(Fragments.fr_prisulfonamd(f1))
    fr_barbitur.append(Fragments.fr_barbitur(f1))
    fr_urea.append(Fragments.fr_urea(f1))
    fr_term_acetylene.append(Fragments.fr_term_acetylene(f1))
    fr_imidazole.append(Fragments.fr_imidazole(f1))
    fr_furan.append(Fragments.fr_furan(f1))
    fr_thiophene.append(Fragments.fr_thiophene(f1))
    fr_thiazole.append(Fragments.fr_thiazole(f1))
    fr_oxazole.append(Fragments.fr_oxazole(f1))
    fr_pyridine.append(Fragments.fr_pyridine(f1))
    fr_piperdine.append(Fragments.fr_piperdine(f1))
    fr_piperzine.append(Fragments.fr_piperzine(f1))
    fr_morpholine.append(Fragments.fr_morpholine(f1))
    fr_lactam.append(Fragments.fr_lactam(f1))
    fr_lactone.append(Fragments.fr_lactone(f1))
    fr_tetrazole.append(Fragments.fr_tetrazole(f1))
    fr_epoxide.append(Fragments.fr_epoxide(f1))
    fr_unbrch_alkane.append(Fragments.fr_unbrch_alkane(f1))
    fr_bicyclic.append(Fragments.fr_bicyclic(f1))
    fr_benzene.append(Fragments.fr_benzene(f1))
    fr_phos_acid.append(Fragments.fr_phos_acid(f1))
    fr_phos_ester.append(Fragments.fr_phos_ester(f1))
    fr_nitro_arom.append(Fragments.fr_nitro_arom(f1))
    fr_nitro_arom_nonortho.append(Fragments.fr_nitro_arom_nonortho(f1))
    fr_dihydropyridine.append(Fragments.fr_dihydropyridine(f1))
    fr_phenol_noOrthoHbond.append(Fragments.fr_phenol_noOrthoHbond(f1))
    fr_Al_OH_noTert.append(Fragments.fr_Al_OH_noTert(f1))
    fr_benzodiazepine.append(Fragments.fr_benzodiazepine(f1))
    fr_para_hydroxylation.append(Fragments.fr_para_hydroxylation(f1))
    fr_allylic_oxid.append(Fragments.fr_allylic_oxid(f1))
    fr_aryl_methyl.append(Fragments.fr_aryl_methyl(f1))
    fr_Ndealkylation1.append(Fragments.fr_Ndealkylation1(f1))
    fr_Ndealkylation2.append(Fragments.fr_Ndealkylation2(f1))
    fr_alkyl_carbamate.append(Fragments.fr_alkyl_carbamate(f1))
    fr_ketone_Topliss.append(Fragments.fr_ketone_Topliss(f1))
    fr_ArN.append(Fragments.fr_ArN(f1))
    fr_HOCCN.append(Fragments.fr_HOCCN(f1))
# Assemble the 40 whole-molecule descriptor columns into a DataFrame
# (one row per molecule, in df_BzNSN['SMILES'] order).
df_Solvent_Features=pd.DataFrame(
    {'sssr':sssr,
     'clogp':clogp,
     'mr':mr,
     'mw':mw,
     'tpsa': tpsa,
     'chi0n':chi0n,
     'chi1n':chi1n,
     'chi2n':chi2n,
     'chi3n':chi3n,
     'chi4n':chi4n,
     'chi0v':chi0v,
     'chi1v':chi1v,
     'chi2v':chi2v,
     'chi3v':chi3v,
     'chi4v':chi4v,
     'fracsp3':fracsp3,
     'hall_kier_alpha':hall_kier_alpha,
     'kappa1':kappa1,
     'kappa2':kappa2,
     'kappa3':kappa3,
     'labuteasa':labuteasa,
     'number_aliphatic_rings':number_aliphatic_rings,
     'number_aromatic_rings':number_aromatic_rings,
     'number_amide_bonds':number_amide_bonds,
     'number_atom_stereocenters':number_atom_stereocenters,
     'number_bridgehead_atoms':number_bridgehead_atoms,
     'number_HBA':number_HBA,
     'number_HBD':number_HBD,
     'number_hetero_atoms':number_hetero_atoms,
     'number_hetero_cycles':number_hetero_cycles,
     'number_rings':number_rings,
     'number_rotatable_bonds':number_rotatable_bonds,
     'number_spiro':number_spiro,
     'number_saturated_rings':number_saturated_rings,
     'number_heavy_atoms':number_heavy_atoms,
     'number_nh_oh':number_nh_oh,
     'number_n_o':number_n_o,
     'number_valence_electrons':number_valence_electrons,
     'max_partial_charge':max_partial_charge,
     'min_partial_charge':min_partial_charge
     })
# Assemble the fragment-count columns into a second DataFrame with the
# same row order as df_Solvent_Features.
df_Solvent_Features_Frags=pd.DataFrame(
    {'fr_C_O':fr_C_O,
     'fr_C_O_noCOO':fr_C_O_noCOO,
     'fr_Al_OH':fr_Al_OH,
     'fr_Ar_OH':fr_Ar_OH,
     'fr_methoxy':fr_methoxy,
     'fr_oxime':fr_oxime,
     'fr_ester':fr_ester,
     'fr_Al_COO':fr_Al_COO,
     'fr_Ar_COO':fr_Ar_COO,
     'fr_COO':fr_COO,
     'fr_COO2':fr_COO2,
     'fr_ketone':fr_ketone,
     'fr_ether':fr_ether,
     'fr_phenol':fr_phenol,
     'fr_aldehyde':fr_aldehyde,
     'fr_quatN':fr_quatN,
     'fr_NH2':fr_NH2,
     'fr_NH1':fr_NH1,
     'fr_NH0':fr_NH0,
     'fr_Ar_N':fr_Ar_N,
     'fr_Ar_NH':fr_Ar_NH,
     'fr_aniline':fr_aniline,
     'fr_Imine':fr_Imine,
     'fr_nitrile':fr_nitrile,
     'fr_hdrzine':fr_hdrzine,
     'fr_hdrzone':fr_hdrzone,
     'fr_nitroso':fr_nitroso,
     'fr_N_O':fr_N_O,
     'fr_nitro':fr_nitro,
     'fr_azo':fr_azo,
     'fr_diazo':fr_diazo,
     'fr_azide':fr_azide,
     'fr_amide':fr_amide,
     'fr_priamide':fr_priamide,
     'fr_amidine':fr_amidine,
     'fr_guanido':fr_guanido,
     'fr_Nhpyrrole':fr_Nhpyrrole,
     'fr_imide':fr_imide,
     'fr_isocyan':fr_isocyan,
     'fr_isothiocyan':fr_isothiocyan,
     'fr_thiocyan':fr_thiocyan,
     'fr_halogen':fr_halogen,
     'fr_alkyl_halide':fr_alkyl_halide,
     'fr_sulfide':fr_sulfide,
     'fr_SH':fr_SH,
     'fr_C_S':fr_C_S,
     'fr_sulfone':fr_sulfone,
     'fr_sulfonamd':fr_sulfonamd,
     'fr_prisulfonamd':fr_prisulfonamd,
     'fr_barbitur':fr_barbitur,
     'fr_urea':fr_urea,
     'fr_term_acetylene':fr_term_acetylene,
     'fr_imidazole':fr_imidazole,
     'fr_furan':fr_furan,
     'fr_thiophene':fr_thiophene,
     'fr_thiazole':fr_thiazole,
     'fr_oxazole':fr_oxazole,
     'fr_pyridine':fr_pyridine,
     'fr_piperdine':fr_piperdine,
     'fr_piperzine':fr_piperzine,
     'fr_morpholine':fr_morpholine,
     'fr_lactam':fr_lactam,
     'fr_lactone':fr_lactone,
     'fr_tetrazole':fr_tetrazole,
     'fr_epoxide':fr_epoxide,
     'fr_unbrch_alkane':fr_unbrch_alkane,
     'fr_bicyclic':fr_bicyclic,
     'fr_benzene':fr_benzene,
     'fr_phos_acid':fr_phos_acid,
     'fr_phos_ester':fr_phos_ester,
     'fr_nitro_arom':fr_nitro_arom,
     'fr_nitro_arom_nonortho':fr_nitro_arom_nonortho,
     'fr_dihydropyridine':fr_dihydropyridine,
     'fr_phenol_noOrthoHbond':fr_phenol_noOrthoHbond,
     'fr_Al_OH_noTert':fr_Al_OH_noTert,
     'fr_benzodiazepine':fr_benzodiazepine,
     'fr_para_hydroxylation':fr_para_hydroxylation,
     'fr_allylic_oxid':fr_allylic_oxid,
     'fr_aryl_methyl':fr_aryl_methyl,
     'fr_Ndealkylation1':fr_Ndealkylation1,
     'fr_Ndealkylation2':fr_Ndealkylation2,
     'fr_alkyl_carbamate':fr_alkyl_carbamate,
     'fr_ketone_Topliss':fr_ketone_Topliss,
     'fr_ArN':fr_ArN,
     'fr_HOCCN':fr_HOCCN})
# Combine descriptor and fragment features into one design matrix.
df_Solvent_Features_All = pd.concat([df_Solvent_Features,df_Solvent_Features_Frags], axis=1)
X = df_Solvent_Features_All
# Drop all-zero columns (fragments absent from every molecule).
X = X.loc[:, (X != 0).any(axis=0)]
# Regression targets from the property table.
Y_Ered=df_BzNSN['Ered']
Y_HOMO_Opt=df_BzNSN['HOMO']
Y_GSol=df_BzNSN['Gsol']
Y_TDDFT=df_BzNSN['Absorption Wavelength']
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_validate, train_test_split
from sklearn.decomposition import PCA
# Standardize the features, then project onto the top 22 principal components.
st = StandardScaler()
Xdata = st.fit_transform(X)
pca = PCA(n_components=22)
Xdata = pca.fit_transform(Xdata)
# Input dimensionality seen by the GP models (22 after PCA).
natom_layer = Xdata.shape[1]
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from sklearn.gaussian_process import GaussianProcessRegressor
from scipy.stats import norm
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C ,WhiteKernel as Wht,Matern as matk
def gpregression(Xtrain, Ytrain, Nfeature):
    """Fit and return a Gaussian-process regressor on (Xtrain, Ytrain).

    Kernel: constant * Matern(nu=1.5, per-dimension length scales)
    + white noise, with 40 optimizer restarts.

    Parameters
    ----------
    Xtrain : array-like, shape (n_samples, Nfeature)
    Ytrain : array-like, shape (n_samples,)
    Nfeature : int
        Number of input features; sizes the Matern length-scale vector.
        (Previously this argument was ignored and 22 was hard-coded,
        which only worked because PCA was fixed at 22 components.)
    """
    cmean = [1.0] * Nfeature
    cbound = [[1e-3, 1e3]] * Nfeature
    kernel = C(1.0, (1e-3,1e3)) * matk(cmean,cbound,1.5) + Wht(1.0, (1e-3, 1e3))
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=40, normalize_y=False)
    gp.fit(Xtrain, Ytrain)
    return gp
def gprediction(gpnetwork, xtest):
    """Return the GP's (mean, std) predictions for ``xtest``."""
    return gpnetwork.predict(xtest, return_std=True)
#compute expected improvement
def expectedimprovement(xdata, gpnetwork, ybest, itag, epsilon):
    """Expected-improvement acquisition values for the candidates in ``xdata``.

    Parameters
    ----------
    xdata : array-like
        Candidate feature rows fed to the GP.
    gpnetwork : fitted GaussianProcessRegressor
    ybest : float
        Current best (incumbent) objective value.
    itag : {+1, -1}
        +1 to maximize the objective, -1 to minimize.
    epsilon : float
        Exploration margin (xi in the standard EI formulation).

    Returns
    -------
    np.ndarray of EI values; 0 where the predictive std is 0.
    """
    ye_pred, esigma = gprediction(gpnetwork, xdata)
    expI = np.empty(ye_pred.size, dtype=float)
    for ii in range(0, ye_pred.size):
        if esigma[ii] > 0:
            # Standard EI applies the epsilon margin in BOTH the z-score and
            # the improvement term; the previous version omitted it from the
            # z-score, biasing the exploration/exploitation trade-off.
            zzval = itag*(ye_pred[ii]-ybest-epsilon)/float(esigma[ii])
            expI[ii] = itag*(ye_pred[ii]-ybest-epsilon)*norm.cdf(zzval)+esigma[ii]*norm.pdf(zzval)
        else:
            expI[ii] = 0.0
    return expI
def paretoSearch(capP,search='min'):
    """Return the first Pareto front of the objective matrix ``capP``.

    Parameters
    ----------
    capP : ndarray of shape (n_points, n_objectives)
        Objective values, one row per candidate.
    search : {'min', 'max'}
        Direction in which an objective counts as better.

    Returns
    -------
    F0 : ndarray
        Objective rows belonging to the front.
    paretoIdx : list of int
        Row indices of those points within ``capP``.

    NOTE(review): the dominance test below keeps a point only when,
    against every rival q, it is strictly better in at least one
    objective -- this differs from the textbook fast-non-dominated-sort,
    and ``Sp``/``prank`` are computed but never used. Confirm intended
    semantics before reusing outside this notebook.
    """
    # Non-dominated sorting
    paretoIdx=[]
    F0 = []
    for i,p in enumerate(capP):
        Sp = []
        nps = 0
        for j,q in enumerate(capP):
            if i!=j:
                if search=='min':
                    compare = p < q
                elif search=='max':
                    compare = p > q
                if any(compare):
                    Sp.append(q)
                else:
                    nps+=1
        # nps == 0 means p was better somewhere against every other point.
        if nps==0:
            paretoIdx.append(i)
            prank = 1
            F0.append(p.tolist())
    F0 = np.array(F0)
    return F0, paretoIdx
def paretoOpt(capP, metric='crowdingDistance',opt='min'):
    """Find the Pareto front of ``capP`` and pick a single 'best' point on it.

    Parameters
    ----------
    capP : ndarray of shape (n_points, n_objectives)
    metric : {'crowdingDistance', 'euclideanDistance'}
        Tie-breaking rule used to choose one point from the front.
    opt : {'min', 'max'}
        Passed through to paretoSearch().

    Returns
    -------
    (paretoIdx, bestIdx): indices of the front in ``capP`` and the index
    of the selected point.

    NOTE(review): for >1000 points the data is chunked and fronts merged;
    the remainder slice ``capP[1000*n_parts-1:-1]`` looks off by one (it
    re-includes the last chunk's final row and drops capP's last row) --
    verify before relying on the chunked path. Point indices are also
    recovered by matching objective values (np.where(capP==...)), which
    can mis-attribute duplicated rows.
    """
    if capP.shape[0]<=1000:
        F0, paretoIdx = paretoSearch(capP, search=opt)
    else:
        # Chunked search: front of each 1000-row slice, then a front of fronts.
        n_parts = int(capP.shape[0]//1000.)
        rem = capP.shape[0] % 1000.
        FList = []
        paretoIdxList = []
        for i in range(n_parts):
            Fi, paretoIdxi = paretoSearch(capP[1000*i:1000*(i+1)], search=opt)
            FList.append(Fi)
            ar_paretoIdxi = np.array(paretoIdxi)+1000*i
            paretoIdxList.append(ar_paretoIdxi.tolist())
        if rem>0:
            Fi, paretoIdxi = paretoSearch(capP[1000*n_parts-1:-1], search=opt)
            FList.append(Fi)
            ar_paretoIdxi = np.array(paretoIdxi)+1000*n_parts
            paretoIdxList.append(ar_paretoIdxi.tolist())
        F1 = np.concatenate(FList)
        paretoIdx1=np.concatenate(paretoIdxList)
        F0, paretoIdxTemp = paretoSearch(F1, search=opt)
        # Map merged-front rows back to indices in the original capP.
        paretoIdx=[]
        for a in paretoIdxTemp:
            matchingArr = np.where(capP==F1[a])[0]
            counts = np.bincount(matchingArr)
            pt = np.argmax(counts)
            paretoIdx.append(pt)
    m=F0.shape[-1]
    l = len(F0)
    ods = np.zeros(np.max(paretoIdx)+1)
    if metric == 'crowdingDistance':
        # NSGA-II-style crowding distance; boundary points get 'infinite' score.
        infi = 1E6
        for i in range(m):
            order = []
            sortedF0 = sorted(F0, key=lambda x: x[i])
            for a in sortedF0:
                matchingArr = np.where(capP==a)[0]
                counts = np.bincount(matchingArr)
                o = np.argmax(counts)
                order.append(o)
            ods[order[0]]=infi
            ods[order[-1]]=infi
            fmin = sortedF0[0][i]
            fmax = sortedF0[-1][i]
            for j in range(1,l-1):
                ods[order[j]]+=(capP[order[j+1]][i]-capP[order[j-1]][i])/(fmax-fmin)
        # Impose criteria on selecting pareto points
        if min(ods[np.nonzero(ods)])>=infi:
            bestIdx = np.argmax(ods)
        else:
            if l>2: # if there are more than 2 pareto points, pick inner points with largest crowding distance (i.e most isolated)
                tempOds=copy(ods)
                for i,a in enumerate(tempOds):
                    if a>=infi: tempOds[i]=0.
                bestIdx = np.argmax(tempOds)
            else: #pick pareto point with lower index
                bestIdx = np.argmax(ods)
    elif metric == 'euclideanDistance': # To the hypothetical point of the current data
        # Distance of each front point to the per-objective best corner.
        for i in range(m):
            order = []
            sortedF0 = sorted(F0, key=lambda x: x[i])
            for a in sortedF0:
                matchingArr = np.where(capP==a)[0]
                counts = np.bincount(matchingArr)
                o = np.argmax(counts)
                order.append(o)
            fmin = sortedF0[0][i]
            fmax = sortedF0[-1][i]
            for j in range(0,l):
                ods[order[j]]+=((capP[order[j]][i]-fmax)/(fmax-fmin))**2
        ods = np.sqrt(ods)
        for i,a in enumerate(ods):
            if a!=0: print(i,a)
        bestIdx = np.where(ods==np.min(ods[np.nonzero(ods)]))[0][0]
    return paretoIdx,bestIdx
# +
import matplotlib.pyplot as plt

# Build the three objectives to MAXIMIZE (each negated so larger is better).
optimalValue = 375  # target absorption wavelength (nm)
Ydata1 = -df_BzNSN['Ered'].values                                # minimize reduction potential
Ydata2 = -df_BzNSN['Gsol'].values                                # minimize solvation free energy
Ydata3 = -abs(df_BzNSN['Absorption Wavelength'] - optimalValue)  # stay near the target wavelength
# -
# One row per molecule, one column per objective.
Ydata = np.column_stack((Ydata1, Ydata2, Ydata3))
nobj = Ydata.shape[1]           # number of objectives
Xinfo = df_BzNSN['SMILES']      # molecule identifiers (SMILES strings)
ndata = len(Ydata)              # number of candidate molecules
#Bayesian optimization run
def numberofopt(Xdata,Ydata,Xinfo,ndata,natom_layer,BOmetric='crowdingDistance'):
    """Run one multi-objective Bayesian-optimization campaign.

    Starts from a random 5-molecule training set, then for `Niteration`
    rounds fits one GP per objective, selects the next molecule as the
    Pareto optimum of the per-objective expected improvements, and moves
    it from the candidate pool into the training set.

    Relies on module-level globals: nobj, Niteration, and the helpers
    paretoOpt, gpregression, gprediction, expectedimprovement.

    Parameters
    ----------
    Xdata : (ndata, natom_layer) float array of molecular features.
    Ydata : (ndata, nobj) float array of objectives (maximized).
    Xinfo : sequence of SMILES strings aligned with Xdata rows.
    ndata : total number of candidates.
    natom_layer : feature-vector length.
    BOmetric : tie-break metric passed to paretoOpt
        ('crowdingDistance' or 'euclideanDistance').

    Returns
    -------
    (xoptint, yopinit, xoptval, yopttval, Xexplored, Yexplored, result):
        initial best SMILES/objectives, final best SMILES/objectives,
        explored SMILES and their objective rows, and the initial-training
        SMILES list concatenated with the explored SMILES list.
    """
    itag = 1        # acquisition flag forwarded to expectedimprovement
    epsilon = 0.01  # exploration jitter for expected improvement
    ntrain = 5 # int(train_test_split * ndata)
    nremain = ndata - ntrain
    # Random split: first ntrain shuffled indices train, the rest remain.
    dataset = np.random.permutation(ndata)
    a1data = np.empty(ntrain, dtype=int)
    a2data = np.empty(nremain, dtype=int)
    a1data[:] = dataset[0:ntrain]
    a2data[:] = dataset[ntrain:ndata]
    # info for the initial training set
    Xtrain = np.ndarray(shape=(ntrain, natom_layer), dtype=float)
    Xtraininfo = np.chararray(ntrain, itemsize=100)  # byte strings, max 100 chars
    Ytrain = np.empty((ntrain,nobj), dtype=float)
    Xtrain[0:ntrain, :] = Xdata[a1data, :]
    Xtraininfo[0:ntrain] = Xinfo[a1data]
    Ytrain[0:ntrain, :] = Ydata[a1data, :]
    XtraininfoIni = Xtraininfo
    XtraininfoIni=np.array([x.decode() for x in XtraininfoIni])
    XtraininfoIniList = XtraininfoIni.tolist()
    # Best point of the initial training set.
    # NOTE: 'yopinit'/'xoptint' look like typos for 'yoptinit'/'xoptinit',
    # but they are part of the returned interface, so the names are kept.
    _,yoptLoc = paretoOpt(Ytrain,metric=BOmetric,opt='max')
    yopttval = Ytrain[yoptLoc]
    xoptval = Xtraininfo[yoptLoc]
    yoptstep=0
    yopinit = yopttval
    xoptint = xoptval
    # info for the remaining data set
    Xremain = np.ndarray(shape=(nremain, natom_layer), dtype=float)
    Xremaininfo = np.chararray(nremain, itemsize=100)
    Yremain = np.empty((nremain,nobj), dtype=float)
    Xremain[0:nremain, :] = Xdata[a2data, :]
    Xremaininfo[0:nremain] = Xinfo[a2data]
    Yremain[0:nremain] = Ydata[a2data]
    targetRM = []  # (unused accumulator)
    for ii in range(0, Niteration):
        if ii > int(0.5*Niteration):
            epsilon=0.01  # NOTE: same as the initial epsilon, so this branch has no effect
        # Fit one GP per objective; predict on the current training set.
        gpnetworkList = []
        yt_predList = []
        for i in range(nobj):
            gpnetwork = gpregression(Xtrain, Ytrain[:,i], natom_layer)
            yt_pred, tsigma = gprediction(gpnetwork, Xtrain)
            yt_predList.append(yt_pred)
            gpnetworkList.append(gpnetwork)
        yt_pred=np.vstack((yt_predList)).T
        # Best predicted point and its true objective values.
        _, ybestloc = paretoOpt(yt_pred,metric=BOmetric,opt='max')
        ybest = yt_pred[ybestloc]
        ytrue = Ytrain[ybestloc]
        # Track the best *observed* point so far and the step it appeared.
        currentPareto, currentBest = paretoOpt(Ytrain,metric=BOmetric,opt='max')
        if any(Ytrain[currentBest]!=yopttval):
            yopttval = Ytrain[currentBest]
            xoptval = Xtraininfo[currentBest]
            yoptstep=ii
        # Acquisition: per-objective expected improvement over the pool;
        # the Pareto-best EI picks the next molecule to acquire.
        expIList = []
        for i in range(nobj):
            expI = expectedimprovement(Xremain, gpnetworkList[i], ybest[i], itag, epsilon)
            expIList.append(expI)
        expI = np.vstack((expIList)).T
        _, expimaxloc = paretoOpt(expI,metric=BOmetric,opt='max')
        expImax = expI[expimaxloc]
        # Move the selected molecule from the pool into the training set.
        xnew = np.append(Xtrain, Xremain[expimaxloc]).reshape(-1, natom_layer)
        xnewinfo = np.append(Xtraininfo, Xremaininfo[expimaxloc])
        ynew = np.concatenate((Ytrain, Yremain[expimaxloc].reshape(1,-1)))
        xrnew = np.delete(Xremain, expimaxloc, 0)
        xrnewinfo = np.delete(Xremaininfo, expimaxloc)
        yrnew = np.delete(Yremain, expimaxloc,0)
        # Accumulate the exploration history.
        if ii==0:
            Xexplored=Xremaininfo[expimaxloc]
            Yexplored=Yremain[expimaxloc]
        else:
            Xexploredtemp=np.append(Xexplored, Xremaininfo[expimaxloc])
            Yexploredtemp=np.append(Yexplored, Yremain[expimaxloc])
            del Xexplored,Yexplored
            Xexplored=Xexploredtemp
            Yexplored=Yexploredtemp
        del Xtrain, Ytrain, Xremaininfo, gpnetwork
        Xtrain = xnew
        Xtraininfo = xnewinfo
        Ytrain = ynew
        Xremain = xrnew
        Xremaininfo = xrnewinfo
        Yremain = yrnew
        del xnew, xnewinfo, ynew, xrnew, xrnewinfo, yrnew
    # np.append flattened Yexplored above; restore (n_explored, nobj) shape.
    Yexplored = Yexplored.reshape(-1,nobj)
    Xexplored=np.array([x.decode() for x in Xexplored])
    XexploredList = Xexplored.tolist()
    result = XtraininfoIniList+XexploredList
    return xoptint,yopinit,xoptval,yopttval,Xexplored,Yexplored,result
#------ Program Starts from here -------------<br>
#print("Original Training X and Y :",np.shape(Xdata),np.shape(Xdata))
fileName = 'BayesOptRunProgress'
# Log the dataset dimensions once; append mode keeps a history across runs.
# Context managers replace the manual open/close pairs (no handle leaks);
# the written bytes are identical to the original.
with open(fileName+'.txt', 'a+') as pfile:
    pfile.write("Original Training X and Y : \n")
    pfile.write(str(np.shape(Xdata)))
    pfile.write(str(np.shape(Ydata)))
    pfile.write("\n")
Nruns = 1         # number of independent BO repetitions
Niteration = 30 # number of iteration in a given Bayesian Optimization
# Per-run bookkeeping (initial guess and final optimum of every run).
Xinitguess = np.chararray(Nruns, itemsize=100)
Yinitguess = np.empty((Nruns,nobj), dtype=float)
Xoptimal = np.chararray(Nruns, itemsize=100)
Yoptimal = np.empty((Nruns,nobj), dtype=float)
Yexplored = []
Xexplored = []
res = []
metr = 'crowdingDistance'
for ii in range(0,Nruns):
    Xinitguess[ii], Yinitguess[ii], Xoptimal[ii], Yoptimal[ii], xexploredii, yexploredii, resultRow =numberofopt(Xdata, Ydata, Xinfo, ndata, natom_layer,BOmetric=metr)
    # Progress marker for this run.
    with open(fileName+'.txt', 'a+') as pfile:
        pfile.write("Run #"+str(ii+1)+"\n")
        pfile.write('---------------------------\n')
    # One comma-separated row per run; str.join writes exactly the same
    # bytes as the original element-by-element loop.
    with open('Result-temp.csv', 'a+') as resFile:
        resFile.write(','.join(resultRow) + '\n')
    with open('Xexplored-temp.csv', 'a+') as xFile:
        xFile.write(','.join(xexploredii) + '\n')
    res.append(resultRow)
    Xexplored.append(xexploredii)
# Collect all runs into DataFrames and save the final CSVs.
df_Xexplored = pd.DataFrame.from_records(Xexplored)
df = pd.DataFrame.from_records(res)
df_Xexplored.to_csv('MBOResult-Xexplored.csv',index=False)
df.to_csv('MBOResult-Xexplored-withInitial.csv',index=False)
df_Xexplored
| case-study/multi-objective-BzNSN/BO-1400BzNSN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/connorj4/CNN-plant-subkingdom/blob/master/Plant_Classification_VGG.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="JdEC45MxiYFv" colab_type="code" outputId="f320e40d-11c3-4596-e925-19f00eff1d97" colab={"base_uri": "https://localhost:8080/", "height": 52}
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
# + id="8VRGsLCWie-d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1823} outputId="7091085c-956d-487b-8ab6-6310815a66b3"
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
# Load VGG-19 pretrained on ImageNet.
# NOTE(review): `pretrained` expects a bool; the string 'imagenet' is merely
# truthy here (and newer torchvision uses `weights=`) -- confirm against the
# installed version.
net = torchvision.models.vgg19(pretrained='imagenet') #vgg 19 layers trained on imagenet
n_class = 5 #number of output classes (used in the final layer we add to train)
num_ftrs = net.classifier[6].in_features #get the input to the last layer
features = list(net.classifier.children())[:-1] #get all layers other than the last one (bc we are replacing it)
features.extend([nn.Linear(num_ftrs, n_class)]) #add a final layer with output as number of classes
net.classifier = nn.Sequential(*features) #wrap in sequential container. add back to model.
print(net) #get architecture
# Freeze the convolutional backbone; train only the (rebuilt) classifier head.
for p in net.parameters():
    p.requires_grad = False
for p in net.classifier.parameters():
    p.requires_grad = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# BUG FIX: `device` is a torch.device, so comparing it to the string "cpu"
# is unreliable across torch versions (and the bare `raise` had no active
# exception to re-raise). Compare the device type and raise explicitly.
if device.type == "cpu":
    print("nogpu")
    raise RuntimeError("No GPU available")
net.to(device)
# + id="9ZrSbQqPkWFm" colab_type="code" outputId="8d762a69-1624-46de-a6ca-e17b0a02bf92" colab={"base_uri": "https://localhost:8080/", "height": 749}
from google.colab import drive
# Mount Google Drive so the dataset (and saved weights) are reachable under
# /content/gdrive/My Drive.
drive.mount('/content/gdrive')
# !ls '/content/gdrive/My Drive' #if dataset is not here may need to debug!!
# + id="RxjpOff9mZGT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="8c983994-c122-4e37-cec1-031cf4ee69b1"
# Resize to the VGG-19 input size, convert to tensors, and normalize each
# channel to roughly [-1, 1] (mean .5, stddev .5).
transform = transforms.Compose(
    [transforms.Resize(224), #VGG19 input size
     transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) #mean .5, stddev .5
imagenet_data_train = torchvision.datasets.ImageFolder('/content/gdrive/My Drive/dataset_sm/train',transform=transform)
print(imagenet_data_train.classes) #our 5 classes
imagenet_data_test = torchvision.datasets.ImageFolder('/content/gdrive/My Drive/dataset_sm/test',transform=transform)
print(imagenet_data_test.classes) #our 5 classes
#dataloader will assist us in using minibatch i.e. SGD
batch_size = 16
data_loader_train = torch.utils.data.DataLoader(imagenet_data_train,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=2)
data_loader_test = torch.utils.data.DataLoader(imagenet_data_test,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=2)
try: #this is to suppress a weird error
    x = iter(data_loader_train)
except Exception:  # narrowed from a bare except; still best-effort
    print("")
# BUG FIX: `x.next()` is Python-2 iterator syntax and raises AttributeError
# on modern DataLoader iterators; use the builtin next().
img, label = next(x)
print(label) # should be 16 labels between 0 and 4
print(img.size()) #should be 4D [minibatchsize, chan, len, wid]
from PIL import Image
from matplotlib import pyplot as plt
plt.imshow(transforms.functional.to_pil_image(img[0])) #first image in the minibatch
classes = imagenet_data_test.classes
print(classes[label[0]]) #first label of the 16
# + id="83csCWPkoFCg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="6574100a-f2fc-4d32-9124-5ae2aa436858"
import torch.optim as optim
# Cross-entropy loss over the 5 classes; SGD over only the trainable
# (classifier) parameters, since the backbone is frozen.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(list(filter(lambda p: p.requires_grad, net.parameters())), lr=.001, momentum=0.9)
# Decay the learning rate by 10x every 4 epochs.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 4, gamma=0.1, last_epoch=-1)
# Sanity check: backbone params should print False, classifier params True.
for p in net.parameters():
    print(p.requires_grad)
# + id="Bcbm7JIkv87Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 4576} outputId="7aa522ed-423c-4c99-eb04-9407bab2ad90"
#professor code !! (mostly)
start = time.time()
for p in net.parameters():
    print(p.requires_grad)
net.train()  # ensure dropout layers are in training mode
for epoch in range(4):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(data_loader_train, 0): #grab a minibatch
        # get the inputs
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device) #since network is on GPU move the inputs and labels to GPU
        # zero the parameter gradients
        optimizer.zero_grad() #dont use old gradients
        # forward + backward + optimize
        outputs = net(inputs) #net.forward(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step() #uses SGD
        # print statistics
        running_loss += loss.item()
        if i % 2 == 1: # print every 2 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2))
            running_loss = 0.0
    # BUG FIX: since PyTorch 1.1 the LR scheduler must be stepped AFTER the
    # optimizer steps of the epoch; stepping at the top of the loop skipped
    # the initial learning rate.
    scheduler.step()
end = time.time()
print(end - start)
print('Finished Training')
# + id="X7HjSXmM8g-7" colab_type="code" colab={}
# Persist only the fine-tuned weights (state_dict), not the whole module.
torch.save(net.state_dict(),'/content/gdrive/My Drive/plantClassificationWeights.pth') #save and run md5 to make sure not going crazy...
# + id="hItMdqoYV3CM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="8fddbc32-d5ea-44d0-e858-8a7a97ca6a55"
start = time.time()
print("hello")
total = 0
correct = 0
net.eval()  # evaluation mode: disables dropout so accuracy is deterministic
# Overall test-set accuracy.
with torch.no_grad():
    for data in data_loader_test:
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (
    100 * correct / total))
print("total of")
print(total)
end = time.time()
#print(end - start)
# Per-class accuracy.
class_correct = list(0. for i in range(5))
class_total = list(0. for i in range(5))
with torch.no_grad():
    for data in data_loader_test:
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        # BUG FIX: no .squeeze() -- squeezing turned a batch of 1 into a
        # 0-dim tensor and broke the c[i] indexing below.
        c = (predicted == labels)
        # BUG FIX: the original iterated range(4) (copied from a batch-size-4
        # tutorial); with batch_size=16 that silently dropped 12 samples per
        # batch. Iterate the actual batch size (also handles a short final batch).
        for i in range(labels.size(0)):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1
for i in range(5):
    # max(..., 1) guards against a class absent from the test set
    # (the original would raise ZeroDivisionError).
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / max(class_total[i], 1)))
# + [markdown] id="Vya0jVVyjQAu" colab_type="text"
# from google.colab import drive
# drive.mount('/content/gdrive')
# modern CNN architecture resnet
#
| Plant_Classification_VGG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''stilt'': venv)'
# name: python37764bitstiltvenvd0b5b9a6265a460294244206ccd6e7b1
# ---
# # HYSPLIT vs STILT Outputs:
#
# Simulation Notes:
#
# HYSPLIT Data:
#
# - Puff Simulations utilizing 100 puffs
# +
#Libraries:
# Standard library
import datetime
import glob

# Third-party
import contextily as ctx
import fiona
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# -
# ## Functions
# +
def file_to_geoframe(path):
    """Load a shapefile into a GeoDataFrame projected to Web Mercator.

    Parameters
    ----------
    path : str
        Path to a shapefile; if it has a 'CONC' column it is cast to float.

    Returns
    -------
    geopandas.GeoDataFrame
        The file's features re-projected to EPSG:3857.
    """
    gdf = gpd.read_file(path)
    gdf = gpd.GeoDataFrame(gdf, crs=gdf.crs)
    try:
        gdf['CONC'] = gdf['CONC'].astype(float)
    except (KeyError, ValueError):
        # Narrowed from a bare `except:`: only a missing column (KeyError) or
        # non-numeric values (ValueError) are expected; anything else raises.
        print('No concentration data')
    gdf = gdf.to_crs('epsg:3857')
    return gdf
def tri_concatenate(folder,utah_shape,grid_shape):
    '''Takes multiple hysplit plume shapefiles and harmonizes over location and time. Keeps all chemicals seperate, only sums those with same chemid.
    Input:
    ----------
    folder: path to the TRI output files. Hourly shapefiles named 'hour*'
        inside an 'HO_<FRS_ID>_<CAS>_<lat>_<lon>_..._<y>_<m>_<d>_<H>' folder;
        facility, chemical and release time are parsed from the path itself
    utah_shape: A shapefile of the border of utah. EPSG 3857
    grid_shape: a fishnet grid of utah (1kmx1km for Hanson purposes);
        must carry an 'index' column (from reset_index) and 'USNG' tile ids
    Returns:
    utah_grid_df: a dataframe consisting of 1km x 1km squares of Utah which are filled with harmonized chemical data
    '''
    #Grab the data entries
    #folder_list=sorted(glob.glob(HYSPLIT_folder_path+'/HO_*'))
    #Concatenate the output data to a grid
    counter = 0
    utah_grid_list = []
    for path in sorted(glob.glob(folder +'/hour*',recursive=True)):
        #Extract information from model file name
        #TODO sub with REGEX to make this more robust
        # Path layout assumed: .../HO_<FRS>_<CAS>_<lat>_<lon>_<alt>_<dur>_<y>_<m>_<d>_<H>/hour..._<k>
        # -- TODO confirm against actual folder names.
        temp = path.split('HO')[1].split('_')
        FRS_ID = temp[1]
        CAS = temp[2]
        # Last underscore-separated token is the hours-after-release offset.
        hour_after = datetime.timedelta(hours = int(temp[-1]))
        ymdh= temp[6] +'_'+ temp[7] +'_'+ temp[8] +'_'+temp[9].split('/')[0]
        ymdh=datetime.datetime.strptime(ymdh,'%y_%m_%d_%H')
        ymdh =ymdh + hour_after
        #Load into a geodataframe
        data = file_to_geoframe(path)
        data['CAS'] = CAS
        data['FRS_ID']=FRS_ID
        data = data.to_crs('epsg:3857')
        #Verify there is concentration data
        if 'CONC' not in data.columns:
            print('Shapefile has no concentration column')
            continue
        #Check to make sure there is data within the Utah Boundaries
        # NOTE(review): sjoin's `op=` keyword was renamed `predicate=` in
        # geopandas >= 0.10 -- confirm the pinned geopandas version.
        state_boundary = utah_shape[utah_shape['STATE']=='Utah']
        test_join = (gpd.sjoin(state_boundary,data, how='inner',op='intersects'))
        if test_join.empty:
            print('No part of this shape is within Utah: ' + path)
            continue
        #Merge to grid
        tmp = gpd.sjoin(grid_shape,data, how='left',op='intersects')
        #Where shapes overlap, taking the max
        tmp=tmp.sort_values('CONC', ascending=False).drop_duplicates('USNG').sort_index()
        #Remove the log transformation in order to perform spatial sum
        # (CONC is stored as log10 concentration in the HYSPLIT output)
        tmp['non_log_CONC'] = np.power(10,tmp['CONC'])
        #Save Entries to a temporary dictionary
        utah_grid_list.append(tmp)
    # Concatenate all the grids together, then group by to create a proper dataframe
    #OID_ is the tile number
    utah_grid_df = pd.concat(utah_grid_list)
    # Sum (non-log) concentration per timestamp/chemical/tile.
    utah_grid_df = utah_grid_df.groupby(by=['DATE','TIME','CAS','USNG','index']).agg({'non_log_CONC':'sum',
                                                                                      'FRS_ID':'first',
                                                                                      'geometry':'first'})
    utah_grid_df = utah_grid_df.reset_index()
    utah_grid_df['log_CONC'] = np.log10(utah_grid_df['non_log_CONC'])
    return utah_grid_df
def geoplot(geo_df, ax, col_of_interest, point_data=False):
    """Render a GeoDataFrame on *ax* colored by one column ('YlOrRd' colormap).

    ===
    Inputs:
        geo_df: GeoDataFrame with a geometry column. Set its CRS to
            EPSG:3857 to get a contextily background map.
        ax: matplotlib axis to draw on
        col_of_interest: column used to color the geometries
        point_data: when True, draw markers (size 20) instead of shapes
    Returns:
        none
    ===
    """
    plot_kwargs = dict(cmap='YlOrRd', ax=ax, column=col_of_interest)
    if point_data:
        # Point layers get an explicit marker size.
        plot_kwargs['markersize'] = 20
    geo_df.plot(**plot_kwargs)
    # Only overlay web tiles when the frame is already in Web Mercator.
    if geo_df.crs == 3857:
        ctx.add_basemap(ax)
def hysplit_grid_post_processing(tri_grid):
    '''Collapse a harmonized HYSPLIT grid to per-tile totals as centroid points.

    Input:
    ----------
    tri_grid - an output coming from tri_concatenate (HYSPLIT output mapped
        onto the ESRI national grid), carrying an 'index' tile column and a
        'non_log_CO' column (shapefile-truncated name of 'non_log_CONC')
    Returns:
    sim_avg: a geodataframe with one centroid point per grid tile whose
        'non_log_CO' is the sum over the simulation run, in EPSG:3857
    '''
    # Sum the (non-log) concentration over all timestamps/chemicals per tile.
    tri_grid = tri_grid.groupby(['index']).agg({'geometry':'first','non_log_CO':'sum'})
    sim_avg = gpd.GeoDataFrame(tri_grid, geometry=tri_grid['geometry'])
    sim_avg.crs = 'epsg:3857' #taken from TRI concat which pushes to epsg 3857
    # Replace each 1 km square by its centroid so HYSPLIT output can be
    # compared point-to-point against STILT footprints.
    sim_avg['geometry'] = sim_avg['geometry'].centroid
    return sim_avg
# -
# # Load the Data
#
# Need to load the HYSPLIT data ouputs --> based upon puff simulation converted to grid.
# +
#Load External Data: Need State Boundaries and an empty grid file:
#State Data
# Read the Utah geodatabase with fiona, then keep only the schema-declared
# attribute columns plus the geometry.
state =fiona.open('/home/boogie2/Data_Science/Purp_Air_Analysis/Utah.gdb')
state_gdf = gpd.GeoDataFrame.from_features([feature for feature in state], crs=state.crs)
columns = list(state.meta["schema"]["properties"]) + ["geometry"]
state_gdf = state_gdf[columns]
state_gdf = state_gdf.to_crs('EPSG:3857')
#Grid Data
# 1 km x 1 km national-grid fishnet; reset_index() materializes the 'index'
# column that tri_concatenate's groupby relies on.
nat_grid=gpd.read_file("/home/boogie2/Hanson_Lab/Environmental_Exposome/TRI/NationalGrid.gdb")
nat_grid = nat_grid.reset_index()
#Intialized with NAD83
# The source carries no CRS, so it is assigned (NAD83 / UTM zone 12N) before
# projecting to Web Mercator -- TODO confirm the data really are UTM 12N.
nat_grid.crs = 'epsg:26912'
nat_grid = nat_grid.to_crs('epsg:3857')
# +
#Load in Conjugate Data from HYSPLIT
#So I need to create a 24 hour average grid cell for each
#Questioning if any of these are wrong. Attempting to compare all the releases in a single figure
# Each call walks one HO_<FRS>_<CAS>_<lat>_<lon>_... folder of hourly plume
# shapefiles and harmonizes it onto the 1 km national grid.
hysplit_11243_4166=tri_concatenate('/home/boogie2/Hanson_Lab/Environmental_Exposome/TRI/data/HO_110032607329_123911_41.66_-112.43_0.00_90_01_01_00',state_gdf,nat_grid
)
hysplit_11200_4173=tri_concatenate('/home/boogie2/Hanson_Lab/Environmental_Exposome/TRI/data/HO_110055233251_7664939_40.73_-112.0_0.00_90_01_01_00',state_gdf,nat_grid
)
hysplit_11189_4071=tri_concatenate('/home/boogie2/Hanson_Lab/Environmental_Exposome/TRI/data/HO_110064119555_7664939_40.71_-111.89_0.00_90_01_01_00',state_gdf,nat_grid
)
hysplit_11202_4109=tri_concatenate('/home/boogie2/Hanson_Lab/Environmental_Exposome/TRI/data/HO_110069446790_123911_41.09_-112.02_0.00_90_01_01_00',state_gdf,nat_grid
)
#HYSPLIT: Average the grid per the simulation run time --> 48 hours
# Collapse each harmonized grid to per-tile centroid points.
hysplit_11243_4166_sim_avg = hysplit_grid_post_processing(hysplit_11243_4166)
hysplit_11200_4173_sim_avg = hysplit_grid_post_processing(hysplit_11200_4173)
hysplit_11189_4071_sim_avg = hysplit_grid_post_processing(hysplit_11189_4071)
hysplit_11202_4109_sim_avg = hysplit_grid_post_processing(hysplit_11202_4109)
#saving
# Cache the processed results as shapefiles so later cells can reload them.
hysplit_11243_4166_sim_avg.to_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/validation/HYSPLIT_STILT_COMPAR/hysplit_11243_4166_sim_avg/shapey.shp')
hysplit_11200_4173_sim_avg.to_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/validation/HYSPLIT_STILT_COMPAR/hysplit_11200_4173_sim_avg/shapey.shp')
hysplit_11189_4071_sim_avg.to_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/validation/HYSPLIT_STILT_COMPAR/hysplit_11189_4071_sim_avg/shapey.shp')
hysplit_11202_4109_sim_avg.to_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/validation/HYSPLIT_STILT_COMPAR/hysplit_11202_4109_sim_avg/shapey.shp')
# +
#Load the simulations
# BUG FIX: the first read previously pointed at the hysplit_11189_4071_sim_avg
# folder (copy-paste error), so two of the four variables held the same
# simulation; load the matching 11243_4166 output instead.
hysplit_11243_4166_sim_avg = gpd.read_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/validation/HYSPLIT_STILT_COMPAR/hysplit_11243_4166_sim_avg')
hysplit_11200_4173_sim_avg = gpd.read_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/validation/HYSPLIT_STILT_COMPAR/hysplit_11200_4173_sim_avg')
hysplit_11189_4071_sim_avg = gpd.read_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/validation/HYSPLIT_STILT_COMPAR/hysplit_11189_4071_sim_avg')
hysplit_11202_4109_sim_avg = gpd.read_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/validation/HYSPLIT_STILT_COMPAR/hysplit_11202_4109_sim_avg')
#Get the data ready for plotting
# Re-aggregate to per-tile centroid points after the round-trip through disk.
hysplit_11243_4166_sim_avg = hysplit_grid_post_processing(hysplit_11243_4166_sim_avg)
hysplit_11200_4173_sim_avg = hysplit_grid_post_processing(hysplit_11200_4173_sim_avg)
hysplit_11189_4071_sim_avg = hysplit_grid_post_processing(hysplit_11189_4071_sim_avg)
hysplit_11202_4109_sim_avg = hysplit_grid_post_processing(hysplit_11202_4109_sim_avg)
#Adjust the values for better visualization
# Per-site concentration floors chosen by eye to de-clutter the maps.
hysplit_11243_4166_sim_avg = hysplit_11243_4166_sim_avg[hysplit_11243_4166_sim_avg.non_log_CO > 0.01]
hysplit_11200_4173_sim_avg = hysplit_11200_4173_sim_avg[hysplit_11200_4173_sim_avg.non_log_CO > 0.01]
hysplit_11189_4071_sim_avg = hysplit_11189_4071_sim_avg[hysplit_11189_4071_sim_avg.non_log_CO > 1]
hysplit_11202_4109_sim_avg = hysplit_11202_4109_sim_avg[hysplit_11202_4109_sim_avg.non_log_CO > 10]
# -
# # Stilt Simulations
#
# Create the simulation dataframe in an r script
#
# `x <- data.frame("lati" = c(41.663,40.72557,40.70739,41.093219), "long" = c(-112.432,-112.00081,-111.89273,-112.028221), "zagl" = c(0.0,0.0,0.0,0.0))`
#
# `saveRDS(x,file= 'data/processed/stilt_input/092120_hysplit_v_stilt.rds')`
#
# Run Simulations with:
#
# `src/stilt_run/092120_hysplit_v_stilt.r`
#
# Convert to Shapefiles (edit makefile)
#
# `make stilt_output_conversion`
#load_files:
# STILT footprint shapefiles for the same four release sites; folder names
# encode <timestamp>_<lon>_<lat>_<agl>.
stilt_11243_4166_sim_avg = gpd.read_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/processed/stilt_output/092120_hysplit_v_stilt/199001010000_-112.432_41.663_0_foot/')
stilt_11200_4073_sim_avg = gpd.read_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/processed/stilt_output/092120_hysplit_v_stilt/199001010000_-112.00081_40.72557_0_foot/')
stilt_11189_4071_sim_avg = gpd.read_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/processed/stilt_output/092120_hysplit_v_stilt/199001010000_-111.89273_40.70739_0_foot/')
# NOTE(review): this folder uses the alternative coordinate pair
# (-112.028221, 41.093219) mentioned in the markdown cell below -- confirm it
# corresponds to the HYSPLIT 11202_4109 run it is compared against.
stilt_11203_4109_sim_avg = gpd.read_file('/home/boogie2/Hanson_Lab/TRI_STILT/data/processed/stilt_output/092120_hysplit_v_stilt/199001010000_-112.028221_41.093219_0_foot/')
# ### Visual Comparison: STILT vs HYSPLIT
#
# TRI Original Release Locations:
#
# 11243_4166 -112.432 41.663
# <br>
# 11200_4073 -112.000 40.726
# <br>
# 11189_4071 -111.89273 40.70739
# <br>
# 11203_4109 -112.01698 41.09394 #This potentially has another option - 41.093219,-112.028221
#
# **Conclusions**
# +
# TRI facility release coordinates (lat/lon, WGS84) for the four simulated sites.
original_TRI_release_locations = pd.DataFrame({
    'lati': [41.663, 40.726, 40.70739, 41.09394],
    'long': [-112.432, -112.000, -111.89273, -112.01698],
})
#Convert to a geodataframe: lon/lat points declared as EPSG:4326, then
#re-projected to Web Mercator (EPSG:3857) to match the simulation layers.
_release_points = gpd.points_from_xy(
    x=original_TRI_release_locations['long'],
    y=original_TRI_release_locations['lati'],
)
original_TRI_release_locations_gdf = (
    gpd.GeoDataFrame(original_TRI_release_locations, geometry=_release_points)
    .set_crs(epsg=4326)
    .to_crs(epsg=3857)
)
# +
# Side-by-side comparison for site 1 (-112.432, 41.663):
# HYSPLIT concentrations (left) vs STILT footprint (right).
fig,ax = plt.subplots(1,2,figsize=(20,20))
for temp_ax in ax:
    #Add the point source location
    original_TRI_release_locations_gdf.iloc[0:1].plot(color='r', markersize=40,alpha=0.5,ax=temp_ax)
    #Remove the axis
    temp_ax.axes.get_xaxis().set_visible(False)
    temp_ax.axes.get_yaxis().set_visible(False)
geoplot(hysplit_11243_4166_sim_avg,ax[0],'non_log_CO', point_data = True)
geoplot(stilt_11243_4166_sim_avg,ax[1],'foot', point_data=True)
#Adjust the legends
ax[0].set_title('HYSPLIT Output: -112.432 41.663')
ax[0].legend(['TRI Emission Location'],loc='lower right')
ax[1].set_title('STILT Output: -112.432 41.663')
ax[1].legend(['TRI Emission Location'],loc='lower right')
# Save to disk and close instead of displaying inline.
plt.savefig('/home/boogie2/Hanson_Lab/TRI_STILT/figures/hysplit_v_stilt/hysplit_vs_stilt_-112.432_41.663.png')
plt.close()
# +
# Side-by-side comparison for site 2: HYSPLIT (left) vs STILT (right).
fig,ax = plt.subplots(1,2,figsize=(20,20))
for temp_ax in ax:
    #Add the point source location
    original_TRI_release_locations_gdf.iloc[1:2].plot(color='r', markersize=40,alpha=0.5,ax=temp_ax)
    #Remove the axis
    temp_ax.axes.get_xaxis().set_visible(False)
    temp_ax.axes.get_yaxis().set_visible(False)
geoplot(hysplit_11200_4173_sim_avg,ax[0],'non_log_CO', point_data = True)
geoplot(stilt_11200_4073_sim_avg,ax[1],'foot',point_data=True)
#Adjust the legends
# BUG FIX: titles/filename previously read "41.726"; this site's latitude is
# 40.726 (see the release-location table in the markdown above: -112.000 40.726).
ax[0].set_title('HYSPLIT Output: -112.000 40.726')
ax[0].legend(['TRI Emission Location'],loc='lower right')
ax[1].set_title('STILT Output: -112.000 40.726')
ax[1].legend(['TRI Emission Location'],loc='lower right')
plt.savefig('/home/boogie2/Hanson_Lab/TRI_STILT/figures/hysplit_v_stilt/hysplit_vs_stilt_-112.000_40.726.png')
plt.close()
# +
# Side-by-side comparison for site 3 (-111.89273, 40.70739).
fig,ax = plt.subplots(1,2,figsize=(20,20))
for temp_ax in ax:
    #Add the point source location
    original_TRI_release_locations_gdf.iloc[2:3].plot(color='r', markersize=40,alpha=0.5,ax=temp_ax)
    #Remove the axis
    temp_ax.axes.get_xaxis().set_visible(False)
    temp_ax.axes.get_yaxis().set_visible(False)
geoplot(hysplit_11189_4071_sim_avg,ax[0],'non_log_CO', point_data = True)
geoplot(stilt_11189_4071_sim_avg,ax[1],'foot',point_data=True)
#Adjust the legends
ax[0].set_title('HYSPLIT Output: -111.89273 40.70739 ')
ax[0].legend(['TRI Emission Location'],loc='lower right')
ax[1].set_title('STILT Output: -111.89273 40.70739 ')
ax[1].legend(['TRI Emission Location'],loc='lower right')
plt.savefig('/home/boogie2/Hanson_Lab/TRI_STILT/figures/hysplit_v_stilt/hysplit_vs_stilt_-111.89273_40.70739.png')
plt.close()
# +
# Side-by-side comparison for site 4 (-112.01698, 41.09394).
fig,ax = plt.subplots(1,2,figsize=(20,20))
for temp_ax in ax:
    #Add the point source location
    original_TRI_release_locations_gdf.iloc[3:].plot(color='r', markersize=40,alpha=0.5,ax=temp_ax)
    #Remove the axis
    temp_ax.axes.get_xaxis().set_visible(False)
    temp_ax.axes.get_yaxis().set_visible(False)
geoplot(hysplit_11202_4109_sim_avg,ax[0],'non_log_CO', point_data = True)
geoplot(stilt_11203_4109_sim_avg,ax[1],'foot',point_data=True)
#Adjust the legends
ax[0].set_title('HYSPLIT Output: -112.01698 41.09394')
ax[0].legend(['TRI Emission Location'],loc='lower right')
ax[1].set_title('STILT Output: -112.01698 41.09394')
ax[1].legend(['TRI Emission Location'],loc='lower right')
plt.savefig('/home/boogie2/Hanson_Lab/TRI_STILT/figures/hysplit_v_stilt/hysplit_vs_stilt_-112.01698_41.09394.png')
plt.close()
| notebooks/hysplit_vs_stilt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:astro3]
# language: python
# name: conda-env-astro3-py
# ---
import numpy as np
import matplotlib.pyplot as plt
#This is an analysis of the K2 data for Neptune
#
#Data are from Rowe et al. 2017
# https://ui.adsabs.harvard.edu/?#abs/2017AJ....153..149R
#See Simon et al. 2016 for a published analysis and physical interpreatin
# http://adsabs.harvard.edu/abs/2016ApJ...817..162S
from astropy.table import Table
# K2 long-cadence light curve of Neptune (VOTable from Rowe et al. 2017).
t = Table.read('K2Neptune_votable.vot')
t
# Quick-look plot of the full light curve.
plt.plot(t['BJD'],t['Flux'],'k-')
idataset_size=t['BJD'].size
print(idataset_size)
#make Lomb-Scargle
# See http://docs.astropy.org/en/stable/stats/lombscargle.html
# NOTE(review): in astropy >= 3.2 LombScargle lives in astropy.timeseries and
# the astropy.stats import path is deprecated -- confirm the installed version.
from astropy.stats import LombScargle
frequency, power = LombScargle(t['BJD'], t['Flux'],t['e_Flux']).autopower()
# should change units properly instead of just multiplying...
# (BJD is in days, so frequency is cycles/day; period in hours = 24/frequency)
period_hr=24.0/frequency
plt.plot(period_hr,power)
plt.xlim(10,25)
# Overlay three time-shifted chunks of the light curve to eyeball repeatability.
plt.plot(t['BJD']-16.4,t['Flux'],'k-',t['BJD']-36.4,t['Flux'],'b-',t['BJD']-60.4,t['Flux'],'r-')
plt.xlim(0,5)
#This should look something like Figure 1 of Simon et al.
# +
#Let's exoplore some of the tools from <NAME>'s (DFM) exoplanet toolchest
# In particular, the stellar variability model using Gaussian processes
# Personally I first saw this in Ruth Angus's papers and seminars so it should be cited too.
#
# see https://exoplanet.dfm.io/en/stable/tutorials/stellar-variability/
#
# However, it is a single period model when we know this is multi-period. Let's see
# what happens anyway
#
# First, import the stuff
import pymc3 as pm
import exoplanet as xo
import theano.tensor as tt
# +
# LEt's rename arrays and manipulate as in the tutorial. Otherwise I get an error, either
# due to the units or something that is infinite/not-a-number.
xall=t['BJD']
yall=t['Flux']
yerrall=t['e_Flux']
# Keep only rows where time and flux are finite; force contiguous float64.
m = np.isfinite(xall) & np.isfinite(yall)
xall = np.ascontiguousarray(xall[m], dtype=np.float64)
yall = np.ascontiguousarray(yall[m], dtype=np.float64)
yerrall = np.ascontiguousarray(yerrall[m], dtype=np.float64)
# -
#now let us choose random subset of 1/30 the data.
ismallsize=idataset_size//30
#Use method from stack overflow
#https://stackoverflow.com/questions/47941079/can-i-make-random-mask-with-numpy?rq=1
# Boolean mask with exactly ismallsize True entries in random positions.
# NOTE(review): the mask is sized from the UNFILTERED dataset; if the finite
# mask above dropped any rows, len(xall) != idataset_size and this indexing
# fails -- confirm the table contains no NaNs.
amask = np.full(idataset_size, False)
amask[:ismallsize] = True
np.random.shuffle(amask)
#
x=xall[amask]
y=yall[amask]
yerr=yerrall[amask]
#
# +
# Let's try the DFM exoplanet periodogram as in the tutorial...
# even though we did our own above!
# Search 5-25 day periods on the subsample; keep only the strongest peak.
results = xo.estimators.lomb_scargle_estimator(
    x, y, max_peaks=1, min_period=5.0, max_period=25.0,
    samples_per_peak=50)
peak = results["peaks"][0]
freq, power = results["periodogram"]
# x-axis is log10(period) = -log10(frequency); mark the detected peak.
plt.plot(-np.log10(freq), power, "k")
plt.axvline(np.log10(peak["period"]), color="k", lw=4, alpha=0.3)
plt.xlim((-np.log10(freq)).min(), (-np.log10(freq)).max())
plt.yticks([])
plt.xlabel("log10(period)")
plt.ylabel("power");
# -
# Analyzing all the data does not make so much sense. We have high signal-to-noise observations sampled every minute for rotation period of ~16-20 hours; the values are very tightly constrained! It might be illustrative to only analyze a fraction like 1/100 of the data.
#
# Full data (black line) with the random subsample (red dots) overplotted.
plt.plot(xall,yall,'k-',x,y,'r.')
plt.xlabel("time (days)")
plt.ylabel("relative flux");
plt.xlim(30,40)
with pm.Model() as model:
    # The mean flux of the time series
    mean = pm.Normal("mean", mu=0.0, sd=10.0)
    # A jitter term describing excess white noise
    logs2 = pm.Normal("logs2", mu=2*np.log(np.min(yerr)), sd=5.0)
    # The parameters of the RotationTerm kernel: amplitude, primary period,
    # quality factors, and the mix between the two oscillation modes -- all
    # sampled in log space with weakly-informative normal priors.
    logamp = pm.Normal("logamp", mu=np.log(np.var(y)), sd=5.0)
    logperiod = pm.Normal("logperiod", mu=np.log(peak["period"]), sd=5.0)
    logQ0 = pm.Normal("logQ0", mu=1.0, sd=10.0)
    logdeltaQ = pm.Normal("logdeltaQ", mu=2.0, sd=10.0)
    mix = pm.Uniform("mix", lower=0, upper=1.0)
    # Track the period as a deterministic
    period = pm.Deterministic("period", tt.exp(logperiod))
    # Set up the Gaussian Process model
    kernel = xo.gp.terms.RotationTerm(
        log_amp=logamp,
        period=period,
        log_Q0=logQ0,
        log_deltaQ=logdeltaQ,
        mix=mix
    )
    gp = xo.gp.GP(kernel, x, yerr**2 + tt.exp(logs2), J=4)
    # Compute the Gaussian Process likelihood and add it into the
    # the PyMC3 model as a "potential"
    pm.Potential("loglike", gp.log_likelihood(y - mean))
    # Compute the mean model prediction for plotting purposes
    pm.Deterministic("pred", gp.predict())
    # Optimize to find the maximum a posteriori parameters
    map_soln = xo.optimize(start=model.test_point)
# plot as in tutorial
# MAP model over the analyzed subsample (whole time range).
plt.plot(x, y, "k.", label="data")
plt.plot(x, map_soln["pred"], color="C1", label="model")
plt.xlim(x.min(), x.max())
plt.legend(fontsize=10)
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.title("Neptune; map model");
# plot as in tutorial but with everything and just a few days
plt.plot(xall, yall, "r.", label="all")
plt.plot(x, y, "k.", label="data")
plt.plot(x, map_soln["pred"], color="C1", label="model")
plt.xlim(x.min(), x.max())
plt.legend(fontsize=10)
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.title("Neptune; map model");
plt.xlim(30,35)  # final zoom to a 5-day window
#citations
# List the software citations that this PyMC3/exoplanet model depends on.
with model:
    txt, bib = xo.citations.get_citations_for_model()
print(txt)
# the bibtext entry (first 10 lines only)
print("\n".join(bib.splitlines()[:10]) + "\n...")
# Zoomed views of the MAP model in three different time windows.
plt.plot(x, y, "k", label="data")
plt.plot(x, map_soln["pred"], color="C1", label="model")
plt.xlim(16.4, 21.4)
plt.legend(fontsize=10)
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.title("Neptune; map model");
plt.plot(x, y, "k", label="data")
plt.plot(x, map_soln["pred"], color="C1", label="model")
plt.xlim(60.4, 65.4)
plt.legend(fontsize=10)
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.title("Neptune; map model");
plt.plot(x, y, "k", label="data")
plt.plot(x, map_soln["pred"], color="C1", label="model")
plt.xlim(30, 40)
plt.legend(fontsize=10)
plt.xlabel("time [days]")
plt.ylabel("relative flux")
plt.title("Neptune; map model");
# Run NUTS on the model above. xo.PyMC3Sampler tunes the mass matrix in
# windows; finish=200 is the length of the final tuning window.
np.random.seed(42)
sampler = xo.PyMC3Sampler(finish=200)
with model:
    sampler.tune(tune=2000, start=map_soln, step_kwargs=dict(target_accept=0.9))
    trace = sampler.sample(draws=2000)
# Convergence/summary table for the sampled parameters.
pm.summary(trace, varnames=["mix", "logdeltaQ", "logQ0", "logperiod", "logamp", "logs2", "mean"])
# Histogram of the posterior for the rotation period, in days.
period_samples = trace["period"]
bins = np.linspace(.5, 1.0, 100)
plt.hist(period_samples, bins, histtype="step", color="k")
plt.yticks([])
plt.xlim(bins.min(), bins.max())
plt.xlabel("rotation period [days]")
plt.ylabel("posterior density");
# Conclusion: The Gaussian Process with a periodic kernel does a great job of describing the data, even with a small subset of the data. However, this flexibility lets it use a relatively uncertain period (0.7-0.8 days, i.e. 17-19 hours centered on the strongest period). The rich K2 dataset lets us see in the full periodogram (as Simon et al. discussed) that there are multiple rotation periods due to wind.
| Neptune.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1.3 应运而生的在线系统
#
# ### 1.3.1 LaTeX在线系统的出现
#
# 上世纪80年代,LaTeX作为一件新生事物,在发布之初便引起了人们极大的兴趣,虽然在制作文档方面拥有很多办公软件都无法比拟的强大优势,尤其在数学公式编写及高效排版上具有很大优势,但是由于其较高的使用门槛(使用计算机程序语言进行编译)和安装成本(本地安装需要花费大量的时间配置相应的环境),在很长一段时间里,LaTeX主要用户都是科研工作者。然而,LaTeX在线系统的出现已实实在在地改变了这一尴尬局面。
#
# 随着信息技术快速发展、互联网深度普及,人们的工作生活方式也在发生着很大改变,很多过去安装在本地的操作软件都被搬到了浏览器上,人们无须在个人计算机上安装各类办公软件就能进行办公,这带来了极大的便利。不过这类在线系统也存在一些先决条件,例如,出于计算资源方面的考虑,通常要求在线系统的类型不能是计算密集型,因为计算密集型的在线系统往往需要大量的计算资源作支撑。反观LaTeX,尽管我们可以认为LaTeX是一种计算机程序语言,但实际上,其对计算资源的需求并不是很大。
#
# 在过去,受网速限制,使用线上系统几乎是一件难以想象的事。然而,在线系统的兴起并非空穴来风,一方面是目前的网速已经跟过去发生了质的变化,另一方面则是上网成本在急剧降低,互联网触手可及,已经成为人们日常生活和工作中不可或缺的一部分。以前,我们可能已经习惯了在本地计算机上安装和使用各类软件或者集成开发环境,不过以LaTeX为例,在本地计算机上安装的集成开发环境也有很多缺陷:
#
# - 第一,我们需要为安装LaTeX编辑器腾出很大的存储空间。
# - 第二,某些特定的宏包需要额外安装和配置,但安装过多宏包之后又会使LaTeX变得很臃肿,甚至是不友好。
# - 第三,当我们在本地计算机使用LaTeX制作文档时,我们很难与合作者进行协同创作。
#
# 在这个背景下,一些成熟的LaTeX在线系统逐渐走进人们的视野,并受到很多用户的喜爱,其中,最为著名的LaTeX在线系统便是overleaf.com。这些LaTeX在线系统不仅支持各种语言、各种拓展宏包等复杂的LaTeX环境,同时也支持实时编译和实时预览编译后的文档,就算是换一台电脑,也丝毫不会影响创作过程,创作完成之后,可以选择下载压缩文件包(如`.zip`),也可以只导出PDF文档,毫无疑问,这些人性化的设计都是为了让LaTeX更加便捷和高效。除此之外,现有的LaTeX在线系统还提供大量的LaTeX模板库,科技论文、毕业设计、幻灯片、海报、简历等参考模板一应俱全,就连LaTeX使用文档也数不胜数。
#
# > Overleaf是一个初创的科技企业,它的主要业务是构建现代化协作创作工具,即LaTeX在线系统,旨在让科学研究变得更加便捷和高效。目前,Overleaf已合并另一款著名的LaTeX在线系统ShareLaTeX,在全球范围内拥有超过600万用户,这些用户大多是来自于高校和研究机构的研究人员、老师以及学生,只要打开网址overleaf.com,用户无需在本地计算机配置LaTeX环境就可以创建各种LaTeX项目。关于Overleaf的介绍可参考[https://www.overleaf.com/about](https://www.overleaf.com/about)。
#
# <p align="center">
# <img align="middle" src="graphics/overleaf_webpage.png" width="900" />
# </p>
#
# <center><b>图1.3</b> Overleaf首页,图片来源于Overleaf官网。</center>
#
# ### 1.3.2 LaTeX在线系统的特点
#
# 以Overleaf为例,该LaTeX在线系统往往具备以下几点特征:
#
# - 免费和开源。可以免费注册和使用,不用下载和安装LaTeX编辑器,这一点对于初学者来说无疑是非常友好的。
# - 使用简单。不管是在计算机、手机还是其他终端上,我们只需要使用浏览器打开overleaf.com就可以开始创作,另外,由于Overleaf界面非常简洁,所以用户使用起来也非常便利。
# - 支持实时在线编辑。有各类LaTeX插件,编辑功能十分完善,且具有实时编译和预览功能。
# - 支持在线协作。创作文档时,我们可以将文档项目分享给合作者进行协作,Overleaf支持实时编译,不会出现版本控制混乱等问题。
# - 支持双向定位。可以在LaTeX代码与PDF文档内容之间进行双向定位。
# - 提供丰富的模板库。Overleaf有着非常庞大的模板库,不仅有正式的学术论文、学位论文和书籍的参考模板,还有很多美观的报告、简历、幻灯片模板。就论文写作来说,用户可以在Overleaf官网找到众多期刊的LaTeX模板,根据使用说明,用户很容易就能用于撰写自己的论文。
# - 提供大量的帮助文档。LaTeX提供了齐全的帮助文档,从LaTeX快速入门、基础操作到编译数学公式,应有尽有、一应俱全,且这些文档内容具有很强的实操性。
#
# <p align="center">
# <img align="middle" src="graphics/overleaf_example.png" width="900" />
# </p>
#
# <center><b>图1.4</b> Overleaf编辑器界面,主要由代码区域和文档预览区域组成。</center>
#
# LaTeX在线系统的出现大大降低了LaTeX的使用门槛,也为用户省去了繁琐的安装和配置过程。其实,LaTeX在线系统的出现并非个例,很多办公软件为迎合用户需求和时代发展趋势,陆续转变了产品研发思路,包括微软在线Office系统、腾讯在线文档等在内的很多在线系统都走进了人们的视野,这些在线系统能够在线备份、满足人们对随时随地办公的需求,在确保便捷和高效的同时,在线和共享的理念正在潜移默化地影响着人们的办公模式。
#
# 【回放】[**1.2 引领浪潮的LaTeX**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-1/section2.ipynb)
#
# 【继续】[**1.4 LaTeX问答社区**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-1/section4.ipynb)
# ### License
#
# <div class="alert alert-block alert-danger">
# <b>This work is released under the MIT license.</b>
# </div>
| chapter-1/section3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # BeautifulSoup tutorial - basic web scraping
# > "My first attempt at web scraping, following a Youtube tutorial by <NAME>"
#
# - toc: True
# - branch: master
# - badges: true
# - categories: [Web scrap, Learning from others]
# - hide: false
# - search_exclude: true
# - metadata_key1: metadata_value1
# - metadata_key2: metadata_value2
# I followed a tutorial from Youtube. I tried to do my best to do the exercises before watching the solution by <NAME>:
#
# https://www.youtube.com/watch?v=GjKQ6V_ViQE
# # Load web
# + code_folding=[0]
# collapse
import pandas as pd
import requests
from bs4 import BeautifulSoup as bs
# + code_folding=[0]
# collapse
# define the link
my_url = 'https://keithgalli.github.io/web-scraping/webpage.html'
# Download the raw HTML of the practice page.
r = requests.get(my_url)
# call the soup function to make the html readable
# (no explicit parser argument, so BeautifulSoup picks its default parser)
webpage = bs(r.content)
# +
#print(webpage.prettify())
# -
# # Grab all social links
# ## My attempt
# + code_folding=[0]
# collapse
# Walk every "socials" <ul> on the page and print the href of each anchor
# nested inside it.
for social_list in webpage.find_all("ul", {"class": "socials"}):
    for anchor in social_list.find_all("a"):
        print(anchor.get("href"))
# -
# ## suggested solutions
# + code_folding=[0]
# collapse
# Solution 1: a single CSS selector drills straight to the anchors.
links2 = webpage.select("ul.socials a")
actual_links = [ link['href'] for link in links2]
actual_links
# + code_folding=[0]
# collapse
# Solution 2: find the <ul> first, then collect its anchors.
ulist = webpage.find("ul", {"class":"socials"})
links = ulist.find_all("a")
actual_links = [ link['href'] for link in links]
actual_links
# + code_folding=[0]
# collapse
# Solution 3: select via the <li class="social"> elements instead.
links = webpage.select("li.social a")
links
actual_links = [ link['href'] for link in links]
actual_links
# -
# # Scrape table into dataframe
# ## My attempt
# + code_folding=[0]
# collapse
# Select the stats table and pull out all of its rows.
table = webpage.select('table.hockey-stats')
table_rows = table[0].find_all('tr')
print(table_rows)
# + code_folding=[0]
# collapse
# Build a list of rows, one list of cell texts per <tr>.
l = []
for tr in table_rows:
    td = tr.find_all('td')
    # NOTE: the comprehension variable shadows the outer loop's `tr`.
    row = [tr.text for tr in td]
    l.append(row)
df = pd.DataFrame(l)
df
# -
# ## Suggested solutions
# + code_folding=[0]
# collapse
# Suggested solution: take the column names from <thead> and the data rows
# from <tbody>, so the header is not mixed into the data.
table = webpage.select('table.hockey-stats')[0]
columns = table.find("thead").find_all("th")
column_names = [c.string for c in columns]
# + code_folding=[0]
# collapse
table_rows = table.find("tbody").find_all("tr")
l = []
for tr in table_rows:
    td = tr.find_all('td')
    # get_text() + strip() removes surrounding whitespace from each cell.
    row = [str(tr.get_text()).strip() for tr in td]
    l.append(row)
df = pd.DataFrame(l, columns = column_names)
df
# -
# # Grab all fun facts containing the word "is"
# ## My attempt
# + code_folding=[0]
# collapse
# Grab the first "fun-facts" list from the page.
fun_facts = webpage.find_all("ul",{"class":"fun-facts"})[0]
fun_facts
# -
import re
# + code_folding=[0]
# collapse
# Print every descendant tag whose text contains "is".
# NOTE: plain substring matching also hits words that merely contain "is"
# (e.g. "this"), and find_all() with no arguments visits every nested tag.
for fact in fun_facts.find_all():
    check = fact.text
    if "is" in check:
        print(fact.get_text())
# -
# ## Solutions
# + code_folding=[0]
# collapse
# CSS-select each <li> of the fun-facts list, then keep only those whose
# string content matches the regex "is".
facts = webpage.select("ul.fun-facts li")
# NOTE: the comprehension variable shadows the list name `facts`.
facts_with_is = [facts.find(string=re.compile("is")) for facts in facts]
# Map each matched string back to its parent tag's full text; drop the Nones.
facts_with_is = [fact.find_parent().get_text() for fact in facts_with_is if fact]
facts_with_is
# -
# # Download image from webpage
# ## My attempt
# + code_folding=[0]
# collapse
# Locate the gallery images and take the first one's relative src.
imgs = webpage.select("div.row div.column img")
one_pic = imgs[0]['src']
# + code_folding=[0]
# collapse
# The src is relative, so prepend the site root.
image_url = "https://keithgalli.github.io/web-scraping/"+one_pic
image_url
# + code_folding=[0]
# collapse
# Stream the response and let PIL decode/re-save it as a local JPEG.
from PIL import Image
img = Image.open(requests.get(image_url, stream = True).raw)
img.save('image.jpg')
# -
# ## Solution
# + code_folding=[0]
# collapse
# Suggested solution: build the absolute URL, then write the raw bytes
# straight to disk (no image library needed).
url = "https://keithgalli.github.io/web-scraping/"
images = webpage.select("div.row div.column img")
image_url = images[0]['src']
full_url = url+image_url
print(full_url)
# + code_folding=[0]
# collapse
img_data = requests.get(full_url).content
with open("lake_como.jpg",'wb') as handler:
    handler.write(img_data)
# -
# # Final exercise
# ## My attempt
# + code_folding=[0]
# collapse
# For each linked challenge file, fetch the page and print the paragraph
# with id "secret-word". Relies on `url` assigned in the solution cell above
# (already executed earlier in the notebook).
secrets = webpage.select("div.block a")
for secret in secrets:
    link = secret['href']
    my_url_new = url+link
    r_new = requests.get(my_url_new)
    webpage_new = bs(r_new.content)
    msg = webpage_new.find("p", {"id":"secret-word"}).get_text()
    print(msg)
# -
# ## Solution
# + code_folding=[0]
# collapse
# Suggested solution: collect the relative hrefs first, then fetch each page
# and read the secret word via the tag's .string attribute.
files = webpage.select("div.block a")
relative_files = [f['href'] for f in files]
relative_files
for f in relative_files:
    full_url = url+f
    page = requests.get(full_url)
    bs_page = bs(page.content)
    secret_word_element = bs_page.find("p", {"id":"secret-word"})
    secret_word = secret_word_element.string
    print(secret_word)
# -
| _notebooks/2020-10-27-web-scrap-tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def isPrime(n):
    """Return 1 if n is prime (or n < 2), otherwise n's smallest divisor >= 2.

    Note: despite the name, this does not return a boolean; the original
    contract is 1 == "prime", any other value == smallest factor, and n < 2
    also yields 1.  Trial division only needs to run up to sqrt(n): any
    composite n has a divisor no larger than its square root, and the first
    divisor met while counting up is automatically the smallest.  This makes
    the function O(sqrt(n)) instead of the original O(n) scan to n // 2.
    """
    if n < 2:
        # Matches the original behavior: the empty divisor scan returned 1.
        return 1
    for candidate in range(2, math.isqrt(n) + 1):
        if n % candidate == 0:
            return candidate  # first hit is the smallest divisor
    return 1  # no divisor found -> prime
# Smallest-factor probe of a large integer (slow under the original O(n) loop).
isPrime(1072843847)
# Quick sanity check of floor division.
4 //2
import numpy as np
# Each row is [product name, production cost, sales price].
dataset = [['Calculator', 100, 1000],
           ['music player', 500, 3000],
           ['Visual Novel', 1500, 5000],
           ['Console', 3500, 8000],
           ['2D Vector Art', 5000, 6500],
           ['XBox', 6000, 7000],
           ['PS', 8000, 15000],
           ['Simulator', 9500, 20000],
           ['Mobile', 12000, 21000],
           ['RPG', 14000, 25000],
           ['Monitor', 15500, 27000],
           ['Tablet', 16500, 30000],
           ['Laptop', 25000, 52000],
           ['MMORPG', 30000, 80000]]
#write your Logic here:
# NOTE: np.array on this mixed list upcasts every element to a string.
x = np.array(dataset)
x.shape
# +
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
#INPUT [uncomment & modify if required]
# Placeholder result printed at the end (template value from the exercise).
result=-404
# Each row is [product name, production cost, sales price].
dataset = [['Calculator', 100, 1000],
           ['music player', 500, 3000],
           ['Visual Novel', 1500, 5000],
           ['Console', 3500, 8000],
           ['2D Vector Art', 5000, 6500],
           ['XBox', 6000, 7000],
           ['PS', 8000, 15000],
           ['Simulator', 9500, 20000],
           ['Mobile', 12000, 21000],
           ['RPG', 14000, 25000],
           ['Monitor', 15500, 27000],
           ['Tablet', 16500, 30000],
           ['Laptop', 25000, 52000],
           ['MMORPG', 30000, 80000]]
#write your Logic here:
# Dummy encode the categorical values
data = pd.DataFrame(dataset, columns=["Name", "production_cost", "sales_price"])
final_data = pd.get_dummies(data)
# Extract the features and the target variable
all_features = final_data.drop(["sales_price"], axis=1)
target = final_data.pop("sales_price")
# Split the data into train and test set (80% train, 20% test set)
train_x, test_x, train_y, test_y = train_test_split(all_features, target, test_size =0.2, random_state=2021)
# fit the CLF on the training set, and then test on the test set.
# NOTE: despite the name, this is a DecisionTreeRegressor (regression, not
# classification); score() therefore reports R^2 on the test set.
classifier = DecisionTreeRegressor()
classifier.fit(train_x, train_y)
print(classifier.score(test_x, test_y))
#OUTPUT [uncomment & modify if required]
# The template reads a sample but still prints the placeholder `result`.
name = input()
prod_cost = int(input())
print(result)
# -
# Inspect the dummy-encoded feature frame.
all_features

# Capture the training column order so new samples can be aligned to it.
cols = list(all_features.columns)
# BUG FIX: the next line was a pasted cell OUTPUT, not code, and made the
# cell a SyntaxError; it is kept below as a comment for reference.
# production_cost Name_2D Vector Art Name_Calculator Name_Console Name_Laptop Name_MMORPG Name_Mobile Name_Monitor Name_PS Name_RPG Name_Simulator Name_Tablet Name_Visual Novel Name_XBox Name_music player
# +
#OUTPUT [uncomment & modify if required]
# Read one new sample, encode it the same way as the training data, and
# align its columns with the training feature matrix before predicting.
name = input()
prod_cost = int(input())

# Build the sample with the SAME column names used during training so that
# get_dummies produces compatible dummy column names (e.g. "Name_XBox").
new_data = pd.DataFrame([[name, prod_cost]], columns=["Name", "production_cost"])
new_data = pd.get_dummies(new_data)

# BUG FIX: the original called all_features.columns() (TypeError: .columns is
# a property) and fed the model a frame with mismatched columns. reindex()
# adds any missing dummy columns (filled with 0) and drops unseen ones, so the
# input has exactly the columns the model was fit on.
new_data = new_data.reindex(columns=all_features.columns, fill_value=0)

result = classifier.predict(new_data)
print(result)
# -
# Dummy encode the categorical values
# Scratch cell: build a frame with short column names and preview it
# (the get_dummies call is left commented out).
dataset2 = pd.DataFrame(dataset, columns = ["Name", "prod", "sale_price"])
#pd.get_dummies(dataset2)
dataset2
# +
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
#INPUT [uncomment & modify if required]
# Placeholder result printed at the end (template value from the exercise).
result=-404
# Each row is [product name, production cost, sales price].
dataset = [['Calculator', 100, 1000],
           ['music player', 500, 3000],
           ['Visual Novel', 1500, 5000],
           ['Console', 3500, 8000],
           ['2D Vector Art', 5000, 6500],
           ['XBox', 6000, 7000],
           ['PS', 8000, 15000],
           ['Simulator', 9500, 20000],
           ['Mobile', 12000, 21000],
           ['RPG', 14000, 25000],
           ['Monitor', 15500, 27000],
           ['Tablet', 16500, 30000],
           ['Laptop', 25000, 52000],
           ['MMORPG', 30000, 80000]]
#write your Logic here:
# NOTE: np.array on this mixed list upcasts every element to a string, so
# numeric columns must be cast back to float before sklearn can fit them.
data = np.array(dataset)
data.shape # (14, 3)
# Use only the numeric production-cost column as the feature: the raw "Name"
# strings cannot be fit by DecisionTreeRegressor without encoding (see the
# pd.get_dummies variant of this cell above).
all_features = data[:, 1:2].astype(float)
target = data[:, -1].astype(float)
# Split the data into train and test set (80% train, 20% test set)
train_x, test_x, train_y, test_y = train_test_split(all_features, target, test_size =0.2, random_state=2021)
# fit the regressor on the training set, and then score it on the test set.
classifier = DecisionTreeRegressor()
classifier.fit(train_x, train_y)
# BUG FIX: "classifer" was a typo (NameError); the model is named "classifier".
print(classifier.score(test_x, test_y))
#OUTPUT [uncomment & modify if required]
name = input()
prod_cost = int(input())
print(result)
# -
| Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:data-science] *
# language: python
# name: conda-env-data-science-py
# ---
# +
# default_exp syllabus
# -
#hide
from nbdev.showdoc import *
# # Principles of Automatic Control
# ------------------------------
# Instructor: <NAME>
#
# _<EMAIL>_
#
# Office hours: After lecture
# <table style='margin: 0 auto' rules=none>
# <tr>
# <!--td> <img src="img/0.robot-and-sensors.png" alt="0.robot" style="height: 500px;"/> </td-->
# <td> <img src="img/0.philos-and-sensors.png" alt="0.robot" style="height: 400px;"/> </td>
# <td> <img src="img/0.iNeptune-pMarineViewer-sim3.png" alt="0.iNeptune-pMarineViewer-sim3" style="height: 400px;"/> </td>
# </tr>
# </table>
#
# <table style='margin: 0 auto' rules=none>
# <tr>
# <td> <img src="img/0.pathplan_philos_07-21_1312_combined.png" alt="0.pathplan_philos_07-21_1312_combined" style="width: 1000px;"/> </td>
# </tr>
# </table>
# *Experiment done in 2021 (SeeByte Ltd and the Massachusetts Institute of Technology)*
# ## Course Meeting Times
# Lectures: 2 sessions / week, Wednesday 3 hour / session (2.30pm - 5.30pm) and Thursday 2 hour / session (3.30pm - 5.30pm)
# ## Course Description
#
# This course introduces the design of feedback control systems. Topics include the properties and advantages of feedback systems, time-domain and frequency-domain performance measures, stability and degree of stability, the Root locus method, Nyquist criterion, and frequency-domain design.
#
# - A bit on Python/Jupyter notebook
# - Open loop vs Closed Loop
# - Transfer functions and Laplace transform
# - Block Diagrams
# - Reponse of a system
# - Frequency response and Bode plots
# - Final Value Theorem and Steady State
# - System Stability and Control
# - The Root Locus Method
# - Nyquist Stability Criterion
# - PID controllers
# - PID and root locus
# - Gain and phase margins
# - Sensitivity Functions
# - Lead/Lag compensators
# ## Textbook
# A useful but not mandatory textbook for this class is:
#
# - Ogata, Katsuhiko. Modern Control Engineering. 4th ed. Prentice Hall, 2002.
#
#
# Other texts which might be helpful:
#
# - <NAME>. <NAME>, and <NAME>. Feedback Control of Dynamic Systems. 6th ed. Prentice Hall, 2009. ISBN: 9780136019695.
#
# - <NAME>, John. Feedback Control Systems. 3rd ed. Prentice Hall, 1994. ISBN: 9780002085069.
#
# - <NAME>. Automatic Control Systems. 8th ed. John Wiley & Sons, 2003. ISBN: 9780471381488.
#
# - Ogata, Katsuhiko. Solving Control Engineering Problems with MATLAB. Prentice Hall, 1993. ISBN: 9780130459077.
#
#
# Or in Italian
#
# - Bolzern, Scattolini, Schiavoni. Fondamenti di Controlli Automatici, 2nd ed. McGraw-Hill.
#
# ## Use of Python / MATLAB®
#
# The use of a software package like Python or MATLAB is very helpful in the study of Feedback Systems.
# The software can best be used initially to check work that is first done traditionally with pencil and paper. This is particularly helpful when verifying polar plots (Nyquist plots), Bode diagrams and root loci when first attempting to sketch these functions.
# In addition, step responses in the time domain can be examined in order to build an intuitive sense of the relations between time and frequency domain behavior.
# We will also use Python to check the validity of simplifying approximations that are often made when carrying out preliminary designs. Finally, we can study more complex problems with a computer-aided design package without the enormous burden of doing extensive computations.
#
# We suggest that everyone become familiar with the use of Python or MATLAB. Remember that we want you to come away with an understanding of feedback theory in some depth. The computer is to aid in achieving this understanding and should be used intelligently as an engineering tool.
# ## Grading
# - Homeworks (50%)
# - Final exam (50%)
# ---------------------
| 00_Syllabus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import tensorflow as tf
import matplotlib.pyplot as plt

# Expected directory layout: {train,test}/{aeroplane,bird,car,cat}, one
# folder per class (Windows-style separators in the relative paths).
train_dir='../dataset\\cifar_10_small\\train'
test_dir='../dataset\\cifar_10_small\\test'

train_aeroplane_dir= os.path.join(train_dir,'aeroplane')
train_bird_dir=os.path.join(train_dir,'bird')
train_car_dir= os.path.join(train_dir,'car')
train_cat_dir=os.path.join(train_dir,'cat')

test_aeroplane_dir= os.path.join(test_dir,'aeroplane')
test_bird_dir=os.path.join(test_dir,'bird')
test_car_dir= os.path.join(test_dir,'car')
test_cat_dir=os.path.join(test_dir,'cat')

# Report per-class image counts (labels printed in Korean:
# "total number of training/test <class> images").
print('훈련용 aeroplane 이미지 전체 개수:', len(os.listdir(train_aeroplane_dir)))
print('훈련용 bird 이미지 전체 개수:', len(os.listdir(train_bird_dir)))
print('훈련용 car 이미지 전체 개수:', len(os.listdir(train_car_dir)))
print('훈련용 cat 이미지 전체 개수:', len(os.listdir(train_cat_dir)))
print('테스트용 aeroplane 이미지 전체 개수:', len(os.listdir(test_aeroplane_dir)))
print('테스트용 bird 이미지 전체 개수:', len(os.listdir(test_bird_dir)))
print('테스트용 car 이미지 전체 개수:', len(os.listdir(test_car_dir)))
print('테스트용 cat 이미지 전체 개수:', len(os.listdir(test_cat_dir)))
# +
### data - fill this in (exercise)
# NOTE: "입력" ("input") marks fill-in-the-blank placeholders from the
# exercise; this cell cannot run until the ImageDataGenerator and
# flow_from_directory arguments are supplied.
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator( 입력 )
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator( 입력 )
train_generator = train_datagen.flow_from_directory( 입력 )
print(train_generator.class_indices)
test_generator = test_datagen.flow_from_directory(입력 )
#########################
# Pull a single batch to inspect shapes and the class-index mapping
# (printed labels are Korean for "batch data size" / "batch label size").
for data_batch, labels_batch in train_generator:
    print('배치 데이터 크기:', data_batch.shape)
    print('배치 레이블 크기:', labels_batch.shape)
    print('class_name: ', train_generator.class_indices)
    break
# +
## Model (fill-in exercise: define the network's input and output layers)
# NOTE: the two assignments below are intentionally left blank for the
# exercise; the cell is not runnable until they are completed.
input_Layer =
Out_Layer=
model = tf.keras.Model(inputs=[input_Layer], outputs=[Out_Layer])
model.summary()
# +
# Fill-in exercise: choose the loss, optimizer and metric for this 4-class
# problem (the three assignments are intentionally incomplete).
loss_function=
optimize=
metric=
model.compile(loss=loss_function,
              optimizer=optimize,
              metrics=[metric])
## A generator yields batches of inputs and targets endlessly.
## Because data is produced without end, the model must be told how many
## batches to draw from the generator to make up one epoch.
## If steps_per_epoch is 100, 100 batches of the size declared above are
## generated and trained on; e.g. with batch size 20, training on 100
## randomly generated batches of 20 completes one epoch.
## Typically steps_per_epoch is set to (dataset length / batch size).
result = model.fit(
)
model.save('multi_classification_model.hdf5')
# +
# Plot training vs validation accuracy and loss from the fit history to
# visually check for over/under-fitting.
acc = result.history['categorical_accuracy']
val_acc = result.history['val_categorical_accuracy']
loss = result.history['loss']
val_loss = result.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
# Start a second figure for the loss curves.
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
| tensorflow/day4/exercise/04_08_CNN_classification_augumentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # List
# +
# initialization methods
#1. Normal Method: a list literal
thislist = ["apple", "banana", "cherry"]
print(thislist)
#2. Constructor method: list() copies any iterable (here a tuple)
thislist = list(("apple", "banana", "mango"))
print(thislist)
# -
# allows duplicate elements (kept in insertion order)
thislist = ["apple", "banana", "cherry", "apple"]
print(thislist)
# check type of an element (bare expression: displayed in a notebook)
type(thislist)
# allows multiple datatypes in a single list
thislist = ["apple", 4, "banana", True, 152.48]
print(thislist)
# length of a list
len(thislist)
# ### Operations
# +
print("Original List:", thislist)
# append operation: adds one element at the end
thislist.append("Orange")
thislist.append(False)
print("Appended List:", thislist)
# extend operation: appends every element of another iterable
thislist.extend([5.2, 12, "papaya"])
print("Extended List:", thislist)
# insert operation: places an element at a given index
thislist.insert(1, "papaya")
thislist.insert(4, 1)
print("Inserted List:", thislist)
# +
print("Original List:", thislist)
# remove operation: deletes the FIRST element equal to the value
thislist.remove("papaya")
print("Element removed from List:", thislist)
# -
# pop operation: deletes (and returns) the element at the given index
thislist.pop(5)
print("Pop element from List:", thislist)
# slice operation: elements at indices 2, 3 and 4 (end index exclusive)
thislist[2:5]
# reverse operation: reverses in place and returns None
thislist.reverse()
thislist
# index operation: position of the first matching element
thislist.index('banana')
# +
# Demonstrate sort, count, min and max on a small list of ints.
li = [16, 45, 85, 55, 45, 41]
li = sorted(li)          # ascending order (sorted() returns a new list)
print("Sorted List:", li)

n = li.count(45)         # how many times 45 occurs
print("Count of 45 from list:", n)

print("Minimum Element:", min(li))  # smallest value
print("Maximum Element:", max(li))  # largest value
# -
# # Tuple
# +
# initialization methods
#1. Normal Method: a tuple literal
thistuple = ("apple", "banana", "cherry")
print(thistuple)
#2. Constructor method: tuple() copies any iterable
thistuple = tuple(("apple", "banana", "mango"))
print(thistuple)
# -
# Bare expression: displayed in a notebook.
type(thistuple)
# allows duplicate elements
thistuple = ("apple", "banana", "cherry", "apple")
print(thistuple)
# allows multiple datatypes
thistuple = ("apple",4, "banana", True, 152.48)
print(thistuple)
# # set
# Empty containers: list, tuple, and set.
l=[]
t=()
# NOTE: {} would create an empty dict, so an empty set needs set().
s=set()
# +
# initialization methods
#1. Normal Method: a set literal (unordered, no duplicates)
thisset = {"apple", "banana", "cherry"}
print(thisset)
#2. Constructor method: set() copies any iterable
thisset = set(("apple", "banana", "mango"))
print(thisset)
# -
# Bare expression: displayed in a notebook.
type(thisset)
# allows multiple datatypes (each element must be hashable)
thisset = {"apple",4, "banana", True, 152.48}
print(thisset)
# ### Operations
# +
# Build a small set and grow it with add() / update().
s1 = {1, 2, 3}                  # literal form instead of set((...))
s1.add(4)                       # add a single element
print(s1) # 1,2,3,4
s1 |= {1, 3, 5, 7}              # |= is equivalent to update() with one iterable
print(s1) # 1,2,3,4,5,7
# update() accepts several iterables (here a list and a set) in one call
s1.update([2, 4, 6], {5, 6, 7, 8, 9})
print(s1)
# +
# remove operation: deletes the element, raising KeyError if absent
s1.remove(8)
print(s1)
# discard operation: deletes the element, silently ignoring absence
s1.discard(5)
print(s1)
# +
# difference between discard and remove in set:
# remove(missing) raises KeyError, discard(missing) does nothing.
try:
    s1.remove(10)
    print(s1)
except KeyError as e:
    print("KeyError raised....")
try:
    s1.discard(10)
    print(s1)
except KeyError as e:
    print("KeyError raised....")
# +
# Two overlapping sets to demonstrate the algebraic set operations.
A = {1, 2, 3, 4, 5}
B = {4, 5, 6, 7, 8}

# Each operation is printed twice: method form and operator form are
# equivalent and produce the same set.
print("----Union----")
print(A.union(B))        # elements in either set
print(A | B)             # 1,2,3,4,5,6,7,8

print("----Intersection----")
print(A.intersection(B)) # elements common to both
print(A & B)             # 4,5

print("----Difference----")
print(A.difference(B))   # elements of A not in B
print(A - B)             # 1,2,3
# -
# # Dictionary
# A dict literal mapping string keys to values.
thisdict = {
    "brand": "Ford",
    "model": "Mustang",
    "year": 1964
}
print(thisdict)
# Key lookup (bare expression: displayed in a notebook).
thisdict["brand"]
# +
# Duplicates are not allowed: when a key repeats in a literal, the LAST
# occurrence wins (so "year" ends up as 2020).
thisdict = {
    "brand": "Ford",
    "model": "Mustang",
    "year": 1964,
    "year": 2020,
    "colors": ["red", "white", "blue"]
}
print(thisdict)
# -
# Number of key/value pairs, and the container's type.
len(thisdict)
type(thisdict)
# +
# updating value: assignment to an existing key replaces its value
thisdict["colors"] = "blue"
thisdict
# +
# delete an element by key
del thisdict["year"]
print(thisdict)
# +
# getting all keys from dictionary (a live view object)
thisdict.keys()
# +
# getting all values from dictionary (a live view object)
thisdict.values()
# +
# getting key value pairs as (key, value) tuples
thisdict.items()
# -
# Delete all elements in the dictionary
thisdict.clear()
print(thisdict)
# # Map
# +
def addition(n):
    """Return n added to itself (i.e. doubled)."""
    return 2 * n

values = [1, 2, 3, 4]
# map() applies addition to every element; list() materialises the result.
result = list(map(addition, values))
print(result)
# +
# In competitive coding: read one line of space-separated integers.
n = input()
n_split = n.split(" ")
# map(int, ...) converts each token; list() materialises the result.
int_conv = map(int, n_split)
final = list(int_conv)
final
# -
# One-liner equivalent of the four steps above.
final = list(map(int, input().split(" ")))
final
| List, tuple, set, dictionary and map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Tuning Hyperparameters with Keras Tuner 2
# + [markdown] id="9b3c60b6"
# - Task: Find best Number of Filters in ConvLayers and best Learning Rate for SGD Optimizer
# - Dataset: Cropped and Resized Fundus Images
# - Original Colorscheme
# + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1628076383986, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="9c6708da"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# + executionInfo={"elapsed": 1712, "status": "ok", "timestamp": 1628076385692, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="149d22bb"
from keras.models import Sequential
from keras_preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras import regularizers, optimizers
from tensorflow.keras.callbacks import EarlyStopping
# + executionInfo={"elapsed": 5, "status": "ok", "timestamp": 1628076385693, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="zAF4gI1du-7R"
from keras_preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import Adam, SGD
# + executionInfo={"elapsed": 546, "status": "ok", "timestamp": 1628076386235, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="05b88b36"
# Labels for the fundus images; "normal" is the binary target column.
df = pd.read_csv('/content/drive/MyDrive/Anomaly Detection/train_binary.csv')
# + executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1628076386236, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="d8897b2b"
# flow_from_dataframe with class_mode="binary" expects string labels.
df.normal = df.normal.astype('str')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1628076386237, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="3532801a" outputId="4f006012-0e71-491f-ba00-8309e7d7a9f6"
# Inspect the class balance of the binary target.
df.normal.value_counts()
# + executionInfo={"elapsed": 510, "status": "ok", "timestamp": 1628076386740, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="89b0e66d"
# As data is ordered in dataframe after augmentation, shuffle it so the
# training/validation split is not biased by row order.
from sklearn.utils import shuffle
df = shuffle(df)
# + executionInfo={"elapsed": 6, "status": "ok", "timestamp": 1628076387218, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="6bd6c1ba"
# Directory of the training images.
# NOTE(review): this is a local Windows path; the execution metadata shows the
# notebook ran on Colab, where this should point at the mounted Drive folder.
indir = r"D:\data\Projects\notebooks\RetinaAI\Anomaly Detection\train"
# + executionInfo={"elapsed": 6, "status": "ok", "timestamp": 1628076387219, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="f8e97a88"
# Rescale pixel values to [0, 1] and reserve 25% of the rows for validation.
datagen = ImageDataGenerator(rescale=1./255., validation_split = 0.25)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 33533, "status": "ok", "timestamp": 1628076420747, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="6bae5961" outputId="ab8d73fb-a3db-4fa3-90ca-e72e62d08364"
# Training split: 300x300 images in batches of 64, binary labels taken from
# the "normal" column; subset='training' selects the 75% share.
train_gen = datagen.flow_from_dataframe(dataframe = df,
                                        directory = indir,
                                        x_col = "filename",
                                        y_col = 'normal',
                                        batch_size = 64,
                                        seed = 2,
                                        shuffle = True,
                                        class_mode = "binary",
                                        target_size = (300,300),
                                        subset='training')
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1374, "status": "ok", "timestamp": 1628076422114, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="a2b3e2ac" outputId="467b3106-0358-4123-915a-4ce279f25cc6"
# Validation split: same settings as train_gen, but subset='validation'
# selects the reserved 25% share.
val_gen = datagen.flow_from_dataframe(dataframe = df,
                                      directory = indir,
                                      x_col = "filename",
                                      y_col = 'normal',
                                      batch_size = 64,
                                      seed = 2,
                                      shuffle = True,
                                      class_mode = "binary",
                                      target_size = (300,300),
                                      subset='validation')
# + [markdown] id="ibgAwjphof9Z"
# Tune Hyperparameters
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 529, "status": "ok", "timestamp": 1628076447223, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="ec59dd08" outputId="bdc7fb86-1440-48cf-a615-8c01bd10f697"
from kerastuner.tuners import Hyperband
from kerastuner.engine.hyperparameters import HyperParameters
# + executionInfo={"elapsed": 298, "status": "ok", "timestamp": 1628076451316, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="tWxOnt1bqnmc"
# Output directory for Keras Tuner trial logs and checkpoints.
LOG_DIR = 'log_dir'
# + executionInfo={"elapsed": 513, "status": "ok", "timestamp": 1628076453407, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="4PCqbzO-pXrL"
def build_model(hp):
    """Build and compile a VGG-style CNN for binary image classification.

    Keras Tuner searches over the number of filters in each conv layer,
    the dense-layer width, and the SGD learning rate.

    Parameters
    ----------
    hp : kerastuner.HyperParameters
        Search-space handle supplied by the tuner.

    Returns
    -------
    A compiled Keras ``Sequential`` model expecting 300x300 RGB input.
    """
    model = Sequential()
    # Block 1: two conv layers (32-128 filters each) + 2x2 max pooling.
    model.add(Conv2D(hp.Int('conv_1', min_value=32, max_value=128, step=32),
                     (3, 3), kernel_initializer='he_uniform', padding='same', activation='relu', input_shape=(300, 300, 3)))
    model.add(Conv2D(hp.Int('conv_2', min_value=32, max_value=128, step=32),
                     (3, 3), kernel_initializer='he_uniform', padding='same', activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    # Block 2: two conv layers (64-256 filters each) + pooling.
    model.add(Conv2D(hp.Int('conv_3', min_value=64, max_value=256, step=64),
                     (3, 3), kernel_initializer='he_uniform', padding='same', activation='relu'))
    model.add(Conv2D(hp.Int('conv_4', min_value=64, max_value=256, step=64),
                     (3, 3), kernel_initializer='he_uniform', padding='same', activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    # Block 3: two more conv layers + pooling.
    # BUGFIX: the original conv_5 line had an extra closing parenthesis,
    # which made this whole cell a SyntaxError.
    model.add(Conv2D(hp.Int('conv_5', min_value=64, max_value=256, step=64),
                     (3, 3), kernel_initializer='he_uniform', padding='same', activation='relu'))
    model.add(Conv2D(hp.Int('conv_6', min_value=64, max_value=256, step=64),
                     (3, 3), kernel_initializer='he_uniform', padding='same', activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    # Classifier head: flatten -> tunable dense -> sigmoid output.
    model.add(Flatten())
    model.add(Dense(hp.Int('units', min_value=64, max_value=256, step=64), activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Tunable learning rate for SGD with momentum.
    lr = hp.Choice("learning_rate", values=[1e-1, 1e-2, 1e-3])
    opt = SGD(learning_rate=lr, momentum=0.9)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model
# + executionInfo={"elapsed": 6067, "status": "ok", "timestamp": 1628076463665, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="c2721e71"
# Hyperband search over build_model's hyperparameter space, selecting by
# validation accuracy.
# NOTE(review): `config.OUTPUT_PATH` and `args["tunes"]` are not defined
# anywhere in this notebook — confirm they come from an earlier environment.
tuner = Hyperband(
    build_model,
    objective="val_accuracy",
    max_epochs=20,
    factor=3,
    seed=2,
    directory=config.OUTPUT_PATH,
    project_name=args["tunes"],
)
# + executionInfo={"elapsed": 6, "status": "ok", "timestamp": 1628076463666, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="7sdRahjSndXR"
step_size_train = train_gen.n//train_gen.batch_size
step_size_val = val_gen.n//val_gen.batch_size
# -
# Stop early when validation accuracy plateaus for 5 epochs.
# BUGFIX: TF2 Keras logs the metric as 'val_accuracy' ('val_acc' was the
# TF1-era name); with 'val_acc' the callback only warns and never fires.
# NOTE(review): `es` is never passed to tuner.search below — add it to a
# callbacks=[es] argument for it to have any effect.
es = EarlyStopping(monitor='val_accuracy', patience=5)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1976952, "status": "ok", "timestamp": 1628078442360, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="2e3eae66" outputId="10b6885b-9ef7-49f2-8623-14960c5df5db"
tuner.search(x=train_gen,
validation_data=val_gen,
steps_per_epoch=step_size_train,
validation_steps=step_size_val,
epochs=1,
verbose=2
)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 301, "status": "ok", "timestamp": 1628079456467, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="c2f67cc2" outputId="8d753e3c-3be4-4ad5-d09b-878765367265"
# results_summary() prints its report itself and returns None, so wrapping
# it in print() only echoed a spurious "None" line.
tuner.results_summary()
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 827, "status": "ok", "timestamp": 1628078883321, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhQN7oZcOXhMM0AgecritM7JP6IWKvkL4HS8bJW=s64", "userId": "13385611080783470639"}, "user_tz": -120} id="sSVBBnzx18I2" outputId="8809f3dc-b726-4350-fcbb-02c7c169f758"
tuner.get_best_models()[0].summary()
#tuner.get_best_models()[0].predict()
| 02_Anomaly Detection/Modell_0.1_KerasTuner2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="_l3WhGKOv5-8" outputId="7fe1976b-eb73-4279-b22f-796242d4fd4c"
# !git clone https://github.com/yhenon/pytorch-retinanet.git
# !apt-get install tk-dev python-tk
# !pip install pandas
# !pip install pycocotools
# !pip install opencv-python
# !pip install requests
# + colab={"base_uri": "https://localhost:8080/"} id="ixqbxL8QwKcx" outputId="3d68721b-849e-4c7d-92c7-4b22f3ed40fe"
# #!wget https://drive.google.com/open?id=1yLmjq3JtXi841yXWBxst0coAgR26MNBS
# !gdown --id 1yLmjq3JtXi841yXWBxst0coAgR26MNBS
# + colab={"base_uri": "https://localhost:8080/"} id="YeiDD0lJw6Sd" outputId="7a33cf83-22e9-40b6-b6d0-1691f381b985"
import torch
import torchvision.models as models
retinanet = models.resnet50(num_classes=3,)
retinanet.load_state_dict(torch.load('/content/coco_resnet_50_map_0_335_state_dict.pt'),strict=False)
# + colab={"base_uri": "https://localhost:8080/"} id="dteTM2isKaUD" outputId="e40307fa-2896-43a5-b341-66504b32bc20"
from google.colab import drive
drive.mount('/content/drive')
# + id="ZbwEHiuiKbX7"
# !cp '/content/drive/MyDrive/Objectdetection/images.tar' '/content/images.tar'
# + colab={"base_uri": "https://localhost:8080/"} id="ta78BDysKlaf" outputId="e872ff66-93bb-45c3-d90f-04dfb376330a"
# !tar -xvf '/content/images.tar' -C '/content/'
# + id="rZAtkLALJZRv"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import requests
import urllib
import os
from PIL import Image
# from keras_retinanet import models
# from keras_retinanet.utils.image import preprocess_image, resize_image
# from keras_retinanet.utils.visualization import draw_box, draw_caption
# from keras_retinanet.utils.colors import label_color
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="N2vXHXv8Ja2_" outputId="6cf47186-a7d2-414f-c5f7-4ea8828c0d2d"
df_train = pd.read_csv('/content/train_labels.csv')
df_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="NNWSpyWZKPu7" outputId="b6eabd00-b81f-4fb9-bdc6-bce9bd34cdcf"
df_test = pd.read_csv('/content/test_labels.csv')
df_test.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="Nsi4xaxLKUhU" outputId="d28de222-d5aa-4051-a23a-7c1ea9a80724"
train_data = df_train.iloc[:,[0,4,5,6,7,3]]
train_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 198} id="H3Jj_RtxLt-F" outputId="1c8162e0-fc9e-4762-8337-7649991675ab"
test_data = df_test.iloc[:,[0,4,5,6,7,3]]
test_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 327} id="QYBYWub3Lxaf" outputId="0e227d86-2d97-4941-f042-49e051f3cb1b"
# Prefix every training image filename with its directory so the annotation
# CSV holds full paths (required by the RetinaNet CSV data loader).
# The original `x.replace(x, os.path.join(...))` was an obfuscated identity
# replace — joining directly is equivalent and clearer.
train_path = '/content/images/train'
train_data['filename'] = train_data['filename'].apply(lambda x: os.path.join(train_path, x))
train_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 327} id="8oKSuqR_NIKL" outputId="26b7d259-92d4-492c-f627-fbed7f1690ac"
# Same path-prefixing for the test split: `x.replace(x, y)` always yields
# `y`, so join the directory directly instead.
test_path = '/content/images/test'
test_data['filename'] = test_data['filename'].apply(lambda x: os.path.join(test_path, x))
test_data.head()
# + id="VkRwM_sXNQ72"
train_data.to_csv('/content/train_annotations.csv', index=False, header=None)
# + id="7pIphoqbyyK_"
test_data.to_csv('/content/test_annotations.csv', index=False, header=None)
# + id="Ap5soeVdNl5G"
# Write the class-name -> integer-index mapping consumed by the RetinaNet
# CSV loader, one "name,index" line per class.
classes = ['Red_signal','Green_signal','Yellow_signal']
with open('/content/classes.csv', 'w') as f:
    f.writelines(f'{name},{idx}\n' for idx, name in enumerate(classes))
# + colab={"base_uri": "https://localhost:8080/"} id="sxB2_0VUOHff" outputId="105be311-42f8-4e55-8acb-02104078fedd"
# !head /content/train_annotations.csv
# + colab={"base_uri": "https://localhost:8080/"} id="8TRXrW_4OOMe" outputId="372a60cf-fc0f-499b-ee7e-6c4f0284f96a"
# !head /content/classes.csv
# + colab={"base_uri": "https://localhost:8080/"} id="mQgtb74oy4yf" outputId="5ee7dca0-7d58-4d90-94fe-2b52c81c6861"
# !head /content/test_annotations.csv
# + colab={"base_uri": "https://localhost:8080/"} id="Rs48ZQmcyftE" outputId="54bc5d1c-5a19-401f-a864-63c6c7527df6"
# !python '/content/pytorch-retinanet/train.py' --dataset csv --csv_train '/content/train_annotations.csv' --csv_classes '/content/classes.csv' --csv_val '/content/test_annotations.csv'
# + id="rkbTTQd7YaQC"
#python coco_validation.py --coco_path ~/path/to/coco --model_path /path/to/model/coco_resnet_50_map_0_335_state_dict.pt
# + colab={"base_uri": "https://localhost:8080/"} id="G_NtnThqYsv2" outputId="6ea4290a-7a8c-43da-fe97-70a02da5b75a"
# %matplotlib inline
# !python '/content/pytorch-retinanet/visualize_single_image.py' --image_dir '/content/images/test/' --model_path '/content/csv_retinanet_50.pt' --class_list '/content/classes.csv'
| Objectdetection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="JmPaGLEyKq6H" outputId="716333a4-23d0-45c1-d546-8ba08d85ec37"
# ! pip install simpletransformers
# + id="45bQN-3dKx8_"
import pandas as pd
# + id="4Xq7m9G2LDSo"
# + id="FJI2P633LaNN"
url = 'https://raw.githubusercontent.com/Jhagrut/Twitter-Project/main/Data/dft%20updated.csv'
df = pd.read_csv(url)
# Dataset is now stored in a Pandas Dataframe
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="iLZPQTDYMUuF" outputId="99eee09e-4bf0-42a0-bede-e6154e33be32"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="6Vwoc76dMWGA" outputId="6c378a5b-c474-43fe-81b0-aa9bcd051d67"
print(set(df['injury_report'].values))
# + id="bTQp5vqfMbQ8"
df.injury_report = pd.Categorical(df.injury_report)
df['injury_report'] = df['injury_report'].cat.codes
df = df.dropna()
# + id="fuF6jGMPMjYA"
df_new = df[["tweet", "injury_report"]]
# + id="niM_q7mXb6MQ"
df_new1 = df_new[df_new['injury_report']==0]
df_new2 = df_new[df_new['injury_report']==1]
# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="28TjvEZicA64" outputId="bdf82b3c-c8ae-4747-c292-e882f79034cc"
df_new1
# + colab={"base_uri": "https://localhost:8080/"} id="X19Y5LCZcvF4" outputId="9aa19c43-aa06-477b-eb06-0d99a3bc2b50"
df_new2['tweet'].sample(10).tolist()
# + id="XemzK708cIyQ"
df_new1 = df_new1.sample(6000, replace = True)
# + id="KCU2hCzScIus"
df_new = pd.concat([df_new1,df_new2])
# + id="gR6S8MQNdb8g"
df_new.reset_index(drop = True, inplace = True)
# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="MR3N0HsAcIqp" outputId="7949a2d0-cc7d-4446-9eca-61fac2f8c3fb"
df_new
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="FIzxsgVGREUS" outputId="f9503a0c-cfc1-4fc8-ab9b-131471e025c1"
from google.colab import files
df_new.to_csv("dft_sampled_1.csv")
files.download('dft_sampled_1.csv')
# + id="AkWv5qS9cIi3"
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_new['tweet'], df_new['injury_report'], random_state = 91, stratify = df_new['injury_report'], test_size = 0.2)
# + id="V7nO5Xcxuj7W"
train = pd.concat([X_train, y_train], axis = 1)
test = pd.concat([X_test, y_test], axis = 1)
# + id="guGPlCYNMkZs"
from sklearn.model_selection import train_test_split
train, test = train_test_split(df_new, random_state=91)
# + id="STUSP5FlMrXh"
from simpletransformers.classification import ClassificationModel
# + id="hTKP0p--Msap" colab={"base_uri": "https://localhost:8080/", "height": 418} outputId="f8ec4250-1e69-4e5f-94d2-74dac4c29a3e"
df_new
# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="mXkPWwHNd1wz" outputId="d8cb0f50-9d2d-4dba-963e-ba9e4483fc70"
train
# + colab={"base_uri": "https://localhost:8080/"} id="zgqZaubpd-EL" outputId="06d97b74-84ca-4772-8740-ae33bb7bed28"
# !pip install imbalanced-learn
# + colab={"base_uri": "https://localhost:8080/"} id="KUvTrs42fz18" outputId="6bfb5043-725d-4452-8f5f-28ca824ce628"
# !pip install Counter
# + id="fI0gFL59gG8k"
import collections
# + id="exE14BCfeMyb" colab={"base_uri": "https://localhost:8080/"} outputId="85e7b49d-d936-468a-d0d1-8623469b10e8"
import imblearn
from imblearn.over_sampling import SMOTE
# + colab={"base_uri": "https://localhost:8080/"} id="uve96f1YepCL" outputId="baf22c73-1669-4390-d920-df9720beec82"
train["injury_report"]
# + colab={"base_uri": "https://localhost:8080/"} id="3rygeY0ZeY6A" outputId="82e5ffbc-9283-4e54-df1b-8f261a1dd31c"
counter = collections.Counter(train["injury_report"])
print(counter)
# + id="cmAn4HfxgXHS"
smt = SMOTE()
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="GV2tckIfeY4M" outputId="5587ddd6-970b-4490-e1d6-a6f99a93ad9f"
# + colab={"base_uri": "https://localhost:8080/", "height": 439, "referenced_widgets": ["e39cc11304ad42819297a59c87537bb1", "7255958f06cc4d238c3e4ff48feaff19", "dbbfb43a9e1141b9b2223e7e6a0d8a3e", "<KEY>", "<KEY>", "<KEY>", "b33e76ca044b4cd0b0d3e869430b2be9", "<KEY>", "<KEY>", "91121c23bece49b9b95aef8a9d67fbaf", "8528d0f0de9a4e9a9ef0a45cf23e0947", "<KEY>", "16fd3fed4ab84e1db1b884e71dfb3d20", "f5521d69eaac400e8f5d08eeb428855a", "<KEY>", "<KEY>", "35c51089a889451b8e38552a62aaca30", "<KEY>", "87d1d63fe38d4c8cb143330516f9e3df", "d52ca9674c234833b117df4d3caf99ee", "0e92f79767974a7797e3a2d01e086f44", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "8a0061d7291e4cd2831c478c951a6e2a", "9aeff09af8264e8397fd70b02464b42e", "d621ff08de1640c3847bc60437524a22", "<KEY>", "8877fd6709e944f296339d84a2e01ff0", "<KEY>", "5db081eeeee74381a950b75e3ce36b4e", "08eb415e22e7498f848755aef0a96b65", "<KEY>", "c0e197435db74f6692c6778e113b3e02", "<KEY>", "01eca5e1c8614214b5d276abff018fed", "edbdee1c5dda4934be36791893fa65a3", "cb9edb0dd5d44f67b7309bb3a8444d84", "<KEY>", "42fa017ed7d449fc8ac711b0ebd3dc60", "56fb2c08eba1413db02abd012167cc24", "1de62795db8e464aa5ba748dabe50911", "ffa83e7f1e834223ab7214422dcba3dd", "ce430e128c174cb48a100224d8d34305", "<KEY>", "f3cc4e8671cb4d9aa2dbad417aae6d50", "<KEY>", "<KEY>", "37aa1725f4a44265a6c7a3a4e830afea", "bafd3ee16e414d4fb99065e91ec25ccc", "79f560d997da4fe6b070c8ca0b3698be", "c5edfec5477447d7ab7ace38e7f60929", "e53df3ef4fa747ddb432488d0fcf1478", "9a77d832090743dd937f4ab7a821d1e5", "c6f0ff6cc18842db9bad49ed808fa372", "3cb831281ac14e6c986a92fd517e3083", "<KEY>", "<KEY>", "<KEY>", "eca61f8434654f7681f84f10e905ec22", "<KEY>", "21e34acc0aef4045a86388997522b517", "e4216cdd2e464701b0488722a7d584c4", "7ac604dc911a49858852caf8507761f4", "<KEY>", "b091117f1a6d44a8ba8e15284defde29", "<KEY>", "993cea1ee6544ff6837863e7e9512aa1", "068edf07e3094aa58f73a8b9eacb9466", "<KEY>", "f08586408cee41428f4d1dae7483724f", "<KEY>", "92583aff72ab4153a00c3a561834117a", 
"57ecdd323de946c99278aacef0d0270c", "8f89f3bc29a048c58187e9ae854e57f2", "020d9ee9818543a599324cd618b39a89"]} id="rizrYxVHMuHe" outputId="a024e6b4-e5ca-4fc3-e042-01ab6288d138"
# set use_cuda=False on CPU-only platforms
from simpletransformers.classification import ClassificationModel, ClassificationArgs
# NOTE(review): model_args is configured here but never passed to
# ClassificationModel below — the constructor receives an equivalent dict
# literal instead, so these assignments are effectively dead code. Either
# pass args=model_args or delete this block.
model_args = ClassificationArgs()
model_args.use_early_stopping = True
model_args.early_stopping_delta = 0.01
model_args.early_stopping_metric = "mcc"
model_args.early_stopping_metric_minimize = False
model_args.early_stopping_patience = 5
model_args.evaluate_during_training_steps = 1000
bert = ClassificationModel('roberta', 'roberta-base', num_labels=2, use_cuda=True, args={
'reprocess_input_data': True,
'use_cached_eval_features': False,
'overwrite_output_dir': True,
'num_train_epochs': 5,
'use_early_stopping': True,
'early_stopping_delta': 0.01,
'early_stopping_metric': "mcc",
'early_stopping_metric_minimize': False,
'early_stopping_patience': 5,
'evaluate_during_training_steps': 1000
})
bert.train_model(train)
# + id="v8J4Cip_MvzR"
import matplotlib.pyplot as plt
import sklearn
import numpy as np
#others
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix
from scipy.special import softmax
# + colab={"base_uri": "https://localhost:8080/", "height": 154, "referenced_widgets": ["a64d425a1bd0467ab5d6ff418888604f", "501fdbefff2b426197f21b72f1b8126b", "<KEY>", "5b3545c5e50a4b12afb9b326e6618741", "82a482caa7a84053bb5dda3cae4ed3ca", "6d1b18e043ec459c846c78cdf0465250", "f63613d1f22c43778c247ad82f26a327", "<KEY>", "173b8520e3184483b47d8f911da87afa", "<KEY>", "dd35d57bb755451493efe3100c3dcedf", "2f592f0dacab4a5eb1507e43afe48551", "<KEY>", "eefbe62e8358447bad9a078f18ee701a", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "c375b5e4acb74ef49b163133986d2eca"]} id="FAVs4gcxM_OX" outputId="740cf487-a1d7-466c-ea8a-5fd312f4ba9e"
result, model_outputs, wrong_predictions = bert.eval_model(test, acc=sklearn.metrics.accuracy_score)
print(result)
# + colab={"base_uri": "https://localhost:8080/", "height": 314, "referenced_widgets": ["da2962cf537f492d84879648b2b61763", "14490408ff044d55b3d0036b3bbb1ef0", "<KEY>", "caa3383b92df4a7780c897c13e2559c2", "44e96f5ae0e44974b6e5fc7a1e129f59", "<KEY>", "<KEY>", "0b8ce97ad2c04579a6b4d3bcd60fa709", "<KEY>", "<KEY>", "716220295c33412ebe86eb3d2290fa81", "97fd4e746779453fabeff26454d8c5df", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "74e97b6550184e5dac2e2612c45a2bff", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "7734ceaf956343c0be11a127e09ba33d"]} id="wEXbH1xpNArO" outputId="0db21ba7-d65b-4f23-d04d-ee0fc04fc602"
result, model_outputs, wrong_predictions = bert.eval_model(test, acc=confusion_matrix)
result
# + id="C6v4YPPkC4Le"
import torch
torch.save(bert, '/roberta1')
# + colab={"base_uri": "https://localhost:8080/"} id="_dpKxZ-0NAou" outputId="dcc4ba7b-e5dc-4f1c-a24b-3a7cd26e1f87"
cm = result["acc"]
recall = np.mean(np.diag(cm) / np.sum(cm, axis = 1))
precision = np.mean(np.diag(cm) / np.sum(cm, axis = 0))
print(precision, recall)
# + id="a5ifG-tzEIz-"
# bert.get_named_parameters()
# + colab={"base_uri": "https://localhost:8080/"} id="1lg-xINf-P96" outputId="7638d9d9-e623-4db9-a235-1ba0c1152bb8"
result
# + colab={"base_uri": "https://localhost:8080/"} id="WG0V7w1J-R5M" outputId="32d7482b-3310-4773-fc46-7428c7fbe308"
model_outputs
# + colab={"base_uri": "https://localhost:8080/"} id="zu1aMkvp98A8" outputId="2639d796-33ed-4218-c534-d2776325f27e"
wrong_predictions
# + colab={"base_uri": "https://localhost:8080/", "height": 649} id="DCzTErEONAle" outputId="cb8946a6-13b2-4fc4-d240-38cd5ffc8e3b"
fig, ax = plt.subplots(figsize=(10, 10))
intersection_matrix = cm
ax.matshow(intersection_matrix, cmap=plt.cm.Blues)
for i in range(2):
for j in range(2):
c = intersection_matrix[j,i]
ax.text(i, j, str(c), va='center', ha='center')
plt.xticks(range(0, 2))
ax.tick_params(axis="x", labelbottom=True, labeltop=False)
plt.yticks(np.arange(0, 2, step=1))
plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="tHkKd6NU_Rjb" outputId="5db56fd5-1988-4edc-c6fa-ca35a54fbb02"
len(model_outputs)
# + colab={"base_uri": "https://localhost:8080/"} id="3ja394qN_X9i" outputId="d0f90803-a78f-4052-d466-c18e091b170b"
len(test)
# + id="kx-hAdN8-nQM"
tst = test['tweet'].tolist()
# + id="Gd3f1R2vHJZI"
y = test['injury_report'].tolist()
# + colab={"base_uri": "https://localhost:8080/"} id="nkaQ7zcvNAij" outputId="bdfc14dd-16fd-4770-9652-75343ec2e0cd"
# Split misclassified tweets into false negatives / false positives by
# comparing the two raw output scores against the true label.
fn = []  # predicted class 0 but labelled 1
fp = []  # predicted class 1 but labelled 0
ind = []  # NOTE(review): never populated below — appears to be leftover
for i in range(len(model_outputs)):
    x = model_outputs[i];
    # x holds the two class scores; x[1] > x[0] means class 1 predicted.
    if x[1]>x[0] and y[i]==0:
        fp.append(tst[i])
    elif x[0]>x[1] and y[i]==1:
        fn.append(tst[i])
print(fp)
print(len(fp))
print(fn)
print(len(fn))
# + id="0HLueH5HPz76"
# + colab={"base_uri": "https://localhost:8080/"} id="_O4j-gvvDsqG" outputId="570279a4-2a04-43ec-b189-685b0728278d"
fn
# + colab={"base_uri": "https://localhost:8080/"} id="sp3DjEmGNzUq" outputId="9cf18876-493a-4fb2-dde6-cc66c8bb6cf2"
fp
# + id="XkCgd2Xz-wsu"
len(model_outputs)
# + id="2XbIRFMpO1_Y"
len(wrong_predictions[0])
# + id="WDiK9dllPSxu"
# NOTE(review): `lst` is not defined anywhere in this notebook, so this cell
# raises NameError — looks like a leftover scratch cell that should be removed.
lst
# + colab={"base_uri": "https://localhost:8080/"} id="Wi1jeliVPNHn" outputId="11b00cb2-6980-4165-e6b3-6386e3a97e96"
225/247
# + id="bsv-b3EuPWHL"
| Models/RoBERTa_with_sampling_updated.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import random
# # <span style="color:red"> Machine Learning </span>
# +
import pandas as pd
def dataToHTML(csv_path="house_data.csv", html_path="data.html", n_rows=100):
    """Render the first *n_rows* rows of a CSV file as an HTML table file.

    The hard-coded paths and row count are now parameters with the original
    values as defaults, so existing `dataToHTML()` calls behave identically.

    Parameters
    ----------
    csv_path : str
        Input CSV file to read (default: "house_data.csv").
    html_path : str
        Output file that receives the HTML table (default: "data.html").
    n_rows : int
        Number of leading rows to render (default: 100).
    """
    dataFrame = pd.read_csv(csv_path)
    html = dataFrame[:n_rows].to_html()
    with open(html_path, "w") as outfile:
        outfile.write(html)
dataToHTML()
# -
| .ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sparse Gaussian Process Regression
#
# ### <NAME>
# This example shows some of the properties of the Variational Free Energy and Fully Independent Training Conditional approaches of Gaussian Process Estimation
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
# %config InlineBackend.figure_format = 'svg'
import GPy
import numpy as np
import matplotlib.pyplot as plt
# Fix the random seed
np.random.seed(100)
# Suppress tight layout warnings
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module="matplotlib")
# -
# ## Sample Function
#
# Now we'll sample a Gaussian process regression problem directly from a Gaussian process prior.
# +
N = 100 # 64
MAX_X = 10
MIN_X = 0
noise_var = 0.05
X = np.linspace(MIN_X, MAX_X,num=N)[:,None]
k = GPy.kern.RBF(1)
y = np.random.multivariate_normal(np.zeros(N),k.K(X)+np.eye(N)*np.sqrt(noise_var)).reshape(-1,1)
# -
# ## Full Gaussian Process Fit
#
# Now we optimize the parameters of a Gaussian process given the sampled data with no approximations.
m_full = GPy.models.GPRegression(X,y)
m_full.optimize('bfgs')
m_full.plot()
print(m_full)
# ## Basic Sparse Gaussian Process Fit
#
# Now we construct a sparse Gaussian process. This model uses the inducing variable approximation.
Z = np.linspace(MIN_X, MAX_X, 8)[1:7,None]
m_vfe = GPy.models.SparseGPRegression(X,y,Z=Z)
m_vfe.likelihood.variance = noise_var
m_vfe.plot()
print(m_vfe)
#
# Notice how the fit is reasonable where there are inducing points, but bad elsewhere.
#
# ### Optimizing Covariance Parameters
#
# Next, we will try and find the optimal covariance function parameters, given that the inducing inputs are held in their current location.
m_vfe.inducing_inputs.fix()
m_vfe.optimize('bfgs')
m_vfe.plot()
print(m_vfe)
# The poor location of the inducing inputs causes the model to 'underfit' the data. The lengthscale is much longer than the full GP, and the noise variance is larger. This is because in this case the Kullback Leibler term in the objective free energy is dominating, and requires a larger lengthscale to improve the quality of the approximation. This is due to the poor location of the inducing inputs.
# ### Optimizing Inducing Inputs
#
# Let's see what we can achieve by optimizing the position of the different inducing inputs. Now, it is evenly distributed.
m_vfe.randomize()
m_vfe.Z.unconstrain()
m_vfe.optimize('bfgs')
m_vfe.plot()
print(m_vfe)
# The inducing points spread out to cover the data space, but the fit isn't quite there. We can try increasing the number of the inducing points.
# ### Relation with the number of inducing inputs
#
# Now we try different numbers of inducing inputs, and see how the log-likelihood evolves
MAX_INPUTS = 20
results_bfgs = np.zeros(MAX_INPUTS)
inducing_inputs = np.zeros(MAX_INPUTS)
for inputs in range(1, MAX_INPUTS):
inducing_inputs[inputs] = np.random.rand() * (MAX_X - MIN_X) + MIN_X
m = GPy.models.SparseGPRegression(X,y,Z=inducing_inputs[1:inputs+1,None])
m.optimize('bfgs')
# Add the value to the list
results_bfgs[inputs] = m.log_likelihood()[0,0]
# Plot what we just calculated
p=plt.plot(range(1,MAX_INPUTS), results_bfgs[1:MAX_INPUTS], 'k')
plt.axhline(y=m_full.log_likelihood(), color='r')
plt.xticks(range(1,MAX_INPUTS))
plt.ylabel("Log Likelihood")
plt.xlabel("Inducing Inputs")
print("Log likelihood respect to the number of inducing inputs:")
# ### Results with more inducing inputs
#
# Now we try 12 inducing points, rather than the original six. We then compare with the full Gaussian process likelihood.
# +
Z = np.random.rand(12,1) * (MAX_X - MIN_X) + MIN_X
m_vfe_12 = GPy.models.SparseGPRegression(X,y,Z=Z)
m_vfe_12.optimize('bfgs')
m_vfe_12.plot()
m_full.plot()
print("Log Likelihoods:")
print("VFE:",m_vfe_12.log_likelihood()[0,0])
print("Full Fit:",m_full.log_likelihood())
# -
# This time, we have enough inducing points and the fit resembles that of the GP. Log likelihood for both the complete and the VFE model are similar.
# ## Fully Independent Training Conditional
# Now, let's try using the FITC approach
# +
Z = np.linspace(MIN_X, MAX_X, 8)[1:7,None]
# Print FITC
m_fitc = GPy.models.SparseGPRegression(X, y, Z=Z)
m_fitc.inference_method=GPy.inference.latent_function_inference.FITC()
m_fitc.optimize('bfgs')
m_fitc.plot()
# Compare with VFE
m_vfe = GPy.models.SparseGPRegression(X,y,Z=Z)
m_vfe.optimize('bfgs')
m_vfe.plot()
# Print model
print(m_fitc)
# -
# Observations:
# - FITC can severely underestimate the noise variance, VFE overestimates it
# - See that some inputs are ignored in FITC
# ## Improvement with the number of inputs
# Here we check how the approximation changes with the number of inducing inputs
MAX_INPUTS = 20
results_fitc = np.zeros(MAX_INPUTS)
inducing_inputs = np.zeros(MAX_INPUTS)
for inputs in range(1, MAX_INPUTS):
inducing_inputs[inputs] = np.random.rand() * (MAX_X - MIN_X) + MIN_X
m = GPy.models.SparseGPRegression(X,y,Z=inducing_inputs[1:inputs+1,None])
m.inference_method=GPy.inference.latent_function_inference.FITC()
m.optimize('bfgs')
# Add the value to the list
results_fitc[inputs] = m.log_likelihood()
# Plot what we just calculated
p=plt.plot(range(1,MAX_INPUTS), results_fitc[1:MAX_INPUTS], 'k')
plt.axhline(y=m_full.log_likelihood(), color='r')
plt.xticks(range(1,MAX_INPUTS))
plt.ylabel("Log Likelihood")
plt.xlabel("Inducing Inputs")
print("Log likelihood respect to the number of inducing inputs:")
# Observations:
# - FITC does not recover the true posterior, VFE does
# - VFE improves with additional inducing inputs, FITC may ignore them
# ## Recovering the true posterior:
# With the whole dataset
# +
# Print FITC
m_fitc_full = GPy.models.SparseGPRegression(X, y, Z=X)
m_fitc_full.inference_method=GPy.inference.latent_function_inference.FITC()
m_fitc_full.optimize('bfgs')
m_fitc_full.plot()
plt.title("FITC:");
# Compare with VFE
m_vfe_full = GPy.models.SparseGPRegression(X,y,Z=X)
m_vfe_full.optimize('bfgs')
m_vfe_full.plot()
plt.title("VFE")
print("Log Likelihoods:")
print("VFE:",m_vfe_full.log_likelihood()[0,0])
print("FITC:",m_fitc_full.log_likelihood())
print("Full Fit:",m_full.log_likelihood())
# -
# With just one eight of it
# +
# BUGFIX: np.linspace requires an integer sample count; under Python 3,
# N/8 is a float and raises TypeError on modern NumPy. Use floor division.
Z = np.linspace(MIN_X, MAX_X, num=N // 8)[:, None]
# Print FITC
m_fitc_full = GPy.models.SparseGPRegression(X, y, Z=Z)
m_fitc_full.inference_method=GPy.inference.latent_function_inference.FITC()
m_fitc_full.optimize('bfgs')
m_fitc_full.plot()
plt.title("FITC:")
# Compare with VFE
m_vfe_full = GPy.models.SparseGPRegression(X,y,Z=Z)
m_vfe_full.optimize('bfgs')
m_vfe_full.plot()
plt.title("VFE:")
print("Log Likelihoods:")
print("VFE:",m_vfe_full.log_likelihood()[0,0])
print("FITC:",m_fitc_full.log_likelihood())
print("Full Fit:",m_full.log_likelihood())
# -
| presentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# +
# install River, a Python library for online machine learning.
# # !pip install river
import river
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
np.random.seed(42)
# -
# More streams are available on river's [API reference](https://riverml.xyz/latest/api/overview/).
# +
# load the Electricity prices in New South Wales data stream from river
dataset = river.datasets.Elec2()
print(dataset)
# +
# print two sample from the data stream
for x, y in dataset.take(2):
pprint(x)
print(y)
#break
# +
# Build a pipeline with a StandardScaler and LogisticRegression (SGD, lr=0.05).
model = river.compose.Pipeline(river.preprocessing.StandardScaler(),
                     river.linear_model.LogisticRegression(river.optim.SGD(lr=0.05)))
# performance metric, updated prequentially (predict first, then learn)
metric = river.metrics.Accuracy()
# NOTE(review): re-assigning from update()/learn_one() relies on the older
# river API where these methods returned self — confirm against the
# installed river version (newer releases return None).
for x, y in dataset:
    y_pred = model.predict_one(x) # make a prediction
    metric = metric.update(y, y_pred) # update the metric
    model = model.learn_one(x, y) # make the model learn
print(metric)
# -
# # Concept drift and Drift detection
# We will use numpy to generate a stream with **four drifts**
# +
# Generate data for 5 distributions
random_state = np.random.RandomState(seed=42)
dist_a = random_state.normal(0.4, 0.02, 1000)
dist_b = random_state.normal(0.8, 0.05, 1000)
dist_c = random_state.normal(0.4, 0.02, 1000)
dist_d = random_state.normal(0.8, 0.05, 1000)
dist_e = random_state.normal(0.5, 0.01, 1000)
# Concatenate data to simulate a data stream with 4 drifts
stream = np.concatenate((dist_a, dist_b, dist_c, dist_d, dist_e))
# Auxiliary function to plot the data
def plot_data(dist_a, dist_b, dist_c, dist_d, dist_e, drifts=None):
    """Plot the concatenated stream (left panel) and the five source
    distributions as overlaid histograms (right panel), marking detected
    drift positions with vertical red lines.

    NOTE(review): the line plot reads the module-level ``stream`` variable,
    not the dist_* arguments — the arguments only feed the histograms, so
    this function assumes ``stream`` was built from these same samples.

    Parameters
    ----------
    dist_a..dist_e : array-like
        The five normal samples that were concatenated into ``stream``.
    drifts : list[int] | None
        Stream indices where a detector flagged a drift (optional).
    """
    fig = plt.figure(figsize=(7,3), tight_layout=True)
    # 5:1 width ratio: wide stream panel, narrow histogram panel.
    gs = gridspec.GridSpec(1, 2, width_ratios=[5, 1])
    ax1, ax2 = plt.subplot(gs[0]), plt.subplot(gs[1])
    ax1.grid()
    ax1.plot(stream, label='Stream')
    ax2.grid(axis='y')
    ax2.hist(dist_a, label=r'$dist_a$')
    ax2.hist(dist_b, label=r'$dist_b$')
    ax2.hist(dist_c, label=r'$dist_c$')
    ax2.hist(dist_d, label=r'$dist_d$')
    ax2.hist(dist_e, label=r'$dist_e$')
    if drifts is not None:
        # One vertical marker per detected drift index.
        for drift_detected in drifts:
            ax1.axvline(drift_detected, color='red')
    plt.show()
plot_data(dist_a, dist_b, dist_c, dist_d, dist_e)
# -
# ## Variable window
# [ADWIN](https://riverml.xyz/dev/api/drift/ADWIN/) efficiently keeps a variable-length window of recent items, such that it holds that there has been no change in the data distribution within the window.
# +
adwin = river.drift.ADWIN()
drifts_adw = []
for i, val in enumerate(stream):
adwin.update(val) # Data is processed one sample at a time
if adwin.change_detected:
# The drift detector indicates after each sample if there is a drift in the data
print(f'Drift detected at index {i}')
drifts_adw.append(i)
adwin.reset() # As a best practice, we reset the detector
plot_data(dist_a, dist_b, dist_c, dist_d, dist_e, drifts_adw)
# -
# ## Fixed window
# Kolmogorov-Smirnov Windowing ([KSWIN](https://riverml.xyz/dev/api/drift/KSWIN/)) method for concept drift detection.
# +
# window_size sets the sliding window's size
# for our stream the best window_size is 1000
kswin = river.drift.KSWIN(window_size = 600)
drifts_ksw = []
for i, val in enumerate(stream):
in_drift, in_warning = kswin.update(val)
if in_drift:
print(f'Drift detected at index {i}')
drifts_ksw.append(i)
#drift_detector.reset() # As a best practice, we reset the detector
plot_data(dist_a, dist_b, dist_c, dist_d, dist_e, drifts_ksw)
# -
# ## Forgetting
# [PageHinkley](https://riverml.xyz/dev/api/drift/PageHinkley/) detects a concept drift if the observed mean at some instant is greater then a threshold value lambda.
# +
# min_instances: sets minimum number of instances before detecting change
# alpha: is forgetting factor, used to weight the observed value and the mean
# you can try with different values of min_instances and alpha
# and observe where the drift is detected
# for our stream, the best values are min_instaces = 1000 and alpha ~ 1.03
ph = river.drift.PageHinkley(min_instances = 500, alpha = 1.01)
drifts_ph = []
# Update drift detector and verify if change is detected
for i, val in enumerate(stream):
in_drift, in_warning = ph.update(val)
if in_drift:
print(f"Change detected at index {i}")
drifts_ph.append(i)
plot_data(dist_a, dist_b, dist_c, dist_d, dist_e, drifts_ph)
# +
# Phishing data set
#phishing = datasets.Phishing()
# -
# # Performance Evaluation
# For performance evaluation, we will use scikit-multiflow
# +
# install it with
# # !pip install scikit-multiflow
import skmultiflow
# +
# Build a synthetic stream whose generating concept drifts partway through.
ht_stream = skmultiflow.data.ConceptDriftStream()
# Incremental Hoeffding Tree learner with the Gini split criterion.
ht = skmultiflow.trees.HoeffdingTreeClassifier(split_criterion='gini')

# Interleaved test-then-train loop: score each sample first, then learn from it.
sample_cap = 20000
n_samples = 0
correct_cnt = 0
while n_samples < sample_cap and ht_stream.has_more_samples():
    X, y = ht_stream.next_sample()
    if y[0] == ht.predict(X)[0]:
        correct_cnt += 1
    ht = ht.partial_fit(X, y)
    n_samples += 1

# Display results
print('{} samples analyzed.'.format(n_samples))
print('Hoeffding Tree accuracy: {}'.format(correct_cnt / n_samples))
# -
# ## Prequential Evaluation
# Read more about [EvaluatePrequential](https://scikit-multiflow.readthedocs.io/en/stable/api/generated/skmultiflow.evaluation.EvaluatePrequential.html#skmultiflow.evaluation.EvaluatePrequential)
# +
# %matplotlib notebook
# Interleaved test-then-train (prequential) evaluation: each sample is first
# used for prediction (scoring), then for incremental training.
preq_evaluator=skmultiflow.evaluation.EvaluatePrequential(show_plot=True,
                                                data_points_for_classification=False,
                                                max_samples=20000,
                                                metrics=['accuracy', 'recall',
                                                         'precision' ,'f1', 'running_time'])
#run evaluation
preq_evaluator.evaluate(stream=ht_stream, model=ht)
# -
# ## Holdout Evaluation
# Read more about [EvaluateHoldout](https://scikit-multiflow.readthedocs.io/en/stable/api/generated/skmultiflow.evaluation.EvaluateHoldout.html)
# %matplotlib notebook
# Periodic holdout evaluation: train continuously and, every n_wait samples,
# score the model on a held-out window of test_size samples.
# dynamic_test_set=True refreshes that holdout window as the stream advances.
hold_evaluator = skmultiflow.evaluation.EvaluateHoldout(max_samples=20000,
                                             n_wait = 500,
                                             test_size = 1000,
                                             show_plot=True,
                                             metrics=['accuracy', 'recall',
                                                      'precision' ,'f1', 'running_time'],
                                             dynamic_test_set=True)
# Run evaluation
hold_evaluator.evaluate(stream=ht_stream, model=ht)
| Online learning in practice.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Blend NASA day and night views for AGU abstract
#
# Plot a blended image of the NASA Blue and Black marble mosaic images with a general perspective image over the New Orleans Ernest N. Morial Convention Center.
#
# Based on code written by <NAME> in https://github.com/GenericMappingTools/pygmt/issues/1395#issuecomment-891777960.
#
# Data References - Global Earth Day/Night Images:
#
# - Blue Marble: https://visibleearth.nasa.gov/images/57752/blue-marble-land-surface-shallow-water-and-shaded-topography
# - Black Marble: https://earthobservatory.nasa.gov/features/NightLights/page3.php
#
# %%
import pygmt
# %%
# Set the resolution to 2 arc minutes (30s used in abstract)
res = "02m"
# %%
# Use the location of the Sun at 6.30am (sunrise) on 13 Dec 2021, Central Standard Time (UTC-6)
# The `# !gmt ...` lines below are shell escapes — they only execute when this
# file is run as a notebook, not as a plain Python script.
# !gmt solar -C -o0:1 -I+d2021-12-13T06:30+z-6 # -8.95331142671 -23.1626971083
# %%
# Make a global grid with a smooth 2-degree transition across day/night boundary.
# !gmt grdmath -Rd -I$res -r -8.95331142671 -23.1626971083 2 DAYNIGHT = w.nc
# %%
# Create an intensity grid based on a DEM so that we can see structures in the oceans
pygmt.grdgradient(
    grid=f"@earth_relief_{res}", normalize="t0.5", azimuth=45, outgrid="intens.nc"
)
# Mask so that the DEM-based intensity is NaN on land
# !gmt grdmath @earth_mask_$res 0 EQ 0 NAN intens.nc MUL = intens_ocean.nc
# %%
# Blend the earth_day and earth_night geotiffs using the weights, so that when w is 1
# we get the earth_day, and then adjust colors based on the intensity.
# !gmt grdmix @earth_day_$res @earth_night_$res -Ww.nc -Gview.tif -Iintens_ocean.nc
# %%
# Plot this image on an Earth with view from over New Orleans Ernest N. Morial Convention Center
fig = pygmt.Figure()
fig.grdimage(
    grid="view.tif",
    # General Perspective lon0/lat0/width+z<altitude>+a<azimuth>+t<tilt>+w<twist>+v<vwidth>/<vheight>
    projection="G-90.0631825/29.9395386/25c+z3000+a345+t10+w-30+v90/60",
    verbose="e",
)
fig.logo(position="jTR+w3c")
fig.show()
# %%
# Save the figure
fig.savefig(fname="figures/agu2021_abstract.png")
| examples/abstract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
import matplotlib.pyplot as plt
import seaborn as sb
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchmetrics as tm
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
# + pycharm={"name": "#%%\n"}
with open('datasets/activity_labels.txt') as f:
    # Each line is "<id> <name>"; keep only the human-readable name.
    defined_labels = [line.strip().split(" ")[1] for line in f]


# + pycharm={"name": "#%%\n"}
def _read_feature_rows(path):
    """Parse one space-separated float vector per line into a 1-D tensor each."""
    rows = []
    with open(path) as handle:
        for line in handle:
            values = [float(tok) for tok in line.strip().split(" ")]
            rows.append(torch.tensor(values))
    return rows


def _read_labels(path):
    """Read one 1-based integer class label per line, shifted to 0-based."""
    with open(path) as handle:
        return [int(line.strip()) - 1 for line in handle]


X_train = _read_feature_rows("datasets/Train/X_train.txt")
y_train = _read_labels("datasets/Train/y_train.txt")
X_test = _read_feature_rows("datasets/Test/X_test.txt")
y_test = _read_labels("datasets/Test/y_test.txt")
# + pycharm={"name": "#%%\n"}
class TabularDataset(Dataset):
    """Minimal map-style dataset pairing feature rows with integer labels."""

    def __init__(self, ts, labels):
        self.ts = ts          # sequence of per-sample feature tensors
        self.labels = labels  # sequence of class indices, aligned with ts

    def __len__(self):
        """Number of labelled samples."""
        return len(self.labels)

    def __getitem__(self, idx):
        """Return the (features, label) pair at position idx."""
        return self.ts[idx], self.labels[idx]
# + pycharm={"name": "#%%\n"}
# Wrap the parsed tensors/labels in map-style datasets.
train_data = TabularDataset(X_train, y_train)
test_data = TabularDataset(X_test, y_test)
# + pycharm={"name": "#%%\n"}
# Batched loaders; only the training split is shuffled.
train_dataloader = DataLoader(train_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64)
# + pycharm={"name": "#%%\n"}
class ClassificationMetrics(nn.Module):
    """Bundle of torchmetrics classification metrics updated together.

    Holds four variants each of precision/recall/F1 (per-class "none",
    macro, micro, weighted), a row-normalized confusion matrix and the
    Matthews correlation coefficient.

    Args:
        num_classes: number of target classes.
        mode: tag ("val"/"test") callers use to prefix metric names when logging.
    """

    def __init__(self, num_classes: int, mode: str):
        super().__init__()
        self.num_classes = num_classes
        self.mode = mode
        self.categ_pc = tm.Precision(num_classes=self.num_classes, average="none")
        self.macro_pc = tm.Precision(num_classes=self.num_classes, average="macro")
        self.micro_pc = tm.Precision(num_classes=self.num_classes, average="micro")
        self.weigh_pc = tm.Precision(num_classes=self.num_classes, average="weighted")
        self.categ_rc = tm.Recall(num_classes=self.num_classes, average="none")
        self.macro_rc = tm.Recall(num_classes=self.num_classes, average="macro")
        self.micro_rc = tm.Recall(num_classes=self.num_classes, average="micro")
        self.weigh_rc = tm.Recall(num_classes=self.num_classes, average="weighted")
        self.categ_f1 = tm.F1(num_classes=self.num_classes, average="none")
        self.macro_f1 = tm.F1(num_classes=self.num_classes, average="macro")
        self.micro_f1 = tm.F1(num_classes=self.num_classes, average="micro")
        self.weigh_f1 = tm.F1(num_classes=self.num_classes, average="weighted")
        self.cnfs_mat = tm.ConfusionMatrix(num_classes=self.num_classes, normalize="true")
        self.mcc = tm.MatthewsCorrcoef(num_classes=self.num_classes)

    def to(self, device: torch.device):
        """Move every child metric to `device` and return self.

        Fix: the original returned None, breaking the nn.Module.to contract
        of returning the module. All metrics assigned in __init__ are
        registered submodules, so children() covers exactly those fields.
        """
        for child in self.children():
            child.to(device)
        return self

    def forward(self, x, y):
        """Feed predictions `x` and targets `y` into every metric's running state."""
        for child in self.children():
            child(x, y)
# + pycharm={"name": "#%%\n"}
class MultiLayerPerceptron(LightningModule):
    """One-hidden-layer MLP classifier with rich metric logging.

    Architecture: Linear(input_dim -> input_dim) -> ReLU -> Linear(input_dim -> num_classes).

    Args:
        input_dim: number of input features.
        num_classes: number of target classes.
        labels: human-readable class names used in logged plots; falls back
            to 0..num_classes-1 when None.
    """

    def __init__(self, input_dim, num_classes, labels):
        super().__init__()
        self.input_dim = input_dim
        self.num_classes = num_classes
        self.dense = nn.Linear(self.input_dim, self.input_dim)
        self.relu = nn.ReLU()
        self.linear = nn.Linear(self.input_dim, self.num_classes)
        self.model = nn.Sequential(self.dense, self.relu, self.linear)
        self.loss = nn.CrossEntropyLoss()
        self.labels = labels if labels is not None else list(range(self.num_classes))

    def setup(self, stage=None) -> None:
        # Metric bundles are created lazily per stage so fit and test each
        # get a fresh, correctly-tagged instance.
        if stage == 'fit':
            self.val_metrics = ClassificationMetrics(self.num_classes, "val")
        if stage == 'test':
            self.test_metrics = ClassificationMetrics(self.num_classes, "test")

    def forward(self, x):
        """Return raw class logits for a batch of feature vectors."""
        return self.model(x)

    def configure_optimizers(self):
        return torch.optim.AdamW(self.parameters(), lr=1e-3)

    def training_step(self, batch, batch_idx):
        x, y = batch
        pred = self(x)
        # Match the prediction's device/dtype, then cast back to long for
        # CrossEntropyLoss's integer-target form.
        y = y.type_as(pred).long()
        loss = self.loss(pred, y)
        self.log("train_loss", loss)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        pred = self(x)
        y = y.type_as(pred).long()
        loss = self.loss(pred, y)
        self.val_metrics(pred, y)
        self.log("val_loss", loss)
        self.log_scalars(self.val_metrics)

    def validation_epoch_end(self, outputs):
        self.log_nonscalars(self.val_metrics)

    def test_step(self, batch, batch_idx):
        x, y = batch
        pred = self(x)
        y = y.type_as(pred).long()
        loss = self.loss(pred, y)
        self.test_metrics(pred, y)
        self.log("test_loss", loss)
        self.log_scalars(self.test_metrics)

    def test_epoch_end(self, outputs):
        self.log_nonscalars(self.test_metrics)

    def log_scalars(self, metric: ClassificationMetrics):
        """Log every scalar (aggregated) metric under the bundle's mode prefix."""
        self.log(f"{metric.mode}_precision_macro", metric.macro_pc)
        self.log(f"{metric.mode}_precision_micro", metric.micro_pc)
        self.log(f"{metric.mode}_precision_weighted", metric.weigh_pc)
        self.log(f"{metric.mode}_recall_macro", metric.macro_rc)
        self.log(f"{metric.mode}_recall_micro", metric.micro_rc)
        self.log(f"{metric.mode}_recall_weighted", metric.weigh_rc)
        self.log(f"{metric.mode}_f1_macro", metric.macro_f1)
        self.log(f"{metric.mode}_f1_micro", metric.micro_f1)
        self.log(f"{metric.mode}_f1_weighted", metric.weigh_f1)
        self.log(f"{metric.mode}_mcc", metric.mcc)

    def log_nonscalars(self, metric: ClassificationMetrics):
        """Log the confusion-matrix heatmap and the per-class metric curves."""
        fig = plt.figure(figsize=(24, 24))
        cf_matrix = metric.cnfs_mat.compute().cpu().numpy()
        sb.heatmap(cf_matrix, annot=True, xticklabels=self.labels, yticklabels=self.labels, fmt='.1%')
        self.logger.experiment.add_figure(f"{metric.mode}_cnfs_mat", fig, global_step=self.current_epoch)
        categ_pc = metric.categ_pc.compute().cpu().tolist()
        pc_map = dict(zip(self.labels, categ_pc))
        self.logger.experiment.add_scalars(f"{metric.mode}_precision_categ", pc_map, global_step=self.current_epoch)
        # BUG FIX: the original read categ_pc here too, so the "recall" and
        # "f1" per-class plots silently showed precision values.
        categ_rc = metric.categ_rc.compute().cpu().tolist()
        rc_map = dict(zip(self.labels, categ_rc))
        self.logger.experiment.add_scalars(f"{metric.mode}_recall_categ", rc_map, global_step=self.current_epoch)
        categ_f1 = metric.categ_f1.compute().cpu().tolist()
        f1_map = dict(zip(self.labels, categ_f1))
        self.logger.experiment.add_scalars(f"{metric.mode}_f1_categ", f1_map, global_step=self.current_epoch)
# + pycharm={"name": "#%%\n"}
model = MultiLayerPerceptron(561, 12, defined_labels)  # 561 features, 12 classes
# Keep the checkpoint with the lowest validation loss.
checkpoint_callback = ModelCheckpoint(monitor="val_loss")
trainer = Trainer(gpus=0, max_epochs=50, callbacks=[checkpoint_callback])
# NOTE(review): the test split doubles as the validation split here, so the
# reported test metrics are not independent of model selection.
trainer.fit(model, train_dataloaders=train_dataloader, val_dataloaders=test_dataloader)
trainer.test(model, test_dataloaders=test_dataloader)
# + pycharm={"name": "#%%\n"}
| perceptron.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/devhemza/deeplearningproject/blob/main/Attempt_from_scratch_Keras_abstractive_text_summarization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="lRXNoG_dH8z8"
# Since the GRUs are bi-directional, there is both a forward and a backward state which are combined (concatenated) as the encoder state.
# + colab={"base_uri": "https://localhost:8080/"} id="qtrTASvhMBPe" outputId="de4129c2-5a48-4af2-9de7-3fcc2c25f7a4"
# !pip install pyrouge
# + id="u0jXdShwGjUs"
import tensorflow as tf
import os
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras import backend as K
class AttentionLayer(Layer):
    """
    Bahdanau (additive) attention (https://arxiv.org/pdf/1409.0473.pdf).

    Three trainable weight sets are introduced: W_a (applied to the encoder
    states), U_a (applied to the decoder states) and V_a (projects the
    combined energies down to one scalar per encoder time step).
    """
    def __init__(self, **kwargs):
        super(AttentionLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # input_shape is [encoder_output_shape, decoder_output_shape].
        assert isinstance(input_shape, list)
        # Create a trainable weight variable for this layer.
        self.W_a = self.add_weight(name='W_a',
                                   shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),
                                   initializer='uniform',
                                   trainable=True)
        self.U_a = self.add_weight(name='U_a',
                                   shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),
                                   initializer='uniform',
                                   trainable=True)
        self.V_a = self.add_weight(name='V_a',
                                   shape=tf.TensorShape((input_shape[0][2], 1)),
                                   initializer='uniform',
                                   trainable=True)
        super(AttentionLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, inputs, verbose=False):
        """
        inputs: [encoder_output_sequence, decoder_output_sequence]
        Returns (context_vectors, attention_weights), both sequenced over
        the decoder time axis.
        """
        assert type(inputs) == list
        encoder_out_seq, decoder_out_seq = inputs
        if verbose:
            print('encoder_out_seq>', encoder_out_seq.shape)
            print('decoder_out_seq>', decoder_out_seq.shape)

        def energy_step(inputs, states):
            """ Step function for computing energy for a single decoder state
            inputs: (batchsize * 1 * de_in_dim)
            states: (batchsize * 1 * de_latent_dim)
            """
            assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
            assert isinstance(states, list) or isinstance(states, tuple), assert_msg
            """ Some parameters required for shaping tensors"""
            en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]
            de_hidden = inputs.shape[-1]
            """ Computing S.Wa where S=[s0, s1, ..., si]"""
            # <= batch size * en_seq_len * latent_dim
            W_a_dot_s = K.dot(encoder_out_seq, self.W_a)
            """ Computing hj.Ua """
            # expand_dims so the (batch, latent) decoder term broadcasts
            # against every encoder time step.
            U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1)  # <= batch_size, 1, latent_dim
            if verbose:
                print('Ua.h>', U_a_dot_h.shape)
            """ tanh(S.Wa + hj.Ua) """
            # <= batch_size*en_seq_len, latent_dim
            Ws_plus_Uh = K.tanh(W_a_dot_s + U_a_dot_h)
            if verbose:
                print('Ws+Uh>', Ws_plus_Uh.shape)
            """ softmax(va.tanh(S.Wa + hj.Ua)) """
            # <= batch_size, en_seq_len
            e_i = K.squeeze(K.dot(Ws_plus_Uh, self.V_a), axis=-1)
            # <= batch_size, en_seq_len
            e_i = K.softmax(e_i)
            if verbose:
                print('ei>', e_i.shape)
            return e_i, [e_i]

        def context_step(inputs, states):
            """ Step function for computing ci using ei """
            assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
            assert isinstance(states, list) or isinstance(states, tuple), assert_msg
            # Weighted sum of encoder states: <= batch_size, hidden_size
            c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)
            if verbose:
                print('ci>', c_i.shape)
            return c_i, [c_i]

        # Dummy initial states with the right shapes for K.rnn; their values
        # are never used because each step returns its own state.
        fake_state_c = K.sum(encoder_out_seq, axis=1)
        fake_state_e = K.sum(encoder_out_seq, axis=2)  # <= (batch_size, enc_seq_len, latent_dim
        """ Computing energy outputs """
        # e_outputs => (batch_size, de_seq_len, en_seq_len)
        last_out, e_outputs, _ = K.rnn(
            energy_step, decoder_out_seq, [fake_state_e],
        )
        """ Computing context vectors """
        last_out, c_outputs, _ = K.rnn(
            context_step, e_outputs, [fake_state_c],
        )
        return c_outputs, e_outputs

    def compute_output_shape(self, input_shape):
        """ Outputs produced by the layer """
        return [
            tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),
            tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))
        ]
# + id="beLd09tiL6CY"
from numpy.random import seed
seed(1)  # fix NumPy's global RNG for reproducible initialization/sampling
from sklearn.model_selection import train_test_split as tts
import logging
import tensorflow as tf
from pyrouge import Rouge155
import matplotlib.pyplot as plt
import keras
from keras import initializers
from keras.optimizers import RMSprop, Adadelta
from keras.models import Model
from keras.layers import Bidirectional, Dense,GRU,Input,Activation,Add,TimeDistributed,\
Permute,Flatten,RepeatVector,merge,Lambda,Multiply,Reshape, Attention, Embedding, Concatenate
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
# + [markdown] id="tafxlU2STLTm"
# Load data
# + colab={"base_uri": "https://localhost:8080/"} id="n9cj16XUTNq4" outputId="052ab265-7316-48fd-c788-368c63e83b98"
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="sGscGkXFTSbU" outputId="58844d1a-cd6e-4612-af8d-7c92a47ac9b3"
# %cd "/content/drive/MyDrive/M2/DeepLearning/DeepProject/text-summarization-tensorflow-master"
# + colab={"base_uri": "https://localhost:8080/"} id="hf4b16KopBx7" outputId="e8a30338-7bf4-4c27-80de-420a01d3825a"
# !pip install gensim
# !pip install wget
import nltk
nltk.download('punkt')
# + id="4-SPZoUOTc3N"
from nltk.tokenize import word_tokenize
import re
import collections
import pickle
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
from gensim.test.utils import get_tmpfile
from gensim.scripts.glove2word2vec import glove2word2vec
# Root folder holding the Gigaword-style corpus and cached artifacts.
default_path = "./sample_data/"
train_article_path = default_path + "sumdata/train/train.article.txt"
train_title_path = default_path + "sumdata/train/train.title.txt"
valid_article_path = default_path + "sumdata/train/valid.article.filter.txt"
valid_title_path = default_path + "sumdata/train/valid.title.filter.txt"
# Alternative validation set (DUC2003), kept for reference:
#valid_article_path = default_path + "sumdata/DUC2003/input.txt"
#valid_title_path = default_path + "sumdata/DUC2003/task1_ref0.txt"
def clean_str(sentence):
    """Collapse every run of '#' and '.' characters into a single '#'."""
    return re.sub("[#.]+", "#", sentence)
def get_text_list(data_path, toy):
    """Read up to 200000 (or 50 when `toy` is truthy) cleaned lines.

    Each line is stripped and passed through clean_str. The file is streamed
    with islice instead of the original readlines()[:N], which loaded and
    cleaned the entire corpus in memory even in toy mode.

    Args:
        data_path: path to a UTF-8 text file, one sentence per line.
        toy: when truthy, cap at 50 lines for quick experiments.
    Returns:
        list[str] of cleaned lines, in file order.
    """
    from itertools import islice  # local import keeps the file's import layout intact
    limit = 50 if toy else 200000
    with open(data_path, "r", encoding="utf-8") as f:
        return [clean_str(line.strip()) for line in islice(f, limit)]
def build_dict(step, toy=False):
    """Build (step == "train") or load (step == "valid") the word→index vocabulary.

    Training tokenizes articles + titles, orders words by frequency after the
    four reserved entries (indices 0-3), and pickles the dict next to the
    data; validation reloads that pickle.

    Returns:
        (word_dict, reversed_dict, article_max_len, summary_max_len)
    NOTE(review): for any other `step` value word_dict is never assigned and
    the code below raises NameError — consider validating the argument.
    """
    if step == "train":
        train_article_list = get_text_list(train_article_path, toy)
        train_title_list = get_text_list(train_title_path, toy)
        words = list()
        for sentence in train_article_list + train_title_list:
            for word in word_tokenize(sentence):
                words.append(word)
        word_counter = collections.Counter(words).most_common()
        word_dict = dict()
        # Reserved ids: padding, out-of-vocabulary, sequence start/end.
        word_dict["<padding>"] = 0
        word_dict["<unk>"] = 1
        word_dict["<s>"] = 2
        word_dict["</s>"] = 3
        for word, _ in word_counter:
            word_dict[word] = len(word_dict)
        with open(default_path + "word_dict.pickle", "wb") as f:
            pickle.dump(word_dict, f)
    elif step == "valid":
        with open(default_path + "word_dict.pickle", "rb") as f:
            word_dict = pickle.load(f)
    reversed_dict = dict(zip(word_dict.values(), word_dict.keys()))
    # Hard caps on tokenized article/summary lengths used downstream.
    article_max_len = 50
    summary_max_len = 50
    return word_dict, reversed_dict, article_max_len, summary_max_len
def build_dataset(step, word_dict, article_max_len, summary_max_len, toy=False):
    """Tokenize, index, truncate and pad articles (and titles when training).

    Unknown words map to <unk>; every sequence is right-padded with
    <padding> to its maximum length.

    Returns:
        step == "valid": padded article id-lists `x`.
        step == "train": (x, y) where y are the padded title id-lists,
            clipped to summary_max_len - 1 (presumably leaving room for an
            end token downstream — confirm).
    Raises:
        NotImplementedError: for any other `step` value.
    """
    if step == "train":
        article_list = get_text_list(train_article_path, toy)
        title_list = get_text_list(train_title_path, toy)
    elif step == "valid":
        article_list = get_text_list(valid_article_path, toy)
    else:
        raise NotImplementedError
    x = [word_tokenize(d) for d in article_list]
    x = [[word_dict.get(w, word_dict["<unk>"]) for w in d] for d in x]
    x = [d[:article_max_len] for d in x]
    x = [d + (article_max_len - len(d)) * [word_dict["<padding>"]] for d in x]
    if step == "valid":
        return x
    else:
        y = [word_tokenize(d) for d in title_list]
        y = [[word_dict.get(w, word_dict["<unk>"]) for w in d] for d in y]
        y = [d[:(summary_max_len - 1)] for d in y]
        y = [d + (summary_max_len - len(d)) * [word_dict["<padding>"]] for d in y]
        return x, y
def get_init_embedding(word_dict , reversed_dict, embedding_size):
    """Assemble the initial embedding matrix for the vocabulary.

    Each row is a GloVe vector extended with three 10-wide blocks of a
    repeated scalar feature (tf-idf weight, POS-tag code, named-entity
    code); given the 300-dim GloVe pickle loaded below, embedding_size is
    assumed to be 330 — TODO confirm at call sites. Words missing from
    GloVe get an all-zero row.
    """
    embedding_matrix = np.zeros((len(word_dict) + 1, embedding_size))
    print("Loading Lists...")
    train_article_list = get_text_list(train_article_path, False)
    train_title_list = get_text_list(train_title_path, False)
    print("Loading TF-IDF...")
    tf_idf_list = tf_idf_generate(train_article_list+train_title_list)
    print("Loading Pos Tags...")
    pos_list , postags_for_named_entity = get_pos_tags_dict(word_dict.keys())
    print("Loading Named Entity...")
    named_entity_recs = named_entity(postags_for_named_entity)
    print("Loading Glove vectors...")
    with open( default_path + "model_glove_300.pkl", 'rb') as handle:
        word_vectors = pickle.load(handle)
    used_words = 0
    word_vec_list = list()
    for i, word in sorted(reversed_dict.items()):
        try:
            word_vec = word_vectors.word_vec(word)
            # Append the word's tf-idf weight, repeated 10 times (0 if absent).
            if word in tf_idf_list:
                v= tf_idf_list[word]
                rich_feature_array = np.array([v,v,v,v,v,v,v,v,v,v])
                word_vec = np.append(word_vec, rich_feature_array)
            else:
                v=0
                rich_feature_array = np.array([v,v,v,v,v,v,v,v,v,v])
                word_vec = np.append(word_vec, rich_feature_array)
            # Append the POS-tag categorical code, repeated 10 times.
            if word in pos_list:
                v=pos_list[word]
                rich_feature_array_2 = np.array([v,v,v,v,v,v,v,v,v,v])
                word_vec = np.append(word_vec, rich_feature_array_2)
            else:
                v=0
                rich_feature_array_2 = np.array([v,v,v,v,v,v,v,v,v,v])
                word_vec = np.append(word_vec, rich_feature_array_2)
            # Append the named-entity categorical code, repeated 10 times.
            if word in named_entity_recs:
                v=named_entity_recs[word]
                rich_feature_array_3 = np.array([v,v,v,v,v,v,v,v,v,v])
                word_vec = np.append(word_vec, rich_feature_array_3)
            else:
                v=0
                rich_feature_array_3 = np.array([v,v,v,v,v,v,v,v,v,v])
                word_vec = np.append(word_vec, rich_feature_array_3)
            used_words += 1
        except KeyError:
            # Word absent from GloVe: fall back to an all-zero vector
            # (this also covers <padding> and <unk>).
            word_vec = np.zeros([embedding_size], dtype=np.float32)
        word_vec_list.append(np.array(word_vec))
        embedding_matrix[i] = word_vec
    print("words found in glove percentage = " + str((used_words/len(word_vec_list))*100) )
    # Assign random vector to <s>, </s> token
    # NOTE(review): these random rows go into word_vec_list only, which is
    # never returned — embedding_matrix rows 2 and 3 keep their earlier
    # (zero) values. embedding_matrix[2]/[3] were likely intended; confirm.
    word_vec_list[2] = np.random.normal(0, 1, embedding_size)
    word_vec_list[3] = np.random.normal(0, 1, embedding_size)
    return embedding_matrix
# + [markdown] id="ZxFTlhgDsfUF"
# #### TF-IDF
# + id="ZoqnXswL_FzR"
# _____TF-IDF libraries_____
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
# _____helper Libraries_____
import pickle # would be used for saving temp files
import csv # used for accessing the dataset
import timeit # to measure time of training
import random # used to get a random number
def tf_idf_generate(sentences):
    """Map each vocabulary word of `sentences` to its global idf weight.

    Despite the name, the returned dict holds the fitted transformer's
    idf_ values (one scalar per word), not per-document tf-idf scores.
    Based on https://stackoverflow.com/questions/30976120/find-the-tf-idf-score-of-specific-words-in-documents-using-sklearn
    """
    from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
    # our corpus
    data = sentences
    cv = CountVectorizer()
    # convert text data into term-frequency matrix
    data = cv.fit_transform(data)
    tfidf_transformer = TfidfTransformer()
    # convert term-frequency matrix into tf-idf
    tfidf_matrix = tfidf_transformer.fit_transform(data)
    # NOTE(review): CountVectorizer.get_feature_names() was removed in
    # scikit-learn 1.2 in favor of get_feature_names_out() — confirm the
    # pinned sklearn version.
    word2tfidf = dict(zip(cv.get_feature_names(), tfidf_transformer.idf_))
    return word2tfidf
# + [markdown] id="0dZvXIVXsiEt"
# ### POS tags
# + colab={"base_uri": "https://localhost:8080/"} id="c5YAAeXrsuwe" outputId="3f3f26e5-0065-4c17-ac97-157fcba79138"
import nltk
nltk.download('averaged_perceptron_tagger')
# + id="ajEzVjvpslJt"
#https://stackoverflow.com/questions/38088652/pandas-convert-categories-to-numbers
import nltk
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag
#ex = 'European authorities fined Google a record $5.1 billion on Wednesday for abusing its power in the mobile phone market and ordered the company to alter its practices'
def get_pos_tags_dict(words):
    """POS-tag `words` and map each word to an integer tag code.

    Returns:
        (pos_list, post_tags_for_words) where pos_list maps
        word -> pandas categorical code of its POS tag (an arbitrary but
        consistent integer per tag string), and post_tags_for_words is the
        raw (word, tag) list, reused later for named-entity chunking.
    """
    post_tags_for_words = nltk.pos_tag(words)
    pos_list ={}
    for word,pos in post_tags_for_words:
        pos_list[word] = pos
    import pandas as pd
    # Convert tag strings to stable integer codes via a pandas categorical.
    df = pd.DataFrame(list(pos_list.items()))
    df.columns = ['word', 'pos']
    df.pos = pd.Categorical(df.pos)
    df['code'] = df.pos.cat.codes
    pos_list ={}
    for index, row in df.iterrows():
        pos_list[row['word']] = row['code']
    print(pos_list)  # debug output; noisy for large vocabularies
    return pos_list , post_tags_for_words
# + [markdown] id="tKBuNALo-idI"
# ### Named Entity Recognition
# + colab={"base_uri": "https://localhost:8080/"} id="svj4Nx7X-wSn" outputId="be7db567-1e3f-4f14-aa38-9cbf6d195a2a"
import nltk
nltk.download('maxent_ne_chunker')
nltk.download('words')
# + id="a0pP9BSA-h10"
#https://nlpforhackers.io/named-entity-extraction/
from nltk import word_tokenize, pos_tag, ne_chunk
#sentence = "Mark and John are working at Google."
#print (ne_chunk(pos_tag(word_dict.keys())[:5]))
#names = ne_chunk(pos_tag(word_tokenize(sentence)))
#names = ne_chunk(pos_tag(word_tokenize(sentence)))
def named_entity(post_tags_for_words):
    """Chunk POS-tagged words and map single-token entities to integer codes.

    Based on https://nlpforhackers.io/named-entity-extraction/.
    Returns a dict word -> pandas categorical code of its named-entity label,
    derived from the string form of each single-leaf chunk; codes are
    arbitrary but consistent within one call.
    """
    names = ne_chunk(post_tags_for_words)
    names_dict = {}
    for n in names:
        if (len(n) == 1):
            # A single-leaf chunk prints as "(LABEL word/TAG)"; split out
            # the label (dropping the '(') and the word (dropping the tag).
            named_entity = str(n).split(' ')[0][1:]
            word = str(n).split(' ')[1].split('/')[0]
            names_dict[word] = named_entity
    print (names_dict)  # debug output
    import pandas as pd
    # Convert label strings to stable integer codes via a pandas categorical.
    df = pd.DataFrame(list(names_dict.items()))
    df.columns = ['word', 'pos']
    df.pos = pd.Categorical(df.pos)
    df['code'] = df.pos.cat.codes
    names_dict ={}
    for index, row in df.iterrows():
        names_dict[row['word']] = row['code']
    print(names_dict)  # debug output
    return names_dict
# + colab={"base_uri": "https://localhost:8080/"} id="w1IhD1ZDaNTn" outputId="972e1de5-756e-4234-c42c-0330b0674c91"
# !ls sample_data
# + colab={"base_uri": "https://localhost:8080/"} id="RJD0vyy6WLIb" outputId="feac802f-227d-4180-e380-d748e2d65b6a"
print("Building dictionary...")
# Build the vocabulary from the full (non-toy) training corpus.
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict("train", False)
print("Loading training dataset...")
train_x, train_y = build_dataset("train", word_dict, article_max_len, summary_max_len, False)
# + id="bYl6S0yWU-qh"
# 330 = 300 GloVe dims + 3 blocks of 10 repeated scalar features.
train_embedding = get_init_embedding(word_dict , reversed_dict, 330)
# + colab={"base_uri": "https://localhost:8080/"} id="uTRgcqjpPIQr" outputId="39c809ad-9d17-4d61-a045-e294278c2263"
# Sanity checks on the matrix shape and a single row.
train_embedding.shape
# + colab={"base_uri": "https://localhost:8080/"} id="aSCxb9CCSKK8" outputId="e21987cb-3a7f-4675-a3c1-b01f2e63e59b"
train_embedding[1].shape
# + colab={"base_uri": "https://localhost:8080/"} id="45uoiuDuB3Eo" outputId="93a5f570-c73f-4fb5-c45e-be501d384687"
train_embedding.shape
# + colab={"base_uri": "https://localhost:8080/"} id="ZJDsKo1HinQ8" outputId="2adc25fe-0245-49fd-c09b-96f0eb28630f"
np.shape(train_x[1])
# + id="hETQtHsYO9vG"
def embedding_matrix_creater(embedding_dim, word_index, embeddings_index=None):
    """Build a (len(word_index) + 1, embedding_dim) matrix of word vectors.

    Row i holds the pretrained vector of the word whose index is i; words
    missing from the lookup keep all-zero rows (row 0 is reserved because
    indices are assumed to start at 1).

    Args:
        embedding_dim: width of each embedding vector.
        word_index: dict mapping word -> integer index (1-based).
        embeddings_index: dict-like mapping word -> vector. Defaults to a
            module-level `embeddings_index`, which is what the original
            version read implicitly as a free variable (it was never defined
            anywhere in this file, so every call raised NameError).
    Raises:
        NameError: when no lookup is passed and none exists at module level.
    """
    if embeddings_index is None:
        # Preserve the legacy behavior of reading the module-level lookup,
        # but fail with a clear message instead of an opaque NameError.
        embeddings_index = globals().get("embeddings_index")
        if embeddings_index is None:
            raise NameError("embeddings_index is not defined; pass it explicitly")
    embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
# + [markdown] id="44jts2DrU_O-"
# Model
# + colab={"base_uri": "https://localhost:8080/"} id="0NBD1hetH95X" outputId="4872cacf-e457-418c-c2f3-ce27c21c27e5"
### Constructor for baseline enc. dec. network.
### Adding the encoder
#######################model params###########################
batch_size = 50
num_classes = 1
epochs = 20
learning_rate = 0.005
clip_norm = 2.0
en_shape = np.shape(train_x)     # (num_examples, article_max_len)
de_shape = np.shape(train_y[1])  # shape of a single padded title
hidden_units = 400
###############################################################
###############################################################
encoder_inputs = Input(shape = (en_shape[1], ))
print(encoder_inputs)
"""_______encoder________"""
# GRU
# Embedding initialized from the precomputed GloVe+features matrix
# (330 dims; input sequences are padded to 50 tokens).
encoder_embedding_layer = Embedding(train_embedding.shape[0], 330, input_length = 50, weights = [train_embedding])
encoder_embedding = encoder_embedding_layer(encoder_inputs)
# Bidirectional GRU: returns the full state sequence plus the final
# forward and backward states.
encoder_GRU = Bidirectional(GRU(hidden_units, return_sequences=True, return_state=True))
encoder_output, encoder_fs, encoder_bs = encoder_GRU(encoder_embedding)
# Concatenate both directions' final states into one encoder state.
encoder_state = Concatenate()([encoder_fs, encoder_bs])
# + colab={"base_uri": "https://localhost:8080/"} id="lvop7O1sDsn7" outputId="f18a3673-9275-4d28-e78c-c0c59055a906"
encoder_embedding.shape
# + id="YiV0z4myJ1tu"
### Adding the decoder
decoder_input = Input(shape=(None, ))
# Decoder embedding shares the same pretrained matrix as the encoder.
decoder_embedding = Embedding(train_embedding.shape[0], train_embedding.shape[1], input_length = 50, weights = [train_embedding])(decoder_input)
# GRU using encoder_states as initial state; width is 2*hidden_units to
# match the concatenated bidirectional encoder state.
decoder_gru = GRU(hidden_units*2, return_sequences=True, return_state=True)
decoder_output, decoder_state = decoder_gru(decoder_embedding, initial_state=[encoder_state])
# + id="stoVb9V9zoOW"
# Attention Layer for alignment model and computation of weights for all encoder hidden states
attention_layer = AttentionLayer()
attention_out, attention_states = attention_layer([encoder_output, decoder_output])
# Concat attention output and decoder GRU output
decoder_concatenate = Concatenate(axis=-1)([decoder_output, attention_out])
# Dense layer for generating words from vocabulary distribution
# (vocabulary size + 1 to cover the reserved row 0 of the embedding matrix).
decoder_dense = TimeDistributed(Dense(train_embedding.shape[0]+1, activation='softmax'))
decoder_dense_output = decoder_dense(decoder_concatenate)
# Define the model
model = Model(inputs = [encoder_inputs, decoder_input], outputs = decoder_dense_output)
opt = Adadelta()
# NOTE(review): the targets passed to fit() are integer word ids, but
# 'categorical_crossentropy' expects one-hot vectors —
# 'sparse_categorical_crossentropy' looks intended; confirm before training.
model.compile(optimizer=opt, loss='categorical_crossentropy')
# + colab={"base_uri": "https://localhost:8080/"} id="WsyOrN3bH7LX" outputId="330a7a5f-0461-4e9d-9def-aba17a6cfe58"
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 598} id="LnBXSSmhRp85" outputId="6343fe2b-0b8d-4b0d-9000-38c531d07ccb"
tf.keras.utils.plot_model(model, show_shapes=True)
# + id="Fg0E0lolLTts"
train_X = tf.convert_to_tensor(train_x)
train_Y = tf.convert_to_tensor(train_y)
# + colab={"base_uri": "https://localhost:8080/", "height": 918} id="lqjjRDckH8y4" outputId="bdc861c2-68d2-455a-92ae-3ea418519920"
# NOTE(review): the decoder input and the target are the same un-shifted
# sequence here; teacher forcing normally feeds the target shifted by one
# time step — confirm this is intended.
model.fit([train_X, train_Y], train_Y)
# + id="qNXQuPDtNGMR"
| attempts/Attempt_from_scratch_Keras_abstractive_text_summarization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing the required libraries
import librosa
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Input, Flatten, Dropout, Activation
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix
from keras import regularizers
import os
mylist= os.listdir('RawData/')
type(mylist)
# NOTE(review): os.listdir returns entries in arbitrary order, so the
# indices below (and the later label/feature alignment built from mylist)
# are platform-dependent — consider sorting.
print(mylist[1800])
print(mylist[300][6:-16])
# ## Plotting the audio file's waveform and its spectrogram
data, sampling_rate = librosa.load('RawData/su10.wav')
# +
# FIX: the bare "% pylab inline" magic is a SyntaxError in the .py form of
# this notebook; jupytext's convention is to comment magics out.
# %pylab inline
import os
import pandas as pd
import librosa
import glob

# Waveform plot (amplitude vs. time) of the clip loaded above.
plt.figure(figsize=(15, 5))
librosa.display.waveplot(data, sr=sampling_rate)
# +
import matplotlib.pyplot as plt
import scipy.io.wavfile
import numpy as np
import sys

# Hand-rolled log-magnitude spectrogram: 10 ms hop, 30 ms Hamming window.
sr, x = scipy.io.wavfile.read('RawData/f10.wav')

nstep = int(sr * 0.01)   # hop between frames (10 ms)
nwin = int(sr * 0.03)    # analysis window length (30 ms)
nfft = nwin
window = np.hamming(nwin)

# End positions of every full window that fits inside the waveform.
frame_ends = range(nwin, len(x), nstep)
X = np.zeros((len(frame_ends), nfft // 2))
for row, end in enumerate(frame_ends):
    segment = x[end - nwin:end]
    spectrum = np.fft.fft(window * segment, nfft)
    # Keep only the positive-frequency half, on a log-magnitude scale.
    X[row, :] = np.log(np.abs(spectrum[:nfft // 2]))

plt.imshow(X.T, interpolation='nearest',
           origin='lower',
           aspect='auto')
plt.show()
# -
# -
# ## Setting the labels
# Build one label per audio file.  RAVDESS-style names encode the emotion
# code at characters [6:-16] and the actor number at [18:-4] (even actor
# number = female, odd = male).  SAVEE-style names are recognised by their
# prefix and are all male speakers.  Files matching neither scheme (e.g.
# neutral/disgust/surprise) receive no label, exactly as before.
feeling_list=[]
for item in mylist:
    code = item[6:-16]
    emotion = {'02': 'calm', '03': 'happy', '04': 'sad',
               '05': 'angry', '06': 'fearful'}.get(code)
    if emotion is not None:
        gender = 'female' if int(item[18:-4]) % 2 == 0 else 'male'
        feeling_list.append(gender + '_' + emotion)
    elif item.startswith('a'):
        feeling_list.append('male_angry')
    elif item.startswith('f'):
        feeling_list.append('male_fearful')
    elif item.startswith('h'):
        feeling_list.append('male_happy')
    elif item.startswith('sa'):
        feeling_list.append('male_sad')
labels = pd.DataFrame(feeling_list)
labels[:100]
# ## Getting the features of audio files using librosa
# Extract one MFCC feature vector per eligible file (skips neutral '01',
# disgust '07', surprise '08' and the 'su'/'n'/'d' prefixed files).
df = pd.DataFrame(columns=['feature'])
bookmark=0
for index,y in enumerate(mylist):
    if mylist[index][6:-16]!='01' and mylist[index][6:-16]!='07' and mylist[index][6:-16]!='08' and mylist[index][:2]!='su' and mylist[index][:1]!='n' and mylist[index][:1]!='d':
        # 2.5 s clip at 44.1 kHz, offset 0.5 s into the file.
        X, sample_rate = librosa.load('RawData/'+y, res_type='kaiser_fast',duration=2.5,sr=22050*2,offset=0.5)
        sample_rate = np.array(sample_rate)
        # Mean over the 13 MFCC coefficients (axis=0), leaving one value
        # per time frame -- this is what later feeds the (216, 1) CNN input.
        mfccs = np.mean(librosa.feature.mfcc(y=X,
                                             sr=sample_rate,
                                             n_mfcc=13),
                        axis=0)
        feature = mfccs
        #[float(i) for i in feature]
        #feature1=feature[:135]
        df.loc[bookmark] = [feature]
        bookmark=bookmark+1
df[:5]
# Expand the per-row feature arrays into columns, then attach the labels.
df3 = pd.DataFrame(df['feature'].values.tolist())
# df3[:5]
newdf = pd.concat([df3,labels], axis=1)
# NOTE(review): this rename result is discarded two lines below, where
# shuffle(newdf) (not rnewdf) is assigned; also df3's columns are ints so
# the key "0" would not match anyway -- the rename is a no-op either way.
rnewdf = newdf.rename(index=str, columns={"0": "label"})
rnewdf[:5]
from sklearn.utils import shuffle
rnewdf = shuffle(newdf)
rnewdf[:10]
# Short clips yield fewer frames; pad the missing feature columns with 0.
rnewdf=rnewdf.fillna(0)
# ## Dividing the data into test and train
# Random ~80/20 row mask (not stratified by class).
newdf1 = np.random.rand(len(rnewdf)) < 0.8
train = rnewdf[newdf1]
test = rnewdf[~newdf1]
train[250:260]
# Last column is the label, everything before it is the feature vector.
trainfeatures = train.iloc[:, :-1]
trainlabel = train.iloc[:, -1:]
testfeatures = test.iloc[:, :-1]
testlabel = test.iloc[:, -1:]
# +
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
X_train = np.array(trainfeatures)
y_train = np.array(trainlabel)
X_test = np.array(testfeatures)
y_test = np.array(testlabel)
lb = LabelEncoder()
# NOTE(review): fit_transform is called separately on train and test; if a
# class is missing from either split the integer encodings will disagree.
# Fitting once on the union (or using fit on train + transform on test)
# would be safer -- confirm all 10 classes appear in both splits.
y_train = np_utils.to_categorical(lb.fit_transform(y_train))
y_test = np_utils.to_categorical(lb.fit_transform(y_test))
# -
y_train
X_train.shape
# ## Changing dimension for CNN model
# +
# Conv1D expects (samples, steps, channels); add a trailing channel axis.
x_traincnn =np.expand_dims(X_train, axis=2)
x_testcnn= np.expand_dims(X_test, axis=2)
# +
# 1-D CNN over the 216-frame MFCC-mean sequence.
model = Sequential()
model.add(Conv1D(256, 5,padding='same',
                 input_shape=(216,1)))
model.add(Activation('relu'))
model.add(Conv1D(128, 5,padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(128, 5,padding='same',))
model.add(Activation('relu'))
#model.add(Conv1D(128, 5,padding='same',))
#model.add(Activation('relu'))
#model.add(Conv1D(128, 5,padding='same',))
#model.add(Activation('relu'))
#model.add(Dropout(0.2))
model.add(Conv1D(128, 5,padding='same',))
model.add(Activation('relu'))
model.add(Flatten())
# 10 output classes -- must match the number of distinct labels produced
# by the LabelEncoder above; TODO confirm.
model.add(Dense(10))
model.add(Activation('softmax'))
# NOTE(review): lowercase `rmsprop` and the `lr`/`decay` kwargs are the old
# Keras API; newer versions use keras.optimizers.RMSprop(learning_rate=...).
opt = keras.optimizers.rmsprop(lr=0.00001, decay=1e-6)
# -
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=opt,metrics=['accuracy'])
# ### Removed the whole training part for avoiding unnecessary long epochs list
cnnhistory=model.fit(x_traincnn, y_train, batch_size=16, epochs=400, validation_data=(x_testcnn, y_test))
# Plot training vs validation loss over the epochs.
plt.plot(cnnhistory.history['loss'])
plt.plot(cnnhistory.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# ## Saving the model
model_name = 'Emotion_Voice_Detection_Model_Panda.h5'
save_dir = os.path.join(os.getcwd(), 'saved_models')
# Save model and weights
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
import json
# Persist the architecture separately as JSON (weights stay in the .h5).
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# ## Loading the model
# +
# loading json and creating model
from keras.models import model_from_json
# NOTE(review): plain open/read/close -- a `with` block would be safer,
# left unchanged here.
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("saved_models/Emotion_Voice_Detection_Model_Panda.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
# Reuses `opt` from the training cell above.
loaded_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
score = loaded_model.evaluate(x_testcnn, y_test, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
# -
# ## Predicting emotions on the test data
# Class-probability predictions for the held-out set.
preds = loaded_model.predict(x_testcnn,
                         batch_size=32,
                         verbose=1)
preds
# argmax over classes -> integer class ids.
preds1=preds.argmax(axis=1)
preds1
abc = preds1.astype(int).flatten()
abc
# Map integer ids back to the original label strings.
predictions = (lb.inverse_transform((abc)))
predictions
preddf = pd.DataFrame({'predictedvalues': predictions})
preddf[:240]
# Same decoding for the ground-truth one-hot labels.
actual=y_test.argmax(axis=1)
abc123 = actual.astype(int).flatten()
actualvalues = (lb.inverse_transform((abc123)))
actualdf = pd.DataFrame({'actualvalues': actualvalues})
actualdf[:10]
finaldf = actualdf.join(preddf)
# ## Actual v/s Predicted emotions
finaldf[100:110]
finaldf.groupby('actualvalues').count()
finaldf.groupby('predictedvalues').count()
finaldf.to_csv('Predictions_Panda.csv', index=False)
# ## Live Demo
# #### The file 'output10.wav' in the next cell is the file that was recorded live using the code in AudioRecoreder notebook found in the repository
# Load the live recording produced by the AudioRecorder notebook.
data, sampling_rate = librosa.load('output10.wav')
# +
# NOTE(review): IPython magic kept verbatim by jupytext; invalid outside a
# notebook.
% pylab inline
import os
import pandas as pd
import librosa
import glob
plt.figure(figsize=(15, 5))
librosa.display.waveplot(data, sr=sampling_rate)
# -
#livedf= pd.DataFrame(columns=['feature'])
# Same feature pipeline as training: 2.5 s @ 44.1 kHz, mean over 13 MFCCs.
X, sample_rate = librosa.load('output10.wav', res_type='kaiser_fast',duration=2.5,sr=22050*2,offset=0.5)
sample_rate = np.array(sample_rate)
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13),axis=0)
featurelive = mfccs
livedf2 = featurelive
livedf2= pd.DataFrame(data=livedf2)
# Transpose into a single row so the shape matches one training sample.
livedf2 = livedf2.stack().to_frame().T
livedf2
twodim= np.expand_dims(livedf2, axis=2)
livepreds = loaded_model.predict(twodim,
                         batch_size=32,
                         verbose=1)
livepreds
# Decode the predicted class id back to its label string.
livepreds1=livepreds.argmax(axis=1)
liveabc = livepreds1.astype(int).flatten()
livepredictions = (lb.inverse_transform((liveabc)))
livepredictions
| Speech-Emotion-Analyzer/final_results_gender_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="acac685b-e43b-42ef-be1a-ff090c5ce042"
# # Linear Regression (3D)
#
# Person A: "I forgot what we've learned during last season's linear algebra class."
#
# Person B: "Well I guess you are experiencing __LINEAR REGRESSION__!"
# + id="bb7d7571-bb4a-4221-b890-dbd0a6918ace"
# dependencies
import time
import numpy as np
import matplotlib.pyplot as plt
# + id="a4c41cbd-9ef4-469a-8dab-4ce41404446f"
# constants
# Seed used to make the random dataset generation reproducible across runs.
rand_seed = 397
# + id="addd69a4-2fec-47eb-a62a-a2456ad782fb"
# dataset
# y = x @ weights + biases
# dim(x) -> (n_data, dim_input)
# dim(weights) -> (dim_input, 1)
# dim(biases) -> (1)
def generate_dataset(dim_input, n_data, noise=7):
    """Create a random linear-regression dataset.

    Draws hidden ground-truth parameters and uniform inputs, then returns
    noisy targets ``y = x @ weights + biases + N(0, noise)``.

    Arguments:
        dim_input {int} -- number of input features.
        n_data {int} -- number of samples.

    Keyword Arguments:
        noise {float} -- std-dev of the additive gaussian noise (default: 7)

    Returns:
        (x, y) with shapes (n_data, dim_input) and (n_data, 1).
    """
    # Hidden ground truth: weights in (-10, 10), bias in (-25, 25).
    true_w = (np.random.random_sample((dim_input, 1)) - 0.5) * 20
    true_b = (np.random.random_sample() - 0.5) * 50
    # Inputs drawn uniformly from (-50, 50).
    inputs = (np.random.random_sample((n_data, dim_input)) - 0.5) * 100
    targets = inputs @ true_w + true_b + np.random.normal(0, noise, (n_data, 1))
    return inputs, targets
# + id="b4c5e739-7429-489d-b4bd-08419062824d"
# loss functions & accuracy
def loss(predictions, labels):
    """Sum-of-squared-errors between *predictions* and *labels*."""
    residuals = labels - predictions
    return np.square(residuals).sum()
def accuracy(predictions, labels, threshold=1):
    """Fraction of predictions within *threshold* of their label.

    A prediction counts as correct when |prediction - label| < threshold
    (strict inequality) or when it equals the label exactly.  The fraction
    is taken over ``predictions.shape[0]``.
    """
    residuals = predictions - labels
    within = np.abs(residuals) < threshold
    exact = residuals == 0
    return np.count_nonzero(within | exact) / predictions.shape[0]
# + id="8744291a-9248-4b86-9d97-f40cf81e93b6"
# write your code in this cell
def linear_regression(x, y, learning_rate=1e-3):
    """Fit ``y ~ x @ weights + biases`` and return the fitted parameters.

    The original stub returned undefined names (a NameError); this version
    solves the least-squares problem directly with ``np.linalg.lstsq``,
    which is exact and needs no step-size tuning.

    Arguments:
        x -- inputs, shape (n_data, dim_input).
        y -- targets, shape (n_data, 1).

    Keyword Arguments:
        learning_rate {float} -- unused by the closed-form solver; retained
            for backward compatibility with callers. (default: {1e-3})

    Returns:
        weights -- column vector of shape (dim_input, 1).
        biases -- scalar intercept.
    """
    # Augment x with a column of ones so the intercept is solved jointly.
    ones = np.ones((x.shape[0], 1))
    theta, _, _, _ = np.linalg.lstsq(np.hstack((x, ones)), y, rcond=None)
    weights = theta[:-1, :]
    biases = float(theta[-1, 0])
    return weights, biases
# + id="22aedb83-28a1-4ec1-87cb-5c838eec347b"
# generate & visualize a 3D dataset
# Fixed seed so the dataset (and hence the fit below) is reproducible.
np.random.seed(rand_seed)
x, y = generate_dataset(2, 100)
figure = plt.figure()
axes = figure.add_subplot(projection='3d')
# Scatter the two input features against the target.
_ = axes.scatter(x[:, 0], x[:, 1], y.flatten())
# + id="29c3228a-3a15-4394-b9a3-f539adab3148"
# testing
# Fit the exercise implementation and overlay its prediction on the data.
predicted_weights, predicted_biases = linear_regression(x, y)
figure = plt.figure()
axes = figure.add_subplot(projection='3d')
_ = axes.scatter(x[:, 0], x[:, 1], y.flatten())
# Two corner points spanning the input range, mapped through the fit.
rang = np.array([[min(x[:, 0]), min(x[:, 1])], [max(x[:, 0]), max(x[:, 1])]])
rang_y = rang @ predicted_weights + predicted_biases
# draws the diagonal of the hyperplane instead of the plane to make
# plotting clearer
_ = axes.plot(rang[:, 0], rang[:, 1], rang_y.flatten(), c='red')
# + id="3999a107-a1ee-4107-bcc3-882ab5da41d6"
| day-2/linear_regression_3d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="UO10cGdM5SGX"
# # História das Olimpíadas
# _(créditos ao prof. <NAME>)_
#
# Após um ano de atraso por conta da pandemia de Covid-19, as atenções do mundo todo se voltaram para Tóquio, no Japão, para acompanhar mais uma edição das Olimpíadas.
#
# No Brasil não foi diferente, e muitos se uniram para torcer por nossos atletas em diferentes competições, tanto em esportes onde o Brasil já possui tradição quanto em novos esportes.
#
# Vamos aproveitar o clima para estudar um pouco das Olimpíadas! Utilizaremos um _dataset_ com 120 anos de dados históricos das Olimpíadas, cobrindo desde os jogos de Atenas 1896 até Rio 2016.
#
# Faça o download do _dataset_ em https://www.kaggle.com/heesoo37/120-years-of-olympic-history-athletes-and-results e carregue o arquivo ```athlete_events.csv``` para um DataFrame utilizando Pandas. Aproveite para explorar seu DataFrame e se familiarizar com a sua estrutura.
#
# OBS: Fique à vontade para acrescentar mais células Python conforme necessário em qualquer etapa do exercício.
# + [markdown] id="1-In92bH5SGd"
# ## 1. O Brasil nas Olimpíadas
#
# Vamos começar estudando o desempenho do nossos próprio país. Gere um DataFrame novo contendo apenas as informações sobre atletas brasileiros.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 3688, "status": "ok", "timestamp": 1634250156076, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwY13Y95qBrTNrQRqtFjQ4dZr49VLcT0CL9r_cng=s64", "userId": "06240568517972348464"}, "user_tz": 180} id="c0MzysV95SGe" outputId="d2fde249-a387-48b3-f53e-68a1f1b1b1a8"
# !pip install pandas
# + executionInfo={"elapsed": 227, "status": "ok", "timestamp": 1634250255842, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhwY13Y95qBrTNrQRqtFjQ4dZr49VLcT0CL9r_cng=s64", "userId": "06240568517972348464"}, "user_tz": 180} id="ZFafcc4g56zQ"
import pandas as pd
# +
# Load the 120-years Olympics dataset and keep only Brazilian athletes.
df = pd.read_csv('../data/athlete_events.csv')
brazil = df.loc[df['Team'] == 'Brazil']
brazil.head()
# + [markdown] id="79xrCVYh5SGe"
# ### Medalhistas
#
# Vamos focar um pouco nos casos de sucesso do Brasil. Use o seu DataFrame anterior para filtrar apenas informações sobre **medalhistas** brasileiros.
#
# **DICA:** observe como a coluna ```Medal``` é representada quando o atleta não ganhou medalha.
# + id="9tlxQQja5SGf"
# Medal is NaN for non-medalists, so dropping NaN keeps medalists only.
brazil_with_medals = brazil.dropna(subset = ['Medal'])
brazil_with_medals.head()
# + [markdown] id="oAJtYJZA5SGf"
# ### Verão vs Inverno
#
# Você deve ter notado que temos duas categorias distintas de jogos olímpicos, representados pela estação: temos os jogos de verão e os jogos de inverno, que ocorrem de maneira intercalada.
#
# Agora que já conhecemos os medalhistas brasileiros, responda: quantos atletas brasileiros receberam medalha nos jogos de verão e quantos receberam nos jogos de inverno?
# + id="AHMm4ROR5SGg"
# Count Brazilian medalist rows per season (Summer vs Winter).
brazil_with_medals.groupby(['Season']).size()
# + [markdown] id="yCbiBGUR5SGg"
# Os jogos de verão são bem mais populares do que os jogos de inverno no Brasil. Portanto, deste ponto em diante iremos focar apenas nos jogos de verão. Descarte de seu DataFrame os dados dos jogos de inverno.
#
#
# + id="SiJvANPT5SGh"
# Keep only Summer-games medalists from here on.
brazil_with_medals_summer = brazil_with_medals.loc[brazil_with_medals['Season'] == 'Summer']
brazil_with_medals_summer.head()
# + [markdown] id="6eW-D6eC5SGi"
# ### Atletas do Brasil
#
# Vamos conhecer um pouco melhor nossos atletas. Descubra a altura e peso médio de nossos medalhistas.
# + [markdown] id="IDq-M0-f5SGi"
# Imaginamos que diferentes esportes podem beneficiar diferentes tipos físicos, certo? Então refaça a análise anterior, mas obtendo os valores médios **por esporte**.
# + id="FM32ADb-5SGj"
# Overall mean height/weight of Brazilian summer medalists.
print(f'Altura media: {round(brazil_with_medals_summer["Height"].mean(), 2)} cm')
print(f'Peso medio: {round(brazil_with_medals_summer["Weight"].mean(), 2)} Kg')
# +
# Mean medalist height per sport.
brazil_with_medals_summer_groupby_sport_height = round(brazil_with_medals_summer.groupby(["Sport"])["Height"].mean(), 2)
print(f'Altura media por esporte:\n {brazil_with_medals_summer_groupby_sport_height}')
# +
# Mean medalist weight per sport.
brazil_with_medals_summer_groupby_sport_weight = round(brazil_with_medals_summer.groupby(["Sport"])["Weight"].mean(), 2)
print(f'Peso medio por esporte:\n {brazil_with_medals_summer_groupby_sport_weight}')
# + [markdown] id="cOUVrKel5SGj"
# Será que os dados acima influenciaram no interesse geral dos atletas pelo esporte ou realmente impactaram no desempenho deles? Podemos tentar descobrir se há algum tipo de correlação.
#
# Você ainda possui o dataframe original contendo todos os atletas brasileiros, incluindo os sem medalha? Obtenha os valores médios de peso e altura por esporte daquele dataframe e compare-o com os dos medalhistas. Há alguma diferença significativa em algum esporte?
# + id="_fvqJvEd5SGk"
# Compare medalists' mean weight per sport against all Brazilian athletes.
# NOTE(review): the 'Without Medals' column actually contains ALL summer
# athletes (medalists included), not just non-medalists -- the label is
# slightly misleading.
brazil_summer = brazil.loc[brazil['Season'] == 'Summer']
brazil_summer_groupby_sport_weight = round(brazil_summer.groupby(["Sport"])["Weight"].mean(), 2)
series = {
    'With Medals': brazil_with_medals_summer_groupby_sport_weight,
    'Without Medals': brazil_summer_groupby_sport_weight
}
pd.DataFrame(series)
# +
# Same comparison for height.  NOTE(review): as above, 'Without Medals'
# really means "all athletes", medalists included.
brazil_summer = brazil.loc[brazil['Season'] == 'Summer']
brazil_summer_groupby_sport_height = round(brazil_summer.groupby(["Sport"])["Height"].mean(), 2)
series = {
    'With Medals': brazil_with_medals_summer_groupby_sport_height,
    'Without Medals': brazil_summer_groupby_sport_height
}
pd.DataFrame(series)
# + [markdown] id="ZV7LBF9F5SGl"
# Existe um detalhe importante passando batido até agora em nossa análise: as categorias esportivas costumam ser divididas por gênero justamente por conta de diferenças físicas entre homens e mulheres que poderiam influenciar no desempenho. Compare a altura e peso médios de atletas brasileiros por esporte segmentado por sexo.
# + id="qrFPqPGV5SGl"
# Mean height and weight per sport, segmented by sex.
brazil_summer_groupby_sport_and_sex_height = round(brazil_summer.groupby(["Sport", "Sex"])["Height"].mean(), 2)
brazil_summer_groupby_sport_and_sex_height
# -
brazil_summer_groupby_sport_and_sex_weight = round(brazil_summer.groupby(["Sport", "Sex"])["Weight"].mean(), 2)
brazil_summer_groupby_sport_and_sex_weight
# + [markdown] id="Qkk5fmAU5SGl"
# Qual foi (ou quais foram) o maior medalhista brasileiro em quantidade total de medalhas?
# + id="HRmh1AV45SGm"
# Athlete(s) tied for the most medal rows.  NOTE(review): this uses
# brazil_with_medals, which still includes Winter games, although the
# preceding analysis narrowed to Summer -- confirm which is intended.
brazil_with_medals['Name'].value_counts()[brazil_with_medals['Name'].value_counts() == brazil_with_medals['Name'].value_counts().max()]
# + [markdown] id="i9ywjI_-5SGm"
# E o(s) maior(es) em quantidade de medalhas de ouro?
# + id="Z9FCYvZK5SGn"
# Athlete(s) tied for the most GOLD medal rows.
brazil_with_medals_only_golds = brazil_with_medals[brazil_with_medals['Medal'] == 'Gold']
brazil_with_medals_only_golds['Name'].value_counts()[brazil_with_medals_only_golds['Name'].value_counts() == brazil_with_medals_only_golds['Name'].value_counts().max()]
# + [markdown] id="YBvaASBO5SGn"
# Qual esporte rendeu mais medalhas de ouro para o Brasil? E qual rendeu mais medalhas no total?
#
# **DICA:** tome muito cuidado nessa análise: cada **evento esportivo** rende 1 medalha. Por exemplo, quando a equipe de futebol vence, isso é considerado 1 medalha, mesmo tendo cerca de 20 atletas medalhistas na equipe.
# + id="4J_pMD3z5SGn"
# Grouping by (Sport, Event, Year) collapses team medals to one row each;
# cumcount numbers a sport's medals 1..k, so .max() is the best sport's
# total.  NOTE(review): only the count is shown, not which sport it is.
brazil_with_medals_groupby_sport = brazil_with_medals.groupby(by=['Sport', 'Event', 'Year']).size()
brazil_with_medals_groupby_sport_cumcount = brazil_with_medals_groupby_sport.groupby('Sport').cumcount() + 1
brazil_with_medals_groupby_sport_cumcount.max()
# + [markdown] id="IpTwJZor5SGo"
# Cada "categoria" dentro de um esporte é considerado um evento. Por exemplo, dentro de "atletismo", temos uma competição de 100m masculina, uma de 100m feminino, um revezamento 4 x 100m masculino, um revezamento 4 x 100m feminino, uma competição de 400m masculino, uma de 400m feminino, uma maratona masculina, uma maratona feminina, e assim sucessivamente.
#
# Sabendo disso, qual evento esportivo mais rendeu medalhas de ouro para o Brasil? E total de medalhas?
# + id="ebyQ65QO5SGo"
# Same one-medal-per-(Event, Year) trick, then list the event name(s) that
# reach the maximum medal count.
brazil_with_medals_groupby_event_and_year = brazil_with_medals.groupby(by=['Event', 'Year']).size()
brazil_with_medals_groupby_event_cumcount = brazil_with_medals_groupby_event_and_year.groupby('Event').cumcount() + 1
max_event = brazil_with_medals_groupby_event_cumcount.max()
max_events = brazil_with_medals_groupby_event_cumcount[brazil_with_medals_groupby_event_cumcount == max_event]
max_events_names = [event_name[0] for event_name in max_events.index]
max_events_names
# + [markdown] id="JfFreIuL5SGo"
# Para finalizar sobre o Brasil: obtenha o total de medalhas de ouro, prata, bronze e total por ano.
# + id="Skaz6fmt5SGo"
# Total gold/silver/bronze counts, deduplicated per (Sport, Event, Year)
# so team medals count once.
brazil_with_medals_by_gold = brazil_with_medals[(brazil_with_medals.Medal == 'Gold')]
brazil_with_medals_by_gold = brazil_with_medals_by_gold.groupby(['Sport', 'Event', 'Year'])['Medal'].size()
brazil_with_medals_by_gold = brazil_with_medals_by_gold.groupby('Sport').cumcount() + 1
print(f'Quantidade de medalhas de ouro do Brasil: {len(brazil_with_medals_by_gold)}')
# +
brazil_with_medals_by_silver = df.loc[(df['Team'] == 'Brazil') & (df['Medal'] == 'Silver')]
brazil_with_medals_by_silver = brazil_with_medals_by_silver.groupby(['Sport', 'Event', 'Year'])['Medal'].size()
brazil_with_medals_by_silver = brazil_with_medals_by_silver.groupby('Sport').cumcount() + 1
print(f'Quantidade de medalhas de prata do Brasil: {len(brazil_with_medals_by_silver)}')
# +
brazil_with_medals_by_bronze = df.loc[(df['Team'] == 'Brazil') & (df['Medal'] == 'Bronze')]
brazil_with_medals_by_bronze = brazil_with_medals_by_bronze.groupby(['Sport', 'Event', 'Year'])['Medal'].size()
brazil_with_medals_by_bronze = brazil_with_medals_by_bronze.groupby('Sport').cumcount() + 1
print(f'Quantidade de medalhas de bronze do Brasil: {len(brazil_with_medals_by_bronze)}')
# + [markdown] id="rzwfCqFN5SGo"
# ## 2. O mundo nos jogos de verão
#
# Vamos agora analisar um pouquinho do que aconteceu nas Olimpíadas de verão em todo o mundo.
#
# Retome o DataFrame original e descarte as informações sobre os jogos de inverno.
# + id="mmd0hfLl5SGo"
# World-level analysis: keep only Summer-games rows.
summer = df.loc[df['Season'] == 'Summer']
summer.head()
# + [markdown] id="3N0C7qtf5SGo"
# Obtenha a lista de todos os esportes já disputados nas olimpíadas de verão.
# + id="z9QrbMwL5SGo"
# Every sport ever contested at the Summer games.
summer['Sport'].unique()
# + [markdown] id="yKd9diEG5SGp"
# Obtenha a lista de todas as modalidades esportivas já disputadas nas olimpíadas de verão.
# + id="KYdVSr2b5SGp"
# Every event (sport sub-category) ever contested at the Summer games.
summer['Event'].unique()
# + [markdown] id="PL5rlAyU5SGp"
# Obtenha a lista de todos os países que já disputaram olimpíadas.
# + id="fzgmbnGg5SGp"
# Every team/country that has competed at the Summer games.
summer['Team'].unique()
# + [markdown] id="GkgOf12C5SGp"
# Qual atleta foi o maior medalhista (em medalhas totais) da história das olimpíadas de verão?
# + id="TtQrLcR75SGp"
# Fix: the question asks for the athlete with the most MEDALS, but the
# original counted every appearance row in `summer` (including rows where
# Medal is NaN), i.e. participations.  Restrict to medal-winning rows first.
summer.dropna(subset=['Medal'])['Name'].value_counts().idxmax()
# + [markdown] id="t_dbROes5SGp"
# Qual atleta foi o maior medalhista de ouro da história das olimpíadas de verão?
# + id="MNkKnccO5SGp"
# Athlete with the most gold-medal rows in Summer games history.
summer_golds = summer.loc[summer['Medal'] == 'Gold']
summer_golds['Name'].value_counts().idxmax()
# + [markdown] id="Q24AhVm35SGp"
# Qual país foi o maior medalhista de ouro da história das olimpíadas de verão? Lembre-se da questão do evento esportivo, para não considerar múltiplas medalhas para um mesmo evento (ex: uma equipe de futebol fazendo parecer que mais de 20 medalhas foram distribuídas).
# + id="tssImhHR5SGp"
# One gold per (Sport, Event, Year, Team); cumcount numbers each team's
# golds, so idxmax()[3] (index level 3 = Team) names the top country.
summer_golds_by_event = summer_golds.groupby(['Sport', 'Event', 'Year', 'Team'])['Medal'].count()
summer_golds_by_event = summer_golds_by_event.groupby('Team').cumcount() + 1
print(f'País: {summer_golds_by_event.idxmax()[3]} | Medalhas de ouro: {summer_golds_by_event.max()}')
# + [markdown] id="8KlUnXiH5SGq"
# Qual país foi o maior medalhista em medalhas totais na história das olimpíadas de verão?
# + id="31Vol04r5SGq"
# Fix: this section is about the SUMMER games, but the original filtered the
# full dataframe (df) and therefore also counted Winter medals; filter the
# `summer` frame instead, matching the variable's own name.
summer_medals = summer.dropna(subset = ['Medal'])
summer_medals_by_event = summer_medals.groupby(['Sport', 'Event', 'Year', 'Team'])['Medal'].count()
summer_medals_by_event_by_team = summer_medals_by_event.groupby('Team').cumcount() + 1
print(f'País: {summer_medals_by_event_by_team.idxmax()[3]} | Total de medalhas: {summer_medals_by_event_by_team.max()}')
# + [markdown] id="Lnd69njD5SGq"
# Obtenha o total de medalhas de ouro, prata e total por edição das Olimpíadas de verão. Lembre-se da questão do evento esportivo.
# + id="LlnHArzi5SGq"
# Medal totals per colour, deduplicated per (Sport, Event, Year).
# NOTE(review): this reassigns summer_medals_by_event from the previous
# cell with a different grouping -- the earlier value is lost.
summer_medals_by_event = summer_medals.groupby(['Sport', 'Event', 'Year', 'Medal']).count()
summer_medals_by_event_by_medal = summer_medals_by_event.groupby(['Medal']).cumcount() + 1
summer_medals_by_event_by_medal.groupby(level=3).apply(max)
# + [markdown] id="5PHpMV0-5SGq"
# ## 3. Brasil vs Mundo
# + [markdown] id="ElpxdyRJ5SGq"
# Para finalizar, vamos fazer algumas comparações entre Brasil e mundo. Qual o ranking do Brasil em cada edição das olimpíadas? Lembrando que o ranking é ordenado por medalhas de ouro.
# + id="gfuMZUlv5SGr"
# Rank countries by gold count and find Brazil's position.
summer_medals_golds = summer_medals.loc[summer_medals['Medal'] == 'Gold']
summer_medals_golds_by_event = summer_medals_golds.groupby(by = ['Sport', 'Event', 'Year', 'Team'])['Medal'].count()
summer_medals_golds_by_event_by_team = summer_medals_golds_by_event.groupby(['Team']).cumcount() + 1
summer_medals_golds_by_event_by_team = summer_medals_golds_by_event_by_team.groupby(level=3).apply(max)
summer_medals_golds_by_event_by_team_sorted = summer_medals_golds_by_event_by_team.sort_values(ascending=False)
# NOTE(review): Series.iteritems() is deprecated/removed in newer pandas
# (use .items()); left unchanged to match the environment this ran in.
brazil_position = [(index + 1) for (index, (team, medals)) in enumerate(summer_medals_golds_by_event_by_team_sorted.iteritems()) if team == 'Brazil'][0]
print(f'Posicao do Brasil em numero de medalhas de ouro: {brazil_position}ª')
# + [markdown] id="KQL9ViJk5SGr"
# Compare o maior medalhista em ouros do Brasil com o maior medalhista em ouros do mundo.
# + id="PqPCCG7X5SGr"
# Compare the top gold medalist worldwide vs Brazil's top gold medalist.
summer_golds_brazil = summer.loc[(summer['Medal'] == 'Gold') & (summer['Team'] == 'Brazil')]
biggest_gold_medalist_in_the_world = (summer_golds['Name'].value_counts().idxmax(), summer_golds['Name'].value_counts().max())
biggest_gold_medalist_in_brazil = (summer_golds_brazil['Name'].value_counts().idxmax(), summer_golds_brazil['Name'].value_counts().max())
print(biggest_gold_medalist_in_the_world)
print(biggest_gold_medalist_in_brazil)
# + [markdown] id="0jgZ3uX95SGr"
# Compare o maior medalhista em total de medalhas do Brasil com o maior medalhista em total de medalhas do mundo.
# + id="sV89yg-t5SGr"
summer_with_medals = summer.dropna(subset = ['Medal'])
# Fix: the question asks for TOTAL medals, but the original additionally
# filtered Medal == 'Gold' for the Brazilian side, so it compared golds
# against totals.  Count every medal colour for Brazil as well.
summer_brazil = summer_with_medals.loc[summer_with_medals['Team'] == 'Brazil']
biggest_medalist_in_the_world = (summer_with_medals['Name'].value_counts().idxmax(), summer_with_medals['Name'].value_counts().max())
biggest_medalist_in_brazil = (summer_brazil['Name'].value_counts().idxmax(), summer_brazil['Name'].value_counts().max())
print(biggest_medalist_in_the_world)
print(biggest_medalist_in_brazil)
# + [markdown] id="jWBWVchr5SGr"
# Compare o maior medalhista em ouros do Brasil com o maior medalhista do mundo no mesmo esporte.
# + id="eUoPH2_H5SGr"
# Per sport: Brazil's top gold medalist (name + count)...
summer_golds_brazil_by_name = summer_golds_brazil.groupby(['Sport'])['Name'].value_counts()
biggest_medalist_by_gold_in_brazil_by_sport_name = summer_golds_brazil_by_name.groupby(['Sport']).idxmax()
# idxmax returns (Sport, Name) tuples; keep only the name part.
biggest_medalist_by_gold_in_brazil_by_sport_name = biggest_medalist_by_gold_in_brazil_by_sport_name.apply(lambda x: x[1])
biggest_medalist_by_gold_in_brazil_by_sport_name = biggest_medalist_by_gold_in_brazil_by_sport_name.reset_index()
biggest_medalist_by_gold_in_brazil_by_sport_medal = summer_golds_brazil_by_name.groupby(['Sport']).max()
biggest_medalist_by_gold_in_brazil_by_sport_medal = biggest_medalist_by_gold_in_brazil_by_sport_medal.reset_index()
biggest_medalist_by_gold_in_brazil_by_sport_medal.columns = ['Sport', 'Medal']
biggest_medalist_by_gold_in_brazil_by_sport = pd.merge(biggest_medalist_by_gold_in_brazil_by_sport_name, biggest_medalist_by_gold_in_brazil_by_sport_medal)
biggest_medalist_by_gold_in_brazil_by_sport
# +
# ...and the world's top gold medalist per sport, built the same way.
summer_golds_world_by_name = summer_medals_golds.groupby(['Sport'])['Name'].value_counts()
biggest_medalist_by_gold_in_world_by_sport_name = summer_golds_world_by_name.groupby(['Sport']).idxmax()
biggest_medalist_by_gold_in_world_by_sport_name = biggest_medalist_by_gold_in_world_by_sport_name.apply(lambda x: x[1])
biggest_medalist_by_gold_in_world_by_sport_name = biggest_medalist_by_gold_in_world_by_sport_name.reset_index()
biggest_medalist_by_gold_in_world_by_sport_medal = summer_golds_world_by_name.groupby(['Sport']).max()
biggest_medalist_by_gold_in_world_by_sport_medal = biggest_medalist_by_gold_in_world_by_sport_medal.reset_index()
biggest_medalist_by_gold_in_world_by_sport_medal.columns = ['Sport', 'Medal']
biggest_medalist_by_gold_in_world_by_sport = pd.merge(biggest_medalist_by_gold_in_world_by_sport_name, biggest_medalist_by_gold_in_world_by_sport_medal)
biggest_medalist_by_gold_in_world_by_sport
# +
# Side-by-side comparison table (left join keeps all sports with a world
# leader, Brazil columns are NaN where Brazil has no gold in that sport).
aux_biggest_medalist_by_gold_in_world_by_sport = biggest_medalist_by_gold_in_world_by_sport.copy()
aux_biggest_medalist_by_gold_in_world_by_sport.columns = ['Sport', 'Name (World)', 'Medal (World)']
aux_biggest_medalist_by_gold_in_brazil_by_sport = biggest_medalist_by_gold_in_brazil_by_sport.copy()
aux_biggest_medalist_by_gold_in_brazil_by_sport.columns = ['Sport', 'Name (Brazil)', 'Medal (Brazil)']
biggest_medalist_by_gold_comparation = pd.merge(aux_biggest_medalist_by_gold_in_world_by_sport, aux_biggest_medalist_by_gold_in_brazil_by_sport, how='left')
biggest_medalist_by_gold_comparation
# + [markdown] id="u43E7J-W5SGr"
# Compare o maior medalhista em total de medalhas do Brasil com o maior medalhista do mundo no mesmo esporte.
# + id="JNtMgTyT5SGr"
# Same per-sport comparison, now over ALL medal colours.
summer_medals_brazil = summer_medals.loc[summer_medals['Team'] == 'Brazil']
summer_medals_brazil_by_name = summer_medals_brazil.groupby(['Sport'])['Name'].value_counts()
biggest_medalist_in_brazil_by_sport_name = summer_medals_brazil_by_name.groupby(['Sport']).idxmax()
biggest_medalist_in_brazil_by_sport_name = biggest_medalist_in_brazil_by_sport_name.apply(lambda x: x[1])
biggest_medalist_in_brazil_by_sport_name = biggest_medalist_in_brazil_by_sport_name.reset_index()
biggest_medalist_in_brazil_by_sport_medal = summer_medals_brazil_by_name.groupby(['Sport']).max()
biggest_medalist_in_brazil_by_sport_medal = biggest_medalist_in_brazil_by_sport_medal.reset_index()
biggest_medalist_in_brazil_by_sport_medal.columns = ['Sport', 'Medal']
biggest_medalist_in_brazil_by_sport = pd.merge(biggest_medalist_in_brazil_by_sport_name, biggest_medalist_in_brazil_by_sport_medal)
biggest_medalist_in_brazil_by_sport
# +
# NOTE(review): this reuses the name summer_golds_world_by_name for an
# all-medals grouping (built from summer_medals, not golds) -- misleading.
summer_golds_world_by_name = summer_medals.groupby(['Sport'])['Name'].value_counts()
biggest_medalist_in_world_by_sport_name = summer_golds_world_by_name.groupby(['Sport']).idxmax()
biggest_medalist_in_world_by_sport_name = biggest_medalist_in_world_by_sport_name.apply(lambda x: x[1])
biggest_medalist_in_world_by_sport_name = biggest_medalist_in_world_by_sport_name.reset_index()
biggest_medalist_in_world_by_sport_medal = summer_golds_world_by_name.groupby(['Sport']).max()
biggest_medalist_in_world_by_sport_medal = biggest_medalist_in_world_by_sport_medal.reset_index()
biggest_medalist_in_world_by_sport_medal.columns = ['Sport', 'Medal']
biggest_medalist_in_world_by_sport = pd.merge(biggest_medalist_in_world_by_sport_name, biggest_medalist_in_world_by_sport_medal)
biggest_medalist_in_world_by_sport
# +
# Side-by-side table, left join on Sport.
aux_biggest_medalist_in_world_by_sport = biggest_medalist_in_world_by_sport.copy()
aux_biggest_medalist_in_world_by_sport.columns = ['Sport', 'Name (World)', 'Medal (World)']
aux_biggest_medalist_in_brazil_by_sport = biggest_medalist_in_brazil_by_sport.copy()
aux_biggest_medalist_in_brazil_by_sport.columns = ['Sport', 'Name (Brazil)', 'Medal (Brazil)']
biggest_medalist_comparation = pd.merge(aux_biggest_medalist_in_world_by_sport, aux_biggest_medalist_in_brazil_by_sport, how='left')
biggest_medalist_comparation
# + [markdown] id="sGZZRo7N5SGr"
# Calcule o percentual de medalhas de ouro, prata e bronze que o Brasil ganhou em cada olimpíada.
# + id="cqCzBph15SGr"
# Percentage split of Brazil's medals by colour (bronze/gold/silver order
# follows the alphabetical Medal index).  NOTE(review): the question asks
# for percentages PER olympiad, but this computes a single overall split --
# Year is not part of the final grouping.
summer_medals_brazil_percentual = summer_medals_brazil.groupby(['Sport', 'Event', 'Year', 'Medal']).count()
summer_medals_brazil_percentual = summer_medals_brazil_percentual.groupby(['Medal']).cumcount() + 1
summer_medals_brazil_percentual = summer_medals_brazil_percentual.groupby(level=3).apply(max)
total_brazil = summer_medals_brazil_percentual[0] + summer_medals_brazil_percentual[1] + summer_medals_brazil_percentual[2]
summer_medals_brazil_percentual.apply(lambda x: f'{round(x / total_brazil * 100, 2)} %')
| src/project1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.io as sio
# MDS
class MDS():
    """Classical (Torgerson) multidimensional scaling.

    Given a sample matrix of shape (N, n_features), computes the pairwise
    Euclidean distance matrix ``D``, the double-centred inner-product
    matrix ``B``, and a *dim*-dimensional embedding ``Z`` whose pairwise
    distances approximate ``D``.
    """
    def __init__(self, samples, dim=2):
        self.samples = samples
        self.dim = dim
        self.N = samples.shape[0]
        # Pipeline: distances -> centred Gram matrix -> spectral embedding.
        self.D = self.getD()
        self.B = self.getB()
        self.Z = self.getZ()
    def getD(self):
        """Pairwise Euclidean distances via the Gram-matrix identity
        ||a - b||^2 = ||a||^2 + ||b||^2 - 2<a, b>."""
        gram = np.dot(self.samples, self.samples.T)
        sq_norms = gram.diagonal()
        # Broadcast ||b||^2 across columns and ||a||^2 down rows.
        return np.sqrt(sq_norms[np.newaxis, :] + sq_norms[:, np.newaxis] - 2 * gram)
    def getB(self):
        """Double-centre the squared distances: B = -1/2 * J * D^2 * J."""
        sq_dist = np.square(self.D)
        row_sums = np.sum(sq_dist, axis=1, keepdims=1)
        col_sums = np.sum(sq_dist, axis=0, keepdims=1)
        total = sq_dist.sum()
        centred = (row_sums + col_sums - self.N * sq_dist - total / self.N) / (2 * self.N)
        return centred.astype(np.float32)
    def getZ(self):
        """Embed into self.dim dimensions from the top eigenpairs of B:
        Z = V * sqrt(Lambda)."""
        eigvals, eigvecs = np.linalg.eig(self.B)
        top = np.argsort(eigvals)[::-1][:self.dim]
        scale = np.diag(eigvals[top])
        return np.dot(eigvecs[:, top], np.sqrt(scale).T)
    def visualization(self):
        """Scatter the embedding (coloured by the module-level ``labels``
        array) and save it as dim2.png / dim3.png.

        NOTE(review): depends on a global ``labels`` variable rather than
        data passed in -- confirm it is defined before calling.
        """
        coords = self.Z
        if self.dim == 2:
            plt.plot(coords[labels==1, 0], coords[labels==1, 1], 'x', label='1')
            plt.plot(coords[labels==2, 0], coords[labels==2, 1], '.', label='2')
            plt.savefig('dim2.png')
        if self.dim == 3:
            fig = plt.figure()
            ax = Axes3D(fig)
            ax.scatter(coords[labels==1, 0], coords[labels==1, 1], coords[labels==1, 2], 'x', label='1')
            ax.scatter(coords[labels==2, 0], coords[labels==2, 1], coords[labels==2, 2], '.', label='2')
            plt.savefig('dim3.png')
# Load Dataset MNIST12 (first 5000 samples) from a local .mat file.
# Renamed the container variable so it no longer shadows the built-in `dict`.
mat = sio.loadmat('./datasets/MNIST12.mat')
samples = mat['samples'][:5000]
# `labels` stays a module-level global: MDS.visualization reads it by name.
labels = mat['labels'][:5000]
labels = np.reshape(labels, -1)
# 2-D embedding
mds2d = MDS(samples, dim=2)
mds2d.visualization()
# 3-D embedding
mds3d = MDS(samples, dim=3)
mds3d.visualization()
| hw3/MDS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [How to do mixup training from image files in Keras](https://www.dlology.com/blog/how-to-do-mixup-training-from-image-files-in-keras/) | DLology Blog
import os
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
# +
train_dir = "./data"
batch_size = 5
validation_split = 0.3
img_height = 150
img_width = 150
epochs = 10
# +
class MixupImageDataGenerator():
    """Yields mixup-augmented batches from two independently shuffled
    `flow_from_directory` streams over the same directory.

    Each batch is a convex combination of two randomly paired batches:
    X = lam * X1 + (1 - lam) * X2 (likewise for the one-hot labels),
    with lam ~ Beta(alpha, alpha) sampled per sample.
    """
    def __init__(self, generator, directory, batch_size, img_height, img_width, alpha=0.2, subset=None):
        """Constructor for mixup image data generator.
        Arguments:
            generator {object} -- An instance of Keras ImageDataGenerator.
            directory {str} -- Image directory.
            batch_size {int} -- Batch size.
            img_height {int} -- Image height in pixels.
            img_width {int} -- Image width in pixels.
        Keyword Arguments:
            alpha {float} -- Mixup beta distribution alpha parameter. (default: {0.2})
            subset {str} -- 'training' or 'validation' if validation_split is specified in
            `generator` (ImageDataGenerator).(default: {None})
        """
        self.batch_index = 0
        self.batch_size = batch_size
        self.alpha = alpha
        # First iterator yielding tuples of (x, y)
        self.generator1 = generator.flow_from_directory(directory,
                                                        target_size=(
                                                            img_height, img_width),
                                                        class_mode="categorical",
                                                        batch_size=batch_size,
                                                        shuffle=True,
                                                        subset=subset)
        # Second iterator yielding tuples of (x, y)
        # Shuffled independently of generator1, so each __next__ pairs two
        # (mostly) different random batches of the same image set.
        self.generator2 = generator.flow_from_directory(directory,
                                                        target_size=(
                                                            img_height, img_width),
                                                        class_mode="categorical",
                                                        batch_size=batch_size,
                                                        shuffle=True,
                                                        subset=subset)
        # Number of images across all classes in image directory.
        self.n = self.generator1.samples
    def reset_index(self):
        """Reset the generator indexes array.
        """
        # Uses a private Keras iterator method to force a reshuffle.
        self.generator1._set_index_array()
        self.generator2._set_index_array()
    def on_epoch_end(self):
        self.reset_index()
    def reset(self):
        self.batch_index = 0
    def __len__(self):
        # round up
        return (self.n + self.batch_size - 1) // self.batch_size
    def get_steps_per_epoch(self):
        """Get number of steps per epoch based on batch size and
        number of images.
        Returns:
            int -- steps per epoch.
        """
        # NOTE(review): floor division drops the final partial batch, while
        # __len__ rounds up -- confirm which is intended.
        return self.n // self.batch_size
    def __next__(self):
        """Get next batch input/output pair.
        Returns:
            tuple -- batch of input/output pair, (inputs, outputs).
        """
        # Reshuffle both streams at the start of each pass over the data.
        if self.batch_index == 0:
            self.reset_index()
        current_index = (self.batch_index * self.batch_size) % self.n
        if self.n > current_index + self.batch_size:
            self.batch_index += 1
        else:
            self.batch_index = 0
        # random sample the lambda value from beta distribution.
        # One lambda per sample, reshaped for broadcasting over the 4-D
        # image tensor (X_l) and the 2-D one-hot labels (y_l).
        l = np.random.beta(self.alpha, self.alpha, self.batch_size)
        X_l = l.reshape(self.batch_size, 1, 1, 1)
        y_l = l.reshape(self.batch_size, 1)
        # Get a pair of inputs and outputs from two iterators.
        # NOTE(review): if n is not divisible by batch_size, the final batch
        # from the underlying iterators is smaller than batch_size and the
        # broadcast against X_l/y_l would fail -- confirm the dataset size.
        X1, y1 = self.generator1.next()
        X2, y2 = self.generator2.next()
        # Perform the mixup.
        X = X1 * X_l + X2 * (1 - X_l)
        y = y1 * y_l + y2 * (1 - y_l)
        return X, y
    def __iter__(self):
        while True:
            yield next(self)
# Shared augmentation pipeline; validation_split reserves a holdout subset.
input_imgen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=5,
    width_shift_range=0.05,
    height_shift_range=0,
    shear_range=0.05,
    zoom_range=0,
    brightness_range=(1, 1.3),
    horizontal_flip=True,
    fill_mode='nearest',
    validation_split=validation_split)
# Mixup generator over the training subset.
# BUG FIX: img_width was previously passed as img_height (harmless only
# because both happen to be 150 here).
train_generator = MixupImageDataGenerator(generator=input_imgen,
                                          directory=train_dir,
                                          batch_size=batch_size,
                                          img_height=img_height,
                                          img_width=img_width,
                                          subset='training')
# Plain (non-mixup) generator for validation.
validation_generator = input_imgen.flow_from_directory(train_dir,
                                                       target_size=(
                                                           img_height, img_width),
                                                       class_mode="categorical",
                                                       batch_size=batch_size,
                                                       shuffle=True,
                                                       subset='validation')
# -
# Visual sanity check: display one mixed batch and its soft labels.
sample_x, sample_y = next(train_generator)
for i in range(batch_size):
    display(image.array_to_img(sample_x[i]))
print(sample_y)
print('training steps: ', train_generator.get_steps_per_epoch())
print('validation steps: ', validation_generator.samples // batch_size)
# Same check driven through the __iter__ protocol.
for item in train_generator:
    sample_x, sample_y = item
    for i in range(batch_size):
        display(image.array_to_img(sample_x[i]))
    print(sample_y)
    break
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt

# Visualize the Beta(alpha, alpha) distribution used to sample mixup lambdas.
alpha = 0.2
array = np.random.beta(alpha, alpha, 5000)
h = sorted(array)  # sorted
# NOTE(review): `fit` is computed but never plotted -- plot it or drop it.
fit = stats.norm.pdf(h, np.mean(h), np.std(h))  # this is a fitting indeed
# The `normed` keyword was removed in Matplotlib 3.1; `density` is the
# supported replacement with identical semantics.
plt.hist(h, density=True)
plt.title('Beta distribution')
plt.show()
# +
from tensorflow.keras.applications import VGG16

# Pretrained VGG16 convolutional base (ImageNet weights) used as a frozen
# feature extractor; only the classification head below is trained.
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(img_height, img_width, 3))
# +
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import optimizers

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
# Sigmoid + binary_crossentropy treats the 4 outputs independently, which
# suits mixup's soft labels; NOTE(review): for single-label 4-class data,
# softmax + categorical_crossentropy would be the conventional choice.
model.add(layers.Dense(4, activation='sigmoid'))
conv_base.trainable = False  # freeze the ImageNet weights
# `lr` is a deprecated alias in tf.keras; `learning_rate` is the supported name.
model.compile(optimizer=optimizers.RMSprop(learning_rate=2e-5),
              loss='binary_crossentropy',
              metrics=['acc'])
# -
model.summary()
train_generator.reset()
validation_generator.reset()
# Start the training.
# NOTE: fit_generator is deprecated in TF2; model.fit accepts generators directly.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.get_steps_per_epoch(),
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
    epochs=epochs)
| test_mixup_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bonus: Temperature Analysis I
import pandas as pd
from datetime import datetime as dt
import numpy as np
from scipy import stats
# "tobs" is "temperature observations"
# Load Hawaii temperature observations ("tobs") with date and station columns.
df = pd.read_csv('./Resources/hawaii_measurements.csv')
df.head()
df.dtypes
# +
# Convert the date column format from string to datetime
# NOTE(review): df1 = df is an alias, not a copy -- the conversion below
# also mutates df (intentional here, but worth knowing).
df1 = df
df1['date'] = pd.to_datetime(df['date'])
df1.dtypes
# -
# Set the date column as the DataFrame index
df1.set_index("date",inplace=True)
df1.head()
# +
# Drop the date column
#same result achieved by inplace = True above
# -
# ### Compare June and December data across all years
from scipy import stats
# Filter data for desired months
june = df1[df1.index.month ==6]
dec = df1[df1.index.month ==12]
june.head()
dec.head()
# Count missing temperature observations in both subsets (isnull and isna
# are aliases, so each subset is effectively counted twice).
nulls = june['tobs'].isnull().sum()+dec['tobs'].isnull().sum()+june['tobs'].isna().sum()+dec['tobs'].isna().sum()
nulls
# +
# Identify the average temperature for June
june.describe()
#June mean temp is 74.94
# +
# Identify the average temperature for December
dec.describe()
#dec mean temp is 71.04
# -
# Keep only the first observation for each duplicated date index.
june1 = june.loc[~june.index.duplicated(keep='first')]
june1
# +
# Create collections of temperature data
#collections created above in the june and dec data frames
merge = pd.concat([june, dec], axis=0)
merge
# -
# Run paired t-test and unpaired t-test
#stats.ttest_rel(june['tobs'], dec['tobs'])
# Welch's unpaired t-test (equal_var=False): sample sizes/variances differ.
ts, p = stats.ttest_ind(june['tobs'], dec['tobs'], equal_var=False)
print(f'Tstatistic = {ts}, and p value = {p}')
# ### Analysis
# ##Unpaired T-tests
#
# A paired t-test could not be used since it compares samples drawn from the same population, which therefore have to be of the same size.
# Unpaired t-tests are performed to compare the means of two populations. Since June and December observations are independent populations from two different months, an unpaired t-test is more appropriate.
#
# Also, since the p-value is much less than .05, the null hypothesis is rejected: the temperature distributions differ between June and December.
#
| .ipynb_checkpoints/temp_analysis_bonus_1_starter-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8 - AzureML
# language: python
# name: python38-azureml
# ---
# # Configuration
#
# _**Setting up your Azure Machine Learning services workspace and configuring needed resources**_
#
# ---
# ---
#
# **Requirements** - In order to benefit from this tutorial, you will need:
# - A basic understanding of Machine Learning
# - An Azure account with an active subscription. [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)
# - An Azure ML workspace - [Create workspace](../resources/workspace/workspace.ipynb)
# - A python environment
# - Installed Azure Machine Learning Python SDK v2 - [install instructions](../README.md) - check the getting started section
#
# **Learning Objectives** - By the end of this tutorial, you should be able to:
# - Connect to your AML workspace from the Python SDK using different auth credentials
# - Create workspace config file
# - Create Compute clusters which required by jobs notebooks. [Check this notebook to create a compute cluster](../resources/compute/compute.ipynb)
#
# **Motivations** - This notebook covers the scenario that user define components using yaml then use these components to build pipeline.
#
#
# # 1. Import the required libraries
# Import required libraries
from azure.ai.ml import MLClient
# # 2. Configure credential
#
# We are using `DefaultAzureCredential` to get access to workspace. When an access token is needed, it requests one using multiple identities(`EnvironmentCredential, ManagedIdentityCredential, SharedTokenCacheCredential, VisualStudioCodeCredential, AzureCliCredential, AzurePowerShellCredential`) in turn, stopping when one provides a token.
# Reference [here](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python) for more information.
#
# `DefaultAzureCredential` should be capable of handling most Azure SDK authentication scenarios.
# Reference [here](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python) for all available credentials if it does not work for you.
# +
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential

try:
    credential = DefaultAzureCredential()
    # Check if the given credential can get a token successfully.
    credential.get_token("https://management.azure.com/.default")
except Exception:
    # The exception itself is not needed (previously bound to an unused `ex`).
    # Fall back to InteractiveBrowserCredential when DefaultAzureCredential
    # does not work; this opens a browser page for interactive login.
    credential = InteractiveBrowserCredential()
# -
# # 3. Connect to Azure Machine Learning Workspace
#
# The [workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-workspace) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section we will connect to the workspace in which the job will be run.
#
# Check this notebook for creating a [workspace](../resources/workspace/workspace.ipynb).
#
# To connect to a workspace, we need identifier parameters - a subscription, resource group and workspace name.
# The config details of a workspace can be saved to a file from the Azure Machine Learning [portal](https://ml.azure.com/). Click on the name of the portal on the top right corner to see the link to save the config file.
# This config file can be used to load a workspace using `MLClient`. If no path is mentioned, path is defaulted to current folder. If no file name is mentioned, file name will be defaulted to `config.json`
try:
    # Prefer workspace details from an existing config file
    # (default search: ./config.json, then .azureml/config.json).
    ml_client = MLClient.from_config(credential=credential)
except Exception as ex:
    # NOTE: Update following workspace information if not correctly configure before
    client_config = {
        "subscription_id": "<SUBSCRIPTION_ID>",
        "resource_group": "<RESOURCE_GROUP>",
        "workspace_name": "<AML_WORKSPACE_NAME>",
    }
    if client_config["subscription_id"].startswith("<"):
        print(
            "please update your <SUBSCRIPTION_ID> <RESOURCE_GROUP> <AML_WORKSPACE_NAME> in notebook cell"
        )
        # Bare raise re-raises the original from_config failure with its
        # full traceback (idiomatic replacement for `raise ex`).
        raise
    else:  # write and reload from config file
        import json
        import os

        config_path = "../.azureml/config.json"
        os.makedirs(os.path.dirname(config_path), exist_ok=True)
        with open(config_path, "w") as fo:
            fo.write(json.dumps(client_config))
        ml_client = MLClient.from_config(credential=credential, path=config_path)
print(ml_client)
# ## 4. Retrieve or create an Azure Machine Learning compute target
#
# To create a Azure Machine Learning job, you need a compute cluster as prerequisite. Below code ensures computes named `cpu-cluster` and `gpu-cluster` exists in your workspace.
# + name="create-cpu-compute"
from azure.ai.ml.entities import AmlCompute
# specify aml compute name.
cpu_compute_target = "cpu-cluster"
try:
    # Reuse the cluster if one with this name already exists in the workspace.
    ml_client.compute.get(cpu_compute_target)
except Exception:
    print("Creating a new cpu compute target...")
    # Autoscaling CPU cluster (0-4 nodes; scales to zero when idle).
    compute = AmlCompute(
        name=cpu_compute_target, size="STANDARD_D2_V2", min_instances=0, max_instances=4
    )
    # NOTE(review): begin_create_or_update is asynchronous and returns a
    # poller; its completion is not awaited here.
    ml_client.compute.begin_create_or_update(compute)
# + name="create-gpu-compute"
from azure.ai.ml.entities import AmlCompute
# specify aml compute name.
gpu_compute_target = "gpu-cluster"
try:
    # Reuse the cluster if one with this name already exists in the workspace.
    ml_client.compute.get(gpu_compute_target)
except Exception:
    print("Creating a new gpu compute target...")
    # Autoscaling GPU cluster (0-4 NC6 nodes; scales to zero when idle).
    compute = AmlCompute(
        name=gpu_compute_target, size="STANDARD_NC6", min_instances=0, max_instances=4
    )
    # NOTE(review): begin_create_or_update is asynchronous and returns a
    # poller; its completion is not awaited here.
    ml_client.compute.begin_create_or_update(compute)
# -
# # Next Steps
# You can see further examples of running a pipeline job [here](../jobs/pipelines/README.md)
| sdk/jobs/configuration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Dictionary
# A data type that maps each key to a value, so a key can be looked up
# by a particular value and vice versa -- this mapping type is called a
# dictionary.
# +
# key1: value1, key2: value2, key3: value3, ---
# +
# Keys may be strings, numbers, or tuples,
# while values can be of any type --
# a value can even be another dictionary.
# -
dict1 = {
    'No': 'A001',
    'Name': 'DKang',
    'Phone': '010-2258-8789',
    'birth': 'Dec'
}
dict1
# To see which keys a dictionary holds, use the keys() method.
dict1.keys()
# To use those keys as a list, wrap them with the list() function.
list(dict1.keys())
# To see which values a dictionary holds, use the values() method.
dict1.values()
# To get every corresponding key/value pair, use the items() method.
dict1.items()
# Looking up a specific value in the dictionary:
# find the value stored under the key 'Name'.
dict1.keys()
print(dict1['Name'])
print(dict1.get('Name'))
# Adding a key/value pair.
dict1['hobby'] = 'programming'
dict1
# To delete a dictionary entry, use the del statement.
del dict1['No']
dict1
# +
# Accessing a key that does not exist in the dictionary raises an error.
# -
# When looking up a key that is missing from the dictionary, a preset
# default value can be returned instead of raising an error.
dict1
# When a default value is supplied:
dict1.get('job', 'student')
| django/python_basic_dictionary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ryanleeallred/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module4-sequence-your-narrative/LS_DS_124_Sequence_your_narrative.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="JbDHnhet8CWy"
# _Lambda School Data Science_
#
# # Sequence your narrative
#
# Today we will create a sequence of visualizations inspired by [<NAME>'s 200 Countries, 200 Years, 4 Minutes](https://www.youtube.com/watch?v=jbkSRLYSojo).
#
# Using this [data from Gapminder](https://github.com/open-numbers/ddf--gapminder--systema_globalis/):
# - [Income Per Person (GDP Per Capital, Inflation Adjusted) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv)
# - [Life Expectancy (in Years) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv)
# - [Population Totals, by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv)
# - [Entities](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv)
# - [Concepts](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv)
# + [markdown] colab_type="text" id="zyPYtsY6HtIK"
# Objectives
# - sequence multiple visualizations
# - combine qualitative anecdotes with quantitative aggregates
#
# Links
# - [<NAME>’s TED talks](https://www.ted.com/speakers/hans_rosling)
# - [Spiralling global temperatures from 1850-2016](https://twitter.com/ed_hawkins/status/729753441459945474)
# - "[The Pudding](https://pudding.cool/) explains ideas debated in culture with visual essays."
# - [A Data Point Walks Into a Bar](https://lisacharlotterost.github.io/2016/12/27/datapoint-in-bar/): a thoughtful blog post about emotion and empathy in data storytelling
# + [markdown] colab_type="text" id="SxTJBgRAW3jD"
# ## Make a plan
#
# #### How to present the data?
#
# Variables --> Visual Encodings
# - Income --> x
# - Lifespan --> y
# - Region --> color
# - Population --> size
# - Year --> animation frame (alternative: small multiple)
# - Country --> annotation
#
# Qualitative --> Verbal
# - Editorial / contextual explanation --> audio narration (alternative: text)
#
#
# #### How to structure the data?
#
# | Year | Country | Region | Income | Lifespan | Population |
# |------|---------|----------|--------|----------|------------|
# | 1818 | USA | Americas | ### | ## | # |
# | 1918 | USA | Americas | #### | ### | ## |
# | 2018 | USA | Americas | ##### | ### | ### |
# | 1818 | China | Asia | # | # | # |
# | 1918 | China | Asia | ## | ## | ### |
# | 2018 | China | Asia | ### | ### | ##### |
#
# + [markdown] colab_type="text" id="3ebEjShbWsIy"
# ## Upgrade Seaborn
#
# Make sure you have at least version 0.9.0.
#
# In Colab, go to **Restart runtime** after you run the `pip` command.
# + colab_type="code" id="4RSxbu7rWr1p" colab={}
# !pip install --upgrade seaborn
# + colab_type="code" id="5sQ0-7JUWyN4" colab={}
import seaborn as sns
sns.__version__
# + [markdown] colab_type="text" id="S2dXWRTFTsgd"
# ## More imports
# + colab_type="code" id="y-TgL_mA8OkF" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# + [markdown] colab_type="text" id="CZGG5prcTxrQ"
# ## Load & look at data
# + colab_type="code" id="-uE25LHD8CW0" colab={}
# GDP per capita (PPP, inflation-adjusted) by country (geo) and year.
income = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv')
# + colab_type="code" id="gg_pJslMY2bq" colab={}
# Life expectancy in years by country and year.
lifespan = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv')
# + colab_type="code" id="F6knDUevY-xR" colab={}
# Total population by country and year.
population = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')
# + colab_type="code" id="hX6abI-iZGLl" colab={}
# Country metadata (names, regions, ...) keyed by geo code.
entities = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')
# + colab_type="code" id="AI-zcaDkZHXm" colab={}
# Descriptions of every concept (column) used across the dataset.
concepts = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv')
# + colab_type="code" id="EgFw-g0nZLJy" colab={}
# Quick sanity check on the sizes of the five tables.
income.shape, lifespan.shape, population.shape, entities.shape, concepts.shape
# + colab_type="code" id="I-T62v7FZQu5" colab={}
income.head()
# + colab_type="code" id="2zIdtDESZYG5" colab={}
lifespan.head()
# + colab_type="code" id="58AXNVMKZj3T" colab={}
population.head()
# + colab_type="code" id="0ywWDL2MZqlF" colab={}
# Widen the display so all entity columns are visible.
pd.options.display.max_columns = 500
entities.head()
# + colab_type="code" id="mk_R0eFZZ0G5" colab={}
concepts.head()
# + [markdown] colab_type="text" id="6HYUytvLT8Kf"
# ## Merge data
# + [markdown] colab_type="text" id="dhALZDsh9n9L"
# https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf
# + colab_type="code" id="A-tnI-hK6yDG" colab={}
# + [markdown] colab_type="text" id="4OdEr5IFVdF5"
# ## Explore data
# + colab_type="code" id="4IzXea0T64x4" colab={}
# + [markdown] colab_type="text" id="hecscpimY6Oz"
# ## Plot visualization
# + colab_type="code" id="_o8RmX2M67ai" colab={}
# + [markdown] colab_type="text" id="8OFxenCdhocj"
# ## Analyze outliers
# + colab_type="code" id="D59bn-7k6-Io" colab={}
# + [markdown] colab_type="text" id="DNTMMBkVhrGk"
# ## Plot multiple years
# + colab_type="code" id="JkTUmYGF7BQt" colab={}
# + [markdown] colab_type="text" id="BB1Ki0v6hxCA"
# ## Point out a story
# + colab_type="code" id="eSgZhD3v7HIe" colab={}
| module4-sequence-your-narrative/LS_DS_124_Sequence_your_narrative.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Disclaimer**: this notebook's compatibility with StatsBomb event data 4.0.0 was last checked on June 15th, 2020
# %load_ext autoreload
# %autoreload 2
import os;
import warnings
import pandas as pd
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
import tqdm
import socceraction.spadl as spadl
import socceraction.spadl.statsbomb as statsbomb
import socceraction.atomic.spadl as atomicspadl
# ## Set up the statsbombloader
# +
# Use this if you only want to use the free public statsbomb data
# Use this if you only want to use the free public statsbomb data
free_open_data_remote = "https://raw.githubusercontent.com/statsbomb/open-data/master/data/"
SBL = statsbomb.StatsBombLoader(root=free_open_data_remote,getter="remote")
# # Uncomment the code below if you have a local folder on your computer with statsbomb data
#datafolder = "../data-epl" # Example of local folder with statsbomb data
#SBL = statsbomb.StatsBombLoader(root=datafolder,getter="local")
# -
# ## Select competitions to load and convert
# View all available competitions
competitions = SBL.competitions()
set(competitions.competition_name)
# +
# Fifa world cup
selected_competitions = competitions[competitions.competition_name=="FIFA World Cup"]
# # Messi data
# selected_competitions = competitions[competitions.competition_name=="La Liga"]
# # FA Women's Super League
# selected_competitions = competitions[competitions.competition_name=="FA Women's Super League"]
selected_competitions
# -
# Get matches from all selected competitions
# One DataFrame of matches per (competition, season) pair, concatenated below.
matches = list(
    SBL.matches(row.competition_id, row.season_id)
    for row in selected_competitions.itertuples()
)
matches = pd.concat(matches, sort=True).reset_index(drop=True)
matches[["home_team_name","away_team_name","match_date","home_score","away_score"]]
# ## Load and convert match data
# +
matches_verbose = tqdm.tqdm(list(matches.itertuples()), desc="Loading match data")
teams, players, player_games = [], [], []
actions = {}
atomic_actions = {}
for match in matches_verbose:
    # load data
    teams.append(SBL.teams(match.match_id))
    players.append(SBL.players(match.match_id))
    events = SBL.events(match.match_id)
    # convert data
    player_games.append(statsbomb.extract_player_games(events))
    # BUG FIX: store each game's SPADL actions under its match id; previously
    # the `actions` dict initialised above was overwritten by every game's
    # DataFrame, so only the last game's actions survived the loop.
    actions[match.match_id] = statsbomb.convert_to_actions(events, match.home_team_id)
    atomic_actions[match.match_id] = atomicspadl.convert_to_atomic(actions[match.match_id])
# Consolidate the per-match metadata into single DataFrames.
games = matches.rename(columns={"match_id": "game_id"})
teams = pd.concat(teams).drop_duplicates("team_id").reset_index(drop=True)
players = pd.concat(players).drop_duplicates("player_id").reset_index(drop=True)
player_games = pd.concat(player_games).reset_index(drop=True)
# -
# ## Store converted spadl data in a h5-file
# +
datafolder = "../data-fifa"
# Create data folder if it doesn't exist
if not os.path.exists(datafolder):
    os.mkdir(datafolder)
    print(f"Directory {datafolder} created ")
spadl_h5 = os.path.join(datafolder, "atomic-spadl-statsbomb.h5")
# Store all spadl data in h5-file
# Metadata tables are stored once; per-game actions under their own key.
with pd.HDFStore(spadl_h5) as spadlstore:
    spadlstore["competitions"] = selected_competitions
    spadlstore["games"] = games
    spadlstore["teams"] = teams
    spadlstore["players"] = players
    spadlstore["player_games"] = player_games
    for game_id in atomic_actions.keys():
        spadlstore[f"atomic_actions/game_{game_id}"] = atomic_actions[game_id]
    # Lookup tables mapping SPADL type/result/bodypart ids to names.
    spadlstore["results"] = spadl.results_df()
    spadlstore["bodyparts"] = spadl.bodyparts_df()
    spadlstore["atomic_actiontypes"] = atomicspadl.actiontypes_df()
# -
# ## Plot the spadl data
# Extra library required: ```pip install matplotsoccer```
# +
# Select England vs Belgium game at World Cup
with pd.HDFStore(spadl_h5) as spadlstore:
    games = spadlstore["games"].merge(spadlstore["competitions"])
    game_id = games[(games.competition_name == "FIFA World Cup")
                    & (games.home_team_name == "Belgium")
                    & (games.away_team_name == "England")].game_id.values[0]
    atomic_actions = spadlstore[f"atomic_actions/game_{game_id}"]
    # Denormalize: attach human-readable type, bodypart, player and team names.
    atomic_actions = (
        atomic_actions.merge(spadlstore["atomic_actiontypes"],how="left")
        #.merge(spadlstore["results"],how="left")
        .merge(spadlstore["bodyparts"],how="left")
        .merge(spadlstore["players"],how="left")
        .merge(spadlstore["teams"],how="left")
    )
# use nickname if available else use full name
# NOTE(review): assumes a missing nickname is None/empty (falsy); a float
# NaN would be truthy and slip through this test -- verify the loader.
atomic_actions["player"] = atomic_actions[["player_nickname","player_name"]].apply(lambda x: x[0] if x[0] else x[1],axis=1)
# +
import matplotsoccer

def nice_time(row):
    """Format an action's period/seconds as e.g. '63m12s'."""
    minute = int((row.period_id - 1) * 45 + row.time_seconds // 60)
    second = int(row.time_seconds % 60)
    return f"{minute}m{second}s"

# Plot the possession sequence (up to 9 actions) leading to each goal.
# `nice_time` is defined once above instead of being redefined per iteration.
for shot in list(atomic_actions[(atomic_actions.type_name == "goal")].index):
    # Clamp the window start so a goal within the first 8 actions does not
    # produce a negative slice start (which would wrap to the frame's end).
    a = atomic_actions[max(shot - 8, 0):shot + 1].copy()
    a["start_x"] = a.x
    a["start_y"] = a.y
    a["end_x"] = a.x + a.dx
    a["end_y"] = a.y + a.dy
    g = list(games[games.game_id == a.game_id.values[0]].itertuples())[0]
    minute = int((a.period_id.values[0] - 1) * 45 + a.time_seconds.values[0] // 60)
    game_info = f"{g.match_date} {g.home_team_name} {g.home_score}-{g.away_score} {g.away_team_name} {minute + 1}'"
    print(game_info)
    a["nice_time"] = a.apply(nice_time, axis=1)
    labels = a[["nice_time", "type_name", "player", "team_name"]]
    matplotsoccer.actions(
        location=a[["start_x", "start_y", "end_x", "end_y"]],
        action_type=a.type_name,
        team=a.team_name,
        label=labels,
        labeltitle=["time", "actiontype", "player", "team"],
        zoom=False,
        figsize=6
    )
| public-notebooks/ATOMIC-1-load-and-convert-statsbomb-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="spZ51d-KS2jl"
# # Priprava okolja
# + id="n2r39hxkf7_k"
# !pip install transformers
# !pip install sentencepiece
# + id="y1lwgECZepaY"
import csv
import torch
from torch import nn
from transformers import AutoTokenizer, AutoModel
import pandas as pd
from google.colab import drive
import transformers
import json
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
RANDOM_SEED = 42
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# + [markdown] id="Kt-IiQ5LtK10"
# # Pomožni razredi in funkcije
# + id="CYhWyVgfenAG"
class SentimentClassifier(nn.Module):
    """SloBERTa-based sequence classifier with a two-layer head.

    Args:
        n_classes: number of output sentiment classes.
    """

    def __init__(self, n_classes):
        super().__init__()  # Python 3 zero-argument super
        self.model = AutoModel.from_pretrained('EMBEDDIA/sloberta')
        # Head: 768 -> 768 (ReLU) -> n_classes with dropout around it.
        # Uses the imported `nn` alias consistently instead of mixing
        # `torch.nn.*` and `nn.*`.
        self.pre_classifier = nn.Linear(768, 768)
        self.dropout = nn.Dropout(0.2)
        self.classifier = nn.Linear(self.model.config.hidden_size, n_classes)

    def forward(self, input_ids, attention_mask):
        """Return raw logits of shape (batch, n_classes)."""
        output = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        last_hidden_state = output[0]
        # First-token ([CLS]) representation as the pooled sequence embedding.
        pooled = last_hidden_state[:, 0, :]
        pooled = self.dropout(pooled)
        pooled = self.pre_classifier(pooled)
        pooled = nn.ReLU()(pooled)
        pooled = self.dropout(pooled)
        return self.classifier(pooled)
class ArticleTestDataset(torch.utils.data.Dataset):
    """Tokenizes article bodies from a DataFrame for batched inference.

    Args:
        dataframe: DataFrame with a `body` column containing the article text.
        tokenizer: HuggingFace tokenizer providing `encode_plus`.
        max_len: maximum token sequence length (padded/truncated to this).
    """

    def __init__(self, dataframe, tokenizer, max_len):
        self.tokenizer = tokenizer
        self.df = dataframe
        self.text = dataframe.body
        self.max_len = max_len

    def __getitem__(self, idx):
        text = str(self.text[idx])
        # BUG FIX: use the tokenizer passed to the constructor instead of
        # silently relying on a module-level `tokenizer` global.
        inputs = self.tokenizer.encode_plus(
            text,
            None,
            add_special_tokens=True,
            padding='max_length',
            truncation=True,
            max_length=self.max_len,
            return_attention_mask=True,
            return_token_type_ids=True
        )
        input_ids = inputs['input_ids']
        attention_mask = inputs['attention_mask']
        return {
            'text': text,
            'input_ids': torch.tensor(input_ids, dtype=torch.long),
            'attention_mask': torch.tensor(attention_mask, dtype=torch.long),
        }

    def __len__(self):
        return len(self.text)
def get_predictions(model, data_loader):
    """Run `model` over `data_loader` and return predicted class indices.

    Args:
        model: classifier returning logits of shape (batch, n_classes).
        data_loader: yields dicts with 'input_ids' and 'attention_mask'.

    Returns:
        1-D CPU LongTensor with the argmax prediction for every example.

    NOTE: relies on the module-level `device` global and the imported `tqdm`.
    """
    model = model.eval()
    predictions = []
    data_iterator = tqdm(data_loader, desc="Iteration")
    # Inference only: no gradients needed.
    with torch.no_grad():
        # Iterate directly (the previous enumerate() index was unused).
        for batch in data_iterator:
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            _, preds = torch.max(outputs, dim=1)
            predictions.extend(preds)
    predictions = torch.stack(predictions).cpu()
    return predictions
# + [markdown] id="lWo0LClntV6A"
# # MAIN
# + id="89z2yiSboSi-"
# Path (on mounted Google Drive) to the fine-tuned model weights.
model_path = '/content/drive/MyDrive/Diploma/best_model_state_latest.bin'
# + id="ICcDRSZYtVNY"
MAX_LEN = 512
BATCH_SIZE = 8
test_params = {'batch_size': BATCH_SIZE,
               'shuffle': False,
               'num_workers': 0
               }
# SloBERTa tokenizer matching the pretrained backbone in SentimentClassifier.
tokenizer = AutoTokenizer.from_pretrained('EMBEDDIA/sloberta', use_fast=False)
model = SentimentClassifier(3)
model.load_state_dict(torch.load(model_path))
model = model.to(device)
# + id="tiYTBXGBTINL"
# You can change the value in the next line. Possible values are:
# "2019_slovenija_sentiment",
# "2019_svet_sentiment",
# "2020_korona_sentiment",
# "2020_svet_sentiment",
# "2020_slovenska_politika_sentiment",
file_name = '2019_slovenija_sentiment'
filepath = f'/content/drive/MyDrive/Diploma/data/{file_name}.pkl'
data = pd.read_pickle(filepath)
dataloader = DataLoader(ArticleTestDataset(data, tokenizer, MAX_LEN), **test_params)
# + id="z8_spmXJtTz0"
# Predict a sentiment class for every article and attach it as a column.
preds = get_predictions(model, dataloader)
data['sentiment'] = preds
# + id="CK9ak5Uy2GFV"
# data.to_pickle(filepath)
| sentiment_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Geospatial Data
#
# ### Part 4 of n
# # Structured Query Language (SQL)
# ### Talking to a Database through Queries
#
# ### DQL and DML
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Reminder
# <a href="#/slide-2-0" class="navigate-right" style="background-color:blue;color:white;padding:8px;margin:2px;font-weight:bold;">Continue with the lesson</a>
#
# <br>
# </br>
# <font size="+1">
#
# By continuing with this lesson you are granting your permission to take part in this research study for the Hour of Cyberinfrastructure: Developing Cyber Literacy for GIScience project. In this study, you will be learning about cyberinfrastructure and related concepts using a web-based platform that will take approximately one hour per lesson. Participation in this study is voluntary.
#
# Participants in this research must be 18 years or older. If you are under the age of 18 then please exit this webpage or navigate to another website such as the Hour of Code at https://hourofcode.com, which is designed for K-12 students.
#
# If you are not interested in participating please exit the browser or navigate to this website: http://www.umn.edu. Your participation is voluntary and you are free to stop the lesson at any time.
#
# For the full description please navigate to this website: <a href="../../gateway-lesson/gateway/gateway-1.ipynb">Gateway Lesson Research Study Permission</a>.
#
# </font>
# + hide_input=true init_cell=true slideshow={"slide_type": "skip"} tags=["Hide"]
# This code cell starts the necessary setup for Hour of CI lesson notebooks.
# First, it enables users to hide and unhide code by producing a 'Toggle raw code' button below.
# Second, it imports the hourofci package, which is necessary for lessons and interactive Jupyter Widgets.
# Third, it helps hide/control other aspects of Jupyter Notebooks to improve the user experience
# This is an initialization cell
# It is not displayed because the Slide Type is 'Skip'
from IPython.display import HTML, IFrame, Javascript, display
from ipywidgets import interactive
import ipywidgets as widgets
from ipywidgets import Layout
import getpass # This library allows us to get the username (User agent string)
# import package for hourofci project
import sys
sys.path.append('../../supplementary') # relative path (may change depending on the location of the lesson notebook)
# sys.path.append('supplementary')
import hourofci
# Best-effort switch into the supplementary folder; the bare except deliberately
# ignores failure so the notebook still runs when already in that directory.
try:
    import os
    os.chdir('supplementary')
except:
    pass
# load javascript to initialize/hide cells, get user agent string, and hide output indicator
# hide code by introducing a toggle button "Toggle raw code"
# HTML(...) is the cell's last expression, so Jupyter renders this markup inline.
HTML('''
<script type="text/javascript" src=\"../../supplementary/js/custom.js\"></script>
<style>
.output_prompt{opacity:0;}
</style>
<input id="toggle_code" type="button" value="Toggle raw code">
''')
# + slideshow={"slide_type": "slide"}
# Lesson helpers: schema display widget and interactive query exercises
import displaydatabases
from questiondisplay import QueryWindow
disp = displaydatabases.Display()
# NOTE(review): attribute accessed without parentheses — presumably
# `displayDatabases` is a property (or the rendered object itself); confirm
# this should not be `disp.displayDatabases()`.
disp.displayDatabases
# + [markdown] slideshow={"slide_type": "slide"}
# #### Grouping Records together by Group By statement
# -
# The Group By statement is used to **arrange identical data into groups**. The group by clause follows the where clause (if it's present) and precedes the order by clause (if it's present). For example you want to aggregate the total number of invoices by Country, or you want to get the count of employees with different Title (how many General Manager, Sales Manager etc)
#
# The Group By statement is often used with aggregate functions (COUNT(), MAX(), MIN(), SUM(), AVG()) to group the result-set by one or more columns.
#
# The syntax for Group By statement
#
# ```mysql
# SELECT column_name(s)
# FROM table_name
# WHERE condition
# GROUP BY column_name(s)
# ORDER BY column_name(s);
# ```
#
# The where condition and order by clauses are optional and depend on the use case.
#
# Let's look at some examples
# + [markdown] slideshow={"slide_type": "slide"}
# **26. Select sum Total of invoices for each BillingCountry**
# -
# Exercise 26: sum of invoice Totals grouped by BillingCountry
QueryWindow(26).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **27. Select number of invoices for each BillingCountry**
# -
# Exercise 27: count of invoices grouped by BillingCountry
QueryWindow(27).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **28. select average length of films from film table grouped on rating.**
# -
# Exercise 28: average film length grouped by rating
QueryWindow(28).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **29. Select maximum rental_rate of films from film table grouped on rating.**
# -
# Exercise 29: maximum rental_rate grouped by rating
QueryWindow(29).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **30. Select average rental_rate for each rating for each release_year.**
# -
# Exercise 30: average rental_rate grouped by rating and release_year
QueryWindow(30).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **31. Select total number of invoices for each BillingCity for the BillingCountry Germany.**
# -
# Exercise 31: invoice count per BillingCity, filtered to Germany
QueryWindow(31).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **32. Select sum total of Total for invoices for each BillingCity for the BillingCountry Germany and order the records by the sum total in descending order.**
# -
# Exercise 32: sum of Total per BillingCity in Germany, descending order
QueryWindow(32).display()
# + [markdown] slideshow={"slide_type": "slide"}
# #### Aliases for providing Temporary Name
# -
# Aliases are used to give a **table or column in a table a temporary name**. Most of the time aliases are used to make the **query more readable**. Aliases **only exists until the query is running**.
#
# The syntax for column alias is
#
# ```mysql
# SELECT column_name AS alias_name
# FROM table_name;
# ```
#
# And syntax for table alias is
#
# ```mysql
# SELECT column_name
# FROM table_name as alias_name;
# ```
#
# Let's look at some examples
# + [markdown] slideshow={"slide_type": "slide"}
# **33. Select sum total of Total for invoices for each BillingCity for the BillingCountry Germany and order the records by the sum total in descending order. Name the sum total of Total as TotalAmount**
# -
# Exercise 33: as exercise 32, aliasing the sum as TotalAmount
QueryWindow(33).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **34. Select total number of invoices for each BillingCity for the BillingCountry Germany. Name the total number of invoices column as TotalInvoices**
# -
# Exercise 34: invoice count per German BillingCity, aliased as TotalInvoices
QueryWindow(34).display()
# + [markdown] slideshow={"slide_type": "slide"}
# For **table aliases** here are some examples,
#
# ```mysql
# select * from invoices as i
# ```
# We can use the **(.)** *operator* for accessing columns when using table aliases.
#
# ```mysql
# select i.InvoiceId,i.BillingCountry from invoices as i
# ```
#
# Even when you are not using a table alias, the fully qualified column name can be used to refer to columns. For example
#
# ```mysql
# select invoices.InvoiceId,invoices.BillingCountry from invoices
# ```
#
# Such fully qualified names are particularly useful when you are using multiple tables with the same column names (mostly in Join queries) in a single query.
# + [markdown] slideshow={"slide_type": "slide"} toc-hr-collapsed=true
# #### Join for Combining Multiple Tables
# -
# A join clause is used to **combine records from multiple tables using related columns between them.** It is one of the most powerful operations in a relational database (joining based on relations).
# + [markdown] slideshow={"slide_type": "slide"}
# ##### Inner Join
# + [markdown] slideshow={"slide_type": "slide"}
# For inner join **records that have matching values in both tables will only be retrieved**.
#
# 
#
# The syntax for inner join is
#
# ```mysql
# SELECT column_name(s)
# FROM table1
# INNER JOIN table2
# ON table1.column_name = table2.column_name
# ```
#
# Inner join can also be written without an explicit inner join clause
#
# ```mysql
# SELECT column_name(s)
# FROM table1,table2
# where table1.column_name = table2.column_name
# ```
#
# Let's look at an example
# + [markdown] slideshow={"slide_type": "slide"}
# **35. Display the city along with country from city and country table.**
# -
# Exercise 35: inner join of city and country tables
QueryWindow(35).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **36. Select title and actor first_name and actor last_name for all the films from film, actor and film_actor tables**
# -
# Exercise 36: three-way join of film, actor, and film_actor
QueryWindow(36).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **37. Select title of all English movies from film table**
# -
# Exercise 37: titles of English-language films
QueryWindow(37).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **38. Select artist Name and the total number of albums composed by them as TotalAlbums**
# -
# Exercise 38: artist name with album count aliased as TotalAlbums
QueryWindow(38).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **39. select customers first_name and last_name and the total amount they spend as TotalAmount. Sort the results by total amount in descending order.**
# -
# Exercise 39: customer names with total spend (TotalAmount), descending
QueryWindow(39).display()
# + [markdown] slideshow={"slide_type": "slide"}
# **40. select customers first_name and last_name and the total number of rentals they had as TotalRentals. Sort the results by total rentals in descending order.**
# -
# Exercise 40: customer names with rental count (TotalRentals), descending
QueryWindow(40).display()
# + [markdown] slideshow={"slide_type": "slide"}
# ### Manipulating Data in a Database (Data Manipulation Language (DML))
# + [markdown] slideshow={"slide_type": "slide"}
# #### Inserting New Records to a Table
# -
# The syntax for inserting records to a table is
#
# ```mysql
# INSERT INTO table_name (column1, column2, column3, ..columnN)
# VALUES (value1, value2, value3, ..valueN)
# ```
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Deleting Records from a Table
# -
# The syntax for deleting records from a table is
#
# ```mysql
# DELETE FROM table_name WHERE condition;
# ```
# Re-display the database schemas for reference.
# NOTE(review): attribute accessed without parentheses, same as the earlier
# cell — confirm it should not be `disp.displayDatabases()`.
disp.displayDatabases
# + [markdown] slideshow={"slide_type": "slide"}
#
#
# <font size="+1"><a style="background-color:blue;color:white;padding:12px;margin:10px;font-weight:bold;" href="dbms-5.ipynb">Click here to go to the next notebook.</a></font>
| intermediate-lessons/geospatial-data/dbms-4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn.metrics import accuracy_score
# NOTE(review): hardcoded absolute Windows path — this only runs on the machine
# that has F:\winequality data.csv; consider a relative path checked into the repo.
df=pd.read_csv(r'file:///F:\winequality%20data.csv')
df
df.describe()
df.head(5)
df.isnull().sum()
# Exploratory plots: quality-score class balance, then feature distributions per score
sns.countplot(x='quality',data=df,palette="mako_r")
plt.xlabel("(Quality of the Wine)")
plt.show()
sns.violinplot(x='quality',y='citric acid',data=df,palette='rainbow')
sns.barplot(x='quality',y='chlorides',data=df)
sns.boxplot(x='quality',y='alcohol',data=df,palette= 'rainbow')
sns.jointplot(x='pH',y='free sulfur dioxide',data=df,kind='reg')
# +
# Grid of bar plots: one panel per physicochemical feature, showing its mean
# value at each quality score. Same panels, in the same positions, as before.
plt.figure(figsize=(10,6))
plt.suptitle("bar plot",fontsize=24)
plt.subplots_adjust(wspace=0.4,hspace=0.4)
feature_columns = ['chlorides', 'fixed acidity', 'volatile acidity',
                   'citric acid', 'residual sugar', 'free sulfur dioxide',
                   'total sulfur dioxide']
for position, feature in enumerate(feature_columns, start=1):
    plt.subplot(3, 3, position)
    sns.barplot(x='quality', y=feature, data=df)
# -
sns.boxplot(x='quality',y='density',data=df)
sns.boxplot(x='quality',y='pH',data=df)
#Making binary classification for the response variable.
#Dividing wine as good and bad by giving the limit for the quality.
# pd.cut buckets: (2, 6.5] -> 'bad', (6.5, 8] -> 'good'; any quality outside
# (2, 8] would become NaN.
bins=(2,6.5,8)
group_names=['bad','good']
df['quality']=pd.cut(df['quality'],bins=bins,labels=group_names)
#Now lets assign labels to our quality variable
from sklearn.preprocessing import LabelEncoder
label_quality=LabelEncoder()
#Bad becomes 0 and good becomes 1 (LabelEncoder orders classes alphabetically)
df['quality']=label_quality.fit_transform(df['quality'])
df
sns.countplot(x="quality",data=df,palette='bwr')
plt.show()
#now separate the dataset as response variable and feature variables
x=df.drop('quality',axis=1)
y=df['quality']
# +
#Splitting the data into train and split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.33, random_state = 42)
# -
#Applying Standard Scaler
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
# Fit the scaler on the training data only, then apply that SAME transformation
# to the test data. (Previously the scaler was re-fit on x_test with
# fit_transform, which scales the test set by its own statistics — the two sets
# end up on different scales and test-set information leaks into preprocessing.)
x_train=sc.fit_transform(x_train)
x_test=sc.transform(x_test)
#Applying Random Forest
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier(n_estimators=200,random_state=0)
rfc.fit(x_train,y_train)
y_pred=rfc.predict(x_test)
from sklearn.metrics import accuracy_score
print('Model accuracy score with 200 decision-trees:{0:0.4f}'.format(accuracy_score(y_test,y_pred)))
accuracies={}
#let see how our model performed
from sklearn.metrics import classification_report
print(classification_report(y_test,y_pred))
from sklearn.model_selection import train_test_split,GridSearchCV,cross_val_score
rfc_eval = cross_val_score(estimator = rfc, X = x_train, y = y_train, cv = 20)
rfc_eval.mean()
#confusion matrix for the random forest classification
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test,y_pred))
#Support vector classifier
from sklearn.svm import SVC
svm=SVC(random_state=1)
svm.fit(x_train,y_train)
# Cross-validate the SVC itself. (Previously `estimator = rfc` re-scored the
# random forest, so the reported mean had nothing to do with the SVC trained
# in this cell.)
svm_eval = cross_val_score(estimator = svm, X = x_train, y = y_train, cv = 29)
svm_eval.mean()
# +
from sklearn.tree import DecisionTreeClassifier
classifier=DecisionTreeClassifier(criterion='entropy',random_state=0)
classifier.fit(x_train,y_train)
acc = classifier.score(x_test,y_test)*100
accuracies['Decision Tree']=acc
print("Test Accuracy {:.2f}%".format(acc))
# Cross-validate the decision tree itself. (Previously `estimator = rfc`
# re-scored the random forest instead of the model trained in this cell.)
dt_eval = cross_val_score(estimator = classifier, X = x_train, y = y_train, cv = 29)
dt_eval.mean()
# +
from sklearn.linear_model import LogisticRegression
regr = LogisticRegression()
regr.fit(x_train, y_train)
acc = regr.score(x_test,y_test)*100
accuracies['Logistic Regression']=acc
print("Test Accuracy {:.2f}%".format(acc))
# Cross-validate the logistic regression itself. (Previously `estimator = rfc`
# re-scored the random forest instead of the model trained in this cell.)
lr_eval = cross_val_score(estimator = regr, X = x_train, y = y_train, cv = 29)
lr_eval.mean()
# -
| Advance RFM and SVM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### 1. When is a client considered to be "authenticated" when using MAC address filtering to control network access to a wireless network?
# ##### Ans: when the access point verifies that the MAC address is in the MAC table and sends a confirmation message to the client
# #### 2. What type of authentication do most access points use by default?
# ##### Ans: open
# #### 3. What wireless router configuration would reduce the risk of outsiders accessing or viewing content from your home network?
# ##### Ans: encryption
# #### 4. Which term is used for bulk advertising emails flooded to as many end users as possible?
# ##### Ans: spam
# #### 5. What is a characteristic of a computer worm?
# ##### Ans: exploits vulnerabilities with the intent of propagating itself across a network
# #### 6. How does a phisher typically contact a victim?
# ##### Ans: email
# #### 7. A network administrator attempted to access the company website and received a "page not found" error. The next day the administrator checked the web server logs and noticed that during the same hour that the site failed to load, there was an unusually large number of requests for the website home page. All of the requests originated from the same IP address. Given this information, what might the network administrator conclude?
# ##### Ans: It is likely that someone attempted a DoS attack.
# #### 8. Which three attacks exploit vulnerabilities in software? (Choose three.)
# ##### Ans:
# - worms
# - Trojan horses
# - viruses
# #### 9. Which type of attack attempts to overwhelm network links and devices with useless data?
# ##### Ans: denial of service
# #### 10. What type of program installs and gathers personal information, including password and account information, from a computer without permission or knowledge of the user?
# ##### Ans: spyware
# #### 11. What type of attack is the ping of death?
# ##### Ans: denial of service
# #### 12. True or False?
#
# #### Authorized users are not considered a security threat to the network.
# ##### Ans: false
# #### 13. Which type of attack exploits human behavior to gain confidential information?
# ##### Ans: social engineering
# #### 14. Which type of attack involves an attacker using a powerful computer to try a large number of possible passwords in rapid succession to gain access to a system?
# ##### Ans: brute force
| Coursera/Cisco Networking Basics Specializations/Course_4-Home_Networking_Basics/Week-3/Quiz/Week-3-Quiz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="Y5ijY7DSFKzs"
# # Clean Slate: Labelling Nonexpungeable Sex Offenses
#
# > Prepared by <NAME> for Code for Boston's [Clean Slate Project](https://github.com/codeforboston/clean-slate)
#
# # Purpose
# Whether or not a charge relates to a non-expungeable sex offense is deeply relevant to parts of the data analysis we are performing to answer the questions posed by Citizens for Juvenile Justice.
#
# Previous efforts to label these sex offenses in our data have relied solely on keyword matching. This notebook explores the impact of a new labelling strategy that incorporates offense chapter and section data as well as keyword matching.
#
# **This notebook is used to apply the new sex offense labelling strategy to the merged_nw.csv, merged_suff.csv, and merged_ms.csv files.** Only values in the 'sex' column are updated; the files are otherwise unchanged.
#
# Mass general laws chapter 276 article 100J indicates chapter/section pairings that are likely (but not certain) to indicate nonexpungeable sex offenses in subsections 6, 7, 8, and 10. The associated chapter and section pairings are found in [this crosswalk document.](https://docs.google.com/spreadsheets/d/1eM7lXkUKruWl9cRg20vtXEfoD47CD2KGPPPMg_0AoH4/edit#gid=1023772964)
#
#
# ---
# + [markdown] id="f7FrE475M1BA"
# # Import Libraries and Fetch Data
#
# Pandas dataframes are created from the Northwest, Suffolk, and Middlesex datasets. A listing of chapter/section pairings is created from the sex offense crosswalk document.
# + id="aRcvXhinEhni"
import requests
import pandas as pd
import os
# -
# Create dataframes for Northwest, Suffolk, and Middlesex
# NOTE(review): cp1252 decoding presumably matches how the merged CSVs were
# produced upstream — confirm if the data pipeline changes.
nw = pd.read_csv('../../data/processed/merged_nw.csv', encoding='cp1252')
sf = pd.read_csv('../../data/processed/merged_suff.csv', encoding='cp1252')
ms = pd.read_csv('../../data/processed/merged_ms.csv', encoding='cp1252')
# Show every column when displaying dataframes in the notebook
pd.set_option("display.max.columns", None)
print("Dataframes created for Northwest, Suffolk, and Middlesex")
# +
# Create the list of chapter/section pairings related to nonexpungeable sex offenses
# Each nonexpungeable sex offense has three values: chapter, section, and boolean for 'only on repeat offense'
import io

download_url = 'https://docs.google.com/spreadsheets/d/1eM7lXkUKruWl9cRg20vtXEfoD47CD2KGPPPMg_0AoH4/gviz/tq?usp=sharing&tqx=out:csv&sheet={Crosswalk}'
response = requests.get(download_url)
response.raise_for_status()

# Parse the crosswalk straight from the response body: no temporary file on
# disk, so there is nothing to clean up afterwards. (Previously the bytes were
# written to so_crosswalk.csv, re-read, and then deleted.)
so = pd.read_csv(io.BytesIO(response.content))
sex_offenses = list(so.to_records(index=False))
print('Sex Offenses Crosswalk List:\n', sex_offenses)
# + [markdown] id="z232SlPbNPZl"
# # Determine Offenses With Chapter/Section Pairings Matching the Sex Offense Crosswalk
# + [markdown] id="cOArv7C5ZB0r"
# The chapters and sections of the offenses identified as matching the sex offense crosswalk are listed for manual inspection.
# + colab={"base_uri": "https://localhost:8080/", "height": 103} id="yBS6R38BJAQ0" outputId="a487d7b8-5e39-4ef2-ec27-1bd87512de5c"
# To help determine which keywords will cover all subsequent charges (i.e. 'subsequent', 'second', etc)
# display list of unique charges associated with crosswalk entries that are only sex offenses on a subsequent offense
all_datasets = pd.concat([sf, nw, ms])
# Empty frame with the same columns, used as an accumulator
only_subsequents = all_datasets.iloc[0:0]
for listing in sex_offenses:
    # listing[2] == 1 means the entry is nonexpungeable only on a repeat offense
    if listing[2] == 1:
        x = all_datasets.loc[
            (all_datasets['Chapter'].notna()) &
            (all_datasets['Chapter'].str.contains(str(listing[0]), case=False)) &
            (all_datasets['Section'].notna()) &
            (all_datasets['Section'].str.contains(str(listing[1]), case=False))
        ]
        only_subsequents = pd.concat([only_subsequents, x])
only_subsequents['Charge'].unique()
# -
# This list is determined by manual inspection of the above, and should be revisited if the underlying data changes significantly
subsequent_keywords = ['SUBSQ.OFF.', 'AFTER CERTAIN OFFENSES']
# + id="1kmpSTxxgBr3"
# This function takes in a dataframe and a crosswalk entry, and returns a slice of that dataframe containing charges that match the crosswalk entry
def match_crosswalk(df, offense):
    """Return the rows of df whose Chapter/Section match a crosswalk entry.

    offense is a (chapter, section, subsequent_only) record. When
    subsequent_only is 1, only charges whose text contains one of the
    module-level subsequent_keywords (i.e. repeat offenses) are kept; when it
    is 0 every chapter/section match is kept.
    """
    return df.loc[
        (df['Chapter'].notna()) &
        (df['Chapter'].str.contains(str(offense[0]), case=False)) &
        (df['Section'].notna()) &
        (df['Section'].str.contains(str(offense[1]), case=False)) &
        # BUG FIX: '|' binds tighter than '==' in Python, so the original
        # 'offense[2] == 0 | <mask>' compared offense[2] elementwise against
        # the keyword mask, wrongly excluding repeat-offense charges for
        # entries that are always nonexpungeable (offense[2] == 0). The
        # comparison must be parenthesized.
        ((offense[2] == 0) | df['Charge'].str.contains('|'.join(subsequent_keywords), case=False))
    ]
# + id="3hoTMlQr4kHQ"
# This function takes in a dataframe and a crosswalk entry, and changes the value of the 'sex' column to 1 wherever a charge matches the crosswalk entry
# and that charge is a sex offense (based on the sex offense keyword list)
def update_sex(df, offense):
    """Mark matching nonexpungeable sex offenses in place via df['sex'] = 1.

    A row is marked when its Chapter/Section match the crosswalk entry, its
    Charge contains one of the module-level so_keywords, and — for
    subsequent-only entries (offense[2] == 1) — the Charge also contains one
    of the subsequent_keywords.
    """
    df.loc[
        (df['Chapter'].notna()) &
        (df['Chapter'].str.contains(str(offense[0]), case=False)) &
        (df['Section'].notna()) &
        (df['Section'].str.contains(str(offense[1]), case=False)) &
        (df['Charge'].str.contains('|'.join(so_keywords), case=False)) &
        # BUG FIX: parenthesize the comparison — '|' binds tighter than '==',
        # so the original 'offense[2] == 0 | <mask>' compared offense[2] to
        # the mask and skipped repeat-offense charges for entries that are
        # always nonexpungeable (offense[2] == 0).
        ((offense[2] == 0) | df['Charge'].str.contains('|'.join(subsequent_keywords), case=False)),
        'sex'
    ] = 1
# + colab={"base_uri": "https://localhost:8080/"} id="indNRCFHo5LH" outputId="f733a869-2007-4264-ebeb-729f82064425"
# Create so_sf, a dataframe containing all Suffolk charges that match the chapter/section pairs in the sex offenses list
# (sf.iloc[0:0] is an empty frame with the same columns, used as an accumulator)
so_sf = sf.iloc[0:0]
for listing in sex_offenses:
    x = match_crosswalk(sf, listing)
    so_sf = pd.concat([so_sf, x])
so_sf.groupby(['Chapter', 'Section']).size()
# + colab={"base_uri": "https://localhost:8080/"} id="_Gh7Ov__uhIq" outputId="8637ea4e-6a12-4f26-d830-4e39edd04de5"
# Create so_nw, a dataframe containing all Northwest charges that match the chapter/section pairs in the sex offenses list
so_nw = nw.iloc[0:0]
for listing in sex_offenses:
    x = match_crosswalk(nw, listing)
    so_nw = pd.concat([so_nw, x])
so_nw.groupby(['Chapter', 'Section']).size()
# + colab={"base_uri": "https://localhost:8080/"} id="JvV30P_QBNrG" outputId="12557bb8-6f12-46c5-8666-f8e1bbe5116d"
# Create so_ms, a dataframe containing all Middlesex charges that match the chapter/section pairs in the sex offenses list
so_ms = ms.iloc[0:0]
for listing in sex_offenses:
    x = match_crosswalk(ms, listing)
    so_ms = pd.concat([so_ms, x])
so_ms.groupby(['Chapter', 'Section']).size()
# + [markdown] id="uhMd5GOCLsx6"
# # Determine Sex Offense Related Keywords
# Some of the chapter/section pairings in the sex offense crosswalk can pertain to nonsexual offenses as well.
#
# A careful manual review of the list of unique incidents that match the chapter/section pairings from the sex offense crosswalk sheet is used to determine a list of keywords that cover all of the sex-related offenses, and none of the non-sex related offenses.
#
# Because this is a manual review, significant changes to the underlying data will require the keyword list to be revisited.
# + id="tZ6FUfTEMJ1-"
# Keywords (chosen by the manual review described above) that mark a charge as
# sex-related; revisit if the underlying data changes significantly.
so_keywords = ['INDECENT', 'RAPE', 'SEX', 'LEWDNESS', 'OBSCENE', 'PORNOGRAPHY', 'PROSTITUTION', 'LEWD', 'NUDE', 'NIGHTWALKER', 'STREETWALKER', 'SODOMY', 'INCEST', 'UNNATURAL', 'BESTIALITY']
# + colab={"base_uri": "https://localhost:8080/"} id="vzbcjzCprUlV" outputId="b182c294-1a8e-4dba-98e6-48a02480649c"
# Display all unique charges from all three datasets that match the chapter/section pairs in the sex offenses list
print('ALL OFFENSES MATCHING THE SEX OFFENSE CROSSWALK (SUFFOLK, NORTHWEST, MIDDLESEX):\n')
so_all = pd.concat([so_sf, so_nw, so_ms])
so_all['Charge'].unique()
# + colab={"base_uri": "https://localhost:8080/"} id="-C-onAqnEhm1" outputId="2d641fec-11c4-42a9-f913-cbf7bd32bb6f"
# Display the offenses that contain at least one of the chosen sex offense keywords (all of these should be sex offenses)
print('SEX OFFENSES MATCHING THE SEX OFFENSE CROSSWALK (SUFFOLK, NORTHWEST, MIDDLESEX):\n')
so_all.loc[
    (so_all['Charge'].str.contains('|'.join(so_keywords), case=False))
]['Charge'].unique()
# + colab={"base_uri": "https://localhost:8080/"} id="wjRuexuzLGDC" outputId="bb01373f-416b-46b9-bdc7-feafc5fca1e3"
# Display the offenses that contain none of the chosen sex offense keywords (none of these should be sex offenses)
print('NONSEXUAL OFFENSES MATCHING THE SEX OFFENSE CROSSWALK (SUFFOLK, NORTHWEST, MIDDLESEX):\n')
so_all.loc[
    (~so_all['Charge'].str.contains('|'.join(so_keywords), case=False))
]['Charge'].unique()
# + [markdown] id="cJOjjdtmYtLj"
# # Comparison of Previous Sex Offense Labeling Strategy with New Strategy
#
# The Suffolk, Northwest, and Middlesex datasets already contain a boolean column 'sex' to mark nonexpungeable sex offenses. This column was derived using keywords, but without factoring in chapter and section data.
# + id="5c0eP97Qek4s"
# Create copies of the Suffolk, Northwest, and Middlesex datasets which will have updated 'sex' columns
sf_new = sf.copy()
nw_new = nw.copy()
ms_new = ms.copy()
# Preserve the original keyword-only labels as 'sex_old' so the two labelling
# strategies can be compared below
sf_new.rename(columns={'sex': 'sex_old'}, inplace=True)
nw_new.rename(columns={'sex': 'sex_old'}, inplace=True)
ms_new.rename(columns={'sex': 'sex_old'}, inplace=True)
# Insert a fresh all-zero 'sex' column immediately before 'sex_old'
sf_new.insert(loc=sf_new.columns.get_loc("sex_old"), column='sex', value=0)
nw_new.insert(loc=nw_new.columns.get_loc("sex_old"), column='sex', value=0)
ms_new.insert(loc=ms_new.columns.get_loc("sex_old"), column='sex', value=0)
# + id="9FZQeAxafsKR"
# Use the sex offenses crosswalk and keyword list to mark nonexpungeable sex offenses
for listing in sex_offenses:
    update_sex(sf_new, listing)
    update_sex(nw_new, listing)
    update_sex(ms_new, listing)
# -
# **Manually Address Invalid False Positives**
#
# Some nonexpungeable sex offenses have not been automatically matched with the sex offense crosswalk because of erroneous or omitted chapter and section data. Here, those offenses are manually labeled as nonexpungeable sex offenses.
#
# Additionally, though charges related to a sex offender failing to register or provide information do not have chapter/section pairs matching entries in the sex offenses crosswalk, it seems certain that all individuals with such a charge must also have at least one charge this is considered a nonexpungeable sex offense. The Clean Slate data team has determined that these sex offender registration charges should be considered as nonexpungeable sex offenses for all data analysis purposes.
# Charge keywords treated as nonexpungeable sex offenses even when the row's
# chapter/section data is wrong or missing (rationale in the markdown above)
invalid_false_positives = ['SEX OFFENDER', 'LEWDNESS, OPEN AND GROSS', 'RAPE', 'SEXUAL INTERCOURSE, INDUCE CHASTE MINOR', 'INDECENT A&B', 'PHOTOGRAPH SEXUAL OR INTIMATE PARTS OF CHILD']
# +
# Display charges that will be manually marked as nonexpungeable sex offenses
x = sf_new.loc[
    (sf_new['sex'] == 0) &
    (sf_new['Charge'].str.contains('|'.join(invalid_false_positives), case=False))
]
print('Suffolk charges to be manually marked as nonexpungeable sex offenses:\n', x.groupby(['Charge']).size().sort_values(ascending=False))
x = nw_new.loc[
    (nw_new['sex'] == 0) &
    (nw_new['Charge'].str.contains('|'.join(invalid_false_positives), case=False))
]
print('\nNorthwest charges to be manually marked as nonexpungeable sex offenses:\n', x.groupby(['Charge']).size().sort_values(ascending=False))
x = ms_new.loc[
    (ms_new['sex'] == 0) &
    (ms_new['Charge'].str.contains('|'.join(invalid_false_positives), case=False))
]
print('\nMiddlesex charges to be manually marked as nonexpungeable sex offenses:\n', x.groupby(['Charge']).size().sort_values(ascending=False))
# +
# Mark the charges listed above as nonexpungeable sex offenses in each county's
# dataframe (in place; the filter and assignment are identical for all three).
for county_frame in (sf_new, nw_new, ms_new):
    county_frame.loc[
        (county_frame['sex'] == 0) &
        (county_frame['Charge'].str.contains('|'.join(invalid_false_positives), case=False)),
        'sex'
    ] = 1
# + colab={"base_uri": "https://localhost:8080/"} id="aJ58yj7S1oYf" outputId="2daaf83e-df66-4af7-efb9-dae84dad1f72"
# The per-county comparison was previously triplicated (Suffolk, Northwest,
# Middlesex) with copy-pasted code; it is factored into one helper so the
# three reports cannot drift apart. Printed output is unchanged.
def _compare_labelling(df, county):
    """Print how the new 'sex' labels differ from the old 'sex_old' labels for one county."""
    # Disagreements between the two strategies
    missed = df.loc[(df['sex'] == 1) & (df['sex_old'] == 0)]
    false_positives = df.loc[(df['sex'] == 0) & (df['sex_old'] == 1)]
    # Agreements between the two strategies
    agree_so = df.loc[(df['sex'] == 1) & (df['sex_old'] == 1)]
    agree_not_so = df.loc[(df['sex'] == 0) & (df['sex_old'] == 0)]
    previous_so_count = len(df.loc[(df['sex_old'] == 1)])
    new_so_count = len(df.loc[(df['sex'] == 1)])
    print(f'In {county}, the previous labelling strategy found {previous_so_count} sex offenses. The new strategy found {new_so_count} sex offenses.')
    print(f'Both labelling strategies agree on {len(agree_so)} offenses marked as sex offenses, and {len(agree_not_so)} offenses not marked as sex offenses.')
    print(f'The new strategy determined that there are {len(missed)} offenses not previously marked as sex offenses that should be (missed sex offenses).')
    print(f'The new strategy determined that there are {len(false_positives)} offenses previously marked as sex offenses that should not be (false positives).')
    print(f'\n{county} list of unique charges among the missed sex offenses:\n')
    print(missed.groupby(['Charge']).size().sort_values(ascending=False))
    print(f'\n{county} list of unique charges among the false positives:\n')
    print(false_positives.groupby(['Charge']).size().sort_values(ascending=False))

# Compare sex offense labelling strategies for each county
_compare_labelling(sf_new, 'Suffolk')
_compare_labelling(nw_new, 'Northwest')
_compare_labelling(ms_new, 'Middlesex')
# -
# # Apply the New Labelling Strategy to the Data Spreadsheets
#
# The 'sex' column will be updated in these three files: **merged_suff.csv**, **merged_nw.csv**, and **merged_ms.csv**.
#
# No other columns will be altered.
# +
# Drop the unneeded 'sex_old' column from the dataframes
sf_output = sf_new.drop(['sex_old'], axis=1)
nw_output = nw_new.drop(['sex_old'], axis=1)
ms_output = ms_new.drop(['sex_old'], axis=1)
# Save the updated dataframes as csv files, overwriting them in the processed data folder.
# (DataFrame.to_csv returns None; the *_file names only record that the write ran.)
sf_file = sf_output.to_csv('../../data/processed/merged_suff.csv', index=False)
nw_file = nw_output.to_csv('../../data/processed/merged_nw.csv', index=False)
ms_file = ms_output.to_csv('../../data/processed/merged_ms.csv', index=False)
| notebooks/sex_offenses_labelling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this notebook we explore the possibilities of fuzzy search algorithms in finding similarities.
#
# **Classification using fuzzy matching**
#
# -Classify whether question pairs are duplicate or not
#
# -Let us start with importing the necessary modules for exploring the data.
# +
# Dependencies: fuzzy edit distance (pyxdameraulevenshtein) and NLTK
# tokenization/stopwords on top of the usual numpy/pandas stack.
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from pyxdameraulevenshtein import damerau_levenshtein_distance, normalized_damerau_levenshtein_distance
from nltk.stem.porter import *
stemmer = PorterStemmer()  # instantiated here; the stemming call itself is commented out below
import random
import re
from nltk.corpus import stopwords
from nltk import word_tokenize, ngrams
eng_stopwords = set(stopwords.words('english'))
random.seed(1337)  # fix the RNG for reproducibility
# Quora question-pairs data; ISO-8859-1 avoids decode errors on non-UTF-8 bytes.
df_train = pd.read_csv('../data/train.csv', encoding="ISO-8859-1")
df_test = pd.read_csv('../data/test.csv', encoding="ISO-8859-1")
num_train = df_train.shape[0]  # split point between train and test after concat
print (num_train)
# -
df_train.head()
df_test.head()
# We use Jaccard similarity and Levenshtein distance to find the similarity between the questions. The number of words matched are also used as features for prediction.
# +
df_all = pd.concat((df_train, df_test), axis=0, ignore_index=True)
def get_unigrams(que):
    """Lowercased tokens of *que* with English stopwords filtered out."""
    tokens = word_tokenize(que.lower())
    return [tok for tok in tokens if tok not in eng_stopwords]
def get_common_unigrams(row):
    """Number of distinct tokens shared by the two tokenized questions."""
    shared = set(row["unigrams_ques1"]) & set(row["unigrams_ques2"])
    return len(shared)
def get_common_unigram_ratio(row):
    """Shared-token count divided by the combined vocabulary size (at least 1)."""
    vocab = set(row["unigrams_ques1"]) | set(row["unigrams_ques2"])
    return float(row["unigrams_common_count"]) / max(len(vocab), 1)
# Unigram features: per-question token lists, shared-token count and the
# Jaccard-style overlap ratio. The axis=1 applies are row-wise Python loops.
df_all["unigrams_ques1"] = df_all['question1'].apply(lambda x: get_unigrams(str(x)))
df_all["unigrams_ques2"] = df_all['question2'].apply(lambda x: get_unigrams(str(x)))
df_all["unigrams_common_count"] = df_all.apply(lambda row: get_common_unigrams(row),axis=1)
df_all["unigrams_common_ratio"] = df_all.apply(lambda row: get_common_unigram_ratio(row), axis=1)
print ("Unigrams generated...")
def get_bigrams(que):
    """All consecutive token pairs (bigrams) of the token list *que*."""
    return list(ngrams(que, 2))
def get_common_bigrams(row):
    """Number of distinct bigrams shared by the two questions."""
    shared = set(row["bigrams_ques1"]) & set(row["bigrams_ques2"])
    return len(shared)
def get_common_bigram_ratio(row):
    """Shared-bigram count divided by the combined bigram-set size (at least 1)."""
    combined = set(row["bigrams_ques1"]) | set(row["bigrams_ques2"])
    return float(row["bigrams_common_count"]) / max(len(combined), 1)
# Bigram features built from the unigram token lists, plus raw token counts.
df_all["bigrams_ques1"] = df_all["unigrams_ques1"].apply(lambda x: get_bigrams(x))
df_all["bigrams_ques2"] = df_all["unigrams_ques2"].apply(lambda x: get_bigrams(x))
df_all["bigrams_common_count"] = df_all.apply(lambda row: get_common_bigrams(row),axis=1)
df_all["bigrams_common_ratio"] = df_all.apply(lambda row: get_common_bigram_ratio(row), axis=1)
df_all["words_ques1"] = df_all["unigrams_ques1"].apply(len)
df_all["words_ques2"] = df_all["unigrams_ques2"].apply(len)
print ("Bigrams generated...")
from nltk.corpus import stopwords
stops = set(stopwords.words("english"))  # same stopword set as eng_stopwords above
def word_match_share(row):
    """Fraction of non-stopword vocabulary shared between the two questions.

    Equals 2*|shared| / (|vocab1| + |vocab2|). Returns 0 when either question
    consists entirely of stopwords (uses the module-level `stops` set).
    """
    q1_vocab = {w for w in str(row['question1']).lower().split() if w not in stops}
    q2_vocab = {w for w in str(row['question2']).lower().split() if w not in stops}
    if not q1_vocab or not q2_vocab:
        # The computer-generated chaff includes a few questions that are nothing but stopwords
        return 0
    shared = q1_vocab & q2_vocab
    return (len(shared) + len(shared)) / (len(q1_vocab) + len(q2_vocab))
print ("Word match share over...")
# +
# Word-count distributions over all questions (train and test pooled).
train_qs = pd.Series(df_train['question1'].tolist() + df_train['question2'].tolist()).astype(str)
test_qs = pd.Series(df_test['question1'].tolist() + df_test['question2'].tolist()).astype(str)
dist_train = train_qs.apply(lambda x: len(x.split(' ')))
dist_test = test_qs.apply(lambda x: len(x.split(' ')))
from collections import Counter
# If a word appears only once, we ignore it completely (likely a typo)
# Epsilon defines a smoothing constant, which makes the effect of extremely rare words smaller
def get_weight(count, eps=10000, min_count=2):
    """Inverse-frequency weight for a word seen *count* times.

    Words rarer than min_count get zero weight (likely typos); eps smooths the
    weight so extremely rare words are not over-weighted.
    """
    return 0 if count < min_count else 1 / (count + eps)
# NOTE(review): this module-level eps is never passed to get_weight below, so the
# function uses its default eps=10000; remove this line or wire it through.
eps = 5000
# IDF-style weights from the training questions' word frequencies.
words = (" ".join(train_qs)).lower().split()
counts = Counter(words)
weights = {word: get_weight(count) for word, count in counts.items()}
def tfidf_word_match_share(row):
    """Weighted share of words common to both questions.

    Uses the module-level `stops` set and `weights` dict (inverse-frequency
    weights). Returns 0 when either question is all stopwords.
    """
    # Dict comprehensions keep first-insertion order, matching the original text.
    q1words = {w: 1 for w in str(row['question1']).lower().split() if w not in stops}
    q2words = {w: 1 for w in str(row['question2']).lower().split() if w not in stops}
    if len(q1words) == 0 or len(q2words) == 0:
        # The computer-generated chaff includes a few questions that are nothing but stopwords
        return 0
    shared_weights = ([weights.get(w, 0) for w in q1words.keys() if w in q2words]
                      + [weights.get(w, 0) for w in q2words.keys() if w in q1words])
    total_weights = ([weights.get(w, 0) for w in q1words]
                     + [weights.get(w, 0) for w in q2words])
    return np.sum(shared_weights) / np.sum(total_weights)
# +
def str_stem(str1):
    """Normalize text: cast to str, strip non-alphanumeric chars (keeping
    spaces), and lowercase. Despite the name, stemming is disabled."""
    text = str(str1)
    text = re.sub(r'[^a-zA-Z0-9 ]', r'', text)
    # Stemming intentionally disabled; kept for reference:
    # text = (" ").join([stemmer.stem(z) for z in text.split(" ")])
    return text.lower()
def str_common_word(str1, str2):
    """Count words of *str1* that occur as substrings of *str2* (case-insensitive).

    Note this is substring containment, not whole-word matching: "cat" matches
    inside "concatenate".
    """
    haystack = str2.lower()
    return sum(1 for word in str1.lower().split() if word in haystack)
def ngram(tokens, n):
    """All length-n slices of *tokens* (characters for a string); empty when
    the sequence is shorter than n."""
    grams = []
    for start in range(len(tokens) - (n - 1)):
        grams.append(tokens[start:start + n])
    return grams
def get_sim(a_tri, b_tri):
    """Jaccard similarity |A ∩ B| / |A ∪ B| of two gram collections; 0 when
    both are empty."""
    set_a, set_b = set(a_tri), set(b_tri)
    union_size = len(set_a | set_b)
    if union_size == 0:
        return 0
    return float(len(set_a & set_b)) / union_size
def jaccard_similarity(str1, str2):
    """Jaccard similarity between the character 5-gram sets of two strings."""
    return get_sim(ngram(str1, 5), ngram(str2, 5))
# Normalize both questions, then derive length, word-overlap, character-5-gram
# Jaccard and normalized Damerau-Levenshtein features.
df_all['question1'] = df_all['question1'].map(lambda x:str_stem(x))
df_all['question2'] = df_all['question2'].map(lambda x:str_stem(x))
df_all['len_of_q1'] = df_all['question1'].map(lambda x:len(x.split())).astype(np.int64)
df_all['len_of_q2'] = df_all['question2'].map(lambda x:len(x.split())).astype(np.int64)
# str_stem strips every non-alphanumeric character, so '|' cannot occur inside a
# question and is a safe separator here.
df_all['questions'] = df_all['question1']+"|"+df_all['question2']
print ("Questions combined...")
df_all['q2_in_q1'] = df_all['questions'].map(lambda x:str_common_word(x.split('|')[0],x.split('|')[1]))
df_all['q1_in_q2'] = df_all['questions'].map(lambda x:str_common_word(x.split('|')[1],x.split('|')[0]))
print ("Common words found ...")
df_all['jaccard'] = df_all['questions'].map(lambda x:jaccard_similarity(x.split('|')[0],x.split('|')[1]))
print ("Jaccard similarities computed...")
df_all['lev_distance'] = df_all['questions'].map(lambda x:normalized_damerau_levenshtein_distance(x.split('|')[0],x.split('|')[1]))
print ("Levenshtein distances computed...")
# -
# Persist the full feature table for reuse.
df_all.to_csv("../data/train_test.csv")
df_all.head()
# +
#df_all = df_all.drop(['id','qid1','qid2','questions','unigrams_ques1','unigrams_ques2','bigrams_ques1','bigrams_ques2'],axis=1)
# Re-split the concatenated frame back into train/test by row position.
df_train = df_all.iloc[:num_train]
df_test = df_all.iloc[num_train:]
# NOTE(review): these assignments write into slices of df_all (pandas
# SettingWithCopyWarning territory). Also, in modern pandas, apply(..., raw=True)
# with axis=1 passes ndarrays rather than Series, which would break the
# row['question1'] indexing inside the row functions — verify against the pandas
# version this was run with.
df_train['word_match'] = df_train.apply(word_match_share, axis=1, raw=True)
df_train['tfidf_word_match'] = df_train.apply(tfidf_word_match_share, axis=1, raw=True)
df_test['word_match'] = df_test.apply(word_match_share, axis=1, raw=True)
df_test['tfidf_word_match'] = df_test.apply(tfidf_word_match_share, axis=1, raw=True)
id_test = df_test['test_id']
y_train = df_train['is_duplicate'].values
# Feature matrices: everything except identifiers, target and raw text.
X_train = df_train.drop(['test_id','is_duplicate','question1','question2'],axis=1).values
X_test = df_test.drop(['test_id','is_duplicate','question1','question2'],axis=1).values
# -
df_train.head()
# +
# Hold out 20% of the feature rows for early-stopping validation.
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# sklearn.model_selection provides the same train_test_split API.
from sklearn.model_selection import train_test_split
x_trainb, x_validb, y_trainb, y_validb = train_test_split(X_train, y_train, test_size=0.2, random_state=4747)
import xgboost as xgb
# Gradient-boosted trees on the hand-crafted similarity features:
# binary classification scored with log-loss.
params = {}
params['objective'] = 'binary:logistic'
params['eval_metric'] = 'logloss'
params['eta'] = 0.1      # learning rate
params['max_depth'] = 6  # maximum tree depth
d_train = xgb.DMatrix(x_trainb, label=y_trainb)
d_valid = xgb.DMatrix(x_validb, label=y_validb)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
# Up to 1000 rounds; stop when validation log-loss fails to improve for 50.
bst = xgb.train(params, d_train, 1000, watchlist, early_stopping_rounds=50, verbose_eval=10)
# +
# Predict on the test set and write a Kaggle submission file.
d_test = xgb.DMatrix(X_test)
p_test = bst.predict(d_test)
sub = pd.DataFrame()
sub['test_id'] = np.int32(id_test)
sub['is_duplicate'] = p_test
sub.to_csv('../submission/simple_xgb_v2.csv', index=False)
# -
# This is my first notebook in Kaggle. Looking for suggestions to improve the model.
# +
X_train2 = df_train.drop(['test_id','is_duplicate','question1','question2'],axis=1)
pos_train = X_train2[y_train == 1]
neg_train = X_train2[y_train == 0]
# Now we oversample the negative class
# There is likely a much more elegant way to do this...
# Duplicate negatives until the positive rate drops to p (rebalances the class
# prior; 0.165 presumably matches the test-set rate — TODO confirm).
p = 0.165
scale = ((len(pos_train) / (len(pos_train) + len(neg_train))) / p) - 1
while scale > 1:
    neg_train = pd.concat([neg_train, neg_train])
    scale -=1
# Append the fractional remainder so the target ratio is hit exactly.
neg_train = pd.concat([neg_train, neg_train[:int(scale * len(neg_train))]])
print(len(pos_train) / (len(pos_train) + len(neg_train)))
x_train = pd.concat([pos_train, neg_train])
# Rebuild labels to match the rebalanced rows (positives first, then negatives).
y_train = (np.zeros(len(pos_train)) + 1).tolist() + np.zeros(len(neg_train)).tolist()
del pos_train, neg_train
# +
# Finally, we split some of the data off for validation.
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# sklearn.model_selection provides the same train_test_split API.
from sklearn.model_selection import train_test_split
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.2, random_state=4242)
# +
import xgboost as xgb
# Set our parameters for xgboost
# Second model: shallower trees with gamma (min split loss) regularization,
# trained on the rebalanced data.
params = {}
params['objective'] = 'binary:logistic'
params['eval_metric'] = 'logloss'
params['eta'] = 0.2
params['max_depth'] = 4
params['gamma'] =5
d_train = xgb.DMatrix(x_train, label=y_train)
d_valid = xgb.DMatrix(x_valid, label=y_valid)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
# Up to 5000 rounds; stop when validation log-loss stalls for 50 rounds.
bst = xgb.train(params, d_train, 5000, watchlist, early_stopping_rounds=50, verbose_eval=10)
# +
# Predict with the rebalanced model and write the second submission file.
X_test = df_test.drop(['test_id','is_duplicate','question1','question2'],axis=1)
d_test = xgb.DMatrix(X_test)
p_test = bst.predict(d_test)
sub = pd.DataFrame()
sub['test_id'] = np.int32(id_test)
sub['is_duplicate'] = p_test
sub.to_csv('../submission/simple_xgb_v5.csv', index=False)
# -
| scripts/mynotebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="7PMMxIBbhhaA" colab_type="text"
# # Convert YOLO to tflite
# * 안드로이드에 YOLO 모델을 돌리기 위해서는 tensorflow 모델로 올려야함
# * [tensorflow-yolov4-tflite](https://github.com/hunglc007/tensorflow-yolov4-tflite) : Darknet으로 생성한 yolo weight를 tflite 로 변환하기 위해 사용
# * MIT License
# + [markdown] id="jFuISvFehfeo" colab_type="text"
# ## weight 로딩
# + [markdown] id="iThEItPDh6ef" colab_type="text"
# ### 구글 드라이브와 연동
# + id="yJLdTCabhPqy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 127} executionInfo={"status": "ok", "timestamp": 1598515831156, "user_tz": -540, "elapsed": 20806, "user": {"displayName": "\uc548\uc2b9\ube48", "photoUrl": "", "userId": "02041625260438450830"}} outputId="2c12055a-3233-494b-c2ad-ed3d8f9455a9"
# Mount Google Drive so the notebook can read/write files under /content/gdrive.
from google.colab import drive
drive.mount("/content/gdrive")
# + [markdown] id="AjmuHcKwh2sB" colab_type="text"
# ### 소프트링크 생성
# * 편의성을 위함
# + id="9nSN05o7h38P" colab_type="code" colab={}
# !ln -s /content/gdrive/My\ Drive/Colab\ Notebooks/CPDS /mydrive
# + [markdown] id="YGpgrFxBjRbw" colab_type="text"
# ### weight, obj.names 로딩
# * weight : YOLO weight 파일
# * obj.names : model의 classes 정보
# + id="yVtK-fYXjcnG" colab_type="code" colab={}
# !cp /mydrive/yolov4-obj_best.weights model.weights
# !cp /mydrive/obj.names obj.names
# + [markdown] id="WJ4vcRfunVEO" colab_type="text"
# ## git clone
# + id="F9NsJERzptq-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 125} executionInfo={"status": "ok", "timestamp": 1598515859074, "user_tz": -540, "elapsed": 12101, "user": {"displayName": "\uc548\uc2b9\ube48", "photoUrl": "", "userId": "02041625260438450830"}} outputId="f5168a8f-d9ea-46d7-ec64-f41778967954"
# %cd /content
# !git clone https://github.com/hunglc007/tensorflow-yolov4-tflite.git
# + [markdown] id="xXZtG86dnXkd" colab_type="text"
# ## Convert
#
# + [markdown] id="bBMOd4ro79oO" colab_type="text"
# * save_model.py 를 실행하면 output_path에 pb 파일이 생성됨
# * 생성된 pb파일의 경로가 다음에 실행할 conver_tflite.py의 weights의 경로가 됨
# * tensorflow-yolov4-tflite/core/config.py 파일
# + [markdown] id="J6NhUGnj89CS" colab_type="text"
# ### 1. config.py 수정
# * /tensorflow-yolov4-tflite/core/config.py
# * __C.YOLO.CLASSES = "./data/classes/coco.names" 에서 __C.YOLO.CLASSES = "./data/classes/obj.names" 로 수정
# * custom model의 classes name을 적용하기 위함
# + id="CSDB2zEb-U6P" colab_type="code" colab={}
# move obj.names to classes
# !mv /content/obj.names /content/tensorflow-yolov4-tflite/data/classes
# + [markdown] id="nnyvWWN6-I0R" colab_type="text"
# ### 2. convert weight to pb
# * weights를 변환하여 pb 를 생성한다
# * 지정한 경로에 save_model.py 에 wegihts를 전달하면 output에 지정한 경로에 생성됨
# + id="6QRxf3Zcj-i8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1598516137227, "user_tz": -540, "elapsed": 84837, "user": {"displayName": "\uc548\uc2b9\ube48", "photoUrl": "", "userId": "02041625260438450830"}} outputId="64207a13-28e0-481c-8282-eae18ed068e0"
# %cd /content/tensorflow-yolov4-tflite
# save_model.py --weights your_weight --output output_path --input_size 정수 --model yolov3 | yolov4 --framework tflite [--tiny]
# !python save_model.py --weights ../model.weights --output /content --input_size 416 --model yolov4 --framework tflite
# + [markdown] id="wlHEq2xM_LLZ" colab_type="text"
# ### 3. convert pb to tflite
# * 2단계에서 생성 된 pb파일로부터 tflite 파일을 생성한다
# * --weights 옵션에 pb파일이 위치한 디렉토리의 경로를 전달(pb파일 경로가 아님)
# + id="aZLbWY8DmRw6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1598516303832, "user_tz": -540, "elapsed": 41711, "user": {"displayName": "\uc548\uc2b9\ube48", "photoUrl": "", "userId": "02041625260438450830"}} outputId="0536b815-5497-4bb8-b9bf-88e0df5d8360"
# %cd /content/tensorflow-yolov4-tflite
# python convert_tflite.py --weights pb_path --output output_path
# !python convert_tflite.py --weights /content/ --output /content/model.tflite
# + [markdown] id="Ao-ElvzS_6Bj" colab_type="text"
# ### 4. 드라이브에 저장
# + id="wv3OuOg0omeT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1598516347949, "user_tz": -540, "elapsed": 1912, "user": {"displayName": "\uc548\uc2b9\ube48", "photoUrl": "", "userId": "02041625260438450830"}} outputId="0ba34e00-2c3b-4f98-ab5e-671daea9235b"
# %cd /content
# !cp model.tflite /mydrive/model.tflite
| CPDS/Detector/Convertor/Convertor.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
# # MLE and REML
# Machine information
versioninfo()
# ## Demo data
# For demonstration, we generate a random data set.
# Generate data from a d-variate response variance component model:
# Y = X*B + E with vec(E) ~ N(0, Σ[1]⊗V[1] + ... + Σ[m]⊗V[m]).
using Random, LinearAlgebra
Random.seed!(123)
n = 1000 # no. observations
d = 2 # dimension of responses
m = 2 # no. variance components
p = 2 # no. covariates
# n-by-p design matrix
X = randn(n, p)
# p-by-d mean component regression coefficient
B = ones(p, d)
# a tuple of m covariance matrices
V = ntuple(x -> zeros(n, n), m)
for i = 1:m-1
    Vi = randn(n, 50)
    copyto!(V[i], Vi * Vi') # Vi*Vi' is positive semidefinite (rank ≤ 50)
end
copyto!(V[m], Matrix(I, n, n)) # last covariance matrix is the identity
# a tuple of m d-by-d variance component parameters
Σ = ntuple(x -> zeros(d, d), m)
for i in 1:m
    Σi = randn(d, d)
    copyto!(Σ[i], Σi' * Σi) # Σi'Σi is positive semidefinite
end
# form overall nd-by-nd covariance matrix Ω = Σᵢ kron(Σ[i], V[i])
Ω = zeros(n * d, n * d)
for i = 1:m
    Ω += kron(Σ[i], V[i])
end
Ωchol = cholesky(Ω)
# n-by-d responses: mean X*B plus correlated noise drawn via the Cholesky factor
Y = X * B + reshape(Ωchol.L * randn(n*d), n, d);
# ## Maximum likelihood estimation (MLE)
# To find the MLE of parameters $(B,\Sigma_1,\ldots,\Sigma_m)$, we take 3 steps:
#
# **Step 1 (Construct data)**. Construct an instance of `VarianceComponentVariate`, which consists of the fields
#
# * `Y`: $n$-by-$d$ responses
# * `X`: $n$-by-$p$ covariate matrix
# * `V=(V[1],...,V[m])`: a tuple of $n$-by-$n$ covariance matrices. The last covariance matrix must be positive definite and usually is the identity matrix.
using VarianceComponentModels
vcdata = VarianceComponentVariate(Y, X, V)
fieldnames(typeof(vcdata))
# In the absence of covariates $X$, we can simply initialize by `vcdata = VarianceComponentVariate(Y, V)`.
# **Step 2 (Construct a model)**. Construct an instance of `VarianceComponentModel`, which consists of fields
#
# * `B`: $n$-by-$p$ mean regression coefficients
# * `Σ=(Σ[1],...,Σ[m])`: a tuple of variance component parameters.
#
# When constructed from a `VarianceComponentVariate` instance, the mean parameters $B$ are initialized to be zero and the tuple of variance component parameters $\Sigma$ to be `(eye(d),...,eye(d))`.
vcmodel = VarianceComponentModel(vcdata)
fieldnames(typeof(vcmodel))
vcmodel
# The remaining fields `A`, `sense`, `b`, `lb`, `ub` specify (optional) constraints on the mean parameters `B`:
#
# $A * \text{vec}(B) \,\, =(\text{or } \ge \text{or } \le) \,\, b$
#
# $lb \le \text{vec}(B) \le ub$
#
# `A` is a constraint matrix with $pd$ columns, `sense` is a vector of characters taking values `'<'`, `'='` or `'>'`, and `lb` and `ub` are the lower and upper bounds for `vec(B)`. By default, `A`, `sense`, `b` are empty, `lb` is `-Inf`, and `ub` is `Inf`. If any constraints are non-trivial, final estimates of `B` are enforced to satisfy them.
# When a better initial guess is available, we can initialize by calling `vcmodel=VarianceComponentModel(B0, Σ0)` directly.
# **Step 3 (Fit model)**. Call optimization routine `fit_mle!`. The keyword `algo` dictates the optimization algorithm: `:MM` (minorization-maximization algorithm) or `:FS` (Fisher scoring algorithm).
vcmodel_mle = deepcopy(vcmodel)
@time logl, vcmodel_mle, Σse, Σcov, Bse, Bcov = fit_mle!(vcmodel_mle, vcdata; algo = :MM);
# The output of `fit_mle!` contains
#
# * final log-likelihood
logl
# * fitted model
fieldnames(typeof(vcmodel_mle))
vcmodel_mle
# * standard errors of the estimated variance component parameters
Σse
# * covariance matrix of the variance component parameters estimates
Σcov
# * standard errors of the estimated mean parameters
Bse
# * covariance matrix of the mean parameter estimates
Bcov
# ## Restricted maximum likelihood estimation (REML)
# [REML (restricted maximum likelihood estimation)](https://en.wikipedia.org/wiki/Restricted_maximum_likelihood) is a popular alternative to the MLE. To find the REML of a variance component model, we replace the above step 3 by
# **Step 3**. Call optimization routine `fit_reml!`.
vcmodel_reml = deepcopy(vcmodel)
@time logl, vcmodel_reml, Σse, Σcov, Bse, Bcov = fit_reml!(vcmodel_reml, vcdata; algo = :MM);
# The output of `fit_reml!` contains
# * the final log-likelihood at REML estimate
logl
# * REML estimates
fieldnames(typeof(vcmodel_reml))
vcmodel_reml
# * standard errors of the estimated variance component parameters
Σse
# * covariance matrix of the variance component parameters estimates
Σcov
# * standard errors of the estimated mean parameters
Bse
# * covariance matrix of the mean parameter estimates
Bcov
# ## Optimization algorithms
# Finding the MLE or REML of variance component models is a non-trivial nonlinear optimization problem. The main complications are the non-convexity of the objective function and the positive semi-definiteness constraint of variance component parameters $\Sigma_1,\ldots,\Sigma_m$. In specific applications, users should try different algorithms with different starting points in order to find a better solution. Here are some tips for efficient computation.
# In general the optimization algorithm needs to invert the $nd$ by $nd$ overall covariance matrix $\Omega = \Sigma_1 \otimes V_1 + \cdots + \Sigma_m \otimes V_m$ in each iteration. Inverting a matrix is an expensive operation with $O(n^3 d^3)$ floating point operations. When there are only **two** variance components ($m=2$), this tedious task can be avoided by taking one (generalized) eigendecomposition of $(V_1, V_2)$ and rotating data $(Y, X)$ by the eigenvectors.
vcdatarot = TwoVarCompVariateRotate(vcdata)
fieldnames(typeof(vcdatarot))
# Two optimization algorithms are implemented: [Fisher scoring](https://books.google.com/books?id=QYqeYTftPNwC&lpg=PP1&pg=PA142#v=onepage&q&f=false) (`mle_fs!`) and the [minorization-maximization (MM) algorithm](http://arxiv.org/abs/1509.07426) (`mle_mm!`). Both take the rotated data as input. These two functions give finer control of the optimization algorithms. Generally speaking, MM algorithm is more stable while Fisher scoring (if it converges) yields more accurate answer.
vcmodel_mm = deepcopy(vcmodel)
@time mle_mm!(vcmodel_mm, vcdatarot; maxiter=10000, funtol=1e-8, verbose = true);
# MM estimates
vcmodel_mm.B
# MM estimates
vcmodel_mm.Σ
# Fisher scoring (`mle_fs!`) uses either [Ipopt.jl](https://github.com/JuliaOpt/Ipopt.jl) (keyword `solver=:Ipopt`) or [KNITRO.jl](https://github.com/JuliaOpt/KNITRO.jl) (keyword `solver=:Knitro`) as the backend solver. Ipopt is open source and installation of [Ipopt.jl](https://github.com/JuliaOpt/Ipopt.jl) package alone is sufficient.
# Fisher scoring using Ipopt
vcmodel_ipopt = deepcopy(vcmodel)
@time mle_fs!(vcmodel_ipopt, vcdatarot; solver=:Ipopt, maxiter=1000, verbose=true);
# Ipopt estimates
vcmodel_ipopt.B
# Ipopt estimates
vcmodel_ipopt.Σ
# Knitro is a commercial software and users need to follow instructions at [KNITRO.jl](https://github.com/JuliaOpt/KNITRO.jl) for proper functioning. Following code invokes Knitro as the backend optimization solver.
# ```julia
# using KNITRO
#
# # Fisher scoring using Knitro
# vcmodel_knitro = deepcopy(vcmodel)
# @time mle_fs!(vcmodel_knitro, vcdatarot; solver=:Knitro, maxiter=1000, verbose=true);
#
# # Knitro estimates
# vcmodel_knitro.B
#
# # Knitro estimates
# vcmodel_knitro.Σ
# ```
# ## Starting point
#
# Here are a few strategies for successful optimization.
# * For $d>1$ (multivariate response), initialize $B, \Sigma$ from univariate estimates.
# * Use REML estimate as starting point for MLE.
# * When there are only $m=2$ variance components, pre-compute `TwoVarCompVariateRotate` and use it for optimization.
# ## Constrained estimation of `B`
#
#
# Many applications invoke constraints on the mean parameters `B`. For demonstration, we enforce `B[1,1]=B[1,2]` and all entries of `B` are within [0, 2].
# Set up constraints on B: enforce B[1,1] == B[1,2] and 0 ≤ vec(B) ≤ 2.
vcmodel_constr = deepcopy(vcmodel)
vcmodel_constr.A = [1.0 0.0 -1.0 0.0] # A*vec(B) = B[1,1] - B[1,2]
vcmodel_constr.sense = '='
vcmodel_constr.b = 0.0
vcmodel_constr.lb = 0.0
vcmodel_constr.ub = 2.0
vcmodel_constr
# We first try the MM algorithm.
# MM algorithm for constrained estimation of B
@time mle_mm!(vcmodel_constr, vcdatarot; maxiter=10000, funtol=1e-8, verbose = true);
fieldnames(typeof(vcmodel_constr))
vcmodel_constr.B
vcmodel_constr.Σ
# Now let's try Fisher scoring.
# Fisher scoring using Ipopt for constrained estimation of B
# (reset the model so the constraints start from the same initial state)
vcmodel_constr = deepcopy(vcmodel)
vcmodel_constr.A = [1.0 0.0 -1.0 0.0] # A*vec(B) = B[1,1] - B[1,2]
vcmodel_constr.sense = '='
vcmodel_constr.b = 0.0
vcmodel_constr.lb = 0.0
vcmodel_constr.ub = 2.0
vcmodel_constr
@time mle_fs!(vcmodel_constr, vcdatarot; solver=:Ipopt, maxiter=1000, verbose=true);
vcmodel_constr.B
vcmodel_constr.Σ
| docs/mle_reml.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: JModelica 2.4 64 bit
# language: python
# name: jmodelicap2_4_64bit
# ---
from pyfmi import load_fmu
from pyfmi.fmi_coupled import CoupledFMUModelME2
# Load CSV reader FMU (log_level=7 is the most verbose FMI logging)
csv_reader = load_fmu('csv_reader/simulator.fmu', log_level=7)
# NOTE(review): the declared stop_time (20) differs from the simulate
# final_time (23.0) below — confirm the FMU tolerates running past it.
csv_reader.setup_experiment(start_time=0, stop_time=20)
# +
# Create the Master algorithm
master = CoupledFMUModelME2([csv_reader], [])
# Launch the simulation
results = master.simulate(final_time=23.0)
csv_reader.terminate()
# +
# Plot the results
import matplotlib.pyplot as plt
# %matplotlib inline
plt.figure(figsize=(10, 3))
plt.plot(results[csv_reader]["time"], results[csv_reader]["y"])
plt.ylabel("Data from the CSV file")
plt.xlabel("Time")
plt.show()
| tests/001_load_and_simulate/ME_server/simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import numpy as np
import numpy.random as npr
import scipy as sp
import scipy.ndimage
import leggps
import matplotlib.pylab as plt
# %matplotlib inline
# -
# # interpolation
# ### construct a few independent one-d observations
# +
# Two independent 1-D series: strictly increasing random observation times
# (cumsum of positive gaps) with Gaussian-smoothed noise values as (T, 1) matrices.
sample1_ts = np.cumsum(npr.exponential(size=1000)+.01) # <-- irregularly spaced observation times!
sample1_vals = sp.ndimage.gaussian_filter1d(npr.randn(1000),10,axis=0)[:,None]
sample2_ts = np.cumsum(npr.exponential(size=100)+.1)
sample2_vals = sp.ndimage.gaussian_filter1d(npr.randn(100),10,axis=0)[:,None]
# -
# Note that `sample1_vals` and `sample2_vals` **MUST** be matrices, not vectors:
print(sample1_ts.shape)
print(sample1_vals.shape)
print(sample2_ts.shape)
print(sample2_vals.shape)
# Look at one of them
plt.plot(sample1_ts,sample1_vals)
# ### put them together
all_ts = [sample1_ts,sample2_ts]
all_vals=[sample1_vals,sample2_vals]
# ### train a model
# train a rank-5 LEG model on it
rank=5
result = leggps.training.fit(all_ts,all_vals,rank)
# better yet, fit a rank-5 LEG model and get live updates (only works if you have installed tqdm)
# this picks a RANDOM initial condition somewhat based on the data
# often works ok
# NOTE: this second fit overwrites `result`; the first call above is just a demo.
result = leggps.training.fit(all_ts,all_vals,rank,use_tqdm_notebook=True,maxiter=100)
# look at the different nats we found as we trained using BFGS.
#
# here `nats = - log likelihood / nobservations`. lower is better
# +
plt.subplot(1,2,1)
iterations=range(len(result['nats']))
plt.plot(iterations,result['nats'])
plt.xlabel('iterations')
plt.ylabel("nats (lower is better)")
plt.subplot(1,2,2)
plt.plot(iterations[80:],result['nats'][80:])
plt.xlabel('iterations')
plt.title("zoom in on the later \n iterations")
plt.tight_layout()
# -
# Looking at these losses, it seems like maybe we don't need to train anymore.
print("training result:",result['message'])
# That means we're as good as we're going to get with the numerical precision we have. Let's train a little longer, just for fun.
# result['params'] is the model learned in the first round of training
# here we feed it back into the trainer, which uses it as an initial condition...
result2 = leggps.training.fit(all_ts,all_vals,rank,use_tqdm_notebook=True,maxiter=100,**result['params'])
# What did nats look like as we did a second round of training?
# +
plt.subplot(1,2,1)
iterations=range(len(result2['nats']))
plt.plot(iterations,result2['nats'])
plt.ylim(np.min(result2['nats']),np.min(result['nats']))
plt.xlabel('iterations')
plt.ylabel("nats (lower is better)")
plt.subplot(1,2,2)
plt.plot(iterations[10:],result2['nats'][10:])
plt.tight_layout()
plt.xlabel('iterations')
# -
# Looks like at the end we weren't finding any better solutions. BFGS will always return the best solution that it came across (not necessarily the last one, which may not be the best).
#
# Let's see what the optimizer had to say:
print("training result:",result2['message'])
# Yeah still the same.
model = result2['params']
# A LEG model has four parameters: N,R,B,Lambda. We can look at all of them, though they may not mean too much.
print(model['N'])
print(model['R'])
print(model['B'])
print(model['Lambda'])
# ### forecasting/interpolating
# Now that we've trained our model on that dataset, we can do forecasting on any dataset. Let's make an interesting one. We'll start by making a regular looking timeseries
sample3_ts = np.cumsum(np.r_[npr.exponential(size=500)])
sample3_vals = sp.ndimage.gaussian_filter1d(npr.randn(500),10,axis=0)[:,None]
# But then we'll chop out the middle
# +
sample3_ts_chopped = np.r_[sample3_ts[:200],sample3_ts[-200:]]
sample3_vals_chopped = np.concatenate([sample3_vals[:200],sample3_vals[-200:]])
plt.scatter(sample3_ts,sample3_vals,color='C1',alpha=.2)
plt.scatter(sample3_ts_chopped,sample3_vals_chopped,color='C0')
# -
# Let's see how the model interpolates and forecasts with this data. We pick a set of times we're interested in. We feed it the censored observations and the model. It spits out predictions:
# +
# Predict at 300 evenly spaced times spanning before, inside and beyond the data,
# then overlay the posterior mean and its pointwise +/- 2 sd band.
forecast_times=np.r_[-200:700:300j]
means,variances=leggps.posterior_predictive(sample3_ts_chopped,sample3_vals_chopped,forecast_times,**model)
plt.scatter(sample3_ts_chopped,sample3_vals_chopped,label='observed data')
plt.scatter(sample3_ts[200:-200],sample3_vals[200:-200],label='censored data')
plt.plot(forecast_times,means,'C1',label='interpolation/forecasting')
plt.fill_between(forecast_times,
                 means[:,0]+2*np.sqrt(variances[:,0,0]),
                 means[:,0]-2*np.sqrt(variances[:,0,0]),
                 color='black',alpha=.5,label='Uncertainty')
plt.legend(bbox_to_anchor=[1,1],fontsize=20)
# -
# # smoothing
# For smoothing, let's look at datasets which have some noise on top of them.
# +
sample1_ts = np.cumsum(npr.exponential(size=1000)+.01)
sample1_vals = sp.ndimage.gaussian_filter1d(npr.randn(1000),10,axis=0)[:,None]
sample1_vals_plusnoise = sample1_vals+.5*npr.randn(1000)[:,None]
plt.plot(sample1_ts,sample1_vals,label='pure observations')
plt.plot(sample1_ts,sample1_vals_plusnoise,'.',label='noised observations')
plt.legend()
# -
rank=5
result = leggps.training.fit([sample1_ts],[sample1_vals_plusnoise],rank,use_tqdm_notebook=True)
# Look at training process...
plt.plot(result['nats'])
plt.ylim(np.min(result['nats']),np.min(result['nats'])+.01)
print(result['message'])
# Looks fine. This error about precision loss always happens. Let's store this model and use it.
model = result['params']
# Let's try to smooth the data (in this case, the same data we trained on)
# Predict on a dense regular grid covering and extending beyond the data.
forecast_times=np.r_[-200:600]
means,variances=leggps.posterior_predictive(sample1_ts,sample1_vals_plusnoise,forecast_times,**model)
# +
plt.plot(sample1_ts,sample1_vals,'r-',label='true noiseless process')
plt.scatter(sample1_ts,sample1_vals_plusnoise,label='observed noisy data')
plt.plot(forecast_times,means,'C1',label='interpolation/forecasting')
# +/- 2 sd band for the latent (noiseless) process.
plt.fill_between(forecast_times,
                 means[:,0]+2*np.sqrt(variances[:,0,0]),
                 means[:,0]-2*np.sqrt(variances[:,0,0]),
                 color='black',alpha=.5,label='Uncertainty')
# Widen by Lambda@Lambda.T to get the predictive variance for new observations
# (see the note below this cell).
v2 = variances + model['Lambda']@model['Lambda'].T
plt.fill_between(forecast_times,
                 means[:,0]+2*np.sqrt(v2[:,0,0]),
                 means[:,0]-2*np.sqrt(v2[:,0,0]),
                 color='green',alpha=.1,label='Posterior predictive variance')
plt.xlim(-200,600)
plt.ylim(-1,1)
plt.legend(bbox_to_anchor=[1.5,.5],fontsize=20)
# -
# This shows the difference between the posterior predictive uncertainty and the posterior predictive variances:
#
# - The interpolation/forecast (orange) tells us our guess for the true noiseless process
# - The posterior predictive uncertainty (gray) tells us how uncertain we are about the orange curve
# - The posterior predictive variances (green) tells us how uncertain we should be about the value of a new observation at a particular time.
# Another way to think about the learned model is to look directly at the LEG covariance.
taus=np.r_[0:400]
C_LEG=leggps.C_LEG(taus,**model)
plt.plot(taus,C_LEG[:,0,0],'.')
# Here we see that the marginal variance ($C(0)$) is much higher than $C(\tau),\tau>0$. That's because of the observation noise. The observation noise adds a spike at zero.
#
# Let's zoom in:
plt.plot(taus,C_LEG[:,0,0],'.')
plt.ylim(0,C_LEG[1,0,0])
# # speed test
# +
# Time leggps.cr.decompose on one million 3x3 blocks. The second %time call
# measures the traced tf.function; the first includes one-time tracing overhead.
m=1000000
import tensorflow as tf
import leggps.cr
import numpy as np
Rs=tf.convert_to_tensor(np.zeros((m,3,3)) + np.eye(3)[None,:,:],dtype=tf.float32)
Os=tf.convert_to_tensor(np.zeros((m-1,3,3)),dtype=tf.float32)
@tf.function(autograph=False)
def go(Rs,Os):
    return leggps.cr.decompose(Rs,Os)
# %time foo=go(Rs,Os)
# %time foo=go(Rs,Os)
| examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <table>
# <tr><td><img style="height: 150px;" src="images/geo_hydro1.jpg"></td>
# <td bgcolor="#FFFFFF">
# <p style="font-size: xx-large; font-weight: 900; line-height: 100%">AG Dynamics of the Earth</p>
# <p style="font-size: large; color: rgba(0,0,0,0.5);">Jupyter notebooks</p>
# <p style="font-size: large; color: rgba(0,0,0,0.5);"><NAME></p>
# </td>
# </tr>
# </table>
# # Dynamic systems: 9. Viscous material
# ## Hagen-Poiseuille flow
# ----
# *<NAME>,
# Geophysics Section,
# Institute of Geological Sciences,
# Freie Universität Berlin,
# Germany*
import numpy as np
import scipy.special
import matplotlib.pyplot as plt
# ## Definition of **Hagen-Poiseuille** flow law
#
# The **Hagen-Poiseuille** flow law describes the laminar flow profile $u(r)$ [m/s] in a cylindrical pipe of
# radius $R$ [m] and length $L$ [m], with $x$ [m] the coordinate along the pipe,
# and $r$ [m] the coordinate perpendicular to
# the pipe length, caused by a pressure gradient ${{dp}\over{dx}}$ [Pa/m]:
# $$
# u(r) = -{{dp}\over{dx}} {{R^2}\over{4\eta}} \left[ 1 - \left( {{r}\over{R}} \right)^2 \right]
# $$
# The fluid is described by the dynamic viscosity $\eta$ [Pas].
#
# Consider a pipe with $R=0.1$ m radius, and water (dynamic viscosity $\eta \simeq 10^{-3}$ Pas,
# density $\rho=1000$ kg/m$^3$). Then the maximum velocity is given by (set r=0 above):
# $$
# u_{max} = -{{dp}\over{dx}} {{R^2}\over{4\eta}}
# $$
# For $u_{max}=1$ m/s we need a pressure gradient of ${{dp}\over{dx}}=-0.4$ Pa/m along the pipe.
# +
# Pipe/fluid parameters: water in a 0.1 m pipe; dp/dx chosen so u_max = 1 m/s.
R = 0.1        # pipe radius [m]
eta = 1e-3     # dynamic viscosity [Pa s]
rho = 1000     # density [kg/m^3]
dpdx = -0.4    # pressure gradient [Pa/m]
# Evaluate the parabolic profile u(r) across the full diameter.
r = np.linspace(-R, R, 101)
u = -dpdx * R**2 / (4 * eta) * (1 - (r / R)**2)
plt.figure(figsize=(12,6))
plt.xlabel('u [m/s]')
plt.ylabel('r [m]')
plt.plot(u, r, linewidth=4, label='Hagen-Poiseuille')
plt.legend()
# -
# ## Derivation of **Hagen-Poiseuille** flow law
#
# The **Hagen-Poiseuille law** can be derived from the **Navier-Stokes equation**:
# $$
# \rho \frac{\partial \vec{u}}{\partial t}
# + \rho \left( \vec{u} \cdot \nabla \right) \vec{u}
# = - \nabla p
# + \eta \Delta \vec{u}
# - \rho \vec{g}
# $$
# Here,
# $\vec{u}$ [m/s] is the velocity vector,
# $\rho$ [kg/m$^3$] fluid density,
# $\eta$ [Pas] fluid dynamic viscosity,
# $\vec{g}$ [m/s$^2$] gravitational acceleration,
# $p$ [Pa] pressure,
# $t$ [s] time, and $\nabla$ [1/m] the Nabla operator, and
# $\Delta$ [1/m$^2$] the Laplace operator.
#
# We simplify the Navier-Stokes equation by assuming
# - **steady-state flow:** $\frac{\partial \vec{u}}{\partial t}=0$
# - **no advection:** $\left( \vec{u} \cdot \nabla \right) \vec{u}=0$
# - **no gravitational force:** $\rho \vec{g}=0$
# and then we find:
# $$
# \nabla p = \eta \Delta \vec{u}
# $$
#
# As we are dealing with a pipe, we use cylindrical coordinates, and only consider a radial dependence, thus
# $$
# {{1}\over{r}} \left( {{\partial}\over{\partial r}} r {{\partial u}\over{\partial r}} \right)
# = {{1}\over{\eta}} {{dp}\over{dx}}
# $$
# A solution for the above differential equation is:
# $$
# u(r) =
# +{{dp}\over{dx}} {{1}\over{4\eta}} \left[ r^2 - R^2 \right] =
# -{{dp}\over{dx}} {{R^2}\over{4\eta}} \left[ 1 - \left( {{r}\over{R}} \right)^2 \right]
# $$
# thus our **Hagen-Poiseuille flow**.
# ## Transient profile
#
# The **Hagen-Poiseuille profile** is a steady-state profile of flow. The transient
# development towards the final profile can be approximated by Bessel functions:
# $$
# u(r,t) = -{{dp}\over{dx}} {{1}\over{4\eta}} \left( R^2 - r^2 \right)
# + {{dp}\over{dx}} {{2R^2}\over{\eta}}
# \sum\limits_{n=1}^{\infty}
# {{1}\over{\lambda_n^3}}
# {{J_0(\lambda_n r / R)}\over{J_1(\lambda_n)}} e^{-\lambda_n^2 {{\eta t}\over{\rho R^2}} }
# $$
# with $J_0$ and $J_1$ the Bessel functions of first kind and order zero resp. one.
# $\lambda_n$ are the roots of $J_0$, $J_0(\lambda_n)=0$.
# +
nzeros = 50                        # number of Bessel-series terms to sum
t = np.array([1,10,100,1000,1e4])  # snapshot times [s]
# Roots lambda_n of J0, required by the series expansion.
J0_roots = scipy.special.jn_zeros(0,nzeros)
#print('J0roots: ',J0_roots,J0_roots.shape)
# u(r_j, t_i): one column per snapshot time.
utransient = np.zeros(len(r)*len(t)).reshape(len(r),len(t))
print(utransient.shape)
print(r.shape)
for i in range(len(t)):
    for j in range(len(r)):
        besselsum = 0
        for n in range(nzeros):
            J0root = J0_roots[n]
            J0 = scipy.special.jv(0,J0root*r[j]/R)
            J1 = scipy.special.jv(1,J0root)
            # Decay rate uses eta/rho, the kinematic viscosity nu, i.e.
            # exp(-lambda_n^2 * nu * t / R^2) with nu = eta/rho.
            besselsum = besselsum + 1/J0root**3 * J0 / J1 * np.exp(-J0root**2*eta*t[i]/rho/R**2)
            #print(i,j,n,J0,J1,besselsum)
        # Steady-state parabola plus the decaying Bessel-series correction.
        utransient[j,i] = -dpdx/4/eta*(R**2-r[j]**2) + 2*dpdx*R**2/eta*besselsum
plt.figure(figsize=(12,6))
plt.xlabel('u [m/s]')
plt.ylabel('Radius [m]')
for i in range(len(t)):
    plt.plot(utransient[:,i],r,linewidth=2,label='t='+str(round(t[i],0))+' s')
plt.legend()
# -
# ## Bessel functions
#
# Bessel functions are solutions of the second-order differential equation:
# $$
# x^2 {{d^2J_{\alpha}}\over{dx^2}} + x {{dJ_{\alpha}}\over{dx}} + (x^2 - \alpha^2) J_{\alpha} = 0
# $$
# with $\alpha$ the **order** of the Bessel function $J_{\alpha}$.
#
# In `python`, Bessel functions are implemented in the package `scipy.special`, as well as their
# roots.
# Evaluate the first three Bessel functions of the first kind on [0, 32]
# and mark the roots of J0 on the zero line.
x = np.linspace(0, 32, 101)
J0, J1, J2 = (scipy.special.jv(order, x) for order in range(3))
nzeros = 10
J0_roots = scipy.special.jn_zeros(0, nzeros)
plt.figure(figsize=(12,6))
plt.xlabel('x')
plt.ylabel('J$_a(x)$')
plt.plot(x, J0, linewidth=4, label='J$_0$')
plt.plot(x, J1, label='J$_1$')
plt.plot(x, J2, label='J$_2$')
# Reference zero line plus the roots of J0 as large markers.
plt.plot(x, np.zeros(len(x)), linewidth=1, linestyle=':', color='gray')
plt.plot(J0_roots, np.zeros(len(J0_roots)), linestyle='none', marker='o', markersize=15, label='roots of J$_0$')
plt.legend()
# ... done
| Dynamics_lab09_HagenPoiseuille.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Main
# This script demonstrates how the whole pipeline should work.
# +
# Imports
import sys
sys.path.append('../..')
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from help_project.src.disease_model import data
from help_project.src.disease_model import ensemble_model
from help_project.src.disease_model.models import parameter_mapper
from help_project.src.disease_model.models import sir
from help_project.src.disease_model.models import seir
from help_project.src.disease_model.utils import data_fetcher
from help_project.src.exitstrategies import data_elt
from help_project.src.exitstrategies import lockdown_policy
from help_project.src.economic_model.models import basic_lockdown_model as economy
from help_project.src.optimization import lockdown_config
from help_project.src.optimization import loss_function
from help_project.src.optimization import optimizer
# +
# Select one country
# Experiment setup: the country to model and the evaluation window over
# which candidate lockdown policies are compared.
country = 'India'
start = datetime.date(2020, 5, 11)
end = datetime.date(2020, 6, 10)
print('Working on %s between %s and %s' % (country, start, end))
# +
# Pull population + epidemic time series, then split the health data into a
# training window (strictly before `start`) and a held-out future window.
fetcher = data_fetcher.DataFetcher()
population_data = fetcher.get_population_data(country)
health_data = fetcher.get_health_data(country)
split_point = start - datetime.timedelta(1)
past_health_data = health_data[:split_point]
future_health_data = health_data[start:]
# +
# Get all distinct policies used by any country at any point
# Collect every distinct lockdown policy applied by any country before `start`.
policy_timeseries_by_country = data_elt.DataELT.extract_attribute_data()
seen_policies = set()
for country_ts in policy_timeseries_by_country.values():
    for application in country_ts[:start].policies:
        seen_policies.add(application.policy)
all_policies = list(seen_policies)
len(all_policies)
# +
# Generate lockdown config - which chooses one of the precomputed policies
def generate_lockdown_policy(kwargs):
    """Build a LockdownTimeSeries that applies one precomputed policy.

    `kwargs['policy']` is an index into `all_policies`; the chosen policy is
    applied over the whole evaluation window.
    """
    policy = all_policies[kwargs['policy']]
    policy_ts = lockdown_policy.LockdownTimeSeries([
        lockdown_policy.LockdownPolicyApplication(
            policy=policy,
            start=start,
            # +1 day so the application covers `end` itself
            # (the end bound appears to be exclusive — TODO confirm).
            end=end + datetime.timedelta(1))
    ])
    return policy_ts
# Monkey-patch the config class so the optimizer's search space is simply
# the index of the policy to apply.
lockdown_config.LockdownConfig.generate_lockdown_policy = generate_lockdown_policy
config = lockdown_config.LockdownConfig(
    policy=lockdown_config.Options([i for i, _ in enumerate(all_policies)]),
)
# +
# Define health & econ model wrappers and the loss function
class HealthModelWrapper:
    """Adapter binding a health model to fixed population/health data,
    exposing the single-argument `run(policy)` interface the optimizer needs."""

    def __init__(self, model, population_data, health_data):
        self.model = model
        self.population_data = population_data
        self.health_data = health_data

    def run(self, policy):
        """Predict health outcomes for `policy` using the bound data."""
        args = (self.population_data, self.health_data, policy)
        return self.model.predict(*args)
class Loss(loss_function.LossFunction):
    """Two-objective loss: (total deaths, negated total economic output)."""

    def compute(self, health_output, economic_output):
        deaths = sum(health_output.deaths)
        economy_score = -sum(economic_output.values())
        return deaths, economy_score
# +
# Create and fit a health model
# Fit an ensemble of SIR + SEIR compartmental models on the pre-`start` data.
sir_model = sir.SIR()
seir_model = seir.SEIR()
health_model = ensemble_model.EnsembleModel([sir_model, seir_model])
india_policies = policy_timeseries_by_country['1_ind'][:start]
health_model.fit(population_data, past_health_data, india_policies)
# (A parameter_mapper.ParameterMapper could optionally be fitted here and
# attached as health_model.parameter_mapper.)
wrapped_health_model = HealthModelWrapper(
    health_model, population_data, past_health_data)
# -
# Create the economic model
economic_model = economy.EconomicLockdownModel()
# +
# Optimize and obtain the best result
# Try every candidate policy, then flip the economic term back to a positive
# GDP value (the loss minimizes its negative).
opt = optimizer.ExhaustiveSearch(config, Loss())
raw_results = opt.optimize(wrapped_health_model, economic_model)
results = []
for sol, (health, econ) in raw_results:
    results.append((sol, (health, -econ)))
def describe_policy(policy):
    """Return a human-readable description of where `policy` was applied."""
    for country_key, policy_timeseries in policy_timeseries_by_country.items():
        matches = (p for p in policy_timeseries.policies if p.policy == policy)
        match = next(matches, None)
        if match is not None:
            return 'Policy "%s" applied from %s to %s' % (country_key, match.start, match.end or 'now')
    return 'Not found'
# Summarize every candidate: deaths, GDP, and where the policy came from.
for pol, loss in results:
    print('Deaths: %i, GDP: %.2f - %s' % (int(loss[0]), loss[1], describe_policy(pol.policies[0].policy)))
# If there's a pareto frontier, plot the points
if len(results) > 1:
    fig, ax = plt.subplots(figsize=(12,4))
    # Sorted (deaths, GDP) pairs for the surviving candidate policies.
    values = sorted((health, econ) for _, (health, econ) in results)
    max_x, max_y = 0, 0
    # Scatter every point the optimizer evaluated. opt.records stores the raw
    # losses, where the econ term is still negated — hence the -y below.
    for x, y in [loss for _, loss in opt.records]:
        max_x = max(max_x, x)
        max_y = max(max_y, -y)
        # NOTE(review): this plots a single point per call, so the 'dashed'
        # linestyle has no visible effect here — only the markers show.
        plt.plot(x, -y, marker='o', linestyle='dashed')
    # Connect the sorted candidate results as a (pseudo) Pareto frontier.
    x, y = zip(*values)
    plt.plot(x, y, marker='o', linestyle='dashed')
    ax.set_xlabel('Deaths')
    ax.set(xlim=(0, 1.3 * max_x))
    ax.set_ylabel('GDP')
    ax.set(ylim=(0, 1.3 * max_y))
# -
# Code for Health model output Visualization
def visualize(title, past_data, predictions, ground_truth=None):
    """Plot past data and one or more forecasts on a shared date axis.

    Args:
        title: plot title.
        past_data: pandas Series indexed by date (the training window).
        predictions: dict mapping a label to a forecast Series indexed by
            date (the future window).
        ground_truth: optional Series of observed values over the future
            window, plotted alongside the predictions.

    Returns:
        The matplotlib Axes holding the point plot.
    """
    past_df = pd.DataFrame({'date': past_data.index.strftime('%Y-%m-%d'),
                            'past': past_data})
    future_data = {}
    for prediction, values in predictions.items():
        future_data[prediction] = values
        # NOTE: assumes every prediction series shares the same index.
        future_data['date'] = values.index.strftime('%Y-%m-%d')
    if ground_truth is not None:
        future_data['ground_truth'] = ground_truth
    future_df = pd.DataFrame(future_data)
    data = pd.concat([past_df, future_df])
    dates = data['date']
    # Long format so seaborn can color ('hue') by series name.
    melt = data.melt(id_vars=['date'],
                     var_name='data',
                     value_name='value')
    fig, ax = plt.subplots(figsize = (12,6))
    ax = sns.pointplot(x='date', y='value', data=melt, hue='data')
    ax.set_title(title)
    ax.set_xticklabels(labels=dates, rotation=45, ha='right')
    # Thin out the x tick labels so roughly 15 remain readable.
    good_tick_count = 15
    if len(dates) > good_tick_count:
        n = len(dates) // good_tick_count
        for ind, label in enumerate(ax.get_xticklabels()):
            if ind % n == 0: # every nth label is kept
                label.set_visible(True)
            else:
                label.set_visible(False)
    return ax
# +
# Visualize policy output
# Run the wrapped health model for every candidate policy and compare the
# predicted curves against the held-out future data.
predictions = [wrapped_health_model.run(policy) for policy, _ in results]
wrapped_health_model
# One figure per health metric: confirmed cases, recoveries, deaths.
for title, field in (('Confirmed', 'confirmed_cases'),
                     ('Recovered', 'recovered'),
                     ('Deaths', 'deaths')):
    _ = visualize(title,
                  getattr(past_health_data, field),
                  {describe_policy(results[i][0].policies[0].policy): getattr(prediction, field)
                   for i, prediction in enumerate(predictions)},
                  getattr(future_health_data, field))
# -
| src/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using the FullPathMapper for Migration For Graph based Migration Barrier Analysis
# Start with a lithiated structure
# Fetch a lithiated structure from the Materials Project (requires an API key).
from pymatgen import MPRester, Structure
struct = MPRester().get_structure_by_material_id("mp-770533")
# Basic usage of the FullPathMapper
from pymatgen_diffusion.neb.full_path_mapper import FullPathMapper
# instantiate the full path mapper; all hops with distance below a cutoff of 4 Angstroms will be considered.
fpm = FullPathMapper(struct, 'Li', max_path_length=4)
# populate the edges with the MigrationPath objects
fpm.populate_edges_with_migration_paths()
# group the edges together based on equivalence of the MigrationPath objects
fpm.group_and_label_hops()
# The result is a dictionary in the form of:
# ```
# {
# (start_index, end_index, edges_index) : {'hop_label', unique_hop_label}
# }
# ```
#
# The `edge_index` only increments for hops with the same starting and ending site indices, which represent hops to different unit cells.
| examples/neb_path_mapping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PDF->CSV status
# Copyright (C) 2021 ServiceNow, Inc.
#
# This notebook allows you to investigate the status of the pdf->csv conversion for a given dataset.
#
# You will need to change the paths to match those on your system.
import os
import re
import pathlib
# Dataset A is split into two folders, so we have to specify a subfolder for this dataset
# +
DATASET = 'D' # one of: A, B, D
# For dataset A choose a subfolder: '' (all), generic_pdfs_all, has_pdf_dir_all, low_text_pdfs, of_pdf_all
DATASET_SUBFOLDER = ''
# -
# ## Compare pdfs with output .csvs
# +
# Resolve the raw-PDF directory for the selected dataset.
_simple_pdf_dirs = {
    'B': "/nrcan_p2/data/01_raw/20210108",
    'D': '/nrcan_p2/data/01_raw/20201221/doaj',
}
if DATASET == 'A':
    # Dataset A is split across two folders, keyed by the subfolder setting.
    if DATASET_SUBFOLDER == "":
        PDF_DIR = '/nrcan_p2/data/01_raw/20201006/geoscan/raw/pdf'
    else:
        PDF_DIR = f"/nrcan_p2/data/01_raw/20201117/geoscan/raw/extracted/{DATASET_SUBFOLDER}"
elif DATASET in _simple_pdf_dirs:
    PDF_DIR = _simple_pdf_dirs[DATASET]
else:
    raise ValueError('Not handled')
pdfs = list(pathlib.Path(PDF_DIR).rglob('*.pdf'))
# -
len(pdfs)
# +
# Resolve the directory that holds the converted CSVs for the same dataset.
_simple_csv_dirs = {
    'B': '/nrcan_p2/data/02_intermediate/20210108',
    'D': '/nrcan_p2/data/02_intermediate/20201221/doaj',
}
if DATASET == 'A':
    if DATASET_SUBFOLDER == "":
        CSV_DIR = f'/nrcan_p2/data/02_intermediate/20201006/geoscan/pdf/v1_all'
    else:
        CSV_DIR = f'/nrcan_p2/data/02_intermediate/20201117/geoscan/pdf/{DATASET_SUBFOLDER}'
elif DATASET in _simple_csv_dirs:
    CSV_DIR = _simple_csv_dirs[DATASET]
else:
    raise ValueError('Not handled')
csvs = list(pathlib.Path(CSV_DIR).rglob('*.csv'))
# -
len(csvs)
# Paths relative to their roots, then normalized "base" names so a PDF and
# its CSV output can be compared directly.
pdfs_rel = [p.relative_to(PDF_DIR) for p in pdfs]
pdfs_rel[0]
csvs_rel = [c.relative_to(CSV_DIR) for c in csvs]
csvs_rel[0]
# PDFs: flatten subdirectories into '__'-separated names and drop '.pdf'.
pdfs_base = [pathlib.Path(re.sub('/', '__', str(p))).stem for p in pdfs_rel]
pdfs_base[0]
# CSVs: strip '.csv' plus up to two more chained suffixes.
csvs_base = [pathlib.Path(pathlib.Path(c.stem).stem).stem for c in csvs_rel]
csvs_base[0]
# ## Overlap
# Find PDFs that have no corresponding CSV output yet. Membership tests
# against a set are O(1); testing `in csvs_base` (a list) was O(n) per PDF,
# i.e. O(n*m) overall for tens of thousands of files.
csvs_base_set = set(csvs_base)
no_overlap = [(i, pdf_base)
              for i, pdf_base in enumerate(pdfs_base)
              if pdf_base not in csvs_base_set]
len(no_overlap)
# ### Write the outputs to a file
# Dump the unconverted PDF paths (reconstructed from their flattened names).
fout = f'unfinished_files_{DATASET}.txt'
lines = [re.sub('__', '/', base) + '.pdf' + '\n' for _, base in no_overlap]
with open(fout, 'w') as f:
    f.writelines(lines)
# ### Group the outputs (only works for datasets with filenames starting with the file id)
# +
# Histogram of unfinished files by 1000-index buckets of their PDF position.
gs = {}
for idx, _ in no_overlap:
    bucket = int(idx / 1000) * 1000
    gs[bucket] = gs.get(bucket, 0) + 1
gs
# -
# ### Find a particular file and its index
# Look up the index of one specific file by its flattened base name.
target = "ontario__lists__OFR6088"
for idx, base in enumerate(pdfs_base):
    if base == target:
        print(idx, base)
# 2255
no_overlap
| project_tools/notebooks/Preprocessing_and_Dataset_Analysis/PDF to CSV status.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
ReloadProject('deep_learning')
# # My models
# ## 20-20-20
# Try the model that has shape (20, 20, 20).
# Shared batch size: the DQN trains on, and the replay buffer samples,
# batches of this size.
batch_size = 64 # used in qfunc and runner.
# Mountain-car environment with shaped (guided) rewards, scaled by 10.
env = guided_environments.GuidedMountainCar(reward_factor=10.0)
# DQN with three hidden layers of 20 units each.
qfunc = qfunc_impl.DQN(
    model=qfunc_impl.CreateModel(
        state_shape=env.GetStateShape(),
        action_space_size=env.GetActionSpaceSize(),
        hidden_layer_sizes=(20, 20, 20)),
    training_batch_size=batch_size,
    discount_factor=0.99,
)
runner = runner_impl.ExperienceReplayRunner(experience_capacity=100000, experience_sample_batch_size=batch_size)
# %%time
# Train
# Stage 1: pure exploration (epsilon=1.0 means actions are fully random).
logging.ENV.debug_verbosity = 3
policy = policy_impl.GreedyPolicyWithRandomness(epsilon=1.0)
runner.Run(env=env, qfunc=qfunc, policy=policy, num_of_episodes=500)
# %%time
# Train
# Stage 2: anneal exploration down to 30% random actions.
logging.ENV.debug_verbosity = 3
policy = policy_impl.GreedyPolicyWithRandomness(epsilon=0.3)
runner.Run(env=env, qfunc=qfunc, policy=policy, num_of_episodes=500)
# %%time
# Train
# Stage 3: mostly greedy (10% random actions).
logging.ENV.debug_verbosity = 3
policy = policy_impl.GreedyPolicyWithRandomness(epsilon=0.1)
runner.Run(env=env, qfunc=qfunc, policy=policy, num_of_episodes=500)
# %%time
# Test
# Evaluation: fully greedy policy, with more verbose logging.
logging.ENV.debug_verbosity = 4
policy = policy_impl.GreedyPolicy()
runner.Run(env=env, qfunc=qfunc, policy=policy, num_of_episodes=100)
| notebooks/new_learning/mountaincar_v0_guided_more_reward.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''vela'': pipenv)'
# language: python
# name: python37664bitvelapipenvde09592071074af6a70ce3b1ce38af95
# ---
# # Post Here: The Subreddit Suggester
#
# > Worked as a machine learning engineer on a remote, interdisciplinary team to build an app for recommending subreddits.
#
# * toc: true
# * badges: true
# * author: <NAME>
# * comments: true
# * categories: [recommender systems, natural language processing, machine learning]
# * image: images/post_here/post-here-landing-crop.png
# ---
#
#
#
# ---
#
# ## Imports and Configuration
# === General imports === #
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import janitor
# +
# === ML imports === #
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_selection import chi2, SelectKBest
# === NLP Imports === #
from sklearn.feature_extraction.text import TfidfVectorizer
# import spacy
# +
# === Configure === #
# Load spacy language model
# nlp = spacy.load("en_core_web_md")
# Configure pandas display settings
pd.options.display.max_colwidth = 200
# Set random seed
seed = 92
# -
# ---
#
# ## Introduction
# ### The Problem
#
# Reddit is an expansive site. Anyone who has spent any significant amount of time on it knows what I mean. There is a subreddit for seemingly every topic anyone could ever want to discuss or even think about (and many that most do not want to think about).
#
# Reddit is a powerful site; a tool for connecting and sharing information with like- or unlike-minded individuals around the world. When used well, it can be a very useful resource.
#
# On the other hand, the deluge of information that's constantly piling into the pages of the site can be overwhelming and lead to wasted time. As with any tool, it can be used for good or for not-so-good.
#
# A common problem that Redditors experience, particularly those who are relatively new to the site, is where to post content. Given that there are subreddits for just about everything, with wildly varying degrees of specificity it can be quite overwhelming trying to find the best place for each post.
#
# Just to illustrate the point, some subreddits get _weirdly_ specific. I won't go into the _really_ weird or NSFW, but here are some good examples of what I mean by specific:
#
# * [r/Borderporn](https://www.reddit.com/r/Borderporn/)
# * [r/BreadStapledtoTrees](https://www.reddit.com/r/BreadStapledToTrees/)
# * [r/birdswitharms](https://www.reddit.com/r/birdswitharms/)
# * [r/totallynotrobots](https://old.reddit.com/r/totallynotrobots)
#
# ...need I go on? (If you're curious and/or want to be entertained indefinitely, here is a [thread](https://www.reddit.com/r/AskReddit/comments/dd49gw/what_are_some_really_really_weird_subreddits/) with these and much, much more.)
#
# Most of the time when a post is deemed irrelevant to a particular subreddit, it will simply be removed by moderators or a bot. However, depending on the subreddit and how welcoming they are to newbies, sometimes it can lead to very unfriendly responses and/or bans.
#
# So how does one go about deciding where to post or pose a question?
#
# Post Here aims to take the guesswork out of this process.
# ### The Solution
#
# The goal with the Post Here app, as mentioned, is to provide a tool that makes it quick and easy to find the most appropriate subreddits for any given post. A user would simply provide the title and text of their prospective post and the app would provide the user with a list of subreddit recommendations.
#
# Recommendations are produced by a model attempts to predict which subreddit a given post would belong to. The model was built using Scikit-learn, and was trained on a large dataset of reddit posts. In order to serve the recommendations to the web app, an API was built using Flask and deployed to Heroku.
#
# The live version of the app is linked below.
#
# [Post Here: The Subreddit Suggester](https://github.com/tobias-fyi/post_here_ds)
# ### My Role
#
# I worked on the Post Here app with a remote, interdisciplinary team of data scientists, machine learning engineers, and web developers. I was one of two machine learning engineers on the team, responsible for the entire process of building and training the machine learning models. The two data scientists on the team were primarily responsible for building and deploying the API.
#
# The main challenge we ran into, which directed much of the iterative process, was scope management.
#
# At this point in my machine learning journey, this was one of the larger datasets that I'd taken on. Uncompressed, the dataset we used was over 800mb of mostly natural language text. The dataset and the time constraint—we had less than four full days of work to finish the project—were the primary causes of the challenges we ended up facing.
#
# With such a dataset, one important concept we had to keep in mind was the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality), which is basically a title for the various problems and phenomena that occur when dealing with extremely highly dimensional datasets. When processed, a natural language dataset of this size would likely fall prey to this curse and may prove somewhat unwieldy without large amounts of processing power.
#
# I ended up researching and applying various methods of addressing this problem in order to fit the processing/modeling pipeline on the free Heroku Dyno, with a memory limit of 500mb, while preserving adequate performance. Many of our deployments failed because the pipeline, when loaded into memory on the server, exceeded that limit.
#
# One important tradeoff we had to wrangle with was how much, and in what ways we could limit the dataset—i.e. how many classes to try and predict, and how many observations per class to include when training. The original dataset contains data for 1,000 subreddits. It was not within the scope of a four-day project to build a classification model of a caliber that could accurately classify 1,000 classes.
#
# In the beginning, we did try to build a basic model trained on all 1,000 classes. But with the time and processing power I had, it proved to be untenable. In the end, we settled for a model that classified text into 305 subreddits with a test precision-at-k of .75, .88, and .92 for 'k' of 1, 3, and 5, respectively.
# ---
#
# ## The Data
# The dataset we ended up using to train the recommendation system is called the [Reddit Self-Post Classification Task dataset](https://www.kaggle.com/mswarbrickjones/reddit-selfposts), available on Kaggle thanks to Evolution AI. The full dataset clocks in at over 800mb, containing 1,013,000 rows: 1,000 posts each from 1,013 subreddits.
#
# The data was posted to reddit between June 2016 and June 2018.
#
# [more info from the article]
#
# For more details on the dataset, including a nice interactive plot of all of the subreddits, refer to Evolution AI's [blog post](https://evolution.ai//blog/page/5/an-imagenet-like-text-classification-task-based-on-reddit-posts/).
# ### Wrangling and Exploration
#
# As seems to be common with NLP projects, the process of wrangling the data was very much intertwined with the modeling process. Of course, this could be said about any machine learning project. However, I feel like it is particularly so in the case of NLP.
#
# Therefore, this section—the one dedicated to data wrangling only—will be rather brief and basic. I go into much more detail in the Modeling section.
#
# First, I needed to reduce the size of the dataset. I defined a subset of 12 categories which I thought were most relevant to the task at hand, and used that list to do the initial pruning. Those 12 categories left me with 305 unique subreddits and 305,000 rows. The list I used was as follows:
#
# * health
# * profession
# * electronics
# * hobby
# * writing/stories
# * advice/question
# * social_group
# * stem
# * parenting
# * books
# * finance/money
# * travel
#
# Next, I took a random sample of those 305,000 rows. The result was a dataset with 91,500 rows, now consisting of between 250 and 340 rows per subreddit. If I tried to use all of the features (tokens, or words) that resulted from this corpus, even in its reduced state, it would still result in a serialized vocabulary and/or model too large for our free Heroku Dyno. However, the features used in the final model can be chosen based on how useful they are for the classification.
#
# According to the dataset preview on Kaggle, there are quite a large number of missing values in each of the features—12%, 25%, and 39% of the subreddit, title, and selftext columns, respectively. However, I did not find any sign of those null values in the dataset nor mention of them in the dataset's companion blog post or article. I chalked it up to an error in the Kaggle preview.
#
# Finally, I went about doing some basic preprocessing to get the data ready for vectorization. As described in the description page on Kaggle, newline and tab characters were replaced with their HTML equivalents, `<lb>` and `<tab>`. I removed those and other HTML entities using a simple regular expression. I also concatenated `title` and `selftext` into a single text feature in order to process them together.
# === Load the dataset === #
# Tab-separated self-post corpus: one row per reddit post.
rspct = pd.read_csv("assets/data/rspct.tsv", sep="\t")
print(rspct.shape)
rspct.head(3)
# #### Nulls
#
# Kaggle says that 12%, 25%, and 39% of the subreddit, title, and selftext columns are null, respectively. If that is indeed the case, they did not get read into the dataframe correctly. However, it could be an error on Kaggle's part, seeing as there is no mention of these anywhere else in the description or blog post or article, nor sign of them during my explorations.
# === Null values === #
rspct.isnull().sum()
# #### Preprocess
#
# To prune the list of subreddits, I'll load in the `subreddit_info.csv` file, join, then choose a certain number of categories (category_1) to filter on.
# === Load info === #
# Load the subreddit metadata (category labels) and merge it onto the posts.
info_cols = ["subreddit", "category_1", "category_2"]
info = pd.read_csv("assets/data/subreddit_info.csv", usecols=info_cols)
print(info.shape)
info.head()
# Inner-join on subreddit name; the raw post id is no longer needed.
rspct = rspct.merge(info, on="subreddit").drop(columns=["id"])
print(rspct.shape)
rspct.head()
# Still no nulls after the merge — a good sign.
rspct.isnull().sum()
# Distribution of top-level categories.
rspct["category_1"].value_counts()
# +
# === Define list of categories to keep === #
# Keep only the categories relevant to the recommendation task.
keep_cats = [
    "health", "profession", "electronics", "hobby", "writing/stories",
    "advice/question", "social_group", "stem", "parenting", "books",
    "finance/money", "travel",
]
# Prune the dataset to the above categories (overwriting to save memory).
category_mask = rspct["category_1"].isin(keep_cats)
rspct = rspct[category_mask]
print(rspct.shape)
print("Unique subreddits:", len(rspct["subreddit"].unique()))
rspct.head(2)
# -
# === Take a sample of that === #
# Down-sample to 30% of rows for tractability (seeded for reproducibility).
rspct = rspct.sample(frac=.3, random_state=seed)
print(rspct.shape)
rspct.head()
# +
# === Clean up a bit === #
# Concatenate title and selftext
rspct["text"] = rspct["title"] + " " + rspct["selftext"]
# Drop the label helpers and the now-merged source columns.
rspct = rspct.drop(columns=["category_1", "category_2", "title", "selftext"])
print(rspct.shape)
rspct.head(2)
# -
# === Remove <lb>, <tab>, and other HTML entities === #
# NOTE: takes a couple minutes to run
# Strip the HTML-entity markers introduced by the dataset's preprocessing.
# The original pattern starred every alternative ("(<lb>)*|(<tab>)*|..."),
# which also matches the empty string at every position — plain alternation
# removes the same markers. regex=True is passed explicitly because the
# default flipped to False in pandas 2.0, which would otherwise treat the
# pattern as a literal string.
rspct["text"] = rspct["text"].str.replace(r"<lb>|<tab>|&|nbsp;", "", regex=True)
rspct.head()
# === Reset the index === #
# Reset to a clean RangeIndex after filtering and sampling.
rspct = rspct.reset_index(drop=True)
rspct.head(2)
# === Save pruned dataset to file === #
rspct.to_csv("assets/data/rspct_small.csv")
# === List of subreddits === #
# The remaining target classes and their per-class counts.
subreddits = rspct["subreddit"].unique()
print(len(subreddits))
subreddits[:50]
rspct["subreddit"].value_counts()
# ---
#
# ## Modeling
# ### Label Encoding
# +
# === Encode the target using LabelEncoder === #
# This process naively transforms each class of the target into a number
# Encode subreddit names as integer class ids. The encoder is fitted on the
# training labels only, then applied to all three splits.
le = LabelEncoder()
le.fit(y_train)
y_train, y_val, y_test = (le.transform(split) for split in (y_train, y_val, y_test))
y_train[:8]
# -
# ### Vectorization
#
# A vectorizer is used to extract numerical features (information) from a corpus of natural language text. I used a [bag-of-words](https://en.wikipedia.org/wiki/Bag-of-words_model) method of vectorization, which for the most part, disregards grammar.
#
# The output of this vectorizer is a document-term matrix, with the documents (observations, or rows) on one axes and the terms (words, bigrams) on the other. This matrix can be thought of as a sort of vocabulary, or text-number translator.
#
# At first, the "vocabulary" derived from the corpus using the vectorizer was the largest object when serialized. Luckily, there are many options and parameters available to reduce its size, most of which are simply different methods for reducing the number of features (terms) it contains.
#
# One option is to put a hard limit of 100,000 on the number of features in the vocabulary. This is a simple, naive limit on the generated features, and thus, the resulting vocabulary size.
#
# I decided to remove stopwords before vectorization in hopes that this would reduce the size of the vector vocabulary. To my initial surprise, removing the stop words (using [NLTK](https://www.nltk.org/)'s list) actually increased the size of the serialized vocab from 59mb to 76mb.
#
# After some consideration, I found this to be a reasonable result. I figured that many of the stop words are short ("I", "me", "my", etc.), and their removal caused the average length of words (and therefore bigrams as well) in the vocab to increase. While this may not account for the entirety of the difference, this provides some intuition as to why there is a difference.
#
# Although the vocab without stop words was larger, I ended up using it anyways because it provided an extra ~0.01 in the precision-at-k score of the final model.
# +
# === Look at lengths of stop words === #
# Quantify how short the stop words are, to support the hypothesis that
# removing them increases average token length (and thus vocab size).
# BUG FIX: the accumulator was named `three_or_below` although the
# condition checks `<= 4`; renamed to match the actual threshold.
lengths = []         # length of every stop word
four_or_fewer = []   # lengths of stop words with <= 4 characters
for word in stop_words:
    lengths.append(len(word))
    if len(word) <= 4:
        four_or_fewer.append(len(word))
print(f"There are {len(stop_words)} stop words in the list.")
print(f"{len(four_or_fewer)} are 4 chars long or shorter.")
print(f"Average length is: {np.mean(lengths)}.")
# +
# === Vectorize! === #
# Extract features from the text data using bag-of-words method
# (TF-IDF weighted unigrams and bigrams).
tfidf = TfidfVectorizer(
    max_features=100000,  # hard cap on vocabulary size (see discussion above)
    min_df=10,  # drop terms appearing in fewer than 10 documents
    ngram_range=(1,2),  # unigrams and bigrams
    stop_words=stop_words, # Use nltk's stop words
)
# Fit the vectorizer on the feature column to create vocab (doc-term matrix)
# NOTE: fit() returns the fitted vectorizer itself, so `vocab` is the
# same object as `tfidf`.
vocab = tfidf.fit(X_train)
# Get sparse document-term matrices
X_train_sparse = vocab.transform(X_train)
X_val_sparse = vocab.transform(X_val)
X_test_sparse = vocab.transform(X_test)
# Display shapes (notebook cell output)
X_train_sparse.shape, X_val_sparse.shape, X_test_sparse.shape
# -
# ### Feature Selection
#
# As mentioned previously, the size of the corpus means the dimensionality of the featureset after vectorization will be very high. I passed in 100,000 as the maximum number of features to the vectorizer, limiting the initial size of the vocab. However, the features would have to be reduced more before training the model, as it is generally not good practice to have a larger number of features (100,000) than observations (91,500).
#
# To reduce it down from that 100,000, I used a process called select k best, which does exactly what it sounds like: selects a certain number of the best features. The key aspect of this process is how to measure the value of the features; how to find which ones are the "best". The scoring function I used in this case is called [ch2](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.chi2.html) (chi-squared).
#
# This function calculates chi-squared statistics between each feature and the target, measuring the dependence, or correlation, between them. The intuition here is that features which are more correlated with the target are more likely to be useful to the model.
#
# I played around with some different values for the maximum number of features to be selected. Ultimately, I was once again limited by the size of the free Heroku Dyno, and settled on 20,000. This allowed the deployment to go smoothly while retaining enough information for the model to have adequate performance.
# + _uuid="27ddcca6c2af541e94133222961a83c11f54208e"
# === Feature Selection === #
# Keep only the 20,000 vocabulary terms most correlated with the target,
# scored by the chi-squared statistic between each term and the labels.
selector = SelectKBest(chi2, k=20000)
# Score features on training data only, to avoid leaking val/test info
selector.fit(X_train_sparse, y_train)
X_train_select = selector.transform(X_train_sparse)
X_val_select = selector.transform(X_val_sparse)
X_test_select = selector.transform(X_test_sparse)
# Display reduced shapes (notebook cell output)
X_train_select.shape, X_val_select.shape, X_test_select.shape
# -
# ### Model validation
#
# In this case, the model has a target that it is attempting to predict—a supervised problem. Therefore, the performance can be measured on validation and test sets.
#
# To test out the recommendations I copied some posts and put them through the prediction pipeline to see what kinds of subreddits were getting recommended. For the most part, the predictions were decent.
#
# The cases where the recommendations were a little less than ideal happened when I pulled example posts from subreddits that were not in the training data. The model generally did a good job recommending similar subreddits.
# #### Baseline
#
# For the baseline model, I decided to go with a basic random forest. This choice was somewhat arbitrary, though I was curious to see how a random forest would do with such a high target cardinality (number of classes/categories).
#
# The baseline precision-at-k metrics for the random forest on the validation set were .54, .63, and .65, for 'k' of 1, 3, and 5, respectively.
# + _uuid="8d31738daa5d0382761477f16e79d1800ac6f730"
# === Evaluate performance using precision-at-k === #
def precision_at_k(y_true, y_pred, k=5):
    """Fraction of rows whose true label is among the k classes with the
    highest predicted probability.

    y_true: 1-D array-like of integer class labels.
    y_pred: 2-D array-like of per-class scores, one row per sample.
    k:      number of top-ranked classes to consider (default 5).
    """
    labels = np.array(y_true)
    # Rank classes per row from highest to lowest score, keep the top k
    ranked = np.array(y_pred)
    ranked = np.argsort(ranked, axis=1)[:, ::-1]
    top_k = ranked[:, :k]
    hits = [label in row for label, row in zip(labels, top_k)]
    return np.mean(hits)
# + _uuid="6d317a155229fa60c6241e7b8d2355fb1cba9d43"
# === Baseline RandomForest model === #
# n_jobs=-1 trains trees on all available cores
rfc = RandomForestClassifier(max_depth=32, n_jobs=-1, n_estimators=200)
rfc.fit(X_train_select, y_train)
# + _uuid="6d317a155229fa60c6241e7b8d2355fb1cba9d43"
# === Create predictions for validation set === #
# One probability per class, per validation sample
y_pred_proba_rfc = rfc.predict_proba(X_val_select)
# === For each prediction, find the index with the highest probability === #
y_pred_rfc = np.argmax(y_pred_proba_rfc, axis=1)
# Peek at the first few hard predictions (notebook cell output)
y_pred_rfc[:10]
# + _uuid="6d317a155229fa60c6241e7b8d2355fb1cba9d43"
# === Evaluate precision at k for validation === #
# precision@1 is just plain accuracy (top prediction equals true label)
print("Validation scores:")
print(" precision@1 =", np.mean(y_val == y_pred_rfc))
print(" precision@3 =", precision_at_k(y_val, y_pred_proba_rfc, 3))
print(" precision@5 =", precision_at_k(y_val, y_pred_proba_rfc, 5))
# -
# #### Multinomial Naive Bayes
#
# Multinomial naive Bayes is a probabilistic learning method for multinomially distributed data, and one of two classic naive Bayes algorithms used for text classification. I decided to iterate with this algorithm because it is meant for text classification tasks.
#
# The precision-at-k metrics for the final Multinomial naive Bayes model on the validation set were .76, .88, and .9188, for 'k' of 1, 3, and 5, respectively. Performance on the test set was nearly identical: .75, .88, and .9159.
# + _uuid="6d317a155229fa60c6241e7b8d2355fb1cba9d43"
# === Naive Bayes model === #
# alpha is the additive (Laplace/Lidstone) smoothing parameter
nb = MultinomialNB(alpha=0.1)
nb.fit(X_train_select, y_train)
# -
# #### Evaluate on validation set
# + _uuid="6d317a155229fa60c6241e7b8d2355fb1cba9d43"
# === Create predictions for validation set === #
y_pred_proba_val = nb.predict_proba(X_val_select)
# For each prediction, find index with highest probability
y_pred_val = np.argmax(y_pred_proba_val, axis=1)
# Peek at the first few hard predictions (notebook cell output)
y_pred_val[:10]
# -
# === Evaluate precision at k for validation === #
# precision@1 is plain accuracy; @3/@5 use the ranked probabilities
print("Validation scores:")
print(" precision@1 =", np.mean(y_val == y_pred_val))
print(" precision@3 =", precision_at_k(y_val, y_pred_proba_val, 3))
print(" precision@5 =", precision_at_k(y_val, y_pred_proba_val, 5))
# #### Evaluate on test set
# + _uuid="6d317a155229fa60c6241e7b8d2355fb1cba9d43"
# === Create predictions for test set === #
y_pred_proba_test = nb.predict_proba(X_test_select)
# For each prediction, find index with highest probability
y_pred_test = np.argmax(y_pred_proba_test, axis=1)
y_pred_test[:10]
# -
# === Evaluate precision at k for test === #
print("Test scores:")
print(" precision@1 =", np.mean(y_test == y_pred_test))
print(" precision@3 =", precision_at_k(y_test, y_pred_proba_test, 3))
print(" precision@5 =", precision_at_k(y_test, y_pred_proba_test, 5))
# ### Recommendations
#
# The API should return a list of recommendations, not a single prediction. To accomplish this, I wrote a function that returns the top 5 most likely subreddits and their respective probabilities.
# +
# === Function to serve predictions === #
# The main functionality of the predict API endpoint
def predict(title: str, submission_text: str, return_count: int = 5):
    """
    Serve subreddit predictions.
    Parameters
    ----------
    title : string
        Title of post.
    submission_text : string
        Selftext that needs a home.
    return_count : integer
        The desired number of recommendations.
    Returns
    -------
    Python dictionary formatted as follows:
    {'predictions': [{'name': 'PLC', 'proba': 0.014454},
                     ...
                     {'name': 'Rowing', 'proba': 0.005206}]}
    """
    # Concatenate title and post text.
    # BUG FIX: join with a space so the last word of the title and the
    # first word of the body are not fused into one token by the vectorizer.
    fulltext = str(title) + " " + str(submission_text)
    # Vectorize the post -> sparse doc-term matrix
    post_sparse = vocab.transform([fulltext])
    # Keep only the features chosen by the fitted selector
    post_select = selector.transform(post_sparse)
    # Generate predicted probabilities from trained model (1 x n_classes)
    proba = nb.predict_proba(post_select)
    # Wrangle into list-of-records format.
    # NOTE: le.classes_ is wrapped in a list, so the DataFrame columns form
    # a MultiIndex; after .T/.reset_index() the class labels land in the
    # "level_0" column, which is renamed to "name".
    proba_dict = (pd
                  .DataFrame(proba, columns=[le.classes_]) # Classes as column names
                  .T # Transpose so column names become index
                  .reset_index() # Pull out index into a column
                  .rename(columns={"level_0": "name", 0: "proba"}) # Rename for aesthetics
                  .sort_values(by="proba", ascending=False) # Sort by probability
                  .iloc[:return_count] # n-top predictions to serve
                  .to_dict(orient="records")
                  )
    proba_json = {"predictions": proba_dict}
    return proba_json
# +
# === Example science question post (default 5 recommendations) === #
title_science = """Is there an evolutionary benefit to eating spicy food that lead to consumption across numerous cultures throughout history? Or do humans just like the sensation?"""
post_science = """I love spicy food and have done ever since I tried it. By spicy I mean HOT, like chilli peppers (we say spicy in England, I don't mean to state the obvious I'm just not sure if that's a global term and I've assumed too much before). I love a vast array of spicy foods from all around the world. I was just wondering if there was some evolutionary basis as to why spicy food managed to become some widely consumed historically. Though there seem to
It way well be that we just like a tingly mouth, the simple things in life."""
science_recs = predict(title_science, post_science)
science_recs
# +
# === Test post from r/buildmeapc === #
title_pc = """Looking for help with a build"""
post_pc = """I posted my wants for my build about 2 months ago. Ordered them and when I went to build it I was soooooo lost. It took 3 days to put things together because I was afraid I would break something when I finally got the parts together it wouldn’t start, I was so defeated. With virtually replacing everything yesterday it finally booted and I couldn’t be more excited!"""
# Ask for 10 recommendations instead of the default 5
post_pc_recs = predict(title_pc, post_pc, 10)
post_pc_recs
# +
# === Example post from 'r/learnprogramming' === #
post_title = """What to do about java vs javascript"""
post = """I am a new grad looking for a job and currently in the process with a company for a junior backend engineer role. I was under the impression that the position was Javascript but instead it is actually Java. My general programming and "leet code" skills are pretty good, but my understanding of Java is pretty shallow. How can I use the next three days to best improve my general Java knowledge? Most resources on the web seem to be targeting complete beginners. Maybe a book I can skim through in the next few days?
Edit:
A lot of people are saying "the company is a sinking ship don't even go to the interview". I just want to add that the position was always for a "junior backend engineer". This company uses multiple languages and the recruiter just told me the incorrect language for the specific team I'm interviewing for. I'm sure they're mainly interested in seeing my understanding of good backend principles and software design, it's not a senior lead Java position."""
# === Test out the function === #
post_pred = predict(post_title, post) # Default is 5 results
post_pred
# +
# === Test it out with another dummy post === #
title_book = "Looking for books with great plot twists"
# This one comes from r/suggestmeabook
post2 = """I've been dreaming about writing my own stort story for a while but I want to give it an unexpected ending. I've read lots of books, but none of them had the plot twist I want. I want to read books with the best plot twists, so that I can analyze what makes a good plot twist and write my own story based on that points. I don't like romance novels and I mostly enjoy sci-fi or historical books but anything beside romance novels would work for me, it doesn't have to be my type of novel. I'm open to experience after all. I need your help guys. Thanks in advance."""
# === This time with 10 results === #
post2_pred = predict(title_book, post2, 10)
post2_pred
# -
# ### Model deployment
#
# As mentioned, the model, vocab, and feature selector were all serialized using Python's pickle module. In the Flask app, the pickled objects are loaded and ready for use, just like that.
#
# I will go over the details of how the Flask app was set up in a separate blog post.
# +
# === Create pickle func to make pickling (a little) easier === #
def picklizer(to_pickle, filename, path):
    """
    Serialize an object to ``path/filename`` using pickle.

    Parameters
    ----------
    to_pickle : Python object
        The trained / fitted instance of the
        transformer or model to be pickled.
    filename : string
        The desired name of the output file, including any
        extension (e.g. ``"model.pkl"``); nothing is appended.
    path : string or path-like object
        The path to the desired output directory.
    """
    import os
    import pickle

    # Build the full destination path and write the pickled bytes
    destination = os.path.join(path, filename)
    with open(destination, "wb") as handle:
        pickle.dump(to_pickle, handle)
# +
# === Picklize! === #
# Serialize every piece of the prediction pipeline; the Flask app
# loads these four files at startup. Filenames include the .pkl
# extension because picklizer does not append one.
filepath = "./assets/pickles" # Change this accordingly
# Create directory if doesn't exist
os.makedirs(filepath, exist_ok=True)
# Export LabelEncoder as pickle
picklizer(le, "le.pkl", filepath)
# Export selector as pickle
picklizer(selector, "selector.pkl", filepath)
# Export vectorizer as pickle
picklizer(vocab, "vocab.pkl", filepath)
# Export naive bayes model as pickle
picklizer(nb, "nb.pkl", filepath)
# -
# ---
#
# ## Final Thoughts
#
# For me, the most important and valuable aspects of this project were mainly surrounding the challenge of scope management. I constantly had to ask myself, "What is the best version of this I can create given our limitations?"
#
# At first, I thought it would be feasible to predict all of the 1,000+ subreddits in the data, and wasted hours of valuable time attempting to do so. While I had tested various strategies of reducing the complexity of the model, the performance was rather terrible when it was trained on 100 or fewer examples of each subreddit in the complete list.
#
# The data scientist who I primarily worked with (we had one data scientist in addition to him and one other machine learning engineer, both of whom did not contribute significantly to the project) kept telling me that I should try reducing the number of classes first, allowing for more examples of each class and fewer classes for the model to predict.
#
# Ultimately, this is the strategy that worked best, and I wasted valuable time by not listening to him the first few times he recommended that strategy. Good teamwork requires the members being humble and listening, something that I have taken to heart since the conclusion of this project.
#
# ### Scope Management, Revisited
#
# As time was very short while building this initial recommendation API, there are many things that we wished we could have done but simply did not have the time. Here are a few of the more obvious improvements that could be made.
#
# The first, and most obvious one, is to simply deploy to a more powerful server, such as one hosted on AWS Elastic Beanstalk or EC2. This way, we could use the entire dataset to train an optimal model without worrying (as much) about memory limits.
#
# Second, I could use a Scikit-learn pipeline to validate and tune hyperparameters using cross-validation, instead of a separate validation set. Also, this pipeline could be serialized as a single large object, rather than as separate pieces (encoder, vectorizer, feature selector, and classifier). As a final note for this particular train of thought, [Joblib](https://joblib.readthedocs.io/en/latest/) could potentially provide more efficient serialization than the Pickle module, allowing a more complex pipeline to be deployed on the same server.
#
# Third, a model could've been trained to classify the input post first into a broad category. Then, some sort of model could be used to classify into a specific subreddit within that broad category. I'm not sure about the feasibility of the second part of this idea, but thought it could be an interesting one to explore.
#
# Lastly, different classes and calibers of models could have been tested for use in the various steps in the pipeline. In this case, I'm referring primarily to using deep learning/neural networks. For example, word vectors could be generated with word embedding models such as Word2Vec. Or the process could be recreated with a library like PyTorch, and a framework like [FastText](https://github.com/facebookresearch/fastText/).
#
# I plan to explore at least some of these in separate blog posts.
#
# As always, thank you for reading! I'll see you in the next one.
#
#
#
#
#
#
#
#
#
| ds/projects/post_here/post_here_2-small_full.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ! pip install xgboost
from numpy import loadtxt #for importing the dataset (pima indian diabetes)
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import warnings
warnings.filterwarnings('ignore')
# ## XGBOOST
# Load the Pima Indians diabetes CSV: 8 numeric features + binary label
dataset = loadtxt("pima-indians-diabetes.data.csv", delimiter = ',')
# +
# spliting the data into x and y
x = dataset[:,0:8]  # feature columns
y = dataset[:,8]    # last column is the 0/1 diabetes label
# +
# Splitting the data into train and test splits
# -
# random_state fixed for reproducibility
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size = 0.33, random_state = 7)
# +
# Building the final model (default XGBoost hyperparameters)
model = XGBClassifier()
model.fit(x_train,y_train)
# +
# Predicting the values on xtest data
y_pred = model.predict(x_test)
# round() maps predicted values to 0/1 class labels
predictions = [round(value) for value in y_pred]
# -
predictions
# +
#Calculating accuracy
accuracy = accuracy_score(y_test, predictions)
# -
print("Accuracy : %.2f%%" % (accuracy*100))
# Inference : Accuracy of our model is 74.02%
# ## LGBOOST
# ! pip3 install lightgbm
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import lightgbm as lgb
# Reload the same Pima Indians diabetes dataset for the LightGBM run
dataset = loadtxt("pima-indians-diabetes.data.csv", delimiter = ',')
x = dataset[:,0:8]
y = dataset[:,8]
# +
# Splitting the data into train and test splits
# (note: 25% test here vs 33% in the XGBoost section above)
# -
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size = 0.25, random_state = 7)
# +
# LightGBM is supposed to find the records having high gradient and low gradient from the dataset, so we create its dataset wrapper using the lgbm function
d_train = lgb.Dataset(x_train, label = y_train) # Label is nothing but our output variable
# +
# We will define some parameters to be passed. Passing params is optional
params = {}
params['learning_rate'] = 0.03 # Learning rate of the base learners
params['boosting_type'] = 'gbdt' # Gradient boosted decision trees
params['objective'] = 'binary' # Binary because there will be only two classes Y/N in case of classifiers
params['metric'] = 'binary_logloss' # As objective is binary, the evaluation metric is binary log-loss
params['sub_features'] = 0.5 # Fraction of features considered per split: with 8 columns, 4 features are chosen at a time to be split at a node
params['num_leaves'] = 10 # Max leaves per tree (limits model complexity)
params['min_data'] = 50 # Minimum number of samples required in a leaf (presumably meant to interact with GOSS sampling — verify against LightGBM docs)
params['max_depth'] = 10 # Max tree depth
# Train for 100 boosting rounds with the params defined above
model = lgb.train(params, d_train, 100) # num_trees = 100
# LightGBM's binary objective predicts probabilities in [0, 1]
y_pred = model.predict(x_test)
# Threshold at 0.5 via round() to get hard 0/1 class labels
predictions = [round(value) for value in y_pred]
predictions
accuracy = accuracy_score(y_test, predictions)
# BUG FIX: the format spec was "%2f" (minimum field width 2, default 6
# decimals, e.g. "79.166667%"); "%.2f" prints two decimal places as intended.
print("Accuracy : %.2f%%" % (accuracy*100))
# Inference : Accuracy of our model is 79.16%
| XGBM_LGBM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Top 'K' Frequent Numbers (medium)
#
# ```
# Example 1:
#
# Input: [1, 3, 5, 12, 11, 12, 11], K = 2
# Output: [12, 11]
# Explanation: Both '11' and '12' appeared twice.
# Example 2:
#
# Input: [5, 12, 11, 3, 11], K = 2
# Output: [11, 5] or [11, 12] or [11, 3]
# Explanation: Only '11' appeared twice, all other numbers appeared once.
#
# ```
# +
from collections import defaultdict
import heapq
def find_k_frequent_numbers(nums, k):
    """Return the k most frequent numbers in nums, most frequent first.

    Ties are broken by the smaller number first, because the heap orders
    on the tuple (-count, number). Runs in O(N + M log M) time for N
    values with M distinct numbers.

    Fixes over the original: removed the unused `topNumbers` list, and
    the loop now stops early instead of raising IndexError when k
    exceeds the number of distinct values.
    """
    counts = defaultdict(int)
    for n in nums:
        counts[n] += 1

    # Max-heap via negated counts; the number itself is the tie-breaker.
    max_heap = [(-count, n) for n, count in counts.items()]
    heapq.heapify(max_heap)

    res = []
    while len(res) < k and max_heap:
        res.append(heapq.heappop(max_heap)[1])
    return res
def main():
    """Demo driver: print the top-k frequent numbers for two examples."""
    examples = [
        ([1, 3, 5, 12, 11, 12, 11], 2),
        ([5, 12, 11, 3, 11], 2),
    ]
    for nums, k in examples:
        print("Here are the K frequent numbers: " +
              str(find_k_frequent_numbers(nums, k)))


main()
| ed_heaps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
from utils import *
# %load_ext autoreload
# %autoreload 2
# Directory where the trained recognition model checkpoints are saved
MODEL_PATH = "models/recognition/"
# -
# MNIST image geometry: 28x28 grayscale
height = 28
width = 28
channels = 1
n_inputs = height * width  # 784 pixels, flattened
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
# Flatten to (n_samples, 784) and scale pixel values into [0, 1]
X_train = X_train.astype(np.float32).reshape(-1, n_inputs) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, n_inputs) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
# Deliberately keep only a small training subset (the augmentation
# experiment below enlarges it artificially)
nb_samples_taken = 2000
X_train = X_train[:nb_samples_taken]
y_train = y_train[:nb_samples_taken]
# +
# Build the TF1-style static graph: 3 conv layers -> max pool -> FC -> softmax
tf.reset_default_graph()

# Convolution hyperparameters (fmaps = number of filters)
conv1_fmaps = 32
conv1_ksize = 3
conv1_stride = 1

conv2_fmaps = 64
conv2_ksize = 3
conv2_stride = 2  # stride 2 halves spatial size: 28 -> 14

conv3_fmaps = 128
conv3_ksize = 3
conv3_stride = 2  # 14 -> 7

pool3_fmaps = conv3_fmaps

n_fc1 = 128       # fully-connected layer width
n_outputs = 10    # one logit per digit class

with tf.name_scope("inputs"):
    # X arrives flattened (None, 784); reshape to NHWC for the conv layers
    X = tf.placeholder(tf.float32, shape=[None, n_inputs], name="X")
    X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])
    y = tf.placeholder(tf.int32, shape=[None], name="y")

conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize,
                         strides=conv1_stride, padding="SAME",
                         activation=tf.nn.relu, name="conv1")
conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize,
                         strides=conv2_stride, padding="SAME",
                         activation=tf.nn.relu, name="conv2")
conv3 = tf.layers.conv2d(conv2, filters=conv3_fmaps, kernel_size=conv3_ksize,
                         strides=conv3_stride, padding="SAME",
                         activation=tf.nn.relu, name="conv3")

with tf.name_scope("pool3"):
    # 2x2 VALID pooling on the 7x7 conv3 output yields 3x3 feature maps,
    # hence the pool3_fmaps * 3 * 3 flatten below
    pool3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
    pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 3 * 3])

with tf.name_scope("fc1"):
    fc1 = tf.layers.dense(pool3_flat, n_fc1, activation=tf.nn.relu, name="fc1")

with tf.name_scope("output"):
    logits = tf.layers.dense(fc1, n_outputs, name="output")
    Y_proba = tf.nn.softmax(logits, name="Y_proba")

with tf.name_scope("train"):
    # sparse_* variant takes integer class ids directly (no one-hot needed)
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
    loss = tf.reduce_mean(xentropy)
    loss_ = tf.summary.scalar('loss', loss)
    optimizer = tf.train.AdamOptimizer()
    training_op = optimizer.minimize(loss)

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    # NOTE: accuracy_train and accuracy_test are the same computation on
    # the same tensors; they exist separately only to get two distinct
    # TensorBoard summary tags (fed with different data at run time).
    accuracy_train = tf.reduce_mean(tf.cast(correct, tf.float32))
    accuracy_test = tf.reduce_mean(tf.cast(correct, tf.float32))
    accuracy_train_ = tf.summary.scalar('accuracy_train', accuracy_train)
    accuracy_test_ = tf.summary.scalar('accuracy_test', accuracy_test)

with tf.name_scope("init_and_save"):
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
# +
# === Train on the small (2000-sample) dataset and log to TensorBoard === #
n_epochs = 10
batch_size = 100
now = datetime.now()
# Timestamped log dir so each run shows up separately in TensorBoard
logdir = "tf_logs/" + now.strftime("3_conv_layers_%Y%m%d-%H%M%S") + "/"
with tf.Session() as sess:
    # Merge train-side summaries (accuracy + loss); test accuracy is
    # evaluated and logged separately below
    merged = tf.summary.merge([accuracy_train_,loss_])
    tf_tensorboard_writer = tf.summary.FileWriter('./'+logdir, sess.graph)
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # Report accuracy on the last mini-batch and on the full test set
        print(epoch, "Last batch accuracy:", accuracy_train.eval(feed_dict={X: X_batch, y: y_batch}), "Test accuracy:", accuracy_test.eval(feed_dict={X: X_test, y: y_test}))
        # One summary point per epoch for both train and test curves
        summary_str = sess.run(merged, feed_dict={X: X_batch, y: y_batch})
        test_summary_str = sess.run(accuracy_test_, feed_dict={X: X_test, y: y_test})
        tf_tensorboard_writer.add_summary(summary_str, epoch)
        tf_tensorboard_writer.add_summary(test_summary_str, epoch)
    # Persist the trained weights for later reuse
    save_path = saver.save(sess, MODEL_PATH + "model3conv")
tf_tensorboard_writer.close()
# -
# # Testing rotation and zoom
# Visual sanity check of the augmentation transforms on a few samples
samples_to_show = 5
plt.figure(figsize=(8,50)) # not shown in the book
# Un-flatten the images back to (n, 28, 28) for plotting
X_reshaped = tf.reshape(X_train, shape=[-1, height, width])
with tf.Session() as sess:
    # Evaluate the reshape op so X_reshaped becomes a plain numpy array
    X_reshaped = X_reshaped.eval()
# ### Original images
plt.figure(figsize=(15,50)) # not shown in the book
for iteration in range(samples_to_show):
    plt.subplot(samples_to_show, 10, iteration + 1)
    plot_image(X_reshaped[iteration])
# ### Rotation
from scipy.ndimage import rotate
plt.figure(figsize=(15,50)) # not shown in the book
# NOTE(review): `degrees` is assigned but never used — the rotate() call
# below hard-codes 20 degrees. Presumably one of the two was meant to
# change; confirm the intended angle.
degrees = 10
for iteration in range(samples_to_show):
    plt.subplot(samples_to_show, 10, iteration + 1)
    plot_image(rotate(X_reshaped[iteration], 20, reshape=False))
# ### Zoom
plt.figure(figsize=(15,50)) # not shown in the book
for iteration in range(samples_to_show):
    plt.subplot(samples_to_show, 10, iteration + 1)
    plot_image(clipped_zoom(X_reshaped[iteration], 0.7))
# # Increasing dataset
# +
# Build four augmented copies of the training images (two slight zooms,
# two small rotations) and stack them with the originals.
X_train_zoom1 = X_reshaped.copy()
X_train_zoom2 = X_reshaped.copy()
X_train_rotate1 = X_reshaped.copy()
X_train_rotate2 = X_reshaped.copy()

for iteration in range(nb_samples_taken):
    # FIX: the original indexed the undefined name `X_train_zoom` (NameError);
    # each zoomed copy must be transformed in place.
    X_train_zoom1[iteration] = clipped_zoom(X_train_zoom1[iteration], 0.95)
    X_train_zoom2[iteration] = clipped_zoom(X_train_zoom2[iteration], 1.05)

for iteration in range(nb_samples_taken):
    X_train_rotate1[iteration] = rotate(X_train_rotate1[iteration], 8, reshape=False)
    X_train_rotate2[iteration] = rotate(X_train_rotate2[iteration], -8, reshape=False)

X_train_artificially_increased = np.concatenate((X_reshaped, X_train_zoom1, X_train_zoom2, X_train_rotate1, X_train_rotate2), axis=0)
# -

# Flatten back to the (n_samples, n_inputs) layout the network expects.
X_train_artificially_increased = X_train_artificially_increased.astype(np.float32).reshape(-1, n_inputs)
X_train_artificially_increased.shape

# Labels repeat five times: one copy per image set (original + 4 augmented).
y_train_artificially_increased = np.concatenate((y_train, y_train, y_train, y_train, y_train), axis=0)
y_train_artificially_increased.shape
# # Training model with dataset artificially_increased
# +
# Train the 3-conv-layer network on the artificially enlarged dataset.
n_epochs = 10
batch_size = 100

now = datetime.now()
logdir = "tf_logs/" + now.strftime("3_conv_DatasetIncreasedArt__%Y%m%d-%H%M%S") + "/"

with tf.Session() as sess:
    # Train summaries (accuracy + loss) merged; test accuracy logged separately.
    merged = tf.summary.merge([accuracy_train_, loss_])
    tf_tensorboard_writer = tf.summary.FileWriter('./' + logdir, sess.graph)
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train_artificially_increased, y_train_artificially_increased, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # Per-epoch reporting on the last mini-batch and the full test set.
        last_batch_feed = {X: X_batch, y: y_batch}
        test_feed = {X: X_test, y: y_test}
        print(epoch, "Last batch accuracy:", accuracy_train.eval(feed_dict=last_batch_feed), "Test accuracy:", accuracy_test.eval(feed_dict=test_feed))
        summary_str = sess.run(merged, feed_dict=last_batch_feed)
        test_summary_str = sess.run(accuracy_test_, feed_dict=test_feed)
        tf_tensorboard_writer.add_summary(summary_str, epoch)
        tf_tensorboard_writer.add_summary(test_summary_str, epoch)
    save_path = saver.save(sess, MODEL_PATH + "with_dataset_artificially_increased/model3conv")

tf_tensorboard_writer.close()
# -
# ## Building a better network
# +
# Build a deeper network: four conv layers (6 -> 12 -> 24 -> 48 feature maps),
# each followed by batch normalization + ReLU, with two max-pool stages, one
# fully-connected layer with dropout, and a softmax output.
tf.reset_default_graph()

conv1_fmaps = 6
conv1_ksize = 6
conv1_stride = 1
conv2_fmaps = 12
conv2_ksize = 5
conv2_stride = 2
pool1_fmaps = conv2_fmaps
conv3_fmaps = 24
conv3_ksize = 4
conv3_stride = 2
conv4_fmaps = 48
conv4_ksize = 4
conv4_stride = 2
pool2_fmaps = conv4_fmaps

n_fc1 = 200
n_outputs = 10

# Switches batch normalization and dropout between training and inference
# behaviour; defaults to inference (False) when not fed.
in_training_mode = tf.placeholder_with_default(False, shape=[], name='training')
fc1_dropout_rate = 0.5

with tf.name_scope("inputs"):
    X = tf.placeholder(tf.float32, shape=[None, n_inputs], name="X")
    X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])
    X_BN = tf.layers.batch_normalization(X_reshaped, momentum=0.9, training=in_training_mode)
    y = tf.placeholder(tf.int32, shape=[None], name="y")

conv1 = tf.layers.conv2d(X_BN, filters=conv1_fmaps, kernel_size=conv1_ksize,
                         strides=conv1_stride, padding="SAME",
                         activation=None, name="conv1")
conv1_BN = tf.layers.batch_normalization(conv1, momentum=0.9, training=in_training_mode)
conv1_relu = tf.nn.relu(conv1_BN)

conv2 = tf.layers.conv2d(conv1_relu, filters=conv2_fmaps, kernel_size=conv2_ksize,
                         strides=conv2_stride, padding="SAME",
                         activation=None, name="conv2")
conv2_BN = tf.layers.batch_normalization(conv2, momentum=0.9, training=in_training_mode)
conv2_relu = tf.nn.relu(conv2_BN)

# FIX: this max-pool op was confusingly named "conv3" (same as the next conv layer).
pool1 = tf.nn.max_pool(conv2_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID", name="pool1")

conv3 = tf.layers.conv2d(pool1, filters=conv3_fmaps, kernel_size=conv3_ksize,
                         strides=conv3_stride, padding="SAME",
                         activation=None, name="conv3")
conv3_BN = tf.layers.batch_normalization(conv3, momentum=0.9, training=in_training_mode)
conv3_relu = tf.nn.relu(conv3_BN)

conv4 = tf.layers.conv2d(conv3_relu, filters=conv4_fmaps, kernel_size=conv4_ksize,
                         strides=conv4_stride, padding="SAME",
                         activation=None, name="conv4")
conv4_BN = tf.layers.batch_normalization(conv4, momentum=0.9, training=in_training_mode)
conv4_relu = tf.nn.relu(conv4_BN)

with tf.name_scope("pool2"):
    pool2 = tf.nn.max_pool(conv4_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
    # FIX: flatten height * width (shape[1] * shape[2]); the original squared
    # shape[1], which is only correct for square feature maps.
    pool2_flat = tf.reshape(pool2, shape=[-1, pool2_fmaps * int(pool2.shape[1]) * int(pool2.shape[2])])

with tf.name_scope("fc1"):
    fc1 = tf.layers.dense(pool2_flat, n_fc1, activation=tf.nn.relu, name="fc1")
    fc1_drop = tf.layers.dropout(fc1, fc1_dropout_rate, training=in_training_mode)

with tf.name_scope("output"):
    # FIX: feed the dropout output into the logits layer; the original used
    # `fc1` directly, so dropout was created but never applied.
    logits = tf.layers.dense(fc1_drop, n_outputs, name="output")
    Y_proba = tf.nn.softmax(logits, name="Y_proba")

# Batch-norm moving averages (UPDATE_OPS) must run alongside the training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
    loss = tf.reduce_mean(xentropy)
    loss_ = tf.summary.scalar('loss', loss)
    optimizer = tf.train.AdamOptimizer()
    training_op = optimizer.minimize(loss)

with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy_train = tf.reduce_mean(tf.cast(correct, tf.float32))
    accuracy_test = tf.reduce_mean(tf.cast(correct, tf.float32))
    accuracy_train_ = tf.summary.scalar('accuracy_train', accuracy_train)
    accuracy_test_ = tf.summary.scalar('accuracy_test', accuracy_test)

with tf.name_scope("init_and_save"):
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
# -
# ### Training
# +
# Train the 4-conv-layer network on the enlarged dataset; batch norm and
# dropout are switched on via the `in_training_mode` placeholder.
n_epochs = 20
batch_size = 100

now = datetime.now()
logdir = "tf_logs/" + now.strftime("4conv_DatasetIncreasedArt__%Y%m%d-%H%M%S") + "/"

with tf.Session() as sess:
    merged = tf.summary.merge([accuracy_train_, loss_])
    tf_tensorboard_writer = tf.summary.FileWriter('./' + logdir, sess.graph)
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train_artificially_increased, y_train_artificially_increased, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch, in_training_mode: True})
        # Evaluation feeds omit in_training_mode, so it defaults to False
        # (inference-mode batch norm, dropout disabled).
        last_batch_feed = {X: X_batch, y: y_batch}
        test_feed = {X: X_test, y: y_test}
        print(epoch, "Last batch accuracy:", accuracy_train.eval(feed_dict=last_batch_feed), "Test accuracy:", accuracy_test.eval(feed_dict=test_feed))
        summary_str = sess.run(merged, feed_dict=last_batch_feed)
        test_summary_str = sess.run(accuracy_test_, feed_dict=test_feed)
        tf_tensorboard_writer.add_summary(summary_str, epoch)
        tf_tensorboard_writer.add_summary(test_summary_str, epoch)
    save_path = saver.save(sess, MODEL_PATH + "with_dataset_artificially_increased/model4conv")

tf_tensorboard_writer.close()
| .ipynb_checkpoints/digit_recognition-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Midterm review
#
# ## 1- Measurement systems
# ### Goal of experiments
# - __1- Engineering/scientific experimentation__
# - __2- Operational system__
# <img src="img/DIKW_Pyramid.png" width="240">
#
# <img src="img/DAS_FlowChart.jpg" width="240">
#
# ### Units - Unit conversion - Dimensional analysis
# ### Errors and Uncertainties
# <img src="img/AccuracyVsPrecision_b.png" width="240">
# ## 2- Dynamic response of measurement systems
#
# \begin{align*}
# a_n\frac{d^ny}{dt^n} + a_{n-1}\frac{d^{n-1}y}{dt^{n-1}}x + \dots + a_1 \frac{dy}{dt} + a_0 y = F(t)
# \end{align*}
#
# ### 0$^{\mathrm{th}}$ order system
#
# \begin{align*}
# y(t) = K F(t) = \frac{1}{a_0} F(t) \quad \text{$K=1/a_0$ is called the _static sensitivity_ or _steady-state gain_}
# \end{align*}
#
# ### 1$^{\mathrm{st}}$ order system
#
# \begin{align*}
# a_1 \frac{dy}{dt} + a_0 y = F(t) \quad
# \tau \frac{dy}{dt} + y = K F(t)
# \end{align*}
# with $\tau = a_1/a_0$ the time constant of the system.
#
# ### 2$^{\mathrm{nd}}$ order system
#
# \begin{align*}
# a_2 \frac{d^2y}{dt^2} + a_1 \frac{dy}{dt} + a_0 y = F(t) \quad
# \frac{1}{\omega^2_n} \frac{d^2y}{dt^2} + \frac{2\zeta}{\omega_n} \frac{dy}{dt} + y = K F(t)
# \end{align*}
# ## 3- Digital data acquisition
# ### Binary to decimal conversion
#
# ### ADC system
# __Resolution__
# \begin{align*}
# \Delta V = \frac{V_{max} - V_{min}}{2^N}
# \end{align*}
# N : number of bits
#
# __Quantization error__
# \begin{align*}
# Q = \pm \frac{1}{2} \Delta V = \pm \frac{V_{max} - V_{min}}{2^{N+1}}
# \end{align*}
#
# __Clipping__
# ### Discrete Sampling
#
# ### Aliasing
# The Nyquist–Shannon sampling theorem states:
# >_If a function, $y(t)$, contains no frequencies greater than $f_{signal}(MAX)$, it is completely determined if it is sampled with a sampling frequency $f_s$ such that:_
# \begin{align*}
# f_s > 2 f_{signal}(MAX)
# \end{align*}
# > if $f_s \ngtr 2f_{signal}(MAX)$
# \begin{align*}
# f_a = \left| f - f_s \cdot \mathrm{NINT}\left( \frac{f}{f_s} \right) \right|
# \end{align*}
#
# ### Fourier Series
# \begin{align*}
# y(t) = \frac{A_0}{2} + \sum_{n=1}^{\infty} \left( A_n \cos \left(\frac{2\pi nt}{T}\right) + B_n \sin \left(\frac{2\pi nt}{T}\right) \right)
# \end{align*}
# - __Fundamental harmonic/frequency__: $n=1$, noted $\omega_0=2\pi/T$ or $f_0 = 1/T$.
# - $T$: signal period
# - Frequencies of $\sin$ and $\cos$ are multiple of the fundamental frequency $\omega$: __$n^{th}$ harmonic__.
# - $A_n$, $B_n$: Fourier coefficients
# - $A_0/2$: mean of the function
#
# ### Fourier Transform, DFT, FFT
# \begin{align*}
# Y(k \Delta f) = \sum_{n=0}^{N-1} y(n \Delta t) e^{-i (2\pi k \Delta f)(n\Delta t)} \qquad k = 0,\,1,\,2, \dots , \, N-1
# \end{align*}
#
# \begin{array}{ll}
# N & \text{number of samples taken over sampling interval $T_s$}\\
# T_s & T_s = N\Delta t\quad\text{total sampling time}\\
# \Delta f & \text{frequency increment/resolution}\\
# & \Delta f = 1/T_s = 1/N\Delta t= f_s/N\\
# \Delta t & \text{time increment (or sampling period)}\\
# & \Delta t = T_s/N\\
# f_s & \text{sampling rate}\\
# & f_s = 1/\Delta t = N/T_s\\
# f_{max} & f_{max} = f_{folding} = f_s/2 = N/2 \cdot \Delta f \quad \text{max resolved frequency}
# \end{array}
#
# ### Leakage
# ### Windowing
# ### Anti-aliasing filter
#
# ### Analog signal reconstruction
# If an analog signal is sampled properly: reconstruct the analog signal with the _cardinal series_.
# ## 4- Signal Conditioning
#
# ### Basic electronics
#
# ### Filters
# #### $1^{\mathrm{st}}$ order low-pass filter
#
# \begin{align*}
# \omega_{cutoff} & = \frac{1}{RC} \quad f_{cutoff} = \frac{\omega_{cutoff}}{2 \pi} = \frac{1}{2\pi RC} \\
# G & = \frac{|V_{out}|}{|V_{in}|} = \frac{1}{\sqrt{1+ \left( \frac{f}{f_{cutoff}}\right)^2 }} \quad
# \phi = -\arctan \left( \frac{f}{f_{cutoff}} \right)
# \end{align*}
# #### Low-pass filter of order $n$
# \begin{align*}
# G = \frac{1}{\sqrt{1+ \left( \frac{f}{f_{cutoff}}\right)^{2n} }}
# \end{align*}
# #### $1^{\mathrm{st}}$ order high-pass filter
# \begin{align*}
# G = \frac{1}{\sqrt{1+ \left( \frac{f_{cutoff}}{f}\right)^2 }} \quad
# \phi = \arctan \left( \frac{f_{cutoff}}{f} \right)
# \end{align*}
#
# #### high-pass filter of order $n$
# \begin{align*}
# G = \frac{1}{\sqrt{1+ \left( \frac{f_{cutoff}}{f}\right)^{2n} }}
# \end{align*}
#
#
# ### Op-Amps
# ideal op-amps vs real effects (impedance loading, GBP, CMRR)
# ## 5- Design Stage uncertainty analysis
#
# ### Expected uncertainty
# RSS: root of the sum of the squares
#
# ### Combining elemental uncertainties
# \begin{align}
# u_{sys} = \sqrt{\sum_i u_{e,i}^2} \quad \text{with $u_{e,i}$ elemental error}
# \end{align}
#
# ### Overall uncertainty
# \begin{align}
# u_{overall} = \sqrt{\sum_j u_{sys,j}^2} \quad \text{with $u_{sys,j}$ error of each component}
# \end{align}
#
#
# ### Uncertainty of a result
# \begin{align}
# R = f(x_1, x_2, \dots) \quad u_R = \sqrt{\sum_i \left( u_{x_i} \frac{\partial f}{\partial x_i} \right)^2}
# \end{align}
# If f is polynomial
# \begin{align}
# f = x_1^{a_1} \cdot x_2^{a_2} \cdot \dots \quad \frac{u_R}{R} = \sqrt{\sum_i \left( \frac{u_{x_i}}{x_i} a_i \right)^2}
# \end{align}
#
# ## Sample Problems
# ### Quiz
# > What are the major elements of a data acquisition/measurement system?
#
#
# > How can you check for clipping in your acquired data? How could you correct for it?
#
# > When acquiring a repeatable signal of fixed frequency with a data acquisition system, which test can you use to determine if you have aliasing in your data?
#
# > In digital data acquisition, what is leakage? How will it manifest itself on a frequency spectrum?
#
# > Propose at least one strategy to minimize leakage.
#
# > How should you select the sampling frequency and number of samples acquired with digital data acquisition system to have the best possible frequency resolution?
#
# > A signal consists of a sine wave of frequency $f$ = 30 Hz. Data are sampled discretely at sampling frequency $f_s$ = 49 Hz. Is there aliasing? Calculate the apparent frequency in Hz.
# ### Filter
# A beam vibrates at 100 Hz. The vibrations are measured with a strain gage and Wheatstone bridge. The output voltage of the sensor is $V_{out} = 2 V_s \epsilon$, with $V_s=5.00$ V the supply voltage and $\epsilon=\delta L/L$ the strain. The strain is in the range of $\pm 5\times 10^{-4}$. In addition, you know that:
#
# > there is noise on the input: $f_{noise}$ = 40,000 Hz with amplitude 0.100 mV.
#
# > The output is sent to your NI USB-6008 DAQ board, which has a 12bit A/D converter with a range of $\pm$ 5V .
#
# > All op-amps have a GBP (non-inverting) of 1.0 MHz.
#
# Propose a scheme to condition the signal (amplify if necessary and filter the noise out).
# _Solution_: Here we have to design the signal conditioning step of the measurement system. This has two steps:
# - 1 select amplifier gain (if necessary) so that we pair the DAQ and Wheatstone bridge well.
# - 2 select cutoff frequency and order of filter to effectively remove the noise from the signal. Effective filtering means that amplitude of the noise should be less than DAQ (ADC) resolution and carrier component of signal is not significantly affected.
# __step 1__: First we need to determine what is the amplitude of the carrier signal.
import numpy

# Carrier: sensor output V_out = 2 * V_s * epsilon with V_s = 5.00 V and
# |epsilon| up to 5e-4, i.e. a 5.00 mV amplitude at 100 Hz.
A_c = 2 * 5.00 * 0.0005
f_c = 100  # Hz
# Noise superimposed on the input: 0.100 mV at 40 kHz.
A_n = 0.0001
f_n = 40000  # Hz
#V_o = A*numpy.sin(2*numpy.pi*100*t)
# So in summary we have:
# - Carrier signal: $f_c$ = 100 Hz, $A_c$ = 5.00 mV
# - Noise signal: $f_n$ = 40 000 Hz, $A_n$ = 0.100 mV
#
# DAQ has resolution:
# - $\Delta V = (V_M - V_m)/(2^N)$
# ADC resolution: full-scale range (+5 V - (-5 V) = 10 V) divided into 2^12 steps.
deltaV = (10)/2**12
# FIX: '\D' was an invalid escape sequence (DeprecationWarning/SyntaxWarning);
# escaping the backslash prints the same text.
print('\\Delta V = ', deltaV, ' V')
# Here we see that the signal is about twice the resolution of the DAQ. We have to amplify!
#
# We select gain such that amplitude of amplified carrier is 80% of input range of DAS, or 4 V.
# gain of amplifier
# Target: amplify the 5.00 mV carrier to 80% of the DAQ's +/-5 V input range (4 V).
G=4/0.005
print('G = ',G)
# While a gain of 800 is ok from a GBP point of view, we will run into impedance loading issues ($R$ will be either too high compare to $R_{in} = 1 M\Omega$ of real op-amp, or too small with respect to $R_{out} = 1 \Omega$ of real op-amp. Therefore ideal op-amp rules will fail. As a rule of thumb we do not want the gain of amplifier stage to be more than 30.
#
# Let's use two stages.
# use 2 amplifier stages
# Split the total gain G = 800 into two identical stages of sqrt(G) ~ 28.3 each,
# keeping each stage below the ~30 rule-of-thumb limit.
G_i = numpy.sqrt(G)
print('G_i = ', G_i)
# Amplified carrier and noise amplitudes (A_c and A_n are defined in an earlier cell).
A_cA = A_c*G
A_nA = A_n*G
print('amplitude of carrier ',A_cA,' V', 'amplitude of noise ',A_nA, ' V')
# Step 2: Select parameter of Low-pass filter, i.e.:
# - select $f_{co}$, and
# - the order $n$ of LPF.
#
# Again with the goals:
# - $A_n < \Delta V$
# - $A_c$ and $\phi_c$ not changed significantly
#gain of LPF for noise
# Required filter gain at the noise frequency so the amplified noise (A_nA)
# drops below one DAQ resolution step (deltaV); both come from earlier cells.
G_LPF_n = deltaV/A_nA
print(G_LPF_n)
# Therefore the gain of your low pass filter at the frequency of the noise should be about 3%.
#
# Let's select a cutoff frequency of 10 times the carrier frequency and assess its merit.
# n = 1 LPF
f_co = 10*f_c
# First-order low-pass gain at the noise frequency: 1/sqrt(1 + (f/f_co)^2).
G_LPF = 1/numpy.sqrt(1+(f_n/f_co)**2)
print(f_co, G_LPF)
# Gain and phase shift at the carrier frequency (should stay near 1 and 0 deg).
G_LPF = 1/numpy.sqrt(1+(f_c/f_co)**2)
phi = - numpy.arctan(f_c/f_co)*180/numpy.pi #deg
print(G_LPF, phi)
# We have a successful design!
#
# The noise is now below the resolution of the DAS and the carrier frequency has not been affected significantly (0.5% decrease in amplitude, phase of $\approx-6^\circ$).
# ## Dynamic response
# A first-order, passive low-pass (RC) filter is designed to have a cut-off frequency, $f_c = 1$ kHz. Its resistance equals 120 $\Omega$. Determine the filter:
# > time constant (in s)
#
# > capacitance (in F)
#
# > signal magnitude ratio at $f = 3$ kHz.
| Lectures/10_MidTermReview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + hideCode=false hidePrompt=false
import numpy as np
# + hideCode=false hidePrompt=false
# Avoid inaccurate floating values (for inverse matrices in dot product for instance)
# See https://stackoverflow.com/questions/24537791/numpy-matrix-inversion-rounding-errors
# suppress=True prints near-zero floats as 0 instead of scientific notation.
np.set_printoptions(suppress=True)
# + language="html"
# <style>
# .pquote {
# text-align: left;
# margin: 40px 0 40px auto;
# width: 70%;
# font-size: 1.5em;
# font-style: italic;
# display: block;
# line-height: 1.3em;
# color: #5a75a7;
# font-weight: 600;
# border-left: 5px solid rgba(90, 117, 167, .1);
# padding-left: 6px;
# }
# .notes {
# font-style: italic;
# display: block;
# margin: 40px 10%;
# }
# img + em {
# text-align: center;
# display: block;
# color: gray;
# font-size: 0.9em;
# font-weight: 600;
# }
# </style>
# -
# $$
# \newcommand\bs[1]{\boldsymbol{#1}}
# $$
# <span class='notes'>
# This content is part of a series following the chapter 2 on linear algebra from the [Deep Learning Book](http://www.deeplearningbook.org/) by <NAME>., <NAME>., and <NAME>. (2016). It aims to provide intuitions/drawings/python code on mathematical theories and is constructed as my understanding of these concepts. You can check the syllabus in the [introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/).
# </span>
# # Introduction
#
# The dot product is a major concept of linear algebra and thus machine learning and data science. We will see some properties of this operation. Then, we will get some intuition on the link between matrices and systems of linear equations.
# + [markdown] hideCode=false hidePrompt=false
# # 2.2 Multiplying Matrices and Vectors
#
# The standard way to multiply matrices is not to multiply each element of one with each element of the other (called the *element-wise product*) but to calculate the sum of the products between rows and columns. The matrix product, also called **dot product**, is calculated as following:
#
# <img src="images/dot-product.png" width="400" alt="An example of how to calculate the dot product between a matrix and a vector" title="The dot product between a matrix and a vector">
# <em>The dot product between a matrix and a vector</em>
#
# The number of columns of the first matrix must be equal to the number of rows of the second matrix. If the dimensions of the first matrix is ($m \times n$), the second matrix needs to be of shape ($n \times x$). The resulting matrix will have the shape ($m \times x$).
# -
# ### Example 1.
#
# Let's start with the multiplication of a matrix and a vector.
#
# $$\bs{A} \times \bs{b} = \bs{C}$$
#
# with $
# \bs{A}=
# \begin{bmatrix}
# 1 & 2\\\\
# 3 & 4\\\\
# 5 & 6
# \end{bmatrix}
# $ and $\bs{b}=\begin{bmatrix}
# 2\\\\
# 4
# \end{bmatrix}$.
#
# We saw that the formula is the following:
#
# $$
# \begin{align*}
# &\begin{bmatrix}
# A_{1,1} & A_{1,2} \\\\
# A_{2,1} & A_{2,2} \\\\
# A_{3,1} & A_{3,2}
# \end{bmatrix}\times
# \begin{bmatrix}
# B_{1,1} \\\\
# B_{2,1}
# \end{bmatrix}=\\\\
# &\begin{bmatrix}
# A_{1,1}B_{1,1} + A_{1,2}B_{2,1} \\\\
# A_{2,1}B_{1,1} + A_{2,2}B_{2,1} \\\\
# A_{3,1}B_{1,1} + A_{3,2}B_{2,1}
# \end{bmatrix}
# \end{align*}
# $$
#
# So we will have:
#
# $$
# \begin{align*}
# &\begin{bmatrix}
# 1 & 2 \\\\
# 3 & 4 \\\\
# 5 & 6
# \end{bmatrix}\times
# \begin{bmatrix}
# 2 \\\\
# 4
# \end{bmatrix}=\\\\
# &\begin{bmatrix}
# 1 \times 2 + 2 \times 4 \\\\
# 3 \times 2 + 4 \times 4 \\\\
# 5 \times 2 + 6 \times 4
# \end{bmatrix}=
# \begin{bmatrix}
# 10 \\\\
# 22 \\\\
# 34
# \end{bmatrix}
# \end{align*}
# $$
#
# It is a good habit to check the dimensions of the matrix to see what is going on. We can see in this example that the shape of $\bs{A}$ is ($3 \times 2$) and the shape of $\bs{b}$ is ($2 \times 1$). So the dimensions of $\bs{C}$ are ($3 \times 1$).
# + [markdown] hideCode=false hidePrompt=false
# ### With Numpy
#
# The Numpy function `dot()` can be used to compute the matrix product (or dot product). Let's try to reproduce the last example:
# + hideCode=false hidePrompt=false
# A is a (3 x 2) matrix.
A = np.array([[1, 2],
              [3, 4],
              [5, 6]])
A
# -
# B is a (2 x 1) column vector.
B = np.array([[2],
              [4]])
B
# + hideCode=false hidePrompt=false
# Matrix product via the top-level NumPy function.
C = np.dot(A, B)
C
# -
# It is equivalent to use the method `dot()` of Numpy arrays:
C = A.dot(B)
C
# + [markdown] hideCode=false hidePrompt=false
# ### Example 2.
#
# Multiplication of two matrices.
#
# $$\bs{A} \times \bs{B} = \bs{C}$$
#
# with:
#
# $$\bs{A}=\begin{bmatrix}
# 1 & 2 & 3 \\\\
# 4 & 5 & 6 \\\\
# 7 & 8 & 9 \\\\
# 10 & 11 & 12
# \end{bmatrix}
# $$
#
# and:
#
# $$\bs{B}=\begin{bmatrix}
# 2 & 7 \\\\
# 1 & 2 \\\\
# 3 & 6
# \end{bmatrix}
# $$
#
# So we have:
#
# $$
# \begin{align*}
# &\begin{bmatrix}
# 1 & 2 & 3 \\\\
# 4 & 5 & 6 \\\\
# 7 & 8 & 9 \\\\
# 10 & 11 & 12
# \end{bmatrix}\times
# \begin{bmatrix}
# 2 & 7 \\\\
# 1 & 2 \\\\
# 3 & 6
# \end{bmatrix}=\\\\
# &\begin{bmatrix}
# 2 \times 1 + 1 \times 2 + 3 \times 3 & 7 \times 1 + 2 \times 2 + 6 \times 3 \\\\
# 2 \times 4 + 1 \times 5 + 3 \times 6 & 7 \times 4 + 2 \times 5 + 6 \times 6 \\\\
# 2 \times 7 + 1 \times 8 + 3 \times 9 & 7 \times 7 + 2 \times 8 + 6 \times 9 \\\\
# 2 \times 10 + 1 \times 11 + 3 \times 12 & 7 \times 10 + 2 \times 11 + 6 \times 12 \\\\
# \end{bmatrix}\\\\
# &=
# \begin{bmatrix}
# 13 & 29 \\\\
# 31 & 74 \\\\
# 49 & 119 \\\\
# 67 & 164
# \end{bmatrix}
# \end{align*}
# $$
#
# Let's check the result with Numpy:
# + hideCode=false hidePrompt=false
# A is (4 x 3), filled with 1..12 row by row.
A = np.arange(1, 13).reshape(4, 3)
A
# -
B = np.array([[2, 7],
              [1, 2],
              [3, 6]])
B
# + hideCode=false hidePrompt=false
# (4 x 3) . (3 x 2) -> (4 x 2)
C = A.dot(B)
C
# + [markdown] hideCode=false hidePrompt=false
# It works!
#
# # Formalization of the dot product
#
# The dot product can be formalized through the following equation:
#
# $$
# C_{i,j} = A_{i,k}B_{k,j} = \sum_{k}A_{i,k}B_{k,j}
# $$
#
# You can find more examples about the dot product [here](https://www.mathsisfun.com/algebra/matrix-multiplying.html).
# + [markdown] hideCode=false hidePrompt=false
# # Properties of the dot product
#
# We will now see some interesting properties of the dot product. Using simple examples for each property, we'll get used to the Numpy functions.
#
# ## Matrix multiplication is distributive
#
# $$\bs{A}(\bs{B}+\bs{C}) = \bs{AB}+\bs{AC}$$
#
# ### Example 3.
#
# $$
# \bs{A}=\begin{bmatrix}
# 2 & 3 \\\\
# 1 & 4 \\\\
# 7 & 6
# \end{bmatrix},
# \bs{B}=\begin{bmatrix}
# 5 \\\\
# 2
# \end{bmatrix},
# \bs{C}=\begin{bmatrix}
# 4 \\\\
# 3
# \end{bmatrix}
# $$
#
#
# $$
# \begin{align*}
# \bs{A}(\bs{B}+\bs{C})&=\begin{bmatrix}
# 2 & 3 \\\\
# 1 & 4 \\\\
# 7 & 6
# \end{bmatrix}\times
# \left(\begin{bmatrix}
# 5 \\\\
# 2
# \end{bmatrix}+
# \begin{bmatrix}
# 4 \\\\
# 3
# \end{bmatrix}\right)=
# \begin{bmatrix}
# 2 & 3 \\\\
# 1 & 4 \\\\
# 7 & 6
# \end{bmatrix}\times
# \begin{bmatrix}
# 9 \\\\
# 5
# \end{bmatrix}\\\\
# &=
# \begin{bmatrix}
# 2 \times 9 + 3 \times 5 \\\\
# 1 \times 9 + 4 \times 5 \\\\
# 7 \times 9 + 6 \times 5
# \end{bmatrix}=
# \begin{bmatrix}
# 33 \\\\
# 29 \\\\
# 93
# \end{bmatrix}
# \end{align*}
# $$
#
# is equivalent to
#
# $$
# \begin{align*}
# \bs{A}\bs{B}+\bs{A}\bs{C} &= \begin{bmatrix}
# 2 & 3 \\\\
# 1 & 4 \\\\
# 7 & 6
# \end{bmatrix}\times
# \begin{bmatrix}
# 5 \\\\
# 2
# \end{bmatrix}+
# \begin{bmatrix}
# 2 & 3 \\\\
# 1 & 4 \\\\
# 7 & 6
# \end{bmatrix}\times
# \begin{bmatrix}
# 4 \\\\
# 3
# \end{bmatrix}\\\\
# &=
# \begin{bmatrix}
# 2 \times 5 + 3 \times 2 \\\\
# 1 \times 5 + 4 \times 2 \\\\
# 7 \times 5 + 6 \times 2
# \end{bmatrix}+
# \begin{bmatrix}
# 2 \times 4 + 3 \times 3 \\\\
# 1 \times 4 + 4 \times 3 \\\\
# 7 \times 4 + 6 \times 3
# \end{bmatrix}\\\\
# &=
# \begin{bmatrix}
# 16 \\\\
# 13 \\\\
# 47
# \end{bmatrix}+
# \begin{bmatrix}
# 17 \\\\
# 16 \\\\
# 46
# \end{bmatrix}=
# \begin{bmatrix}
# 33 \\\\
# 29 \\\\
# 93
# \end{bmatrix}
# \end{align*}
# $$
# + hideCode=false hidePrompt=false
A = np.array([[2, 3],
              [1, 4],
              [7, 6]])
A
# -
B = np.array([[5],
              [2]])
B
# + hideCode=false hidePrompt=false
C = np.array([[4],
              [3]])
C
# -
# $\bs{A}(\bs{B}+\bs{C})$:
# + hideCode=false hidePrompt=false
# Multiply A by the sum of the two vectors...
D = A.dot(B + C)
D
# -
# is equivalent to $\bs{AB}+\bs{AC}$:
# + hideCode=false hidePrompt=false
# ...or sum the two individual products: same result.
D = A.dot(B) + A.dot(C)
D
# + [markdown] hideCode=false hidePrompt=false
# ## Matrix multiplication is associative
#
# $$\bs{A}(\bs{BC}) = (\bs{AB})\bs{C}$$
#
# + hideCode=false hidePrompt=false
A = np.array([[2, 3], [1, 4], [7, 6]])
A
# -
B = np.array([[5, 3], [2, 2]])
B
# $\bs{A}(\bs{BC})$:
#
# + hideCode=false hidePrompt=false
# NOTE(review): C is the (2 x 1) vector [[4], [3]] defined in the
# distributivity example above -- this cell relies on notebook execution order.
D = A.dot(B.dot(C))
D
# -
# is equivalent to $(\bs{AB})\bs{C}$:
# + hideCode=false hidePrompt=false
# Associativity: grouping does not change the result.
D = (A.dot(B)).dot(C)
D
# + [markdown] hideCode=false hidePrompt=false
# ## Matrix multiplication is not commutative
#
# $$\bs{AB} \neq \bs{BA}$$
# + hideCode=false hidePrompt=false
A = np.array([[2, 3],
              [6, 5]])
A
# -
B = np.array([[5, 3],
              [2, 2]])
B
# $\bs{AB}$:
# + hideCode=false hidePrompt=false
# The product in one order...
AB = A.dot(B)
AB
# -
# is different from $\bs{BA}$:
# + hideCode=false hidePrompt=false
# ...differs from the product in the other order.
BA = B.dot(A)
BA
# + [markdown] hideCode=false hidePrompt=false
# ## However vector multiplication is commutative
#
# $$\bs{x^{ \text{T}}y} = \bs{y^{\text{T}}x} $$
# + hideCode=false hidePrompt=false
x = np.array([[2],
              [6]])
x
# -
y = np.array([[5],
              [2]])
y
# $\bs{x^\text{T}y}$:
# + hideCode=false hidePrompt=false
# Inner product of the two column vectors (result is a 1 x 1 matrix).
x_ty = np.dot(x.T, y)
x_ty
# -
# is equivalent to $\bs{y^\text{T}x}$:
# + hideCode=false hidePrompt=false
y_tx = np.dot(y.T, x)
y_tx
# + [markdown] hideCode=false hidePrompt=false
# ## Simplification of the matrix product
#
# $$(\bs{AB})^{\text{T}} = \bs{B}^\text{T}\bs{A}^\text{T}$$
# + hideCode=false hidePrompt=false
A = np.array([[2, 3],
              [1, 4],
              [7, 6]])
A
# -
B = np.array([[5, 3],
              [2, 2]])
B
# + [markdown] hideCode=false hidePrompt=false
# $(\bs{AB})^{\text{T}}$:
# + hideCode=false hidePrompt=false
# Transpose of the product...
AB_t = A.dot(B).T
AB_t
# + [markdown] hideCode=false hidePrompt=false
# is equivalent to $\bs{B}^\text{T}\bs{A}^\text{T}$:
# + hideCode=false hidePrompt=false
# ...equals the product of the transposes in reverse order.
B_tA = np.dot(B.T, A.T)
B_tA
# + [markdown] hideCode=false hidePrompt=false
# # System of linear equations
#
# This is an important part of why linear algebra can be very useful to solve a large variety of problems. Here we will see that it can be used to represent systems of equations.
#
# A system of equations is a set of multiple equations (at least 1). For instance we could have:
#
# <div>
# $
# \begin{cases}
# y = 2x + 1 \\\
# y = \frac{7}{2}x +3
# \end{cases}
# $
# </div>
#
# It is defined by its number of equations and its number of unknowns. In this example, there are 2 equations (the first and the second line) and 2 unknowns ($x$ and $y$). In addition we call this a system of **linear** equations because each equation is linear. We can represent that in 2 dimensions: we have one straight line per equation and dimensions correspond to the unknowns. Here is the plot of the first equation:
#
# <img src="images/plot-linear-equation.png" width="300" alt="Representation of a line from an equation" title="Plot of a linear equation">
# <em>Representation of a linear equation</em>
#
# <span class='pquote'>
# In our system of equations, the unknowns are the dimensions and the number of equations is the number of lines (in 2D) or $n$-dimensional planes.
# </span>
# -
# ## Using matrices to describe the system
#
# Matrices can be used to describe a system of linear equations of the form $\bs{Ax}=\bs{b}$. Here is such a system:
#
# $$
# A_{1,1}x_1 + A_{1,2}x_2 + \cdots + A_{1,n}x_n = b_1 \\\\
# A_{2,1}x_1 + A_{2,2}x_2 + \cdots + A_{2,n}x_n = b_2 \\\\
# \cdots \\\\
# A_{m,1}x_1 + A_{m,2}x_2 + \cdots + A_{m,n}x_n = b_m
# $$
#
# The unknowns (what we want to find to solve the system) are the variables $x_1$ and $x_2$. It is exactly the same form as with the last example but with all the variables on the same side. $y = 2x + 1$ becomes $-2x + y = 1$ with $x$ corresponding to $x_1$ and $y$ corresponding to $x_2$. We will have $n$ unknowns and $m$ equations.
#
# The variables are named $x_1, x_2, \cdots, x_n$ by convention because we will see that it can be summarised in the vector $\bs{x}$.
#
# ### Left-hand side
#
# The left-hand side can be considered as the product of a matrix $\bs{A}$ containing weights for each variable ($n$ columns) and each equation ($m$ rows):
#
# <div>
# $
# \bs{A}=
# \begin{bmatrix}
# A_{1,1} & A_{1,2} & \cdots & A_{1,n} \\\
# A_{2,1} & A_{2,2} & \cdots & A_{2,n} \\\
# \cdots & \cdots & \cdots & \cdots \\\
# A_{m,1} & A_{m,2} & \cdots & A_{m,n}
# \end{bmatrix}
# $
# </div>
#
# with a vector $\bs{x}$ containing the $n$ unknowns
#
# <div>
# $
# \bs{x}=
# \begin{bmatrix}
# x_1 \\\
# x_2 \\\
# \cdots \\\
# x_n
# \end{bmatrix}
# $
# </div>
#
# The dot product of $\bs{A}$ and $\bs{x}$ gives a set of equations. Here is a simple example:
#
# <img src="images/system-linear-equations-matrix-form.png" width="400" alt="Matrix form of a system of linear equation" title="Matrix form of a system of linear equation">
# <em>Matrix form of a system of linear equations</em>
#
# We have a set of two equations with two unknowns. So the number of rows of $\bs{A}$ gives the number of equations and the number of columns gives the number of unknowns.
#
# ### Both sides
#
# The system of equations can be written as:
#
# $$
# \begin{bmatrix}
# A_{1,1} & A_{1,2} & \cdots & A_{1,n} \\\\
# A_{2,1} & A_{2,2} & \cdots & A_{2,n} \\\\
# \cdots & \cdots & \cdots & \cdots \\\\
# A_{m,1} & A_{m,2} & \cdots & A_{m,n}
# \end{bmatrix}
# \times
# \begin{bmatrix}
# x_1 \\\\
# x_2 \\\\
# \cdots \\\\
# x_n
# \end{bmatrix}
# =
# \begin{bmatrix}
# b_1 \\\\
# b_2 \\\\
# \cdots \\\\
# b_m
# \end{bmatrix}
# $$
#
# Or simply:
#
# $$\bs{Ax}=\bs{b}$$
# ### Example 4.
#
# We will try to convert the common form of a linear equation $y=ax+b$ to the matrix form. If we want to keep the previous notation we will have instead:
#
# <div>
# $x_2=ax_1+b$
# </div>
#
# Don't confuse the variable $x_1$ and $x_2$ with the vector $\bs{x}$. This vector contains all the variables of our equations:
#
# <div>
# $
# \bs{x} =
# \begin{bmatrix}
# x_1 \\\
# x_2
# \end{bmatrix}
# $
# </div>
#
# In this example we will use the following equation:
#
# <div>
# $
# \begin{aligned}
# &x_2=2x_1+1 \\\
# \Leftrightarrow& 2x_1-x_2=-1
# \end{aligned}
# $
# </div>
#
# In order to end up with this system when we multiply $\bs{A}$ and $\bs{x}$ we need $\bs{A}$ to be a matrix containing the weights of each variable. The weight of $x_1$ is $2$ and the weights of $x_2$ is $-1$:
#
# <div>
# $
# \bs{A}=
# \begin{bmatrix}
# 2 & -1
# \end{bmatrix}
# $
# </div>
#
# So we have
#
# <div>
# $
# \begin{bmatrix}
# 2 & -1
# \end{bmatrix}
# \begin{bmatrix}
# x_1 \\\
# x_2
# \end{bmatrix}
# =
# \begin{bmatrix}
# 2x_1-1x_2
# \end{bmatrix}
# $
# </div>
#
# To complete the equation we have
#
# <div>
# $
# \bs{b}=
# \begin{bmatrix}
# -1
# \end{bmatrix}
# $
# </div>
#
# which gives
#
# <div>
# $
# \begin{bmatrix}
# 2 & -1
# \end{bmatrix}
# \begin{bmatrix}
# x_1 \\\
# x_2
# \end{bmatrix}
# =
# \begin{bmatrix}
# -1
# \end{bmatrix}
# $
# </div>
#
# This system of equations is thus very simple and contains only 1 equation ($\bs{A}$ has 1 row) and 2 variables ($\bs{A}$ has 2 columns).
#
# To summarise, $\bs{A}$ will be a matrix of dimensions $m\times n$ containing scalars multiplying these variables (here $x_1$ is multiplied by 2 and $x_2$ by -1). The vector $\bs{x}$ contains the variables $x_1$ and $x_2$. And the right-hand side is the constant $\bs{b}$:
#
# <div>
# $
# \bs{A}=
# \begin{bmatrix}
# 2 & -1
# \end{bmatrix}
# $
# </div>
#
# <div>
# $
# \bs{x}=
# \begin{bmatrix}
# x_1 \\\
# x_2
# \end{bmatrix}
# $
# </div>
#
# <div>
# $
# \bs{b}=
# \begin{bmatrix}
# -1
# \end{bmatrix}
# $
# </div>
#
# We can write this system
#
# <div>
# $
# \bs{Ax}=\bs{b}
# $
# </div>
#
# We will see at the end of [the next chapter](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.3-Identity-and-Inverse-Matrices/) that this compact way of writing sets of linear equations can be very useful. It provides a way to solve the equations.
# <span class='notes'>
# Feel free to drop me an email or a comment. The syllabus of this series can be found [in the introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/). All the notebooks can be found on [Github](https://github.com/hadrienj/deepLearningBook-Notes).
# </span>
# # References
#
# - [Math is fun - Multiplying matrices](https://www.mathsisfun.com/algebra/matrix-multiplying.html)
| 2.2 Multiplying Matrices and Vectors/2.2 Multiplying Matrices and Vectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # K-means Clustering
# ## Import resources and display image
# +
import numpy as np
import sys
# Bug fix: np.nan is no longer a valid threshold and raises in modern NumPy;
# sys.maxsize is the documented way to say "never summarize arrays when printing".
np.set_printoptions(threshold=sys.maxsize)
import matplotlib.pyplot as plt
import cv2
# %matplotlib inline
# Read in the image
## TODO: Check out the images directory to see other images you can work with
# And select one!
image = cv2.imread('images/monarch.jpg')
# Change color to RGB (from BGR) -- OpenCV loads images as BGR by default
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
# -
# ## Prepare data for k-means
# +
# Reshape image into a 2D array of pixels and 3 color values (RGB)
pixel_vals = image.reshape((-1,3)) # <-- k-means needs the data in a vector!
print("Shapes:\n Image: {}\n pixel_vals: {}".format(image.shape, pixel_vals.shape))
# pixel_vals[i] is a pixel, i.e, [r g b]
# pixel_vals[i][j] is a color component, i.e, r or g or b
print("Type of elements in pixel_vals (before): {}".format(type(pixel_vals[0][0])))
# Convert to float type
pixel_vals = np.float32(pixel_vals) # <-- k-means needs the data in float format
print("Type of elements in pixel_vals (after): {}".format(type(pixel_vals[0][0])))
# -
# ## Implement k-means clustering
# ### cv2.kmeans() function in OpenCV for data clustering
#
# #### Argument 1: samples
#
# It should be of np.float32 data type, and each feature should be put in a single column.
#
# #### Argument 2: nclusters(K)
#
# Number of clusters required at end
#
# #### Argument 3: None
#
# #### Argument 4: criteria
#
# It is the iteration termination criterion. When this criterion is satisfied, the algorithm stops iterating. It should be a tuple of 3 parameters: ( `type`, `max_iter`, `epsilon` )
#
# 3.a - Type of termination criteria.
#
# It has 3 flags as below:
# `cv2.TERM_CRITERIA_EPS` - stop the algorithm iteration if specified accuracy, epsilon, is reached. `cv2.TERM_CRITERIA_MAX_ITER` - stop the algorithm after the specified number of iterations, max_iter. `cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER` - stop the iteration when any of the above condition is met.
#
# 3.b - max_iter - An integer specifying maximum number of iterations.
#
# 3.c - epsilon - Required accuracy
#
# #### Argument 5: attempts
#
# Flag to specify the number of times the algorithm is executed using different initial labellings. The algorithm returns the labels that yield the best compactness. This compactness is returned as output.
#
# #### Argument 6 :flags
#
# This flag is used to specify how initial centers are taken. Normally two flags are used for this : `cv2.KMEANS_PP_CENTERS` and `cv2.KMEANS_RANDOM_CENTERS`.
#
# +
# define stopping criteria: stop after 100 iterations OR when centers move less than 0.2
# you can change the number of max iterations for faster convergence!
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
## TODO: Select a value for k
# then perform k-means clustering (10 attempts with random initial centers; best run kept)
k = 4
retval, labels, rgb_centers = cv2.kmeans(pixel_vals, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
# retval: compactness of the best run; labels: cluster id per pixel; rgb_centers: the k cluster centers
print("retval's type:", type(retval))
print("label's type:", type(labels))
print("center's type:", type(rgb_centers))
print("labels' shape: {}".format(labels.shape))
print("centers' shape: {}".format(rgb_centers.shape))
# convert data into 8-bit values
rgb_centers = np.uint8(rgb_centers)  # k rows of [r, g, b] (k=4 here)
segmented_data = rgb_centers[labels.flatten()]  # look up each pixel's center color:
                                                # labels.flatten() turns the (num_pixels, 1)
                                                # label array into a flat 1-D index vector
print(rgb_centers)
print(labels.flatten()[1:100])
print(labels.flatten()[12345:13100])
# rgb_centers => [[r0,g0,b0], [r1,g1,b1], [r2,g2,b2], ... , [r_{k-1}, g_{k-1}, b_{k-1}]] => k rgb centers => k x 3 matrix
# labels.flatten() => [0, 2, i, j, ..., 0, k-1, ..., 1, 4] (example of labels.flatten())
# => 1x(MxN) labels. original image is MxN
# => pixel in original img at (0,0) is associated at label 0
# pixel in original img at (0,1) is associated at label 2
# pixel in original img at (0,2) is associated at label i
# pixel in original img at (0,3) is associated at label j
# pixel in original img at (m,n) is associated at label 0
# pixel in original img at (m,n+1) is associated at label k-1
# pixel in original img at (M-1,N-2) is associated at label 1
# pixel in original img at (M-1,N-1) is associated at label 4
#centers[labels.flatten()]
# => [0, 2, i, j, ..., 0, k-1, ..., 1, 4] (example of labels.flatten())
# => pixel in original img at (0,0) is associated at label 0 => associated to center centers[0], i.e, [r0, g0, b0]
# pixel in original img at (0,1) is associated at label 2 => associated to center centers[2], i.e, [r2, g2, b2]
# pixel in original img at (0,2) is associated at label i => associated to center centers[i], i.e, [ri, gi, bi]
# pixel in original img at (0,3) is associated at label j => associated to center centers[j], i.e, [rj, gj, bj]
# pixel in original img at (m,n) is associated at label 0 => associated to center centers[0], i.e, [r0, g0, b0]
# pixel in original img at (m,n+1) is associated at label k-1 => associated to center centers[k-1], i.e, [r_{k-1}, g_{k-1}, b_{k-1}]
# pixel in original img at (M-1,N-2) is associated at label 1 => associated to center centers[1], i.e, [r1, g1, b1]
# pixel in original img at (M-1,N-1) is associated at label 4 => associated to center centers[4], i.e, [r4, g4, b4]
#
# centers[labels.flatten()] = centers[[0, 2, i, j, ..., 0, k-1, ..., 1, 4]] =
# = [[r0,g0,b0], [r2,g2,b2],[ri,gi,bi], [rj,gj,bj],...,[r0,g0,b0],[r_{k-1},g_{b-1},b_{k-1}]
# ,...,[r1,g1,b1],[r4,g4,b4]]
# reshape the flat per-pixel colors back to the original image shape
# => an image containing only the k center colors
segmented_image = segmented_data.reshape((image.shape))
labels_reshape = labels.reshape(image.shape[0], image.shape[1])  # per-pixel cluster id, image-shaped
plt.imshow(segmented_image)
# +
## TODO: Visualize one segment, try to find which is the leaves, background, etc!
# One binary mask per cluster label (white = pixel belongs to that cluster).
# Refactored: the four copy-pasted subplot stanzas collapse into one loop.
f, axes = plt.subplots(1, 4, figsize=(20, 30))
for label, axis in enumerate(axes):
    axis.set_title('label {}'.format(label))
    axis.imshow(labels_reshape == label, cmap='gray')
# +
# mask an image segment by cluster: paint each cluster's pixels with its
# centroid color and black out everything else, one subplot per cluster.
# Refactored: four identical copy-pasted stanzas collapse into one loop.
f, axes = plt.subplots(1, 4, figsize=(20, 30))
for cluster, axis in enumerate(axes):
    masked_image = np.copy(image)
    masked_image[labels_reshape == cluster] = rgb_centers[cluster]
    masked_image[labels_reshape != cluster] = [0, 0, 0]
    axis.set_title('rgb center {}'.format(cluster))
    axis.imshow(masked_image)
# -
| L04-010-Notebook-K_means.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Tutorials for <NAME> <a href='https://jakevdp.github.io/PythonDataScienceHandbook/04.12-three-dimensional-plotting.html'>Link here</a>
from mpl_toolkits import mplot3d
# %matplotlib widget
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.axes(projection='3d')
# +
ax = plt.axes(projection='3d')
# Data for a three-dimensional line: a helix winding up the z axis
zline = np.linspace(0, 15, 1000)
xline = np.sin(zline)
yline = np.cos(zline)
ax.plot3D(xline, yline, zline, 'gray')
# Data for three-dimensional scattered points: samples of the same helix plus Gaussian noise
zdata = 15 * np.random.random(100)
xdata = np.sin(zdata) + 0.1 * np.random.randn(100)
ydata = np.cos(zdata) + 0.1 * np.random.randn(100)
ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap='Greens');
# +
def f(x, y):
    """Ripple surface: sine of the Euclidean distance from the origin."""
    radius = np.sqrt(x ** 2 + y ** 2)
    return np.sin(radius)
# Sample f on a 30x30 grid over [-6, 6] x [-6, 6]
x = np.linspace(-6, 6, 30)
y = np.linspace(-6, 6, 30)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
# +
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.contour3D(X, Y, Z, 50, cmap='rainbow')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z');
ax.view_init(60, 35)  # camera: elevation 60 degrees, azimuth 35 degrees
# -
| Plots3d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import os
import glob
from tqdm import tqdm
import shutil
# -
# ### 폴더명으로 데이터 분배하기
# - Annotator 폴더이름
# - 전송할 데이터 개수
# +
# Source folder of unlabeled day images and the fallback folder for the rest.
base_dir = 'S:/traffic_night2/raw_unlabeled_day'
image_path_list = sorted(glob.glob(base_dir + '/*'))
day_dir = 'S:/traffic_night2/raw_unlabeled'
print(len(image_path_list))
# Bug fix: the original printed target_dir, which is only defined in a later
# cell, so this cell always raised NameError. Print the defined path instead.
print(day_dir)
# +
# multi
# Annotator folders that will each receive a batch of images ("multi" mode).
target_person_list = ['A/A-2', 'C/C-2', 'D/D-2', 'E/E-2', 'I/I-2']
# single mode: use an empty list instead
# target_person_list = []
image_num = 10000  # number of images to hand to each annotator
# Preview every destination directory before moving anything.
for person in target_person_list:
    print(f'S:/traffic_day/{person}/JPEGImages')
# +
# Day
# Move up to image_num "daytime" images into each annotator's JPEGImages folder;
# images outside the time window are moved to day_dir instead.
for target_person in target_person_list:
    i = 0  # images moved to this annotator so far
    # Re-glob each iteration: earlier annotators have already consumed files.
    image_path_list = sorted(glob.glob(base_dir + '/*'))
    target_dir = f'S:/traffic_day/{target_person}/JPEGImages'
    for image_path in tqdm(image_path_list):
        basename = os.path.basename(image_path)
        date = basename.split('_')[0]
        # chars 8..11 of the leading filename token appear to be HHMM -- TODO confirm filename format
        time = int(date[8:12])
        if (900 <= time <= 1600):
            # 09:00-16:00 counts as day; move to the annotator's folder
            new_path = os.path.join(target_dir, basename)
            shutil.move(image_path, new_path)
            i += 1
            if i == image_num:
                break
        else:
            new_path = os.path.join(day_dir, basename)
            shutil.move(image_path, new_path)
    print('Done!', i)
# +
# Night
# Move up to image_num "nighttime" images (19:00-23:59 or 00:01-05:00) to target_dir.
# NOTE(review): target_dir and image_path_list are leftovers from the Day cell above --
# this cell only works if that cell ran first; verify before running it standalone.
i = 0
for image_path in tqdm(image_path_list):
    basename = os.path.basename(image_path)
    date = basename.split('_')[0]
    # chars 8..11 of the leading filename token appear to be HHMM -- TODO confirm filename format
    time = int(date[8:12])
    if (1900 <= time <= 2359) or (1 <= time <= 500):
        new_path = os.path.join(target_dir, basename)
        shutil.move(image_path, new_path)
        i += 1
        if i == image_num:
            break
    else:
        new_path = os.path.join(day_dir, basename)
        shutil.move(image_path, new_path)
print('Done!', i)
| annotator/distribution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ----
# #### Pandas plot
# - 5 Easy Ways of Customizing Pandas Plots and Charts
# - https://towardsdatascience.com/5-easy-ways-of-customizing-pandas-plots-and-charts-7aefa73ff18b
# ----
# +
# The first line is only required if you are using a Jupyter Notebook
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
# -
# Download the 2018 London monthly weather dataset and keep a local CSV copy.
df_weather = pd.read_csv(
    "https://raw.githubusercontent.com/alanjones2/dataviz/master/london2018.csv")
df_weather.tail()
df_weather.shape, df_weather.dtypes
df = df_weather.copy()  # work on a copy so the raw download stays untouched
df.to_csv("london_weather_2018.csv", index=False)
df.tail()
os.getcwd()  # show where the CSV landed
df.plot(x="Month", y="Tmax")
# ----
# #### 1. Change the size and color
# ----
df.plot(x="Month", y="Tmax", figsize=(12,6), color="Red")
# ----
# #### 2. Setting a title
# ----
df.plot(x="Month", y="Tmax", title="Maximum Temperatures")
# ----
# #### 3. Display a grid
# ----
df.plot(x="Month", y="Tmax", grid=True)
plt.show()
# ----
# #### 4. Changing the legend
# ----
df.plot(x="Month", y="Tmax", legend=False)
df.plot(x="Month", y="Tmax", label="Maximum Temperatures")
# ----
# #### 5. Customizing the ticks
# ----
df.plot(x="Month", y="Tmax", xticks=range(1,13), yticks=(0,5,10,15,20,25,30))
df.plot(x="Month", y="Tmax", grid=True, rot=45,
xticks=range(1,13), yticks=(0,5,10,15,20,25,30))
# If you wanted to remove the ticks altogether
df.plot(x="Month", y="Tmax", grid=True,
xticks=(), yticks=(0,5,10,15,20,25,30))
# you can change the font size.
df.plot(x="Month", y="Tmax", grid=True, fontsize=15,
xticks=range(1,13), yticks=(0,5,10,15,20,25,30))
# ----
# #### It could get messy
# ----
# +
kwargs = {
"xticks" : range(1,13),
"yticks" : (0,5,10,15,20,25,30),
"grid" : True,
"rot" : 45,
"fontsize": 13,
"color" : "Red",
"label" : "Maximum Temperatures",
"title" : "London Temp 2018",
"figsize" : (12, 6)
}
df.plot(x="Month", y="Tmax", **kwargs)
# -
range(1,13)
# +
import warnings
import pandas as pd
import FinanceDataReader as fdr
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
# %matplotlib inline
warnings.filterwarnings("ignore")
mpl.rcParams["axes.unicode_minus"] = False
path = "C:\\Windows\\Fonts\\NanumMyeongjoBold.ttf"
# C:\Windows\WinSxS\amd64_microsoft-windows-font-truetype-arial_31bf3856ad364e35_10.0.18362.1_none_44e0e02b2a9382cc\
font_name = fm.FontProperties(fname=path).get_name()
print(font_name)
plt.rc('font', family=font_name)
plt.rc('font', size=15)
#--------------------------------------------------------------------------------------#
df = fdr.DataReader("USD/KRW", start="2021-01-01")
columns = "Close Open High Low".split()
#--------------------------------------------------------------------------------------#
def get_index_number(value):
    """Return the positional index of *value* in df.index (None if not found)."""
    for position, label in enumerate(df.index):
        if label == value:
            return position
#--------------------------------------------------------------------------------------#
# Index labels (dates) of the minimum, maximum, and most recent Close price.
x_min = df.query("Close == {}".format(df["Close"].min())).index[0]
x_max = df.query("Close == {}".format(df["Close"].max())).index[0]
x_now = df["Close"].index[-1]
# Convert those labels to positional x-coordinates for the annotated plot below.
x1 = get_index_number(x_min)
x2 = get_index_number(x_max)
x3 = get_index_number(x_now)
# Matching y-coordinates (the prices themselves).
y1 = df["Close"].min()
y2 = df["Close"].max()
y3 = df["Close"][-1]
#--------------------------------------------------------------------------------------#
kwargs = {
"title" : "< 원달러 가격 변동율 >",
"figsize": (12, 8),
"xticks" : range(len(df)),
"yticks" : range(1080,1130,5),
"xlabel" : "2021년도 1월부터...",
"ylabel" : "원달러 가격",
"grid" : True,
"rot" : 85
}
#--------------------------------------------------------------------------------------#
df1 = df.reset_index()
df1["Date"] = df1["Date"].astype(str)
#--------------------------------------------------------------------------------------#
x4 = df1.query("Date == '2021-01-26'").index[0] # buy_date
y4 = df1["Close"][x4]
#--------------------------------------------------------------------------------------#
ax = df1.plot(x="Date", y="Close", **kwargs)
#--------------------------------------------------------------------------------------#
ax.axvline(x1, ls='--', color='b')
ax.axvline(x2, ls='--', color='r')
ax.axvline(x3, ls='--', color='g')
ax.axvline(x4, ls='-', color='y')
ax.text(x1, y1, y1, rotation=0)
ax.text(x2, y2, y2, rotation=0)
ax.text(x3, y3, y3, rotation=45)
ax.text(x4, y4, y4, rotation=0)
#--------------------------------------------------------------------------------------#
# -
df.to_csv("temp.csv")
df_ex = df.reset_index()
df_ex.dtypes
# Date datetime64[ns]
df_ex["Year"] = df_ex["Date"].dt.year
df_ex["Month"] = df_ex["Date"].dt.month
df_ex["Day"] = df_ex["Date"].dt.day
df_ex.tail()
df_ex.drop(["Year", "Month", "Day"], axis=1, inplace=True) # inplace=True
year = df_ex["Date"].dt.year
month = df_ex["Date"].dt.month_name()
day = df_ex["Date"].dt.day
df_ex.insert(1, "Year", year)
df_ex.insert(2, "Month", month)
df_ex.insert(3, "Day", day)
df_ex.head()
month
| python/pandas/pandas_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
import re
def load_data(filename):
    """Read blank-line-separated passport records into a list of dicts.

    Each record is a run of whitespace-separated ``key:value`` tokens.
    """
    with open(filename) as handle:
        records = handle.read().split('\n\n')
    # One dict per passport: token "ecl:gry" becomes {'ecl': 'gry'}.
    return [dict(token.split(':') for token in record.split())
            for record in records]
# +
# Verification methods
def check_year(year, minimum, maximum):
    """True iff *year* is exactly four digits and within [minimum, maximum]."""
    # Bug fix: the original used re.match('(\d){4}', ...), which accepted
    # trailing junk such as '1920a' and then crashed int() with ValueError.
    if not re.fullmatch(r'\d{4}', year):
        return False
    return minimum <= int(year) <= maximum

def byr(year):
    """Birth year: 1920-2002."""
    return check_year(year, 1920, 2002)

def iyr(year):
    """Issue year: 2010-2020."""
    return check_year(year, 2010, 2020)

def eyr(year):
    """Expiration year: 2020-2030."""
    return check_year(year, 2020, 2030)

def ecl(color):
    """Eye color: one of the nine allowed two/three-letter codes."""
    return color in {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'}

def hcl(color):
    """Hair color: '#' followed by exactly six lowercase hex digits."""
    # Bug fix: the original pattern '^#([0-9]*|[a-f]*){6}$' accepted any
    # length (e.g. '#123' and '#1234567') because each alternative can match
    # the empty string.
    return re.fullmatch(r'#[0-9a-f]{6}', color) is not None

def hgt(height):
    """Height: an integer followed by 'in' (59-76) or 'cm' (150-193)."""
    if not re.fullmatch(r'\d+(in|cm)', height):
        return False
    measurement = int(height[:-2])
    if height.endswith('in'):
        return 59 <= measurement <= 76
    else:
        return 150 <= measurement <= 193

def pid(passport):
    """Passport id: exactly nine digits."""
    # fullmatch also rejects a trailing newline, which the original '$' tolerated.
    return re.fullmatch(r'\d{9}', passport) is not None
# -
# List of required fields will be supplied to validate, otherwise dispatch to the field's method above
def validate(passport, fields):
    """True iff every required field is present in *passport* and its validator accepts it."""
    return all(
        field in passport and checker(passport[field])
        for field, checker in fields.items()
    )
# Running validate on passports
def check_passports(passports):
count = 0
fields = {'byr': byr, 'iyr': iyr, 'eyr': eyr, 'hgt': hgt, 'hcl': hcl, 'ecl': ecl, 'pid': pid}
for passport in passports:
if validate(passport, fields):
count += 1
return count
# Run the file
# Parse the puzzle input and report how many passports are fully valid.
data = load_data('input.txt')
print('Final answer: ' + str(check_passports(data)))
| 2020/Day 4/Day 4 Pt. 2 Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow2_p36
# language: python
# name: conda_tensorflow2_p36
# ---
# # Profiling TensorFlow Single GPU Single Node Training Job with Amazon SageMaker Debugger
#
# ***Note: 본 노트북 코드는 [영문 노트북](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-debugger/tensorflow_profiling/tf-resnet-profiling-single-gpu-single-node.ipynb)을 한국어화(중간중간 역주 추가와 documentation 보완)하면서 보완하면서, Debugger 예제 코드와 TensorBoard 로깅 코드를 추가하였습니다.***
#
# 이 노트북은 SageMaker 디버거 프로파일링(debugger profiling) 기능이 활성화된 상태에서 TensorFlow 훈련 작업을 생성하는 과정을 안내하며, 단일 노드 내의 단일 GPU 훈련을 수행합니다.
# ### Install SageMaker and smdebug
# 2020년 12월에 출시된 새로운 디버거 프로파일링 기능을 사용하려면, 최신 버전의 SageMaker 및 SMDebug SDK가 설치되어 있는지 확인하세요. 다음 코드 셀을 사용하여 라이브러리를 업데이트하고 Jupyter 커널을 다시 시작하여 업데이트를 적용합니다.
# +
import sys
import IPython
import boto3
import sagemaker
install_needed = False # should only be True once
if install_needed:
print("installing deps and restarting kernel")
# !{sys.executable} -m pip install -U sagemaker smdebug
IPython.Application.instance().kernel.do_shutdown(True)
bucket = sagemaker.Session().default_bucket()
# -
# <br>
#
# ## 1. Create a Training Job with Profiling Enabled<a class="anchor" id="option-1"></a>
# ---
#
# [SageMaker Estimator API for Tensorflow](https://sagemaker.readthedocs.io/en/stable/frameworks/tensorflow/sagemaker.tensorflow.html#tensorflow-estimator)를 사용하여 훈련 작업을 생성합니다. 프로파일링을 활성화하려면 `ProfilerConfig` 객체를 생성하고 TensorFlow estimator의 `profiler_config` 파라메터에 전달합니다.
# ### Define hyperparameters
#
# Epoch 수, 배치 크기 및 데이터 증강(augmentation)와 같은 하이퍼파라메터를 정의합니다. 시스템 사용률을 높이기 위해 배치 크기를 늘릴 수 있지만 CPU 병목 문제가 발생할 수 있습니다. 증강을 통한 대규모 배치 크기의 데이터 전처리에는 많은 계산이 필요합니다. data_augmentation을 비활성화하여 시스템 활용도에 미치는 영향을 확인할 수 있습니다.
#
# 데모 목적으로 CPU 사용률을 늘리기 위해 다음과 같은 하이퍼파라미터를 준비합니다.
# Training hyperparameters; data_augmentation=True deliberately increases CPU
# load so the profiling demo has something to show.
hyperparameters = {"epoch": 2, "batch_size": 64, "data_augmentation": True}
# ### Configure rules
#
# 다음 규칙(rule)들을 지정합니다.
#
# - `loss_not_decreasing` : loss가 감소하고 있는지 확인하고 지난 몇 번의 iteration에서 loss이 특정 수치만큼 감소하지 않은 경우 트리거합니다.
# - `vanishing_gradient` : graidnet가 매우 작아지거나 0으로 떨어지는지를 감지합니다. gradient의 절대값 평균이 지정된 임계값 미만으로 감소하는 경우 트리거합니다.
# - `LowGPUUtilization` : GPU의 활용도가 낮은지 확인합니다.
# - `ProfilerReport` : 전체 퍼포먼스 규칙 세트를 실행하고 추가 정보 및 권장 사항이 포함된 최종 출력용 보고서를 생성합니다.
# +
from sagemaker.debugger import (
Rule,
ProfilerRule,
DebuggerHookConfig,
TensorBoardOutputConfig,
CollectionConfig,
rule_configs
)
rules = [
Rule.sagemaker(rule_configs.loss_not_decreasing()),
Rule.sagemaker(rule_configs.vanishing_gradient()),
ProfilerRule.sagemaker(rule_configs.LowGPUUtilization()),
ProfilerRule.sagemaker(rule_configs.ProfilerReport())
]
# -
# ### Configure DebuggerHookConfig and TensorBoard
#
# DebuggerHookConfig 및 TensorBoardOutputConfig 클래스 인스턴스를 Estimator에 전달 시, SageMaker Debugger는 TensorBoard와 호환되는 출력 텐서 파일을 훈련 인스턴스의 `/opt/ml/output/tensor` 경로에 자동으로 생성합니다. 훈련이 종료되면 각각 S3의 `debug-output`, `tensorboard-output` 폴더로 복사되며 이를 복사하여 로컬 상에서 텐서보드를 띄울 수도 있습니다.
# +
hook_config = DebuggerHookConfig(
hook_parameters={"save_interval": "100"},
collection_configs=[
CollectionConfig("weights"),
CollectionConfig(
name="biases",
parameters={"save_interval": "10", "end_step": "500", "save_histogram": "True"}
),
],
)
tb_config = TensorBoardOutputConfig(
s3_output_path='s3://{}/tensorboard-resnet'.format(bucket)
)
# -
# ### Specify a profiler configuration
#
# 다음 설정은 500 밀리초(0.5초)에서 시스템 메트릭을 캡처합니다. 시스템 지표에는 CPU 당 사용률, GPU, CPU 당 메모리 사용률, GPU, I/O 및 네트워크가 포함됩니다.
#
# 디버거는 5단계부터 15단계까지 상세한 프로파일링 정보를 캡처합니다. 이 정보에는 Horovod 메트릭, 데이터 로딩, 전처리, CPU 및 GPU에서 실행되는 연산자(operator)가 포함됩니다.
# +
from sagemaker.debugger import ProfilerConfig, FrameworkProfile
# Sample system metrics (CPU, GPU, memory, I/O, network) every 500 ms and
# capture detailed framework-level profiling for 10 steps starting at step 5.
profiler_config = ProfilerConfig(
    system_monitor_interval_millis=500,
    framework_profile_params=FrameworkProfile(
        local_path="/opt/ml/output/profiler/", start_step=5, num_steps=10
    ),
)
# -
# ### Get the image URI
#
# 이 노트북을 실행하는 리전에 따라 도커 이미지가 달라집니다.
session = boto3.session.Session()
region = session.region_name
image_uri = f"763104351884.dkr.ecr.{region}.amazonaws.com/tensorflow-training:2.4.1-gpu-py37-cu110-ubuntu18.04"
# ### Define estimator
#
# 프로파일링을 활성화하려면 디버거 프로파일링 구성 (`profiler_config`), 디버거 규칙 목록 (`rules`) 및 이미지 URI (`image_uri)`를 estimator에 전달해야 합니다. 디버거는 SageMaker estimator가 훈련 작업을 요청하는 동안 모니터링 및 프로파일링을 활성화합니다.
# +
from sagemaker.tensorflow import TensorFlow
estimator = TensorFlow(
role=sagemaker.get_execution_role(),
image_uri=image_uri,
instance_count=1,
instance_type="ml.p3.2xlarge",
entry_point="train_tf.py",
source_dir="entry_point",
hyperparameters=hyperparameters,
profiler_config=profiler_config,
debugger_hook_config=hook_config,
tensorboard_output_config=tb_config,
rules=rules
)
# -
# ### Start training job
#
# `wait=False` argument를 포함한 `estimator.fit()`은 백그라운드에서 훈련 작업을 시작합니다. 대시보드 또는 분석 노트북 실행을 계속할 수 있습니다.
estimator.fit(wait=False)
estimator.latest_training_job.rule_job_summary()
training_job_name = estimator.latest_training_job.name
print("Training Job Name: {}".format(training_job_name))
# AWS 콘솔 화면에서 `Training jobs`를 확인해 보세요. 아래 코드 셀에서 자동으로 생성되는 링크를 클릭하셔도 됩니다.
# +
from IPython.core.display import display, HTML
display(
HTML(
'<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/jobs/{}">Training Job</a> After About 5 Minutes</b>'.format(
region, training_job_name
)
)
)
display(
HTML(
'<b>Review <a target="blank" href="https://console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/TrainingJobs;prefix={};streamFilter=typeLogStreamPrefix">CloudWatch Logs</a> After About 5 Minutes</b>'.format(
region, training_job_name
)
)
)
display(
HTML(
'<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/{}/?region={}&tab=overview">S3 Output Data</a> After The Training Job Has Completed</b>'.format(
bucket, training_job_name, region
)
)
)
# -
# <br>
#
# ## 2. Analyze Profiling Data
# ---
#
# 다음 셀 (`training_job_name` 및 `region`)의 출력을 복사하여 분석 노트북 `profiling_generic_dashboard.ipynb`, `analyze_performance_bottlenecks.ipynb` 및 `profiling_interactive_analysis.ipynb`를 실행합니다.
training_job_name = estimator.latest_training_job.name
print(f"Training jobname: {training_job_name}")
print(f"Region: {region}")
# 훈련이 아직 진행 중인 동안 SageMaker Studio 또는 노트북에서 성능 데이터를 시각화할 수 있습니다. 디버거는 타임라인 차트 또는 히트맵 형식으로 시스템 메트릭을 그리는 유틸리티를 제공합니다. 자세한 내용은 노트북 [profiling_interactive_analysis.ipynb](analysis_tools/profiling_interactive_analysis.ipynb)를 확인하세요. 다음 코드 셀에서 총 CPU 및 GPU 사용률을 시계열 차트로 표시합니다. I/O, 메모리, 네트워크와 같은 다른 메트릭을 시각화하려면, `select_dimension` 및 `select_events`에 전달된 목록을 확장하기만 하면 됩니다.
# ### Access the profiling data using the SMDebug `TrainingJob` utility class
#
# Profiler 데이터가 준비될 때까지 기다립니다. Profiler 데이터는 훈련 인스턴스 프로비저닝-훈련 데이터 다운로드-훈련 시작 이후 일정 iteration 이상 반복해야 수집되기에 몇 분의 시간이 소요됩니다.
# +
from smdebug.profiler.analysis.notebook_utils.training_job import TrainingJob
tj = TrainingJob(training_job_name, region)
tj.wait_for_sys_profiling_data_to_be_available()
# -
# Debugger Rule에서 정의한 Rule에 대한 CloudWatch 로드 URL을 생성합니다. URL로 접속해서 로그를 직접 확인해 보세요.
# +
def _get_rule_job_name(training_job_name, rule_configuration_name, rule_job_arn):
"""Helper function to get the rule job name with correct casing"""
return "{}-{}-{}".format(
training_job_name[:26], rule_configuration_name[:26], rule_job_arn[-8:]
)
def _get_cw_url_for_rule_job(rule_job_name, region):
return "https://{}.console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/ProcessingJobs;prefix={};streamFilter=typeLogStreamPrefix".format(
region, region, rule_job_name
)
def get_rule_jobs_cw_urls(estimator):
    """Map each Debugger rule name to the CloudWatch log URL of its evaluation job."""
    training_job = estimator.latest_training_job
    # describe() is a remote AWS API call; the original called it twice --
    # fetch the description once and reuse it.
    description = training_job.describe()
    training_job_name = description["TrainingJobName"]
    rule_eval_statuses = description["DebugRuleEvaluationStatuses"]
    result = {}
    for status in rule_eval_statuses:
        # Rules that never started an evaluation job have no ARN to link to.
        if status.get("RuleEvaluationJobArn", None) is not None:
            rule_job_name = _get_rule_job_name(
                training_job_name, status["RuleConfigurationName"], status["RuleEvaluationJobArn"]
            )
            result[status["RuleConfigurationName"]] = _get_cw_url_for_rule_job(
                rule_job_name, boto3.Session().region_name
            )
    return result
get_rule_jobs_cw_urls(estimator)
# -
# `wait=False`로 설정한 경우 아래 코드 셀을 실행하여 Synchronous 방식으로 변경할 수도 있습니다.
sagemaker.Session().logs_for_job(job_name=estimator.latest_training_job.job_name, wait=True)
# <br>
#
# ## 3. Download Debugger Profiling Report and Plotting Metrics
# ---
# `ProfilerReport()` 규칙은 기본 규칙 요약 및 다음 단계 권장 사항이 포함된 html 리포트 `profiler-report.html`을 생성합니다. 이 리포트는 S3 버킷에서 찾을 수 있습니다.
rule_output_path = estimator.output_path + estimator.latest_training_job.job_name + "/rule-output"
print(f"You will find the profiler report in {rule_output_path}")
# 아래 코드 셀을 실행시키면 프로파일링 리포트를 로컬 환경으로 다운로드하고 리포트를 다이렉트로 확인할 수 있는 링크를 생성합니다.
# 링크를 새 탭이나 새 창으로 연 다음 프로파일링 보고서를 확인하세요
#
# 디버거 프로파일링 보고서를 다운로드하고 여는 방법에 대한 자세한 내용은 SageMaker 개발자 가이드의 [SageMaker Debugger Profiling Report](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-profiling-report.html)를 참조하세요.
#
# **[주의] 만약 JupyterLab에서 실행하는 경우 화면 좌측 상단의 Trust HTML을 클릭해야 html 리포트가 정상적으로 출력됩니다!**
# +
output_dir = './output'
profile_output = output_dir+'/ProfilerReport'
# !rm -rf $output_dir
import json, os
from IPython.core.display import display, HTML
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(profile_output):
os.makedirs(profile_output)
# !aws s3 ls {rule_output_path}/ProfilerReport/profiler-output/
# !aws s3 cp {rule_output_path}/ProfilerReport/profiler-output/ {output_dir}/ProfilerReport/ --recursive
display(HTML('<b>ProfilerReport : <a href="{}profiler-report.html">Profiler Report</a></b>'.format(output_dir+"/ProfilerReport/")))
# +
training_job_name = estimator.latest_training_job.job_name
region = sagemaker.Session().boto_region_name
tj = TrainingJob(estimator.latest_training_job.job_name, sagemaker.Session().boto_region_name)
# Retrieve a description of the training job description and the S3 bucket URI where the metric data are saved
tj.describe_training_job()
tj.get_config_and_profiler_s3_output_path()
# +
# Wait for the data to be available
tj.wait_for_sys_profiling_data_to_be_available()
tj.wait_for_framework_profiling_data_to_be_available()
# Get the metrics reader
system_metrics_reader = tj.get_systems_metrics_reader()
framework_metrics_reader = tj.get_framework_metrics_reader()
# Refresh the event file list
system_metrics_reader.refresh_event_file_list()
framework_metrics_reader.refresh_event_file_list()
# -
# ### Plotting Metrics
#
# SageMaker Debugger Profiler는 시스템 지표와 프레임워크 지표를 쉽게 확인할 수 있는 5가지 클래스들을 제공하고 있습니다.
# - MetricsHistogram
# - StepTimelineChart
# - StepHistogram
# - TimelineCharts
# - Heatmap
#
# `select_dimensions`와 `select_events` argument는 optional 파라메터로, 이를 지정하여 필요한 지표만 시각화할 수 있습니다. 예를 들어, `select_dimensions = ["GPU"]`를 지정하면 plot 메서드는 "GPU" 키워드를 포함하는 지표를 필터링합니다. 유사하게 `select_events = ["total"]`을 지정하면 "total" 이벤트 태그를 포함하는 지표를 필터링합니다.
# 자세한 내용은 https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-access-data-profiling-default-plot.html 를 참조하세요.
from smdebug.profiler.analysis.notebook_utils.metrics_histogram import MetricsHistogram
from smdebug.profiler.analysis.notebook_utils.step_timeline_chart import StepTimelineChart
from smdebug.profiler.analysis.notebook_utils.step_histogram import StepHistogram
from smdebug.profiler.analysis.notebook_utils.timeline_charts import TimelineCharts
from smdebug.profiler.analysis.notebook_utils.heatmap import Heatmap
from smdebug.profiler.analysis.notebook_utils.training_job import TrainingJob
# #### System metrics histogram
metrics_histogram = MetricsHistogram(system_metrics_reader)
metrics_histogram.plot(
starttime=0,
endtime=system_metrics_reader.get_timestamp_of_latest_available_file(),
select_dimensions=["CPU", "GPU", "I/O"],
select_events=["total"]
)
# #### System and framework metrics heatmap
view_heatmap = Heatmap(
system_metrics_reader,
framework_metrics_reader,
select_dimensions=["CPU", "GPU", "I/O"],
select_events=["total"],
plot_height=200
)
# #### Time line charts
# +
from smdebug.profiler.analysis.notebook_utils.timeline_charts import TimelineCharts

# Time series of the selected system metrics over the job's lifetime.
# FIX: the first dimension was the bytes literal b"CPU"; select_dimensions
# filters by string keyword (as in the histogram/heatmap cells above), so the
# bytes entry would never match the "CPU" dimension.
view_timeline_charts = TimelineCharts(
    system_metrics_reader,
    framework_metrics_reader,
    select_dimensions=["CPU", "GPU", "I/O"],  # optional
    select_events=["total"]  # optional
)
| 1.profiling/tf-resnet-profiling-single-gpu-single-node.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# Below, you will find a series of methods that I (NG) tried to install Dedalus on Graham. Not all work, but some do. Raw cells can be directly copied-and-pasted.
# -
# # Using Graham's native modules
# ## The version that is currently (Dec. 2019) running
#
# ### Installation
#
# Thanks to Julio/<NAME> from McGill.
#
# Create a dedalus folder, e.g.,
#
# `mkdir ${HOME}/dedalus-JF`
#
# and then
# + active=""
# module load python/3.7 scipy-stack fftw-mpi mpi4py
# virtualenv ${HOME}/dedalus-JF/ddenv
# source ${HOME}/dedalus-JF/ddenv/bin/activate
# export FFTW_PATH=$EBROOTFFTW
# export MPI_PATH=$EBROOTOPENMPI
# export FFTW_STATIC=1
# pip3 install dedalus
# -
# #### UPDATE 2020-12-04 (by <NAME>)
#
# The above instructions did not work for me anymore. I was getting an error which had these key lines:
# + active=""
# $ pip3 install dedalus
# [...]
# Collecting dedalus
# Using cached dedalus-2.2006.tar.gz (123 kB)
# Installing build dependencies ... error
# ERROR: Command errored out with exit status 1:
# command:
# [...]
# ERROR: Could not find a version that satisfies the requirement mpi4py>=2.0.0 (from versions: none)
# ERROR: No matching distribution found for mpi4py>=2.0.0
# ----------------------------------------
# ERROR: Command errored out with exit status 1:
# [...]
# /cvmfs/soft.computecanada.ca/custom/python/wheelhouse/generic -- 'cython >= 0.22' 'mpi4py >= 2.0.0' numpy setuptools wheel Check the logs for full command output.
# -
# I contacted Compute Canada and it seems to be an error with the modules that were loaded. They suggested a stack of modules to load and I was able to successfully install dedalus after entering these commands (assuming the same file structure and virtual environment name as above):
# + active=""
# virtualenv ${HOME}/dedalus-JF/ddenv
# source ${HOME}/dedalus-JF/ddenv/bin/activate
#
# module load mpi4py
# module load StdEnv/2020
# module load python/3.7.7
# export FFTW_PATH=$EBROOTFFTW
# export MPI_PATH=$EBROOTOPENMPI
# export FFTW_STATIC=1
# pip3 install dedalus
# -
# ### Execution
#
# <a id='current_execution'></a>
#
# To execute, you need a Dedalus script somewhere. The most universal way to do is to clone the Bitbucket repository, but to only keep the `examples` folder (and might as well keep the `docs` folder):
# + active=""
# cd ${HOME}/dedalus-JF
# hg clone https://bitbucket.org/dedalus-project/dedalus
# mv dedalus/examples .
# mv dedalus/docs .
# rm -r dedalus
#
# -
# We can now run the examples. Let's do a serial example first.
# + active=""
# cd ${HOME}/dedalus-JF/examples/ivp/1d_kdv_burgers/
# python kdv_burgers.py
#
# -
# Let's now try an interactive parallel run (**replace `def-ngrisoua` by the correct account!**).
# + active=""
# cd ${HOME}/dedalus-JF/examples/ivp/3d_rayleigh_benard/
#
# salloc --time=0:30:0 --ntasks=32 --account=def-ngrisoua # 32 cores for 15 minutes
# # you have to wait for the request to be granted
# mpiexec -n 32 python rayleigh_benard.py
# exit
#
# -
# Careful, 32 cores for 30 minutes is a lot to request and they may take a while to be granted. If you are doing multiple things at once, it is easy to forget about it, and have the request be granted and expire within the time frame. On the scheduler, the run takes 16 to 17 minutes walltime.
#
# For submission on the scheduler, here would be a submission sequence (place in a file and add `#!/bin/bash` as a first line to turn into script; location of said file would not matter here because all paths are absolute):
# + active=""
# mkdir -p ${SCRATCH}/dedalus-JF/
#
# cp -r ${HOME}/dedalus-JF/examples/ivp/3d_rayleigh_benard ${SCRATCH}/dedalus-JF/.
# cp -r ${HOME}/dedalus-JF/lanceur-gra.slrm ${SCRATCH}/dedalus-JF/3d_rayleigh_benard/.
# cd ${SCRATCH}/dedalus-JF/3d_rayleigh_benard
#
# sbatch --job-name=3d_RB --nodes=1 --time=0-00:30 lanceur-gra.slrm
#
# -
# The `lanceur-gra.slrm` mentioned above would be placed in `${HOME}/dedalus-JF/` and would contain:
# + active=""
# #!/bin/bash
# #SBATCH --account=def-ngrisoua # ADJUST
# #SBATCH --mail-user=<EMAIL> # ADJUST
# #SBATCH --mail-type=ALL
# #SBATCH --ntasks-per-node=32
# #SBATCH --mem=0
#
# export FFTW_PATH=$EBROOTFFTW
# export MPI_PATH=$EBROOTOPENMPI
# export FFTW_STATIC=1
# # export OMPI_MCA_mpi_warn_on_fork=0
#
# module purge
# module load python/3.7 scipy-stack fftw-mpi mpi4py
#
# source ${HOME}/dedalus-JF/ddenv/bin/activate
#
# mpiexec -n ${SLURM_NTASKS} python rayleigh_benard.py
# # srun python rayleigh_benard.py # I'm not sure which one is best: mpiexec or srun.
#
# exit # just to make sure
#
# -
# When I tried, it ran in 16 mins and took 5 to 6 Gb.
# ### To-do's
#
# * Would be great to be able to create a virtual environment from within a job. The following link suggests that it may improve performance:
#
# https://docs.computecanada.ca/wiki/Python#Creating_virtual_environments_inside_of_your_jobs
#
# Current problem is that it seems to require an internet connection for the `pip install dedalus`, which we don't have on a compute node.
#
# * Would be great to download a specific hash, for reproducibility purposes.
# ***
#
# *Everything below is outdated.*
#
# ## <NAME>'s version (7 July 2018)
#
# Last tested in December 2019. Support from Compute Canada back in August 2018 was under Ticket `#030957`.
#
# The main difference with the version above is that instead of doing `pip install dedalus` directly, the pip install fetches the latest version on Bitbucket. In 2018, as well as with the method above, the version is and was `dedalus-2.1810` (I think... it was `2.18`-something for sure). As of 9 Dec 2019, the version is `dedalus-2.1905b`. Other differences are that the python modules that are loaded and pip installed are slightly different.
#
# This version and the next did not work back in 2018. The best I got was to run serial jobs, but running parallel jobs eventually led to a segmentation fault.
#
# **Edit April 2019**: I compared the execution time on a serial job (`examples/ivp/2d_rayleigh_benard`). While the user time was the same for both methods (1m02s), the real time was much longer for this method (>5m) than for the Conda method (1m06s).
#
# **Edit 9 Dec. 2019**: It works now!
#
# ### Installation
#
# Starting from `$HOME`,
# + active=""
# module purge
# module load openmpi fftw-mpi
# export MPI_PATH=$EBROOTOPENMPI
# export FFTW_PATH=$EBROOTFFTW
# module load python/3.7
# # note: Naumov's python version was 3.6 back then; both work as of 9 Dec 2019
#
# mkdir dedalus-EN
# cd dedalus-EN
# mkdir ddenv ; virtualenv ddenv; source ddenv/bin/activate
# pip install cython numpy mpi4py scipy h5py --no-index
# pip install hg+https://bitbucket.org/dedalus-project/dedalus
#
# -
# The second-to-last line above basically reproduces some of the contents of `requirements.txt` and it seems to be enough. Maybe we should just install all of the contents of `requirements.txt`. The last line installs Dedalus from source.
# Finally, install dedalus from source:
#
# `(ddenv) $ pip install hg+https://bitbucket.org/dedalus-project/dedalus`
#
# ### Execution
#
# We reproduce the steps [of the current execution procedure](#current_execution), replacing the folder `dedalus-JF` with `dedalus-EN`.
# + active=""
# cd ${HOME}/dedalus-EN
# hg clone https://bitbucket.org/dedalus-project/dedalus
# mv dedalus/examples .
# mv dedalus/docs .
# rm -r dedalus
#
# -
# We can now run the examples. Let's do a serial example first.
# + active=""
# cd ${HOME}/dedalus-EN/examples/ivp/1d_kdv_burgers/
# python kdv_burgers.py
#
# -
# Let's skip the interactive session and jump to the scheduler submission. Here would be a submission sequence (place in a file and add `#!/bin/bash` as a first line to turn into script; location of said file would not matter here because all paths are absolute):
# + active=""
# mkdir -p ${SCRATCH}/dedalus-EN/
#
# cp -r ${HOME}/dedalus-EN/examples/ivp/3d_rayleigh_benard ${SCRATCH}/dedalus-EN/.
# cp -r ${HOME}/dedalus-EN/lanceur-gra.slrm ${SCRATCH}/dedalus-EN/3d_rayleigh_benard/.
# cd ${SCRATCH}/dedalus-EN/3d_rayleigh_benard
#
# sbatch --job-name=3d_RB --nodes=1 --time=0-00:30 lanceur-gra.slrm
#
# -
# The `lanceur-gra.slrm` is the same as above, just replace `dedalus-JF` by `dedalus-EN` in it.
# ### Remarks
#
# * Back in the Summer of 2018, `fftw` did not work for some reason. <NAME> had found a workaround (see below), though I encountered a problem later (see below, again). As of today, the version above works. I am keeping M.B.'s workaround, in case it happens again. As of 9 Dec. 2019 though, you can skip.
#
# * Same to-do's apply as for the currently working version.
#
# * This version uses a little more memory than the currently working version. For the `3d_rayleigh_benard` case, it uses 5.99 GB, and 5.45 GB for the previous version. This is in line with my memories from some time back.
# ## <NAME> to the rescue (18 July - 9 August 2018)
# M.B. found a solution to the problem that existed here:
#
# https://groups.google.com/forum/#!msg/dedalus-users/01kC06t7S9g/AlILy-6pDAAJ
# So, first, remove everything: deactivate whatever is activated, purge whatever is loaded, delete all directories, and clone the code in a folder with its own name, to not mix things up with other installs.
# + active=""
# hg clone https://bitbucket.org/dedalus-project/dedalus dedalus-hg
# cd dedalus-hg
# -
# Then, edit Dedalus's `setup.py` in the following manner. Look for this passage:
# + active=""
# Extension(
# name='dedalus.libraries.fftw.fftw_wrappers',
# sources=['dedalus/libraries/fftw/fftw_wrappers.pyx'],
# include_dirs=include_dirs,
# libraries=libraries,
# library_dirs=library_dirs,
# extra_compile_args=["-Wno-error=declaration-after-statement"]),
# -
# and add an extra bit at the end so that it becomes
# + active=""
# Extension(
# name='dedalus.libraries.fftw.fftw_wrappers',
# sources=['dedalus/libraries/fftw/fftw_wrappers.pyx'],
# include_dirs=include_dirs,
# libraries=libraries,
# library_dirs=library_dirs,
# extra_compile_args=["-Wno-error=declaration-after-statement"],
# extra_link_args=["-Xlinker",
# "-Bsymbolic",
# "-Wl,--whole-archive",
# get_lib('fftw') + "/libfftw3.a",
# get_lib('fftw') + "/libfftw3_omp.a",
# get_lib('fftw') + "/libfftw3_mpi.a",
# "-Wl,--no-whole-archive"]),
# -
# After that, run:
# + active=""
# module purge
# module load openmpi fftw-mpi mpi4py
# export MPI_PATH=$EBROOTOPENMPI
# export FFTW_PATH=$EBROOTFFTW
# module load python/3.6
# mkdir ddenv ; virtualenv ddenv; source ddenv/bin/activate
# # at this point you need to be one level above the cloned dedalus
# pip install -r requirements.txt
#
# -
# I still don't quite understand what "creating a wheel" means, and you may want to vary the name of the wheel, but here it is.
# + active=""
# pip wheel . # this will create a binary wheel out of the Dedalus source code
# pip install dedalus-2.1810-cp36-cp36m-linux_x86_64.whl
#
# -
# At this point, the examples should run interactively on one processor.
# ## Running interactively
# In any case, first load everything:
# + active=""
# module purge
# module load python/3.6 openmpi fftw-mpi mpi4py hdf5-mpi
# export MPI_PATH=$EBROOTOPENMPI
# export FFTW_PATH=$EBROOTFFTW
# cd ~/project/ngrisoua/
# source ddenv/bin/activate
# -
# If you then only need to check on one processor:
# + active=""
# cd dedalus/examples/ivp/1d_kdv_burgers/
# # or move examples in a working directory and go there
# python3.6 kdv_burgers.py
#
# -
# This one works.
#
# If on the other hand you want to try a few processors, follow the instructions of https://docs.computecanada.ca/wiki/Running_jobs#Interactive_jobs:
# + active=""
# cd ~/project/ngrisoua/dedalus/examples/ivp/2d_rayleigh_benard/
# salloc --time=0:5:0 --ntasks=4 --account=def-ngrisoua # 4 cores for 5 minutes
# # you may have to wait for the request to be granted
# mpiexec -n 4 python3.6 rayleigh_benard.py
# exit
#
# -
# After running the initializing steps (creating the pencil mostly), a segfault happens.
# ## Submitting a job
# The steps below pretty much reproduce the steps above, including what does not work.
#
# I will assume I installed dedalus such that its folder is `~/projects/def-ngrisoua/ngrisoua/dedalus`, which you might want to adapt.
# Let me use the example in `~/projects/def-ngrisoua/ngrisoua/dedalus/examples/ivp/3d_rayleigh_benard`, because a 3D experiment seems appropriate on an HPC machine.
#
# Below is a 'cookbook' of the list of steps one has to do to submit a slurm job.
# + active=""
# #!/bin/bash
#
# setup=3d_rayleigh_benard
#
# # Prepare scratch for code and data dump
# rm -r ${SCRATCH}/dedalus/$setup
# cp -r ${HOME}/projects/def-ngrisoua/ngrisoua/dd_tests/${setup} ${SCRATCH}/dedalus/.
# cd ${SCRATCH}/dedalus/$setup
#
# sbatch lanceur.slrm
#
# -
# And here is what a typical slurm script `lanceur.slrm` would look like:
# + active=""
# #!/bin/bash
# #SBATCH --job-name=3d_rayleigh_benard # has to be same as folder where scripts are
# #SBATCH --ntasks=8
# #SBATCH --mem-per-cpu=3G
# #SBATCH --account=def-JD # whatever your account is
# #SBATCH --time=0-00:15 # DD-HH:MM
# #SBATCH --mail-user=<EMAIL>
# #SBATCH --mail-type=ALL
#
# module purge
# module load openmpi
# module load fftw-mpi
# module load hdf5-mpi
# module load mpi4py
# module load python/3.6
#
# source ${HOME}/projects/def-ngrisoua/ngrisoua/ddenv/bin/activate
#
# export MPI_PATH=$EBROOTOPENMPI
# export FFTW_PATH=$EBROOTFFTW
#
# srun python3.6 ${SLURM_JOB_NAME}.py # python script + folder name have to match
#
# #srun python3.6 merge.py snapshots
# #srun python3.6 plot_2d_series.py snapshots/*.h5
#
# deactivate
#
# exit
# -
# After which the same segfault happens.
# # Using the conda install
#
# Last tested in April 2019, with the help of <NAME> (Compute Canada Support Ticket `#047249`).
#
# The code works serially, and in parallel up until 16 processors. Beyond 16 processors, I run into an error. Since I have two versions that work as of 9 Dec. 2019, I did not update what follows.
# ## Installing
#
# To keep all of the different install neatly separated, create your own test folder, here `dedalus-conda`.
# + active=""
# mkdir ~/dedalus-conda
# cd ~/dedalus-conda
# module --force purge
# module load nixpkgs
# wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
# chmod +x ./Miniconda3-latest-Linux-x86_64.sh
# ./Miniconda3-latest-Linux-x86_64.sh
#
# -
# A few prompts will appear: do you agree with the licence agreement, location of installation, maybe some more, until the last one:
#
# + active=""
# Do you wish the installer to initialize Miniconda3
# in your /home/ngrisoua/.bashrc ? [yes|no]
# -
# I answered yes. Note that this adds lines to your `.bashrc` which include paths like `/home/yourusername/miniconda3`. I read somewhere that is it safer to replace `/home/yourusername` by `${HOME}`, in case Graham people decided to change their path structures.
#
# The installation then proceeds as
# + active=""
# wget --no-check-certificate https://raw.githubusercontent.com/DedalusProject/conda_dedalus/master/install_conda.sh
#
# -
# *Note: the above is one long line.*
#
# *Note for the future: for reproducibility, I should find a way to download a specific hash, rather than the latest version.*
# + active=""
# export MPI_PATH=$EBROOTOPENMPI
# export FFTW_PATH=$EBROOTFFTW
# conda activate base
# bash install_conda.sh
#
# -
# ***
#
# **Edit 5 Dec. 2019:** I did not need the two export statements above before. Today, I needed them for the last command to execute.
#
# ***
#
# At this point, the dedalus environment is installed as part of the Conda distribution. Note that you do not have access to all of the examples, provided when you download dedalus. You still have to either download them, or try the examples below with your favourite dedalus script.
# ## Running interactively
# + active=""
# module --force purge
# module load nixpkgs
# # export PATH="$HOME/miniconda3/bin:$PATH" # .bashrc
# conda activate dedalus
#
# -
# Serial test:
# + active=""
# cd PATH/TO/examples/ivp/1d_kdv_burgers/
# # or move examples in a working directory and go there
# python3.7 kdv_burgers.py
#
# -
# Parallel test:
# + active=""
# cd PATH/TO/examples/ivp/2d_rayleigh_benard/
# salloc --time=0:5:0 --ntasks=4 --account=def-ngrisoua # 4 cores for 5 minutes
# # you may have to wait for the request to be granted
#
# + active=""
# mpiexec -n 4 python3.7 rayleigh_benard.py
# exit
#
# -
# ## Submitting to the scheduler
#
# Below is a 'cookbook' of the steps, one needs to follow in order to submit to the scheduler. They could form the basis of a `master-core.bash` script:
# + active=""
# #!/bin/bash
#
# setup=3d_rayleigh_benard
#
# # Prepare scratch for code and data dump
# rm -r ${SCRATCH}/dedalus/$setup
# cp -r ${HOME}/dedalus-vessel/dd_tests/${setup} ${SCRATCH}/dedalus/.
# cd ${SCRATCH}/dedalus/$setup
#
# sbatch lanceur.slrm
# -
# And here is what a typical slurm script `lanceur.slrm` would look like:
# + active=""
# #!/bin/bash
# #SBATCH --job-name=3d_rayleigh_benard # has to be same as folder where scripts are
# #SBATCH --ntasks=8
# #SBATCH --mem-per-cpu=3G
# #SBATCH --account=def-ngrisoua # or whatever your account is
# #SBATCH --time=0-00:15 # DD-HH:MM
# #SBATCH --mail-user=<EMAIL>
# #SBATCH --mail-type=ALL
#
# module --force purge
# module load nixpkgs
#
# # I may have to do this since it might be outside of .bashrc's reach
# # source ${HOME}/miniconda3/etc/profile.d/conda.sh
#
# conda activate dedalus
#
# srun python3.7 ${SLURM_JOB_NAME}.py # python script + folder name have to match
#
# #srun python3.6 merge.py snapshots
# #srun python3.6 plot_2d_series.py snapshots/*.h5
#
# conda deactivate
#
# exit
# -
| dedalus-graham.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from functools import partial
import holoviews as hv
import numpy as np
import panel as pn
import param
import pyvista as pv
from holoviews.operation.datashader import rasterize
from bokeh.util.serialization import make_globally_unique_id
from pyvista import examples
from scipy.ndimage import zoom
# CSS tweaks for the GoldenLayout panes: spacing between widget boxes and
# scrollable content areas (class names are referenced by the template below).
css = '''
.custom-wbox > div.bk {
padding-right: 10px;
}
.scrollable {
overflow: auto !important;
}
'''
# GoldenLayout (and its jQuery dependency) are loaded from CDNs so the custom
# template below can build a dockable multi-pane layout.
js_files = {'jquery': 'https://code.jquery.com/jquery-1.11.1.min.js',
            'goldenlayout': 'https://golden-layout.com/files/latest/js/goldenlayout.min.js'}
css_files = ['https://golden-layout.com/files/latest/css/goldenlayout-base.css',
             'https://golden-layout.com/files/latest/css/goldenlayout-dark-theme.css']
# Load the VTK extension plus the external JS/CSS into Panel.
pn.extension('vtk', js_files=js_files, raw_css=[css], css_files=css_files)
# Dark Bokeh theme; every Image gets a hover tool and responsive sizing.
hv.renderer('bokeh').theme = 'dark_minimal'
hv.opts.defaults(hv.opts.Image(responsive=True, tools=['hover']))
# -
# ## Declare callbacks
# +
class ImageSmoother(param.Parameterized):
    """Parameterized smoothing for the 2D slice images.

    Exposes a ``smooth_fun`` callable (rebuilt whenever ``smooth_level`` or
    ``order`` change) that upsamples a 2D array with ``scipy.ndimage.zoom``.
    """

    smooth_fun = param.Parameter(default=None)  # callable applied to 2D arrays
    smooth_level = param.Integer(default=5, bounds=(1, 10))  # zoom factor
    order = param.Selector(default=1, objects=[1, 2, 3])  # spline interpolation order

    def __init__(self, **params):
        # Python-3 zero-argument super() (was the legacy two-argument form).
        super().__init__(**params)
        self._update_fun()

    @param.depends('order', 'smooth_level', watch=True)
    def _update_fun(self):
        # Rebuild the smoothing callable so dependents see a new value.
        self.smooth_fun = lambda x: zoom(x, zoom=self.smooth_level, order=self.order)
def update_camera_projection(*evts):
    # Toggle-watcher callback: copy the new toggle value into the VTK camera's
    # parallel-projection flag, then re-trigger 'camera' so the pane re-renders.
    volume.camera['parallelProjection'] = evts[0].new
    volume.param.trigger('camera')
def hook_reset_range(plot, elem, lbrt):
    """Bokeh plot hook: pin the reset ranges (and current view) to *lbrt*.

    *lbrt* is a (left, bottom, right, top) bounds tuple; *elem* is unused but
    required by the HoloViews hook signature. Nothing happens when the reset
    ranges already match the requested bounds.
    """
    left, bottom, right, top = lbrt
    fig = plot.handles['plot']
    new_x = (left, right)
    new_y = (bottom, top)
    stale = (new_x != (fig.x_range.reset_start, fig.x_range.reset_end)
             or new_y != (fig.y_range.reset_start, fig.y_range.reset_end))
    if stale:
        # Update both the reset targets and the live ranges in one pass.
        fig.x_range.reset_start, fig.x_range.reset_end = new_x
        fig.x_range.start, fig.x_range.end = new_x
        fig.y_range.reset_start, fig.y_range.reset_end = new_y
        fig.y_range.start, fig.y_range.end = new_y
def image_slice(dims, array, lbrt, mapper, smooth_fun):
    """Return a styled HoloViews Image for one 2D slice of the volume.

    dims       : kdims of the image (e.g. ['x', 'y']).
    array      : 2D slice data.
    lbrt       : (left, bottom, right, top) bounds of the slice plane.
    mapper     : dict with 'low'/'high'/'palette' from the VTK pane, or falsy
                 to fall back to the data range and the 'fire' colormap.
    smooth_fun : callable applied to the array before display.
    """
    data = np.asarray(array)
    if mapper:
        low, high, cmap = mapper['low'], mapper['high'], mapper['palette']
    else:
        low, high, cmap = data.min(), data.max(), 'fire'
    image = hv.Image(smooth_fun(data), bounds=lbrt, kdims=dims, vdims='Intensity')
    # The hook keeps the plot's reset ranges in sync with the slice bounds.
    return image.opts(
        clim=(low, high),
        cmap=cmap,
        hooks=[partial(hook_reset_range, lbrt=lbrt)],
    )
# -
# ## Declare Panel
# +
# Download datasets
# (pyvista example volumes; fetched over the network on first use)
head = examples.download_head()
brain = examples.download_brain()
dataset_selection = pn.widgets.Select(name='Dataset', value=head, options={'Head': head, 'Brain': brain})
# Main 3D view: VTK volume rendering with the orthogonal slices displayed.
volume = pn.pane.VTKVolume(
    dataset_selection.value, sizing_mode='stretch_both', height=400,
    display_slices=True, orientation_widget=True, render_background="#222222",
    colormap='blue2cyan'
)
# Changing the dataset selection swaps the rendered object in place.
dataset_selection.link(target=volume, value='object')
# jslink=False: these parameters need a Python round-trip, not pure JS links.
volume_controls = volume.controls(jslink=False, parameters=[
    'render_background', 'display_volume', 'display_slices',
    'slice_i', 'slice_j', 'slice_k', 'rescale'
])
toggle_parallel_proj = pn.widgets.Toggle(name='Parallel Projection', value=False)
toggle_parallel_proj.param.watch(update_camera_projection, ['value'], onlychanged=True)
smoother = ImageSmoother()
# Each image_slice_* callback re-renders one orthogonal 2D slice whenever the
# slice index, color mapper, smoothing function or volume object changes.
@pn.depends(si=volume.param.slice_i, mapper=volume.param.mapper,
            smooth_fun=smoother.param.smooth_fun, vol=volume.param.object)
def image_slice_i(si, mapper, smooth_fun, vol):
    # Flat scalar array -> 3D volume (VTK data uses Fortran ordering).
    arr = vol.active_scalar.reshape(vol.dimensions, order='F')
    # Bounds of the (y, z) plane as (left, bottom, right, top).
    lbrt = vol.bounds[2], vol.bounds[4], vol.bounds[3], vol.bounds[5]
    return image_slice(['y','z'], arr[si,:,::-1].T, lbrt, mapper, smooth_fun)

@pn.depends(sj=volume.param.slice_j, mapper=volume.param.mapper,
            smooth_fun=smoother.param.smooth_fun, vol=volume.param.object)
def image_slice_j(sj, mapper, smooth_fun, vol):
    # Same as image_slice_i but slicing along j (the (x, z) plane).
    arr = vol.active_scalar.reshape(vol.dimensions, order='F')
    lbrt = vol.bounds[0], vol.bounds[4], vol.bounds[1], vol.bounds[5]
    return image_slice(['x','z'], arr[:,sj,::-1].T, lbrt, mapper, smooth_fun)

@pn.depends(sk=volume.param.slice_k, mapper=volume.param.mapper,
            smooth_fun=smoother.param.smooth_fun, vol=volume.param.object)
def image_slice_k(sk, mapper, smooth_fun, vol):
    # Same as image_slice_i but slicing along k (the (x, y) plane).
    arr = vol.active_scalar.reshape(vol.dimensions, order='F')
    lbrt = vol.bounds[0], vol.bounds[2], vol.bounds[1], vol.bounds[3]
    return image_slice(['x', 'y'], arr[:,::-1,sk].T, lbrt, mapper, smooth_fun)
# Rasterize the dynamic slice maps so large slices stay responsive in Bokeh.
dmap_i = rasterize(hv.DynamicMap(image_slice_i))
dmap_j = rasterize(hv.DynamicMap(image_slice_j))
dmap_k = rasterize(hv.DynamicMap(image_slice_k))
# Left-hand control column: dataset selector, projection toggle, the volume
# controls (skipping the first auto-generated title widget), and smoothing.
controller = pn.WidgetBox(
    pn.Column(dataset_selection, toggle_parallel_proj, *volume_controls[1:], sizing_mode='fixed'),
    pn.Param(smoother, parameters=['smooth_level', 'order']),
    pn.layout.VSpacer(),
    css_classes=['panel-widget-box', 'custom-wbox'], sizing_mode='stretch_height'
)
# -
# ## Set up template
# +
# GoldenLayout Jinja2 template. The string is %-formatted once (see below) to
# inject the rendering context ('server' or 'notebook'), which is why every
# literal Jinja2 delimiter is doubled ({%% ... %%}). The 'view' component
# embeds a Panel root by name; roots.controller / roots.scene3d /
# roots.slice_* are registered via tmpl.add_panel further down.
template = """
{%% extends base %%}
<!-- goes in body -->
{%% block contents %%}
{%% set context = '%s' %%}
{%% if context == 'notebook' %%}
{%% set slicer_id = get_id() %%}
<div id='{{slicer_id}}'></div>
{%% endif %%}
<script>
var config = {
settings: {
hasHeaders: true,
constrainDragToContainer: true,
reorderEnabled: true,
selectionEnabled: false,
popoutWholeStack: false,
blockedPopoutsThrowError: true,
closePopoutsOnUnload: true,
showPopoutIcon: false,
showMaximiseIcon: true,
showCloseIcon: false
},
content: [{
type: 'row',
content:[
{
type: 'component',
componentName: 'view',
componentState: { model: '{{ embed(roots.controller) }}',
title: 'Controls',
width: 350,
css_classes:['scrollable']},
isClosable: false,
},
{
type: 'column',
content: [
{
type: 'row',
content:[
{
type: 'component',
componentName: 'view',
componentState: { model: '{{ embed(roots.scene3d) }}', title: '3D View'},
isClosable: false,
},
{
type: 'component',
componentName: 'view',
componentState: { model: '{{ embed(roots.slice_i) }}', title: 'Slice I'},
isClosable: false,
}
]
},
{
type: 'row',
content:[
{
type: 'component',
componentName: 'view',
componentState: { model: '{{ embed(roots.slice_j) }}', title: 'Slice J'},
isClosable: false,
},
{
type: 'component',
componentName: 'view',
componentState: { model: '{{ embed(roots.slice_k) }}', title: 'Slice K'},
isClosable: false,
}
]
}
]
}
]
}]
};
{%% if context == 'notebook' %%}
var myLayout = new GoldenLayout( config, '#' + '{{slicer_id}}' );
$('#' + '{{slicer_id}}').css({width: '100%%', height: '800px', margin: '0px'})
{%% else %%}
var myLayout = new GoldenLayout( config );
{%% endif %%}
myLayout.registerComponent('view', function( container, componentState ){
const {width, css_classes} = componentState
if(width)
container.on('open', () => container.setSize(width, container.height))
if (css_classes)
css_classes.map((item) => container.getElement().addClass(item))
container.setTitle(componentState.title)
container.getElement().html(componentState.model);
container.on('resize', () => window.dispatchEvent(new Event('resize')))
});
myLayout.init();
</script>
{%% endblock %%}
"""
# %-format the template twice: once for the server context, once for notebook.
tmpl = pn.Template(template=(template % 'server'), nb_template=(template % 'notebook'))
# The notebook variant calls get_id() to give its layout <div> a unique id.
tmpl.nb_template.globals['get_id'] = make_globally_unique_id
# Register each Panel root under the name the template embeds it by.
tmpl.add_panel('controller', controller)
tmpl.add_panel('scene3d', volume)
tmpl.add_panel('slice_i', pn.panel(dmap_i, sizing_mode='stretch_both'))
tmpl.add_panel('slice_j', pn.panel(dmap_j, sizing_mode='stretch_both'))
tmpl.add_panel('slice_k', pn.panel(dmap_k, sizing_mode='stretch_both'))
tmpl.servable(title='VTKSlicer')
| examples/gallery/demos/VTKSlicer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# # NoSQL (HBase) (sesión 6)
# + Collapsed="false"
from pprint import pprint as pp
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
# Consistent plot styling for the whole notebook.
matplotlib.style.use('ggplot')
# + Collapsed="false"
# !pip install happybase
# + Collapsed="false"
import happybase
from contextlib import contextmanager
HBASEHOST = 'hbase-thriftserver'
class Connection:
    """Resilient wrapper around a happybase ConnectionPool.

    ``connection()`` is a context manager that yields a verified live
    connection, retrying individual connections and regenerating the whole
    pool when the Thrift server drops connections.
    """

    def __init__(self, host):
        self.host = host
        self._genpool()

    def _genpool(self):
        # Fresh pool of 5 connections against the HBase Thrift server.
        self.pool = happybase.ConnectionPool(size=5, host=self.host)

    @contextmanager
    def connection(self):
        for _ in range(5):  # up to 5 attempts to regenerate the pool
            for _ in range(5):  # up to 5 attempts to connect
                with self.pool.connection() as connection:
                    try:
                        connection.tables()  # cheap health check
                        yield connection
                        return
                    except Exception:
                        # Broken connection: try the next one from the pool.
                        pass
            self._genpool()
        raise Exception("HBase Connection Error")
# + Collapsed="false"
hbasecon = Connection(HBASEHOST)
# + Collapsed="false"
# Smoke test: list the existing tables to verify the Thrift connection works.
with hbasecon.connection() as connection:
    print(connection.tables())
# + Collapsed="false"
# http://stackoverflow.com/a/30525061/62365
class DictTable(dict):
    """dict subclass that renders itself as an HTML table in IPython/Jupyter.

    Expects byte-string keys and values (as returned by happybase) and
    decodes them as UTF-8 for display. Based on
    http://stackoverflow.com/a/30525061/62365
    """

    def _repr_html_(self):
        rows = (
            "<tr><td>{}</td><td>{}</td></tr>".format(
                key.decode('utf-8'), value.decode('utf-8'))
            for key, value in self.items()
        )
        return "<table width=100%>" + "".join(rows) + "</table>"
# + [markdown] Collapsed="false"
# ## EJERCICIOS
# + [markdown] Collapsed="false"
# En los siguientes ejercicios, usando `happybase`, utiliza o bien `scan` o bien `row` donde sea conveniente:
# + Collapsed="false"
# Re-create the helper so the exercises below start from a fresh pool.
hbasecon = Connection(HBASEHOST)
# + [markdown] Collapsed="false"
# ### EJERCICIO: Mostrar la familia de columnas `revision` para la entrada `ASCII` de la tabla `wikipedia`.
# + [markdown] Collapsed="false"
# Al estar buscando solo la información de una fila, la cual sabemos su nombre, solamente tenemos que usar la función `row` para esa fila e indicar que nos devuelva las columnas de la familia `b'revision'`.
# + Collapsed="false"
# Single-row lookup: fetch only the 'revision' column family of row b'ASCII'
# and render it as an HTML table.
with hbasecon.connection() as connection:
    wikipedia = connection.table('wikipedia')
    DictTable(wikipedia.row(b'ASCII', columns=[b'revision']))
# + [markdown] Collapsed="false"
# ### EJERCICIO: Mostrar las 20 primeras filas de la tabla `wikipedia` cuyas columnas empiecen por 'com'.
# + [markdown] Collapsed="false"
# Con el filtro `ColumnPrefixFilter` podemos obtener todas las columnas que empiezan con `com`. Como queremos encontrar todas aquellas filas que tengan columnas con ese requisito usamos `scan` en lugar de `row`. Indicamos ademas un limite igual a `20` para que paré al encontrar esa cantidad de resultados.
# + Collapsed="false"
# Scan with a ColumnPrefixFilter: rows having at least one column that starts
# with 'com'; stop after 20 result rows.
with hbasecon.connection() as connection:
    wikipedia = connection.table('wikipedia')
    for key, data in wikipedia.scan(filter="ColumnPrefixFilter('com')", limit=20):
        print (key, '->', data)
# + [markdown] Collapsed="false"
# ### EJERCICIO: Mostrar las 20 primeras filas de la tabla `wikipedia` cuyas columnas empiecen por 'com' y la clave de fila empieza por 'B'.
# + [markdown] Collapsed="false"
# Este ejercicio es igual que el anterior con el añadido de que queremos que la clave de la fila empieze por la letra `B`, por lo que añadimos a la función la opción `row_start='B'`.
# + Collapsed="false"
# Same prefix scan, but starting the scan at keys >= 'B'.
# NOTE(review): row_start is a str while row keys are bytes — happybase
# appears to accept both; confirm encoding if keys are non-ASCII.
with hbasecon.connection() as connection:
    wikipedia = connection.table('wikipedia')
    for key, data in wikipedia.scan(filter="ColumnPrefixFilter('com')", row_start='B', limit=20):
        print (key, '->', data)
# + [markdown] Collapsed="false"
# ### EJERCICIO: Mostrar sólo la columna `revision:author` de las filas de la tabla `wikipedia` cuya clave empiece por `a` y termine por `a` (obviando mayúsculas y minúsculas).
# + [markdown] Collapsed="false"
# Nuevamente se piden todas las filas que cumplan que empiezen por `a` y terminen con `a` obviando mayusculas y minusculas (lo hacemos mediante un filtro `RowFilter`) y mostramos solo la columna `revision:author`. Utilizamos la función `scan`.
# + Collapsed="false"
# RowFilter with a regex: keys that start AND end with 'a'/'A'; only the
# revision:author column is fetched for the matching rows.
with hbasecon.connection() as connection:
    wikipedia = connection.table('wikipedia')
    for key, data in wikipedia.scan(columns=[b'revision:author'], filter="RowFilter(=, 'regexstring:^[aA].*[aA]$')"):
        print (key, '->', data)
# + [markdown] Collapsed="false"
# ### EJERCICIO: Mostrar las filas de la tabla `users` (sólo la columna `rawdata:Location`) de usuarios de España (se supondrá que su localización (columna `rawdata:Location`) contiene `España` o `ES`, obviando mayúsculas y minúsculas).
# + Collapsed="false"
# Scan the 'users' table for users located in Spain: rawdata:Location must
# contain 'españa' or the word 'ES' (case-insensitive); only that column is
# returned. (Local renamed from the copy-pasted 'wikipedia' to 'users'.)
with hbasecon.connection() as connection:
    users = connection.table('users')
    for key, data in users.scan(columns=[b'rawdata:Location'], filter="SingleColumnValueFilter('rawdata','Location',=,'regexstring:^.*(?i)(españa|\\bes\\b).*$')"):
        print(key, '->', data)
# + [markdown] Collapsed="false"
# ### EJERCICIO: Crear una nueva tabla `poststags` que, de forma eficiente, para cada _tag_, liste los `Id` de los posts que utilizan ese _tag_.
#
# *Nota: El código de ejemplo para separar todas las etiquetas del campo Tags de un post está en la sesión 1.*
# + [markdown] Collapsed="false"
# Este ejercicio es algo mas completo:
# - Creamos una nueva tabla con una familia llamada `posts`
# - Recorremos la tabla `posts` y para cada post con `PostTypeId=1` (mediante un filtro) obtenemos la columna `rawdata:Tags`, obteniendo los tags y añadiendo una fila por tag con una columna nombrada: `posts:<id_post>`
# + Collapsed="false"
table_name = 'poststags'
# Create table poststags
# Drop any previous version first so this cell is re-runnable; the delete
# fails (and is reported) when the table does not exist yet.
with hbasecon.connection() as connection:
    try:
        connection.delete_table(table_name, disable=True)
    except Exception:  # was a bare 'except:'; don't swallow KeyboardInterrupt
        print('La tabla {} no existe'.format(table_name))
    connection.create_table(
        table_name,
        {
            'posts': dict()  # one column family; one column per post id
        }
    )
# + Collapsed="false"
import re

# Populate 'poststags': for every question post (PostTypeId == 1) read its
# rawdata:Tags value, extract the individual <tag> tokens and write one row
# per tag, with one column named 'posts:<post id>' per post.
with hbasecon.connection() as connection:
    posts = connection.table('posts')
    tabla = connection.table(table_name)
    with tabla.batch(batch_size=500) as bp:
        for key, data in posts.scan(columns=['rawdata:Tags'], filter="SingleColumnValueFilter('rawdata','PostTypeId', =, 'binary:1')"):
            post_id = 'posts:' + key.decode('utf-8')
            raw_tags = data.get(b'rawdata:Tags')
            if raw_tags is None:
                # Question without a Tags column: the original would have
                # crashed on None.decode(); just skip it.
                continue
            for tag in re.findall('<(.*?)>', raw_tags.decode('utf-8')):
                bp.put(tag, {post_id: None})
# + [markdown] Collapsed="false"
# Finalmente hacemos una consulta de ejemplo:
# + Collapsed="false"
with hbasecon.connection() as connection:
tabla = connection.table(table_name)
[key.decode('utf-8') for key in tabla.row('.net', columns=['posts']).keys()]
# + [markdown] Collapsed="false"
# ### EJERCICIO: Construya una tabla (de la forma más eficiente) que sirva para, dados un ID de usuario que pregunta y un ID de otro usuario que responde, se pueda decir (en orden constante) si forman una pareja al estilo de la RQ4. Con esa tabla, responder a la RQ4.
# + [markdown] Collapsed="false"
# Este ejercicio es similar al anterior aunque algo más complicado.
# - Creamos una tabla nueva con una colección llamada `helper`
# - Recorremos la tabla posts, obteniendo solamente las preguntas. Para cada pregunta:
# - Se obtiene el post que esta marcado como solución (`b'rawdata:AcceptedAnswerId'`)
# - El usuario que hace la pregunta
# - El usuario que hace la respuesta marcada como solución
# - Añadimos una línea por usuario y una columna por cada usuario que crea una solución para una pregunta del usuario
# + Collapsed="false"
table_name = 'user_correlation'
# Recreate the 'user_correlation' table (the original comment wrongly said
# 'poststags'): drop it if it exists, then create family 'helper'.
with hbasecon.connection() as connection:
    try:
        connection.delete_table(table_name, disable=True)
    except Exception:
        # Narrowed from a bare `except:` so system-exiting exceptions
        # still propagate.
        print('La tabla {} no existe'.format(table_name))
    connection.create_table(
        table_name,
        {
            'helper': dict()
        }
    )
# + Collapsed="false"
question_columns = [b'rawdata:OwnerUserId', b'rawdata:PostTypeId', b'rawdata:AcceptedAnswerId']

# Populate 'user_correlation': for every question with an accepted answer,
# store a row keyed by the asking user with one column per answering user
# ('helper:<answerer id>'), making the RQ4 lookup O(1).
with hbasecon.connection() as connection:
    posts = connection.table('posts')
    tabla = connection.table(table_name)
    with tabla.batch(batch_size=500) as bp:
        for key, data in posts.scan(columns=question_columns, filter="SingleColumnValueFilter('rawdata','PostTypeId', =, 'binary:1')"):
            answer_id = data.get(b'rawdata:AcceptedAnswerId')
            owner_raw = data.get(b'rawdata:OwnerUserId')
            if not answer_id or not owner_raw:
                continue  # no accepted answer, or question has no owner column
            helper_raw = posts.row(answer_id).get(b'rawdata:OwnerUserId')
            if not helper_raw:
                continue  # accepted answer row missing or without an owner
            helped = owner_raw.decode('utf-8')
            helper = helper_raw.decode('utf-8')
            if helped != helper:
                bp.put(helped, {'helper:' + helper: None})
# + [markdown] Collapsed="false"
# Mostramos la consulta de ejemplo para solucionar el RQ4:
# + Collapsed="false"
with hbasecon.connection() as connection:
tabla = connection.table(table_name)
print(DictTable(tabla.row(b'83', columns=[b'helper:100'])))
print(DictTable(tabla.row(b'100', columns=[b'helper:83'])))
# + Collapsed="false"
| hbase/sesion6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: my_env
# language: python
# name: my_env
# ---
# # Figure 4.1
# +
# State space: every cell (column, row) of the 4x4 grid.
states = [(col, row) for col in range(4) for row in range(4)]
# The four compass moves, expressed as (dx, dy) deltas.
actions = dict(north=(0, -1), south=(0, 1), east=(1, 0), west=(-1, 0))
# -
def state_reward(state, action):
    """Apply *action* in *state*; return (next_state, reward).

    The corner cells (0, 0) and (3, 3) are absorbing with reward 0.
    A move that would leave the 4x4 grid keeps the agent in place;
    every non-terminal transition costs -1.
    """
    if state in [(0, 0), (3, 3)]:
        return state, 0
    dx, dy = action
    nx, ny = state[0] + dx, state[1] + dy
    if 0 <= nx <= 3 and 0 <= ny <= 3:
        return (nx, ny), -1
    # Off-grid move: bounce back to the current cell.
    return state, -1
def update_value(values, state, policy, discount=0.9):
    """One Bellman expectation backup for *state*.

    *policy* is the probability assigned to each of the four actions
    (uniform random policy); terminal states keep value 0.
    """
    if state in [(0, 0), (3, 3)]:
        return 0
    return sum(
        policy * (reward + discount * values[successor])
        for successor, reward in (state_reward(state, move) for move in actions.values())
    )
def fill_table(values):
    """Render *values* (dict mapping grid cell -> number) as a 4x4 table figure."""
    fig, ax = plt.subplots()
    grid = Table(ax, bbox=[0, 0, 1, 1])
    for (row, col), cell_value in values.items():
        grid.add_cell(row, col, 1/4, 1/4, text=round(cell_value, 1),
                      loc='center', facecolor='white')
    ax.add_table(grid)
    ax.set_axis_off()
# Two-array version of iterative policy evaluation
# +
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.table import Table
def eval_policy(iters):
    """Run *iters* sweeps of iterative policy evaluation (two-array version)
    for the equiprobable random policy, then plot the value table.
    """
    values = dict((state, 0) for state in states)
    policy = 0.25  # probability of each of the four actions
    for _ in range(iters):
        # Two-array version: every state is backed up from a frozen snapshot.
        old_values = deepcopy(values)
        for state in states:
            # Bug fix: the original called the undefined name
            # `update_value_out` (NameError); the helper defined above
            # is `update_value`. Also removed the unused `prev_value`.
            values[state] = update_value(old_values, state, policy, discount=1)
    fill_table(values)
    # plt.savefig(f'figs/fig4_1_k_{iters}.svg', format='svg')
# -
eval_policy(0)
eval_policy(1)
eval_policy(2)
eval_policy(3)
eval_policy(10)
eval_policy(1000)
| chapter4/fig4_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:diplodatos-ayv]
# language: python
# name: conda-env-diplodatos-ayv-py
# ---
# + [markdown] nbpresent={"id": "2730c756-2a43-4bde-9c08-2fa92c0fe83b"}
# <center>
# <h4>Diplomatura AACSyA 2018 - FaMAF - UNC</h4>
# <h3>Análisis y visualización de datos</h3>
# </center>
#
# # Tarea integradora - Parte 1
#
# En esta notebook presentamos una serie de ejercicios para trabajar con variables aleatorias discretas, tanto númericas como cátegoricas, ejemplificando los conceptos de probabilidad explicados en clase. Lo primero es determinar si todas las librerías están correctamente instaladas.
# + nbpresent={"id": "1b68aef3-965d-4321-841d-5d69693ff621"}
# # %matplotlib notebook
# %matplotlib inline
import pandas
import numpy
import seaborn
import matplotlib.pyplot as plt
# + nbpresent={"id": "9a0e71ee-c409-4cda-a2b8-de488486e581"}
seaborn.set_style('whitegrid')
seaborn.set_context('talk')
# + [markdown] nbpresent={"id": "e433b9fa-59d6-40e0-a0ae-5a095b66f7ed"}
# ## Cargar el dataset en un dataframe de pandas
#
# Los dataframes son tablas con un montón de operaciones ya programadas. Prácticamente todas las funciones de SQL se pueden realizar en pandas con pocas líneas. Además de eso, tiene funciones para leer y escribir archivos en numerosos formatos.
#
# Ahora leemos directamente el archivo .csv
# + nbpresent={"id": "9aabecdd-5726-420c-a026-d3f0fd15f60c"}
dataset = pandas.read_csv('https://cs.famaf.unc.edu.ar/~mteruel/datasets/diplodatos/violencia-institucional-2018-01.csv',
encoding='utf8')
# + nbpresent={"id": "79fbcadc-ad82-4e8d-9a99-90914e18e5d2"}
dataset[:100:10]
# + [markdown] nbpresent={"id": "3b64bf41-9f9a-48de-8ca1-fd974a2fe781"}
# Los valores para los años no tienen un formato consistente en todas las filas, por lo que los procesaremos antes de comenzar. Los valores de año faltantes son reemplazados por NaN, y como la representación de enteros de numpy no soporta este tipo de valores, convertiremos cada año a tipo numpy.float. Otras decisiones pueden ser tomadas en este punto, como reemplazar los valores faltantes por 0 o elminarlos del dataset, dependiendo del tipo de datos que se requiera.
#
# También cambiaremos el nombre de la columna año, ya que el caracter ñ no permite utilizar el syntax sugar de pandas donde se obtienen los valores de la columna con la sintáxis de llamar a un método.
# + nbpresent={"id": "27431d99-0e60-4915-87a4-62153d1474b9"}
import six
def convert_to_int(row):
    """Coerce a raw 'año' cell to float.

    Plain numbers (and NaN) pass straight through ``float``; strings such
    as '2014,2015' keep only the part before the first comma. Values that
    raise ValueError and are not strings are returned unchanged.
    (Modernized for the Python 3 kernel: ``str`` replaces
    ``six.string_types``, removing the ``six`` dependency.)
    """
    try:
        return float(row)
    except ValueError:
        if isinstance(row, str):
            return float(row.split(',')[0])
        return row
dataset.loc[:,'year'] = dataset[u'año'].apply(convert_to_int)
# + nbpresent={"id": "e180f303-9fa7-4a94-a32f-75bb1dcabea0"}
dataset.shape, dataset.year.dropna().shape
# + [markdown] nbpresent={"id": "75500ea3-22d3-44a5-903b-f45292864259"}
# ---
# # Ejercicios
# + [markdown] nbpresent={"id": "1d99fdf4-d123-4564-96bc-db97291fc83f"}
# ### Ejercicio 1
#
# * Crear una imagen mostrando la distribución de los eventos por año
#
# * Construir una tabla de frecuencia de probabilidades dado el evento Y = ’año en el que ocurrió’
#
# $$fdp(Y)$$
#
# * Crear una imagen de dichas probabilidades con el gráfico que considere adecuado. ¿Son ambos gráficos iguales? ¿Cuál considera que es más útil?
#
# * ¿A qué se debe la forma de la distribución? ¿Qué tendencia muestra? ¿Ha aumentado la violencia institucional en los últimos años?
#
# * Calcular la función de distribución acumulada de la variable Y y graficarla. ¿Cuál es la probabilidad de que haya dado un acto de violencia antes del último cambio de gobierno?
#
# IMPORTANTE: ¿Qué hacemos con los datos faltantes?
# -
# ### Distribución de los eventos por año
# + nbpresent={"id": "67bd4466-71f8-4d60-9902-0c8e29c8a615"}
years_without_nan = dataset.year.dropna()
MIN_YEAR = years_without_nan.min()
MAX_YEAR = years_without_nan.max()
YEARS_RANGE = [MIN_YEAR, MAX_YEAR]
def distribution_by_year():
    """Histogram of event counts per year (NaN years already dropped)."""
    _, axis = plt.subplots(figsize=(12, 5))
    # Counts only (kde=False): plot the distribution of events by year.
    seaborn.distplot(years_without_nan, kde=False, ax=axis)
distribution_by_year()
# -
# #### Con una aproximación a la fdp generada automáticamente
# +
_, ax_year = plt.subplots(figsize=(12, 5))
seaborn.distplot(years_without_nan)
# -
# ### Tabla de frecuencia de probabilidades dado el evento Y = ’año en el que ocurrió’
# Drop nan values and only count unique values
fdps_year = dataset.year.value_counts(normalize=True, dropna=True)
fdps_year.to_frame()
# +
_, ax_year = plt.subplots(figsize=(12, 5))
seaborn.barplot(
data=fdps_year.to_frame().reset_index(),
x='index',
y='year',
color='blue',
orient='v'
)
# Configure axes
ax_year.set_title('fdp(y)')
ax_year.set_xticklabels(ax_year.get_xticklabels(), rotation=60)
ax_year.set_xlabel('Años')
ax_year.set_ylabel('')
seaborn.despine()
# -
# ### ¿A qué se debe la forma de la distribución? ¿Qué tendencia muestra? ¿Ha aumentado la violencia institucional en los últimos años?
#
# * La forma de la distribución se debe a los distintos cambios de gobiernos, los picos corresponden al segundo y tercer año de cada gobierno mientras que los valores “bajos” se registran en período de elecciones. Si analizamos cada gobierno de manera individual y registráramos los hechos de violencia por mes a lo largo de 4 años, la distribución podría tener una tendencia normal.
# * La tendencia de los datos muestra que durante cada gobierno, la violencia institucional crece llegando a un pico que suele disminuir durante las elecciones del ultimo año. Esto se ve reflejado claramente en los graficos observando los ultimos tres gobiernos.
# * La violencia institucional ha crecido en los últimos años, pero no podemos asegurar que la razón sea por existir más casos de violencia, sino que puede ser debido a que hay más registros de los mismos en comparación con los años donde no existían políticas y herramientas que permitieran registrar estos casos.
#
# ### Funcion de distribución acumulada para la variable Y = 'Año en el que ocurrió
#
# #### ¿Cuál es la probabilidad de que haya dado un acto de violencia antes del último cambio de gobierno?
# Tomando como referencia el gráfico de distribución acumulada, la probabilidad de que se haya dado un acto de violencia antes del último cambio de gobierno fue cercana a un 60%.
# +
_, ax_year = plt.subplots(figsize=(12, 5))
ax_year.set_xlim(YEARS_RANGE)
seaborn.distplot(dataset.year.dropna(), hist_kws={'cumulative': True}, kde_kws={'cumulative': True})
# -
# ### ¿ Que hacemos con los datos faltantes ?
#
# Si nos referimos a los eventos en los cuales no se registró el año (NaN), estos son descartados por que no pueden ser agrupados en ninguna categoría y por ende no aportan información relevante al ámbito del problema.
# + [markdown] nbpresent={"id": "e6bf2594-10fd-4106-8a4e-51644489316e"}
# ## Ejercicio 2
#
# * Construir una tabla de frecuencia de probabilidades dado el evento X = ’provincia donde se desarrollo el hecho’
#
# $$fdp(X)$$
#
# * ¿Qué información adicional es necesaria para mostrar estos datos de manera más adecuada?
# * Muestre la distribución de los eventos visualmente. ¿Qué tipo de gráfico es más adecuado para mostrar esta información?
# -
# ### Tabla de frecuencia de probabilidades dado el evento X = ’provincia donde se desarrollo el hecho’
# Drop nan values and only count unique values
fdps_province = dataset.provincia.value_counts(normalize=True, dropna=True)
fdps_province.to_frame()
# +
# Plot the table
_, ax_province = plt.subplots(figsize=(10, 7))
# Configure axes
ax_province.set_title('fdp(x)')
ax_province.set_xlabel('')
ax_province.set_ylabel('Provincia')
seaborn.barplot(
data=fdps_province.to_frame().reset_index(),
x='provincia',
y='index',
color='yellow',
orient='h',
ax=ax_province
)
seaborn.despine()
# +
# Plot event distribution
_, ax_province = plt.subplots(figsize=(10, 7))
ax_province.set_title('Distribución de eventos')
seaborn.countplot(
data=dataset.provincia.dropna().to_frame(),
y='provincia',
color='yellow',
ax=ax_province
)
seaborn.despine()
# -
# ### ¿Qué información adicional es necesaria para mostrar estos datos de manera más adecuada?
#
# Para mostrar estos datos de una manera más adecuada, haría falta como información adicional el año en el que ocurrieron los hechos, es decir, agrupar las hechos primero por año, y luego mostrar la distribución por provincias.
#
# Al no tener en cuenta el año de ocurrencia, se podría sesgar a quien visualice esta tabla, ya que los hechos registrados ocurren a lo largo de 29 años.
# + [markdown] nbpresent={"id": "5f480a0d-7fda-43cc-ac02-fee14bbbf6a7"}
# ### Ejercicio 3
#
# * Construir una tabla de doble entrada donde se observen las función de distribución de probabilidades con las 2 variables aleatorias anteriores.
# ¿Son eventos independientes?
#
# * Crear una imagen de dichas probabilidades con el gráfico que considere adecuado.
#
# -
# ### Tabla de distribución de probabilidad para las variables X e Y
pandas.crosstab(dataset.provincia, dataset.year).apply(lambda r: r/r.sum(), axis=0)
# #### ¿Son eventos independientes?
#
# Los eventos no son independientes basados en la ecuación:
#
# p ( xi , y j ) = p ( x i ) q ( y j ) ∀ ( x i , y j ) ∈ R XY
#
# Por ejemplo si tomamos la provincia de Buenos Aires durante el año 2016, entonces sean
# A = "Probabilidad de que el evento haya sucedido en la Provincia de Buenos Aires"
# B = "Probabilidad de que el evento haya sucedido en el año 2016"
# C = "Probabilidad de que el evento haya sucedido en la provincia de Buenos Aires durante el año 2016"
#
# P(C) = P(A $\cap$ B) = P(A) * P(B)
#
# Basándonos en los calculos anteriores
#
# +
pC = 0.784512 # Ocurrió en Bs. As. durante el 2016
pA = 0.693182 # Ocurrió en Bs As.
pB = 0.225684 # Ocurrió en 2016
pAB = pA * pB
print('Son independientes ? {}'.format(pC == pAB))
print('pC = {}, pAB = {}'.format(pC, pAB))
# +
_, ax_freq = plt.subplots(figsize=(20, 7))
ax_freq.set_title('Frecuencia por provincia')
groupedByStateAndYear = dataset.groupby(['provincia', 'year']).size().reset_index(name='freq')
seaborn.tsplot(data=groupedByStateAndYear, time='year', unit = "provincia", condition='provincia', value='freq')
seaborn.despine()
# -
# Otro posible gráfico es una secuencia de boxplots. Este gráfico muestra la distribución de los eventos por año, pero no muestra la cantidad de datos en cada box. Aunque esto oscurece información, puede ser útil si queremos ver la distribución sin que Buenos Aires absorva toda la masa de casos.
#
# **Nota**: anexo obtenido de una notebook los profesores de la cátedra.
_, ax_freq = plt.subplots(figsize=(20, 10))
seaborn.boxplot(x="year", y="provincia", data=dataset, color='lightblue')
# + [markdown] nbpresent={"id": "9daf76cc-fcc9-4f95-84be-41d0b8205ffa"}
# ## Ejercicio 4
#
# Dada la variable aleatoria Z = ’contexto1 en el que se produjo el hecho’, calcular la probabilidad que el hecho sea producido en una dependencia de una fuerza de seguridad (penal, comisaría, etc)
#
# $$ fdp(z=penal ~~ |~~ z=comisaria ~~ | ...) $$
# -
# Check the possible values for 'context1'
dataset['contexto1'].drop_duplicates().to_frame()
# +
# Pick the 'contexto1' values that correspond to a security-force facility.
# The literals must match the dataset categories exactly.
security_context = [
    'Penal / Complejo Penitenciario PROVINCIAL',
    'Comisaría',
    'Penal / Complejo Penitenciario FEDERAL',
    'Instalaciones de la Gendarmería Nacional',
    'Centros de Detención Previos al Juicio (Alcaidia)',
    'Móviles y medios de transporte de Fuerzas de Seguridad',
    'Instalaciones de las Fuerzas Armadas (cuarteles, regimientos, etc.)',
    'Escuelas de Formación Fuerzas Nacionales',
    'Escuelas de Formación Fuerzas Provinciales',
    'Centros de Detención Preventiva'
]

# Relative frequency of each context (NaN excluded).
fdps_sec = dataset['contexto1'].value_counts(normalize=True, dropna=True)

# Total probability mass of the security-related contexts; sum() over a
# generator replaces the manual accumulator loop (stray semicolons removed).
p = sum(fdps_sec[e] for e in security_context)
print('la probabilidad que el hecho sea producido en una dependencia de una fuerza de seguridad es de: {}'.format(p))
# + [markdown] nbpresent={"id": "6c2485d4-b5b9-42ef-b4bc-c0f87454a9f2"}
# ## Ejercicio 5
#
# Dada la variable aleatoria M = ‘existió violencia fisica’:
# * ¿Que probabilidad existe que también tenga violencia psíquica? N = ‘existió violencia psíquica’
#
# $$P(N~~|~~M)$$
#
# * ¿Qué conclusión se puede formular de los datos que tenemos y los datos que obtenemos de los métodos probabilísticos?
# * ¿Podemos concluir en que son eventos independientes dadas las muestras?
# -
# Registro de hechos de violencia fisica categorizados
# Como se puede observar existe la categoría 'Ninguno' el cual tenemos que tratar
phisical_violence = dataset['violencia_fisica'].value_counts(dropna=True)
phisical_violence
# +
# Probabilidad de Violencia Fisica
db_phisical_violence = dataset['violencia_fisica'].dropna()
p_phisical_violence = len(db_phisical_violence[db_phisical_violence != 'Ninguno']) / len(dataset)
p_phisical_violence
# -
# Registro de hechos de violencia psiquica categorizados
# Como se puede observar existe la categoría 'Ninguno' el cual tenemos que tratar
psychic_violence = dataset['violencia_psiquica'].value_counts(dropna=True)
psychic_violence
# Probabilidad de Violencia Psiquica
db_psychic_violence = dataset['violencia_psiquica'].dropna()
p_psychic_violence = len(db_psychic_violence[db_psychic_violence != 'Ninguno']) / len(dataset)
p_psychic_violence
# Probability of both physical AND psychological violence.
psyc_phisc = dataset[['violencia_fisica', 'violencia_psiquica']]
psyc_phisc_without_na = psyc_phisc.dropna()
# Single combined boolean mask instead of chained indexing
# (df[cond1][cond2] triggers pandas chained-assignment warnings and is
# fragile); the selected rows are identical.
both = ((psyc_phisc_without_na.violencia_fisica != 'Ninguno')
        & (psyc_phisc_without_na.violencia_psiquica != 'Ninguno'))
filtered = psyc_phisc_without_na[both]
p_psyc_phisc_violence = len(filtered) / len(dataset)
p_psyc_phisc_violence
# P(psychological | physical) = P(psychological AND physical) / P(physical).
# Bug fix: the original divided by p_psychic_violence, which computes
# P(physical | psychological) — not the conditional stated in the print.
p_psyc_given_phisc_violence = p_psyc_phisc_violence / p_phisical_violence
print('la probabilidad de que haya violencia psiquica dado que hubo violencia fisica es de: {}'.format(p_psyc_given_phisc_violence))
# Si los eventos fueran independientes entonces se deberia cumplir que
#
# P(C) = P(N $\cap$ M) = P(N) * P(M)
#
# Siendo C la probabilidad de que haya habido violencia física y psiquica
# +
pC = p_psyc_phisc_violence # Hubo violencia psíquica y física
pN = p_psychic_violence # Hubo violencia psíquica
pM =p_phisical_violence # Hubo violencia física
pNM = pN * pM
print('Son independientes ? {}'.format(pC == pNM))
print('pC = {}, pNM = {}'.format(pC, pNM))
# -
# ##### Conclusiones en base a los datos
#
# Basados en los datos obtenidos y analizados podemos decir a primera vista que los hechos de violencia institucional han ido en incremento en los ultimos años. Sin embargo, debido a los pocos registros obtenidos antes del año 2014 no podemos concluir fehacientemente esto debido a que en su momento pudieron no haber políticas y herramientas que permitieran registrar estos casos.
#
# En cuanto a los casos de violencia física y psíquica, analizando los datos con los métodos probabilísticos podemos decir que aproximadamente un 25% de los casos de violencia psíquica se dieron después de que hubo violencia física primero. Los números también nos indican que de todo el dataset, los hechos registrados de violencia física representan la mayoría y de este conjunto más de un 80% de los mismos se dieron en dependencias de seguridad como penales o dependencias militares. No necesariamente la violencia psíquica tiene su origen en la violencia física como pudimos analizar.
| analysis_and_visualization/tasks/task1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the marks table from the local CSV file.
schoolMarks = pd.read_csv("school.csv")
schoolMarks
# -
# NOTE(review): .loc with the string label "Німецька" assumes the DataFrame
# index contains that label; with read_csv's default RangeIndex this raises
# KeyError — confirm the CSV actually yields such an index.
schoolMarks.loc["Німецька"]
# First five rows.
schoolMarks.head()
# Last five rows.
schoolMarks.tail()
# Three random rows.
schoolMarks.sample(3)
# Column labels.
schoolMarks.columns
# Row index.
schoolMarks.index
# Summary statistics for the numeric columns.
schoolMarks.describe()
school = pd.read_csv("school.csv")
# +
plt.figure(figsize =(18,8))
plt.title("school_analitik")
sns.lineplot(data=school['оцінка'], label="Marks")
plt.xlabel("Date")
| Visualization/school_Analyze.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # plot01
# +
import numpy as np
import pandas as pd
import pylab as pl
import matplotlib.pyplot as plt
# %matplotlib inline
df = pd.read_csv('tmp/session_c.csv')
# -
df.dtypes
# The seven scatter plots differ only in the column pair and the output file
# number; generate them in one loop instead of seven copy-pasted stanzas.
# (Python 2 kernel: str.format, no f-strings.) Output names and column
# pairs are identical to the original: 001 = SrcPort/DstPort, 002..007 =
# SrcPortCls0..5 / DstPortCls0..5.
column_pairs = [('SrcPort', 'DstPort')]
column_pairs += [('SrcPortCls{}'.format(i), 'DstPortCls{}'.format(i)) for i in range(6)]
for index, (x_col, y_col) in enumerate(column_pairs, start=1):
    p = df.plot.scatter(x=x_col, y=y_col, figsize=(12, 6))
    p.tick_params(labelbottom='off', top='off', bottom='off')
    fig = p.get_figure()
    fig.savefig('tmp/srcdstport{:03d}.png'.format(index))
| hmac/plot01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory data analysis on the San Francisco 311 data for 1 Hour time intervals
# Data can be downloaded from: https://data.sfgov.org/City-Infrastructure/311-Cases/vw6y-z8j6/data
# +
# %matplotlib inline
# %load_ext autoreload
import matplotlib.pyplot as plt
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
import numpy as np
## The time window to bucket samples
TIME_RANGE = '1H'
## File path (original data is ~1GB, this is a reduced version with only categories and dates)
#Original file:
#DATAPATH = "SF311_simplified.csv"
#Sample raw data:
DATAPATH = "SF_data/SF-311_simplified.csv"
# -
# ### Read sample of data (original data contains additional columns)
raw_sample = pd.read_csv(DATAPATH, nrows=5)
raw_sample.head()
raw = pd.read_csv(DATAPATH).drop(columns='Unnamed: 0')
raw.head(10)
# #### Initial data prep
# +
## Rename columns
raw = raw.rename(columns={'Opened': 'date', 'Category': 'category'})
## Turn the raw data into a time series (with date as DatetimeIndex)
from moda.dataprep.raw_to_ts import raw_to_ts
ts = raw_to_ts(raw,date_format="%m/%d/%Y %H:%M:%S %p")
# -
ts.head()
# +
## Some general stats
print("Dataset length: " + str(len(ts)))
print("Min date: " + str(ts.index.get_level_values('date').min()))
print("Max date: " + str(ts.index.get_level_values('date').max()))
print("Total time: {}".format(ts.index.get_level_values('date').max() - ts.index.get_level_values('date').min()))
print("Dataset contains {} categories.".format(len(ts['category'].unique())))
# -
# #### Next, we decide on the time interval and aggregate items per time and category
from moda.dataprep.ts_to_range import ts_to_range
ranged_ts = ts_to_range(ts,time_range=TIME_RANGE)
ranged_ts.head(20)
# +
#I'm using dfply because I like its functional-like syntax. This can also be done with plain pandas.
# #!pip install dfply
from dfply import *
## Remove categories with less than 1000 items (in more than 10 years) or that existed less than 100 days
min_values = 1000
min_days = 100
categories = ranged_ts.reset_index() >> group_by(X.category) >> \
summarise(value = np.sum(X.value),duration_in_dataset = X.date.max()-X.date.min()) >> \
ungroup() >> \
mask(X.duration_in_dataset.dt.days > min_days) >> \
mask(X.value > min_values) >> \
arrange(X.value,ascending=False)
print("Filtered dataset contains {0} categories,\nafter filtering the small ones that existed less than {1} days or had {2} values of less.".
format(len(categories),min_days,min_values))
categories.head()
# -
# ### Most common categories
# +
category_names = categories['category'].values
num_categories = len(categories)
major_category_threshold=11
major_categories = category_names[:major_category_threshold]
minor_categories = category_names[major_category_threshold:]
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_figheight(5)
fig.set_figwidth(20)
categories[categories['category'].isin(major_categories)].plot(kind='bar',
x='category',
y='value',
title="Top "+str(major_category_threshold-1)+" common categories on the SF311 dataset",
ax=axes[0])
categories[categories['category'].isin(minor_categories)].plot(kind='bar',
x='category',
y='value',
title=str(major_category_threshold)+"th to "+str(num_categories)+"th most common categories on the SF311 dataset",
ax=axes[1])
plt.savefig("category_values.png",bbox_inches='tight')
# -
# ### Change in requests per category from year to year
# +
## Calculate the number of values per category per year
categories_yearly = ranged_ts.reset_index() >> mutate(year = X.date.dt.year) >> group_by(X.category,X.year) >> \
summarise(value_per_year = np.sum(X.value),
duration_in_dataset = X.date.max()-X.date.min()) >>\
ungroup() >> \
mask(X.value_per_year > (min_values/12.0)) >> \
arrange(X.value_per_year,ascending=False)
import seaborn as sns
major_cats_yearly = categories_yearly[categories_yearly['category'].isin(major_categories)]
g = sns.factorplot(x='category', y='value_per_year', hue='year', data=major_cats_yearly, kind='bar', size=4, aspect=4,legend=True)
g.set_xticklabels(rotation=90)
axes = g.axes.flatten()
axes[0].set_title("Yearly number of incidents for the top "+str(major_category_threshold-1)+" categories")
plt.savefig("yearly_values.png",bbox_inches='tight')
# +
minor_cats_yearly = categories_yearly[categories_yearly['category'].isin(minor_categories)]
g = sns.factorplot(x='category', y='value_per_year', hue='year', data=minor_cats_yearly, kind='bar', size=4, aspect=4,legend=True)
g.set_xticklabels(rotation=90)
axes = g.axes.flatten()
axes[0].set_title("Yearly number of incidents for the "+str(major_category_threshold)+"th to "+str(num_categories)+"th categories")
# -
# ### Correlation between categories over time
# +
categories_yearly_pivot = categories_yearly.pivot("year", "category", "value_per_year")
categories_yearly_pivot.head()
corr = categories_yearly_pivot.corr()
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
# -
# ### One category inspection
# #### Example 1: Noise Reports
category = "Noise Report"
ranged_ts.loc[pd.IndexSlice[:, category], :].reset_index().plot(kind='line',x='date',y='value',figsize=(24,6),linewidth=0.7,
title = "Number of incidents per 1 hour for {}".format(category))
# #### Example 2: Street and Sidewalk Cleaning
START = '2015-11-01'
END = '2018-07-01'
category = 'Street and Sidewalk Cleaning'
cleaning = ranged_ts.loc[pd.IndexSlice[:, category], :].reset_index()
cleaning[(cleaning.date > START) & (cleaning.date<=END)].plot(kind='line',x='date',y='value',figsize=(24,6),linewidth=0.7,
title = "Number of incidents per 1 hours for {0} between {1} and {2}".format(category,START,END))
# As comparison, let's look at the same time series with different time ranges (30 minutes, 1 hour and 24 hours), only on two months of data
# +
from moda.dataprep.ts_to_range import ts_to_range
ranged_ts_3H = ts_to_range(ts,time_range='3H',pad_with_zeros=True)
ranged_ts_30min = ts_to_range(ts,time_range='30min',pad_with_zeros=True)
START = '2015-11-01'
END = '2016-01-01'
category = 'Street and Sidewalk Cleaning'
fig, axes = plt.subplots(nrows=3, ncols=1,figsize=(20,12))
cleaning_30min = ranged_ts_30min.loc[pd.IndexSlice[:, category], :].reset_index()
a1=cleaning_30min[(cleaning_30min.date > START) & (cleaning_30min.date<=END)].plot(kind='line',x='date',y='value',linewidth=0.7, ax=axes[0])
cleaning_1H = ranged_ts.loc[pd.IndexSlice[:, category], :].reset_index()
a2=cleaning_1H[(cleaning_1H.date > START) & (cleaning_1H.date<=END)].plot(kind='line',x='date',y='value',linewidth=0.7, ax=axes[2])
cleaning_3H = ranged_ts_3H.loc[pd.IndexSlice[:, category], :].reset_index()
a3=cleaning_3H[(cleaning_3H.date > START) & (cleaning_3H.date<=END)].plot(kind='line',x='date',y='value',linewidth=0.7, ax=axes[1])
# -
# We can see that there are multiple seasonality factors in this time series. Hourly and weekly patterns are visible on the 30 minute interval time series, and the 3 hours interval time series
# ## Evaluating different models on the SF 1H data
# First, in order to be able to estimate our models, we use [TagAnomaly](https://github.com/Microsoft/TagAnomaly) to tag the points we think are showing trends in the data. Taganomaly can be found here: https://github.com/Microsoft/TagAnomaly
# Second, we join the tagged dataset with the time series dataset. Each sample which isn't included in the tagged dataset is assumed to be non-trending (or normal)
# +
## Add labeled data
labels1H = pd.read_csv('SF_data/SF_1H_anomalies_only.csv',usecols=['date','category','value'])
labels1H.date = pd.to_datetime(labels1H.date)
labels1H['label'] = 1
labels1H.sort_values(by='date').head()
# -
# Since we have labels only for 2018, we'll filter out previous years.
ts2018 = ranged_ts[ranged_ts.index.get_level_values(0).year == 2018]
ts2018.head()
df1H = pd.merge(ts2018.reset_index(),labels1H,how='left',on=['date','category'])
df1H['label'] = np.where(np.isnan(df1H['value_y']),0,1)
df1H = df1H.set_index([pd.DatetimeIndex(df1H['date']),'category'])
df1H = df1H.drop(columns = ['date','value_y']).rename(columns = {'value_x':'value'})
df1H.head()
len(df1H)
# +
from moda.evaluators import get_metrics_for_all_categories, get_final_metrics
from moda.dataprep import read_data
from moda.models import TwitterAnomalyTrendinessDetector, MovingAverageSeasonalTrendinessDetector, \
STLTrendinessDetector, AzureAnomalyTrendinessDetector
def run_model(dataset, freq, min_date='01-01-2018', plot=False, model_name='stl', min_value=10,
              min_samples_for_category=100):
    """Run one moda trendiness detector over a labeled multi-category dataset and print metrics.

    Parameters
    ----------
    dataset : DataFrame indexed by (date, category) with 'value' and 'label' columns.
    freq : str
        pandas frequency string of the series (e.g. '1H').
    min_date : str
        Currently unused; kept for backward compatibility with existing callers.
    plot : bool
        If True, plot each category's results via ``model.plot``.
    model_name : str
        One of 'twitter', 'ma_seasonal', 'stl', 'azure'.
    min_value : int
        Minimum-value threshold passed through to the detector.
    min_samples_for_category : int
        Currently unused; kept for backward compatibility with existing callers.

    Returns
    -------
    DataFrame of per-sample predictions produced by the model.

    Raises
    ------
    ValueError
        If ``model_name`` is not recognized (previously an unknown name fell
        through to a NameError on ``model`` at predict time).
    """
    if model_name == 'twitter':
        model = TwitterAnomalyTrendinessDetector(is_multicategory=True, freq=freq, min_value=min_value, threshold=None,
                                                 max_anoms=0.49, seasonality_freq=7)
    elif model_name == 'ma_seasonal':
        model = MovingAverageSeasonalTrendinessDetector(is_multicategory=True, freq=freq, min_value=min_value,
                                                        anomaly_type='or',
                                                        num_of_std=3)
    elif model_name == 'stl':
        model = STLTrendinessDetector(is_multicategory=True, freq=freq, min_value=min_value,
                                      anomaly_type='residual',
                                      num_of_std=3, lo_delta=0)
    elif model_name == 'azure':
        # NOTE(review): __file__ is undefined inside a notebook kernel, so this branch
        # only works when the code runs as a script — confirm before relying on it.
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'config/config.json')
        subscription_key = get_azure_subscription_key(filename)
        model = AzureAnomalyTrendinessDetector(is_multicategory=True, freq=freq, min_value=min_value,
                                               subscription_key=subscription_key)
    else:
        raise ValueError("Unknown model_name: {}".format(model_name))
    # There is no fit/predict here. We take the entire time series and can evaluate anomalies on all of it or just the last window(s)
    prediction = model.predict(dataset, verbose=False)
    # Score predictions against the hand-tagged labels, tolerating a small window offset.
    raw_metrics = get_metrics_for_all_categories(dataset[['value']], prediction[['prediction']], dataset[['label']],
                                                 window_size_for_metrics=5)
    metrics = get_final_metrics(raw_metrics)
    num_categories = len(dataset.index.get_level_values(1).unique())
    print("\n******\n")
    print("Finished evaluating all {} categories. Metrics values across all categories:".format(num_categories))
    print('f1 = {}'.format(metrics['f1']))
    print('precision = {}'.format(metrics['precision']))
    print('recall = {}'.format(metrics['recall']))
    ## Plot each category
    if plot:
        print("Plotting...")
        model.plot(labels=dataset['label'],savefig=False)
    return prediction
# -
# Evaluate the STL-decomposition detector on the labeled 1H dataset.
prediction_stl = run_model(df1H,freq='1H',model_name='stl')
# +
def plot_one_category(category_dataset,model_name='stl'):
    """Plot a model's decomposition outputs for a single category on a 4x2 subplot grid.

    Parameters
    ----------
    category_dataset : DataFrame for one category, indexed by date, containing at
        least 'value', 'prediction', 'label', and 'category' columns; optional
        decomposition columns ('trend', 'seasonality', 'residual', ...) are
        plotted when present.
    model_name : str
        Model name used in the figure title only.
    """
    def ts_subplot(plt, series, label):
        # Thin, labeled line plot with rotated x-axis tick labels.
        plt.plot(series, label=label, linewidth=0.5)
        plt.legend(loc='best')
        plt.xticks(rotation=90)
    plt.subplot(421)
    ts_subplot(plt, category_dataset['value'], label='Original')
    if 'residual_anomaly' in category_dataset:
        plt.subplot(422)
        ts_subplot(plt, category_dataset['residual_anomaly'], label='Residual anomaly')
    if 'trend' in category_dataset:
        plt.subplot(423)
        ts_subplot(plt, category_dataset['trend'], label='Trend')
    if 'trend_anomaly' in category_dataset:
        plt.subplot(424)
        ts_subplot(plt, category_dataset['trend_anomaly'], label='Trend anomaly')
    if 'seasonality' in category_dataset:
        plt.subplot(425)
        ts_subplot(plt, category_dataset['seasonality'], label='Seasonality')
    plt.subplot(426)
    ts_subplot(plt, category_dataset['prediction'], label='Prediction')
    if 'residual' in category_dataset:
        plt.subplot(427)
        ts_subplot(plt, category_dataset['residual'], label='Residual')
    plt.subplot(428)
    ts_subplot(plt, category_dataset['label'], label='Labels')
    # Use positional access: the index here is a DatetimeIndex, so the original
    # label-based `category_dataset.category[0]` raises KeyError on pandas >= 2.0.
    category = category_dataset['category'].iloc[0]
    plt.suptitle("{} results for category {}".format(model_name, category))
# -
# Pull the 'Graffiti' category out of the STL predictions; keep 'category' as a column
# (drop=False) because plot_one_category reads it for the title.
graffiti = prediction_stl.loc[pd.IndexSlice[:, 'Graffiti'], :].reset_index(level='category', drop=False)
fig = plt.figure(figsize=(20,8))
plot_one_category(graffiti,model_name='STL')
# The time series in this case is relatively noisy. The model was more conservative than the labeler in this case.
sewer = prediction_stl.loc[pd.IndexSlice[:, 'Sewer Issues'], :].reset_index(level='category', drop=False)
fig = plt.figure(figsize=(20,8))
plot_one_category(sewer,model_name='STL')
# In this case, we missed the first peak as we didn't have enough historical data to estimate it. Let's compare this result to a different model:
# Evaluate the moving-average seasonal detector on the same data.
# NOTE(review): TIME_RANGE is defined in an earlier cell — presumably '1H' to match
# the STL run above; confirm before comparing metrics.
prediction_ma = run_model(df1H,freq=TIME_RANGE,model_name='ma_seasonal')
sewer2 = prediction_ma.loc[pd.IndexSlice[:, 'Sewer Issues'], :].reset_index(level='category', drop=False)
fig = plt.figure(figsize=(20,8))
plot_one_category(sewer2,model_name='MA seasonal')
# This model estimates the trend differently, and found some anomalies on the trend series as well. It too couldn't detect the first peak as it requires some historical data to estimate standard deviation and other statistics.
| moda/example/EDA-1H.ipynb |