code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing and Organizing Your Data in Python - Part I
# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*
# a. (*Challenge!*) Create a series object of 10 randomly generated integer values.
import numpy as np
import pandas as pd
# a. Series of 10 random integers drawn uniformly from [0, 10), with a named column.
ser = pd.Series(np.random.randint(10, size = 10, dtype = 'int'), name = "Column 01")
ser
# b. Extract data from Yahoo Finance for Ford from the 1st of January 2005. The ticker you need is ‘F’.
from pandas_datareader import data as wb
# NOTE(review): the 'yahoo' data source in pandas-datareader has been broken/deprecated
# for some time — confirm this still works, or switch to the yfinance package.
F = wb.DataReader('F', data_source='yahoo', start='2005-1-1')
F
|
23 - Python for Finance/1_Useful Tools/10_Importing and Organizing Data in Python - Part I (3:44)/Importing and Organizing Your Data in Python - Part I - Solution_Yahoo_Py3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/daniel-muthukrishna/transomaly/blob/master/Copy_of_compare_methods_bazin_mock_dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="_Se23rOgdXar" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="7db6a113-424f-4efa-d3c7-e5514ae6a803"
from google.colab import drive
drive.mount("/content/gdrive")
# + id="V8hgTV78dhr6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f1196e23-8a1e-4407-e68a-cc38611388c8"
# ! pip install astrorapid
# ! pip install emcee
# ! pip install corner
# # ! pip install celerite
# # ! pip install keras-tcn
# # ! pip install tensorflow-gpu
# + id="GlB4vLMzdskJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4546a9d2-10bf-4392-a740-94623103cb5f"
% cd /content/gdrive/My Drive/Projects/transomaly
# + id="sr39_IbvduhJ" colab_type="code" cellView="form" colab={}
#@title NN Imports
import os
import numpy as np
import h5py
import pickle
import copy
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense, LSTM, TimeDistributed, Masking, Input, Dropout
import tensorflow_probability as tfp
tfd = tfp.distributions
from tcn import TCN, tcn_full_summary
# from bayesian_tcn import TCN, tcn_full_summary
import astrorapid
from transomaly.prepare_training_set import PrepareTrainingSetArrays
from transomaly.loss_functions import mean_squared_error, chisquare_loss, mean_squared_error_over_error, negloglike, negloglike_with_error
from transomaly.plot_metrics import plot_history # , plot_metrics
import transomaly
# + id="EVktX_zwA0Ii" colab_type="code" cellView="form" colab={}
#@title Define plot_metrics function
COLPB = {'g': 'tab:green', 'r': 'tab:red', 'gpred': 'turquoise', 'rpred': 'tab:pink'}
MARKPB = {'g': 'o', 'r': 's', 'z': 'd'}
ALPHAPB = {'g': 0.3, 'r': 1., 'z': 1}
import os
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import json
from transomaly.fit_gaussian_processes import save_gps
from astrorapid.get_training_data import get_data
from transomaly import helpers
def plot_metrics(model, model_name, X_test, y_test, timesX_test, yerr_test, labels_test, objids_test,
                 passbands, fig_dir, nsamples, data_dir, save_dir, nprocesses, plot_gp=False,
                 extrapolate_gp=True, reframe=False, plot_name='', npred=49, probabilistic=False,
                 known_redshift=False, get_data_func=None, normalise=False, bayesian=False):
    """Plot model predictions vs time and an anomaly-score panel for the first 10 test objects.

    Assumes the `nsamples` augmented copies of each object are stored consecutively in the
    test arrays. Saves one two-panel figure per object under `fig_dir/model_name/`.

    NOTE(review): indentation was lost in this source; the branch structure below is a
    reconstruction. The y_test errorbar is placed on the non-reframe branch because
    `y_test[...][:, pbidx]` indexes a sequence, which only exists when reframe is False —
    confirm against the original notebook.

    NOTE(review): the anomaly-score section uses `plot_mean_ypred`/`plot_sigma_ypred`,
    which are only defined when `probabilistic=True`; the non-probabilistic path would
    raise NameError there — confirm intended usage.
    """
    print(model_name)
    nobjects, ntimesteps, nfeatures = X_test.shape
    npassbands = len(passbands)

    # Collect per-pass predictions for Bayesian (MC-dropout) models.
    sampled_ypred = []
    sampled_ystd = []
    draws = []
    if probabilistic:
        X_test = np.asarray(X_test, np.float32)
        y_test = np.asarray(y_test, np.float32)
        if bayesian:
            ns = 100  # number of stochastic forward passes
            for i in range(ns):
                sampled_yhat = model(X_test)
                sampled_ypred.append(np.asarray(sampled_yhat.mean()))
                sampled_ystd.append(np.asarray(sampled_yhat.stddev()))
                draws.append(np.random.normal(sampled_yhat.mean(), sampled_yhat.stddev()))
            # Summarise the predictive distribution from the sampled draws.
            plot_mean_ypred = np.mean(np.array(draws), axis=0)
            plot_sigma_ypred = np.std(np.array(draws), axis=0)
        else:
            yhat = model(X_test)
            plot_mean_ypred = np.asarray(yhat.mean())
            plot_sigma_ypred = np.asarray(yhat.stddev())
    else:
        y_pred = model.predict(X_test)

    if not reframe:
        npred = ntimesteps

    # Get raw light curve data (and optionally the GP fits) for each class present.
    light_curves = {}
    gp_fits = {}
    for classnum in np.unique(labels_test):
        print(f"Getting lightcurves for class:{classnum}")
        light_curves[classnum] = get_data(get_data_func=get_data_func, class_num=classnum, data_dir=data_dir,
                                          save_dir=save_dir, passbands=passbands, known_redshift=known_redshift,
                                          nprocesses=nprocesses, redo=False, calculate_t0=False)
        if plot_gp is True and nsamples == 1:
            gp_fits[classnum] = save_gps(light_curves, save_dir, classnum, passbands, plot=False,
                                         nprocesses=nprocesses, redo=False, extrapolate=extrapolate_gp)

    # Plot predictions vs time per class
    font = {'family': 'normal',
            'size': 36}
    matplotlib.rc('font', **font)

    for idx in np.arange(0, 10):
        sidx = idx * nsamples  # Assumes like samples are in order
        print("Plotting example vs time", idx, objids_test[sidx])
        argmax = None  # timesX_test[sidx].argmax() # -1

        # Get raw light curve observations
        lc = light_curves[labels_test[sidx]][objids_test[sidx]]
        if plot_gp is True and nsamples == 1:
            gp_lc = gp_fits[labels_test[sidx]][objids_test[sidx]]

        fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(13, 15), sharex=True)
        for pbidx, pb in enumerate(passbands):
            pbmask = lc['passband'] == pb
            for s in range(1):  # nsamples
                lw = 3 if s == 0 else 0.5
                alpha = 1 if s == 0 else 0.1
                plotlabeltest = "ytest:{}".format(pb) if s == 0 else ''
                plotlabelpred = "ypred:{}".format(pb) if s == 0 else ''
                marker = None  # MARKPB[pb] if s == 0 else None
                if reframe:
                    # Single-step-ahead setup: show the input sequence itself.
                    ax1.plot(timesX_test[sidx + s][:-1][:argmax], X_test[sidx + s][:, pbidx][:-1][:argmax],
                             c=COLPB[pb], lw=lw, label=plotlabeltest, marker=marker, markersize=10,
                             alpha=alpha, linestyle='-')
                else:
                    ax1.errorbar(timesX_test[sidx + s][1:][-npred:][:argmax], y_test[sidx + s][:, pbidx][:argmax],
                                 yerr=yerr_test[sidx + s][:, pbidx][:argmax], c=COLPB[pb], lw=lw,
                                 label=plotlabeltest, marker='o', markersize=10, alpha=alpha, linestyle='-')
                if probabilistic:
                    if bayesian:
                        # Spaghetti plot of the individual MC-dropout samples.
                        for sp in range(ns):
                            ax1.errorbar(timesX_test[sidx + s][1:][-npred:][:argmax],
                                         sampled_ypred[sp][sidx + s][:, pbidx][:argmax],
                                         yerr=sampled_ystd[sp][sidx + s][:, pbidx][:argmax],
                                         color=COLPB[f'{pb}pred'], lw=0.5, marker='*', markersize=10,
                                         alpha=1 / 256, linestyle=':')
                    ax1.errorbar(timesX_test[sidx + s][1:][-npred:][:argmax],
                                 plot_mean_ypred[sidx + s][:, pbidx][:argmax],
                                 yerr=plot_sigma_ypred[sidx + s][:, pbidx][:argmax],
                                 color=COLPB[f'{pb}pred'], lw=lw, label=plotlabelpred, marker='x',
                                 markersize=20, alpha=1, linestyle=':')
                else:
                    ax1.plot(timesX_test[sidx + s][1:][-npred:][:argmax], y_pred[sidx + s][:, pbidx][:argmax],
                             c=COLPB[f'{pb}pred'], lw=lw, label=plotlabelpred, marker='*', markersize=10,
                             alpha=alpha, linestyle=':')
                if not normalise:
                    # Overlay the raw (un-normalised) observations.
                    ax1.errorbar(lc[pbmask]['time'].data, lc[pbmask]['flux'].data, yerr=lc[pbmask]['fluxErr'].data,
                                 fmt="x", capsize=0, color=COLPB[pb], label='_nolegend_', markersize=15, )
                    if plot_gp is True and nsamples == 1:
                        gp_lc[pb].compute(lc[pbmask]['time'].data, lc[pbmask]['fluxErr'].data)
                        pred_mean, pred_var = gp_lc[pb].predict(lc[pbmask]['flux'].data,
                                                                timesX_test[sidx + s][:argmax], return_var=True)
                        pred_std = np.sqrt(pred_var)
                        ax1.fill_between(timesX_test[sidx + s][:argmax], pred_mean + pred_std,
                                         pred_mean - pred_std, color=COLPB[pb], alpha=0.05, edgecolor="none")
        plt.xlim(-70, 80)

        # Plot anomaly scores
        chi2_samples = []
        like_samples = []
        negloglike_samples = []
        for s in range(1):  # nsamples
            chi2 = 0
            like = 0
            negloglike = 0
            for pbidx in range(npassbands):
                m = yerr_test[sidx + s, :, pbidx][:argmax] != 0  # ignore zeros (where no data exists)
                yt = y_test[sidx + s, :, pbidx][:argmax][m]
                yterr = yerr_test[sidx + s, :, pbidx][:argmax][m]
                yp = plot_mean_ypred[sidx + s, :, pbidx][:argmax][m]
                yperr = plot_sigma_ypred[sidx + s, :, pbidx][:argmax][m]
                try:
                    chi2 += ((yp - yt) / yterr) ** 2
                    like += np.exp(-0.5 * (yp - yt) ** 2 / (yterr ** 2 + yperr ** 2)) * (2 * np.pi * (yterr ** 2 + yperr ** 2)) ** -0.5
                    # BUGFIX: variance term was written `yterr*2` (i.e. 2*yterr);
                    # use yterr**2 so it matches the likelihood expression above.
                    negloglike -= -0.5 * (yt - yp) ** 2 / (yterr ** 2 + yperr ** 2) - 0.5 * np.log(yterr ** 2 + yperr ** 2) - 0.5 * np.log(2 * np.pi)
                except ValueError as e:
                    # Per-passband masks can differ in length, making the accumulation fail.
                    pbidx -= 1
                    m = yerr_test[sidx + s, :, pbidx][:argmax] != 0
                    print(f"Failed chi2 object {objids_test[sidx+s]}", e)
            chi2_samples.append(chi2 / npassbands)
            like_samples.append(like / npassbands)
            negloglike_samples.append(negloglike / npassbands)
        # BUGFIX: was `loglike_samples`, an undefined name (NameError). The anomaly
        # score plotted below is the averaged negative log-likelihood.
        anomaly_score_samples = negloglike_samples
        anomaly_score_mean = np.mean(anomaly_score_samples, axis=0)
        anomaly_score_std = np.std(anomaly_score_samples, axis=0)
        ax2.text(0.05, 0.95, f"$\chi^2 = {round(np.sum(np.mean(chi2_samples, axis=0))/len(yt), 3)}$", horizontalalignment='left',
                 verticalalignment='center', transform=ax2.transAxes)
        ax2.text(0.05, 0.75, f"$likelihood = {round(np.sum(like_samples)/len(yt), 3)}$", horizontalalignment='left',
                 verticalalignment='center', transform=ax2.transAxes)
        ax2.text(0.05, 0.55, f"$negloglike = {round(np.sum(np.mean(negloglike_samples, axis=0))/len(yt), 3)}$", horizontalalignment='left',
                 verticalalignment='center', transform=ax2.transAxes)
        # NOTE(review): `m` here is the mask left over from the last passband iteration above.
        ax2.plot(timesX_test[sidx][1:][-npred:][:argmax][m], anomaly_score_mean, lw=3, marker='o')
        ax2.fill_between(timesX_test[sidx][1:][-npred:][:argmax][m], anomaly_score_mean + anomaly_score_std,
                         anomaly_score_mean - anomaly_score_std, alpha=0.3, edgecolor="none")

        ax1.legend(frameon=True, fontsize=33)
        ax1.set_ylabel("Relative flux")
        ax2.set_ylabel("Anomaly score")
        ax2.set_xlabel("Time since trigger [days]")
        plt.tight_layout()
        fig.subplots_adjust(hspace=0)
        plt.savefig(os.path.join(fig_dir, model_name, f"lc_{objids_test[sidx]}_{idx}{plot_name}.pdf"))
        plt.close()
    print(model_name)
# + id="B5LnAyL9qKO-" colab_type="code" cellView="form" colab={}
#@title Build NN model
def build_model(X_train, passbands=('g', 'r'), reframe=False, probabilistic=False, nunits=100, bayesian=False, dropout_rate=0.0):
    """Build the TCN-based Keras model used for light-curve prediction.

    The network is Masking -> TCN -> Dropout, followed by either an LSTM+Dense
    single-step head (reframe) or a TimeDistributed Dense sequence head, optionally
    wrapped in a Normal DistributionLambda output when `probabilistic` is set.
    """
    # Forcing training=True keeps dropout active at inference time (MC-dropout);
    # None leaves Keras' default behaviour (dropout only during training).
    mc_dropout = True if bayesian else None
    npb = len(passbands)

    inputs = Input(shape=(X_train.shape[1], X_train.shape[2]))
    net = Masking(mask_value=0.)(inputs)
    net = TCN(nunits, return_sequences=True, kernel_size=2, nb_stacks=1, dilations=[1, 2, 4, 8],
              padding='causal', use_skip_connections=True, dropout_rate=dropout_rate,
              activation='sigmoid')(net, training=mc_dropout)
    net = Dropout(dropout_rate)(net, training=mc_dropout)

    if reframe is True:
        # Single-step prediction: collapse the sequence and emit one value per passband.
        net = LSTM(nunits)(net)
        net = Dense(npb)(net)
    elif probabilistic:
        # Two outputs per passband: mean and (pre-softplus) scale parameter.
        net = TimeDistributed(Dense(npb * 2))(net)
    else:
        net = TimeDistributed(Dense(npb * 1))(net)

    if probabilistic:
        outputs = tfp.layers.DistributionLambda(
            lambda t: tfd.Normal(loc=t[..., :npb], scale=1e-3 + tf.math.softplus(0.01*t[..., npb:])))(net)
    else:
        outputs = net
    return Model(inputs, outputs)
def train_model(X_train, X_test, y_train, y_test, yerr_train, yerr_test, fig_dir='.', epochs=20, retrain=False,
                passbands=('g', 'r'), model_change='', reframe=False, probabilistic=False, train_from_last_stop=0,
                batch_size=50, nunits=100, use_uncertainties=False, bayesian=False, dropout_rate=0.0, learning_rate=0.001):
    """Train (or load a cached) Keras model and save it with a training-history plot.

    Returns (model, model_name). The loss is chosen from `probabilistic` /
    `model_change`; if a saved model exists and `retrain` is False it is loaded
    instead of retrained. `train_from_last_stop` resumes training from a model
    previously saved after that many epochs.
    """
    model_name = f"keras_model_epochs{epochs+train_from_last_stop}_{model_change}"
    model_filename = os.path.join(fig_dir, model_name, f"{model_name}.hdf5")
    if not os.path.exists(os.path.join(fig_dir, model_name)):
        os.makedirs(os.path.join(fig_dir, model_name))

    # Select the loss function matching the model configuration.
    if probabilistic:
        lossfn = negloglike_with_error() if use_uncertainties else negloglike()
    elif 'chi2' in model_change:
        lossfn = chisquare_loss()
    elif 'mse_oe' in model_change:
        lossfn = mean_squared_error_over_error()
    else:
        lossfn = mean_squared_error()

    if not retrain and os.path.isfile(model_filename):
        model = load_model(model_filename, custom_objects={'loss': lossfn, 'TCN': TCN})
    else:
        if train_from_last_stop:
            old_model_name = f"keras_model_epochs{train_from_last_stop}_{model_change}"
            old_model_filename = os.path.join(fig_dir, old_model_name, f"{old_model_name}.hdf5")
            # BUGFIX: resume from the previously saved model; the original loaded
            # `model_filename`, which does not exist yet, and left
            # `old_model_filename` unused.
            model = load_model(old_model_filename, custom_objects={'loss': lossfn, 'TCN': TCN})
            # BUGFIX: keyword was misspelled `inital_epoch`, which Keras rejects.
            # NOTE(review): with initial_epoch=N, Keras trains until epoch `epochs`
            # (i.e. epochs-N additional epochs) — confirm that is the intent here.
            history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs,
                                batch_size=batch_size, verbose=2, initial_epoch=train_from_last_stop)
        else:
            model = build_model(X_train, passbands, reframe, probabilistic, nunits, bayesian, dropout_rate=dropout_rate)
            model.compile(loss=lossfn, optimizer=tf.optimizers.Adam(learning_rate=learning_rate))
            # tcn_full_summary(model, expand_residual_blocks=True)
            history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size, verbose=2)
            print(model.summary())
        model.save(model_filename)
        plot_history(history, model_filename)
    return model, model_name
# + id="I51LXPqkTBMn" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import scipy.optimize
import tensorflow as tf
import tensorflow_probability as tfp
import autograd
import numpy.linalg as lin
import pickle
import transomaly
from transomaly import helpers
COLPB = {'g': 'tab:blue', 'r': 'tab:red'}
# + id="3xOYMd2CGaDB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="e2d9e84f-f75b-4667-903a-b0d00ce61aa8"
# GET BAZIN FIT PARAMETERS FROM PLASTICC DATA
import os
import astrorapid
from astrorapid.get_training_data import get_data
# Load simulated ZTF (PLAsTiCC/SNANA) light curves for each transient class number.
light_curves_sims = {}
for class_num in [1, 2, 3, 41, 51, 60, 64]:
    print("Getting light curves for class", class_num)
    light_curves_sims[class_num] = get_data(get_data_func=astrorapid.get_training_data.get_data_from_snana_fits,
                                            class_num=class_num,
                                            data_dir=os.path.join('data/ZTF_20190512'),
                                            save_dir=os.path.join('data/saved_light_curves'),
                                            passbands=('g', 'r'),
                                            known_redshift=False,
                                            nprocesses=1,
                                            redo=False,
                                            calculate_t0=False)
# Real ZTF SNIa light curves (from the Open Supernova Catalog data directory).
light_curves = get_data(get_data_func=astrorapid.get_training_data.get_real_ztf_training_data,
                        class_num='Ia',
                        data_dir=os.path.join('data/real_ZTF_data_from_osc'),
                        save_dir=os.path.join('data/saved_real_ZTF_light_curves'),
                        passbands=('g', 'r'),
                        known_redshift=False,
                        nprocesses=1,
                        redo=False,
                        calculate_t0=False)
# + id="kC1fhKQFqk_z" colab_type="code" colab={}
# Make mock bazin model dataset and Bayesian functions
def get_y_data(fit_until=80, sigma_intrinsic=5, A=100, B=0, t0=10, tau_fall=20, tau_rise=-6, seed=None, t_all=np.arange(-70,80,step=3)):
    """Generate a noisy mock Bazin light curve on the time grid `t_all`.

    Noise model: intrinsic scatter (scaled by the amplitude A) plus fixed
    measurement noise with sigma_measurement = 20. Returns
    (t, y, yerr, t_all, y_all, yerr_all) where the first three are truncated
    to times <= `fit_until`.

    Note: the default `t_all` array is evaluated once at definition time; it is
    only read here, never mutated, so sharing it across calls is safe.
    """
    np.random.seed(seed)
    # Generalised: noise vectors follow the grid length (previously hard-coded
    # to 50, which broke for any custom `t_all` of a different length).
    npts = len(t_all)
    epsilon_intrinsic = np.random.normal(0, sigma_intrinsic, size=npts)
    sigma_measurement = 20
    yerr_all = np.random.normal(0, sigma_measurement, size=npts)
    numerator = np.exp(-(t_all - t0)/tau_fall)
    denominator = 1 + np.exp((t_all-t0)/tau_rise)
    y_all = A * numerator/denominator + B + A*epsilon_intrinsic + yerr_all
    # Only fit up to day `fit_until`
    mask = t_all <= fit_until
    t = t_all[mask]
    y = y_all[mask]
    yerr = yerr_all[mask]
    return t, y, yerr, t_all, y_all, yerr_all
def get_y_pred(fit_until=80, logA=2, B=0, t0=10, tau_fall=20, tau_rise=-6, np=np, t_all=np.arange(-70,80,step=3)):
    """Evaluate the noise-free Bazin model on the grid `t_all`.

    The `np` parameter lets callers substitute autograd.numpy so the model can
    be differentiated. Returns (times, model flux) restricted to t <= fit_until.
    """
    amplitude = np.exp(logA)
    fall = np.exp(-(t_all - t0)/tau_fall)
    rise = 1 + np.exp((t_all-t0)/tau_rise)
    flux_all = amplitude * fall/rise + B
    # Only keep epochs up to day `fit_until`
    keep = t_all <= fit_until
    return t_all[keep], flux_all[keep]
def log_likelihood(t_data, y_data, sigma_measurement, fit_until=80, log_sigma_intrinsic=0.7, logA=2, B=0, t0=10, tau_fall=20, tau_rise=-6, np=np):
    """Gaussian log-likelihood of the Bazin model for the observed fluxes.

    The total variance combines the intrinsic scatter (scaled by the amplitude)
    with the measurement variance. Returns -inf if the result is NaN (e.g. from
    overflow in the exponentials).
    """
    A = np.exp(logA)
    sigma_intrinsic = np.exp(log_sigma_intrinsic)
    _, model_flux = get_y_pred(fit_until, logA, B, t0, tau_fall, tau_rise, np, t_all=t_data)
    # Per-point Gaussian log-density terms.
    resid_term = -0.5 * (y_data - model_flux)**2 / (A**2*sigma_intrinsic**2 + sigma_measurement**2)
    norm_term = -0.5 * np.log(A**2*sigma_intrinsic**2 + sigma_measurement**2)
    const_term = -0.5 * np.log(2*np.pi)
    total = np.sum(resid_term + norm_term + const_term)
    if np.isnan(total):
        return -np.inf
    return total
def log_prior(class_num, passband, log_sigma_intrinsic=0.7, logA=2, B=0, t0=10, tau_fall=20, tau_rise=-6, np=np):
    """Flat (improper) prior over the Bazin parameters — always returns 0.

    `class_num` and `passband` are accepted so a per-class/per-passband Gaussian
    prior can be reinstated later without changing the call sites. The parameter
    vector is assembled (but unused) for the same reason.
    """
    theta = np.array([log_sigma_intrinsic, logA, B, t0, tau_fall, tau_rise])
    return 0
def log_posterior(class_num, passband, t_data, y_data, sigma_measurement, fit_until=80, log_sigma_intrinsic=0.7, logA=2, B=0, t0=10, tau_fall=20, tau_rise=-6, np=np):
    """Unnormalised log posterior: log-likelihood plus log-prior."""
    likelihood_term = log_likelihood(t_data, y_data, sigma_measurement, fit_until,
                                     log_sigma_intrinsic, logA, B, t0, tau_fall, tau_rise, np)
    prior_term = log_prior(class_num, passband, log_sigma_intrinsic, logA, B, t0, tau_fall, tau_rise, np)
    return likelihood_term + prior_term
# + id="z9UxAm2IH2XG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="469757fe-59d5-4f32-a4e1-48afe9fb1800"
import warnings
from transomaly import helpers
import emcee
import corner
import matplotlib
font = {'family': 'normal',
'size': 12}
matplotlib.rc('font', **font)
# Initial values
fit_until = 80
# BUGFIX: `x0` was initialised with empty inner dicts, so `x0[class_num][pb]`
# raised KeyError unless the printed medians from a later cell were pasted in
# first. Fall back to the model's default (log-space) parameters when no
# per-class/per-passband starting guess has been set:
# [log_sigma_intrinsic, logA, B, t0, tau_fall, tau_rise].
default_x0 = np.array([0.7, 2.0, 0.0, 10.0, 20.0, -6.0])
x0 = {classnum: {} for classnum in [1, 2, 3, 41, 51, 60, 64]}
for class_num in [1, 2, 3, 41, 51, 60, 64]:
    print("Fitting Bazin params for class", class_num)
    passbands = ('g', 'r')
    nobjidx = len(light_curves_sims[class_num].items())
    save_bazin_params = {pb: {} for pb in passbands}
    for objidx, (objid, lc) in enumerate(light_curves_sims[class_num].items()):
        if objidx % 10 == 0:
            print(f"{objidx} of {nobjidx}", objid)
        try:
            time, flux, fluxerr, photflag = {}, {}, {}, {}
            for pb in passbands:
                pbmask = lc['passband'] == pb
                time[pb] = lc[pbmask]['time'].data
                flux[pb] = lc[pbmask]['flux'].data
                fluxerr[pb] = lc[pbmask]['fluxErr'].data
                photflag[pb] = lc[pbmask]['photflag'].data
                # Mask out times outside of mintime and maxtime
                timemask = (time[pb] > -70) & (time[pb] < 80)
                time[pb] = time[pb][timemask]
                flux[pb] = flux[pb][timemask]
                fluxerr[pb] = fluxerr[pb][timemask]
                photflag[pb] = photflag[pb][timemask]
                if np.max(flux[pb]) < 1e-5:
                    print(objidx, objid, "Low flux due to incorrect zeropoints")
                    continue

                # Optimise fit: minimise the negative log posterior. The
                # `autograd_numpy` flag switches the numpy implementation so the
                # same objective can be differentiated by autograd below.
                autograd_numpy = False

                def objective_func(theta):
                    np1 = autograd.numpy if autograd_numpy else np
                    return -log_posterior(class_num, pb, time[pb], flux[pb], fluxerr[pb], fit_until,
                                          log_sigma_intrinsic=theta[0], logA=theta[1], B=theta[2],
                                          t0=theta[3], tau_fall=theta[4], tau_rise=theta[5], np=np1)

                res = minimize(objective_func, x0[class_num].get(pb, default_x0), method='Nelder-Mead',
                               options={'xatol': 1e-12, 'disp': False})

                # Compute the covariance matrix from the inverse Hessian (via autograd).
                autograd_numpy = True
                hessian_ = autograd.hessian(objective_func)
                autograd_hessian_inv = lin.inv(hessian_(res.x))

                # Get Uncertaintites
                cov_matrix = autograd_hessian_inv
                sigmas = np.sqrt(np.diagonal(cov_matrix))

                # Save best values, rejecting clearly pathological fits.
                if np.any(np.isnan(sigmas)) or np.any(res.x == 0.0) or np.any(sigmas == 0.0) or res.x[1] <= 0 or np.any(abs(res.x) < 1e-15) or np.any(np.isnan(res.x)):
                    print(f"Bad fit for objid {objidx} {pb} {objid}. Params: {res.x} sigmas {sigmas}")
                    continue
                else:
                    save_bazin_params[pb][objid] = res.x
        except Exception as e:
            print(objidx, objid, e)
            continue
    with open(f'save_bazin_params_class_multipb_{class_num}_280820_nopriors_resetx0_allobjects_logs.pickle', 'wb') as f:
        pickle.dump(save_bazin_params, f)
# + id="yScn3kcSIpA9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="4095f266-6666-4519-a891-972fcd3f459a"
# Compare different classes bazin paramters as histograms
ndim = 6
passbands = ('g', 'r')
# NOTE(review): class 41 is fitted in the cell above but omitted here — confirm intentional.
for class_num in [1, 2, 3, 51, 60, 64]:
    # BUGFIX: load the filename the fitting cell actually writes ('nopriors',
    # not 'resetpriors'); otherwise this cell raises FileNotFoundError when the
    # notebook is run top-to-bottom.
    with open(f'save_bazin_params_class_multipb_{class_num}_280820_nopriors_resetx0_allobjects_logs.pickle', 'rb') as f:
        load_bazin_params = pickle.load(f)
    for pb in passbands:
        params_array = np.array(list(load_bazin_params[pb].values()))
        # Parameters are already stored with sigma and A in log space.
        labels = [r"$\log(\sigma)$", r"$\log(A)$", r"$B$", r"$t_0$", r"$\tau_{fall}$", r"$\tau_{rise}$"]
        # Compute mean and covariance
        params_array_means = np.mean(params_array, axis=0)
        params_array_sigmas = np.std(params_array, axis=0)
        params_covariance = np.cov(params_array.T)
        # Print per-class/per-passband medians in a form that can be pasted into
        # the fitting cell as starting guesses.
        print(f"x0[{repr(class_num)}][{repr(pb)}] = np.{repr(np.median(params_array, axis=0))}")
        # Plot histograms and remove 3 median absolute deviation outliers for plotting
        params_removed_outliers = []
        for i in range(ndim):
            d = abs(params_array[:, i] - np.median(params_array[:, i]))
            mad = np.median(d)
            params_removed_outliers.append(params_array[:, i][d < 3 * mad])
        for i in range(ndim):
            fig = plt.figure(f"{i}_{pb}", figsize=(7, 3.2))
            ax1 = plt.gca()
            ax1.hist(params_removed_outliers[i], bins=50, density=True, label=f"{helpers.get_sntypes()[class_num]}", alpha=0.9)
            ax1.set_xlabel(fr"{labels[i]} ${pb}$-band", fontsize=15)
            ax1.legend(fontsize=13)
            plt.xticks(fontsize=14)
            plt.yticks(fontsize=14)
            ax1.axes.yaxis.set_ticklabels([])
            # Fixed axis limits per parameter so the per-class histograms overlay consistently.
            if labels[i] == r"$B$":
                plt.xlim(-5, 5)
                plt.ylim(0, 4)
            if labels[i] == r"$t_0$":
                plt.xlim(-5, 5)
                plt.ylim(0, 1.5)
            if labels[i] == r"$\tau_{fall}$":
                plt.xlim(-5, 40)
            if labels[i] == r"$\tau_{rise}$":
                plt.xlim(-7, 0)
                plt.ylim(0, 2)
            plt.tight_layout()
            plt.savefig(f"{i}_{pb}")
# + id="CvWRSi1ZUmeD" colab_type="code" cellView="form" colab={}
#@title MCMC bazin fit to plasticc data
# # ! pip install emcee
# # ! pip install corner
import emcee
import corner
labels = ["sigma", "A", "B", "t0", "taufall", "taurise"]
pb = 'g'
save_bazin_params = {}
for objidx, (objid, lc) in enumerate(light_curves.items()):
    if objidx > 3:
        break
    print(objidx, objid)
    try:
        pbmask = lc['passband'] == pb
        time = lc[pbmask]['time'].data
        # BUGFIX: was `lc[pmask]` — an undefined name (typo for `pbmask`).
        flux = lc[pbmask]['flux'].data
        fluxerr = lc[pbmask]['fluxErr'].data
        photflag = lc[pbmask]['photflag'].data
        # Mask out times outside of mintime and maxtime
        timemask = (time > -70) & (time < 80)
        time = time[timemask]
        flux = flux[timemask]
        fluxerr = fluxerr[timemask]
        photflag = photflag[timemask]

        def objective_func(theta):
            sigma_intrinsic = theta[0]
            if sigma_intrinsic < 0:
                return -np.inf
            # NOTE(review): stale call — log_posterior() is now defined with
            # (class_num, passband, ...) leading arguments and log-space
            # parameters (log_sigma_intrinsic/logA); this linear-space call
            # would raise TypeError. Confirm intended signature before use.
            return log_posterior(time, flux, fluxerr, fit_until, sigma_intrinsic=theta[0], A=theta[1], B=theta[2], t0=theta[3], tau_fall=theta[4], tau_rise=theta[5], np=np)

        # NOTE(review): `res` (and `fit_until`) come from an optimiser run in a
        # previous cell; this cell raises NameError if run standalone.
        pos = res.x + 1e-4 * np.random.randn(100, len(res.x))
        nwalkers, ndim = pos.shape
        print(nwalkers, ndim)
        sampler = emcee.EnsembleSampler(nwalkers, ndim, objective_func)
        sampler.run_mcmc(pos, 2000, progress=True)
        samples = sampler.get_chain(discard=200, flat=True)
        # print mcmc 16th, 50th and 84th percentiles
        print("\nMCMC 16th, 50th, and 84th percentiles")
        badfit = False
        param_vals = []
        for i in range(ndim):
            mcmc = np.percentile(samples[:, i], [16, 50, 84])
            q = np.diff(mcmc)
            txt = f"{labels[i]} = {mcmc[1]:.2f} (+{q[0]:.2f}) (-{q[1]:.2f})"
            print(txt)
            param_vals.append(mcmc[1])
            if q[0] > 1e5 or q[1] > 1e5:
                print("Bad fit for", objid, i, q[0], q[1])
                badfit = True
                break
        if badfit:
            continue
        # Save best values
        save_bazin_params[objid] = param_vals
        # Plot draws
        plt.figure()
        t_plot, y_pred_plot = get_y_pred(80, *res.x[1:])
        plt.errorbar(time, flux, yerr=fluxerr, label='true - used for fitting', fmt='.')
        plt.plot(t_plot, y_pred_plot, label='pred')
        # NOTE(review): `t_all` and `t_data` are also not defined in this cell.
        plt.axvspan(t_all[np.argmax(t_data)+1], max(t_all), alpha=0.2, color='grey')
        save_y_pred_draws = []
        for i in range(100):
            sample_idx = np.random.randint(low=0, high=samples.shape[0])
            t_draw, y_pred_draw = get_y_pred(80, *samples[sample_idx][1:])
            plt.plot(t_draw, y_pred_draw, color='black', alpha=0.1)
            save_y_pred_draws.append(y_pred_draw)
        plt.plot(t_draw, np.median(save_y_pred_draws, axis=0), label='median draws')
        plt.legend()
        plt.xlabel('Time in days')
        plt.ylabel('Flux')
        plt.title(f"{objidx} {objid}")
        plt.show()
    except Exception as e:
        # BUGFIX: was `print(i, objid, e)` — `i` may be unbound if the exception
        # occurred before the percentile loop, raising a second error here.
        print(objidx, objid, e)
        continue
# import pickle
# with open('save_bazin_params.pickle', 'wb') as f:
# pickle.dump(save_bazin_params, f)
# + id="st5pOoG8bzu5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="59b54bf6-5057-45b1-b9ae-489838e80dbd"
# Compare real data to simulated data for SNIa
ndim = 6
# These pickles store sigma and A in linear space, hence the log transform below.
with open('save_bazin_params.pickle', 'rb') as f:
    load_bazin_params = pickle.load(f)
params_array = np.array(list(load_bazin_params.values()))
#
with open('save_real_data_from_osc_bazin_params.pickle', 'rb') as f:
    load_bazin_params2 = pickle.load(f)
params_array2 = np.array(list(load_bazin_params2.values()))
# Change to logA and logsigma
labels = ["log(sigma)", "log(A)", "B", "t0", "taufall", "taurise"]
params_array[:,0] = np.log(params_array[:,0])
params_array[:,1] = np.log(params_array[:,1])
#
params_array2[:,0] = np.log(params_array2[:,0])
params_array2[:,1] = np.log(params_array2[:,1])
# Compute mean and covariance
params_array_means = np.mean(params_array, axis=0)
params_array_sigmas = np.std(params_array, axis=0)
params_covariance = np.cov(params_array.T)
print(params_covariance)
#
params_array_means2 = np.mean(params_array2, axis=0)
params_array_sigmas2 = np.std(params_array2, axis=0)
params_covariance2 = np.cov(params_array2.T)
print(params_covariance2)
# Plot histograms and remove 3 sigma outliers for plotting
params_removed_outliers = []
for i in range(ndim):
    params_removed_outliers.append(params_array[:,i][abs(params_array[:,i] - np.median(params_array[:,i])) < 3*np.std(params_array[:,i])])
#
params_removed_outliers2 = []
for i in range(ndim):
    params_removed_outliers2.append(params_array2[:,i][abs(params_array2[:,i] - np.median(params_array2[:,i])) < 3*np.std(params_array2[:,i])])
print(params_array_means, params_array_sigmas)
# Overlay sim vs real histograms, one figure per Bazin parameter.
for i in range(ndim):
    plt.figure(figsize=(7,3.2))
    plt.hist(params_removed_outliers[i], bins=50, density=True, label="SNIa ZTF PLAsTiCC sims", alpha=0.5)
    plt.hist(params_removed_outliers2[i], bins=50, density=True, label='SNIa Real data', alpha=0.5)
    plt.xlabel(labels[i])
    ax1 = plt.gca()
    ax1.legend(fontsize=13)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    ax1.axes.yaxis.set_ticklabels([])
    plt.tight_layout()
    # NOTE(review): `pb` is not defined in this cell — it leaks in from an
    # earlier cell; confirm before running this cell standalone.
    plt.savefig(f"real_vs_sims_{i}_{pb}")
# + id="B_ZBVVs5jrjZ" colab_type="code" colab={}
# # np.where(~np.isfinite(params_array))
# params_covariance
# params_array_means
params_array
# + id="4KDS4IbqQ-AL" colab_type="code" colab={}
params_covariance
np.linalg.inv(params_covariance)
# params_array_means
# np.median(fluxerr)
# + id="pInmvcakgq2t" colab_type="code" cellView="form" colab={}
#@title Check univariate vs multivariate gaussian priors
# x = np.array([sigma_intrinsic, A, B, t0, tau_fall, tau_rise])
# means = np.array([24.9723914 , 890.8069568 , -26.34104599, -3.4609063 ,
# 16.20697774, -3.40826335])
# covariance = np.array([[ 1.74474637e+02, 2.18543822e+03, -2.70052169e+02,
# 7.41085359e+00, 1.41350541e+01, -1.71021825e+00],
# [ 2.18543822e+03, 2.88251062e+05, -9.25276312e+04,
# 6.59647012e+02, 2.79607128e+03, -3.53338108e+02],
# [-2.70052169e+02, -9.25276312e+04, 6.81433971e+04,
# 5.16278048e+02, -2.50140730e+03, 2.49325353e+02],
# [ 7.41085359e+00, 6.59647012e+02, 5.16278048e+02,
# 4.14864470e+01, -3.13248842e+01, 1.02721038e+00],
# [ 1.41350541e+01, 2.79607128e+03, -2.50140730e+03,
# -3.13248842e+01, 1.17189022e+02, -9.34560249e+00],
# [-1.71021825e+00, -3.53338108e+02, 2.49325353e+02,
# 1.02721038e+00, -9.34560249e+00, 1.72261655e+00]])
# n = len(means)
# inverse_covariance = np.array([[6.78744316e-03, -9.29642223e-05, -2.28506150e-04,
# 8.05930286e-04, -2.90254365e-03, 4.51563295e-03],
# [-9.29642223e-05, 1.01991443e-05, 2.03397428e-05,
# -3.17739063e-04, 1.00292910e-04, -2.10594624e-04],
# [-2.28506150e-04, 2.03397428e-05, 1.24103770e-04,
# -4.10170214e-04, 1.73313886e-03, -4.36989143e-03],
# [ 8.05930286e-04, -3.17739063e-04, -4.10170214e-04,
# 4.35185650e-02, 1.39113687e-02, 4.45149742e-02],
# [-2.90254365e-03, 1.00292910e-04, 1.73313886e-03,
# 1.39113687e-02, 4.92597661e-02, 2.57922494e-02],
# [ 4.51563295e-03, -2.10594624e-04, -4.36989143e-03,
# 4.45149742e-02, 2.57922494e-02, 1.28766568e+00]])
# log_prior = -(n/2)*np.log(2*np.pi) - 0.5*np.log(np.linalg.det(covariance)) - 0.5 * (x-means) @ inverse_covariance @ (x-means)
# print(log_prior)
# sigma_intrinsic, A, B, t0, tau_fall, tau_rise = x
# # sigma_intrinsic prior
# sigma_intrinsic_mean = means[0]
# sigma_intrinsic_sigma = np.sqrt(np.diagonal(covariance)[0])
# sigma_log_prior = -0.5*np.log(2*np.pi*sigma_intrinsic_sigma**2)-0.5*(sigma_intrinsic-sigma_intrinsic_mean)**2/sigma_intrinsic_sigma**2
# # A prior
# A_mean = means[1]
# A_sigma = np.sqrt(np.diagonal(covariance)[1])
# A_log_prior = -0.5*np.log(2*np.pi*A_sigma**2)-0.5*(A-A_mean)**2/A_sigma**2
# # B prior
# B_mean = means[2]
# B_sigma = np.sqrt(np.diagonal(covariance)[2])
# B_log_prior = -0.5*np.log(2*np.pi*B_sigma**2)-0.5*(B-B_mean)**2/B_sigma**2
# # t0 prior
# t0_mean = means[3]
# t0_sigma = np.sqrt(np.diagonal(covariance)[3])
# t0_log_prior = -0.5*np.log(2*np.pi*t0_sigma**2)-0.5*(t0-t0_mean)**2/t0_sigma**2
# # tau_fall prior
# tau_fall_mean = means[4]
# tau_fall_sigma = np.sqrt(np.diagonal(covariance)[4])
# tau_fall_log_prior = -0.5*np.log(2*np.pi*tau_fall_sigma**2)-0.5*(tau_fall-tau_fall_mean)**2/tau_fall_sigma**2
# # tau_rise prior
# tau_rise_mean = means[5]
# tau_rise_sigma = np.sqrt(np.diagonal(covariance)[5])
# tau_rise_log_prior = -0.5*np.log(2*np.pi*tau_rise_sigma**2)-0.5*(tau_rise-tau_rise_mean)**2/tau_rise_sigma**2
# log_prior = np.log(1) + sigma_log_prior + A_log_prior + B_log_prior + tau_fall_log_prior + tau_rise_log_prior
# print(log_prior)
# + id="jCf26Q3ErpHg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="5065f4f7-0987-4ee0-d19d-45e07a69ea1a"
# Generate one mock Bazin light curve with parameters drawn from the
# class-level multivariate Gaussian, fit it by maximising the log posterior,
# and propagate parameter uncertainty via the inverse Hessian (autograd).
np.random.seed(42)
# Make dataset
t_all = np.arange(-70,80,step=3)
# sigma_intrinsic = np.random.normal(25, 13) # 5
# A = np.random.normal(891, 536) # 100
# B = np.random.normal(-26, 260) #
# t0 = np.random.normal(-3.5, 6.4) #
# tau_fall = np.random.normal(16.2, 10.8) # 20
# tau_rise = np.random.normal(-3.4, 1.3) # -6
# print(sigma_intrinsic, A, B, t0, tau_fall, tau_rise)
# Population mean/covariance of (log sigma, log A, B, t0, tau_fall, tau_rise).
data_means = np.array([ 6.20522369, 6.83039727, 3.60728806, -2.72013241, 13.69171025, -3.21985621])
data_covariance = np.array([[7.49096627e+00, 9.01726007e-01, 2.66965110e+01, 8.27519610e+00,
                             3.22998697e+00, 3.02413974e-01],
                            [9.01726007e-01, 5.39866858e-01, 7.91625228e+00, 2.27039763e+00,
                             6.59346887e-01, 8.90609190e-02],
                            [2.66965110e+01, 7.91625228e+00, 4.34861842e+03, 1.73073597e+02,
                             1.03490513e+02, 1.94038357e+01],
                            [8.27519610e+00, 2.27039763e+00, 1.73073597e+02, 4.59726855e+01,
                             5.14228861e+00, 4.92809317e-01],
                            [3.22998697e+00, 6.59346887e-01, 1.03490513e+02, 5.14228861e+00,
                             2.96042102e+01, 1.33934350e+00],
                            [3.02413974e-01, 8.90609190e-02, 1.94038357e+01, 4.92809317e-01,
                             1.33934350e+00, 2.90804476e-01]])
# Draw one "true" parameter vector; log(sigma) is then fixed to 4 by hand.
log_sigma_intrinsic, logA, B, t0, tau_fall, tau_rise = np.random.multivariate_normal(data_means, data_covariance)
log_sigma_intrinsic = 4
print(log_sigma_intrinsic, logA, B, t0, tau_fall, tau_rise)
fit_until = 79 # 30
t_data, y_data, yerr, t_all, y_data_all, yerr_all = get_y_data(fit_until, np.exp(log_sigma_intrinsic), np.exp(logA), B, t0, tau_fall, tau_rise, seed=0, t_all=t_all)
# t_data, y_data, yerr = time[pb], flux[pb], fluxerr[pb]
class_num = 1
pb = 'g'
#Initial values
# sigma_intrinsic0 = 10
# A0 = 200
# B0 = 10
# t00 = 15
# tau_fall0 = 18
# tau_rise0 = -7
# x0 = np.array([sigma_intrinsic0, A0, B0, t00, tau_fall0, tau_rise0])
x0 = data_means
autograd_numpy = False
# Optimise
def objective_func(theta):
    # Switch to autograd's numpy only when differentiating (Hessian below).
    if autograd_numpy:
        np1 = autograd.numpy
    else:
        np1 = np
    return -log_posterior(class_num, pb, t_data, y_data, yerr, fit_until, log_sigma_intrinsic=theta[0], logA=theta[1], B=theta[2], t0=theta[3], tau_fall=theta[4], tau_rise=theta[5], np=np1)
res = minimize(objective_func, x0, method='Nelder-Mead', options={'disp': True})
# # Compute hessian in two ways
# print("Get Inverse Hessian from BFGS optimiser")
# hessian_inv = res.hess_inv
# print(hessian_inv)
print("Compute Inverse Hessian using Autograd")
autograd_numpy = True
hessian_ = autograd.hessian(objective_func)
autograd_hessian_inv = lin.inv(hessian_(res.x))
print(autograd_hessian_inv)
# Get Uncertaintites
cov_matrix = autograd_hessian_inv
sigmas = np.sqrt(np.diagonal(cov_matrix))
print(sigmas)
# Print comparisons
print("\n")
print(f"log(sigma) : true={log_sigma_intrinsic}, pred={res.x[0]:.2f} +- {sigmas[0]:.2f}")
print(f"log(A) : true={logA}, pred={res.x[1]:.2f} +- {sigmas[1]:.2f}")
print(f"B : true={B}, pred={res.x[2]:.2f} +- {sigmas[2]:.2f}")
print(f"t0 : true={t0}, pred={res.x[3]:.2f} +- {sigmas[3]:.2f}")
print(f"tau_fall : true={tau_fall}, pred={res.x[4]:.2f} +- {sigmas[4]:.2f}")
print(f"tau_rise : true={tau_rise}, pred={res.x[5]:.2f} +- {sigmas[5]:.2f}")
# Plot
t_plot, y_pred_plot = get_y_pred(80, *res.x[1:])
plt.errorbar(t_all, y_data_all, yerr=yerr_all, label='true - not used for fitting', fmt='.')
plt.errorbar(t_data, y_data, yerr=yerr, label='true - used for fitting', fmt='.')
plt.plot(t_plot, y_pred_plot, label='pred')
plt.axvspan(fit_until, max(t_all), alpha=0.2, color='grey')
# Draw random parameter functions
# Sample parameter vectors from the Gaussian (MAP, inverse-Hessian covariance)
# and overplot the implied light curves as an uncertainty band.
save_y_pred_draws = []
for i in range(100):
    param_draws = np.random.multivariate_normal(res.x, cov_matrix)
    t_draw, y_pred_draw = get_y_pred(80, *param_draws[1:])
    plt.plot(t_draw, y_pred_draw, color='black', alpha=0.1)
    save_y_pred_draws.append(y_pred_draw)
plt.plot(t_draw, np.median(save_y_pred_draws, axis=0), label='median draws')
plt.legend()
plt.xlabel('Time in days')
plt.ylabel('Flux')
plt.ylim(bottom=min(y_data_all), top=max(y_data_all))
# + id="atOiOlSBO6nz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a8ab4ab3-0207-4af7-9bdc-b81fc66bd577"
# ## MCMC fit
# # ! pip install emcee
# # ! pip install corner
# # ! pip install chainconsumer
# Sample the Bazin-parameter posterior with emcee, starting the walkers in a
# small ball around the MAP solution `res.x` from the previous cell.
labels = ["log(sigma)", "log(A)", "B", "t0", "taufall", "taurise"]
def objective_func(theta):
    # return log_posterior(class_num, pb, time, flux, fluxerr, fit_until, log_sigma_intrinsic=theta[0], logA=theta[1], B=theta[2], t0=theta[3], tau_fall=theta[4], tau_rise=theta[5], np=np)
    return log_posterior(class_num, pb, t_data, y_data, yerr, fit_until, log_sigma_intrinsic=theta[0], logA=theta[1], B=theta[2], t0=theta[3], tau_fall=theta[4], tau_rise=theta[5], np=np)
import emcee
import corner
# 100 walkers perturbed slightly around the MAP estimate.
pos = res.x + 1e-4 * np.random.randn(100, len(res.x))
nwalkers, ndim = pos.shape
print(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, objective_func)
sampler.run_mcmc(pos, 1000, progress=True);
# Discard 200 burn-in steps and flatten the chains.
samples = sampler.get_chain(discard=200, flat=True)
fig = corner.corner(samples, labels=labels, truths=[log_sigma_intrinsic, logA, B, t0, tau_fall, tau_rise]);
fig = corner.corner(samples, labels=labels, truths=res.x);
# print mcmc 16th, 50th and 84th percentiles
print("\nMCMC 16th, 50th, and 84th percentiles")
for i in range(ndim):
    mcmc = np.percentile(samples[:, i], [16, 50, 84])
    q = np.diff(mcmc)
    txt = f"{labels[i]} = {mcmc[1]:.2f} (+{q[0]:.2f}) (-{q[1]:.2f})"
    print(txt)
# Plot draws
plt.figure()
t_plot, y_pred_plot = get_y_pred(80, *res.x[1:])
plt.errorbar(t_all, y_data_all, yerr=yerr_all, label='true - not used for fitting', fmt='.')
plt.errorbar(t_data, y_data, yerr=yerr, label='true - used for fitting', fmt='.')
# plt.errorbar(time, flux, yerr=fluxerr, label='true - used for fitting', fmt='.')
plt.plot(t_plot, y_pred_plot, label='pred')
plt.axvspan(fit_until, max(t_all), alpha=0.2, color='grey')
# Overplot light curves for 100 randomly selected posterior samples.
save_y_pred_draws = []
for i in range(100):
    sample_idx = np.random.randint(low=0, high=samples.shape[0])
    t_draw, y_pred_draw = get_y_pred(80, *samples[sample_idx][1:])
    plt.plot(t_draw, y_pred_draw, color='black', alpha=0.1)
    save_y_pred_draws.append(y_pred_draw)
plt.plot(t_draw, np.median(save_y_pred_draws, axis=0), label='median draws')
plt.legend()
plt.xlabel('Time in days')
plt.ylabel('Flux')
# plt.ylim(bottom=-10, top=70)
print("\nMLE with sigmas from inverse Hessian")
print(f"log(sigma) : true={log_sigma_intrinsic}, pred={res.x[0]:.2f} +- {sigmas[0]:.2f}")
print(f"log(A) : true={logA}, pred={res.x[1]:.2f} +- {sigmas[1]:.2f}")
print(f"B : true={B}, pred={res.x[2]:.2f} +- {sigmas[2]:.2f}")
print(f"t0 : true={t0}, pred={res.x[3]:.2f} +- {sigmas[3]:.2f}")
print(f"tau_fall : true={tau_fall}, pred={res.x[4]:.2f} +- {sigmas[4]:.2f}")
print(f"tau_rise : true={tau_rise}, pred={res.x[5]:.2f} +- {sigmas[5]:.2f}")
# + id="526rh19D4gRt" colab_type="code" cellView="form" colab={}
#@title Fit Bazin simulations at multiple time steps single passband
# Refit the mock light curve at every 3-day step, using only the data observed
# up to `fit_until`, and plot the prediction plus parameter-draw uncertainty
# bands at each step. Relies on globals from the earlier cells: class_num, pb,
# x0, the true parameters, get_y_data/get_y_pred and log_posterior.
for fit_until in range(-70, 80, 3):
    t_data, y_data, yerr, t_all, y_data_all, yerr_all = get_y_data(fit_until, np.exp(log_sigma_intrinsic), np.exp(logA), B, t0, tau_fall, tau_rise, seed=0, t_all=t_all)
    autograd_numpy = False  # plain numpy while optimising (as in the other cells)

    def objective_func(theta):
        # Use autograd's numpy only when differentiating (Hessian below).
        if autograd_numpy:
            np1 = autograd.numpy
        else:
            np1 = np
        # BUGFIX: log_posterior takes class_num and pb as its first two
        # arguments (see every other fitting cell); they were missing here,
        # which raised a TypeError on the first call.
        return -log_posterior(class_num, pb, t_data, y_data, yerr, fit_until, log_sigma_intrinsic=theta[0], logA=theta[1], B=theta[2], t0=theta[3], tau_fall=theta[4], tau_rise=theta[5], np=np1)

    # BUGFIX: 'xatol' is a Nelder-Mead option; BFGS does not accept it. Use
    # Nelder-Mead for consistency with the other fitting cells.
    res = minimize(objective_func, x0, method='Nelder-Mead', options={'xatol': 1e-8, 'disp': True})
    # Compute the covariance of the MAP estimate from the inverse Hessian.
    # print("Compute Inverse Hessian using Autograd")
    autograd_numpy = True
    hessian_ = autograd.hessian(objective_func)
    autograd_hessian_inv = lin.inv(hessian_(res.x))
    # print(autograd_hessian_inv)
    # Get Uncertaintites
    cov_matrix = autograd_hessian_inv
    sigmas = np.sqrt(np.diagonal(cov_matrix))
    print(sigmas)
    # Skip time steps where the Hessian was not invertible / fit failed.
    if np.any(np.isnan(cov_matrix)):
        print("covariance matrix has nans", fit_until, cov_matrix)
        continue
    t_plot, y_pred_plot = get_y_pred(80, *res.x[1:])
    plt.figure()
    plt.errorbar(t_all, y_data_all, yerr=yerr_all, label='true - not used for fitting', fmt='.')
    plt.errorbar(t_data, y_data, yerr=yerr, label='true - used for fitting', fmt='.')
    plt.plot(t_plot, y_pred_plot, label='pred')
    plt.axvspan(fit_until, max(t_all), alpha=0.2, color='grey')
    # Draw random parameter functions
    save_y_pred_draws = []
    for i in range(100):
        param_draws = np.random.multivariate_normal(res.x, cov_matrix)
        t_draw, y_pred_draw = get_y_pred(80, *param_draws[1:])
        plt.plot(t_draw, y_pred_draw, color='black', alpha=0.1)
        save_y_pred_draws.append(y_pred_draw)
    plt.plot(t_draw, np.median(save_y_pred_draws, axis=0), label='median draws')
    plt.legend()
    plt.xlabel('Time in days')
    plt.ylabel('Flux')
    plt.ylim(bottom=min(y_data_all), top=max(y_data_all))
    plt.show()
    # Print comparisons
    print("\n")
    print(fit_until)
    print(f"log(sigma) : true={log_sigma_intrinsic}, pred={res.x[0]:.2f} +- {sigmas[0]:.2f}")
    print(f"log(A) : true={logA}, pred={res.x[1]:.2f} +- {sigmas[1]:.2f}")
    print(f"B : true={B}, pred={res.x[2]:.2f} +- {sigmas[2]:.2f}")
    print(f"t0 : true={t0}, pred={res.x[3]:.2f} +- {sigmas[3]:.2f}")
    print(f"tau_fall : true={tau_fall}, pred={res.x[4]:.2f} +- {sigmas[4]:.2f}")
    print(f"tau_rise : true={tau_rise}, pred={res.x[5]:.2f} +- {sigmas[5]:.2f}")
# + id="e0CkycQ6APiE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 443} outputId="edaf930d-0e28-4e5d-868d-94387399fabf"
# Fit actual light curves with Bazin function at different time steps
# For the selected object (objidx 13 only), step through the light curve in
# 3-day increments, fit each passband with data up to `fit_until`, and save a
# multi-band "figure for the paper" at each step.
import warnings
from transomaly import helpers
class_num = 1
passbands = ('g', 'r')
nobjidx = len(light_curves_sims[class_num].items())
# Initial guess for (log sigma, log A, B, t0, tau_fall, tau_rise).
x0 = np.array([1.3 , 6 , -26.34104599, -3.4609063, 16.20697774, -3.40826335])
# Per-object, per-passband storage for 3-day-ahead predictions and the
# interpolated observations they are compared against (50 time steps).
save_flux_pred = {}
save_fluxerr_pred = {}
save_flux_interp = {}
save_fluxerr_interp = {}
for objidx, (objid, lc) in enumerate(light_curves_sims[class_num].items()):
    if objidx != 13:  # only process the single example object
        continue
    print(f"{objidx} of {nobjidx}", objid)
    save_flux_pred[objidx] = {pb: np.zeros(50) for pb in passbands}
    save_fluxerr_pred[objidx] = {pb: np.zeros(50) for pb in passbands}
    save_flux_interp[objidx] = {pb: np.zeros(50) for pb in passbands}
    save_fluxerr_interp[objidx] = {pb: np.zeros(50) for pb in passbands}
    for f, fit_until in enumerate(range(-70, 80, 3)):
        print(f"objidx: {objidx}, timestep: {f}")
        plt.close()
        try:
            time, flux, fluxerr, photflag = {}, {}, {}, {}
            t_pred1, y_pred1 = {}, {}
            respb = {}
            for pb in passbands:
                pbmask = lc['passband'] == pb
                time[pb] = lc[pbmask]['time'].data
                flux[pb] = lc[pbmask]['flux'].data
                fluxerr[pb] = lc[pbmask]['fluxErr'].data
                photflag[pb] = lc[pbmask]['photflag'].data
                # Mask out times outside of mintime and maxtime
                timemask = (time[pb] > -70) & (time[pb] < 80)
                time[pb] = time[pb][timemask]
                flux[pb] = flux[pb][timemask]
                fluxerr[pb] = fluxerr[pb][timemask]
                photflag[pb] = photflag[pb][timemask]
                if np.max(flux[pb]) < 1e-5:
                    print(objidx, objid, "Low flux due to incorrect zeropoints")
                    continue
                # Only fit up to day `fit_until`
                mask = (time[pb] <= fit_until) & (time[pb] >= -70)
                time[pb] = time[pb][mask]
                flux[pb] = flux[pb][mask]
                fluxerr[pb] = fluxerr[pb][mask]
                photflag[pb] = photflag[pb][mask]
                # Optimise fit
                autograd_numpy = False
                def objective_func(theta):
                    # Autograd's numpy is used only for the Hessian below.
                    if autograd_numpy:
                        np1 = autograd.numpy
                    else:
                        np1 = np
                    return -log_posterior(class_num, pb, time[pb], flux[pb], fluxerr[pb], fit_until, log_sigma_intrinsic=theta[0], logA=theta[1], B=theta[2], t0=theta[3], tau_fall=theta[4], tau_rise=theta[5], np=np1)
                res = minimize(objective_func, x0, method='Nelder-Mead', options={'xatol': 1e-12, 'disp': False})
                respb[pb] = res
                # print("Compute Covariance Matrix using Autograd")
                autograd_numpy = True
                hessian_ = autograd.hessian(objective_func)
                autograd_hessian_inv = lin.inv(hessian_(res.x))
                # print(autograd_hessian_inv)
                # Get Uncertaintites
                cov_matrix = autograd_hessian_inv
                sigmas = np.sqrt(np.diagonal(cov_matrix))
                # print(sigmas)
                # Ignore bad fits
                if np.any(np.isnan(cov_matrix)) or np.any(np.isnan(sigmas)) or np.any(res.x == 0.0) or np.any(sigmas==0.0) or res.x[1] <= 0 or np.any(abs(res.x)<1e-4) or np.any(np.isnan(res.x)):
                    print(f"Bad fit for objid {objidx} {objid}. Params: {res.x} sigmas {sigmas}")
                    continue
                # Plot draws
                t_plot, y_pred_plot = get_y_pred(80, *res.x[1:])
                t_pred1[pb] = t_plot
                y_pred1[pb] = y_pred_plot
                time_all, flux_all, fluxerr_all = lc[pbmask]['time'].data, lc[pbmask]['flux'].data, lc[pbmask]['fluxErr'].data
                spl = helpers.ErrorPropagationSpline(time_all, flux_all, fluxerr_all, k=1, N=100, ext='zeros')
                fluxinterp, fluxerrinterp = spl(t_plot)
                if time[pb].size == 0 or fit_until > time_all[-1] or fit_until >= t_plot[-1]:
                    continue
                # plt.figure()
                # plt.errorbar(time_all, flux_all, yerr=fluxerr_all, label='true - not used for fitting', fmt='.')
                # plt.errorbar(time[pb], flux[pb], yerr=fluxerr[pb], label='true - used for fitting', fmt='.')
                # plt.plot(t_plot, y_pred_plot, label='pred')
                # # plt.axvspan(time_all[np.argmax(time[pb])+1], max(time_all), alpha=0.2, color='grey')
                # plt.axvspan(fit_until, 80, alpha=0.2, color='grey')
                # save_y_pred_draws = []
                # for i in range(100):
                #     with warnings.catch_warnings():
                #         warnings.simplefilter("ignore")
                #         param_draws = np.random.multivariate_normal(res.x, cov_matrix)
                #         t_draw, y_pred_draw = get_y_pred(80, *param_draws[1:])
                #         save_y_pred_draws.append(y_pred_draw)
                #     # One of these Y-pred_draws has a first prediction of 10**24
                #     plt.plot(t_draw, y_pred_draw, color='black', alpha=0.1)
                # plt.plot(t_draw, np.median(save_y_pred_draws, axis=0), label='median draws')
                # plt.legend()
                # plt.xlabel('Time in days')
                # plt.ylabel(f'Flux {pb} band')
                # plt.title(f"{objidx} {objid}")
                # plt.ylim(bottom=min(0, min(flux_all), min(y_pred_plot)), top=max(max(flux_all), max(y_pred_plot)))
                # # plt.xlim(left=max(-70, min(time_all)), right = min(80, max(time_all)))
                # plt.xlim(-70, 80)
                # plt.show()
                # # # Print best fit parameters
                # # print("\n")
                # # print(f"log(sigma) : initial={x0[0]}, pred={res.x[0]:.2f} +- {sigmas[0]:.2f}")
                # # print(f"log(A) : initial={x0[1]}, pred={res.x[1]:.2f} +- {sigmas[1]:.2f}")
                # # print(f"B : initial={x0[2]}, pred={res.x[2]:.2f} +- {sigmas[2]:.2f}")
                # # print(f"t0 : initial={x0[3]}, pred={res.x[3]:.2f} +- {sigmas[3]:.2f}")
                # # print(f"tau_fall : initial={x0[4]}, pred={res.x[4]:.2f} +- {sigmas[4]:.2f}")
                # # print(f"tau_rise : initial={x0[5]}, pred={res.x[5]:.2f} +- {sigmas[5]:.2f}")
                # save_flux_pred[objidx][pb][f+1] = y_pred_plot[f+1]
                # save_fluxerr_pred[objidx][pb][f+1] = np.std(save_y_pred_draws, axis=0)[f+1]
                # save_flux_interp[objidx][pb][f+1] = fluxinterp[f+1]
                # save_fluxerr_interp[objidx][pb][f+1] = fluxerrinterp[f+1]
            # Figures for paper
            # NOTE(review): cov_matrix below is whatever the *last* passband's
            # fit produced, but the draws use each band's own respb[pb].x —
            # confirm sharing one covariance across bands is intended.
            fig, (ax1) = plt.subplots(nrows=1, ncols=1)
            for pb in passbands:
                pbmask = lc['passband'] == pb
                time_all, flux_all, fluxerr_all = lc[pbmask]['time'].data, lc[pbmask]['flux'].data, lc[pbmask]['fluxErr'].data
                ax1.errorbar(time_all, flux_all, yerr=fluxerr_all, label=f'${pb}$ band obs', fmt='.', color=COLPB[pb])
                # ax1.axvspan(fit_until, 80, alpha=0.2, color='grey')
                ax1.axvspan(-67, fit_until, alpha=0.2, color='grey')
                save_y_pred_draws = []
                for i in range(100):
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        param_draws = np.random.multivariate_normal(respb[pb].x, cov_matrix)
                        t_draw, y_pred_draw = get_y_pred(80, *param_draws[1:])
                        save_y_pred_draws.append(y_pred_draw)
                    ax1.plot(t_draw, y_pred_draw, color=COLPB[pb], alpha=0.05)
                ax1.plot(t_draw, np.median(save_y_pred_draws, axis=0), label=f'${pb}$ band pred', color=COLPB[pb], linewidth=2, linestyle='-') # median
            ax1.set_ylabel("Relative flux", fontsize=15)
            ax1.set_xlabel("Time since trigger [days]", fontsize=15)
            ax1.legend(fontsize=13.5)
            # plt.ylim(bottom=min(0, min(flux_all), min(y_pred_plot)), top=max(max(flux_all), max(y_pred_plot)))
            plt.ylim(-200, 1200) #
            plt.ylim()
            plt.xlim(-67, 77)
            plt.xticks(fontsize=15)
            plt.yticks(fontsize=15)
            plt.tight_layout()
            plt.savefig(f"bazin_fit_example_timestep_{f}")
            plt.show()
        except EnvironmentError as e:
            # NOTE(review): only EnvironmentError is caught here — fit/plot
            # errors propagate; confirm this narrow catch is intended.
            print(objidx, objid, e)
            continue
# #Compute likelihood and mask out where there is no data
# for pb in passbands:
#     mask = (save_fluxerr_pred[objidx][pb] != 0)
#     yt = save_flux_interp[objidx][pb][mask]
#     yterr = save_fluxerr_pred[objidx][pb][mask]
#     yp = save_flux_pred[objidx][pb][mask]
#     yperr = save_fluxerr_interp[objidx][pb][mask]
#     negloglike = -0.5 * (yt - yp)**2 / (yterr*2 + yperr**2) - 0.5 *np.log(yterr**2 + yperr**2) - 0.5*np.log(2*np.pi)
#     print(negloglike)
#     fig = plt.figure()
#     plt.errorbar(t_plot, save_flux_pred[objidx][pb], yerr=save_fluxerr_pred[objidx][pb], label='pred', fmt='.', linestyle='-')
#     plt.errorbar(t_plot, save_flux_interp[objidx][pb], yerr=save_fluxerr_interp[objidx][pb], label='interp data', fmt='.', linestyle='-')
#     # plt.errorbar(time_all[time_all>=-70], flux_all[time_all>=-70], yerr=fluxerr_all[time_all>=-70], label='data', fmt='.', linestyle='-')
#     plt.legend()
#     plt.xlabel('Time in days')
#     plt.ylabel(f'Flux {pb} band')
#     plt.title(f"3 day predictions plotted plot {objidx} {objid}")
#     plt.ylim(bottom=min(0, min(save_flux_pred[objidx][pb]), min(save_flux_interp[objidx][pb])), top=max(max(save_flux_pred[objidx][pb]), max(save_flux_interp[objidx][pb])))
#     # plt.xlim(left=max(-70, min(time_all)), right = min(80, max(time_all)))
#     plt.xlim(-70, 80)
#     plt.text(0.6, 0.7, f"$negloglike = {round(np.sum(negloglike, axis=0)/len(yt), 3)}$", horizontalalignment='left', verticalalignment='center', transform=fig.transFigure)
#     plt.show()
# # with open('save_real_data_from_osc_bazin_params.pickle', 'wb') as f:
# # with open(f'save_bazin_params_class_multipb_{class_num}.pickle', 'wb') as f:
# #     pickle.dump(save_bazin_params, f)
# + id="ToRAtloAViaR" colab_type="code" colab={}
# @title Similarity matrix function definitions
def get_similarity_matrix(class_nums, ignore_class_names_test_on=()):
    """Build (or load from JSON cache) the class-vs-class similarity matrices.

    Parameters
    ----------
    class_nums : iterable of int
        Class numbers to compute similarity scores for.
    ignore_class_names_test_on : list-like of str, optional
        Class-name rows to drop from the returned log-likelihood matrix.
        (Default changed from a mutable ``[]`` to an immutable ``()``.)

    Returns
    -------
    (similarity_matrix, similarity_matrix_chi2) : pandas.DataFrame pair
        Negative-log-likelihood and chi-squared based matrices. BUGFIX: the
        original returned only the first matrix, while the calling cell
        unpacks two values.
    """
    # Keyed by class *name* once filled. BUGFIX: the original pre-seeded these
    # dicts with unused class-number -> [] entries, which breaks the
    # pd.DataFrame construction below (empty-list columns).
    similarity_matrix = {}
    similarity_matrix_chi2 = {}
    for class_num in class_nums:
        sntypes_map = helpers.get_sntypes()
        class_name = sntypes_map[class_num]
        print(class_name)
        # NOTE(review): the filename keys on the full class_nums list, so
        # every class_num reads/writes the same cache file — probably should
        # be {class_num}; confirm before changing cached files.
        saved_scores_fp = f'bazin_similarity_scores_{class_nums}.json'
        if os.path.exists(saved_scores_fp):
            print("Using saved similarity scores")
            with open(saved_scores_fp, 'r') as fp:
                similarity_score = json.load(fp)
            with open(saved_scores_fp.replace('similarity_scores_', 'similarity_scores_chi2_'), 'r') as fp:
                similarity_score_chi2 = json.load(fp)
        else:
            print("Saving similarity scores...")
            similarity_score, similarity_score_chi2 = similarity_metric(class_num, class_nums_to_test_against=class_nums)
            with open(saved_scores_fp, 'w') as fp:
                json.dump(similarity_score, fp)
            with open(saved_scores_fp.replace('similarity_scores_', 'similarity_scores_chi2_'), 'w') as fp:
                json.dump(similarity_score_chi2, fp)
        similarity_matrix[class_name] = similarity_score
        # BUGFIX: was `similarity_score_std`, an undefined name (NameError).
        similarity_matrix_chi2[class_name] = similarity_score_chi2
    similarity_matrix = pd.DataFrame(similarity_matrix)
    similarity_matrix_chi2 = pd.DataFrame(similarity_matrix_chi2)
    similarity_matrix.to_csv('bazin_similarity_matrix.csv')
    similarity_matrix_chi2.to_csv('similarity_matrix_chi2.csv')
    print(similarity_matrix)
    similarity_matrix = similarity_matrix.drop(list(ignore_class_names_test_on))
    return similarity_matrix, similarity_matrix_chi2
def similarity_metric(class_num, class_nums_to_test_against):
    """Fit every light curve of `class_num` with the Bazin model at each
    3-day time step and score how anomalous each object looks.

    For each object, 3-day-ahead Bazin predictions (with parameter-draw
    uncertainties) are compared against spline-interpolated observations,
    giving per-object negative-log-likelihood and chi2 anomaly scores that
    are accumulated per true class name.

    Returns
    -------
    (similarity_score, similarity_score_chi2) : dict pair
        Median anomaly score per class name, for the log-likelihood and chi2
        metrics respectively.
    """
    nobjidx = len(light_curves_sims[class_num].items())
    # Initial guess for (log sigma, log A, B, t0, tau_fall, tau_rise).
    x0 = np.array([1.3 , 6 , -26.34104599, -3.4609063, 16.20697774, -3.40826335])
    save_flux_pred = {}
    save_fluxerr_pred = {}
    save_flux_interp = {}
    save_fluxerr_interp = {}
    sntypes_map = helpers.get_sntypes()
    class_names = [sntypes_map[class_num] for class_num in class_nums_to_test_against]
    anomaly_scores = {key: [] for key in class_names}
    anomaly_scores_chi2 = {key: [] for key in class_names}
    for objidx, (objid, lc) in enumerate(light_curves_sims[class_num].items()):
        save_flux_pred[objidx] = {pb: np.zeros(50) for pb in passbands}
        save_fluxerr_pred[objidx] = {pb: np.zeros(50) for pb in passbands}
        save_flux_interp[objidx] = {pb: np.zeros(50) for pb in passbands}
        save_fluxerr_interp[objidx] = {pb: np.zeros(50) for pb in passbands}
        for f, fit_until in enumerate(range(-70, 80, 3)):
            print(f"objidx: {objidx}, {objid} timestep: {f}")
            plt.close()
            time, flux, fluxerr, photflag = {}, {}, {}, {}
            t_pred1, y_pred1 = {}, {}
            respb = {}
            for pb in passbands:
                pbmask = lc['passband'] == pb
                time[pb] = lc[pbmask]['time'].data
                flux[pb] = lc[pbmask]['flux'].data
                fluxerr[pb] = lc[pbmask]['fluxErr'].data
                photflag[pb] = lc[pbmask]['photflag'].data
                # Mask out times outside of mintime and maxtime
                timemask = (time[pb] > -70) & (time[pb] < 80)
                time[pb] = time[pb][timemask]
                flux[pb] = flux[pb][timemask]
                fluxerr[pb] = fluxerr[pb][timemask]
                photflag[pb] = photflag[pb][timemask]
                if np.max(flux[pb]) < 1e-5:
                    print(objidx, objid, "Low flux due to incorrect zeropoints")
                    continue
                # Only fit up to day `fit_until`
                mask = (time[pb] <= fit_until) & (time[pb] >= -70)
                time[pb] = time[pb][mask]
                flux[pb] = flux[pb][mask]
                fluxerr[pb] = fluxerr[pb][mask]
                photflag[pb] = photflag[pb][mask]
                # Optimise fit
                autograd_numpy = False
                def objective_func(theta):
                    # Autograd's numpy is used only for the Hessian below.
                    if autograd_numpy:
                        np1 = autograd.numpy
                    else:
                        np1 = np
                    return -log_posterior(class_num, pb, time[pb], flux[pb], fluxerr[pb], fit_until, log_sigma_intrinsic=theta[0], logA=theta[1], B=theta[2], t0=theta[3], tau_fall=theta[4], tau_rise=theta[5], np=np1)
                res = minimize(objective_func, x0, method='Nelder-Mead', options={'xatol': 1e-12, 'disp': False})
                respb[pb] = res
                # Covariance of the fit from the inverse Hessian (autograd).
                autograd_numpy = True
                hessian_ = autograd.hessian(objective_func)
                autograd_hessian_inv = lin.inv(hessian_(res.x))
                # Get Uncertaintites
                cov_matrix = autograd_hessian_inv
                sigmas = np.sqrt(np.diagonal(cov_matrix))
                # Ignore bad fits
                if np.any(np.isnan(cov_matrix)) or np.any(np.isnan(sigmas)) or np.any(res.x == 0.0) or np.any(sigmas==0.0) or res.x[1] <= 0 or np.any(abs(res.x)<1e-4) or np.any(np.isnan(res.x)):
                    print(f"Bad fit for objid {objidx} {objid}. Params: {res.x} sigmas {sigmas}")
                    continue
                # Get draws
                t_plot, y_pred_plot = get_y_pred(80, *res.x[1:])
                t_pred1[pb] = t_plot
                y_pred1[pb] = y_pred_plot
                save_y_pred_draws = []
                for i in range(100):
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        param_draws = np.random.multivariate_normal(respb[pb].x, cov_matrix)
                        t_draw, y_pred_draw = get_y_pred(80, *param_draws[1:])
                        save_y_pred_draws.append(y_pred_draw)
                time_all, flux_all, fluxerr_all = lc[pbmask]['time'].data, lc[pbmask]['flux'].data, lc[pbmask]['fluxErr'].data
                spl = helpers.ErrorPropagationSpline(time_all, flux_all, fluxerr_all, k=1, N=100, ext='zeros')
                fluxinterp, fluxerrinterp = spl(t_plot)
                if time[pb].size == 0 or fit_until > time_all[-1] or fit_until >= t_plot[-1]:
                    continue
                # Save the prediction/interpolation for time step f+1
                # (3 days ahead of the last fitted point).
                save_flux_pred[objidx][pb][f+1] = y_pred_plot[f+1]
                save_fluxerr_pred[objidx][pb][f+1] = np.std(save_y_pred_draws, axis=0)[f+1]
                save_flux_interp[objidx][pb][f+1] = fluxinterp[f+1]
                save_fluxerr_interp[objidx][pb][f+1] = fluxerrinterp[f+1]
        # Get anomaly scores: compare the saved 3-day-ahead predictions with
        # the interpolated observations across all time steps.
        npb = 0
        chi2 = 0
        like = 0
        negloglike = 0
        maskzeros = True
        for pb in passbands:
            yt = save_flux_interp[objidx][pb]
            yterr = save_fluxerr_pred[objidx][pb]
            yp = save_flux_pred[objidx][pb]
            yperr = save_fluxerr_interp[objidx][pb]
            # Only score time steps where a prediction was actually saved.
            maskzeros = ((yterr != 0) * (np.isfinite(yterr))) & maskzeros
            try:
                chi2 += ((yp - yt)/yterr)**2
                like += np.exp(-0.5*(yp-yt)**2 / (yterr**2+yperr**2)) * (2*np.pi*(yterr**2+yperr**2))**-0.5
                # BUGFIX: variance term was `yterr*2` (a typo for `yterr**2`,
                # cf. the `like` expression above).
                negloglike += -(-0.5 * (yp - yt)**2 / (yterr**2 + yperr**2))# - 0.5 *np.log(yterr**2 + yperr**2) - 0.5*np.log(2*np.pi))
                npb += 1
            except ValueError as e:
                # BUGFIX: previously printed `objids_test[sidx + s]`, names
                # that don't exist here (NameError inside the except path).
                print(f"Failed chi2 object {objid}", e)
                # import pdb; pdb.set_trace()
        if not np.any(maskzeros):
            print("here not any maskzeros")
            continue
        chi2 = chi2[maskzeros] / npb
        like = like[maskzeros] / npb
        negloglike = negloglike[maskzeros] / npb
        anomaly_scores_lc = np.array(negloglike)
        anomaly_scores_lc_chi2 = np.array(chi2)
        # An object's anomaly score is its worst (maximum) time-step score.
        anomaly_score_max = max(anomaly_scores_lc)
        obj_classnum = int(objid.split('_')[0])
        obj_class_name = sntypes_map[obj_classnum]
        anomaly_scores[obj_class_name].append(anomaly_score_max)
        anomaly_scores_lc_chi2 = np.array(chi2)
        anomaly_score_lc_max = max(anomaly_scores_lc_chi2)
        anomaly_scores_chi2[obj_class_name].append(anomaly_score_lc_max)
        # Diagnostic plot of the 3-day-ahead predictions vs interpolated data.
        fig = plt.figure()
        plt.errorbar(t_plot, save_flux_pred[objidx][pb], yerr=save_fluxerr_pred[objidx][pb], label='pred', fmt='.', linestyle='-')
        plt.errorbar(t_plot, save_flux_interp[objidx][pb], yerr=save_fluxerr_interp[objidx][pb], label='interp data', fmt='.', linestyle='-')
        # plt.errorbar(time_all[time_all>=-70], flux_all[time_all>=-70], yerr=fluxerr_all[time_all>=-70], label='data', fmt='.', linestyle='-')
        plt.legend()
        plt.xlabel('Time in days')
        plt.ylabel(f'Flux {pb} band')
        plt.title(f"3 day predictions plotted plot {objidx} {objid}")
        plt.ylim(bottom=min(0, min(save_flux_pred[objidx][pb]), min(save_flux_interp[objidx][pb])), top=max(max(save_flux_pred[objidx][pb]), max(save_flux_interp[objidx][pb])))
        # plt.xlim(left=max(-70, min(time_all)), right = min(80, max(time_all)))
        plt.xlim(-70, 80)
        plt.text(0.6, 0.7, f"$negloglike = {round(np.sum(negloglike, axis=0)/len(yt), 3)}$", horizontalalignment='left', verticalalignment='center', transform=fig.transFigure)
        plt.text(0.6, 0.6, f"$chi2 = {round(np.sum(chi2, axis=0)/len(yt), 3)}$", horizontalalignment='left', verticalalignment='center', transform=fig.transFigure)
        plt.show()
    # Summarise per class: median of the per-object anomaly scores.
    similarity_score = {key: [] for key in class_names}
    similarity_score_chi2 = {key: [] for key in class_names}
    for c in class_names:
        similarity_score[c] = np.median(anomaly_scores[c])
        # BUGFIX: was np.median(similarity_score_chi2[c]) — the median of an
        # always-empty list (NaN); use the accumulated chi2 scores instead.
        similarity_score_chi2[c] = np.median(anomaly_scores_chi2[c])
    return similarity_score, similarity_score_chi2
def plot_similarity_matrix(similarity_matrix):
    """Render the similarity matrix as an annotated heatmap ("trained on"
    rows vs "tested on" columns) and save it to bazin_similarity_matrix.pdf.
    """
    # NOTE(review): 'normal' is not an actual font family name; matplotlib
    # warns and falls back to the default — confirm this is intended.
    font = {'family': 'normal',
            'size': 36}
    matplotlib.rc('font', **font)
    xrange, yrange = similarity_matrix.shape
    # Transpose so rows become the class trained on, columns the class tested on.
    similarity_matrix = similarity_matrix.T
    # similarity_matrix = similarity_matrix[
    #     ['SNIa', 'SNIa-x', 'SNII', 'SNIbc', 'SLSN-I', 'TDE', 'AGN', 'SNIIn', 'Ia-91bg', 'CART', 'TDE', 'PISN',
    #      'Kilonova']]
    xlabels = similarity_matrix.columns.values
    ylabels = similarity_matrix.index.values
    sntypes_map = helpers.get_sntypes()
    # Map class numbers to human-readable names for the y axis.
    ylabels = [sntypes_map[ylabel] for ylabel in ylabels]
    # Cap the colour scale at 20 so a few huge scores don't wash out the rest.
    maxval = min(20, similarity_matrix.values.max())
    plt.figure(figsize=(15,12))
    plt.imshow(similarity_matrix, cmap=plt.cm.RdBu_r, vmin=6, vmax=maxval)#, norm=colors.LogNorm())
    cb = plt.colorbar()
    # cb.ax.set_yticklabels(cb.ax.get_yticklabels(), fontsize=27)
    plt.xticks(np.arange(xrange), xlabels, rotation=90, fontsize=27)
    plt.yticks(np.arange(yrange), ylabels, fontsize=27)
    # White text inside colour-extreme cells for contrast, black otherwise.
    thresh_q3 = 0.75 * maxval
    thresh_q1 = 0.25 * maxval
    for i in range(xrange):
        for j in range(yrange):
            c = similarity_matrix.iloc[j, i]
            # Fewer decimals for larger magnitudes so cells stay readable.
            if c > 100:
                cell_text = f"{c:.0f}"
            elif c > 10:
                cell_text = f"{c:.1f}"
            else:
                cell_text = f"{c:.2f}"
            plt.text(i, j, cell_text, va='center', ha='center',
                     color="white" if c < thresh_q1 or c > thresh_q3 else "black", fontsize=14)
    plt.ylabel('Trained on')
    plt.xlabel('Tested on')
    plt.tight_layout()
    print("Saving matrix plot...")
    plt.savefig("bazin_similarity_matrix.pdf")
# + id="H-p2clK-rXPH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 368} outputId="fff20c8e-1c8f-4bac-a35d-beae704227a9"
# @title # Plot similarity matrix
# Compute (or load cached) similarity scores for the listed classes and plot
# the resulting matrix.
import warnings
passbands = ('g', 'r')
class_nums = [1, 2, 3, 41, 51, 60, 64]
# NOTE(review): this unpacks two return values — confirm get_similarity_matrix
# returns both the log-likelihood and the chi2 matrices.
similarity_matrix, similarity_matrix_chi2 = get_similarity_matrix(class_nums, ignore_class_names_test_on=[])
plot_similarity_matrix(similarity_matrix)
# + id="-sN4SzQYMvDr" colab_type="code" colab={}
# -0.5 * (yt - yp)**2 / (yterr*2 + yperr**2) - 0.5 *np.log(yterr**2 + yperr**2) - 0.5*np.log(2*np.pi)
# print(yt, yp, yterr, yperr)
import transomaly
from transomaly import helpers
# + id="G9reHlTO754B" colab_type="code" colab={}
# Rebuild the model architecture, restore previously saved MLE weights, then
# compile and evaluate on the training set.  epochs=0 trains nothing — the
# fit() call here only initialises the Keras training state for evaluate().
model = build_model(X_train, passbands=('g',), reframe=False, probabilistic=True, nunits=50, bayesian=False, dropout_rate=0.2)

# Load and set weights
with open('save_weights_bazin_mle.pickle', 'rb') as fp:
    load_weights = pickle.load(fp)
model.set_weights(load_weights)

model.compile(loss=negloglike(), optimizer=tf.optimizers.Adam(learning_rate=0.01))
# tcn_full_summary(model, expand_residual_blocks=True)

history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=0, batch_size=64, verbose=2)
model.evaluate(X_train, y_train)
# plot_history(history, 'bazinloss')
# + id="MtPSz3Pw-CkM" colab_type="code" colab={}
# Compare one forward pass of the probabilistic model against the simulated
# truth for a single light curve, then Monte-Carlo average many stochastic
# passes to estimate the predictive mean and scatter.
yhat = model(X_train)
y_pred = np.asarray(yhat.mean())
y_pred_std = np.asarray(yhat.stddev())
print(y_pred_std.shape)

pidx = 99  # index of the example light curve to plot
time = t[pidx, 1:]

plt.figure()
plt.errorbar(time, y_train[pidx], yerr=sigma, fmt='.', label='true data')
plt.errorbar(time, y_pred[pidx], yerr=y_pred_std[pidx], fmt='x', c='tab:orange', label='one draw from posterior')
plt.legend()

plt.figure()
plt.scatter(time, y_pred_std[pidx], label='predicted sigma one draw')
plt.hlines(y=sigma, xmin=min(time), xmax=max(time), label='true sigma')
# plt.plot(time, sigma, color='k', label='true sigma')
plt.legend()

# ns forward passes (each re-samples the network's stochastic parts), drawing
# one sample per pass from the predicted Normal distribution.
sampled_ypred = []
sampled_ystd = []
draws = []
ns = 100
for i in range(ns):
    sampled_yhat = model(X_train)
    sampled_ypred.append(np.asarray(sampled_yhat.mean()))
    sampled_ystd.append(np.asarray(sampled_yhat.stddev()))
    draws.append(np.random.normal(sampled_yhat.mean(), sampled_yhat.stddev()))

# Mean/std over draws approximate the full predictive distribution.
mean_sampled_ypred = np.mean(np.array(draws), axis=0)
std_sampled_ypred = np.std(np.array(draws), axis=0)

plt.figure()
plt.errorbar(time, y_train[pidx], yerr=sigma, fmt='.', label='true data')
# for sp in range(ns):
#     plt.errorbar(time, sampled_ypred[sp][pidx], yerr=sampled_ystd[sp][pidx], fmt='.', c='black', alpha=0.01)
plt.errorbar(time, mean_sampled_ypred[pidx], yerr=std_sampled_ypred[pidx], fmt='x', c='tab:orange', alpha=1, label=f'mean of {ns} draws')
plt.legend()

plt.figure()
plt.scatter(time, std_sampled_ypred[pidx], label=f'predicted sigma of {ns} draws')
plt.hlines(y=sigma, xmin=min(time), xmax=max(time), label='true sigma')
# plt.plot(time, sigma[pidx], color='k', label='true sigma')
plt.legend()
# + id="C1yc6I4Zei8d" colab_type="code" colab={}
# # Save model weights
# import pickle
# weights = model.get_weights()
# with open('save_weights_bazin_mle.pickle', 'wb') as fp:
#     pickle.dump(weights, fp)

# # Load and set weights
# with open('save_weights_bazin_mle.pickle', 'rb') as fp:
#     load_weights = pickle.load(fp)
# model.set_weights(load_weights)

# + id="tiA0P_j_lH2Q" colab_type="code" colab={}
# NOTE(review): `weights` is only assigned in commented-out / later cells, so
# running this cell in top-to-bottom order raises NameError.
print(weights)

# + id="xP7HLogS7Coi" colab_type="code" colab={}
# Minimal TensorFlow-Probability regression demo: a single Dense layer whose
# output parameterises a unit-scale Normal (x, y, x_tst presumably come from
# an earlier cell — TODO confirm).
# Build model.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(1),
    tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)),
])

# Do inference.
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglike())
model.fit(x, y, epochs=1000, verbose=False);

# Profit.
print(model.weights)
[print(np.squeeze(w.numpy())) for w in model.weights];

yhat = model(x_tst)
assert isinstance(yhat, tfd.Distribution)
# + id="WP5jDB3WeRJi" colab_type="code" colab={}
# Main experiment loop: build the training arrays, train (or reload) the
# probabilistic TCN, then plot anomaly metrics on the test and training sets.
# npred is the number of future time steps the network predicts; the range
# currently runs a single iteration with npred=1.
for npred in range(1, 2):
    SCRIPT_DIR = 'transomaly/'  # os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.join(SCRIPT_DIR, '..', 'data/ZTF_20190512')
    save_dir = os.path.join(SCRIPT_DIR, '..', 'data/saved_light_curves')
    training_set_dir = os.path.join(SCRIPT_DIR, '..', 'data/training_set_files')
    get_data_func = astrorapid.get_training_data.get_data_from_snana_fits
    fig_dir = os.path.join(SCRIPT_DIR, '..', 'plots')
    passbands = ('g', 'r')
    contextual_info = ()
    known_redshift = True if 'redshift' in contextual_info else False
    nprocesses = None
    class_nums = (1,)  # which transient classes to train on
    otherchange = 'single_object_1_50075859'
    nsamples = 1000  # GP samples per object
    extrapolate_gp = True
    redo = False  # reuse cached arrays if available
    train_epochs = 10000
    retrain = False
    reframe_problem = False
    # npred = 1
    probabilistic = True
    batch_size = 128
    nunits = 30
    train_from_last_stop = 0
    normalise = True
    use_uncertainties = True
    bayesian = True
    # nn_architecture_change = 'norm_onepointpred{}timestepsinfuture_steps{}normalised_{}negloglike_predict_last{}_timesteps_nodropout_100lstmneurons'.format(npred,'probabilistic_' if probabilistic else '', 'reframe_Xy_' if reframe_problem else '', npred) # 'normalise_mse_withmasking_1000lstmneurons' # 'chi2' # 'mse'
    # Tag encoding the hyperparameters; used in saved-model/figure names.
    nn_architecture_change = f"NotDenseFlipoutOnly1TCN_{'probabilistic_' if probabilistic else ''}bayesian{bayesian}_uncertainties{use_uncertainties}_predictfuture{npred}point_normalised{normalise}_nodropout_{nunits}units_batchsize{batch_size}"

    fig_dir = os.path.join(fig_dir, "model_{}_ci{}_ns{}_c{}".format(otherchange, contextual_info, nsamples, class_nums))
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    preparearrays = PrepareTrainingSetArrays(passbands, contextual_info, data_dir, save_dir, training_set_dir, redo, get_data_func)
    X_train, X_test, y_train, y_test, Xerr_train, Xerr_test, yerr_train, yerr_test, \
    timesX_train, timesX_test, labels_train, labels_test, objids_train, objids_test = \
        preparearrays.make_training_set(class_nums, nsamples, otherchange, nprocesses, extrapolate_gp, reframe=reframe_problem, npred=npred, normalise=normalise, use_uncertainties=use_uncertainties)

    model, model_name = train_model(X_train, X_test, y_train, y_test, yerr_train, yerr_test, fig_dir=fig_dir, epochs=train_epochs,
                                    retrain=retrain, passbands=passbands, model_change=nn_architecture_change, reframe=reframe_problem, probabilistic=probabilistic, train_from_last_stop=train_from_last_stop, batch_size=batch_size, nunits=nunits, use_uncertainties=use_uncertainties, bayesian=bayesian)

    # Metrics on the held-out test set, then on the training set itself.
    plot_metrics(model, model_name, X_test, y_test, timesX_test, yerr_test, labels_test, objids_test, passbands=passbands,
                 fig_dir=fig_dir, nsamples=nsamples, data_dir=data_dir, save_dir=save_dir, nprocesses=nprocesses, plot_gp=True, extrapolate_gp=extrapolate_gp, reframe=reframe_problem, plot_name='', npred=npred, probabilistic=probabilistic, known_redshift=known_redshift, get_data_func=get_data_func, normalise=normalise, bayesian=bayesian)

    plot_metrics(model, model_name, X_train, y_train, timesX_train, yerr_train, labels_train, objids_train, passbands=passbands,
                 fig_dir=fig_dir, nsamples=nsamples, data_dir=data_dir, save_dir=save_dir, nprocesses=nprocesses, plot_gp=True, extrapolate_gp=extrapolate_gp, reframe=reframe_problem, plot_name='_training_set', npred=npred, probabilistic=probabilistic, known_redshift=known_redshift, get_data_func=get_data_func, normalise=normalise, bayesian=bayesian)

    # # Test on other classes #51,60,62,70 AndOtherTypes
    # X_train, X_test, y_train, y_test, Xerr_train, Xerr_test, yerr_train, yerr_test, \
    # timesX_train, timesX_test, labels_train, labels_test, objids_train, objids_test = \
    #     preparearrays.make_training_set(class_nums=(1,51,), nsamples=1, otherchange='getKnAndOtherTypes', nprocesses=nprocesses, extrapolate_gp=extrapolate_gp, reframe=reframe_problem, npred=npred, normalise=normalise, use_uncertainties=use_uncertainties)
    # plot_metrics(model, model_name, X_train, y_train, timesX_train, yerr_train, labels_train, objids_train, passbands=passbands,
    #              fig_dir=fig_dir, nsamples=nsamples, data_dir=data_dir, save_dir=save_dir, nprocesses=nprocesses, plot_gp=True, extrapolate_gp=extrapolate_gp, reframe=reframe_problem, plot_name='anomaly', npred=npred, probabilistic=probabilistic, known_redshift=known_redshift, get_data_func=get_data_func, normalise=normalise, bayesian=bayesian)
# + id="PkgtKD1Yn6IR" colab_type="code" colab={}
X_train.shape

# + id="WpLFrlSaa6ER" colab_type="code" colab={}
# Re-compile with the uncertainty-aware loss and score the test set.
# loss=lambda y, rv_y: -rv_y.log_prob(y)
# model.set_weights(old_weights)
model.compile(loss=negloglike_with_error(), optimizer='adam')
model.evaluate(X_test, y_test)
# Default model

# + id="vf1k0CMqpeND" colab_type="code" colab={}
# Snapshot the current weights and load the saved non-Bayesian run's weights
# for comparison.
old_weights = model.get_weights()
with open('save_weights_400epochs_nonbayesian.pickle', 'rb') as fp:
    load_weights = pickle.load(fp)

# + id="daWFgdtQpkWT" colab_type="code" colab={}
print(old_weights[0])
print(load_weights[0])

# + id="JgMEzbEkDL1x" colab_type="code" colab={}
# One forward pass plus ns stochastic passes over the test set.
yhat = model(X_test)
y_pred = np.asarray(yhat.mean())
y_pred_std = np.asarray(yhat.stddev())

ns = 10
sampled_ypred = []
sampled_ystd = []
for i in range(ns):
    sampled_yhat = model(X_test)
    sampled_ypred.append(np.asarray(sampled_yhat.mean()))
    sampled_ystd.append(np.asarray(sampled_yhat.stddev()))

# + id="QCKOBhWXDb_n" colab_type="code" colab={}
# Spread of the predicted means across stochastic passes.
np.std(np.array(sampled_ypred), axis=0).shape
# + id="a_uUsXJYjBcx" colab_type="code" colab={}

# + id="deEliBvaRyfg" colab_type="code" colab={}
# Splice the 20 weight arrays of a previously trained non-Bayesian network
# into the corresponding slots of the current model's weight list, leaving the
# remaining slots (presumably the Flipout posterior parameters — TODO confirm
# against the layer summaries below) at their current values.
old_weights = model.get_weights()
with open('save_weights_400epochs_nonbayesian.pickle', 'rb') as fp:
    load_weights = pickle.load(fp)

initialise_weights = copy.deepcopy(old_weights)
# Destination index in the current weight list for each loaded array, in order
# (same mapping as the original hand-written assignments).
dest_slots = (0, 2, 3, 5, 6, 8, 9, 11, 12, 14, 15, 17, 18,
              20, 21, 23, 24, 26, 27, 28)
for src_idx, dst_idx in enumerate(dest_slots):
    initialise_weights[dst_idx] = load_weights[src_idx]

# BUG FIX: the original ended with model.set_weights(old_weights), which
# discarded the initialise_weights mapping built above (making the whole cell
# a no-op).  Apply the spliced weights instead.
model.set_weights(initialise_weights)
# + id="Tc4c3UIo7OQL" colab_type="code" colab={}
# Evaluate the (possibly re-initialised) model: MSE of the mean prediction
# against the first two target columns.
new_weights = model.get_weights()
yhat = model(X_test)
y_pred = np.asarray(yhat.mean())
y_pred_std = np.asarray(yhat.stddev())
np.mean(np.square(y_pred - y_test[:,:,:2]))
#0.09301782987233782

# + id="nzhB2gCQO6hH" colab_type="code" colab={}

# + id="y_dBvt_0OHeP" colab_type="code" colab={}
# Shapes of the loaded (non-Bayesian) weight arrays...
for i, w in enumerate(load_weights):
    print(i, w.shape)

# + id="HXJKZSK6OsjM" colab_type="code" colab={}
# ...and the current model's weight list (shapes and values).
for i, w in enumerate(old_weights):
    print(i, w.shape, w)

# + id="cp9P5alLIRHD" colab_type="code" colab={}
plot_metrics(model, model_name, X_train, y_train, timesX_train, yerr_train, labels_train, objids_train, passbands=passbands,
             fig_dir=fig_dir, nsamples=nsamples, data_dir=data_dir, save_dir=save_dir, nprocesses=nprocesses, plot_gp=True, extrapolate_gp=extrapolate_gp, reframe=reframe_problem, plot_name='_training_set', npred=npred, probabilistic=probabilistic, known_redshift=known_redshift, get_data_func=get_data_func, normalise=normalise, bayesian=bayesian)

# + id="wMJaAl_pOrB_" colab_type="code" colab={}

# + id="c80kH5H93h1J" colab_type="code" colab={}
from tensorflow.keras import initializers
# NOTE(review): this initializer object is created but never assigned or used,
# and `weights` may be undefined at this point in the execution order.
initializers.RandomNormal(stddev=0.01)
print(weights)
# + id="1b5Uf4H-7Knv" colab_type="code" colab={}
# weights_400epochs_nonbayesian = np.array(model.get_weights())
# import pickle
# with open('save_weights_400epochs_nonbayesian.pickle', 'wb') as fp:
# pickle.dump(weights_400epochs_nonbayesian, fp)
# + id="lub3nNGGjGhP" colab_type="code" colab={}
# weights_400epochs_bayesian = np.array(model.get_weights())
# import pickle
# with open('save_weights_400epochs_bayesian.pickle', 'wb') as fp:
# pickle.dump(weights_400epochs_bayesian, fp)
# + id="ikxrGQKjcCsS" colab_type="code" colab={}
# Print the shape of every weight array in the model.
weights = np.array(model.get_weights())
for w in weights:
    print(w.shape)

# + id="EqDvMj486dsM" colab_type="code" colab={}
# Per-layer breakdown: layer name, number of weight arrays, and each shape.
for lay in model.layers:
    print(lay.name)
    w = lay.get_weights()
    print(len(w))
    for i in range(len(w)):
        print(w[i].shape)

# + id="_NQYRVSraeAB" colab_type="code" colab={}
tcn_full_summary(model, expand_residual_blocks=True)
# + id="nVHSRqhZ6spH" colab_type="code" colab={}
# # ! git config --user.email "<EMAIL>"
# # ! git config --user.name "daniel-muthukrishna"
# # ! git stash
# # ! git pull
# + id="f_8-rNT1Rmmn" colab_type="code" colab={}
# # not-Bayesian:
# Model: "sequential_1"
# _________________________________________________________________
# Layer (type) Output Shape Param #
# =================================================================
# masking_1 (Masking) (None, 49, 2) 0
# _________________________________________________________________
# matching_conv1D (Conv1D) (None, 49, 30) 90
# _________________________________________________________________
# activation_14 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# conv1D_0 (Conv1D) (None, 49, 30) 150
# _________________________________________________________________
# activation_12 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_8 (Spatial (None, 49, 30) 0
# _________________________________________________________________
# conv1D_1 (Conv1D) (None, 49, 30) 1830
# _________________________________________________________________
# activation_13 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_9 (Spatial (None, 49, 30) 0
# _________________________________________________________________
# matching_identity (Lambda) (None, 49, 30) 0
# _________________________________________________________________
# activation_17 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# conv1D_0 (Conv1D) (None, 49, 30) 1830
# _________________________________________________________________
# activation_15 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_10 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# conv1D_1 (Conv1D) (None, 49, 30) 1830
# _________________________________________________________________
# activation_16 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_11 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# matching_identity (Lambda) (None, 49, 30) 0
# _________________________________________________________________
# activation_20 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# conv1D_0 (Conv1D) (None, 49, 30) 1830
# _________________________________________________________________
# activation_18 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_12 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# conv1D_1 (Conv1D) (None, 49, 30) 1830
# _________________________________________________________________
# activation_19 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_13 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# matching_identity (Lambda) (None, 49, 30) 0
# _________________________________________________________________
# activation_23 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# conv1D_0 (Conv1D) (None, 49, 30) 1830
# _________________________________________________________________
# activation_21 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_14 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# conv1D_1 (Conv1D) (None, 49, 30) 1830
# _________________________________________________________________
# activation_22 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_15 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# lambda_1 (Lambda) multiple 0
# _________________________________________________________________
# time_distributed_1 (TimeDist (None, 49, 4) 124
# _________________________________________________________________
# distribution_lambda_1 (Distr ((None, 49, 2), (None, 49 0
# =================================================================
# Total params: 13,174
# Trainable params: 13,174
# Non-trainable params: 0
# # Bayesian network:
# _________________________________________________________________
# Layer (type) Output Shape Param #
# =================================================================
# masking_3 (Masking) (None, 49, 2) 0
# _________________________________________________________________
# matching_conv1D (Conv1DFlipo (None, 49, 30) 150
# _________________________________________________________________
# activation_14 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# conv1D_0 (Conv1DFlipout) (None, 49, 30) 270
# _________________________________________________________________
# activation_12 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_8 (Spatial (None, 49, 30) 0
# _________________________________________________________________
# conv1D_1 (Conv1DFlipout) (None, 49, 30) 3630
# _________________________________________________________________
# activation_13 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_9 (Spatial (None, 49, 30) 0
# _________________________________________________________________
# matching_identity (Lambda) (None, 49, 30) 0
# _________________________________________________________________
# activation_17 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# conv1D_0 (Conv1DFlipout) (None, 49, 30) 3630
# _________________________________________________________________
# activation_15 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_10 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# conv1D_1 (Conv1DFlipout) (None, 49, 30) 3630
# _________________________________________________________________
# activation_16 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_11 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# matching_identity (Lambda) (None, 49, 30) 0
# _________________________________________________________________
# activation_20 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# conv1D_0 (Conv1DFlipout) (None, 49, 30) 3630
# _________________________________________________________________
# activation_18 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_12 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# conv1D_1 (Conv1DFlipout) (None, 49, 30) 3630
# _________________________________________________________________
# activation_19 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_13 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# matching_identity (Lambda) (None, 49, 30) 0
# _________________________________________________________________
# activation_23 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# conv1D_0 (Conv1DFlipout) (None, 49, 30) 3630
# _________________________________________________________________
# activation_21 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_14 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# conv1D_1 (Conv1DFlipout) (None, 49, 30) 3630
# _________________________________________________________________
# activation_22 (Activation) (None, 49, 30) 0
# _________________________________________________________________
# spatial_dropout1d_15 (Spatia (None, 49, 30) 0
# _________________________________________________________________
# lambda_1 (Lambda) multiple 0
# _________________________________________________________________
# time_distributed_3 (TimeDist (None, 49, 4) 244
# _________________________________________________________________
# distribution_lambda_3 (Distr ((None, 49, 2), (None, 49 0
# =================================================================
# Total params: 26,074
# Trainable params: 26,074
# Non-trainable params: 0
# + id="Lo3lJpdUj268" colab_type="code" colab={}
# !ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
# !pip install gputil
# !pip install psutil
# !pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
gpu = GPUs[0]  # report on the first visible GPU


def printm():
    """Print free host RAM, this process's resident size, and GPU memory stats."""
    rss = psutil.Process(os.getpid()).memory_info().rss
    avail = psutil.virtual_memory().available
    print("Gen RAM Free: " + humanize.naturalsize(avail), " | Proc size: " + humanize.naturalsize(rss))
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(
        gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil * 100, gpu.memoryTotal))


printm()
# + id="yYw1_iUe9Bz9" colab_type="code" colab={}
# + id="pxRBTE3b9EfI" colab_type="code" colab={}
# + id="0JfsSeYV9K9q" colab_type="code" colab={}
# Deleted code
# # sigma_intrinsic prior
# sigma_intrinsic_mean = 25
# sigma_intrinsic_sigma = 13
# sigma_log_prior = -0.5*np.log(2*np.pi*sigma_intrinsic_sigma**2)-0.5*(sigma_intrinsic-sigma_intrinsic_mean)**2/sigma_intrinsic_sigma**2
# # A prior
# A_mean = 891
# A_sigma = 536
# A_log_prior = -0.5*np.log(2*np.pi*A_sigma**2)-0.5*(A-A_mean)**2/A_sigma**2
# # B prior
# B_mean = -26
# B_sigma = 260
# B_log_prior = -0.5*np.log(2*np.pi*B_sigma**2)-0.5*(B-B_mean)**2/B_sigma**2
# # t0 prior
# t0_mean = -3.5
# t0_sigma = 6.4
# t0_log_prior = -0.5*np.log(2*np.pi*t0_sigma**2)-0.5*(t0-t0_mean)**2/t0_sigma**2
# # tau_fall prior
# tau_fall_mean = 16.2
# tau_fall_sigma = 10.8
# tau_fall_log_prior = -0.5*np.log(2*np.pi*tau_fall_sigma**2)-0.5*(tau_fall-tau_fall_mean)**2/tau_fall_sigma**2
# # tau_rise prior
# tau_rise_mean = -3.4
# tau_rise_sigma = 1.3
# tau_rise_log_prior = -0.5*np.log(2*np.pi*tau_rise_sigma**2)-0.5*(tau_rise-tau_rise_mean)**2/tau_rise_sigma**2
# log_prior = np.log(1) + sigma_log_prior + A_log_prior + B_log_prior + tau_fall_log_prior + tau_rise_log_prior
|
Copy_of_compare_methods_bazin_mock_dataset.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Neural Network Example
# - Tutorial: http://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html
# +
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """LeNet-style CNN mapping a 1-channel 32x32 image to 10 class scores."""

    def __init__(self):
        super(Net, self).__init__()
        # Two 5x5 convolutions: 1 -> 6 -> 16 feature maps.
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Fully connected head: 16 * 5 * 5 flattened features -> 10 outputs.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Each conv is followed by ReLU and 2x2 max-pooling (a single int is
        # shorthand for a square window).
        pooled1 = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        pooled2 = F.max_pool2d(F.relu(self.conv2(pooled1)), 2)
        flat = pooled2.view(-1, self.num_flat_features(pooled2))
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Return the number of elements per sample (product of non-batch dims)."""
        num_features = 1
        for dim in x.size()[1:]:  # skip the batch dimension
            num_features *= dim
        return num_features
net = Net()
print(net)
# -

# ## Return learnable parameters
# Each conv/linear layer contributes a weight and a bias tensor.
params = list(net.parameters())
print(len(params))
for pp in params:
    print(pp.size())

# # Forward
# - Input should be `autograd.Variable` so is the output
# - **`torch.nn` only supports mini-batches, not a single sample. Then nn.Conv2d will take a 4D Tensor: `nSamples x nChannels x Height x Width`. If you have a single sample, just use `input.unsqueeze(0)` to add a fake batch dimension.**
# One fake batch containing a single 1-channel 32x32 image.
input = Variable(torch.randn(1, 1, 32, 32))
out = net(input)
print(out)
# # Backward
# - It is done automatically with autograd
# Clear any accumulated gradients, then backprop an arbitrary gradient of the
# same shape as the output.
net.zero_grad()
out.backward(torch.randn(1, 10))

# # Loss Function
# - all functions in [link](http://pytorch.org/docs/nn.html#loss-functions)
# - For the NN the backward is:
# ```
# input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
#       -> view -> linear -> relu -> linear -> relu -> linear
#       -> MSELoss
#       -> loss
# ```
# - with loss.backward(), the whole graph is differentiated w.r.t. the loss. And all Variables in the graph will have their `.grad` Variable accumulated with the gradient.
# +
output = net(input)
# BUG FIX: `torch.arange(1, 11)` is a Long tensor of shape (10,).  nn.MSELoss
# requires a float target and, in modern PyTorch, raises
# "Found dtype Long but expected Float"; the (10,) shape also triggers a
# broadcasting warning against the (1, 10) output.  Use float endpoints and
# reshape to match `output` exactly.
target = Variable(torch.arange(1., 11.).view(1, -1))  # A dummy target
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
# -
# Walk the autograd graph backwards from the loss node.
print(loss.grad_fn)  # MSELoss
print(loss.grad_fn.next_functions[0][0])  # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU
# # Backprop
# Demonstrate that conv1's bias gradient is None/zero before backward() and
# populated afterwards.
# +
net.zero_grad()     # zeroes the gradient buffers of all parameters

print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)

loss.backward()

# BUG FIX: the label printed 'conv.1.bias.grad' (typo); the attribute is conv1.
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
# -
# # Update the weights
# - `weight = weight - learning_rate * gradient`
# - To use various different **update rules** such as SGD, Nesterov-SGD, Adam, RMSProp, etc. To enable this, use: `torch.optim` that implements all these methods.
# Manual SGD step: in-place subtraction of lr * grad from each parameter.
learning_rate = 0.01
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)  # It is subtracting in place

# +
import torch.optim as optim

# Create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)

# In your training loop
optimizer.zero_grad()   # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()    # Does the update
|
notebooks/miscellaneous/dl_frameworks/neural_network_tuto.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Grouping and Aggregating - Analyzing and Exploring Your Data
import pandas as pd

# Stack Overflow developer-survey data: one row per respondent, plus a schema
# file describing each column.
df = pd.read_csv('data/survey_results_public.csv', index_col='Respondent')
schema_df = pd.read_csv('data/survey_results_schema.csv', index_col='Column')
pd.set_option('display.max_columns', 85)
pd.set_option('display.max_rows', 85)

df.head()

df['ConvertedComp'].head(15)

# Median annual compensation (see schema_df for the column's meaning).
df['ConvertedComp'].median()

# NOTE(review): DataFrame.median() over mixed dtypes warns (and later errors)
# in modern pandas; pass numeric_only=True there.
df.median()

filt = df['Country'] == "India"
df[filt]['ConvertedComp'].median()

df.describe()

# count() excludes NaNs, unlike len(df).
df['ConvertedComp'].count()

df['Hobbyist']

df['Hobbyist'].value_counts()

df['SocialMedia']

schema_df.loc['SocialMedia']

df['SocialMedia'].value_counts()

# normalize=True returns fractions instead of raw counts.
df['SocialMedia'].value_counts(normalize=True)

df[['Country', 'SocialMedia']].groupby('SocialMedia').count()

df['Country'].value_counts()

# A lazy GroupBy object: one group per country.
country_group = df.groupby('Country')
country_group

country_group.get_group("United States")

country_group.get_group("India")

country_group.get_group("India").describe()

# Boolean filtering achieves the same as get_group for a single country.
filt = df['Country'] == "United States"
df.loc[filt]

df.loc[filt]['SocialMedia'].value_counts()

country_group['SocialMedia'].value_counts().head(50)

country_group['SocialMedia'].value_counts().loc['India']

country_group['SocialMedia'].value_counts().loc['China']

country_group['SocialMedia'].value_counts().loc['Russian Federation']

country_group['SocialMedia'].value_counts(normalize=True).loc['Russian Federation']

country_group['ConvertedComp'].median()

country_group['ConvertedComp'].median().loc['United Kingdom']

country_group['ConvertedComp'].median().loc['United States']

# agg applies several aggregations in one pass.
country_group['ConvertedComp'].agg(['median', 'mean'])

country_group['ConvertedComp'].agg(['median', 'mean']).loc["Canada"]
filt = df['Country'] == 'India'
df.loc[filt]['LanguageWorkedWith'].str.contains('Python')

df.loc[filt]['LanguageWorkedWith'].str.contains('Python').value_counts()

# True sums as 1, so this is the number of Indian respondents who know Python.
df.loc[filt]['LanguageWorkedWith'].str.contains('Python').sum()

# NOTE(review): this line raises AttributeError — a SeriesGroupBy object has
# no .str accessor; the apply() versions below are the working approach.
country_group['LanguageWorkedWith'].str.contains('Python').sum()

country_group['LanguageWorkedWith'].apply(lambda x: True if 'Python' in str(x) else False)

# Per-country count of respondents whose language list mentions Python.
country_group['LanguageWorkedWith'].apply(lambda x: x.str.contains('Python').sum())

country_group['LanguageWorkedWith'].apply(lambda x: x.str.contains('Python').sum())

country_res = df['Country'].value_counts()
country_res

country_py = country_group['LanguageWorkedWith'].apply(lambda x: x.str.contains('Python').sum())

# Combine total respondents and Python-knowers side by side.
py_df = pd.concat([country_res, country_py], axis='columns', sort=False)
py_df

py_df.rename(columns = {'Country':'TotalNoPeople','LanguageWorkedWith' : 'PeopleKnowPython'})

py_df.rename(columns = {'Country':'TotalNoPeople','LanguageWorkedWith' : 'PeopleKnowPython'}, inplace=True)
py_df

py_df['PctKnowsPython'] = (py_df['PeopleKnowPython']/py_df['TotalNoPeople'])*100
py_df

py_df.sort_values('PctKnowsPython',ascending=False, inplace=True)
py_df

py_df.head(50)

py_df.loc['Japan']
|
pandas basics/Pandas Demo 8.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <style>div.container { width: 100% }</style>
# <img style="float:left; vertical-align:text-bottom;" height="65" width="172" src="../assets/PyViz_logo_wm_line.png" />
# <div style="float:right; vertical-align:text-bottom;"><h2>Tutorial 05. Working with Gridded Datasets</h2></div>
# Many datasets in science and engineering consist of n-dimensional data. Gridded datasets usually represent observations of some continuous variable across multiple dimensions---a monochrome image representing luminance values across a 2D surface, volumetric 3D data, an RGB image sequence over time, or any other multi-dimensional parameter space. This type of data is particularly common in research areas that make use of spatial imaging or modeling, such as climatology, biology, and astronomy, but can also be used to represent any arbitrary data that varies over multiple dimensions.
#
# For gridded data, we'll use xarray, a convenient way of working with and representing labelled n-dimensional arrays, like pandas for labelled n-D arrays, along with our other usual libraries:
#
# <div style="margin: 10px">
# <a href="http://holoviews.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="../assets/holoviews.png"/></a>
# <a href="http://bokeh.pydata.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="../assets/bokeh.png"/></a>
# <a href="http://xarray.pydata.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="../assets/xarray_wm.png"/></a>
# <a href="http://numpy.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:150px" src="../assets/numpy.png"/></a>
# <a href="http://pandas.pydata.org"><img style="margin:8px; display:inline; object-fit:scale-down; max-height:130px" src="../assets/pandas.png"/></a>
# </div>
import numpy as np
import holoviews as hv
from holoviews import opts
import xarray as xr
hv.extension('bokeh')
# ## Load the data
# Load the volumetric MRI scan from the tutorial data directory as an
# xarray Dataset (coordinate dimensions x, y, z — see the prose above).
mri_xr = xr.open_dataset('../data/mri.nc')
# Bare expression: the notebook cell displays the dataset summary.
mri_xr
# xarray is particularly useful for geographic data, but it supports any type of n-dimensional data. The data here represents volumetric data from an [MRI scan](https://graphics.stanford.edu/data/voldata/), with three coordinate dimensions 'x', 'y' and 'z'. In this simple example these coordinates are integers, but they are not required to be. Instead of volumetric data, we could imagine the data could be 2D spatial data that evolves over time, as is common in climatology and many other fields.
# ## Declaring the dataset
#
# Unlike with Pandas, in a gridded dataset the dimensions are typically already declared unambiguously, with **coordinates** (i.e. key dimensions) and **data variables** (i.e. value dimensions) that HoloViews can determine automatically:
# Wrap the xarray object in a HoloViews Dataset; coordinates become key
# dimensions and data variables become value dimensions automatically.
mri = hv.Dataset(mri_xr)
mri
# ## Displaying the data
#
# Just as we saw in the previous tutorial, we can group the data by one or more dimensions. Since we are dealing with volumetric data but have only a 2D display device, we can take slices along each axis. Here we will slice along the sagittal plane corresponding to the z-dimension:
mri.to(hv.Image, groupby='z', dynamic=True)
# Here we supplied ``dynamic=True`` to get a [``DynamicMap``](http://holoviews.org/reference/containers/bokeh/DynamicMap.html), which works like a [``HoloMap``](http://holoviews.org/reference/containers/bokeh/HoloMap.html) but computes each frame on demand, so that it only creates each plot when it is being used.
# Exercise: Display transverse (x,z) or frontal (z,y) sections of the data by declaring the kdims in the .to method
# ## Slice and dice across n dimensions
#
# We can use ``.to`` to slice the cube along all three axes separately:
# +
# Three orthogonal slice views of the same volume, combined in a Layout.
# redim.range pins the MR value range so all three share one color scale.
layout = (mri.to(hv.Image, ['z', 'y'], dynamic=True) +
          mri.to(hv.Image, ['z', 'x'], dynamic=True) +
          mri.to(hv.Image, ['x', 'y'], dynamic=True)).redim.range(MR=(0, 255))

# Hide axes and use compact square frames for each slice view.
layout.opts(
    opts.Image(xaxis=None, yaxis=None, width=225, height=225))
# -
# ## Aggregation
#
# We can also easily compute aggregates across one or more dimensions. Previously we used the ``aggregate`` method for this purpose, but when working with gridded datasets it often makes more sense to think of aggregation as a ``reduce`` operation. We can for example reduce the ``z`` dimension using ``np.mean`` and display the resulting averaged 2D array as an [``Image``](http://holoviews.org/reference/elements/bokeh/Image.html):
hv.Image(mri.reduce(z=np.mean))
# Exercise: Recreate the plot above using the aggregate method
# Hint: The aggregate and reduce methods are inverses of each other
# Try typing "hv.Image(mri.aggregate(" and press shift-Tab to see the signature of `aggregate`.
# If you keep a handle on your image, you can use HoloViews to generate the aggregate array by accessing ``.data``:
# Keep a handle on the averaged image so the underlying aggregated
# array can be inspected through its .data attribute.
im = hv.Image(mri.reduce(z=np.mean))
im.data
# As you can see, it is straightforward to work with the additional dimensions of data available in gridded datasets, as explained in more detail in the [user guide](http://holoviews.org/user_guide/Gridded_Datasets.html).
#
# # Onwards
#
# The previous sections focused on displaying plots that provide certain standard types of interactivity, whether widget-based (to select values along a dimension) or within each plot (for panning, zooming, etc.). A wide range of additional types of interactivity can also be defined by the user for working with specific types of data, as outlined in the following sections, beginning with [Network Graphs](./06_Network_Graphs.ipynb).
|
examples/tutorial/05_Working_with_Gridded_Data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The concepts to be introduced in this notebook, such as the Ising model, simulated annealing, and the transverse Ising model, play an important role in today's quantum algorithms and quantum computing paradigms, including quantum annealing, the quantum approximate optimization algorithm, and quantum-enhanced sampling. Here we give some insight on how these physical building blocks work.
#
# # The Ising model
#
# We would like to make a connection between the computational hardness of a problem and how difficult it is to solve a corresponding physical system. The Ising model is the most basic model to do this. It is an extensively studied model and one of the most basic examples to teach statistical mechanics and phase transitions, but we only require an elementary understanding of it.
#
# Imagine that you have two magnets fixed on the same axis.
#
# <img src="../figures/two_magnets.svg" alt="Two magnets" style="width: 100px;"/>
#
# They will naturally anti-align: one will have north pole facing up, the second the south pole facing up. We can think of them as two binary variables, $\sigma_1$ and $\sigma_2$. Say, if the north pole is facing up, we assign the value +1 to the variable, and -1 otherwise. To abstract away from magnets, in general, we call these variables spins. So in the optimal configuration, their product is -1:
#
# $$ \sigma_1\sigma_2=-1 $$
#
# We can think of this as the energy of the system: the lowest energy is called the ground state energy. Note that there are two physical configurations corresponding to this optimum: $\sigma_1=+1, \sigma_2=-1$, and $\sigma_1=-1, \sigma_2=+1$.
#
# If we keep adding more magnets to this system, we can sum up their pairwise interaction to get the total energy. The total energy of the system is called the Hamiltonian, and we will denote it by $H$. So if we have $N$ magnets arranged along a straight line, we have
#
# $$ H=\sum_{i=1}^{N-1} \sigma_i \sigma_{i+1}$$.
#
# We did a simplification here: we assumed that remote magnets do not interact with each other (e.g. there is no such term as $\sigma_i\sigma_{i+2}$). In general, the interactions modeled depend on the layout of the spins and assumptions about the physical model: there will be some graph describing the connectivity of interactions. To reflect this, we write
#
# $$ H=\sum_{<i,j>} \sigma_i \sigma_{j}$$,
#
# where $<i,j>$ typically means nearest neighbours, but it is up to us to declare what nearest neighbours mean.
#
# Now imagine that the distance is not the same between each pair. In other words, some pairs interact more than others. We can express this by adding a parameter that describes the interaction strength:
#
# $$ H=-\sum_{<i,j>} J_{ij} \sigma_i \sigma_{j}$$,
#
# where $J_{ij}$ is a real number. We added a negative sign to the Hamiltonian: this is by convention. If the spins are antiferromagnetic, that is, they behave as we would expect from magnets, then all $J_{ij}$ values would be negative. That cancels out the negative sign of the sum, so we still expect that each product $\sigma_i\sigma_j$ would give you -1 in the optimum configuration.
#
# The model is fairly complicated by this point. Imagine that you have many spins and not all of them behave like magnets (that is, $J_{ij}$ can take both negative and positive values for different pairs). Nature still wants to find the lowest energy configuration, though. Let's take a look at how we would do it in code. Let's calculate the energy of spins on a line, given some couplings and a spin configuration:
def calculate_energy(J, σ):
    """Energy of a 1-D Ising chain: H = -sum_k J_k * σ_k * σ_{k+1}.

    J -- coupling strengths between neighbouring spins (len(σ) - 1 values)
    σ -- spin configuration, each entry ±1
    """
    total = 0
    for k in range(len(J)):
        total += J[k] * σ[k] * σ[k + 1]
    return -total
# Let's give it a fixed set of couplings and a spin configuration on three sites:
# Couplings for a 3-spin chain: +1 between spins 0-1, -1 between spins 1-2.
J = [1.0, -1.0]
# A trial spin configuration.
σ = [+1, -1, +1]
# The energy of this is
calculate_energy(J, σ)
# Is this the ground state? How do we know? We are interested in the minimum, but we cannot use some gradient-based method to find it, since the variables are binary, plus the optimization landscape is nonconvex. So the easiest choice is an exhaustive search of all possibilities:
import itertools
# Brute-force search: enumerate all 2^3 spin configurations and print each
# configuration's energy next to it.
for σ in itertools.product(*[{+1,-1} for _ in range(3)]):
    print(calculate_energy(J, σ), σ)
# We see that -2 is the optimum, with two optimal configurations, but we had to enumerate all possibilities to figure this out. For this particular case, there are more clever ways to find the best solution, but in the general case, this is not the case.
#
# To get to the general case, we need one more component, an external field. Imagine that you add a large magnet below each and every one of our magnets, creating an external magnetic field for each site. If this field is strong enough, it can override the pairwise interaction and flip the magnets. We model this by adding a linear term to the Hamiltonian:
#
# $$ H=-\sum_{<i,j>} J_{ij} \sigma_i \sigma_{j} - \sum_i h_i \sigma_i$$,
#
# where $h_i$ is the strength of the external field. This is the full description of the *classical Ising model*. The Hamiltonian describes the energy, but in computer science language, it means it expresses the objective function we want to minimize. The corresponding computer science problem is called quadratic unconstrained binary optimization (QUBO), where the only difference is that the variables take values in $\{0, 1\}$, but that is only a constant shift. QUBOs are NP-hard in general, that is, we are not aware of an efficient polynomial time algorithm to solve any given QUBO. So the generic strategy is the exhaustive search we did above, which takes exponentially many steps in the number of sites (variables).
#
# As we mentioned, nature seeks the minimum energy configuration. So how does computational hardness map to physical difficulty? Imagine that the energy difference between the ground state and the next lowest energy state (also called the first excited state) is small, but the energetic cost of going from one to the other is high. A cartoon picture of this is the following:
#
# <img src="../figures/energy_landscape.svg" alt="Energy landscape" style="width: 200px;"/>
#
# If we start from a random configuration, we might get stuck in the local optimum denoted by the green spot. This is what happens in metals if they are cooled down too quickly: the crystal lattice will have imperfections and the metal will not have the desired properties. A process called *annealing* helps in metallurgy: by increasing the temperature, the chance of overcoming the potential barrier increases and the crystal structure can reconfigure itself. If the barrier is high and the energy difference is small between the ground state and the first excited state, the probability of this happening drops. This is what it means that the problem is difficult to do in a physical system.
#
# Annealing inspired a heuristic algorithm called *simulated annealing*. This defines a temperature to be able to hop out of local minima. The temperature is lowered over time to find the actual minimum. Simulated annealing has many implementations. Here we'll use the one implemented in dimod to solve our problem above:
import dimod
# The simulated annealing solver requires us to define the couplings as a dictionary between spins, and we must also pass the external field values as a dictionary. The latter is all zeros for us.
J = {(0, 1): 1.0, (1, 2): -1.0}
h = {0:0, 1:0, 2:0}
# We instantiate an Ising model:
# (arguments: linear terms, quadratic terms, constant offset 0.0, spin-valued ±1 variables)
model = dimod.BinaryQuadraticModel(h, J, 0.0, dimod.SPIN)
# Finally, we create a simulated annealing sampler that pulls out potentially optimal solutions, and we read out 10 possible solutions:
sampler = dimod.SimulatedAnnealingSampler()
response = sampler.sample(model, num_reads=10)
# You can see that this configuration is actually easy, since you get the optimal solution -2 most of the time:
# Count how many of the 10 reads reached the ground-state energy -2.
[solution.energy for solution in response.data()].count(-2)
# Simulated annealing is a classical heuristic algorithm. Quantum annealing uses physical effects to find the global optimum of an Ising model: it uses thermal effects just like annealing in metallurgy, but it also uses quantum effects like tunneling to overcome potential barriers.
#
# The Ising model also plays an important role in quantum-enhanced sampling, but that idea requires a better understanding of the role of temperature, which we will revisit in a subsequent notebook.
# # The transverse-field Ising model
# We discussed the Hamiltonian of the classical Ising model. We can write the same Hamiltonian in a quantum mechanical form. In quantum mechanics, the Hamiltonian is not a function of variables, but of operators. We will simulate what it means in a quantum circuit.
import numpy as np
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
from qiskit import Aer
np.set_printoptions(precision=3, suppress=True)  # compact statevector printing
backend = Aer.get_backend('statevector_simulator')
# One-qubit register/circuit scaffolding reused by the cells below.
q = QuantumRegister(1)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q, c)
# The operator that replicates the effect of what we have seen in the classical case is the Pauli-Z matrix, defined as $\begin{bmatrix}1 & 0\\ 0& -1\end{bmatrix}$. Let's see what it does on the elements of the computational basis:
circuit.z(q[0])
job = execute(circuit, backend)
state = job.result().get_statevector(circuit)
print(state)
# This is nothing but the $|0\rangle$ state. In other words, it does not do anything to $|0\rangle$, which can also be thought of as multiplying it by +1. Let's try it on $|1\rangle$:
circuit = QuantumCircuit(q, c)
circuit.x(q[0])  # prepare |1> first
circuit.z(q[0])
job = execute(circuit, backend)
state = job.result().get_statevector(circuit)
print(state)
# We get $-|1\rangle$, which means it adds a minus sign to it. This way we have the +1, -1 values, just the same way as in the classical formalism. If we write $\sigma^Z_i$ for the operator $Z$ at a site $i$, the quantum mechanical Hamiltonian of the classical Ising model reads as
#
# $$ H=-\sum_{<i,j>} J_{ij} \sigma^Z_i \sigma^Z_{j} - \sum_i h_i \sigma^Z_i$$.
#
# Technically speaking, we should put a hat on $H$ and on all of the $\sigma^Z_i$ to indicate that they are operators, and not numbers or variables, but we omit this for notational simplicity.
#
# The expectation value $<H>$ of the Hamiltonian is the energy of the system, and the corresponding quantum state $|\psi\rangle$ is the configuration of that energy level. We can create the quantum mechanical version of calculating the energy, matching the function we defined above for the classical mechanical variant:
def calculate_energy_expectation(state, hamiltonian):
    """Return the real part of <state|H|state> as a plain Python float.

    state       -- the state vector |ψ> (numpy array)
    hamiltonian -- the Hamiltonian H as a numpy matrix
    """
    bra = state.T.conj()
    expectation = np.dot(bra, np.dot(hamiltonian, state))
    return float(expectation.real)
# It is a bit tricky to define the Hamiltonian with the $\sigma^Z_i$ operators, since saying that it acts on site $i$ means that it acts trivially on all other sites. So, for instance, for two sites, if we act on site one, the actual operator is $\sigma^Z\otimes I$, and acting on site two, we have $I \otimes \sigma^Z$. The above function to calculate the energy takes numpy arrays, so we manually define $\sigma^Z$ and calculate the energy of the Hamiltonian $H=-\sigma^Z_1\sigma^Z_2 - 0.5 (\sigma^Z_1 + \sigma^Z_2)$ on the state $|00\rangle$.
# Pauli-Z and the two-site operators built from it via Kronecker products.
PauliZ = np.array([[1, 0], [0, -1]])
IZ = np.kron(np.eye(2), PauliZ)  # Z acting on site 2 only
ZI = np.kron(PauliZ, np.eye(2))  # Z acting on site 1 only
ZZ = np.kron(PauliZ, PauliZ)     # Z⊗Z coupling term
# H = -σ^Z_1 σ^Z_2 - 0.5 (σ^Z_1 + σ^Z_2), as stated in the prose above.
H = -ZZ + -0.5*(ZI+IZ)
# |00> as a column vector: Kronecker product of two |0> column vectors.
ψ = np.kron([[1], [0]], [[1], [0]])
calculate_energy_expectation(ψ, H)
# All of the terms in this Hamiltonian commute with one another, which is a clear sign of nothing much quantum going on.
#
# To make this a quantum Ising model, we need to add a term that does not commute with the rest of the terms. A transverse field is such, which is an on-site interaction just like the external field. Its effect is described by the Pauli-X operator (the NOT gate), which we will denote by $\sigma^X_i$ for a site $i$. It is very easy to see that the Pauli-Z and the Pauli-X do not commute:
# Apply X then Z starting from |0>.
circuit = QuantumCircuit(q, c)
circuit.x(q[0])
circuit.z(q[0])
job = execute(circuit, backend)
state = job.result().get_statevector(circuit)
print("Pauli-X, then Pauli-Z:", state)
# Same gates in the opposite order: the resulting states differ by a sign,
# demonstrating that X and Z do not commute (see the note below).
circuit = QuantumCircuit(q, c)
circuit.z(q[0])
circuit.x(q[0])
job = execute(circuit, backend)
state = job.result().get_statevector(circuit)
print("Pauli-Z, then Pauli-X:", state)
# There is a clear sign difference.
#
# There are many other ways of making the Ising Hamiltonian noncommuting, but adding the onsite Pauli-X operations leads to the *transverse field Ising model*. Its Hamiltonian reads as
#
# $$ H=-\sum_{<i,j>} J_{ij} \sigma^Z_i \sigma^Z_{j} - \sum_i h_i \sigma^Z_i - \sum_i g_i \sigma^X_i$$.
#
# The transverse field Ising model is critically important to explain how quantum annealing works because by adding the $\sigma^X$ part to the Hamiltonian it becomes possible to exploit quantum effects like tunnelling. It is also important for understanding the quantum approximation optimization algorithms, since it was inspired by quantum annealing.
|
QuantumComputing/Qiskit/04_Classical_and_Quantum_Many-Body_Physics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## #2: mybinder
# homework 7 mybinder test notebook: following code copied from Week 7 In Class Notebook (https://uiuc-ischool-dataviz.github.io/is445_spring2022/nbv.html?notebook_name=%2Fis445_spring2022%2Fweek07%2FinClass_week07.ipynb)
import pandas as pd
import bqplot
import numpy as np
import ipywidgets
import matplotlib.pyplot as plt
# 10x10 grid of cells, each holding 20 random samples.
data = np.random.random((10,10,20))
data.shape
data[0,0,:]
# Label widget for the dashboard; initialised with the mean of cell (0, 0).
myLabel = ipywidgets.Label()
myLabel.value = str(data[0,0,:].mean())
def on_selected(change):
    """Heat-map selection callback: print the (row, col) of the clicked bin.

    Fires only when exactly one bin is selected; multi-bin or empty
    selections are ignored.
    """
    selection = change['owner'].selected
    if len(selection) != 1:  # only allow user to select one bin at a time
        return
    row, col = selection[0]
    print(row, col)
# +
# 1. data (random dataset)
#2. Scales
col_sc = bqplot.ColorScale(scheme='Greens')
x_sc = bqplot.OrdinalScale()
y_sc = bqplot.OrdinalScale()
# 3. axis
ax_col = bqplot.ColorAxis(scale=col_sc, orientation='vertical', side='right')
ax_x = bqplot.Axis(scale=x_sc)
ax_y = bqplot.Axis(scale=y_sc, orientation='vertical')
# 4. Mark (heatmap)
# Color each cell by the mean over its 20 samples; clicking selects a cell.
heat_map = bqplot.GridHeatMap(color=data.mean(axis=2),
                              scales={'color':col_sc, 'row':y_sc, 'column':x_sc},
                              interactions={'click':'select'},
                              selected_style={'fill':'magenta'})
# 5. interaction, cont.
# Wire the selection callback defined above to the heat map's selection state.
heat_map.observe(on_selected,'selected')
# combine into a figure
fig = bqplot.Figure(marks=[heat_map],axes=[ax_col, ax_x, ax_y])
# Finally dashboard
myDashboard = ipywidgets.VBox([myLabel,fig])
myDashboard
# -
|
hwMidInstall/homework7-mybinder.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="966a61c4"
# # **IMI BIG DATA & AI CASE COMPETITION**
#
# ## *By: <NAME>, Tahir & Albert*
#
# Recall that most of our exploratory work, and finding out the best cleaning methods was conducted in the Exploratory Data Analysis Notebook. <br>
# Here, we just use the best methods to move on to the next phase, and build some more features with our data. <br>
# Then, we save the feature generated data and move onto the feature selection phase.
# + [markdown] id="4716c344"
# # Initial Set-Up & Import Data <a class="anchor" id="1"></a>
# + id="c1066cb4"
# Import relevent Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import gc
import math
# Model Metrics & Data Pre-processing
from scipy import stats
from sklearn import metrics
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.metrics import confusion_matrix, roc_auc_score, roc_curve, classification_report, precision_recall_curve
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold, GridSearchCV, RandomizedSearchCV
#Models
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import GradientBoostingClassifier
#import lightgbm and xgboost
import lightgbm as lgb
import xgboost as xgb
# Imbalance dataset methods
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import ADASYN
from imblearn.combine import SMOTETomek
from imblearn.combine import SMOTEENN
# Miscellaneous
from collections import Counter
# Additional Libraries -- Automatic Explanatory Data Analysis
from pandas_profiling import ProfileReport
from IPython.core.display import display, HTML
# Remove warnings (so it doesn't take up space)
import warnings
warnings.filterwarnings('ignore')
# Set seed for repition
np.random.seed(2022)
# + colab={"base_uri": "https://localhost:8080/", "height": 16} id="8SwYTfpdSt9s" outputId="8622b770-fed7-49f3-e457-7761b828836e"
# Some settings for making the tables, plots and the report look nicer
sns.set_theme()
# %matplotlib inline
pd.set_option('display.max_columns', None)  # show every column of wide frames
pd.set_option('display.max_rows', 999)
# Widen the notebook container so wide tables fit on screen.
display(HTML("<style>.container { width:80% !important; }</style>"))
# NOTE(review): "length" is not a CSS property (perhaps "height" was meant),
# so this rule has no effect — confirm intent.
display(HTML("<style>.container { length:100% !important; }</style>"))
# + colab={"base_uri": "https://localhost:8080/", "height": 73} id="c6030a07" outputId="f4fd5c8e-7daa-4209-dd28-5a4264748358"
# Read in the data
# NOTE(review): absolute, machine-specific paths — consider a configurable
# data directory so the notebook runs on other machines.
# Canadian Companies Data
df1 = pd.read_excel("/Users/tahir/Desktop/Fraudit/Datasets/cmeg_df_case_competition_scrambled_train.xlsx")
# General Companies Data
df2 = pd.read_excel("/Users/tahir/Desktop/Fraudit/Datasets/general_industries_df_case_competition_scrambled_train.xlsx")
# Data Dictionary from Canadian Companies Dataset (Although both data dictionaries are the same)
data_dictionary = pd.read_excel("/Users/tahir/Desktop/Fraudit/Datasets/cmeg_df_case_competition_scrambled_train.xlsx", sheet_name=1)
# + [markdown] id="b30d6ae5"
# ## Data Cleaning Methods <a class="anchor" id="2.2"></a>
# ### Data Prep -- Column Types, Date Manipulations, Fixing infinity values <a class="anchor" id="2.2.1"></a>
#
# + colab={"base_uri": "https://localhost:8080/"} id="c05e8eb8" outputId="2122ec9e-de9b-44f3-cd0b-61c3b5894e98"
# Fix the object type column to datetime
df1["Date"] = pd.to_datetime(df1.Period)
# BUG FIX: previously read pd.to_datetime(df1.Period) — df2's Date must come
# from df2's own Period column, not df1's (a copy-paste error that silently
# assigned the wrong dates to every row of df2).
df2["Date"] = pd.to_datetime(df2.Period)
# Drop the object column
df1 = df1.drop(['Period'], axis=1)
df2 = df2.drop(['Period'], axis=1)
df1.info() # Now we have removed the Object column, and got the date as a datetime column
# + id="e44be061"
# Derive calendar features from the parsed Date column.
# Extract year and month for df1
df1['Year'] = df1['Date'].dt.year
df1['Month'] = df1['Date'].dt.month
# Do the same for df2
df2['Year'] = df2['Date'].dt.year
df2['Month'] = df2['Date'].dt.month
# + id="5ac16854"
# Fix the infinity Issues (Some columns have values at infinity -- Fix this )
def impute_inf(data, col):
    """Replace ±inf in ``data[col]`` with the column's finite min/max.

    -inf values become the minimum of the finite values; +inf values become
    the maximum of the finite values.  Mutates ``data`` in place and also
    returns it (so callers may reassign or ignore the return value).

    data -- pandas DataFrame to fix in place
    col  -- name of the column to scan for infinities
    """
    # BUG FIX: the max mask used the duplicated condition
    # (data[col] < inf) & (data[col] < inf); use one symmetric finite mask.
    finite = (data[col] > -np.inf) & (data[col] < np.inf)
    data.loc[data[col] == -np.inf, col] = data.loc[finite, col].min()
    data.loc[data[col] == np.inf, col] = data.loc[finite, col].max()
    return data
# Replace all -/+ inf values with min and max values of the dataset
# NOTE(review): "Period" was already dropped above, so only "Date" is
# actually present to skip here.
for col in df1.columns:
    if col in ["Date","Period"]: pass
    else: df1 = impute_inf(df1,col)
# Do the same for General Companies
for col in df2.columns:
    if col in ["Date","Period"]: pass
    else: df2 = impute_inf(df2,col)
# -
# ## Generate a missing data label
# +
# Flag each row of df1 by how much data it is missing:
#   1 = mostly complete (<=15 missing), 2 = partial (16-45), 3 = sparse (>45).
# Vectorized with isnull().sum(axis=1) + np.select instead of the original
# per-row iloc loop, which performs Python-level indexing once per row and
# is very slow on large frames; thresholds and labels are unchanged.
missing_per_row = df1.isnull().sum(axis=1)
df1["missing_data_flag"] = np.select(
    [missing_per_row <= 15, missing_per_row <= 45],
    [1, 2],
    default=3,
)
# +
# Do the same for df2: missing-data flag (1 = <=15 missing, 2 = 16-45, 3 = >45),
# vectorized instead of the original slow per-row iloc loop.
missing_per_row = df2.isnull().sum(axis=1)
df2["missing_data_flag"] = np.select(
    [missing_per_row <= 15, missing_per_row <= 45],
    [1, 2],
    default=3,
)
# -
# ## Check & fix outliers
# +
# Split column names into financial vs. bookkeeping features, so that only
# the financial columns are winsorized below.
data1 = df1.copy()
non_financial_features = ['ROW','Corp_Residence_Country_Code', 'BR Code', 'CUSTOMER_ID', 'Final_IG', 'B_PLUS_FLAG', 'Year', 'Month', 'Date', 'missing_data_flag']
financial_features = data1.drop(non_financial_features, axis=1)
fin_features_lst = list(financial_features.columns)
def outlier_replace(data, col_name, q1=0.25, q3=0.75):
    """Winsorize data[col_name] in place using Tukey's 1.5*IQR fences.

    Values below Q1 - 1.5*IQR are raised to that fence and values above
    Q3 + 1.5*IQR are lowered to it.  Mutates ``data``; returns None.

    data     -- pandas DataFrame to modify in place
    col_name -- column to clip
    q1, q3   -- lower/upper quantiles defining the IQR (defaults 0.25/0.75)
    """
    lower_q = data[col_name].quantile(q1)
    upper_q = data[col_name].quantile(q3)
    fence = 1.5 * (upper_q - lower_q)
    low_limit = lower_q - fence
    up_limit = upper_q + fence
    data.loc[data[col_name] < low_limit, col_name] = low_limit
    data.loc[data[col_name] > up_limit, col_name] = up_limit
    return None
# Winsorize every financial column in place (outlier_replace mutates the frame).
# Fix outliers in Financial Features for DF1
for col in df1.columns:
    if col in fin_features_lst:
        outlier_replace(df1,col)
    else:
        pass
# Do the same for DF2
for col in df2.columns:
    if col in fin_features_lst:
        outlier_replace(df2,col)
    else:
        pass
# -
# ## Impute Missing Data
# +
# # Essentially we are filling every column with its group's average value for that column.
def impute_average_and_constant(data, group='CUSTOMER_ID', constant=0):
    """Impute missing values with the per-group column mean, then a constant.

    Every column is filled with the mean of that column computed within the
    rows sharing the same ``group`` value.  Anything still missing afterwards
    (e.g. a group whose column is entirely NaN) is filled with ``constant``.

    data - the data table to be imputed on. Type = pandas dataframe
    group - the column to group by. Default is CUSTOMER_ID
    constant - the constant to impute any data that cannot be grouped. Default is 0
    """
    for col in data.columns:
        # BUG FIX: the groupby previously hard-coded 'CUSTOMER_ID', silently
        # ignoring the `group` argument.  transform() keeps the original row
        # index, so the assignment below always aligns correctly.
        data[col] = data.groupby(group)[col].transform(lambda s: s.fillna(s.mean()))
    data = data.fillna(constant)
    return data
# This is now clean, imputed data.
df1 = impute_average_and_constant(df1)
df2 = impute_average_and_constant(df2)
# -
# Sanity check: should now report zero missing values per column.
df1.isnull().sum()
# + [markdown] id="90ac796d"
# # Feature Engineering <a class="anchor" id="4"></a>
# + [markdown] id="c9cf2e2b"
# ## Creating Additional Financial Features<a class="anchor" id="4.1"></a>
# + [markdown] id="0614114a"
# We create few additional features which to better be able to predict the investment flag for a company. <br>
# Total leverage = debt / ebitda <br>
# Ebitda margin = ebitda / revenue <br>
# Operating margin = operating profit / revenue <br>
# Interest Coverage Ratio = Ebitda / interest <br>
# Average growth by Revenue = (Revenue Growth in year 2 + Revenue Growth in year 1) / 2
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="suCPE3F5XUi3" outputId="59d8ce0d-fb5e-4908-cba1-b676295b784a"
# Use Data Dictionary
data_dictionary
# + [markdown] id="TX7DRKqwrP7f"
# ## Creating Three Year Averages for Financial Columns
# + id="Ud5k_fFJlEK6"
def three_year_average(dataframe,col_y0,col_y1,col_y2,col):
    """Write a recency-weighted average of up to three years of data into `col`.

    Weights favour the most recent year: 4/7 and 3/7 when only Y0 and Y1 are
    available, 4/9, 3/9 and 2/9 when all three years are available.
    Mutates `dataframe` in place (drops rows missing Y0) and returns it.
    NOTE(review): rows that have only Y0 get no value written into `col`
    (it stays missing) — confirm that is intended.
    """
    # Rows with no first-year figure cannot be averaged at all: drop them.
    dataframe.drop(dataframe[dataframe[col_y0].isnull()].index, inplace = True)
    #Remove those records if the first year financial info is not available
    # Y0 and Y1 present but Y2 missing: two-year weighted average (4/7, 3/7).
    two_years_average = dataframe[col_y1].notnull() & dataframe[col_y2].isnull()
    dataframe.loc[two_years_average, col] = dataframe.loc[two_years_average,col_y0] * 4/7 + dataframe.loc[two_years_average,col_y1] * 3/7
    # All three years present: three-year weighted average (4/9, 3/9, 2/9).
    three_years_average = dataframe[col_y1].notnull() & dataframe[col_y2].notnull()
    dataframe.loc[three_years_average, col] = dataframe.loc[three_years_average,col_y0] * 4/9 + dataframe.loc[three_years_average,col_y1] * 3/9 + dataframe.loc[three_years_average,col_y2] * 2/9
    return dataframe
def dataframe_plus_three_year_average(dataframe):
    """Append a `<feature>_3YearAverage` column for every yearly feature group.

    Scans the column names for the suffixes _Y0.._Y3, strips the suffix to
    recover each base feature name, then calls three_year_average() once per
    feature.  Returns the (mutated) dataframe.
    NOTE(review): matching uses `year in col` (substring), so a column merely
    containing e.g. "_Y1" anywhere in its name would match — confirm that all
    yearly columns end with the suffix.
    """
    columns = list(dataframe.columns)
    year_list = ['_Y0','_Y1','_Y2','_Y3']
    year_dictionary = {}
    year_dictionary['Features'] = []
    for year in year_list:
        year_dictionary[year] = []
        for col in columns:
            if year in col:
                year_dictionary[year].append(col) #Separates out the features based on year (purpose = organization)
                # col[:-3] strips the 3-character "_Yk" suffix to get the base name.
                if col[:-3] not in year_dictionary['Features']:
                    year_dictionary['Features'].append(col[:-3])
    #Build the new dataset using three year averages
    cols_to_avg = []
    for feature in year_dictionary['Features']:
        cols_to_avg.append([feature + '_Y0', feature + '_Y1', feature + '_Y2', feature + '_3YearAverage'])
    for col1, col2, col3, col in cols_to_avg:
        dataframe = three_year_average(dataframe, col1, col2, col3, col)
    return dataframe
# + colab={"base_uri": "https://localhost:8080/", "height": 374} id="642BlgnElFh5" outputId="d73328a4-4edd-4da3-e6c6-c7783571af50"
#Append three year averages to dataset 1
# (note: this also drops rows missing Y0 data, inside three_year_average)
df1 = dataframe_plus_three_year_average(df1)
df1.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 374} id="xQSPKuBRmeZa" outputId="a155c0bb-2ea9-47e7-a463-8fb9a948f614"
#Append three year averages to dataset 2
df2 = dataframe_plus_three_year_average(df2)
df2.head()
# + id="THGAS6KugYC9"
# Fix the denominator columns so we don't divide anything by 0. (Changing it to 0.0001 instead)
# NOTE(review): .replace(0.000000, 0.0001) matches exact zeros only; near-zero
# values still produce very large ratios downstream.
denominator_cols = ["EBITDA_Y0", "EBITDA_Y1", "EBITDA_Y2", "EBITDA_Y3",
                    "TOTALREVENUE_Y0", "TOTALREVENUE_Y1", "TOTALREVENUE_Y2", "TOTALREVENUE_Y3",
                    "TOTINTEXP_Y0", "TOTINTEXP_Y1", "TOTINTEXP_Y2", "TOTINTEXP_Y3",
                    "CURRENTLIABILITY_Y0", "CURRENTLIABILITY_Y1", "CURRENTLIABILITY_Y2", "CURRENTLIABILITY_Y3",
                    "TOTALASSET_Y0", "TOTALASSET_Y1", "TOTALASSET_Y2", "TOTALASSET_Y3",
                    "NETSALES_Y0", "NETSALES_Y1", "NETSALES_Y2", "NETSALES_Y3",
                    "Average_Total_Assets"
                    ]
for col in df1.columns:
    if col in denominator_cols:
        df1[col]=df1[col].replace(0.000000, 0.0001)
for col in df2.columns:
    if col in denominator_cols:
        df2[col]=df2[col].replace(0.000000, 0.0001)
# -
# ## Generate Ratios which are useful in determining a companies financial well-being
# 
# 
# + colab={"base_uri": "https://localhost:8080/"} id="e92812ff" outputId="4d315fdd-d49e-44e0-f61a-c138ebb9b01b"
def Total_Leverage(dataset):
    """Add Total_Leverage_Y0..Y3 = total debt / EBITDA for each year.

    Mutates ``dataset`` in place and returns it.
    """
    for yr in ("Y0", "Y1", "Y2", "Y3"):
        dataset[f"Total_Leverage_{yr}"] = dataset[f"TOTALDEBT_{yr}"] / dataset[f"EBITDA_{yr}"]
    return dataset
def Ebitda_Margin(dataset):
    """Add Ebitda_Margin_Y0..Y3 = EBITDA / total revenue for each year.

    Mutates ``dataset`` in place and returns it.
    """
    for yr in ("Y0", "Y1", "Y2", "Y3"):
        dataset[f"Ebitda_Margin_{yr}"] = dataset[f"EBITDA_{yr}"] / dataset[f"TOTALREVENUE_{yr}"]
    return dataset
def Operating_Margin(dataset):
    """Add Operating_Margin_Y0..Y3 columns; mutates ``dataset`` and returns it.

    NOTE(review): computed as EBITDA / TOTALREVENUE, which makes this an exact
    duplicate of Ebitda_Margin; the write-up defines operating margin as
    operating profit / revenue — confirm which column was intended.
    """
    for yr in ("Y0", "Y1", "Y2", "Y3"):
        dataset[f"Operating_Margin_{yr}"] = dataset[f"EBITDA_{yr}"] / dataset[f"TOTALREVENUE_{yr}"]
    return dataset
def Interest_Coverage_Ratio(dataset):
    """Add Interest_CR_Y0..Y3 = EBITDA / total interest expense per year.

    Mutates ``dataset`` in place and returns it.
    """
    for yr in ("Y0", "Y1", "Y2", "Y3"):
        dataset[f"Interest_CR_{yr}"] = dataset[f"EBITDA_{yr}"] / dataset[f"TOTINTEXP_{yr}"]
    return dataset
def Average_Growth_By_Revenue(dataset):
    """Add year-over-year revenue growth rates (Y3->Y2, Y2->Y1) and their mean."""
    rev_y1 = dataset["TOTALREVENUE_Y1"]
    rev_y2 = dataset["TOTALREVENUE_Y2"]
    rev_y3 = dataset["TOTALREVENUE_Y3"]
    dataset["Growth_Y3_to_Y2"] = (rev_y2 - rev_y3) / rev_y3
    dataset["Growth_Y2_to_Y1"] = (rev_y1 - rev_y2) / rev_y2
    dataset["Average_Growth"] = (dataset["Growth_Y3_to_Y2"] + dataset["Growth_Y2_to_Y1"]) / 2
    return dataset
# 1
def Current_Ratio(dataset):
    """Add current-asset / current-liability liquidity columns (Current_Ratio_Y0..Y3)."""
    for yr in ("Y0", "Y1", "Y2", "Y3"):
        dataset[f"Current_Ratio_{yr}"] = dataset[f"CURRENTASSET_{yr}"] / dataset[f"CURRENTLIABILITY_{yr}"]
    return dataset
# 2
def Quick_Ratio(dataset):
    """Add (cash + receivables) / current-liability columns (Quick_Ratio_Y0..Y3)."""
    for yr in ("Y0", "Y1", "Y2", "Y3"):
        liquid = dataset[f"CCE_{yr}"] + dataset[f"ARTurnover_{yr}"]
        dataset[f"Quick_Ratio_{yr}"] = liquid / dataset[f"CURRENTLIABILITY_{yr}"]
    return dataset
# 8
def Debt_Ratio(dataset):
    """Add total-debt / total-asset solvency columns (Debt_Ratio_Y0..Y3) in place."""
    for yr in ("Y0", "Y1", "Y2", "Y3"):
        dataset[f"Debt_Ratio_{yr}"] = dataset[f"TOTALDEBT_{yr}"] / dataset[f"TOTALASSET_{yr}"]
    return dataset
# 13
def Asset_Turnover(dataset):
    # NOTE(review): despite its name, "Average_Total_Assets" is computed from
    # the CURRENTASSET_Y* columns, not TOTALASSET_Y* (which do exist in the
    # dataset and are listed among the denominator columns above).
    # Confirm which was intended before relying on this ratio.
    dataset["Average_Total_Assets"] = (dataset["CURRENTASSET_Y0"] + dataset["CURRENTASSET_Y1"] + dataset["CURRENTASSET_Y2"] + dataset["CURRENTASSET_Y3"]) / 4
    # Net sales for each year divided by the (four-year) average asset base.
    dataset["Asset_Turnover_Y0"] = dataset["NETSALES_Y0"] / dataset["Average_Total_Assets"]
    dataset["Asset_Turnover_Y1"] = dataset["NETSALES_Y1"] / dataset["Average_Total_Assets"]
    dataset["Asset_Turnover_Y2"] = dataset["NETSALES_Y2"] / dataset["Average_Total_Assets"]
    dataset["Asset_Turnover_Y3"] = dataset["NETSALES_Y3"] / dataset["Average_Total_Assets"]
    return dataset
# 12
def Return_Net_Sales(dataset):
    """Add EBITDA / net-sales profitability columns (Return_Net_Sales_Y0..Y3)."""
    for yr in ("Y0", "Y1", "Y2", "Y3"):
        dataset[f"Return_Net_Sales_{yr}"] = dataset[f"EBITDA_{yr}"] / dataset[f"NETSALES_{yr}"]
    return dataset
def Create_Features(my_df):
    """Apply every ratio-engineering function to my_df in place and return it.

    The functions run in the same order as the original hand-written calls.
    """
    feature_builders = (
        Total_Leverage,
        Ebitda_Margin,
        Operating_Margin,
        Interest_Coverage_Ratio,
        Average_Growth_By_Revenue,
        Current_Ratio,
        Quick_Ratio,
        Debt_Ratio,
        Return_Net_Sales,
        Asset_Turnover,
    )
    for build in feature_builders:
        build(my_df)
    return my_df
# Run the full ratio pipeline on both datasets (mutates them in place).
Create_Features(df1)
Create_Features(df2)
print(" ")
# -
df1.head()
# Shape check: the pipeline adds ~73 new ratio columns to the original 125.
print("Original Dataset shape was: (28224, 125)")
print("Now, we have:", df1.shape)
# ### A total of over 73 new columns were created!
# +
# Make sure the new datasets are still clean
data1 = df1.copy()
# Identifier / bookkeeping columns that must be excluded from outlier handling.
non_financial_features = ['ROW','Corp_Residence_Country_Code', 'BR Code', 'CUSTOMER_ID', 'Final_IG', 'B_PLUS_FLAG', 'Year', 'Month', 'Date', 'missing_data_flag']
financial_features = data1.drop(non_financial_features, axis=1)
fin_features_lst = list(financial_features.columns)
# Fix outliers in Financial Features for DF1
# (outlier_replace is defined earlier in this notebook.)
for col in df1.columns:
    if col in fin_features_lst:
        outlier_replace(df1,col)
    else:
        pass
# Do the same for DF2
for col in df2.columns:
    if col in fin_features_lst:
        outlier_replace(df2,col)
    else:
        pass
# Replace all -/+ inf values (produced by near-zero denominators) with
# min and max values of the dataset (impute_inf defined earlier).
for col in df1.columns:
    if col in ["Date","Period"]: pass
    else: df1 = impute_inf(df1,col)
# Do the same for General Companies
for col in df2.columns:
    if col in ["Date","Period"]: pass
    else: df2 = impute_inf(df2,col)
# -
# Feature Engineering Completed
# Persist the fully engineered datasets for the downstream modeling notebooks.
df1.to_excel("/Users/tahir/Desktop/Fraudit/Datasets/Feature_Engineering_Completed_df1.xlsx")
df2.to_excel("/Users/tahir/Desktop/Fraudit/Datasets/Feature_Engineering_Completed_df2.xlsx")
|
Scripts/Feature Engineering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Image Classification - Conv Nets -Pytorch
# > Classifying whether an image is a `bee` or an `ant` using `ConvNets` in pytorch
# ### Imports
import cv2
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
import torch
from torch import nn
import torch.nn.functional as F
import os
# ### Data Preparation
# +
class Insect:
    """Namespace of constants for the bee-vs-ant image dataset."""
    BEE = 'BEE'
    ANT = "ANT"
    # Folders holding the raw RGB images for each class.
    BEES_IMAGES_PATH = 'data/colored/rgb/bees'
    ANTS_IMAGES_PATH = 'data/colored/rgb/ants'
classes = {'bee': 0, 'ant' : 1}
# Invert the mapping so an integer label looks up its class name.
classes = {label: name for name, label in classes.items()}
classes
# -
os.path.exists(Insect.BEES_IMAGES_PATH)  # sanity check that the bee folder is present
# Load each image as float32 scaled to [0, 1]; label 0 = bee, 1 = ant.
insects = []
for path in os.listdir(Insect.BEES_IMAGES_PATH):
    img_path = os.path.join(Insect.BEES_IMAGES_PATH, path)
    image = np.array(cv2.imread(img_path, cv2.IMREAD_UNCHANGED), dtype='float32')
    image = image / 255
    insects.append([image, 0])
for path in os.listdir(Insect.ANTS_IMAGES_PATH):
    img_path = os.path.join(Insect.ANTS_IMAGES_PATH, path)
    image = np.array(cv2.imread(img_path, cv2.IMREAD_UNCHANGED), dtype='float32')
    image = image / 255
    insects.append([image, 1])
# NOTE(review): np.array over [image, label] pairs assumes every image has the
# same shape; on modern NumPy ragged input requires dtype=object -- confirm.
insects = np.array(insects)
np.random.shuffle(insects)
# ### Visualization
plt.imshow(insects[7][0], cmap="gray"), insects[10][0].shape
# > Seperating Labels and features
# Separate the shuffled [image, label] pairs into feature and label arrays.
X = np.array([insect[0] for insect in insects])
y = np.array([insect[1] for insect in insects])
X[0].shape
# > Splitting the data into training and test.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33, test_size=.2)
X_train.shape, y_train.shape, y_test.shape, X_test.shape
# > Converting the data into `torch` tensor.
# +
# Convert features to float32 torch tensors. Labels become float tensors here;
# they are cast to long later because CrossEntropyLoss needs integer class ids.
X_train = torch.from_numpy(X_train.astype('float32'))
X_test = torch.from_numpy(X_test.astype('float32'))
y_train = torch.Tensor(y_train)
y_test = torch.Tensor(y_test)
# -
# ### Model Creation
# +
class Net(nn.Module):
    """Three-block CNN classifier for 200x200 RGB images with 2 output classes."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels= 32, kernel_size=(3, 3))
        self.conv2 = nn.Conv2d(32, 64, (3, 3))
        self.conv3 = nn.Conv2d(64, 64, (3, 3))
        self._to_linear = None  # flattened conv-output size; filled by the dummy pass below
        # One dummy forward pass through the conv stack so fc1 can be sized
        # without hard-coding the flattened dimension.
        self.x = torch.randn(3, 200, 200).view(-1, 3, 200, 200)
        self.conv(self.x)
        self.fc1 = nn.Linear(self._to_linear, 64)
        self.fc2 = nn.Linear(64, 2)

    def conv(self, x):
        """Feature extractor: three conv + ReLU + 2x2 max-pool blocks."""
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
        if self._to_linear is None:
            self._to_linear = x.shape[1] * x.shape[2] * x.shape[3]
        return x

    def forward(self, x):
        """Return per-class logits of shape (batch, 2)."""
        x = self.conv(x)
        x = x.view(-1, self._to_linear)
        x = F.relu(self.fc1(x))
        # BUG FIX: fc2 was defined but never applied, so the network emitted
        # the 64 raw hidden units instead of the 2 class logits that
        # CrossEntropyLoss (and the argmax-based evaluation below) expect.
        return self.fc2(x)
# Instantiate the network (the constructor's dummy pass sizes the linear head).
net = Net()
net
# -
# Adam optimizer with cross-entropy loss; small batches for a small dataset.
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
loss_function = nn.CrossEntropyLoss()
EPOCHS = 10
BATCH_SIZE = 5
# Plain mini-batch training loop (no shuffling between epochs, no DataLoader).
for epoch in range(EPOCHS):
    print(f'Epochs: {epoch+1}/{EPOCHS}')
    for i in range(0, len(y_train), BATCH_SIZE):
        X_batch = X_train[i: i+BATCH_SIZE].view(-1, 3, 200, 200)
        # CrossEntropyLoss requires int64 class indices, hence .long().
        y_batch = y_train[i: i+BATCH_SIZE].long()
        net.zero_grad() ## or you can say optimizer.zero_grad()
        outputs = net(X_batch)
        loss = loss_function(outputs, y_batch)
        loss.backward()
        optimizer.step()
    # Reports only the loss of the final batch of the epoch.
    print("Loss", loss)
# ### Evaluating the model
# ### Test set
# Accuracy on the held-out test set.
total, correct = 0, 0
with torch.no_grad():
    for i in range(len(X_test)):
        # BUG FIX: y_test[i] is a 0-dim scalar label, so torch.argmax(y_test[i])
        # always returned 0 and every prediction was compared against class 0.
        # Compare against the label value itself instead.
        correct_label = int(y_test[i].item())
        prediction = torch.argmax(net(X_test[i].view(-1, 3, 200, 200))[0])
        if prediction == correct_label:
            correct += 1
        total += 1
print(f"Accuracy: {correct/total}")
torch.argmax(net(X_test[1].view(-1, 3, 200, 200))), y_test[0]
# ### Train set
# Accuracy on the training set (upper bound / overfitting check).
total, correct = 0, 0
with torch.no_grad():
    for i in range(len(X_train)):
        # BUG FIX: torch.argmax on the 0-dim scalar y_train[i] always gave 0,
        # so the accuracy compared every prediction against class 0.
        # Use the label value itself instead.
        correct_label = int(y_train[i].item())
        prediction = torch.argmax(net(X_train[i].view(-1, 3, 200, 200))[0])
        if prediction == correct_label:
            correct += 1
        total += 1
print(f"Accuracy: {correct/total}")
# ### Making Predictions
# Show a single test image with its predicted class name as the title.
plt.imshow(X_test[12])
plt.title(classes[torch.argmax(net(X_test[12].view(-1, 3, 200, 200))).item()].title(), fontsize=16)
plt.show()
# +
# NOTE(review): every subplot draws the same image (X_test[2]); a different
# test image per cell was probably intended -- confirm.
fig, ax = plt.subplots(nrows=3, ncols=3, figsize=(10, 10))
for row in ax:
    for col in row:
        col.imshow(X_test[2])
plt.show()
# -
|
04_Projects/01_Ants_vrs_Bees/04_Conv_NN-Image Classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Define the function "say_hello" so it prints "Hello!" when called.
def say_hello():
    """Print a fixed greeting to stdout."""
    greeting = "Hello!"
    print(greeting)

say_hello()
# Define the function "say_something" so it prints whatever is passed as the variable when called.
def say_something(something):
    """Echo the supplied message to stdout."""
    message = something
    print(message)

Jane_says = "Hi, my name is Jane. I'm learning Python!"
say_something(Jane_says)
# Define a function that calculates the percentage of students who passed
# math and returns the passing percentage when the function is called.
def passing_math_percent(pass_math_count, student_count):
    """Return the math pass rate as a percentage of student_count."""
    ratio = pass_math_count / float(student_count)
    return ratio * 100
passing_math_count = 29370
total_student_count = 39170
# Call the function.
passing_math_percent(passing_math_count, total_student_count)
# A list of my grades.
my_grades = ['B', 'C', 'B' , 'D']
# Import pandas.
import pandas as pd
# Convert the my_grades list to a pandas Series.
my_grades = pd.Series(my_grades)
my_grades
# +
# Bump each grade up one letter with Series.map (unmapped values become NaN).
my_grades.map({'B': 'A', 'C': 'B', 'D': 'C'})
# +
# Using the format() mini-language to round each grade to a whole number.
my_grades = [92.34, 84.56, 86.78, 98.32]
for grade in my_grades:
    print("{:.0f}".format(grade))
# -
# Convert the numerical grades to a Series.
my_grades = pd.Series([92.34, 84.56, 86.78, 78.32])
my_grades
# Format the grades to the nearest whole number percent.
my_grades.map("{:.0f}".format)
|
function.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def isnegative(n):
    """Return True when n is strictly less than zero."""
    return n < 0
# Demo call: -6 is negative, so this evaluates to True.
isnegative(-6)
# Sample list of integers used in the demo calls that follow.
list1 = [1,2,3]
def count_evens(list1):
    """Print how many values in list1 are even (does not return the count)."""
    print(sum(1 for num in list1 if num % 2 == 0))
def increment_odds(n):
    """Return the first n odd numbers (1, 3, 5, ...) as a list."""
    return list(range(1, 2 * n, 2))

increment_odds(3)
def average(l):
    """Return the arithmetic mean of l, rounded to 2 decimal places.

    BUG FIX: the original returned inside the first loop iteration and
    computed round(len(l) / n, 2) -- the list LENGTH divided by the FIRST
    element -- which is not an average at all. It now computes the mean.
    """
    return round(sum(l) / len(l), 2)

average([1,2,3])
# Build a simple name record keyed by first/last name.
name_to_dict = dict()
name_to_dict["first_name"] = "Ada"  # BUG FIX: key was misspelled "frist_name"
name_to_dict["last_name"] = "Lovelace"
name_to_dict
def capitalize_names(name):
    """Return name with its first character upper-cased (str.capitalize).

    BUG FIX: the original iterated over name[0], which raised IndexError for
    an empty string (as in the demo call below) and contributed nothing for
    non-empty input. name.capitalize() alone gives the intended result and
    makes empty input return "".
    """
    return name.capitalize()

capitalize_names("")
def count_vowels(value):
    """Return the number of vowels (a, e, i, o, u) in value, case-insensitively."""
    return sum(1 for ch in value.lower() if ch in "aeiou")

count_vowels('abcde')
# +
def analyze_word(word):
    # NOTE(review): this function is broken as written and is left byte-identical
    # pending clarification of its intended contract:
    #  - num_of_char is keyed by the whole `word`, not the character `c`, so it
    #    never accumulates per-character counts.
    #  - `return len(word)` sits inside the first loop, so the function exits on
    #    the very first character and everything after it is unreachable.
    #  - og_word and num_of_vowels are created but never populated or returned.
    vowels = ['a','e','i','o','u']
    og_word = {}
    num_of_vowels = {}
    num_of_char = {}
    for c in word:
        if c in num_of_char:
            num_of_char[word] += 1
        else:
            num_of_char[word] = 1
        return len(word)
    # Unreachable for any non-empty word (see note above).
    for c in word:
        if c in vowels:
            num_of_char[word] += 1
        else:
            num_of_char[word] = 1
        return(c)

analyze_word('word')
# -
|
Untitled1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reproduce the Results for the Public Datasets
# Here, we report the code to reproduce the results reported in our work "GHOST: Adjusting the Decision Threshold to Handle Imbalanced Data in Machine Learning".
#
# Specifically, the code below allows training four different machine learning classifiers (random forest, gradient boosting, XGBoost, and logistic regression) in combination with two different molecular descriptors (ECFP4 and RDKit2D). The models are trained on 138 public datasets, which are described and used in [this RDKit blog post](http://rdkit.blogspot.com/2018/11/working-with-unbalanced-data-part-i.html) by Greg Landrum.
#
# For a step by step execution and explanation of the code, see the tutorial `Tutorial_Threshold_Optimization_RF.ipynb`.
# ### Outlook
#
# [Import Libraries](#section0) <br>
#
# 1. [Functions](#section1)
#
# 1.1 [Functions to prepare the datasets and generate the fingerprints](#section1.1) <br>
# 1.2 [Functions to calculate output metrics](#section1.2) <br>
# 1.3 [Functions to optimize the decision threhold](#section1.3) <br>
# 1.3.1 [Optimize the decision threshold using the prediction probabilities of the out-of-bag (oob) set](#section1.3.1) <br>
# 1.3.2 [Generalized procedure to optimize the decision threshold](#section1.3.2) <br>
# 1.4 [Functions to run other rebalancing approaches: RUS, SMOTE, and balanced random forest](#section1.4) <br>
# 1.5 [Functions for Conformal Prediction](#section1.5) <br>
# 1.6 [Function to run everything and store the results into an archive](#section1.6) <br>
#
#
# 2. [Run the code](#section2)
#
# 2.1 [Set Parameters](#section2.1) <br>
# 2.2 [Serotonin Datasets](#section2.2) <br>
# 2.3 ["Dataset 1" (DS1) Datasets](#section2.3) <br>
# 2.4 [PubChem Datasets](#section1.3.1) <br>
# 2.5 [CHEMBL DrugMatrix Datasets](#section2.4) <br>
#
#
# 3. [Plot Results](#section3)<br>
# <a id='section0'></a>
#
# ### Import Libraries
# +
from rdkit import Chem, DataStructs
from rdkit.Chem import Draw
from rdkit.Chem import rdFingerprintGenerator
from rdkit.Chem import AllChem
from rdkit import SimDivFilters
from rdkit.Chem.MolStandardize import rdMolStandardize
from collections import defaultdict
import pandas as pd
import numpy as np
import gzip
import pickle
import matplotlib.pyplot as plt
from descriptastorus.descriptors.DescriptorGenerator import MakeGenerator
# -
from sklearn.model_selection import train_test_split
import sklearn
import random
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
try:
import xgboost as xgb
except ImportError:
xgb = None
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
# only print out critical error messages of rdkit and reduce verbosity of xgboost
# Silence all non-critical RDKit log output.
from rdkit import RDLogger
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
# <a id='section1'></a>
#
# # 1. Functions
# <a id='section1.1'></a>
#
# ### 1.1 Functions to prepare the datasets and generate the fingerprints
# +
# Remove counterions: Take the largest organic fragment
def salt_remover(smiles):
    """Strip counterions: for multi-fragment SMILES (containing '.'), keep only
    the largest (preferably organic) fragment; single-fragment SMILES pass
    through unchanged."""
    chooser = rdMolStandardize.LargestFragmentChooser(preferOrganic = True)
    cleaned = []
    for smi in smiles:
        if "." not in smi:
            cleaned.append(smi)
            continue
        largest = chooser.choose(Chem.MolFromSmiles(smi))
        cleaned.append(Chem.MolToSmiles(largest))
    return cleaned
# Compute the ECFP4 fingerprint with 2048 bits
def add_MorganFP(acts):
    """Attach a 'MorganFP' column (2048-bit ECFP4 as int16 numpy arrays) to acts."""
    mols = [Chem.MolFromSmiles(smi) for smi in acts['canonical_smiles']]
    fingerprints = []
    for fp in rdFingerprintGenerator.GetFPs(mols, fpType=rdFingerprintGenerator.MorganFP):
        arr = np.zeros((len(fp),), np.int16)
        DataStructs.ConvertToNumpyArray(fp, arr)
        fingerprints.append(arr)
    acts['MorganFP'] = fingerprints
    return(acts)
# Compute the RDKit2D fingerprint (200 topological properties) using the descriptastorus library
def add_rdkit2d(acts, smiles_column = 'canonical_smiles'):
    """Attach an 'RDKit2D' column (200 float descriptors per molecule) to acts.

    Molecules whose descriptor generation fails get an all-zero vector; +/-inf
    values are clamped to the float32 range and NaNs are zeroed.
    """
    if smiles_column not in list(acts):
        print("Error: the column {} is not contained in the input dataframe. A column containing SMILES is required to calculate property-based fingerprints".format(smiles_column))
        return(acts)
    acts[smiles_column] = acts[smiles_column].astype(str)
    rdkit2d = []
    generator = MakeGenerator(("RDKit2D",))
    for n, smi in enumerate(acts[smiles_column]):
        try:
            data = generator.process(smi)
            # The generator prepends a success flag; strip it. (The second
            # check appears to guard against a False flag as well -- note the
            # two pops are sequential, so a [True, False, ...] prefix would
            # drop both.)
            if data[0] == True:
                data.pop(0)
            if data[0] == False:
                data.pop(0)
            data = np.float32(data)
            # Clamp infinities to the float32 range and zero out NaNs.
            data[np.isposinf(data)] = np.finfo('float32').max
            data[np.isneginf(data)] = np.finfo('float32').min
            data = np.nan_to_num(data)
            rdkit2d.append(list(data))
        except:
            # Fall back to an all-zero descriptor vector on failure.
            rdkit2d.append([0]*200)
            print("Error: RDKit2D not generated for {}".format(acts['compound_chembl_id'][n]))
    acts['RDKit2D'] = rdkit2d
    return(acts)
def gen_fps(acts,inacts, descriptor = 'MorganFP'):
    """Label actives (1) / inactives (0), add the requested descriptor column,
    and return both frames concatenated into one re-indexed dataset."""
    # generate fingerprints for the actives and inactives and convert them to numpy arrays so that
    # we can learn from them. This uses the generalized fingerprinter added in the RDKit 2018.09 release
    labels_column = 'labels'
    if descriptor == 'RDKit2D':
        acts = add_rdkit2d(acts)
        inacts = add_rdkit2d(inacts)
    if descriptor == 'MorganFP':
        acts = add_MorganFP(acts)
        inacts = add_MorganFP(inacts)
    acts[labels_column] = 1
    inacts[labels_column] = 0
    df_dataset = pd.concat((acts, inacts))
    df_dataset.reset_index(inplace = True, drop = True)
    # exclude the compounds for which RDKit2D could not be completely calculated
    if descriptor == 'RDKit2D':
        check_idx = [i for i,a in enumerate(list(df_dataset[descriptor])) if len(a) != 200]
        df_dataset.drop(check_idx, inplace=True)
    return df_dataset
# -
# <a id='section1.2'></a>
#
# ### 1.2 Function to calculate output metrics
# +
def calc_metrics(labels_test, test_probs, threshold = 0.5):
    """Binarize test_probs at `threshold`, print an evaluation report, and
    return (kappa, confusion_matrix, auc)."""
    predicted = [int(p >= threshold) for p in test_probs]
    auc = metrics.roc_auc_score(labels_test, test_probs)
    kappa = metrics.cohen_kappa_score(labels_test, predicted)
    confusion = metrics.confusion_matrix(labels_test, predicted, labels=list(set(labels_test)))
    print('thresh: %.2f, kappa: %.3f, AUC test-set: %.3f'%(threshold, kappa, auc))
    print(confusion)
    print(metrics.classification_report(labels_test, predicted))
    return kappa, confusion, auc
# -
# <a id='section1.3'></a>
#
# ### 1.3 Functions to optimize the decision threshold
# <a id='section1.3.1'></a>
# #### 1.3.1 Optimize the decision threshold using the prediction probabilities of the out-of-bag (oob) set
# This approach has been described [here](http://rdkit.blogspot.com/2018/11/working-with-unbalanced-data-part-i.html) and can only be used for the random forest classifier.
#
# The function below requires as input:
# - `oob_probs`: the oob active prediction probabilities, which can be extracted from the trained RF model (cls) as
#
# oob_probs = cls.oob_decision_function_
# oob_probs = [x[1] for x in oob_probs]
#
# - `labels_train`: the true labels of the training set
# - `thresholds`: list a thresholds (e.g. [0.1, 0.2, 0.3, 0.4, 0.5])
#
# To optmize the decision threshold, one can use different metrics.
# Here, you can select `ThOpt_metrics = 'Kappa'`, `'ROC'`, or `'ROC_Song14'`.
#
# With, `ThOpt_metrics = 'Kappa'`, the Cohen's Kappa is calculated for the oob set and a range of thresholds (specified using the threshold argument). The optimal threshold (returned) is the one that maximizes the Cohen's Kappa.
#
# The threshold optimization based on the ROC curve is described [here](https://doi.org/10.1007/s11548-013-0913-8). With, `ThOpt_metrics = 'ROC'`, sensitivity and specificity are calculated for a range of thresholds using the roc_curve function of scikit-learn. The optimal threshold (returned) is the one that minimizes the distance to the [0,1] corner of the ROC plot.
import ghostml
def run_it_oob_optimization(oob_probs, labels_train, thresholds, ThOpt_metrics = 'Kappa'):
    """Optimize the decision threshold based on the prediction probabilities of the out-of-bag set of random forest.
    The threshold that maximizes the Cohen's kappa coefficient or a ROC-based criterion
    on the out-of-bag set is chosen as optimal.
    Parameters
    ----------
    oob_probs : list of floats
        Positive prediction probabilities for the out-of-bag set of a trained random forest model
    labels_train: list of int
        True labels for the training set
    thresholds: list of floats
        List of decision thresholds to screen for classification
    ThOpt_metrics: str
        Optimization metric. Choose between "Kappa" and "ROC"
    Returns
    ----------
    thresh: float
        Optimal decision threshold for classification
    """
    # Thin wrapper: the threshold search itself lives in the ghostml package.
    return ghostml.optimize_threshold_from_oob_predictions(labels_train, oob_probs, thresholds,
                                                           ThOpt_metrics = ThOpt_metrics)
# <a id='section1.3.2'></a>
#
# #### 1.3.2 GHOST: Generalized procedure to optimize the decision threshold
# The function below utilizes the prediction probabilities of N training subsets (`N_subsets`) to optimize the classification threshold. The subsets can be drawn with or without replacement (`with_replacement`). The subset size can be selected using the `subset_size` argument (default = 0.2), which, if float, represents the proportion of the dataset to include in the subset. If integer, it represents the actual number of instances to include in the subset. As for the previous function, two optimization metrics are available `ThOpt_metrics = 'Kappa'` or `ThOpt_metrics = 'ROC'`.
#
# Required inputs are:
# - `cls`: trained classification model
# - `fps_train`: numpy array of descriptors of the training set
# - `labels_train`: list of true labels for the training set
# - `thresholds`: list a thresholds (e.g. [0.1, 0.2, 0.3, 0.4, 0.5])
#
import ghostml
def optimize_threshold_train_subset(cls, fps_train, labels_train, thresholds,
                                    ThOpt_metrics = 'Kappa', N_subsets = 100,
                                    subsets_size = 0.2, with_replacement = False, random_seed = None):
    """Optimize the decision threshold based on subsets of the training set.
    The threshold that maximizes the Cohen's kappa coefficient or a ROC-based criterion
    on the training subsets is chosen as optimal.
    Parameters
    ----------
    cls : obj
        Trained machine learning classifier built using scikit-learn
    fps_train: list
        Molecular descriptors for the training set
    labels_train: list of int
        True labels for the training set
    thresholds: list of floats
        List of decision thresholds to screen for classification
    ThOpt_metrics: str
        Optimization metric. Choose between "Kappa" and "ROC"
    N_subsets: int
        Number of training subsets to use in the optimization
    subsets_size: float or int
        Size of the subsets. if float, represents the proportion of the dataset to include in the subsets.
        If integer, it represents the actual number of instances to include in the subsets.
    with_replacement: bool
        The subsets are drawn randomly. True to draw the subsets with replacement
    random_seed: int
        random number to seed the drawing of the subsets
    Returns
    ----------
    thresh: float
        Optimal decision threshold for classification
    """
    # calculate prediction probability for the training set
    # (the GHOST subsampling itself is delegated to the ghostml package)
    probs_train = cls.predict_proba(fps_train)[:,1]
    opt_thresh = ghostml.optimize_threshold_from_predictions(labels_train,probs_train,thresholds,
                                                             ThOpt_metrics=ThOpt_metrics,N_subsets=N_subsets,
                                                             subsets_size=subsets_size,with_replacement=with_replacement,
                                                             random_seed = random_seed)
    return opt_thresh
# <a id='section1.6'></a>
#
# ### 1.6 Function to run everything and store the results into an archive
# +
def run_it_(assay_id, acts, inacts, archive, descriptor = 'MorganFP', method = "RF", scale_features = False,
            test_set_size = 0.2, random_seed = None,
            opt_oob = False, ThOpt_metrics = 'Kappa', thresholds = None, N_subsets = None,
            subsets_size = None, with_replacement = False, try_resampling = True, try_brf = False,
            try_cp = False, cv_folds = 5, nc_normalization = True, N_acp_models = 25, **kwargs):
    """Train one classifier on an actives/inactives pair and evaluate it at the
    default 0.5 threshold and at GHOST-optimized thresholds.

    Metrics tuples (label, threshold, kappa, confusion, auc) are appended to
    archive[assay_id]; a per-compound prediction dataframe is returned.

    NOTE(review): try_resampling, try_brf, try_cp, cv_folds, nc_normalization
    and N_acp_models are accepted but not used in this version of the function.
    """
    # clean data: keep only the largest organic fragment of each SMILES
    acts.canonical_smiles = salt_remover(acts['canonical_smiles'])
    inacts.canonical_smiles = salt_remover(inacts['canonical_smiles'])
    # generate descriptors and split the data into a training and test set:
    df_dataset = gen_fps(acts,inacts, descriptor = descriptor)
    # return fps and true labels
    labels = np.array(list(df_dataset['labels']))
    fps = np.array(df_dataset[descriptor])
    fps = np.stack(fps, axis = 0)
    # retrieve compound IDs
    mol_names = list(df_dataset['compound_chembl_id'])
    # scale features (useful for LR; fitted on the full set before splitting):
    if scale_features:
        scaler = StandardScaler()
        fps = scaler.fit_transform(fps)
    # stratified train - test split
    fps_train, fps_test, labels_train, labels_test, names_train, names_test = train_test_split(fps, labels, mol_names,
                                                                                               test_size = test_set_size,
                                                                                               stratify = labels, random_state = random_seed)
    # train classifier
    print("--------- Default -----------")
    if method == 'RF':
        cls = RandomForestClassifier(n_estimators = 500, max_depth = 15, min_samples_leaf = 2,oob_score = True,
                                     n_jobs=4, random_state=random_seed, **kwargs)
    elif method == 'GB':
        cls = GradientBoostingClassifier(n_estimators = 100, validation_fraction = 0.2, n_iter_no_change = 10,
                                         tol = 0.01, random_state=random_seed, **kwargs)
    elif method == 'XGB':
        cls = xgb.XGBClassifier(n_estimators = 100, **kwargs)
    elif method == 'LR':
        cls = LogisticRegression(random_state=random_seed, **kwargs)
    if method == 'XGB':
        # XGBoost gets early stopping on its own training error
        cls.fit(fps_train, labels_train, early_stopping_rounds=10, eval_metric="error",
                eval_set=[(fps_train, labels_train)])
    else:
        cls.fit(fps_train, labels_train)
    # predict the test set
    test_probs = cls.predict_proba(fps_test)[:,1] #prediction probabilities for the test set
    #store predictions in dataframe
    scores = [1 if x>=0.5 else 0 for x in test_probs]
    df_preds = pd.DataFrame({'mol_names': names_test, 'y_true': labels_test, 'standard': scores})
    # generate and show some evaluation stats for the model on the test data:
    kappa, confusion, auc = calc_metrics(labels_test, test_probs, threshold = 0.5)
    archive[assay_id].append(('standard',0.5,kappa,confusion,auc))
    # Optimize decision threshold
    print("--------- Balanced -----------")
    if thresholds is None:
        # default threshold grid: 0.05 to 0.50 in steps of 0.05
        thresholds = np.round(np.arange(0.05,0.55,0.05),2)
    # optimize the decision thresholds based on the prediction probabilities of the oob set
    # Can only be used for random forest
    if opt_oob == True and method == 'RF':
        print("--------- Balanced Oob -----------")
        # extract oob probabilities for the positive class
        oob_probs = cls.oob_decision_function_
        oob_probs = [x[1] for x in oob_probs]
        oob_auc = metrics.roc_auc_score(labels_train, oob_probs)
        # optimize threshold
        thresh_oob = run_it_oob_optimization(oob_probs, labels_train, thresholds, ThOpt_metrics = ThOpt_metrics)
        # calculate metrics using the optimized decision threshold
        kappa, confusion, auc_tmp = calc_metrics(labels_test, test_probs, threshold = thresh_oob)
        archive[assay_id].append(('OobOpt',thresh_oob,kappa,confusion,oob_auc))
        #store predictions in dataframe
        scores = [1 if x>=thresh_oob else 0 for x in test_probs]
        df_preds['OobOpt'] = scores
    print("--------- Balanced Subsets -----------")
    # optimize the decision thresholds based on the prediction probabilities of N training subsets
    # Can be used for every machine learning model
    thresh_sub = optimize_threshold_train_subset(cls, fps_train, labels_train, thresholds,
                                                 ThOpt_metrics = ThOpt_metrics,
                                                 N_subsets = N_subsets, subsets_size = subsets_size,
                                                 with_replacement = with_replacement, random_seed = random_seed)
    #store predictions in dataframe
    scores = [1 if x>=thresh_sub else 0 for x in test_probs]
    df_preds['GHOST'] = scores
    # calculate metrics using the optimized decision threshold
    kappa, confusion, auc_tmp = calc_metrics(labels_test, test_probs, threshold = thresh_sub)
    archive[assay_id].append(('GHOST',thresh_sub,kappa,confusion,auc_tmp))
    return df_preds
# -
# <a id='section2'></a>
#
#
# # 2. Run the code
# <a id='section2.1'></a>
#
# ### 2.1 Set Parameters
# +
# parameters to build model
# NOTE: several of these are re-assigned in the run cells further below.
method = 'GB' # choose between RF, GB, XGB, and LR
descriptor = 'MorganFP' # choose between MorganFP and RDKit2D
scale_features = False
test_set_size = 0.2
# parameters for threshold optimization - we use default values for most parameters
thresholds = np.round(np.arange(0.05,0.55,0.05),2)
random_seed = 16
#these are default:
ThOpt_metrics = 'Kappa'
N_subsets = 100
subsets_size = 0.2
with_replacement = False
opt_oob = True # only for RF, one can also optimize the threshold using the oob-based method
# run resampling with SMOTE and RUS
try_resampling = False
# run BRF
try_brf = False # set it to True to also run a balanced random forest
# run conformal prediction
try_cp = False # very computational expensive - set it to True to run conformal prediction
# +
# create folders where to store the output files
import os

# Create the output folders. exist_ok=True makes this idempotent and removes
# the check-then-create race of the original `if not os.path.exists(...)` form.
os.makedirs("results", exist_ok=True)
os.makedirs("figures", exist_ok=True)
# -
# The results shown in the paper were averaged over 50 train-test splits. The 50 random seeds used for the paper are:
# The 50 train/test-split seeds used to average the published results.
list_random_seeds_paper = [16, 102, 279, 314, 325, 376, 382, 398, 453, 490 ,
                           10, 133, 181, 202, 269, 304, 317, 392, 429, 447,
                           109, 124, 137, 145, 155, 170, 297, 435, 470, 481,
                           33, 37, 59, 76, 299, 340, 412, 444, 471, 493,
                           48, 82, 132, 175, 191, 253, 264, 364, 399, 478]
# <a id='section2.2'></a>
#
# ### 2.2 Serotonin Datasets
# This is a set of data about binding to serotonin receptors exported from ChEMBL.
# Load the pre-pickled serotonin bioactivity frame and the assay-id lookup.
with open('data/serotonin_data.pkl','rb') as inf:
    serotonin_d,assay_lookup = pickle.load(inf)
# Rank targets by dataset size, largest first: list of (n_records, target_id).
tpls = sorted([(len(v),k) for k,v in serotonin_d.groupby('target_chembl_id').groups.items()],reverse=True)
# **Run the `run_it_` function for all serotonin datasets**
def run_serotonin_assay(assay_id, d, assay_lookup, method = 'RF', descriptor = 'MorganFP', scale_features = False,
                        test_set_size = None, random_seed = None,
                        opt_oob = False, ThOpt_metrics = 'Kappa', thresholds=None, N_subsets = 100,
                        subsets_size = 0.2, with_replacement = False,
                        archive=None, archive_key = None, **kwargs):
    """Binarize one serotonin target's pChEMBL data into actives/inactives and
    run the full train/evaluate/threshold-optimize pipeline on it via run_it_.

    Results go into `archive` (created if None) under `archive_key` (defaults
    to assay_id); per-compound predictions are written to
    ./results/<method>_<descriptor>_predictions_<assay_id>.csv.
    """
    if archive is None:
        archive=defaultdict(list)
    if archive_key == None:
        archive_key = assay_id
    # these need to be converted into a classification problem.
    # start with a pretty strict cutoff for active/inactive:
    assay = d.loc[d['target_chembl_id']==assay_id]
    acts = assay.loc[assay['pchembl_value']>9.0]
    if len(acts)>=50:
        inacts = assay.loc[assay['pchembl_value']<8.5]
    else:
        # but relax that if we don't end up with a reasonable number of actives:
        acts = assay.loc[assay['pchembl_value']>8.0]
        inacts = assay.loc[assay['pchembl_value']<7.5]
    print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
    print(f'assay_id {assay_id}, organism: {assay_lookup[assay_id][0]}, target: {assay_lookup[assay_id][1]}')
    # copies ([:]) keep run_it_'s in-place column edits off the caller's frames
    df_preds = run_it_(archive_key, acts[:], inacts[:]
                       , archive, method = method, descriptor = descriptor,
                       scale_features = scale_features, test_set_size = test_set_size, random_seed = random_seed,
                       opt_oob = opt_oob, ThOpt_metrics = ThOpt_metrics, thresholds = thresholds,
                       N_subsets = N_subsets, subsets_size = subsets_size, with_replacement = with_replacement,
                       **kwargs)
    # save predictions
    df_preds.to_csv(f"./results/{method}_{descriptor}_predictions_{assay_id}.csv", index = False)
    print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n\n')
# Run it all:
# +
# parameters for threshold optimization - we use default values for most parameters
# (overrides the earlier defaults: ROC criterion, sampling with replacement, RF)
thresholds = np.round(np.arange(0.05,0.55,0.05),2)
random_seed = 16
#these are default:
ThOpt_metrics = 'ROC'
N_subsets = 100
subsets_size = 0.2
with_replacement = True
method = 'RF' # choose between RF, GB, XGB, and LR
opt_oob = True # only for RF, one can also optimize the threshold using the oob-based method
serotonin_archive = defaultdict(list)
# Run every serotonin target with more than 900 measurements.
for v,k in tpls:
    if v>900:
        run_serotonin_assay(k, serotonin_d, assay_lookup, method = method, descriptor = descriptor,
                            scale_features = scale_features, test_set_size = test_set_size,
                            ThOpt_metrics = ThOpt_metrics, N_subsets = N_subsets, subsets_size = subsets_size,
                            with_replacement = with_replacement, opt_oob = opt_oob,
                            archive=serotonin_archive, archive_key = k,
                            random_seed=random_seed)
# +
# NOTE(review): this cell repeats the loop above verbatim and re-initializes
# serotonin_archive first, discarding the previous cell's results -- confirm
# whether the duplicate run is intentional.
serotonin_archive = defaultdict(list)
for v,k in tpls:
    if v>900:
        run_serotonin_assay(k, serotonin_d, assay_lookup, method = method, descriptor = descriptor,
                            scale_features = scale_features, test_set_size = test_set_size,
                            ThOpt_metrics = ThOpt_metrics, N_subsets = N_subsets, subsets_size = subsets_size,
                            with_replacement = with_replacement, opt_oob = opt_oob,
                            archive=serotonin_archive, archive_key = k,
                            random_seed=random_seed)
# -
#### Save Results
# Pickle the archive of (label, threshold, kappa, confusion, auc) tuples.
# Use a context manager so the file handle is flushed and closed -- the
# original passed a bare open() to pickle.dump and never closed it.
with open(f'./results/serotonin_results_{method}_{descriptor}.pkl', 'wb') as outf:
    pickle.dump(serotonin_archive, outf)
# <a id='section2.3'></a>
#
# ### 2.3 "Dataset 1" (DS1) Datasets
#
# Now we'll work with the "Dataset 1" datasets from the benchmarking set. These are collections of diverse actives from various ChEMBL targets together with assumed inactives pulled from an older version of ZINC.
import glob
datasets = glob.glob('data/ChEMBL/cmp_list*_actives.dat.gz')
print(f"There are {len(datasets)} DS1 datasets")
# +
import re
# Shared decoy pool (assumed inactives from ZINC), reused for every target.
with gzip.open('data/ChEMBL/cmp_list_ChEMBL_zinc_decoys.dat.gz') as inf:
    inactive_df = pd.read_csv(inf,sep='\t')
# Map 'ChEMBL_<id>' -> DataFrame of actives; the id is parsed out of the file name.
chembl_active_sets = {}
for fn in datasets:
    nm = re.search(r'cmp_list_(ChEMBL_[0-9]*)_actives',fn).groups()[0]
    with gzip.open(fn) as inf:
        chembl_active_sets[nm] = pd.read_csv(inf,sep='\t')
# -
def run_dataset1_assay(assay_id, chembl_active_sets, inactive_df, factor=20, method = 'RF', descriptor = 'MorganFP', scale_features = False,
                       test_set_size = None, random_seed = None,
                       opt_oob = False, ThOpt_metrics = 'Kappa', thresholds=None, N_subsets = 100,
                       subsets_size = 0.2, with_replacement = False, try_resampling = False, try_brf = False,
                       try_cp = False, cv_folds = 5, nc_normalization = True, N_acp_models = 25,
                       archive=None, archive_key = None, **kwargs):
    """Train and evaluate one DS1 benchmark target.

    Actives come from ``chembl_active_sets[assay_id]``; ``factor`` assumed
    inactives per active are sampled from the shared ZINC decoy pool.  Results
    are appended to ``archive[archive_key]`` by ``run_it_`` and the per-compound
    predictions are written to ``./results/``.
    """
    import zlib  # local import: deterministic, process-independent per-assay seed
    if archive is None:
        archive = defaultdict(list)
    if archive_key is None:  # idiom fix: identity comparison with None
        archive_key = assay_id
    assay = chembl_active_sets[assay_id]
    acts = assay.rename(index=str, columns={'SMILES': 'canonical_smiles', '# _Name': 'compound_chembl_id'})
    # Bug fix: the original seeded the decoy sampling with hash(assay_id) % 0xf00d,
    # but str hashes are salted per process (PYTHONHASHSEED), so the "fixed"
    # random_state was not reproducible across runs.  crc32 is stable.
    seed = zlib.crc32(assay_id.encode()) % 0xf00d
    inacts = inactive_df.sample(n=factor * len(acts), random_state=seed).rename(
        index=str, columns={'SMILES': 'canonical_smiles', '# _Name': 'compound_chembl_id'})
    print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
    print(f'assay_id {assay_id}')
    df_preds = run_it_(archive_key, acts, inacts, archive, method = method, descriptor = descriptor,
                       scale_features = scale_features, test_set_size = test_set_size, random_seed = random_seed,
                       opt_oob = opt_oob, ThOpt_metrics = ThOpt_metrics, thresholds = thresholds,
                       N_subsets = N_subsets, subsets_size = subsets_size, with_replacement = with_replacement,
                       try_resampling = try_resampling, try_brf = try_brf, try_cp = try_cp, cv_folds = cv_folds,
                       nc_normalization = nc_normalization, N_acp_models = N_acp_models, **kwargs)
    # save predictions
    df_preds.to_csv(f"./results/{method}_{descriptor}_predictions_{assay_id}.csv", index = False)
    print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n\n')
# +
# Run every DS1 target with the globally configured method/descriptor settings.
ds1_archive = defaultdict(list)
for k in chembl_active_sets:
    run_dataset1_assay(k, chembl_active_sets, inactive_df, method = method, descriptor = descriptor,
                       scale_features = scale_features, test_set_size = test_set_size,
                       ThOpt_metrics = ThOpt_metrics, N_subsets = N_subsets, subsets_size = subsets_size,
                       with_replacement = with_replacement, opt_oob = opt_oob,
                       archive=ds1_archive, archive_key = k,
                       try_resampling = try_resampling, try_cp = try_cp, random_seed = random_seed)
# -
#### Save Results
pickle.dump(ds1_archive, open(f'./results/ds1_results_{method}_{descriptor}.pkl','wb+'))
# <a id='section2.4'></a>
#
# ### 2.4 PubChem Datasets
with gzip.open('data/ChEMBL_PubChem_HTS.csv.gz') as inf:
    pubchem_d = pd.read_csv(inf)
pubchem_d.head()
# NOTE(review): the pickle below overwrites the pubchem_d loaded just above —
# presumably it holds a pre-processed version; confirm the CSV load is still needed.
with open('data/pubchem_data.pkl','rb') as inf:
    pubchem_d,pubchem_assay_lookup = pickle.load(inf)
# +
def run_pubchem_assay(assay_id, d, assay_lookup, method = 'RF', descriptor = 'MorganFP', scale_features = False,
                      test_set_size = None, random_seed = None,
                      opt_oob = False, ThOpt_metrics = 'Kappa', thresholds=None, N_subsets = 100,
                      subsets_size = 0.2, with_replacement = False, try_resampling = False, try_brf = False,
                      try_cp = False, cv_folds = 5, nc_normalization = True, N_acp_models = 25,
                      archive=None, archive_key = None, **kwargs):
    """Train and evaluate one PubChem HTS assay.

    Rows of ``d`` matching ``assay_chembl_id == assay_id`` are split into
    actives / inactives from their ``activity_comment`` text (inconclusive
    results are treated as inactive).  Results are appended to
    ``archive[archive_key]`` by ``run_it_`` and the per-compound predictions
    are written to ``./results/``.
    """
    if archive is None:
        archive = defaultdict(list)
    if archive_key is None:  # idiom fix: identity comparison with None
        archive_key = assay_id
    assay = d.loc[d['assay_chembl_id'] == assay_id]
    # The activity labels are free text with inconsistent capitalisation;
    # concatenation order is preserved from the original implementation.
    acts = pd.concat((assay.loc[assay['activity_comment'] == 'Active'],
                      assay.loc[assay['activity_comment'] == 'active']))
    inacts = pd.concat((assay.loc[assay['activity_comment'] == 'inactive'],
                        assay.loc[assay['activity_comment'] == 'inconclusive'],
                        assay.loc[assay['activity_comment'] == 'Inconclusive'],
                        assay.loc[assay['activity_comment'] == 'Not Active']))
    print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
    print(f'assay_id {assay_id}, description: {assay_lookup[assay_id]}')
    df_preds = run_it_(archive_key, acts, inacts, archive, method = method, descriptor = descriptor,
                       scale_features = scale_features, test_set_size = test_set_size, random_seed = random_seed,
                       opt_oob = opt_oob, ThOpt_metrics = ThOpt_metrics, thresholds = thresholds,
                       N_subsets = N_subsets, subsets_size = subsets_size, with_replacement = with_replacement,
                       try_resampling = try_resampling, try_brf = try_brf, try_cp = try_cp, cv_folds = cv_folds,
                       nc_normalization = nc_normalization, N_acp_models = N_acp_models, **kwargs)
    # save predictions
    df_preds.to_csv(f"./results/{method}_{descriptor}_predictions_{assay_id}.csv", index = False)
    print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n\n')
# +
# Run every PubChem assay; a few assays have too little data and raise —
# those are reported and skipped so the sweep can continue.
pubchem_archive = defaultdict(list)
for k in pubchem_assay_lookup:
    try:
        run_pubchem_assay(k, pubchem_d, pubchem_assay_lookup, method = method, descriptor = descriptor,
                          scale_features = scale_features, test_set_size = test_set_size,
                          ThOpt_metrics = ThOpt_metrics, N_subsets = N_subsets, subsets_size = subsets_size,
                          with_replacement = with_replacement, opt_oob = opt_oob,
                          archive=pubchem_archive, archive_key = k,
                          try_resampling = try_resampling, try_cp = try_cp, random_seed = random_seed)
    except Exception as exc:
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit
        # and hid the actual error; report both the assay and the reason.
        print("Error: {}".format(k))
        print(exc)
# -
#### Save Results
pickle.dump(pubchem_archive, open(f'./results/pubchem_results_{method}_{descriptor}.pkl','wb+'))
# <a id='section2.5'></a>
#
# ### 2.5 ChEMBL DrugMatrix Datasets
with gzip.open('data/ChEMBL_DrugMatrix.csv.gz') as inf:
    drugmatrix_d = pd.read_csv(inf)
drugmatrix_d.head()
# NOTE(review): as with PubChem, the pickle below overwrites the CSV load above;
# confirm the CSV read is only a sanity check.
with open('data/drugmatrix_data.pkl','rb') as inf:
    drugmatrix_d,drugmatrix_assay_lookup = pickle.load(inf)
# +
def run_drugmatrix_assay(assay_id, d, assay_lookup, method = 'RF', descriptor = 'MorganFP', scale_features = False,
                         test_set_size = None, random_seed = None,
                         opt_oob = False, ThOpt_metrics = 'Kappa', thresholds=None, N_subsets = 100,
                         subsets_size = 0.2, with_replacement = False, try_resampling = False, try_brf = False,
                         try_cp = False, cv_folds = 5, nc_normalization = True, N_acp_models = 25,
                         archive=None, archive_key = None, **kwargs):
    """Train and evaluate one DrugMatrix assay.

    Compounds whose ``activity_comment`` starts with ``'Not Active'`` are
    inactive and those starting with ``'Active'`` are active ('Not Active'
    does not start with 'Active', so the two sets are disjoint).  Assays with
    fewer than 40 actives are skipped.  Results are appended to
    ``archive[archive_key]`` by ``run_it_``.
    """
    if archive is None:
        archive = defaultdict(list)
    if archive_key is None:  # idiom fix: identity comparison with None
        archive_key = assay_id
    assay = d.loc[d['assay_chembl_id'] == assay_id]
    # idiom fix: str.startswith instead of find(...) == 0 (same semantics)
    inact_indices = [x for x, y in enumerate(assay['activity_comment']) if y.startswith('Not Active')]
    act_indices = [x for x, y in enumerate(assay['activity_comment']) if y.startswith('Active')]
    acts = assay.iloc[act_indices]
    inacts = assay.iloc[inact_indices]
    print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
    if(len(act_indices)<40):
        print(f'>>>>> SKIPPING {assay_id} DUE TO INSUFFICIENT ACTIVES <<<<<< ')
        print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n\n')
        return
    print(f'assay_id {assay_id}, description: {assay_lookup[assay_id]}')
    df_preds = run_it_(archive_key, acts, inacts, archive, method = method, descriptor = descriptor,
                       scale_features = scale_features, test_set_size = test_set_size, random_seed = random_seed,
                       opt_oob = opt_oob, ThOpt_metrics = ThOpt_metrics, thresholds = thresholds,
                       N_subsets = N_subsets, subsets_size = subsets_size, with_replacement = with_replacement,
                       try_resampling = try_resampling, try_brf = try_brf, try_cp = try_cp, cv_folds = cv_folds,
                       nc_normalization = nc_normalization, N_acp_models = N_acp_models, **kwargs)
    # save predictions
    df_preds.to_csv(f"./results/{method}_{descriptor}_predictions_{assay_id}.csv", index = False)
    print('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n\n')
# +
# Run every DrugMatrix assay (assays with < 40 actives are skipped inside the helper).
drugmatrix_archive = defaultdict(list)
for k in drugmatrix_assay_lookup:
    run_drugmatrix_assay(k, drugmatrix_d, drugmatrix_assay_lookup, method = method, descriptor = descriptor,
                         scale_features = scale_features, test_set_size = test_set_size,
                         ThOpt_metrics = ThOpt_metrics, N_subsets = N_subsets, subsets_size = subsets_size,
                         with_replacement = with_replacement, opt_oob = opt_oob,
                         archive=drugmatrix_archive, archive_key = k,
                         try_resampling = try_resampling, try_cp = try_cp, random_seed = random_seed)
# -
#### Save Results
pickle.dump(drugmatrix_archive, open(f'./results/drugmatrix_results_{method}_{descriptor}.pkl','wb+'))
# <a id='section3'></a>
#
# # 3. Plot Results
# ### Functions
# **Functions to load the data**
def read_in_data(serotonin_archive, pubchem_archive, ds1_archive, drugmatrix_archive, value = 'kappa'):
    """Collect one metric from the four result archives into a single DataFrame.

    Each archive maps assay_id -> list of per-method result tuples of the form
    ``(method_name, threshold, kappa, accuracy, auc)``.  The requested metric
    (``value`` in {'kappa', 'threshold', 'accuracy', 'auc'}) is extracted for
    every method; 'assay' and 'archive' columns identify each row's origin.

    Raises ValueError for an unknown ``value`` (the original silently left the
    tuple index unbound, crashing later with UnboundLocalError).
    """
    # position of each metric inside the per-method result tuples
    value_to_idx = {'kappa': 2, 'threshold': 1, 'auc': 4, 'accuracy': 3}
    if value not in value_to_idx:
        raise ValueError(f"unknown value '{value}'; choose one of {sorted(value_to_idx)}")
    idx = value_to_idx[value]
    archive_names = ['serotonin', 'pubchem', 'ds1', 'drugmatrix']
    df_out = pd.DataFrame()
    for j, archive1 in enumerate([serotonin_archive, pubchem_archive, ds1_archive, drugmatrix_archive]):
        df_tmp = pd.DataFrame.from_dict(archive1, orient='index')
        # Column names come from the first serotonin entry: all archives are
        # assumed to contain the same methods in the same order.
        col_names = [x[0] for x in serotonin_archive[list(serotonin_archive.keys())[0]]]
        df_tmp.columns = col_names
        # pull element `idx` out of every result tuple
        df_metric = df_tmp.apply(lambda x: x.str[idx])
        df_metric['assay'] = df_metric.index
        df_metric['archive'] = archive_names[j]
        df_out = pd.concat([df_out, df_metric])
    df_out.reset_index(inplace=True, drop=True)
    return df_out
# **Functions for plotting the results**
# +
from matplotlib import cm
import matplotlib as mpl
def round_nearest(x, a):
    """Round *x* to the nearest integer multiple of *a*."""
    multiples = x / a
    return round(multiples) * a
# SCATTERPLOT OF THE KAPPA OBTAINED BY TWO DIFFERENT METHODS
# df_mean1: dataframe containing the results of the first method (unbalanced)
# df_mean2: dataframe containing the results of the second method (balanced)
# df_thresh2: dataframe containing the optimized thresholds
# df_auc: dataframe containing the calculated ROC-AUC. The ROC-AUC for the test set is in the standard/Th05 column
# xax: name of the column containing the results for method 1
# yax: name of the column containing the results for method 2
# errorbars, df_std1, df_std2: if errorbars = True and dataframes containing the standard deviation for df_mean1
# and df_mean2 are provided, then errorbars are also plotted
# COLOR according to the optimized threshold, SHAPE according to the AUC of the test set:
def scatterplot_results(df_mean1, df_mean2, df_thresh2, df_auc,
                        xax = 'Th05', yax = 'ThOpt', x_lab = "$\kappa$ (Th = 0.5)", y_lab = "$\kappa$ (Th = Opt)",
                        errorbars = False, df_std1 = None, df_std2 = None, output_name = "test",
                        cmap = None, norm = None, transparency = 0.9, markersize = 40,
                        labels_size = 20, tick_labels_size = 18, legend_labels_size = 14, map_ticks_size = 18):
    """Scatter the per-assay metric of method 2 (y, ``df_mean2[yax]``) against
    method 1 (x, ``df_mean1[xax]``).

    Points are coloured by the optimized threshold (``df_thresh2[yax]``) and
    shaped by the test-set ROC-AUC (``df_auc.standard``); the figure is saved
    to ``<output_name>.png``.  When ``errorbars`` is True, ``df_std1`` and
    ``df_std2`` must be DataFrames holding the per-assay standard deviations.
    """
    if cmap == None:
        cmap = mpl.cm.terrain
    if norm == None:
        norm = mpl.colors.Normalize(vmin=0.05, vmax=0.5)
    # compare sizes of datasets: drop assays present in only one of the frames
    # so all arrays below stay aligned row-for-row
    if df_mean1.shape[0] != df_mean2.shape[0]:
        exclude = list(set(df_mean1.assay) ^ set(df_mean2.assay))
        df_mean1 = df_mean1.loc[~df_mean1.assay.isin(exclude)]
        df_mean2 = df_mean2.loc[~df_mean2.assay.isin(exclude)]
        df_thresh2 = df_thresh2.loc[~df_thresh2.assay.isin(exclude)]
        df_auc = df_auc.loc[~df_auc.assay.isin(exclude)]
        if isinstance(df_std1, pd.DataFrame):
            df_std1 = df_std1.loc[~df_std1.assay.isin(exclude)]
        if isinstance(df_std2, pd.DataFrame):
            df_std2 = df_std2.loc[~df_std2.assay.isin(exclude)]
    # thresholds and test set auc (thresholds snapped to the 0.05 grid for colouring)
    thresh_round = [np.round(round_nearest(i,0.05),2) for i in list(df_thresh2[yax])]
    auc_test = np.array(list(df_auc.standard))
    # plot: one scatter call per AUC band so each band gets its own marker shape
    fig, ax = plt.subplots(figsize=(7, 5))
    plt.scatter(np.array(df_mean1[xax])[auc_test > 0.9], np.array(df_mean2[yax])[auc_test > 0.9],
                marker='o', s = markersize, linewidth=0,
                c=np.array(thresh_round)[auc_test > 0.9],
                label=r'$\rm AUC$ $>$ $0.9$', alpha = transparency, cmap=cmap, norm=norm, zorder = 2)
    plt.scatter(np.array(df_mean1[xax])[(auc_test <= 0.9) & (auc_test > 0.8)], np.array(df_mean2[yax])[(auc_test <= 0.9) & (auc_test > 0.8)],
                marker='v', s = markersize,linewidth=0, label=r'$0.8$ $<$ $\rm AUC$ $\leq$ $0.9$',
                c=np.array(thresh_round)[(auc_test <= 0.9) & (auc_test > 0.8)],
                cmap=cmap, norm=norm, alpha = transparency, zorder = 2)
    plt.scatter(np.array(df_mean1[xax])[auc_test <= 0.8], np.array(df_mean2[yax])[auc_test <= 0.8],
                marker='X', s = markersize, linewidth=0, label=r'$\rm AUC$ $\leq$ $0.8$',
                c=np.array(thresh_round)[auc_test <= 0.8],
                cmap=cmap, norm=norm, alpha = transparency, zorder = 2)
    # y = x reference line: points above it favour method 2
    plt.plot([0,1],[0,1],'k-', ls='--')
    plt.xlabel(x_lab, fontsize = labels_size)
    plt.ylabel(y_lab, fontsize = labels_size)
    plt.tick_params(labelsize = tick_labels_size)
    lg = plt.legend(loc = 'lower right', fontsize=legend_labels_size)
    # legend markers are recoloured black so the legend is colour-neutral
    lg.legendHandles[0].set_color('k')
    lg.legendHandles[1].set_color('k')
    lg.legendHandles[2].set_color('k')
    cbar = plt.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), orientation='vertical', )
    cbar.ax.tick_params(labelsize=map_ticks_size)
    cbar.set_label('Optimized Threshold', rotation=270, size=labels_size, labelpad=26)
    if errorbars:
        if not isinstance(df_std1, pd.DataFrame) or not isinstance(df_std2, pd.DataFrame):
            print("Error: specify df_std1 and df_std2 to plot the errorbars")
            # still save the (errorbar-free) figure before returning
            plt.savefig('{}.png'.format(output_name), dpi=600, bbox_inches='tight')
            return
        plt.errorbar(np.array(df_mean1[xax])[auc_test > 0.9], np.array(df_mean2[yax])[auc_test > 0.9],
                     xerr = np.array(df_std1[xax])[auc_test > 0.9], yerr = np.array(df_std2[yax])[auc_test > 0.9],
                     lw=2, capsize = 3, c='silver', alpha = 0.2, linestyle="None", zorder = 1)
        plt.errorbar(np.array(df_mean1[xax])[(auc_test <= 0.9) & (auc_test > 0.8)], np.array(df_mean2[yax])[(auc_test <= 0.9) & (auc_test > 0.8)],
                     xerr = np.array(df_std1[xax])[(auc_test <= 0.9) & (auc_test > 0.8)], yerr = np.array(df_std2[yax])[(auc_test <= 0.9) & (auc_test > 0.8)],
                     lw=2, capsize = 3, c='silver', alpha = 0.2, linestyle="None", zorder = 1)
        plt.errorbar(np.array(df_mean1[xax])[auc_test <= 0.8], np.array(df_mean2[yax])[auc_test <= 0.8],
                     xerr = np.array(df_std1[xax])[auc_test <= 0.8], yerr = np.array(df_std2[yax])[auc_test <= 0.8],
                     lw=2, capsize = 3, c='silver', alpha = 0.2, linestyle="None", zorder = 1)
    plt.savefig('{}.png'.format(output_name), dpi=600, bbox_inches='tight')
# COLOR according to the threshold difference between method 1 and 2, SHAPE according to the AUC of the test set:
# df_thresh1: dataframe containing the thresholds used by method 1
# df_thresh2: dataframe containing the thresholds used by method 2
def scatterplot_results_th_diff(df_mean1, df_mean2, df_thresh1, df_thresh2, df_auc,
                                xax = 'Th05', yax = 'ThOpt', x_lab = "$\kappa$ (Th = 0.5)", y_lab = "$\kappa$ (Th = Opt)",
                                errorbars = False, df_std1 = None, df_std2 = None, output_name = "test",
                                cmap = None, norm = None, transparency = 0.9, markersize = 40,
                                labels_size = 20, tick_labels_size = 18, legend_labels_size = 14, map_ticks_size = 18):
    """Scatter method 2 vs method 1 per assay, coloured by the absolute
    difference between the two methods' optimized thresholds.

    Marker shape encodes the test-set ROC-AUC band (``df_auc.standard``);
    the figure is saved to ``<output_name>.png``.
    """
    if cmap == None:
        cmap = mpl.cm.terrain
    if norm == None:
        norm = mpl.colors.Normalize(vmin=0.0, vmax=0.1)
    # compare sizes of datasets: drop assays present in only one frame so all
    # arrays stay row-aligned
    if df_mean1.shape[0] != df_mean2.shape[0]:
        exclude = list(set(df_mean1.assay) ^ set(df_mean2.assay))
        df_mean1 = df_mean1.loc[~df_mean1.assay.isin(exclude)]
        df_mean2 = df_mean2.loc[~df_mean2.assay.isin(exclude)]
        df_thresh1 = df_thresh1.loc[~df_thresh1.assay.isin(exclude)]
        df_thresh2 = df_thresh2.loc[~df_thresh2.assay.isin(exclude)]
        df_auc = df_auc.loc[~df_auc.assay.isin(exclude)]
        if isinstance(df_std1, pd.DataFrame):
            df_std1 = df_std1.loc[~df_std1.assay.isin(exclude)]
        if isinstance(df_std2, pd.DataFrame):
            df_std2 = df_std2.loc[~df_std2.assay.isin(exclude)]
    # thresholds and test set auc: snap both threshold sets to the 0.05 grid,
    # then colour by their absolute difference (also snapped)
    thresh1_round = [np.round(round_nearest(i,0.05),2) for i in list(df_thresh1[xax])]
    thresh2_round = [np.round(round_nearest(i,0.05),2) for i in list(df_thresh2[yax])]
    th_diff = list(abs(np.array(thresh1_round) - np.array(thresh2_round)))
    th_diff = [np.round(round_nearest(i,0.05),2) for i in th_diff]
    auc_test = np.array(list(df_auc.standard))
    # plot: one scatter call per AUC band so each band gets its own marker shape
    fig, ax = plt.subplots(figsize=(7, 5))
    plt.scatter(np.array(df_mean1[xax])[auc_test > 0.9], np.array(df_mean2[yax])[auc_test > 0.9],
                marker='o', s = markersize, c=np.array(th_diff)[auc_test > 0.9],linewidth=0,
                label=r'$\rm AUC$ $>$ $0.9$', alpha = transparency, cmap=cmap, norm=norm, zorder = 3)
    plt.scatter(np.array(df_mean1[xax])[(auc_test <= 0.9) & (auc_test > 0.8)], np.array(df_mean2[yax])[(auc_test <= 0.9) & (auc_test > 0.8)],
                marker='v', s = markersize, c=np.array(th_diff)[(auc_test <= 0.9) & (auc_test > 0.8)],linewidth=0,
                label=r'$0.8$ $<$ $\rm AUC$ $\leq$ $0.9$', alpha = transparency, cmap=cmap, norm=norm, zorder = 2)
    plt.scatter(np.array(df_mean1[xax])[auc_test <= 0.8], np.array(df_mean2[yax])[auc_test <= 0.8],
                marker='X', s = markersize, c=np.array(th_diff)[auc_test <= 0.8],linewidth=0,
                label=r'$\rm AUC$ $\leq$ $0.8$', alpha = transparency, cmap=cmap, norm=norm, zorder = 2)
    # y = x reference line
    plt.plot([0,1],[0,1],'k-', ls='--')
    plt.xlabel(x_lab, fontsize = labels_size)
    plt.ylabel(y_lab, fontsize = labels_size)
    plt.tick_params(labelsize = tick_labels_size)
    lg = plt.legend(loc = 'lower right', fontsize=legend_labels_size)
    # legend markers are recoloured black so the legend is colour-neutral
    lg.legendHandles[0].set_color('k')
    lg.legendHandles[1].set_color('k')
    lg.legendHandles[2].set_color('k')
    cbar = plt.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), orientation='vertical', ticks=[0.0, 0.05, 0.1])
    cbar.ax.tick_params(labelsize=map_ticks_size)
    cbar.set_label('Threshold Difference', rotation=270, size=labels_size, labelpad=20)
    cbar.ax.set_yticklabels(['0', '0.05', '> 0.1']) # vertically oriented colorbar
    if errorbars:
        if not isinstance(df_std1, pd.DataFrame) or not isinstance(df_std2, pd.DataFrame):
            print("Error: specify df_std1 and df_std2 to plot error bars")
            # still save the (errorbar-free) figure before returning
            plt.savefig('{}.png'.format(output_name), dpi=600, bbox_inches='tight')
            return
        plt.errorbar(np.array(df_mean1[xax])[auc_test > 0.9], np.array(df_mean2[yax])[auc_test > 0.9],
                     xerr = np.array(df_std1[xax])[auc_test > 0.9], yerr = np.array(df_std2[yax])[auc_test > 0.9],
                     lw=2, capsize = 3, c='silver', alpha = 0.2, linestyle="None", zorder = 1)
        plt.errorbar(np.array(df_mean1[xax])[(auc_test <= 0.9) & (auc_test > 0.8)], np.array(df_mean2[yax])[(auc_test <= 0.9) & (auc_test > 0.8)],
                     xerr = np.array(df_std1[xax])[(auc_test <= 0.9) & (auc_test > 0.8)], yerr = np.array(df_std2[yax])[(auc_test <= 0.9) & (auc_test > 0.8)],
                     lw=2, capsize = 3, c='silver', alpha = 0.2, linestyle="None", zorder = 1)
        plt.errorbar(np.array(df_mean1[xax])[auc_test <= 0.8], np.array(df_mean2[yax])[auc_test <= 0.8],
                     xerr = np.array(df_std1[xax])[auc_test <= 0.8], yerr = np.array(df_std2[yax])[auc_test <= 0.8],
                     lw=2, capsize = 3, c='silver', alpha = 0.2, linestyle="None", zorder = 1)
    plt.savefig('{}.png'.format(output_name), dpi=600, bbox_inches='tight')
# -
# ### Load Results
# model to read in
basename = f'{method}_{descriptor}'
output_folder = 'figures'
output_basename = f'{method}_{descriptor}'
# Bug fix: the archives loaded from disk were previously bound to *_archive1
# and never used — read_in_data silently reused the stale in-memory archives.
# Bind the pickles to the names the calls below actually consume.
serotonin_archive = pd.read_pickle(f"results/serotonin_results_{basename}.pkl")
pubchem_archive = pd.read_pickle(f"results/pubchem_results_{basename}.pkl")
ds1_archive = pd.read_pickle(f"results/ds1_results_{basename}.pkl")
drugmatrix_archive = pd.read_pickle(f"results/drugmatrix_results_{basename}.pkl")
df_kappa = read_in_data(serotonin_archive, pubchem_archive, ds1_archive, drugmatrix_archive, value = 'kappa')
df_auc = read_in_data(serotonin_archive, pubchem_archive, ds1_archive, drugmatrix_archive, value = 'auc')
df_thresholds = read_in_data(serotonin_archive, pubchem_archive, ds1_archive, drugmatrix_archive, value = 'threshold')
# ### Plots
# color map to display different thresholds
cmap3 = mpl.colors.LinearSegmentedColormap.from_list('custom', ['steelblue', 'paleturquoise', 'lightseagreen', 'limegreen', 'greenyellow', 'gold', 'lightyellow', 'sandybrown', 'darksalmon', 'indianred', 'saddlebrown'], N=10)
# color map to display threshold difference
cmap4 = mpl.colors.LinearSegmentedColormap.from_list('custom', ['#27647bff', '#849fadff', '#ca3542ff'], N=3)
# default threshold (0.5) vs GHOST-optimized threshold
scatterplot_results(df_kappa, df_kappa, df_thresholds, df_auc,
                    xax = 'standard', yax = 'GHOST', x_lab = "$\kappa$ (Th = 0.5)", y_lab = "$\kappa$ (Th = Opt)",
                    errorbars = False, df_std1 = None, df_std2 = None,
                    output_name = f"{output_folder}/scatterplot_{basename}_ThOpt_vs_Th05.png",
                    cmap = cmap3, norm = None, transparency = 1)
# GHOST vs the RF out-of-bag threshold optimization (only meaningful for RF)
if method == 'RF' and opt_oob:
    scatterplot_results_th_diff(df_kappa, df_kappa, df_thresholds, df_thresholds, df_auc,
                                xax = 'GHOST', yax = 'OobOpt', x_lab = "$\kappa$ (Th = Opt)", y_lab = "$\kappa$ (Th = OobOpt)",
                                errorbars = False, df_std1 = None, df_std2 = None,
                                output_name = f"{output_folder}/scatterplot_{basename}_ThOpt_vs_ThOob.png",
                                cmap = cmap4, norm = None, transparency = 1)
# GHOST vs the resampling baselines (SMOTE oversampling, RUS undersampling)
if try_resampling:
    scatterplot_results(df_kappa, df_kappa, df_thresholds, df_auc,
                        xax = 'SMOTE', yax = 'GHOST', x_lab = "$\kappa$ (SMOTE)", y_lab = "$\kappa$ (Th = Opt)",
                        errorbars = False, df_std1 = None, df_std2 = None,
                        output_name = f"{output_folder}/scatterplot_{basename}_GHOST_vs_SMOTE.png",
                        cmap = cmap3, norm = None, transparency = 1)
if try_resampling:
    scatterplot_results(df_kappa, df_kappa, df_thresholds, df_auc,
                        xax = 'RUS', yax = 'GHOST', x_lab = "$\kappa$ (RUS)", y_lab = "$\kappa$ (Th = Opt)",
                        errorbars = False, df_std1 = None, df_std2 = None,
                        output_name = f"{output_folder}/scatterplot_{basename}_GHOST_vs_RUS.png",
                        cmap = cmap3, norm = None, transparency = 1)
# ### Compare to CP
# +
import glob
if try_cp:
    # per-assay prediction CSVs written by the run_* helpers above
    files_preds = glob.glob(f"results/{basename}_predictions_*.csv")
    ACP_significance_level = 0.2
# -
# **Comparison between generalized threshold optimization and CP.**
#
# For a fair comparison, in the plot below, we compare only the predictions where CP produced statistically significant results (at a ACP significance level of 0.2):
# calculate Cohen's kappa only on single predictions (rows where the conformal
# predictor committed to exactly one class at the chosen significance level)
if try_cp:
    assay_ids = []
    kappa_cp_single = []
    kappa_th05_single = []
    kappa_thopt_single = []
    for f1 in files_preds:
        assay_ids.append(f1.split('_')[3].replace('.csv',''))
        df_preds1 = pd.read_csv(f1)
        # Bug fix: the original used chained assignment
        # (df_preds1['uncertain_CP'][mask] = 1), which may write to a copy and
        # silently leave the column all-zero; .loc guarantees in-place assignment.
        df_preds1['uncertain_CP'] = 0
        df_preds1.loc[df_preds1['p1'] >= df_preds1['p0'], 'uncertain_CP'] = 1
        # keep only single (non-empty, non-both) CP predictions
        tmp = df_preds1.loc[(df_preds1[f'y_pred_{ACP_significance_level}']=="0") | (df_preds1[f'y_pred_{ACP_significance_level}']=="1")]
        kappa_cp_single.append(metrics.cohen_kappa_score(tmp.y_true,tmp.uncertain_CP))
        kappa_th05_single.append(metrics.cohen_kappa_score(tmp.y_true,tmp.Th05))
        kappa_thopt_single.append(metrics.cohen_kappa_score(tmp.y_true,tmp.ThOpt))
    df_kappa = df_kappa.reset_index().rename(columns = {'index': 'assay_id'})
    df_cp = pd.DataFrame({'assay_id': assay_ids, 'CP_single': kappa_cp_single, 'Th05_single':kappa_th05_single, 'GHOST_single': kappa_thopt_single})
    df_kappa = pd.merge(df_kappa, df_cp, how = 'left', on = 'assay_id')
if try_cp:
    # GHOST vs conformal prediction, restricted to CP's single predictions
    scatterplot_results(df_kappa, df_kappa, df_thresholds.rename(columns = {'ThOpt': 'ThOpt_single'}), df_auc,
                        xax = 'CP_single', yax = 'GHOST_single', x_lab = "$\kappa$ (CP, single pred.)", y_lab = "$\kappa$ (Th = Opt, single pred.)",
                        errorbars = False, df_std1 = None, df_std2 = None,
                        output_name = f"{output_folder}/scatterplot_{basename}_GHOST_vs_CP.png",
                        cmap = cmap3, norm = None, transparency = 1)
|
notebooks/library_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import print_function
import torch
# Uninitialized tensor: values are whatever happens to be in memory.
x = torch.empty(5,3)
print(x)
print(x.type())
# Uniform random values in [0, 1).
x = torch.rand(5,3)
print(x)
# ### different data type
# - float: float32
# - long: long int
# - double: float64
x = torch.zeros(5,3, dtype=torch.long)
print(x)
print(x.type())
# default data type is in following case is float
x = torch.tensor([5.5,3])
print(x)
print(x.type())
# note the difference
x = torch.tensor([5,3])
print(x)
print(x.type())
# +
# new_* methods reuse the dtype/device of the source tensor unless overridden.
x = x.new_ones(5,3,dtype=torch.double)
print(x)
x = x.new_zeros(6,4)
print(x)
# *_like keeps the shape of x but here overrides the dtype.
x = torch.randn_like(x,dtype=torch.float)
print(x)
# -
print(x.size())
print(x.shape)
a = torch.rand(5,3)
b = torch.ones(5,3)
print(a)
print(b)
# Element-wise addition: operator and functional forms are equivalent.
print(a+b)
print(torch.add(a,b))
print(a)
print(b)
# A pre-allocated output tensor can receive the result via out=.
result = torch.empty(5,3)
print(result)
torch.add(b,a,out=result)
print(result)
print(a)
print(b)
# Methods with a trailing underscore mutate the tensor in place.
b.add_(a)
print(a)
print(b)
# NumPy-style indexing: first column of a.
print(a[:,0])
# +
# view() reshapes without copying; -1 infers the remaining dimension.
x = torch.randn(4,4)
print(x.type())
y = x.view(16)
print(x)
print(y)
z = x.view(-1,8)
print(z)
print(x.size(),y.size(),z.size())
# -
# item() extracts the Python scalar from a one-element tensor.
x = torch.randn(1)
print(x,x.size())
print(x.item())
a= torch.ones(5,3)
print(a.type())
print(a)
# numpy() returns a view sharing the same memory as the tensor.
b = a.numpy()
print(b)
print(type(b))
print(b.shape)
print(b.size)
# Mutating either side is visible through the other (shared storage).
b +=1
print(b)
print(a)
a.add_(1)
print(a)
print(b)
import numpy as np
a = np.ones(5)
# from_numpy also shares memory with the source ndarray.
b = torch.from_numpy(a)
np.add(a,1,out=a)
print(a)
print(type(a))
print(a.dtype)
print(b)
# #### 买的显卡终于有用了 (translation: "the GPU I bought is finally useful")
# cuda data
# +
# Move tensors between devices with .to(); operations require both operands
# on the same device.
x = torch.ones(4,1)
if torch.cuda.is_available():
    device = torch.device("cuda")
    y = torch.ones_like(x,device=device)
    x = x.to(device)
    z = x + y
    print(z)
    print(z.to("cpu",torch.double))
    print(z.to(torch.double))
    print(z.to("cpu"))
# -
|
how_to_use_pytorch/note1_BASIC_0.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:Coding-py39]
# language: python
# name: python3
# ---
# ### Purpose:
# - Download retrospective hourly analysis & assimilation NWM files from the Google Cloud archive
# - Obtain and plot the discharge time series of a stream of interest
# - Run the cells in order to import libraries, select parameters, define functions, download files, and plot a discharge series
#
# ### Inputs:
# - (1) begin date for data retrieval
# - (2) end date for data retrieval (inclusive)
# - (3) desired product (channel routing or forcing)
# - (4) directory for downloaded files
# + tags=[]
# import python libraries
import matplotlib.pyplot as plt
import os
import pandas as pd
import requests
import sys
import xarray as xr
from matplotlib.dates import DateFormatter
from time import sleep
# +
# select input parameters for the NWM retrospective download
begindate = '20190917' # data is avail. beginning 20180917
enddate = '20190927'
# enddate = '20190930' # inclusive
product = 'channel_rt' # 'channel_rt' or 'forcing'
destfolder_name = 'test2' # directory will be created in current working directory if it does not already exist
# +
# define functions
def get_netcdf(filetype, begin_date, end_date, output_folder_name):
    """Download hourly NWM short-range NetCDF files from the Google Cloud archive.

    For every day in [begin_date, end_date] (inclusive) and every hour 0-23,
    the f001 short-range file of the requested product is fetched and written
    under ``<cwd>/<output_folder_name>/<YYYYMMDD>/``.

    Parameters
    ----------
    filetype : 'channel_rt' or 'forcing'
    begin_date, end_date : anything ``pd.to_datetime`` accepts
    output_folder_name : created (with per-day subfolders) if missing
    """
    # Bug fix: max_attempts was referenced in the retry logic but never
    # defined, so any failed download raised NameError instead of retrying.
    max_attempts = 30
    output_folder = os.path.join(os.getcwd(), output_folder_name)
    os.makedirs(output_folder, exist_ok=True)
    if filetype == 'channel_rt':
        prodstr = ''
    elif filetype == 'forcing':
        prodstr = 'forcing_'
    else:
        print("Product error. Choose 'channel_rt' or 'forcing'.")
        sys.exit()
    dates = pd.date_range(pd.to_datetime(begin_date), pd.to_datetime(end_date))
    for date in dates:
        save_dir = date.strftime('%Y%m%d')
        day_folder = os.path.join(output_folder, save_dir)
        os.makedirs(day_folder, exist_ok=True)
        for hr in range(24):
            # NOTE: this pulls the *short_range* f001 forecast; the analysis_assim
            # (tm00) endpoint differs only in the path/file segments.
            url = f'https://storage.googleapis.com/national-water-model/' \
                  f'nwm.{date.strftime("%Y%m%d")}/{prodstr}short_range/' \
                  f'nwm.t{str(hr).zfill(2)}z.short_range.{filetype}.f001.conus.nc'
            filename = os.path.basename(url)
            write_file = os.path.join(output_folder, save_dir, filename)
            for attempt in range(max_attempts):
                try:
                    r = requests.get(url)
                    with open(write_file, 'wb') as f:
                        f.write(r.content)
                    break
                except Exception as ex:
                    if attempt != max_attempts - 1:
                        sleep(0.5)  # Give NOAA time to wake up
                    else:
                        m = 'Could not download file.\n' + str(ex)
                        raise Exception(m)
        print(f'{save_dir} done')
def get_series(comid, begin_date, end_date, datafolder_name, filetype):
    """Assemble an hourly streamflow series for one NWM reach.

    Reads every downloaded short-range f001 file for the date range and
    extracts the ``streamflow`` value of ``feature_id == comid``.

    Returns
    -------
    pandas.DataFrame indexed by timestamp with a single column 'Q' (cms).
    """
    data_folder = os.path.join(os.getcwd(), datafolder_name)
    dates = pd.date_range(pd.to_datetime(begin_date), pd.to_datetime(end_date))
    df = pd.DataFrame()
    for date in dates:
        date_dir = date.strftime('%Y%m%d')
        for hr in range(24):
            filename = f'nwm.t{str(hr).zfill(2)}z.short_range.{filetype}.f001.conus.nc'
            nc_file = os.path.join(data_folder, date_dir, filename)
            # Bug fix: the datasets were never closed, leaking one file handle
            # per hourly file; the context manager closes each one promptly.
            with xr.open_dataset(nc_file) as data:
                Q = float(data.sel(feature_id=comid).streamflow.values)
            timestamp = pd.to_datetime(f'{date_dir} {hr}:00')
            df.loc[timestamp, 'Q'] = Q
    return df
# +
# download files
get_netcdf(product,begindate,enddate,destfolder_name)
# +
# extract a discharge time series for an individual stream from the downloaded files
# this example shows a reach of the Sabine River in Southeast Texas during TS Imelda
# compare: https://nwis.waterdata.usgs.gov/nwis/uv?cb_00060=on&cb_00065=on&format=gif_default&site_no=08030500&period=&begin_date=2019-09-15&end_date=2019-09-30
comid = 1479611
# 1114345 # Neches River Saltwater Barrier 8331880 # Sabine river near USGS gage 08030500
sabine_timeseries_df = get_series(comid, begindate, enddate, destfolder_name, product)
# print (sabine_timeseries_df)
# Round-trip through CSV; the timestamp index becomes the first (unnamed) column.
sabine_timeseries_df.to_csv('NWM_discharge.csv')
nwm_discharge = pd.read_csv('NWM_discharge.csv')
# NOTE(review): the " " -> "Time" rename has no effect — pandas names the
# read-back index column "Unnamed: 0", not " "; confirm the intended mapping.
nwm_discharge = nwm_discharge.rename(columns={" ":"Time", "Q":"Discharge"})
nwm_discharge
# -
# -
# plot the discharge time series
plt.figure(facecolor='white')
plt.rc('font', size=14)
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(sabine_timeseries_df, color='tab:blue', label='Q')
ax.set(xlabel='Date',
       ylabel='Discharge [cms]',
       title=f'COMID {comid} from ' \
             f'{pd.to_datetime(begindate).strftime("%b %d %Y")} to ' \
             f'{pd.to_datetime(enddate).strftime("%b %d %Y")}')
# abbreviated month-day tick labels on the time axis
date_form = DateFormatter("%b %d")
ax.xaxis.set_major_formatter(date_form)
ax.legend()
ax.grid(True)
plt.savefig('Discharge_graph.jpg')
# +
# Combine with the USGS / DD6 gages: load the USGS observations file.
import json
import pandas as pd
# Bug fix: the file handle was opened and never closed — use a context manager.
with open("C:/Users/st35889/Coding/datasphere/data2/JAIT2.json") as file:
    obj = json.load(file)
# The JSON nests the series under locations[0].timeseries[0]; the column names
# arrive as a single comma-separated string.
data = obj['locations'][0]['timeseries'][0]['data']
cols = obj['locations'][0]['timeseries'][0]['columns'].split(',')
df = pd.DataFrame(data=data, columns=cols)
print(df)
# -
# Import the synthetic rating curves (HAND) used to convert discharge to stage.
import pickle
# Bug fix: unclosed file handle — use a context manager.
with open("D:/Sujana/Project/rating_curves_dict.p", "rb") as fh:
    rating_curves = pickle.load(fh)
# Get relevant rating curve table for HUC6 120402
rc120402 = rating_curves['120402']
# Get the rating curve for the specific stream (same COMID as the NWM series above).
rating_curve_gage = rc120402[rc120402['CatchId']==1479611]
# View rating curve data
rating_curve_gage
# Interpolation to convert NWM discharge to stage
from scipy.interpolate import interp1d
discharge_list = list(rating_curve_gage['Discharge (m3s-1)'])
stage_list = list(rating_curve_gage['Stage'])
y_interp = interp1d(discharge_list, stage_list, fill_value='extrapolate')
# Bug fix: the original appended float(y_interp(discharge)) with `discharge`
# undefined (NameError). The apparent intent is to convert the whole NWM
# series to stage — TODO confirm against the notebook author.
interpolated_stage = [float(y_interp(q)) for q in nwm_discharge['Discharge']]
# +
import pandas as pd
# Reload the saved NWM series and give the columns friendly names.
df = pd.read_csv('NWM_discharge.csv')
df.head()
# NOTE(review): this assumes exactly two columns in the CSV, and it rebinds
# `df`, clobbering the USGS frame built in the previous cell — confirm intended.
df.columns=['Time','Discharge']
df.head()
# -
|
NWM_DD6_combined.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## An RNA-Seq analysis of output from Cuffdiff (produced using the Tailor pipeline) using python instead of R
# First we'll import some needed packages - matplotlib for plotting, and the pandas package for analyzing tables of data. We often use shorthands for these...
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import sys
import pprint
import vcf as vcf
import allel as al
from pysam import VariantFile
# %matplotlib inline
# + language="bash"
# pwd
# -
# Load the raw gene count matrix (genes x samples) produced by the Tailor pipeline.
gene_counts = '../data/gene_count_matrix.tsv'
gene_data = pd.read_table(gene_counts)
gene_data.head()
samples = list(gene_data.columns)
# Split samples into control and LUTS groups by column position.
# NOTE(review): samples[0:8] takes columns 0-7 but samples[9:] starts at 9,
# silently skipping column 8 — confirm whether that column is a gene-id
# column or an accidentally dropped sample.
ctrl_genes=gene_data[samples[0:8]]
luts_genes=gene_data[samples[9:]]
luts_genes.head()
luts_genes.plot()
ctrl_genes.plot()
# Log-scaled histograms: raw count data is heavily right-skewed.
ctrl_genes.hist(log=True, bins=50)
luts_genes.hist(log=True, bins=50)
# NOTE(review): `genes_with_counts` is never defined above — these two
# statements raise NameError when run top-to-bottom (perhaps `gene_data`
# or the later `genes_have_counts_data` was intended).
genes_with_counts.shape
# When would the above ways of selecting a column be more or less convenient?
genes_with_counts.max()
gene_sig = '../data/gene_exp_diff.tab'
sig_genes = pd.read_table(gene_sig)
sig_genes.set_index('genes')
#logq_vals = sig_genes.columns.set_index[['genes']]
#logq_vals.head()
#log_q_genes=sig_genes[["genes","logFC","q_value"]]
# Let's create a new data frame, `gene_count_data` that omits the length column.
# _Hands on. There's a few ways to do this - the manual way..._
names = list(gene_data.columns)
gene_count_data = gene_data[names[1:]]
gene_count_data.head()
gene_count_data = gene_data[gene_data.columns[1:]]
gene_count_data.head()
# Now that we've got the counts, we can look at plots a little more sensibly:
gene_count_data.hist(log=True, bins=50, range=[0,1178150])
# Ok, now we want to skip rows that have all-zero (or maybe all-below-some-threshold) number of counts. Let's play around with data frame rows.
#
# When you index a data frame with a list or with a name, it selects columns; if you just use numbers, it selects rows:
# You can select rows not just by numbers, but by whether a condition is true or false
# So this gives us a way to select our rows... we just need a way to figure out if all of the relevant columns pass some criterion. This would work:
# But it's sort of too terrible to contemplate (what if we had 30 samples?)
(gene_count_data > 0).head()
(gene_count_data > 0).all()
(gene_count_data > 0).all(axis=1)
expressed_genes=gene_count_data[(gene_count_data > 0).all(axis=1)]
expressed_genes
expressed_genes.describe()
expressed_genes.hist(log=True, bins=50)
# _Hands on - create a function which takes such a raw gene dataframe, applies a count threshold, and gets rid of the lengths_
def gene_counts_above_threshold(dataframe, threshold=0):
    """Drop the 'length' column and keep only rows whose counts all exceed *threshold*.

    dataframe: pandas DataFrame of gene counts, possibly with a 'length' column.
    threshold: every remaining column value must be strictly greater than this
        for the row to be kept (default 0, i.e. "expressed in every sample").

    Returns a new DataFrame without 'length', restricted to qualifying rows.
    Note: Index.difference sorts the surviving columns alphabetically.
    """
    counts_only = dataframe[dataframe.columns.difference(["length"])]
    row_passes = (counts_only > threshold).all(axis=1)
    return counts_only.loc[row_passes]
help(gene_counts_above_threshold)
genes_have_counts_data = gene_counts_above_threshold(expressed_genes, 5)
genes_have_counts_data.shape
genes_have_counts_data.head()
genes_have_counts_data.hist(log=True, bins=50)
# Ok, so now we have the rows and columns we want, and we're ready to start doing some very crude analyses.
# However, we need to scale the data across samples; we might just have more data overall from one sample
# so that the counts would seem elevated compared to another. A simple but crude way to do that scaling is
# to scale by some average number of counts:
genes_have_counts_data.mean()
scaled_data = genes_have_counts_data / genes_have_counts_data.mean()
scaled_data.head()
scaled_data.describe()
# Scaling by mean is actually a pretty bad idea, because a few highly-expressed genes can badly skew the mean.
# A more robust method is to scale by the median - do that now
scaled_data = genes_have_counts_data / genes_have_counts_data.median()
print(scaled_data.head())
print(scaled_data.describe())
# Let's do a little sanity check, and use a PCA to see if the samples we expect to be most similar in gene counts
# in fact are - we expect the control samples (first three) to cluster together, and the treatment samples (last three)
# to cluster together
# +
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
pca = PCA(n_components=2).fit_transform(scale(scaled_data).T)
print(pca)
plt.scatter(pca[:,0], pca[:,1])
plt.xlabel("1st PCA")
plt.ylabel("2nd PCA")
# -
# So this seems promising. Now let's generate an MA plot - mean expression of the gene across all samples,
# _vs_ the change in expression, all log2 scaled
np.log2(scaled_data.mean(axis=1))[:5]
# +
A_untreated = np.log2(scaled_data[scaled_data.columns[0:3]].mean(axis=1))
A_treated = np.log2(scaled_data[scaled_data.columns[3:7]].mean(axis=1))
A = 1./2. *(A_untreated + A_treated)
M = A_treated - A_untreated
print(A_untreated[:5])
print(A_treated[:5])
print(M[:5])
# -
scaled_data['M'] = M
scaled_data['A'] = A
plt.scatter(scaled_data.A, scaled_data.M)
plt.xlabel('A')
plt.ylabel('M')
# Now we're going to make a volcano plot: p-value of change in expression vs change in expression
#
# We'll calculate the p-values _very_ crudely by doing a 2-sided independent t-test of the scaled counts in the
# control and treatment samples. This will require looking at the data a little differently - we're going to
# be processing subsets of columns for each row.
from scipy.stats import ttest_ind
onerow = scaled_data[0:1]
onerow.values[0]
onerow.values[0][0:3]
ttest_ind(onerow.values[0][0:3], onerow.values[0][3:6])
ttest_ind(onerow.values[0][0:3], onerow.values[0][3:7]).pvalue
def gene_pvalue(dataframe_row):
    """Two-sided independent t-test p-value for one gene's row of scaled counts.

    The first three entries are treated as the control group and entries
    3..6 (inclusive) as the treatment group.
    """
    control_counts, treatment_counts = dataframe_row[:3], dataframe_row[3:7]
    _statistic, p_value = ttest_ind(control_counts, treatment_counts)
    return p_value
gene_pvalue(onerow.values[0])
foo = scaled_data.apply(gene_pvalue, axis=1)
# Hands on: make a "p_value" column in the scaled_data dataframe, and plot M along x axis and -log10 p_value along y axis
scaled_data['p_value'] = scaled_data.apply(gene_pvalue, axis=1)
plt.scatter(scaled_data['M'], -np.log10(scaled_data['p_value']))
plt.xlabel('Log2 change in expression')
plt.ylabel('-Log10 p_value')
sorted_data = scaled_data.sort_values('p_value')
sorted_data.shape
sorted_data.head()
# Now you could start applying the appropriate multiple-hypothesis testing corrections to see if any of these changes were significant. A Bonferroni correction would look like
sorted_data['bonferroni'] = sorted_data['p_value'] * sorted_data.shape[0]
sorted_data.head()
# ## Advanced - simple aggregation
# +
genes_with_families = genes_have_counts_data
def phony_family_from_name(gene_name):
    """Derive a fake 'family' id from the last character of a gene name (must be a digit)."""
    final_char = gene_name[-1]
    return int(final_char)
families = list(map(phony_family_from_name, genes_have_counts_data.index))
genes_with_families['family'] = families
# -
genes_with_families.head()
# We can do simple things like find out how many are in each family:
genes_with_families['family'].value_counts()
# Now we can start aggregating data by family:
family_data = genes_with_families.groupby('family').aggregate('mean')
family_data.head()
family_data.plot(kind='bar')
|
jupyter-notebooks/diff-expr-of-cuffdiff-output-w-pandas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ahalya24/Ahalya24.github.io/blob/master/char_rnn_ethinicity.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="HmtalCqrjtvu" colab_type="code" colab={}
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string
import torch
import torch.nn as nn
import random
import time
import math
# %matplotlib inline
# + id="xgnjF2-ftfrj" colab_type="code" colab={}
from google.colab import drive
drive.mount('/content/drive')
# + id="D1QdA_quOV7C" colab_type="code" outputId="5fdbcc4e-7a8f-4da8-ff40-dbcdd236e5d7" colab={"base_uri": "https://localhost:8080/", "height": 34}
path = '/content/*.txt'
files = glob.glob(path)
len(files)
# + id="FNzjjFG6PHTJ" colab_type="code" outputId="f889d5b1-b403-4f60-cb8c-e87ae67fcbf4" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Character vocabulary: ASCII letters plus a handful of punctuation marks
# that appear in names.
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)

def unicodeToAscii(s):
    """Strip accents and drop any character outside *all_letters*.

    NFD-decompose the string, then discard combining marks (category 'Mn')
    and anything not in the vocabulary.
    Technique from http://stackoverflow.com/a/518232/2809427.
    """
    kept = []
    for ch in unicodedata.normalize('NFD', s):
        if unicodedata.category(ch) != 'Mn' and ch in all_letters:
            kept.append(ch)
    return ''.join(kept)

print(unicodeToAscii('Ślusàrski'))
# + id="h-GT0DbKjtv9" colab_type="code" outputId="0b8c63b1-47e4-401a-9708-765dd0f62e3a" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []
# Read a file and split into lines
def readLines(filename):
    """Read *filename* (UTF-8) and return its whitespace-separated tokens, ASCII-folded.

    NOTE(review): despite the name, str.split() with no argument splits on ANY
    whitespace, not just newlines — confirm that is intended for these data files.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle open).
    with open(filename, encoding='utf-8') as handle:
        tokens = handle.read().split()
    return [unicodeToAscii(token) for token in tokens]
for filename in glob.glob(path):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
lines = [l.lower() for l in lines]
category_lines[category] = lines
n_categories = len(all_categories)
print (n_categories)
# + id="vYKsIK1UpAOn" colab_type="code" outputId="2c1ea62f-f338-437e-8565-256ee23d7e44" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(category_lines['Arabic'][:5])
# + id="zKOTxPHCjtwP" colab_type="code" outputId="7408116c-3cd4-4831-9e15-967d1f9ce0bc" colab={"base_uri": "https://localhost:8080/", "height": 105}
# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
    """Return the position of *letter* within all_letters (-1 if absent)."""
    return all_letters.find(letter)

def letterToTensor(letter):
    """One-hot encode a single letter as a <1 x n_letters> tensor (demo helper)."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0][letterToIndex(letter)] = 1
    return one_hot

def lineToTensor(line):
    """Encode *line* as a <len(line) x 1 x n_letters> stack of one-hot letter vectors."""
    encoded = torch.zeros(len(line), 1, n_letters)
    for position, letter in enumerate(line):
        encoded[position][0][letterToIndex(letter)] = 1
    return encoded

print(letterToTensor('J'))
print(lineToTensor('Jones').size())
# + id="6vrH0-FPjtwY" colab_type="code" colab={}
class RNN(nn.Module):
    """Minimal character-level RNN cell.

    Two linear layers read the concatenation of the current input and the
    previous hidden state: one produces the next hidden state, the other the
    (log-softmaxed) category scores.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Layer creation order is kept stable so random weight init is reproducible.
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """One timestep: return (log-prob output, new hidden state)."""
        combined = torch.cat((input, hidden), 1)
        return self.softmax(self.i2o(combined)), self.i2h(combined)

    def initHidden(self):
        """Fresh all-zero hidden state of shape <1 x hidden_size>."""
        return torch.zeros(1, self.hidden_size)
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
# + id="7oO9SuY-jtwf" colab_type="code" colab={}
input = letterToTensor('A')
hidden =torch.zeros(1, n_hidden)
output, next_hidden = rnn(input, hidden)
# + id="icI12s38jtwp" colab_type="code" outputId="47193368-c3a8-4ce8-eeeb-33c2c238ea79" colab={"base_uri": "https://localhost:8080/", "height": 70}
input = lineToTensor('Mohammed')
hidden = torch.zeros(1, n_hidden)
output, next_hidden = rnn(input[0], hidden)
print(output)
# + id="yjonRXnBjtw0" colab_type="code" outputId="46d6845b-f36b-4bf2-81f4-a87467a1e499" colab={"base_uri": "https://localhost:8080/", "height": 34}
def categoryFromOutput(output):
    """Map a network output (log-probabilities) to (category name, category index)."""
    _score, best = output.topk(1)
    best_index = best[0].item()
    return all_categories[best_index], best_index
print(categoryFromOutput(output))
# + [markdown] id="UM_v9R7Rjtw8" colab_type="text"
# We will also want a quick way to get a training example (a name and its
# language):
#
#
#
# + id="oBTSi0tdjtw9" colab_type="code" outputId="9121099e-8826-4338-a42c-4f6df9acfd15" colab={"base_uri": "https://localhost:8080/", "height": 193}
def randomChoice(l):
    """Pick a uniformly random element of *l* (random.choice draws the same
    underlying _randbelow(len(l)) value as l[random.randint(0, len(l)-1)])."""
    return random.choice(l)

def randomTrainingExample():
    """Draw one random (category, name) training pair plus its tensor encodings."""
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    target = torch.tensor([all_categories.index(category)], dtype=torch.long)
    return category, line, target, lineToTensor(line)
for i in range(10):
category, line, category_tensor, line_tensor = randomTrainingExample()
print('category =', category, '/ line =', line)
# + id="90EuDWM2jtxG" colab_type="code" colab={}
criterion = nn.NLLLoss()
# + id="t0w7np83jtxN" colab_type="code" colab={}
learning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn
def train(category_tensor, line_tensor):
    """Run one manual-SGD step on a single (name, category) example.

    category_tensor: LongTensor with the target category index.
    line_tensor: <len x 1 x n_letters> one-hot encoding of the name.
    Returns (final network output, scalar loss value).
    """
    hidden = rnn.initHidden()
    rnn.zero_grad()
    # Feed the name one character at a time, carrying the hidden state forward.
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    loss = criterion(output, category_tensor)
    loss.backward()
    # Manual SGD update: p <- p - learning_rate * grad.
    # The old two-positional form add_(-lr, grad) is deprecated/removed in
    # modern PyTorch; the scale factor must be passed as alpha=.
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)
    return output, loss.item()
# + id="TQbmO8qXjtxV" colab_type="code" outputId="d7437013-4cb2-4829-b4d8-c244a962eeb6" colab={"base_uri": "https://localhost:8080/", "height": 368}
n_iters = 100000
print_every = 5000
plot_every = 1000
# Keep track of losses for plotting
current_loss = 0
all_losses = []
def timeSince(since):
    """Format wall-clock time elapsed since *since* (a time.time() stamp) as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes = math.floor(elapsed / 60)
    seconds = elapsed - minutes * 60
    return '%dm %ds' % (minutes, seconds)
start = time.time()
for iter in range(1, n_iters + 1):
category, line, category_tensor, line_tensor = randomTrainingExample()
output, loss = train(category_tensor, line_tensor)
current_loss += loss
# Print iter number, loss, name and guess
if iter % print_every == 0:
guess, guess_i = categoryFromOutput(output)
correct = '✓' if guess == category else '✗ (%s)' % category
print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))
# Add current loss avg to list of losses
if iter % plot_every == 0:
all_losses.append(current_loss / plot_every)
current_loss = 0
# + [markdown] id="7U7ta_IJjtxe" colab_type="text"
# Plotting the Results
# --------------------
#
# Plotting the historical loss from ``all_losses`` shows the network
# learning:
#
#
#
# + id="JMJ37cvSjtxh" colab_type="code" outputId="31b2dbc9-5459-40e1-c972-a7aa6c468712" colab={"base_uri": "https://localhost:8080/", "height": 286}
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.figure()
plt.plot(all_losses)
# + [markdown] id="ye3FSH9Ajtxp" colab_type="text"
# Evaluating the Results
# ======================
#
# To see how well the network performs on different categories, we will
# create a confusion matrix, indicating for every actual language (rows)
# which language the network guesses (columns). To calculate the confusion
# matrix a bunch of samples are run through the network with
# ``evaluate()``, which is the same as ``train()`` minus the backprop.
#
#
#
# + id="yufn3visjtxr" colab_type="code" outputId="9b7a3f93-3a7a-471f-c678-e5eac3bc8a74" colab={"base_uri": "https://localhost:8080/", "height": 321}
# Keep track of correct guesses in a confusion matrix
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000
# Just return an output given a line
def evaluate(line_tensor):
    """Forward pass only (no backprop): final network output for one encoded name."""
    hidden = rnn.initHidden()
    for step in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[step], hidden)
    return output
# Go through a bunch of examples and record which are correctly guessed
for i in range(n_confusion):
category, line, category_tensor, line_tensor = randomTrainingExample()
output = evaluate(line_tensor)
guess, guess_i = categoryFromOutput(output)
category_i = all_categories.index(category)
confusion[category_i][guess_i] += 1
# Normalize by dividing every row by its sum
for i in range(n_categories):
confusion[i] = confusion[i] / confusion[i].sum()
# Set up plot
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)
# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# sphinx_gallery_thumbnail_number = 2
plt.show()
# + [markdown] id="P_8PMO4sjtx1" colab_type="text"
# Running on User Input
# ---------------------
#
#
#
# + id="c0x5JPXYjtx2" colab_type="code" colab={}
def predict(input_line, n_predictions=3):
    """Print and return the top-n (log-probability, category) guesses for *input_line*."""
    print('\n> %s' % input_line)
    with torch.no_grad():
        output = evaluate(lineToTensor(input_line))
        # Get top N categories
        topv, topi = output.topk(n_predictions, 1, True)
        predictions = []
        for i in range(n_predictions):
            value = topv[0][i].item()
            category_index = topi[0][i].item()
            print('(%.2f) %s' % (value, all_categories[category_index]))
            predictions.append([value, all_categories[category_index]])
    # FIX: the predictions list was built but never returned to the caller.
    return predictions
# + id="_2wu9-qA3gSM" colab_type="code" outputId="b2d5e7b1-ca35-4014-d220-4e8e4542f42c" colab={"base_uri": "https://localhost:8080/", "height": 105}
predict ('latt')
|
char_rnn_ethinicity.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img src='Hello.gif' alt="drawing" style="width:600px;"/>
# + [markdown] slideshow={"slide_type": "slide"}
# # IDEA
# + [markdown] slideshow={"slide_type": "slide"}
# Multi-objective Optimization over AutoML
# + [markdown] slideshow={"slide_type": "slide"}
# # PROBLEM
# + [markdown] slideshow={"slide_type": "slide"}
# | Example | Correctness (Bias-Variance) | Confidence | Explainability |
# |----------------------|-------------|------------|----------------|
# | Siri | High | Low | Low |
# | Stock Prices | High | High | Low |
# | Macroeconomic Models | Low | High | High |
# | Medical Imaging | High | High | High |
#
# + [markdown] slideshow={"slide_type": "slide"}
# | Scenario        | Method                  | Cheap          | Scalable  |
# |-----------------|-------------------------|----------------|-----------|
# | Embedded System | Least-square Regression | Yes | No |
# | Large cluster | Alex-net (Deep CNN's) | No | Yes |
# + [markdown] slideshow={"slide_type": "slide"}
# - __Accuracy__
# - Reliability in Production
# - Changes in data
# - Random initializations
# - Size and complexity
#     - Interpretability
# - Federated Learning
# - Edge Applications
#     - Serving across a cluster
# - Cost
# - Time
# - Cloud credits
# + [markdown] slideshow={"slide_type": "slide"}
# $$\large{ \textbf{AutoML} }$$
# $$\large{ \cap}$$
# $$\large{ \textbf{Interactive Optimization} }$$
# $$\large{\cap }$$
# $$\large{\textbf{Multi-objective Optimization} }$$
# + [markdown] slideshow={"slide_type": "slide"}
# # Relevance
# __1. Genetic Algorithms for Deep Learning Architecture Search__
# <NAME>., <NAME>. and <NAME>., 2017, July. A genetic programming approach to designing convolutional neural network architectures. In Proceedings of the Genetic and Evolutionary Computation Conference (pp. 497-504). ACM.
# __Cited by 114__
#
# [BLOG: Genetic Algorithm Deep Learning Search](dirtydata.co.za)
#
# + [markdown] slideshow={"slide_type": "slide"}
# __2. Reinforcement Learning__
# 
# + slideshow={"slide_type": "fragment"}
from IPython.display import HTML
HTML(""" '<!-- Place this tag in your head or just before your close body tag. -->
<script async defer src='https://buttons.github.io/buttons.js'></script>
<!-- Place this tag where you want the button to render. -->
<a class='github-button' href='https://github.com/deepmind/bsuite/subscription'
data-icon='octicon-eye' data-show-count='true'
aria-label='Watch deepmind/bsuite on GitHub'>Watch</a>"""
)
# + [markdown] slideshow={"slide_type": "slide"}
# ## HYPERPARAMETER OPTIMIZATION
# + [markdown] slideshow={"slide_type": "slide"}
# ### Papers
# + [markdown] slideshow={"slide_type": "slide"}
# <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016. Hyperband: A novel bandit-based approach to hyperparameter optimization. arXiv preprint arXiv:1603.06560.
# __Cited by 246__
#
# + slideshow={"slide_type": "fragment"}
from IPython.display import HTML
HTML(""" '<!-- Place this tag in your head or just before your close body tag. -->
<script async defer src='https://buttons.github.io/buttons.js'></script>
<!-- Place this tag where you want the button to render. -->
<a class='github-button' href='https://github.com/sdask/dask-ml/subscription'
data-icon='octicon-eye' data-show-count='true'
aria-label='Watch dask/dask-ml on GitHub'>Watch</a>"""
)
# + slideshow={"slide_type": "fragment"}
from IPython.lib.display import YouTubeVideo
YouTubeVideo('x67K9FiPFBQ')
# + [markdown] slideshow={"slide_type": "slide"}
# $$+$$
#
# + [markdown] slideshow={"slide_type": "slide"}
# <NAME>., <NAME>. and <NAME>., 2013. Multi-task bayesian optimization. In Advances in neural information processing systems (pp. 2004-2012).
# __Cited by 269__
#
# + slideshow={"slide_type": "fragment"}
from IPython.display import HTML
HTML(""" '<!-- Place this tag in your head or just before your close body tag. -->
<script async defer src='https://buttons.github.io/buttons.js'></script>
<!-- Place this tag where you want the button to render. -->
<a class='github-button' href='https://github.com/scikit-optimize/scikit-optimize/subscription'
data-icon='octicon-eye' data-show-count='true'
aria-label='Watch scikit-optimize/scikit-optimize on GitHub'>Watch</a>"""
)
# + slideshow={"slide_type": "fragment"}
from IPython.lib.display import YouTubeVideo
YouTubeVideo('jtRPxRnOXnk')
# + slideshow={"slide_type": "slide"}
from IPython.lib.display import IFrame
IFrame('https://open.spotify.com/embed-podcast/episode/4hodYBMVlD1xqXi6iPfl5Y', width="100%", height="232")
# + [markdown] slideshow={"slide_type": "slide"}
# $$=$$
# + [markdown] slideshow={"slide_type": "slide"}
# <NAME>., <NAME>. and <NAME>., 2018. BOHB: Robust and efficient hyperparameter optimization at scale. arXiv preprint arXiv:1807.01774.
# __Cited by 44__
# + slideshow={"slide_type": "fragment"}
from IPython.display import HTML
HTML(""" '<!-- Place this tag in your head or just before your close body tag. -->
<script async defer src='https://buttons.github.io/buttons.js'></script>
<!-- Place this tag where you want the button to render. -->
<a class='github-button' href='https://github.com/automl/HpBandSter/subscription'
data-icon='octicon-eye' data-show-count='true'
aria-label='Watch automl/HpBandSter on GitHub'>Watch</a>"""
)
# + slideshow={"slide_type": "fragment"}
from IPython.lib.display import YouTubeVideo
YouTubeVideo('IqQT8se9ofQ')
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ### Take away
# _"[...] using so-called low fidelity approximations of the actual loss function to minimize. These approximations introduce a tradeoff between optimization performance and runtime [that] often outweigh the approximation error."_ - <NAME>. and <NAME>., 2019. Hyperparameter optimization. In Automated Machine Learning (pp. 3-33). Springer, Cham.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# # AUTOML
# + [markdown] slideshow={"slide_type": "slide"}
# ### Papers
# + [markdown] slideshow={"slide_type": "slide"}
# <!---One approach for handling arbitrary-sized pipelines more natively is the
# tree-structured pipeline optimization toolkit (TPOT [112], see also Chapter 8),
# which uses genetic programming and describes possible pipelines by a grammar.
# TPOT uses multi-objective optimization to trade off pipeline complexity with
# performance to avoid generating unnecessarily complex pipelines.
# A different pipeline creation paradigm is the usage of hierarchical planning;
# the recent ML-Plan [105, 98] -->
#
# <NAME>., <NAME>., <NAME>. and <NAME>., 2016, July. Evaluation of a tree-based pipeline optimization tool for automating data science. In Proceedings of the Genetic and Evolutionary Computation Conference 2016 (pp. 485-492). ACM.
# __Cited by 102__
#
# + slideshow={"slide_type": "fragment"}
from IPython.display import HTML
HTML(""" '<!-- Place this tag in your head or just before your close body tag. -->
<script async defer src='https://buttons.github.io/buttons.js'></script>
<!-- Place this tag where you want the button to render. -->
<a class='github-button' href='https://github.com/automl/auto-sklearn/subscription'
data-icon='octicon-eye' data-show-count='true'
aria-label='Watch automl/auto-sklearn on GitHub'>Watch</a>"""
)
# + slideshow={"slide_type": "fragment"}
from IPython.lib.display import YouTubeVideo
YouTubeVideo('QrJlj0VCHys')
# + [markdown] slideshow={"slide_type": "slide"}
# $$ \downarrow $$
# + [markdown] slideshow={"slide_type": "slide"}
# <NAME>., <NAME>. and <NAME>., 2018. ML-Plan: Automated machine learning via hierarchical planning. Machine Learning, 107(8-10), pp.1495-1515.
# __Cited by 14__
# + [markdown] slideshow={"slide_type": "slide"}
# # MULTIOBJECTIVE
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# [BLOG: Choosing the Hottest Model!](dirtydata.co.za)
#
# DEA
# $\downarrow$
# Hypothetical comparison units
# $\downarrow$
# Choose most balanced | population
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # HYPOTHESIS
# + [markdown] slideshow={"slide_type": "slide"}
# 1. Constrained applications
# 2. Genetic Diversity
# 3. Sparsity $\rightarrow$ Performance (RELU / Dropout)
|
UCT Masters Research Group/Model-free Hyperparameter Optimization/Presentation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="cb19f71d-51c8-417f-829e-3179d3319dcd" _uuid="c3b9226e142667d6b96e34daf7d6e42bea0ea1e2"
# # **Recovering history!**
#
# This notebook references the work of [BMnssari](https://www.kaggle.com/mnassrib/titanic-logistic-regression-with-python). We will create a classifier for Titanic shipwreck and evaluate it!
# + [markdown] _cell_guid="33c91cae-2ff8-45a6-b8cb-671619e9c933" _uuid="0a395fd25f20834b070ef55cb8987c8c1f9b55f9"
# <h2> Setting common stuff
# + _cell_guid="de05512e-6991-44df-9599-da92a7e459ac" _uuid="d8bdd5f0320e244e4702ed8ec1c2482b022c51cd"
import numpy as np
import pandas as pd
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
# + _cell_guid="e0a17223-f682-45fc-89a5-667af9782bbe" _uuid="7964157913fbcff581fc1929eed487708e81ac9c"
# Read CSV train data file into DataFrame
train_df = pd.read_csv("./titanic_train_final.csv")
# Read CSV test data file into DataFrame
test_df = pd.read_csv("./titanic_test_final.csv")
# preview train data
train_df.head()
# + _cell_guid="872d0de9-a873-4b60-b1ee-d557ee39d8a1" _uuid="d38222a64d4dfd1d1ee1a7ee1f58c4aa54560de3"
print('The number of samples into the train data is {}.'.format(train_df.shape[0]))
# + _cell_guid="1d969b76-ea88-4d32-a58e-f22a070258bf" _uuid="bff38fcf31baf67493513c06f0c2f6e50576ff09"
# preview test data
test_df.head()
# + _cell_guid="254dd074-e07e-49f2-9184-80046b10b481" _uuid="62de7ddd73fed8d88ccbe1ba79e59b8e596cbb13"
print('The number of samples into the test data is {}.'.format(test_df.shape[0]))
# + [markdown] _cell_guid="c833cbf5-74db-44ff-90fa-b600ff0a09d7" _uuid="39dbc095f99dcec6d25a7a4561e81bb641078622"
# <h2> Logistic Regression and Results
# + [markdown] _cell_guid="b70cda8a-e8d9-44a6-b9f0-2b365fdf3428" _uuid="136cf9e02ea1ab48a397f534b491fb2d9dbb5684"
# ### Feature elimination
#
# Given an external estimator that assigns weights to features, recursive feature elimination (RFE) selects features by recursively considering smaller and smaller sets of features.
#
# As we have many features, we may have collinearity so this tool basically performs cross validation on all different possible models we could have for the number of features specified, only to return the best!
#
# References: <br>
# http://scikit-learn.org/stable/modules/feature_selection.html <br>
# + _cell_guid="11a2a468-20df-40cd-a4ba-4ae7bd2fc403" _uuid="64befdf1182c2b4e845f488f5bfd0e19ce3dc17a"
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
# Candidate feature columns for the logistic-regression survival model.
cols = ["Age","Fare","TravelAlone","Pclass_1","Pclass_2","Embarked_C","Embarked_S","Sex_male","IsMinor"]
X = train_df[cols]
y = train_df['Survived']
model = LogisticRegression(solver='liblinear')
# create the RFE model and select 8 attributes
# FIX: pass n_features_to_select by keyword — the positional form was
# deprecated in scikit-learn 1.0 and removed in 1.2.
rfe = RFE(model, n_features_to_select=8)
rfe = rfe.fit(X, y)
# summarize the selection of the attributes
print('Selected features: %s' % list(X.columns[rfe.support_]))
# + [markdown] _cell_guid="29281bd5-b954-4f3d-87e3-7416b1ec8c6b" _uuid="626da3348b48ced3564e6e05bdb0c3b4bd1402e6"
# ### But how many features is the best?
#
# RFECV performs RFE in a cross-validation loop to find the optimal number or the best number of features.
# + _cell_guid="7239aa6f-7fd2-4b75-a387-f6624f1c338c" _uuid="53d79f38cfe33d75d6ff869a443b9a29c93b4cbd"
from sklearn.feature_selection import RFECV
# Create the RFE object and compute a cross-validated score.
# The "accuracy" scoring is proportional to the number of correct classifications
# RFECV wraps RFE in 10-fold cross-validation to pick the feature count that
# maximises mean CV accuracy.
rfecv = RFECV(estimator=LogisticRegression(solver='liblinear'), step=1, cv=10, scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features: %d" % rfecv.n_features_)
print('Selected features: %s' % list(X.columns[rfecv.support_]))
plt.figure(figsize=(10,6))
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
# NOTE(review): `grid_scores_` was deprecated in scikit-learn 1.0 and removed
# in 1.2 (use cv_results_['mean_test_score'] instead) — confirm the pinned
# scikit-learn version before running.
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
# + [markdown] _cell_guid="b1b3b56f-2f5f-47d6-9375-62c11e49ce79" _uuid="e9d52d5b182c0a01218982e844e53d5278e0d98a"
# As we see, eight variables were kept.
# + _cell_guid="08986ec4-79ff-466b-b763-61bf84a0879b" _uuid="3f6950a7c24c629b72e17e54c556f3c183b3f779"
Selected_features = ['Age', 'TravelAlone', 'Pclass_1', 'Pclass_2', 'Embarked_C',
'Embarked_S', 'Sex_male', 'IsMinor']
X = train_df[Selected_features]
plt.subplots(figsize=(15, 10))
sns.heatmap(X.corr(), annot=True, cmap="RdYlGn")
plt.show()
# + [markdown] _cell_guid="a7455afe-9716-4189-b207-f1cc9facce12" _uuid="46b76691c5f109b17f805f233a5ad5ba900b353b"
# ### Optimal threshold
#
# Lets train an out of the box logistic regression to see its performance and lets evaluate which is the best threshold!
# + _cell_guid="84233f59-f3c7-4ea0-884d-96f8ad4d5b10" _uuid="46336228eeb864bc82e6739768122579d1c9634c"
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, classification_report, precision_score, recall_score
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, auc, log_loss
# create X (features) and y (response)
X = train_df[Selected_features]
y = train_df['Survived']
# Hold out 20% of the rows for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2)
# check classification scores of logistic regression
logreg = LogisticRegression(solver='liblinear')
logreg.fit(X_train, y_train)
y_pred = logreg.predict(X_test)
# Probability of the positive class, needed for log-loss and the ROC curve.
y_pred_proba = logreg.predict_proba(X_test)[:, 1]
[fpr, tpr, thr] = roc_curve(y_test, y_pred_proba)
print('Train/Test split results:')
print(logreg.__class__.__name__+" accuracy is %2.3f" % accuracy_score(y_test, y_pred))
print(logreg.__class__.__name__+" log_loss is %2.3f" % log_loss(y_test, y_pred_proba))
print(logreg.__class__.__name__+" auc is %2.3f" % auc(fpr, tpr))
idx = np.min(np.where(tpr > 0.95)) # index of the first threshold for which the sensibility > 0.95
plt.figure()
plt.plot(fpr, tpr, color='coral', label='ROC curve (area = %0.3f)' % auc(fpr, tpr))
plt.plot([0, 1], [0, 1], 'k--')
# Dashed guide lines mark the chosen operating point on the ROC curve.
plt.plot([0,fpr[idx]], [tpr[idx],tpr[idx]], 'k--', color='blue')
plt.plot([fpr[idx],fpr[idx]], [0,tpr[idx]], 'k--', color='blue')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate (1 - specificity)', fontsize=14)
plt.ylabel('True Positive Rate (recall)', fontsize=14)
plt.title('Receiver operating characteristic (ROC) curve')
plt.legend(loc="lower right")
plt.show()
print("Using a threshold of %.3f " % thr[idx] + "guarantees a sensitivity of %.3f " % tpr[idx] +
      "and a specificity of %.3f" % (1-fpr[idx]) +
      ", i.e. a false positive rate of %.2f%%." % (np.array(fpr[idx])*100))
# + [markdown] _cell_guid="6292c3f2-6be9-45e4-be33-1dca41e604a7" _uuid="ba0017b461cea0b8849746e76475598bbba7c9ce"
# <a id="t4.2.2."></a>
# ### 4.2.2. Model evaluation based on K-fold cross-validation using `cross_val_score()` function
# + _cell_guid="32a611ae-b2b7-43e0-8fa8-3cc56e351bf6" _uuid="7f0aba7b861c3fa1748060b4733778851fb00a31"
# Evaluate logistic regression with 10-fold cross-validation.
# cross_val_score does its own splitting, so the full X/y are passed in
# (not X_train/y_train); cv=10 gives 10 folds, and three different scorers
# are run: accuracy, negative log-loss and ROC AUC.
logreg = LogisticRegression(solver='liblinear')
cv_scores = {
    'accuracy': cross_val_score(logreg, X, y, cv=10, scoring='accuracy'),
    'log_loss': cross_val_score(logreg, X, y, cv=10, scoring='neg_log_loss'),
    'auc': cross_val_score(logreg, X, y, cv=10, scoring='roc_auc'),
}
print('K-fold cross-validation results:')
model_name = logreg.__class__.__name__
print(model_name + " average accuracy is %2.3f" % cv_scores['accuracy'].mean())
print(model_name + " average log_loss is %2.3f" % -cv_scores['log_loss'].mean())
print(model_name + " average auc is %2.3f" % cv_scores['auc'].mean())
# + [markdown] _cell_guid="f485ca4c-2172-4383-979e-21e78a66192c" _uuid="ec44fbaddbc23f03a8ac470391f41ce35f40cf72"
# <a id="t4.3."></a>
# ## 4.3. GridSearchCV evaluating using multiple scorers simultaneously
# + _cell_guid="4a39cb49-b446-4ffa-88a2-e37b627eb5e5" _uuid="765695a9712d3fe1ecff10f17dcc077a80ed7682"
from sklearn.model_selection import GridSearchCV
X = train_df[Selected_features]
# Sweep the inverse-regularization strength C, scoring each candidate with
# three metrics at once; refit on 'Accuracy' picks the final model.
param_grid = {'C': np.arange(1e-05, 3, 0.1)}
scoring = {'Accuracy': 'accuracy', 'AUC': 'roc_auc', 'Log_loss': 'neg_log_loss'}
gs = GridSearchCV(LogisticRegression(solver='liblinear'), return_train_score=True,
                  param_grid=param_grid, scoring=scoring, cv=10, refit='Accuracy')
gs.fit(X, y)
results = gs.cv_results_
print('='*20)
print("best params: " + str(gs.best_estimator_))
print("best params: " + str(gs.best_params_))
print('best score:', gs.best_score_)
print('='*20)
plt.figure(figsize=(10, 10))
plt.title("GridSearchCV evaluating using multiple scorers simultaneously",fontsize=16)
plt.xlabel("Inverse of regularization strength: C")
plt.ylabel("Score")
plt.grid()
ax = plt.axes()
ax.set_xlim(0, param_grid['C'].max())
ax.set_ylim(0.35, 0.95)
# Get the regular numpy array from the MaskedArray
X_axis = np.array(results['param_C'].data, dtype=float)
for scorer, color in zip(list(scoring.keys()), ['g', 'k', 'b']):
    for sample, style in (('train', '--'), ('test', '-')):
        # neg_log_loss is stored negated by sklearn; flip its sign for display.
        sample_score_mean = -results['mean_%s_%s' % (sample, scorer)] if scoring[scorer]=='neg_log_loss' else results['mean_%s_%s' % (sample, scorer)]
        sample_score_std = results['std_%s_%s' % (sample, scorer)]
        # Shaded band = +/- one std across CV folds (only shaded for test curves).
        ax.fill_between(X_axis, sample_score_mean - sample_score_std,
                        sample_score_mean + sample_score_std,
                        alpha=0.1 if sample == 'test' else 0, color=color)
        ax.plot(X_axis, sample_score_mean, style, color=color,
                alpha=1 if sample == 'test' else 0.7,
                label="%s (%s)" % (scorer, sample))
    best_index = np.nonzero(results['rank_test_%s' % scorer] == 1)[0][0]
    best_score = -results['mean_test_%s' % scorer][best_index] if scoring[scorer]=='neg_log_loss' else results['mean_test_%s' % scorer][best_index]
    # Plot a dotted vertical line at the best score for that scorer marked by x
    ax.plot([X_axis[best_index], ] * 2, [0, best_score],
            linestyle='-.', color=color, marker='x', markeredgewidth=3, ms=8)
    # Annotate the best score for that scorer
    ax.annotate("%0.2f" % best_score,
                (X_axis[best_index], best_score + 0.005))
plt.legend(loc="best")
plt.grid('off')
plt.show()
# +
# Score the held-out test passengers with the tuned grid-search model and
# assemble the two-column Kaggle submission frame.
predicted = gs.predict(test_df[Selected_features])
test_df['Survived'] = predicted
submission = test_df.loc[:, ['PassengerId', 'Survived']]
submission.tail()
# -
|
Module 6/titanic-logistic-regression-with-python.ipynb
|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#export
from fastai2.data.all import *
from fastai2.optimizer import *
from fastai2.learner import *
from nbdev.showdoc import *
# +
#default_exp metrics
# default_cls_lvl 3
# -
# # Metrics
#
# > Definition of the metrics that can be used in training models
# ## Core metric
# This is where the function that converts scikit-learn metrics to fastai metrics is defined. You should skip this section unless you want to know all about the internals of fastai.
#export
import sklearn.metrics as skm
#export
import scipy.stats as scs
#export torch_core
def flatten_check(inp, targ):
    "Flatten `inp` and `targ` to 1-D and check they have the same number of elements."
    flat_inp = inp.contiguous().view(-1)
    flat_targ = targ.contiguous().view(-1)
    test_eq(len(flat_inp), len(flat_targ))
    return flat_inp, flat_targ
# 5x4 and 20 elements line up after flattening...
x1,x2 = torch.randn(5,4),torch.randn(20)
x1,x2 = flatten_check(x1,x2)
test_eq(x1.shape, [20])
test_eq(x2.shape, [20])
# ...while 5x4 vs 21 elements must fail the size check.
x1,x2 = torch.randn(5,4),torch.randn(21)
test_fail(lambda: flatten_check(x1,x2))
#export
# Enum-like class of the activations `AccumMetric` can apply to raw predictions.
# Fix: the doc string's inline-code markup was unterminated (`AccumMetric).
mk_class('ActivationType', **{o:o.lower() for o in ['No', 'Sigmoid', 'Softmax', 'BinarySoftmax']},
         doc="All possible activation classes for `AccumMetric`")
#export
class AccumMetric(Metric):
    "Stores predictions and targets on CPU in accumulate to perform final calculations with `func`."
    def __init__(self, func, dim_argmax=None, activation=ActivationType.No, thresh=None, to_np=False,
                 invert_arg=False, flatten=True, **kwargs):
        # `dim_argmax`/`activation`/`thresh`/`flatten` control the pre-processing of
        # predictions in `accumulate`; `to_np`/`invert_arg` adapt the final call in
        # `value` for scikit-learn style functions; extra `kwargs` go to `func`.
        store_attr(self,'func,dim_argmax,activation,thresh,flatten')
        self.to_np,self.invert_args,self.kwargs = to_np,invert_arg,kwargs
    def reset(self): self.targs,self.preds = [],[]
    def accumulate(self, learn):
        # Apply the requested activation (or a bare argmax) to the raw model
        # output, optionally binarize with `thresh`, then store detached copies.
        pred = learn.pred
        if self.activation in [ActivationType.Softmax, ActivationType.BinarySoftmax]:
            pred = F.softmax(pred, dim=self.dim_argmax)
            # BinarySoftmax keeps only the positive-class probability.
            if self.activation == ActivationType.BinarySoftmax: pred = pred[:, -1]
        elif self.activation == ActivationType.Sigmoid: pred = torch.sigmoid(pred)
        elif self.dim_argmax: pred = pred.argmax(dim=self.dim_argmax)
        if self.thresh: pred = (pred >= self.thresh)
        targ = learn.y
        pred,targ = to_detach(pred),to_detach(targ)
        if self.flatten: pred,targ = flatten_check(pred,targ)
        self.preds.append(pred)
        self.targs.append(targ)
    @property
    def value(self):
        # `func` is only evaluated here, once, on the full accumulated tensors.
        if len(self.preds) == 0: return
        preds,targs = torch.cat(self.preds),torch.cat(self.targs)
        if self.to_np: preds,targs = preds.numpy(),targs.numpy()
        # scikit-learn metrics use (y_true, y_pred) order, hence `invert_args`.
        return self.func(targs, preds, **self.kwargs) if self.invert_args else self.func(preds, targs, **self.kwargs)
    @property
    def name(self): return self.func.func.__name__ if hasattr(self.func, 'func') else self.func.__name__
# `func` is only applied to the accumulated predictions/targets when the `value` attribute is asked for (so at the end of a validation/training phase, in use with `Learner` and its `Recorder`). The signature of `func` should be `inp,targ` (where `inp` are the predictions of the model and `targ` the corresponding labels).
#
# For classification problems with single label, predictions need to be transformed with a softmax then an argmax before being compared to the targets. Since a softmax doesn't change the order of the numbers, we can just apply the argmax. Pass along `dim_argmax` to have this done by `AccumMetric` (usually -1 will work pretty well). If you need to pass to your metrics the probabilities and not the predictions, use `activation=ActivationType.Softmax`.
#
# For classification problems with multiple labels, or if your targets are onehot-encoded, predictions may need to pass through a sigmoid (if it wasn't included in your model) then be compared to a given threshold (to decide between 0 and 1), this is done by `AccumMetric` if you pass `activation=ActivationType.Sigmoid` and/or a value for `thresh`.
#
# If you want to use a metric function sklearn.metrics, you will need to convert predictions and labels to numpy arrays with `to_np=True`. Also, scikit-learn metrics adopt the convention `y_true`, `y_preds` which is the opposite from us, so you will need to pass `invert_arg=True` to make `AccumMetric` do the inversion for you.
#For testing: a fake learner and a metric that isn't an average
class TstLearner():
    "Minimal stand-in for `Learner`: just holds `pred` and `y` attributes."
    def __init__(self):
        self.pred = None
        self.y = None
# +
def _l2_mean(x,y): return torch.sqrt((x.float()-y.float()).pow(2).mean())

# Run a fake training loop over three differently-sized batches (6, 9 and 5
# items) and return the metric's final value.
def compute_val(met, x1, x2):
    met.reset()
    boundaries = [0,6,15,20]
    learn = TstLearner()
    for lo, hi in zip(boundaries, boundaries[1:]):
        learn.pred, learn.y = x1[lo:hi], x2[lo:hi]
        met.accumulate(learn)
    return met.value
# +
# Exercise every pre-processing path of AccumMetric in turn.
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = AccumMetric(_l2_mean)
test_close(compute_val(tst, x1, x2), _l2_mean(x1, x2))
test_eq(torch.cat(tst.preds), x1.view(-1))
test_eq(torch.cat(tst.targs), x2.view(-1))
#test argmax
x1,x2 = torch.randn(20,5),torch.randint(0, 5, (20,))
tst = AccumMetric(_l2_mean, dim_argmax=-1)
test_close(compute_val(tst, x1, x2), _l2_mean(x1.argmax(dim=-1), x2))
#test thresh
x1,x2 = torch.randn(20,5),torch.randint(0, 2, (20,5)).bool()
tst = AccumMetric(_l2_mean, thresh=0.5)
test_close(compute_val(tst, x1, x2), _l2_mean((x1 >= 0.5), x2))
#test sigmoid
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = AccumMetric(_l2_mean, activation=ActivationType.Sigmoid)
test_close(compute_val(tst, x1, x2), _l2_mean(torch.sigmoid(x1), x2))
#test to_np
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = AccumMetric(lambda x,y: isinstance(x, np.ndarray) and isinstance(y, np.ndarray), to_np=True)
assert compute_val(tst, x1, x2)
#test invert_arg
x1,x2 = torch.randn(20,5),torch.randn(20,5)
tst = AccumMetric(lambda x,y: torch.sqrt(x.pow(2).mean()))
test_close(compute_val(tst, x1, x2), torch.sqrt(x1.pow(2).mean()))
tst = AccumMetric(lambda x,y: torch.sqrt(x.pow(2).mean()), invert_arg=True)
test_close(compute_val(tst, x1, x2), torch.sqrt(x2.pow(2).mean()))
# -
#hide
# Softmax activation with flatten=False keeps the (bs, n_classes) shape.
def _l2_mean(x,y): return torch.sqrt((x.argmax(dim=-1).float()-y.float()).pow(2).mean())
x1,x2 = torch.randn(20,5),torch.randint(0, 5, (20,))
tst = AccumMetric(_l2_mean, dim_argmax=-1, flatten=False, activation=ActivationType.Softmax)
test_close(compute_val(tst, x1, x2), _l2_mean(F.softmax(x1, dim=-1), x2))
#export
def skm_to_fastai(func, is_class=True, thresh=None, axis=-1, activation=None, **kwargs):
    "Convert `func` from sklearn.metrics to a fastai metric"
    # Single-label classification (no `thresh`) takes an argmax over `axis`;
    # multi-label classification (a `thresh`) defaults to a sigmoid activation;
    # regression (`is_class=False`) leaves predictions untouched.
    if activation is None:
        activation = ActivationType.Sigmoid if (is_class and thresh is not None) else ActivationType.No
    dim_argmax = axis if is_class and thresh is None else None
    # sklearn expects numpy arrays in (y_true, y_pred) order.
    return AccumMetric(func, dim_argmax=dim_argmax, activation=activation, thresh=thresh,
                       to_np=True, invert_arg=True, **kwargs)
# This is the quickest way to use a scikit-learn metric in a fastai training loop. `is_class` indicates if you are in a classification problem or not. In this case:
# - leaving `thresh` to `None` indicates it's a single-label classification problem and predictions will pass through an argmax over `axis` before being compared to the targets
# - setting a value for `thresh` indicates it's a multi-label classification problem and predictions will pass through a sigmoid (can be deactivated with `activation=ActivationType.No`) and be compared to `thresh` before being compared to the targets
#
# If `is_class=False`, it indicates you are in a regression problem, and predictions are compared to the targets without being modified. In all cases, `kwargs` are extra keyword arguments passed to `func`.
# Single-label: precision after an implicit argmax over the class axis.
tst_single = skm_to_fastai(skm.precision_score)
x1,x2 = torch.randn(20,2),torch.randint(0, 2, (20,))
test_close(compute_val(tst_single, x1, x2), skm.precision_score(x2, x1.argmax(dim=-1)))
# +
# Multi-label: thresholded after a sigmoid (default) or on raw scores.
tst_multi = skm_to_fastai(skm.precision_score, thresh=0.2)
x1,x2 = torch.randn(20),torch.randint(0, 2, (20,))
test_close(compute_val(tst_multi, x1, x2), skm.precision_score(x2, torch.sigmoid(x1) >= 0.2))
tst_multi = skm_to_fastai(skm.precision_score, thresh=0.2, activation=ActivationType.No)
x1,x2 = torch.randn(20),torch.randint(0, 2, (20,))
test_close(compute_val(tst_multi, x1, x2), skm.precision_score(x2, x1 >= 0.2))
# -
# Regression: predictions are passed through untouched.
tst_reg = skm_to_fastai(skm.r2_score, is_class=False)
x1,x2 = torch.randn(20,5),torch.randn(20,5)
test_close(compute_val(tst_reg, x1, x2), skm.r2_score(x2.view(-1), x1.view(-1)))
#export
@delegates(AccumMetric.__init__)
def scs_to_fastai(func, dim_argmax=-1, **kwargs):
    "Convert `func` from scipy.stats to a fastai metric"
    # Bug fix: forward the caller-supplied `dim_argmax` instead of the
    # hard-coded -1 the original passed, so the parameter actually works.
    return AccumMetric(func, dim_argmax=dim_argmax, **kwargs)
#export
def optim_metric(f, argname, bounds, tol=0.01, do_neg=True, get_x=False):
    """Replace metric `f` with a version that optimizes argument `argname`.

    The returned function has signature `(preds, targs)`; it searches `argname`
    over `bounds` (a scalar bounded optimization with absolute tolerance `tol`)
    and returns the best metric value (and the best `argname` if `get_x=True`).
    `do_neg=True` maximizes `f` (by minimizing its negation)."""
    def _f(preds, targs):
        def minfunc(x):
            kwargs = {argname: x}
            res = f(preds, targs, **kwargs)
            return -res if do_neg else res
        # Bug fix: honour the `tol` argument (the original hard-coded 0.01).
        optres = scipy.optimize.minimize_scalar(minfunc, bounds=bounds, method='bounded',
                                                options={'xatol': tol})
        fun = -optres.fun if do_neg else optres.fun
        return (fun, optres.x) if get_x else fun
    _f.__name__ = f'opt_{f.__name__}'
    return _f
# ## Single-label classification
# > Warning: All functions defined in this section are intended for single-label classification and targets that are not one-hot encoded. For multi-label problems or one-hot encoded targets, use the version suffixed with multi.
#export
def accuracy(inp, targ, axis=-1):
    "Compute accuracy with `targ` when `pred` is bs * n_classes"
    # Argmax over the class axis, then compare element-wise after flattening.
    preds, labels = flatten_check(inp.argmax(dim=axis), targ)
    return preds.eq(labels).float().mean()
#For testing
def change_targ(targ, n, c):
    # Corrupt `n` randomly-chosen entries of `targ`, shifting each to a
    # different class (mod `c`); the remaining entries are left untouched.
    res = targ.clone()
    for i in torch.randperm(len(targ))[:n]:
        res[i] = (res[i] + random.randint(1, c - 1)) % c
    return res
# Perfect predictions, then 2/4 corrupted targets, then a 3-D (bs x seq) case.
x = torch.randn(4,5)
y = x.argmax(dim=1)
test_eq(accuracy(x,y), 1)
y1 = change_targ(y, 2, 5)
test_eq(accuracy(x,y1), 0.5)
test_eq(accuracy(x.unsqueeze(1).expand(4,2,5), torch.stack([y,y1], dim=1)), 0.75)
#export
def error_rate(inp, targ, axis=-1):
    "1 - `accuracy`"
    # Complement of `accuracy`: fraction of mis-classified items.
    return 1 - accuracy(inp, targ, axis=axis)
# Mirrors the accuracy tests above (error_rate is 1 - accuracy).
x = torch.randn(4,5)
y = x.argmax(dim=1)
test_eq(error_rate(x,y), 0)
y1 = change_targ(y, 2, 5)
test_eq(error_rate(x,y1), 0.5)
test_eq(error_rate(x.unsqueeze(1).expand(4,2,5), torch.stack([y,y1], dim=1)), 0.25)
#export
def top_k_accuracy(inp, targ, k=5, axis=-1):
    "Computes the Top-k accuracy (`targ` is in the top `k` predictions of `inp`)"
    # Indices of the k highest scores, then check whether the target matches
    # any of them (at most one can, so summing hits gives 0 or 1 per item).
    topk_idx = inp.topk(k=k, dim=axis)[1]
    hits = topk_idx == targ.unsqueeze(dim=axis).expand_as(topk_idx)
    return hits.sum(dim=-1).float().mean()
# With k=5 the first five targets are always hit; target 5 is out of range.
x = torch.randn(6,5)
y = torch.arange(0,6)
test_eq(top_k_accuracy(x[:5],y[:5]), 1)
test_eq(top_k_accuracy(x, y), 5/6)
#export
def APScoreBinary(axis=-1, average='macro', pos_label=1, sample_weight=None):
    "Average Precision for single-label binary classification problems"
    # BinarySoftmax: softmax over the classes, then keep the positive-class probability.
    return skm_to_fastai(skm.average_precision_score, axis=axis, activation=ActivationType.BinarySoftmax,
                         average=average, pos_label=pos_label, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score) for more details.
#export
def BalancedAccuracy(axis=-1, sample_weight=None, adjusted=False):
    "Balanced Accuracy for single-label binary classification problems"
    return skm_to_fastai(skm.balanced_accuracy_score, axis=axis,
                         sample_weight=sample_weight, adjusted=adjusted)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html#sklearn.metrics.balanced_accuracy_score) for more details.
#export
def BrierScore(axis=-1, sample_weight=None, pos_label=None):
    "Brier score for single-label classification problems"
    return skm_to_fastai(skm.brier_score_loss, axis=axis,
                         sample_weight=sample_weight, pos_label=pos_label)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.brier_score_loss.html#sklearn.metrics.brier_score_loss) for more details.
#export
def CohenKappa(axis=-1, labels=None, weights=None, sample_weight=None):
    "Cohen kappa for single-label classification problems"
    return skm_to_fastai(skm.cohen_kappa_score, axis=axis, labels=labels, weights=weights,
                         sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.cohen_kappa_score.html#sklearn.metrics.cohen_kappa_score) for more details.
#export
def F1Score(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "F1 score for single-label classification problems"
    return skm_to_fastai(skm.f1_score, axis=axis,
                         labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score) for more details.
#export
def FBeta(beta, axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "FBeta score with `beta` for single-label classification problems"
    return skm_to_fastai(skm.fbeta_score, axis=axis,
                         beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.fbeta_score.html#sklearn.metrics.fbeta_score) for more details.
#export
def HammingLoss(axis=-1, sample_weight=None):
    "Hamming loss for single-label classification problems"
    return skm_to_fastai(skm.hamming_loss, axis=axis,
                         sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.hamming_loss.html#sklearn.metrics.hamming_loss) for more details.
#export
def Jaccard(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "Jaccard score for single-label classification problems"
    return skm_to_fastai(skm.jaccard_score, axis=axis,
                         labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.jaccard_score.html#sklearn.metrics.jaccard_score) for more details.
#export
def Precision(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "Precision for single-label classification problems"
    return skm_to_fastai(skm.precision_score, axis=axis,
                         labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html#sklearn.metrics.precision_score) for more details.
#export
def Recall(axis=-1, labels=None, pos_label=1, average='binary', sample_weight=None):
    "Recall for single-label classification problems"
    return skm_to_fastai(skm.recall_score, axis=axis,
                         labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html#sklearn.metrics.recall_score) for more details.
#export
def RocAuc(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='ovr'):
    "Area Under the Receiver Operating Characteristic Curve for single-label multiclass classification problems"
    assert multi_class in ['ovr', 'ovo']
    # flatten=False: roc_auc_score needs the full (bs, n_classes) probability matrix.
    return skm_to_fastai(skm.roc_auc_score, axis=axis, activation=ActivationType.Softmax, flatten=False,
                         average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score) for more details.
#export
def RocAucBinary(axis=-1, average='macro', sample_weight=None, max_fpr=None, multi_class='raise'):
    "Area Under the Receiver Operating Characteristic Curve for single-label binary classification problems"
    return skm_to_fastai(skm.roc_auc_score, axis=axis, activation=ActivationType.BinarySoftmax,
                         average=average, sample_weight=sample_weight, max_fpr=max_fpr, multi_class=multi_class)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score) for more details.
#export
def MatthewsCorrCoef(sample_weight=None, **kwargs):
    "Matthews correlation coefficient for single-label classification problems"
    return skm_to_fastai(skm.matthews_corrcoef, sample_weight=sample_weight, **kwargs)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html#sklearn.metrics.matthews_corrcoef) for more details.
#export
@delegates(scs_to_fastai)
def PearsonCorrCoef(**kwargs):
    "Pearson correlation coefficient"
    # Bug fix: this module imports scipy.stats as `scs` (see the imports cell);
    # the original referenced an undefined `spm`, raising NameError at metric time.
    def pearsonr(x, y): return scs.pearsonr(x, y)[0]
    return scs_to_fastai(pearsonr, invert_arg=False, **kwargs)
# See the [scipy documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html?highlight=pearson#scipy.stats.pearsonr) for more details.
#export
@delegates(scs_to_fastai)
def SpearmanCorrCoef(axis=0, nan_policy='propagate', **kwargs):
    "Spearman correlation coefficient"
    # Bug fix: use the module's scipy.stats alias `scs` (`spm` was undefined).
    def spearmanr(a, b=None, **kw): return scs.spearmanr(a, b, **kw)[0]
    return scs_to_fastai(spearmanr, invert_arg=False, axis=axis, nan_policy=nan_policy, **kwargs)
# See the [scipy documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html?highlight=spearman#scipy.stats.spearmanr) for more details.
# +
#export
class Perplexity(AvgLoss):
    "Perplexity (exponential of cross-entropy loss) for Language Models"
    @property
    def value(self):
        # exp of the running mean loss; None before anything is accumulated.
        if self.count == 0: return None
        return torch.exp(self.total/self.count)
    @property
    def name(self): return "perplexity"

perplexity = Perplexity()
# -
# Accumulate the loss over three batches and compare against the perplexity
# of the whole set computed in one go.
x1,x2 = torch.randn(20,5),torch.randint(0, 5, (20,))
tst = perplexity
tst.reset()
vals = [0,6,15,20]
learn = TstLearner()
for i in range(3):
    learn.y,learn.yb = x2[vals[i]:vals[i+1]],(x2[vals[i]:vals[i+1]],)
    learn.loss = F.cross_entropy(x1[vals[i]:vals[i+1]],x2[vals[i]:vals[i+1]])
    tst.accumulate(learn)
test_close(tst.value, torch.exp(F.cross_entropy(x1,x2)))
# ## Multi-label classification
#export
def accuracy_multi(inp, targ, thresh=0.5, sigmoid=True):
    "Compute accuracy when `inp` and `targ` are the same size."
    inp, targ = flatten_check(inp, targ)
    # Optionally squash raw scores, then binarize against `thresh`.
    probs = inp.sigmoid() if sigmoid else inp
    return ((probs > thresh) == targ.bool()).float().mean()
#For testing
def change_1h_targ(targ, n):
    # Flip `n` randomly-chosen one-hot entries of `targ` (0 <-> 1),
    # preserving the original shape.
    flat = targ.clone().view(-1)
    for i in torch.randperm(targ.numel())[:n]:
        flat[i] = 1 - flat[i]
    return flat.view(targ.shape)
# +
# accuracy_multi at the default threshold, a custom threshold, and raw scores.
x = torch.randn(4,5)
y = (torch.sigmoid(x) >= 0.5).byte()
test_eq(accuracy_multi(x,y), 1)
test_eq(accuracy_multi(x,1-y), 0)
y1 = change_1h_targ(y, 5)
test_eq(accuracy_multi(x,y1), 0.75)
#Different thresh
y = (torch.sigmoid(x) >= 0.2).byte()
test_eq(accuracy_multi(x,y, thresh=0.2), 1)
test_eq(accuracy_multi(x,1-y, thresh=0.2), 0)
y1 = change_1h_targ(y, 5)
test_eq(accuracy_multi(x,y1, thresh=0.2), 0.75)
#No sigmoid
y = (x >= 0.5).byte()
test_eq(accuracy_multi(x,y, sigmoid=False), 1)
test_eq(accuracy_multi(x,1-y, sigmoid=False), 0)
y1 = change_1h_targ(y, 5)
test_eq(accuracy_multi(x,y1, sigmoid=False), 0.75)
# -
# -
#export
def APScoreMulti(sigmoid=True, average='macro', pos_label=1, sample_weight=None):
    "Average Precision for multi-label classification problems"
    # All multi-label factories share this pattern: optional sigmoid activation,
    # flatten=False to keep the (bs, n_labels) shape expected by sklearn.
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.average_precision_score, activation=activation, flatten=False,
                         average=average, pos_label=pos_label, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score) for more details.
#export
def BrierScoreMulti(thresh=0.5, sigmoid=True, sample_weight=None, pos_label=None):
    "Brier score for multi-label classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.brier_score_loss, thresh=thresh, activation=activation, flatten=False,
                         sample_weight=sample_weight, pos_label=pos_label)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.brier_score_loss.html#sklearn.metrics.brier_score_loss) for more details.
#export
def F1ScoreMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "F1 score for multi-label classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.f1_score, thresh=thresh, activation=activation, flatten=False,
                         labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score) for more details.
#export
def FBetaMulti(beta, thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "FBeta score with `beta` for multi-label classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.fbeta_score, thresh=thresh, activation=activation, flatten=False,
                         beta=beta, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.fbeta_score.html#sklearn.metrics.fbeta_score) for more details.
#export
def HammingLossMulti(thresh=0.5, sigmoid=True, labels=None, sample_weight=None):
    "Hamming loss for multi-label classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.hamming_loss, thresh=thresh, activation=activation, flatten=False,
                         sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.hamming_loss.html#sklearn.metrics.hamming_loss) for more details.
#export
def JaccardMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "Jaccard score for multi-label classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.jaccard_score, thresh=thresh, activation=activation, flatten=False,
                         labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.jaccard_score.html#sklearn.metrics.jaccard_score) for more details.
#export
def MatthewsCorrCoefMulti(thresh=0.5, sigmoid=True, sample_weight=None):
    "Matthews correlation coefficient for multi-label classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.matthews_corrcoef, thresh=thresh, activation=activation, flatten=False, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html#sklearn.metrics.matthews_corrcoef) for more details.
#export
def PrecisionMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "Precision for multi-label classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.precision_score, thresh=thresh, activation=activation, flatten=False,
                         labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html#sklearn.metrics.precision_score) for more details.
#export
def RecallMulti(thresh=0.5, sigmoid=True, labels=None, pos_label=1, average='macro', sample_weight=None):
    "Recall for multi-label classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.recall_score, thresh=thresh, activation=activation, flatten=False,
                         labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html#sklearn.metrics.recall_score) for more details.
#export
def RocAucMulti(sigmoid=True, average='macro', sample_weight=None, max_fpr=None):
    "Area Under the Receiver Operating Characteristic Curve for multi-label binary classification problems"
    activation = ActivationType.Sigmoid if sigmoid else ActivationType.No
    return skm_to_fastai(skm.roc_auc_score, activation=activation, flatten=False,
                         average=average, sample_weight=sample_weight, max_fpr=max_fpr)
# Identical scores for every row give an uninformative ranking, so AUC is 0.5.
roc_auc_metric = RocAucMulti(sigmoid=False)
x,y = torch.tensor([np.arange(start=0, stop=0.2, step=0.04)]*20), torch.tensor([0, 0, 1, 1]).repeat(5)
assert compute_val(roc_auc_metric, x, y) == 0.5
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score) for more details.
# ## Regression
#export
def mse(inp, targ):
    "Mean squared error between `inp` and `targ`."
    flat_inp, flat_targ = flatten_check(inp, targ)
    return F.mse_loss(flat_inp, flat_targ)
# Sanity check: mse matches the manual mean-of-squares computation.
x1,x2 = torch.randn(4,5),torch.randn(4,5)
test_close(mse(x1,x2), (x1-x2).pow(2).mean())
#export
def _rmse(inp, targ): return torch.sqrt(F.mse_loss(inp, targ))
# Wrap as an AccumMetric so rmse is computed over the whole epoch at once.
rmse = AccumMetric(_rmse)
rmse.__doc__ = "Root mean squared error"
show_doc(rmse, name="rmse")
x1,x2 = torch.randn(20,5),torch.randn(20,5)
test_eq(compute_val(rmse, x1, x2), torch.sqrt(F.mse_loss(x1,x2)))
#export
def mae(inp, targ):
    "Mean absolute error between `inp` and `targ`."
    flat_inp, flat_targ = flatten_check(inp, targ)
    return (flat_inp - flat_targ).abs().mean()
# Sanity check: mae matches the manual mean-absolute-difference computation.
x1,x2 = torch.randn(4,5),torch.randn(4,5)
test_eq(mae(x1,x2), torch.abs(x1-x2).mean())
#export
def msle(inp, targ):
    "Mean squared logarithmic error between `inp` and `targ`."
    # MSE in log(1 + x) space; inputs are expected to be non-negative.
    flat_inp, flat_targ = flatten_check(inp, targ)
    return F.mse_loss(torch.log(1 + flat_inp), torch.log(1 + flat_targ))
# relu makes the inputs non-negative so the log is well defined.
x1,x2 = torch.randn(4,5),torch.randn(4,5)
x1,x2 = torch.relu(x1),torch.relu(x2)
test_close(msle(x1,x2), (torch.log(x1+1)-torch.log(x2+1)).pow(2).mean())
#export
def _exp_rmspe(inp,targ):
inp,targ = torch.exp(inp),torch.exp(targ)
return torch.sqrt(((targ - inp)/targ).pow(2).mean())
# Wrap as an AccumMetric so the metric is computed over the whole epoch at once.
exp_rmspe = AccumMetric(_exp_rmspe)
exp_rmspe.__doc__ = "Root mean square percentage error of the exponential of predictions and targets"
show_doc(exp_rmspe, name="exp_rmspe")
x1,x2 = torch.randn(20,5),torch.randn(20,5)
test_eq(compute_val(exp_rmspe, x1, x2), torch.sqrt((((torch.exp(x2) - torch.exp(x1))/torch.exp(x2))**2).mean()))
#export
def ExplainedVariance(sample_weight=None):
    "Explained variance between predictions and targets"
    # Regression metric: wraps sklearn's explained_variance_score as a fastai metric.
    return skm_to_fastai(skm.explained_variance_score, is_class=False, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.explained_variance_score.html#sklearn.metrics.explained_variance_score) for more details.
#export
def R2Score(sample_weight=None):
    "R2 score between predictions and targets"
    # Regression metric: wraps sklearn's r2_score as a fastai metric.
    return skm_to_fastai(skm.r2_score, is_class=False, sample_weight=sample_weight)
# See the [scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html#sklearn.metrics.r2_score) for more details.
# ## Segmentation
#export
def foreground_acc(inp, targ, bkg_idx=0, axis=1):
    "Computes non-background accuracy for multiclass segmentation"
    labels = targ.squeeze(1)          # drop the channel dim of the target
    keep = labels != bkg_idx          # background pixels are excluded from the score
    preds = inp.argmax(dim=axis)
    return (preds[keep] == labels[keep]).float().mean()
x = torch.randn(4,5,3,3)
y = x.argmax(dim=1)[:,None]
test_eq(foreground_acc(x,y), 1)
y[0] = 0 #the 0s are ignored so we get the same value
test_eq(foreground_acc(x,y), 1)
#export
class Dice(Metric):
    "Dice coefficient metric for binary target in segmentation"
    def __init__(self, axis=1): self.axis = axis
    def reset(self):
        # Running intersection / union totals accumulated over the validation set.
        self.inter = 0
        self.union = 0
    def accumulate(self, learn):
        pred, targ = flatten_check(learn.pred.argmax(dim=self.axis), learn.y)
        self.inter += (pred * targ).float().sum().item()
        self.union += (pred + targ).float().sum().item()
    @property
    def value(self):
        # Dice = 2*|A∩B| / (|A|+|B|); undefined (None) when nothing accumulated.
        if self.union <= 0: return None
        return 2. * self.inter / self.union
x1 = torch.randn(20,2,3,3)
x2 = torch.randint(0, 2, (20, 3, 3))
pred = x1.argmax(1)
inter = (pred*x2).float().sum().item()
union = (pred+x2).float().sum().item()
test_eq(compute_val(Dice(), x1, x2), 2*inter/union)
#export
class JaccardCoeff(Dice):
    "Implementation of the jaccard coefficient that is lighter in RAM"
    # IoU = inter / (union - inter): Dice accumulates |A|+|B| in `union`,
    # which double-counts the overlap, so subtracting `inter` yields |A∪B|.
    @property
    def value(self): return self.inter/(self.union-self.inter) if self.union > 0 else None
x1 = torch.randn(20,2,3,3)
x2 = torch.randint(0, 2, (20, 3, 3))
pred = x1.argmax(1)
inter = (pred*x2).float().sum().item()
union = (pred+x2).float().sum().item()
test_eq(compute_val(JaccardCoeff(), x1, x2), inter/(union-inter))
# ## NLP
#export
class CorpusBLEUMetric(Metric):
    "BLEU computed over the whole validation corpus, using n-grams of order 1-4."
    def __init__(self, vocab_sz=5000, axis=-1):
        "BLEU Metric calculated over the validation corpus"
        self.metric_name = 'CorpusBLEU'
        self.axis, self.vocab_sz = axis, vocab_sz
        # samp_idx kept for backward compatibility even though it is unused here.
        self.pred_len,self.targ_len,self.samp_idx,self.corrects,self.counts, = 0,0,0,[0]*4,[0]*4

    def reset(self):
        self.pred_len,self.targ_len,self.corrects,self.counts = 0,0,[0]*4,[0]*4

    class NGram():
        "Hashable wrapper for an n-gram so it can be used as a `Counter` key."
        def __init__(self, ngram, max_n=5000): self.ngram,self.max_n = ngram,max_n
        def __eq__(self, other):
            if len(self.ngram) != len(other.ngram): return False
            return np.all(np.array(self.ngram) == np.array(other.ngram))
        def __hash__(self): return int(sum([o * self.max_n**i for i,o in enumerate(self.ngram)]))

    def get_grams(self, x, n, max_n=5000):
        "All n-grams of order `n` in sequence `x` (raw tokens for unigrams)."
        return x if n==1 else [self.NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)]

    def get_correct_ngrams(self, pred, targ, n, max_n=5000):
        "Return (clipped matches, total predicted n-grams) for order `n`."
        pred_grams,targ_grams = self.get_grams(pred, n, max_n=max_n),self.get_grams(targ, n, max_n=max_n)
        pred_cnt,targ_cnt = Counter(pred_grams),Counter(targ_grams)
        # Clip each predicted n-gram's count by its count in the target (standard BLEU).
        return sum([min(c, targ_cnt[g]) for g,c in pred_cnt.items()]),len(pred_grams)

    def accumulate(self, learn):
        # Only accumulate during validation.
        if learn.training: return None
        else:
            last_output = learn.pred.argmax(dim=self.axis)
            last_target = learn.y
            for pred,targ in zip(last_output.cpu().numpy(),last_target.cpu().numpy()):
                self.pred_len += len(pred)
                self.targ_len += len(targ)
                smooth_mteval = 1
                for i in range(4):
                    c,t = self.get_correct_ngrams(pred, targ, i+1, max_n=self.vocab_sz)
                    if c == 0:
                        smooth_mteval *= 2
                        c = 1 / smooth_mteval # exp smoothing, method 3 from http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
                    self.corrects[i] += c
                    self.counts[i] += t

    @property
    def value(self):
        # BUG FIX: the original guard was `if self.counts == 0`, comparing a *list*
        # to 0 (always False), so a corpus with a zero n-gram count (e.g. nothing
        # accumulated yet) crashed with ZeroDivisionError below. Guard on any zero
        # count instead, which also keeps pred_len > 0 for the brevity penalty.
        if min(self.counts) == 0: return None
        elif max(self.corrects) == 0: return 0.0
        else:
            precs = [c/t for c,t in zip(self.corrects,self.counts)]
            # Brevity penalty: penalize predictions shorter than the references.
            len_penalty = math.exp(1 - self.targ_len/self.pred_len) if self.pred_len < self.targ_len else 1
            return len_penalty * ((precs[0]*precs[1]*precs[2]*precs[3]) ** 0.25)
# +
def create_vcb_emb(pred, targ):
    "Build a one-hot 'embedding' of `pred` over the joint vocabulary of `pred` and `targ`."
    # Vocabulary size = largest token id seen in either tensor, plus one.
    vcb_sz = max(torch.unique(torch.cat([pred, targ]))) + 1
    pred_emb = torch.zeros(pred.size(0), pred.size(1), vcb_sz)
    for row_idx, row in enumerate(pred):
        # Scatter a 1 at each token's position along the vocab axis.
        pred_emb[row_idx].scatter_(1, row.view(-1, 1), 1)
    return pred_emb
def compute_bleu_val(met, x1, x2):
    # Drive `met` the way a Learner would during validation and return its value.
    # NOTE(review): each loop iteration feeds the *entire* batch (x1, x2), so the
    # corpus statistics are accumulated len(x1) times; BLEU is invariant to this
    # uniform scaling of the totals, so the result matches a single accumulation —
    # confirm that was the intent.
    met.reset()
    learn = TstLearner()
    learn.training=False
    for i in range(len(x1)):
        learn.pred,learn.y = x1, x2
        met.accumulate(learn)
    return met.value
targ = torch.tensor([[1,2,3,4,5,6,1,7,8]])
pred = torch.tensor([[1,9,3,4,5,6,1,10,8]])
pred_emb = create_vcb_emb(pred, targ)
test_close(compute_bleu_val(CorpusBLEUMetric(), pred_emb, targ), 0.48549)
targ = torch.tensor([[1,2,3,4,5,6,1,7,8],[1,2,3,4,5,6,1,7,8]])
pred = torch.tensor([[1,9,3,4,5,6,1,10,8],[1,9,3,4,5,6,1,10,8]])
pred_emb = create_vcb_emb(pred, targ)
test_close(compute_bleu_val(CorpusBLEUMetric(), pred_emb, targ), 0.48549)
# -
# The BLEU metric was introduced in [this article](https://www.aclweb.org/anthology/P02-1040) to come up with a way to evaluate the performance of translation models. It's based on the precision of n-grams in your prediction compared to your target. See the [fastai NLP course BLEU notebook](https://github.com/fastai/course-nlp/blob/master/bleu_metric.ipynb) for a more detailed description of BLEU.
#
# The smoothing used in the precision calculation is the same as in [SacreBLEU](https://github.com/mjpost/sacrebleu/blob/32c54cdd0dfd6a9fadd5805f2ea189ac0df63907/sacrebleu/sacrebleu.py#L540-L542), which in turn is "method 3" from the [Chen & Cherry, 2014](http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf) paper.
# ## LossMetrics -
#export
class LossMetric(AvgMetric):
    "Create a metric from `loss_func.attr` named `nm`"
    def __init__(self, attr, nm=None): store_attr(self, 'attr,nm')
    def accumulate(self, learn):
        # Weight each batch's value by its size so `value` is a per-sample average.
        bs = find_bs(learn.yb)
        # getattr defaults to 0, so a loss function lacking the attribute contributes nothing.
        self.total += to_detach(getattr(learn.loss_func, self.attr, 0))*bs
        self.count += bs
    @property
    def name(self): return self.attr if self.nm is None else self.nm
#export
def LossMetrics(attrs, nms=None):
    "List of `LossMetric` for each of `attrs` and `nms`"
    # Both arguments accept either a list or a comma-separated string.
    if isinstance(attrs, str): attrs = attrs.split(',')
    if nms is None:
        nms = attrs
    elif isinstance(nms, str):
        nms = nms.split(',')
    return [LossMetric(attr, nm) for attr, nm in zip(attrs, nms)]
#hide
from fastai2.test_utils import *
class CombineL1L2(Module):
    "Toy loss combining L1 and L2 terms; exposes `l1`/`l2` attributes for `LossMetrics`."
    def forward(self, out, targ):
        # Store the components on `self` so LossMetric can read them via getattr.
        self.l1 = F.l1_loss(out, targ)
        self.l2 = F.mse_loss(out, targ)
        return self.l1+self.l2
learn = synth_learner(metrics=LossMetrics('l1,l2'))
learn.loss_func = CombineL1L2()
learn.fit(2)
# ## Export -
#hide
from nbdev.export import notebook2script
notebook2script()
|
nbs/13b_metrics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.datasets import mnist,fashion_mnist
from keras.utils import to_categorical
from keras import datasets
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import SGD
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
# +
# load the data
# display a few images
# +
# reshape the x_train and x_test data
#one hot encoding (data types and list)
# +
# build a 3 layer neural network (80, 40, 10)
# discuss classes, activations, loss functions, optimizers, metrics
# -
# fit the model, show the progression of training
# (batch_size, epochs, validation )
# evaluate the model with test data
# compare ground truth to predictions using test data
# +
# confusion matrix
# use argmax to tally counts of y_test and y_hat (conf_y, conf_y_hat)
# -
|
Baby's First Network .ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import matplotlib.pyplot as plt
from math import *
import time
torch.set_default_tensor_type('torch.DoubleTensor')
# activation function
def activation(x):
    # SiLU ("swish") nonlinearity: x * sigmoid(x).
    return torch.sigmoid(x) * x
# build ResNet with one blocks
class Net(nn.Module):
    """Fully connected network: input -> width -> width -> width -> 1.

    NOTE(review): despite the "ResNet" comment in the original, `forward` adds no
    skip connection, and `layer_in`'s output reaches `layer_1` without an
    activation in between — confirm that is intentional.
    """
    def __init__(self, input_size, width):
        super(Net, self).__init__()
        self.layer_in = nn.Linear(input_size, width)
        self.layer_1 = nn.Linear(width, width)
        self.layer_2 = nn.Linear(width, width)
        self.layer_out = nn.Linear(width, 1)
    def forward(self, x):
        hidden = self.layer_in(x)
        hidden = activation(self.layer_1(hidden))
        hidden = activation(self.layer_2(hidden))
        return self.layer_out(hidden)
input_size = 1
width = 4
net = Net(input_size,width)
def model(x):
    # Multiply the network output by x(x-1) so the ansatz automatically
    # vanishes at the boundary points x=0 and x=1.
    boundary_factor = x * (x - 1.0)
    return boundary_factor * net(x)
# exact solution
def u_ex(x):
    "Exact solution u(x) = sin(pi*x) of the test problem."
    return torch.sin(x * pi)
# f(x)
def f(x):
    "Right-hand side f(x) = pi^2 * sin(pi*x), so -u'' = f for u = sin(pi*x)."
    return torch.sin(pi * x) * pi ** 2
grid_num = 100
x = torch.zeros(grid_num + 1, input_size)
for index in range(grid_num + 1):
x[index] = index * 1 / grid_num
optimizer = optim.Adam(net.parameters(), lr = 0.05)
scheduler = lr_scheduler.StepLR(optimizer, 50, 0.9) # every 50 epochs, learning rate * 0.9
# Xavier normal initialization for weights:
# mean = 0 std = gain * sqrt(2 / fan_in + fan_out)
# zero initialization for biases
def initialize_weights(self):
    """Xavier-normal initialization for every Linear layer's weights; zero biases.

    `self` only needs a `.modules()` iterator, so this works both as a bound
    method and as a free function applied to any `nn.Module`.
    """
    for m in self.modules():
        if isinstance(m, nn.Linear):
            # FIX: `nn.init.xavier_normal` is deprecated; use the in-place
            # underscore variant, which also avoids touching `.data` directly.
            nn.init.xavier_normal_(m.weight)
            if m.bias is not None:
                m.bias.data.zero_()
# loss function to DGM by auto differential
def loss_function(x):
    """Squared PDE residual (u'' + f)^2 integrated over [0,1] by composite Simpson's rule.

    `x` holds the grid_num+1 equally spaced grid points. The second derivative of
    the ansatz `model` is obtained with two nested autograd.grad calls. Simpson's
    rule weights: endpoints (sum_a, sum_b) x1, midpoints (sum_1) x4, interior
    nodes (sum_2) x2, all scaled by h/6.
    """
    h = 1 / grid_num
    sum_0 = 0.0
    sum_1 = 0.0
    sum_2 = 0.0
    sum_a = 0.0
    sum_b = 0.0
    # Midpoints of each sub-interval (weight 4 in Simpson's rule).
    for index in range(grid_num):
        x_temp = x[index] + h / 2
        x_temp.requires_grad = True
        # grad_x_temp = torch.autograd.grad(model(x_temp), x_temp, create_graph = True)
        grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
        grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
        sum_1 += ((grad_grad_x_temp[0])[0] + f(x_temp)[0])**2
    # Interior grid nodes (weight 2).
    for index in range(1, grid_num):
        x_temp = x[index]
        x_temp.requires_grad = True
        # grad_x_temp = torch.autograd.grad(model(x_temp), x_temp, create_graph = True)
        grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
        grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
        sum_2 += ((grad_grad_x_temp[0])[0] + f(x_temp)[0])**2
    # Left endpoint x = 0 (weight 1).
    x_temp = x[0]
    x_temp.requires_grad = True
    # grad_x_temp = torch.autograd.grad(model(x_temp), x_temp, create_graph = True)
    grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
    grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
    sum_a = ((grad_grad_x_temp[0])[0] + f(x_temp)[0])**2
    # Right endpoint x = 1 (weight 1).
    x_temp = x[grid_num]
    x_temp.requires_grad = True
    # grad_x_temp = torch.autograd.grad(model(x_temp), x_temp, create_graph = True)
    grad_x_temp = torch.autograd.grad(outputs = model(x_temp), inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
    grad_grad_x_temp = torch.autograd.grad(outputs = grad_x_temp[0], inputs = x_temp, grad_outputs = torch.ones(model(x_temp).shape), create_graph = True)
    sum_b = ((grad_grad_x_temp[0])[0] + f(x_temp)[0])**2
    # Composite Simpson assembly.
    sum_0 = h / 6 * (sum_a + 4 * sum_1 + 2 * sum_2 + sum_b)
    return sum_0
def error_function(x):
    "Mean squared error between the network ansatz `model` and the exact solution `u_ex` on grid `x`."
    error = 0.0
    for index in range(len(x)):
        x_temp = x[index]
        # Both model(...) and u_ex(...) return 1-element tensors; [0] extracts the scalar.
        error += (model(x_temp)[0] - u_ex(x_temp)[0])**2
    return error / len(x)
print("Total number of paramerters in networks is {} ".format(sum(x.numel() for x in net.parameters())))
param_num = sum(x.numel() for x in net.parameters())
# +
epoch = 500
loss_record = np.zeros(epoch)
error_record = np.zeros(epoch)
time_start = time.time()
for i in range(epoch):
optimizer.zero_grad()
loss = loss_function(x)
loss_record[i] = float(loss)
error = error_function(x)
error_record[i] = float(error)
print("current epoch is: ", i)
print("current loss is: ", loss.detach())
print("current error is: ", error.detach())
loss.backward()
optimizer.step()
np.save("loss_of_DGM_100.npy", loss_record)
np.save("error_of_DGM_100.npy", error_record)
np.save("loss_of_DGM_100.npy", loss_record)
np.save("error_of_DGM_100.npy", error_record)
time_end = time.time()
print('total time is: ', time_end-time_start, 'seconds')
# -
torch.save(net.state_dict(), 'net_params_DGM.pkl')
|
code/Results1D/visualization loss landscape/roughness index and landscape/small internal/TVDFCNetDGM/fcnetDGMtrain.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time Series Exercise -
#
# ### Follow along with the instructions in bold. Watch the solutions video if you get stuck!
# ## The Data
#
# ** Source: https://datamarket.com/data/set/22ox/monthly-milk-production-pounds-per-cow-jan-62-dec-75#!ds=22ox&display=line **
#
# **Monthly milk production: pounds per cow. Jan 62 - Dec 75**
# ** Import numpy pandas and matplotlib **
# ** Use pandas to read the csv of the monthly-milk-production.csv file and set index_col='Month' **
# ** Check out the head of the dataframe**
# ** Make the index a time series by using: **
#
# milk.index = pd.to_datetime(milk.index)
# ** Plot out the time series data. **
# ___
# ### Train Test Split
#
# ** Let's attempt to predict a year's worth of data. (12 months or 12 steps into the future) **
#
# ** Create a test train split using indexing (hint: use .head() or tail() or .iloc[]). We don't want a random train test split, we want to specify that the test set is the last 12 months of data is the test set, with everything before it is the training. **
# ### Scale the Data
#
# ** Use sklearn.preprocessing to scale the data using the MinMaxScaler. Remember to only fit_transform on the training data, then transform the test data. You shouldn't fit on the test data as well, otherwise you are assuming you would know about future behavior!**
# ## Batch Function
#
# ** We'll need a function that can feed batches of the training data. We'll need to do several things that are listed out as steps in the comments of the function. Remember to reference the previous batch method from the lecture for hints. Try to fill out the function template below, this is a pretty hard step, so feel free to reference the solutions! **
def next_batch(training_data,batch_size,steps):
    """
    INPUT: Data, Batch Size, Time Steps per batch
    OUTPUT: A tuple of y time series results. y[:,:-1] and y[:,1:]

    NOTE: body intentionally left empty — this is the exercise template.
    """
    # STEP 1: Use np.random.randint to set a random starting point index for the batch.
    # Remember that each batch needs to have the same number of steps in it.
    # This means you should limit the starting point to len(data)-steps
    # STEP 2: Now that you have a starting index you'll need to index the data from
    # the random start to random start + steps + 1. Then reshape this data to be (1,steps+1)
    # STEP 3: Return the batches. You'll have two batches to return y[:,:-1] and y[:,1:]
    # You'll need to reshape these into tensors for the RNN to .reshape(-1,steps,1)
# ## Setting Up The RNN Model
# ** Import TensorFlow **
# ### The Constants
#
# ** Define the constants in a single cell. You'll need the following (in parenthesis are the values I used in my solution, but you can play with some of these): **
# * Number of Inputs (1)
# * Number of Time Steps (12)
# * Number of Neurons per Layer (100)
# * Number of Outputs (1)
# * Learning Rate (0.03)
# * Number of Iterations for Training (4000)
# * Batch Size (1)
# ** Create Placeholders for X and y. (You can change the variable names if you want). The shape for these placeholders should be [None,num_time_steps-1,num_inputs] and [None, num_time_steps-1, num_outputs] The reason we use num_time_steps-1 is because each of these will be one step shorter than the original time steps size, because we are training the RNN network to predict one point into the future based on the input sequence.**
# ** Now create the RNN Layer, you have complete freedom over this, use tf.contrib.rnn and choose anything you want, OutputProjectionWrappers, BasicRNNCells, BasicLSTMCells, MultiRNNCell, GRUCell etc... Keep in mind not every combination will work well! (If in doubt, the solutions used an Outputprojection Wrapper around a basic LSTM cell with relu activation.**
# ** Now pass in the cells variable into tf.nn.dynamic_rnn, along with your first placeholder (X)**
# ### Loss Function and Optimizer
#
# ** Create a Mean Squared Error Loss Function and use it to minimize an AdamOptimizer, remember to pass in your learning rate. **
# ** Initialize the global variables **
# ** Create an instance of tf.train.Saver() **
# ### Session
#
# ** Run a tf.Session that trains on the batches created by your next_batch function. Also add an a loss evaluation for every 100 training iterations. Remember to save your model after you are done training. **
with tf.Session() as sess:
# CODE HERE!
# Save Model for Later
saver.save(sess, "./ex_time_series_model")
# ______
# ## Predicting Future (Test Data)
# ** Show the test_set (the last 12 months of your original complete data set) **
# +
# CODE HERE
# -
# ** Now we want to attempt to predict these 12 months of data, using only the training data we had. To do this we will feed in a seed training_instance of the last 12 months of the training_set of data to predict 12 months into the future. Then we will be able to compare our generated 12 months to our actual true historical values from the test set! **
# # Generative Session
# ### NOTE: Recall that our model is really only trained to predict 1 time step ahead, asking it to generate 12 steps is a big ask, and technically not what it was trained to do! Think of this more as generating new values based off some previous pattern, rather than trying to directly predict the future. You would need to go back to the original model and train the model to predict 12 time steps ahead to really get a higher accuracy on the test data. (Which has its limits due to the smaller size of our data set)
# ** Fill out the session code below to generate 12 months of data based off the last 12 months of data from the training set. The hardest part about this is adjusting the arrays with their shapes and sizes. Reference the lecture for hints.**
with tf.Session() as sess:
# Use your Saver instance to restore your saved rnn time series model
saver.restore(sess, "./ex_time_series_model")
# CODE HERE!
# ** Show the result of the predictions. **
# ** Grab the portion of the results that are the generated values and apply inverse_transform on them to turn them back into milk production value units (lbs per cow). Also reshape the results to be (12,1) so we can easily add them to the test_set dataframe.**
# ** Create a new column on the test_set called "Generated" and set it equal to the generated results. You may get a warning about this, feel free to ignore it.**
# ** View the test_set dataframe. **
# ** Plot out the two columns for comparison. **
# # Great Job!
#
# Play around with the parameters and RNN layers, does a faster learning rate with more steps improve the model? What about GRU or BasicRNN units? What if you train the original model to not just predict one timestep ahead into the future, but 3 instead? Lots of stuff to add on here!
|
04-Recurrent-Neural-Networks/02-Time-Series-Exercise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''otopy38'': conda)'
# name: python3
# ---
# # Schema from example
oto_data = {
"_id" : "5e3868eb61e48d0017ab68b0",
"kind" : "OutlierModel",
"bt" : 1580755178368101,
"tt" : 0,
"modelId" : "5c1a7b648a6df285f82bdd4f",
"distance" : 204311,
"closest_centroid" : 82,
"outlier_score" : 90.9653,
"centroid_rareness" : 4.26592,
"is_outlier" : 1,
"source" : "40FWF4_R-",
"account" : "analogdevices_dev"
}
# # Using schema
# ## Validation of a schema
# +
from schema import Schema, And, Use, Optional, SchemaError
schema = Schema([{'name': And(str, len),
'age': And(Use(int), lambda n: 18 <= n <= 99),
Optional('gender'): And(str, Use(str.lower),
lambda s: s in ('squid', 'kid'))}])
data = [{'name': 'Sue', 'age': '28', 'gender': 'Squid'},
{'name': 'Sam', 'age': '42'},
{'name': 'Sacha', 'age': '20', 'gender': 'KID'}]
# +
validated = schema.validate(data)
assert validated == [{'name': 'Sue', 'age': 28, 'gender': 'squid'},
{'name': 'Sam', 'age': 42},
{'name': 'Sacha', 'age' : 20, 'gender': 'kid'}]
# -
"""{'_id': str,
'kind': str,
'bt': int,
'tt': int,
'modelId': str,
'distance': int,
'closest_centroid': int,
'outlier_score': float,
'centroid_rareness': float,
'is_outlier': int,
'source': str,
'account': str}
"""
schema_otosense= Schema([{'_id': str,
'kind':Use(str),
'bt': Use(int),
'tt':Use(int),
'modelId':Use(str),
'distance':Use(int),
'closest_centroid':Use(int),
'outlier_score':Use(int),
'centroid_rareness':Use(float),
'is_outlier':Use(bool),
'source':Use(str),
'account':And(str,
lambda s: s in ('analogdevices_dev', 'analogdevices_prod'))
}])
schema_otosense.validate([oto_data])
# # Validation using Great expectations
import great_expectations as ge
import pandas as pd
data_list = [{'_id': '5e3868eb61e48d0017ab68b0',
'kind': 'OutlierModel',
'bt': 1580755178368101,
'tt': 0,
'modelId': '5c1a7b648a6df285f82bdd4f',
'distance': 204311,
'closest_centroid': 82,
'outlier_score': 90,
'centroid_rareness': 4.26592,
'is_outlier': True,
'source': '40FWF4_R-',
'account': 'analogdevices_dev'},
{'_id': '4e3868eb61e48d0017ab6898',
'kind': 'OutlierModel',
'bt': 1580755178368156,
'tt': 0,
'modelId': '5c1a7b648a6df285f82bdd4f',
'distance': 204316,
'closest_centroid': 80,
'outlier_score': 10,
'centroid_rareness': 4.26592,
'is_outlier': True,
'source': '40FWF4_R-',
'account': 'analogdevices_dev'},
{'_id': '3e3868eb61e48d0017ab6800',
'kind': 'OutlierModel',
'bt': 1580755178368101,
'tt': 0,
'modelId': '5c1a7b648a6df285f82bdd4f',
'distance': 20400,
'closest_centroid': 17,
'outlier_score': 9,
'centroid_rareness': 3.1,
'is_outlier': True,
'source': '40FWF4_R-',
'account': 'analogdevices_prod'}
]
df_csv = pd.DataFrame(data_list).to_csv('data_great_expect.csv')
df=ge.read_csv('data_great_expect.csv')
df
# +
feature_columns = ['kind', 'bt', 'tt','modelId', 'distance','closest_centroid']
for col in feature_columns:
df.expect_column_to_exist(col)
df.expect_column_values_to_be_of_type('kind', 'str')
# -
df.get_expectation_suite()
# # Generating data from a model
from sdv import SDV #synthetic data vault
sdv = SDV()
df_data = pd.DataFrame(data_list)
sdv.fit(tables =[df_data])
#
df_data
|
viable/schema_validation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense , Dropout , Activation , Conv2D , MaxPooling2D,Flatten
from keras.utils import plot_model
from tensorflow.keras.callbacks import TensorBoard
# +
import pickle
pickle_in = open("./Built_Dataset/X_50.pickle" , "rb")
X = pickle.load(pickle_in)
pickle_in = open("./Built_Dataset/Y_50.pickle" , "rb")
y = pickle.load(pickle_in)
X = X/255.0
# +
import time
NAME = "Cats-vs-Dogs-CNN-64*3-Dense-1-{}".format(int(time.time()))
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
# +
model = Sequential()
model.add(Conv2D(64 , (3,3) , input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64 , (3,3) , input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64 , (3,3) , input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer = 'adam',
metrics=['accuracy'])
# -
#Saving model details
model.summary()
plot_model(model, to_file='model.png' , show_shapes=True)
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
# +
from sklearn.utils import shuffle
# y = np.array(y)
X, y = shuffle(X, y)
print(X.shape)
print(len(y))
# +
#creating a small dataset due resource exhausting error
lim = 15000
X_small = X[:lim,:,:,:]
y_small = []
for i in range(lim):
y_small.append(y[i])
print(X_small.shape)
len(y_small)
# -
import matplotlib.pyplot as plt
history = model.fit(X_small, y_small, validation_split=0.3, epochs=3, batch_size=32, verbose=1, callbacks=[tensorboard])
model_name = NAME + '.model'
model.save(model_name)
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
|
.ipynb_checkpoints/Model-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# +
#python packages pd
import numpy as np
import matplotlib.pyplot as plt
#machine learning packages
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D, Bidirectional, Dropout
from keras.layers import CuDNNLSTM
from keras.utils.np_utils import to_categorical
# from keras.callbacks import EarlyStopping
from keras.layers import Dropout
from sklearn.model_selection import train_test_split
import importlib
#custom python scripts
import generator
import utilis
# -
# Check that you are running GPU's
utilis.GPU_checker()
utilis.aws_setup()
# %%time
# generators
importlib.reload(generator)
training_generator = generator.Keras_DataGenerator( dataset='train', w_hyp=False)
validation_generator = generator.Keras_DataGenerator(dataset='valid', w_hyp= False)
# +
#Constants
# ARE YOU LOADING A MODEL?
VOCAB_SIZE = 1254
INPUT_LENGTH = 1000
EMBEDDING_DIM = 256
# # model
def build_model(vocab_size, embedding_dim, input_length):
    "Embedding -> SpatialDropout -> BiLSTM(128) -> Dense(256, relu) -> softmax over 41 classes."
    layers = [
        Embedding(vocab_size, embedding_dim, input_length=input_length),
        SpatialDropout1D(0.4),
        Bidirectional(CuDNNLSTM(128)),
        Dropout(0.4),
        Dense(256, activation='relu'),
        Dropout(0.4),
        Dense(41, activation='softmax'),
    ]
    net = Sequential()
    for layer in layers:
        net.add(layer)
    return net
model = build_model(VOCAB_SIZE, EMBEDDING_DIM, INPUT_LENGTH)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# print(model.summary())
# +
## WARNING IF YOU CONTAIN MULTIPLE CORE GPUS
# NOTE unclear if these causes a speed up
# @TANCREDI, I HAVE TREID THIS ON JUST GOALS AND DOES NOT SEEM TO CAUSE A SPEED UP MAY
#CAUSE A SPEED UP IF WE USE HYPOTHESIS
# unclear if this speeds things up
# from keras.utils import multi_gpu_model
# model_GPU = multi_gpu_model(model, gpus= 4)
# model_GPU.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# +
## ARE YOU LOADING A MODEL IF YES RUN TEH FOLLOWING LINES
# from keras.models import model_from_json
# json_file = open('model.json', 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# loaded_model = model_from_json(loaded_model_json)
# # load weights into new model
# loaded_model.load_weights("model.h5")
# print("Loaded model from disk")
# # REMEMBER TO COMPILE
# loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# +
#overwriting model
# model = loaded_model
# -
print(model.summary())
# %%time
n_epochs = 6
history = model.fit_generator(generator=training_generator,
# validation_data=validation_generator,
verbose=1,
use_multiprocessing= False,
epochs=n_epochs)
# FOR SAVING MODEL
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
# +
#WARNING_DECIDE_HOW_TO_NAME_LOG
#descriptionofmodel_personwhostartsrun
#e.g. LSTM_128encoder_etc_tanc
LOSS_FILE_NAME = "SIMPLE_LSTM_SMALL_TANK"
#WARNING NUMBER 2 - CURRENTLY EVERYTIME YOU RERUN THE CELLS BELOW THE FILES WITH THOSE NAMES GET WRITTEN OVER
# -
# save history - WARNING FILE NAME
utilis.history_saver_bad(history, LOSS_FILE_NAME)
# +
# read numpy array
# history_toplot = np.genfromtxt("training_logs/"+ LOSS_FILE_NAME +".csv")
# plt.plot(history_toplot)
# plt.title('Loss history')
# plt.show()
# -
# %%time
n_epochs = 1
history = loaded_model.fit_generator(generator=training_generator,
validation_data=validation_generator,
verbose=1,
use_multiprocessing= False,
epochs=n_epochs)
|
deepmath/deephol/train/A_Simple_GH_S_1/.ipynb_checkpoints/clean-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Data Analysis: Haberman Cancer Survival Data
# ## The Header
# ## Table of Content
# 1. [Introduction](#the-header)
# 2. Exploratory Data Analysis
# ## <a href="#the-header">1. Introduction</a>
# [Link to Header](#the-header)
# ## The Header
# [Link to YouTube](https://www.youtube.com)
# + [markdown] language="html"
# # <a href="youtube.com">Showing Text</a>
# -
|
eda-haberman-dataset.ipynb
|
;; ---
;; jupyter:
;; jupytext:
;; text_representation:
;; extension: .scm
;; format_name: light
;; format_version: '1.5'
;; jupytext_version: 1.14.4
;; kernelspec:
;; display_name: Calysto Scheme 3
;; language: scheme
;; name: calysto_scheme
;; ---
;; **Example**
;; Recursive factorial: n! = n * (n-1)!  (linear recursive process).
(define (factorial n)
  (if (= n 1)
      1
      (* n (factorial (- n 1)))))

;; Iterative factorial: accumulate the running product in fact-iter
;; (linear iterative process; constant space with tail calls).
(define (factorial n)
  (define (fact-iter product counter max-count)
    (if (> counter max-count)
        product
        (fact-iter (* counter product)
                   (+ counter 1)
                   max-count)))
  ;; BUG FIX: the original call was (fact-iter 1 1n) -- "1n" is a single
  ;; unbound token and only two arguments were passed; fact-iter takes three.
  (fact-iter 1 1 n))
;; **Exercise 1.10**
;; Ackermann's function: total for non-negative integer arguments,
;; grows explosively in x.
(define (A x y)
  (cond ((= y 0) 0)
        ((= x 0) (* 2 y))
        ((= y 1) 2)
        (else (A (- x 1) (A x (- y 1))))))
(A 1 10)  ;; 2^10 = 1024
(A 2 4)   ;; 2^2^2^2 = 65536
(A 3 3)   ;; 65536
;; Mathematical characterisations of (A i n):
(define (f n)(A 0 n))  ;; f(n) = 2n
(define (g n)(A 1 n))  ;; g(n) = 2^n for n >= 1 (g(0) = 0)
(define (h n)(A 2 n))  ;; h(n) = 2^2^...^2 with n twos, for n >= 1
(f 3)
(g 3)
(h 3)
;; **Example**
;; Tree-recursive Fibonacci: exponential time in n.
(define (fib n)
  (cond ((= n 0) 0)
        ((= n 1) 1)
        (else (+ (fib (- n 1))
                 (fib (- n 2))))))
;; Linear iterative Fibonacci: a and b hold consecutive Fibonacci numbers.
(define (fib n)
  (fib-iter 1 0 n))
(define (fib-iter a b count)
  (if (= count 0)
      b
      (fib-iter (+ a b) a (- count 1))))
;; **Example**
;; Count the ways to make change for `amount` from 5 coin kinds.
(define (count-change amount) (cc amount 5))
;; ways = ways without the current largest kind
;;      + ways after spending one coin of that kind
(define (cc amount kinds-of-coins)
  (cond ((= amount 0) 1)
        ((or (< amount 0) (= kinds-of-coins 0)) 0)
        (else (+ (cc amount
                     (- kinds-of-coins 1))
                 (cc (- amount
                        (first-denomination kinds-of-coins))
                     kinds-of-coins)))))
;; Denomination (in cents) of each coin kind.
(define (first-denomination kinds-of-coins)
  (cond ((= kinds-of-coins 1) 1)
        ((= kinds-of-coins 2) 5)
        ((= kinds-of-coins 3) 10)
        ((= kinds-of-coins 4) 25)
        ((= kinds-of-coins 5) 50)))
(count-change 100)
;; **Exercise 1.11**
;; f(n) = n for n < 3, else f(n-1) + 2*f(n-2) + 3*f(n-3)  (tree recursion).
(define (f-recursive n)
  (if (< n 3)
      n
      (+ (f-recursive (- n 1)) (* (f-recursive (- n 2)) 2) (* (f-recursive (- n 3)) 3))))
(f-recursive 10)
;; +
;; Iterative version: a, b, c slide along three consecutive values of f.
(define (f-iterative n)
  (f-iter 0 1 2 n))
(define (f-iter a b c count)
  (cond ((= count 0) a)
        ((= count 1) b)
        ((= count 2) c)
        (else (f-iter b c (+ (* 3 a) (* 2 b) (* 1 c)) (- count 1)))))
;; -
(f-iterative 10)
;; **Exercise 1.12**
;; Pascal's triangle: element y of row x (1-indexed; edge elements are 1).
(define (pascal-triangle x y)
  (cond ((= y 1) 1)
        ((= y x) 1)
        (else (+ (pascal-triangle (- x 1) (- y 1)) (pascal-triangle (- x 1) (+ y 0))))))
(pascal-triangle 10 6)
;; **Exercise 1.15**
(define (cube x) (* x x x))
;; One reduction step of the approximation: sin 3x = 3 sin x - 4 sin^3 x.
(define (p x) (- (* 3 x) (* 4 (cube x))))
;; Approximate sine by dividing the angle by 3 until it is small enough
;; (|angle| <= 0.1) to use sin x ~ x, then applying p once per division.
(define (sine angle)
  (if (not (> (abs angle) 0.1))
      angle
      (p (sine (/ angle 3.0)))))
(sine 12.15)
;; **Example**
;; Linear recursive exponentiation: b^n with n multiplications.
(define (expt b n)
  (if (= n 0)
      1
      (* b (expt b (- n 1)))))
;; Iterative exponentiation with an accumulator (constant space).
(define (expt b n)
  (expt-iter b n 1))
(define (expt-iter b counter product)
  (if (= counter 0)
      product
      (expt-iter b
                 (- counter 1)
                 (* b product))))
;; Fast exponentiation by successive squaring: O(log n) multiplications.
;; BUG FIX: the even branch called the misspelled, unbound procedure
;; "sqare"; it now calls square (defined here as well, so the procedure
;; works even before the later definition of square in this file).
(define (square x) (* x x))

(define (fast-expt b n)
  (cond ((= n 0) 1)
        ((even? n) (square (fast-expt b (/ n 2))))
        (else (* b (fast-expt b (- n 1))))))
;; even? in terms of remainder (shadows the built-in, as in SICP).
(define (even? n)
  (= (remainder n 2) 0))
;; **Exercise 1.18**
;; Multiplication as repeated addition (shadows the built-in *; the
;; recursive reference is to this definition itself).
(define (* a b)
  (if (= b 0)
      0
      (+ a (* a (- b 1)))))
;; O(log b) multiplication via halving and doubling.
(define (fast-product a b)
  (cond ((= b 0) 0)
        ((even? b)(double (fast-product a (/ b 2))))
        (else (+ a (fast-product a (- b 1))))))
(define (even? n)
  (= (remainder n 2) 0))
(define (double n)
  (+ n n))
(fast-product 5 3)
;; **Exercise 1.19**
;; Fibonacci in O(log n) steps: slots c..f carry the 2x2 state transform,
;; which is squared whenever count is even.
(define (fib n)
  (fib-iter 1 0 1 1 1 0 n))
(define (fib-iter a b c d e f count)
  (cond ((= count 0 ) b)
        ((even? count)
         ;; square the transform, halve the count
         (fib-iter a
                   b
                   (+ (* c c) (* d e))
                   (+ (* c d) (* d f))
                   (+ (* e c) (* f e))
                   (+ (* e d) (* f f))
                   (/ count 2)))
        ;; odd count: apply the transform once to (a, b)
        (else (fib-iter (+ (* a c) (* b d))
                        (+ (* a e) (* f b))
                        c
                        d
                        e
                        f
                        (- count 1)))))
(fib 30)
;; **Example**
;; Euclid's algorithm: gcd(a, b) = gcd(b, a mod b).
(define (gcd a b)
  (if (= b 0)
      a
      (gcd b (remainder a b))))
;; Smallest divisor of n (>= 2) by trial division up to sqrt(n).
(define (smaller-divisor n)
  (find-divisor n 2))
(define (find-divisor n test-divisor)
  (cond (( > (square test-divisor) n) n)
        ((divides? test-divisor n) test-divisor)
        (else (find-divisor n (+ test-divisor 1)))))
(define (divides? a b)
  (= (remainder b a) 0))
(define (square n) (* n n))
;; n is prime iff its smallest divisor is n itself.
(define (prime? n)
  (= n (smaller-divisor n)))
;; base^exp mod m by successive squaring; intermediates stay below m^2.
(define (expmod base exp m)
  (cond ((= exp 0) 1)
        ((even? exp)
         (remainder (square (expmod base (/ exp 2) m)) m))
        (else
         (remainder (* base (expmod base (- exp 1) m)) m))))
;; One Fermat round: does a^n mod n = a for a random a in [1, n-1]?
(define (fermat-test n)
  (define (try-it a)
    (= (expmod a n n) a))
  (try-it (+ 1 (random (- n 1)))))
;; Probabilistic primality: `times` independent Fermat rounds.
(define (fast-prime? n times)
  (cond ((= times 0) #t)
        ((fermat-test n)(fast-prime? n (- times 1)))
        (else #f)))
;; 561 = 3*11*17 is a Carmichael number: it fools the Fermat test (#t).
(fast-prime? 561 10)
;; **Exercise 1.21**
(smaller-divisor 199)    ;; 199 (prime)
(smaller-divisor 1999)   ;; 1999 (prime)
(smaller-divisor 19999)  ;; 7
;; **Exercise 1.22**
;; Time prime? on n.  NOTE(review): relies on a `runtime` primitive
;; (MIT Scheme); confirm the Calysto kernel provides it.
(define (timed-prime-test n)
  (newline)
  (display n)
  (start-prime-test n (runtime)))
(define (start-prime-test n start-time)
  (if (prime? n)
      (report-prime (- (runtime) start-time))))
(define (report-prime elapsed-time)
  (display " *** ")
  (display elapsed-time))
|
ch01/section-1.2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to ITK Segmentation in SimpleITK Notebooks <a href="https://mybinder.org/v2/gh/InsightSoftwareConsortium/SimpleITK-Notebooks/master?filepath=Python%2F300_Segmentation_Overview.ipynb"><img style="float: right;" src="https://mybinder.org/badge_logo.svg"></a>
#
# <b>Goal</b>: To become familiar with basic segmentation algorithms available in ITK, and interactively explore their parameter space.
#
# Image segmentation filters process an image to partition it into (hopefully) meaningful regions. The output is commonly an image of integers where each integer can represent an object. The value 0 is commonly used for the background, and 1 ( sometimes 255) for a foreground object.
#
# +
from __future__ import print_function
# %matplotlib inline
import matplotlib.pyplot as plt
from ipywidgets import interact, FloatSlider
import SimpleITK as sitk
# Download data to work on
# %run update_path_to_download_script
from downloaddata import fetch_data as fdata
from myshow import myshow, myshow3d
# +
# Load the T1 and T2 atlas volumes (fetched on demand by fdata).
img_T1 = sitk.ReadImage(fdata("nac-hncma-atlas2013-Slicer4Version/Data/A1_grayT1.nrrd"))
img_T2 = sitk.ReadImage(fdata("nac-hncma-atlas2013-Slicer4Version/Data/A1_grayT2.nrrd"))
# To visualize the labels image in RGB we need an image with 0-255 range
img_T1_255 = sitk.Cast(sitk.RescaleIntensity(img_T1), sitk.sitkUInt8)
img_T2_255 = sitk.Cast(sitk.RescaleIntensity(img_T2), sitk.sitkUInt8)
myshow3d(img_T1)
# -
# ## Thresholding
#
# Thresholding is the most basic form of segmentation. It simply labels the pixels of an image based on the intensity range without respect to geometry or connectivity.
# Comparison operators on images return binary label images.
seg = img_T1>200
myshow(sitk.LabelOverlay(img_T1_255, seg), "Basic Thresholding")
# Foreground (value 1) = intensities within [100, 400].
seg = sitk.BinaryThreshold(img_T1, lowerThreshold=100, upperThreshold=400, insideValue=1, outsideValue=0)
myshow(sitk.LabelOverlay(img_T1_255, seg), "Binary Thresholding")
# ITK has a number of histogram based automatic thresholding filters including Huang, MaximumEntropy, Triangle, and the popular Otsu's method. These methods create a histogram then use a heuristic to determine a threshold value.
# +
# Otsu picks the threshold automatically from the intensity histogram.
otsu_filter = sitk.OtsuThresholdImageFilter()
otsu_filter.SetInsideValue(0)
otsu_filter.SetOutsideValue(1)
seg = otsu_filter.Execute(img_T1)
myshow(sitk.LabelOverlay(img_T1_255, seg), "Otsu Thresholding")
print(otsu_filter.GetThreshold() )
# -
# ## Region Growing Segmentation
#
# The first step of improvement upon the naive thresholding is a class of algorithms called region growing. This includes:
# <ul>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1ConnectedThresholdImageFilter.html">ConnectedThreshold</a></li>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1ConfidenceConnectedImageFilter.html">ConfidenceConnected</a></li>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1VectorConfidenceConnectedImageFilter.html">VectorConfidenceConnected</a></li>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1NeighborhoodConnectedImageFilter.html">NeighborhoodConnected</a></li>
# </ul>
#
# Earlier we used 3D Slicer to determine that index: (132,142,96) was a good seed for the left lateral ventricle.
seed = (132,142,96)
# Build a label image containing only the seed voxel, then dilate so the
# displayed initial segmentation is a small ball instead of one voxel.
seg = sitk.Image(img_T1.GetSize(), sitk.sitkUInt8)
seg.CopyInformation(img_T1)
seg[seed] = 1
seg = sitk.BinaryDilate(seg, [3]*3)
myshow(sitk.LabelOverlay(img_T1_255, seg), "Initial Seed")
# +
# Grow from the seed, accepting connected voxels with intensity in [100, 190].
seg = sitk.ConnectedThreshold(img_T1, seedList=[seed], lower=100, upper=190)
myshow(sitk.LabelOverlay(img_T1_255, seg), "Connected Threshold")
# -
# Improving upon this is the ConfidenceConnected filter, which uses the initial seed or current segmentation to estimate the threshold range.
# +
seg = sitk.ConfidenceConnected(img_T1, seedList=[seed],
                               numberOfIterations=1,
                               multiplier=2.5,
                               initialNeighborhoodRadius=1,
                               replaceValue=1)
myshow(sitk.LabelOverlay(img_T1_255, seg), "ConfidenceConnected")
# -
# Multi-channel variant: run on the composed (T1, T2) vector image.
img_multi = sitk.Compose(img_T1, img_T2)
seg = sitk.VectorConfidenceConnected(img_multi, seedList=[seed],
                                     numberOfIterations=1,
                                     multiplier=2.5,
                                     initialNeighborhoodRadius=1)
myshow(sitk.LabelOverlay(img_T2_255, seg))
# ## Fast Marching Segmentation
#
# The FastMarchingImageFilter implements a fast marching solution to a simple level set evolution problem (eikonal equation). In this example, the speed term used in the differential equation is provided in the form of an image. The speed image is based on the gradient magnitude and mapped with the bounded reciprocal $1/(1+x)$.
#
seed = (132,142,96)
# Speed image: the bounded reciprocal of the gradient magnitude is high
# inside homogeneous tissue and low at edges.
feature_img = sitk.GradientMagnitudeRecursiveGaussian(img_T1, sigma=.5)
speed_img = sitk.BoundedReciprocal(feature_img) # This is parameter free unlike the Sigmoid
myshow(speed_img)
# The output of the FastMarchingImageFilter is a <b>time-crossing map</b> that indicates, for each pixel, how much time it would take for the front to arrive at the pixel location.
fm_filter = sitk.FastMarchingBaseImageFilter()
fm_filter.SetTrialPoints([seed])
fm_filter.SetStoppingValue(1000)
fm_img = fm_filter.Execute(speed_img)
# Clamp arrival times above the stopping value for display.
myshow(sitk.Threshold(fm_img,
                      lower=0.0,
                      upper=fm_filter.GetStoppingValue(),
                      outsideValue=fm_filter.GetStoppingValue()+1))
# +
def fm_callback(img, time, z):
    """Overlay (img < time) as a label map on slice *z* of the T1 volume.

    *img* is the fast-marching time-crossing map; thresholding it at
    *time* shows the region the front has reached by that time.
    """
    seg = img < time  # fix: removed the stray trailing semicolon
    myshow(sitk.LabelOverlay(img_T1_255[:,:,z], seg[:,:,z]))

# Interactive browser over the threshold time and the axial slice index.
interact( lambda **kwargs: fm_callback(fm_img, **kwargs),
            time=FloatSlider(min=0.05, max=1000.0, step=0.05, value=100.0),
            z=(0,fm_img.GetSize()[2]-1))
# -
# ## Level-Set Segmentation
#
# There are a variety of level-set based segmentation filter available in ITK:
# <ul>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1GeodesicActiveContourLevelSetImageFilter.html">GeodesicActiveContour</a></li>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1ShapeDetectionLevelSetImageFilter.html">ShapeDetection</a></li>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1ThresholdSegmentationLevelSetImageFilter.html">ThresholdSegmentation</a></li>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1LaplacianSegmentationLevelSetImageFilter.html">LaplacianSegmentation</a></li>
# <li><a href="http://www.itk.org/Doxygen/html/classitk_1_1ScalarChanAndVeseDenseLevelSetImageFilter.html">ScalarChanAndVese</a></li>
# </ul>
#
# There is also a <a href="http://www.itk.org/Doxygen/html/group__ITKLevelSetsv4.html">modular Level-set framework</a> which allows composition of terms and easy extension in C++.
#
#
#
# First we create a label image from our seed.
# +
# Re-create the dilated single-voxel seed label image.
seed = (132,142,96)
seg = sitk.Image(img_T1.GetSize(), sitk.sitkUInt8)
seg.CopyInformation(img_T1)
seg[seed] = 1
seg = sitk.BinaryDilate(seg, [3]*3)
# -
# Use the seed to estimate a reasonable threshold range.
# +
stats = sitk.LabelStatisticsImageFilter()
stats.Execute(img_T1, seg)
# threshold range = mean +/- 3.5 sigma of the intensities under the seed label
factor = 3.5
lower_threshold = stats.GetMean(1)-factor*stats.GetSigma(1)
upper_threshold = stats.GetMean(1)+factor*stats.GetSigma(1)
print(lower_threshold,upper_threshold)
# -
# The signed distance map of the seed initialises the level set.
init_ls = sitk.SignedMaurerDistanceMap(seg, insideIsPositive=True, useImageSpacing=True)
lsFilter = sitk.ThresholdSegmentationLevelSetImageFilter()
lsFilter.SetLowerThreshold(lower_threshold)
lsFilter.SetUpperThreshold(upper_threshold)
lsFilter.SetMaximumRMSError(0.02)
lsFilter.SetNumberOfIterations(1000)
lsFilter.SetCurvatureScaling(.5)
lsFilter.SetPropagationScaling(1)
lsFilter.ReverseExpansionDirectionOn()
ls = lsFilter.Execute(init_ls, sitk.Cast(img_T1, sitk.sitkFloat32))
print(lsFilter)
# Positive level-set values are inside the segmented region.
myshow(sitk.LabelOverlay(img_T1_255, ls>0))
|
Python/300_Segmentation_Overview.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Meals Count Backend Utilities
#
# This notebook contains Python functionality to be incorporated into modules part of the [MealsCount](https://github.com/opensandiego/mealscount-backend) backend. **MealsCount** is an open-source project currently underway at [**OpenSanDiego**](https://opensandiego.org/), a [Code for America](https://www.codeforamerica.org/) brigade.
#
# **Note**: See [**[1]**](backend_utils_test.ipynb) for a detailed explanation of the core functionality, including intermediate outputs based on sample test data.
# +
import sys
import os
import pandas as pd
import numpy as np
from abc import ABC, abstractmethod
# +
#
# GLOBAL CONSTANTS (DO NOT MODIFY)
#
# these are used to identify data rows
# level 1 header
DATA_L1_HDR_KEYS = ['Non-Charter School(s)','Charter School(s)']
# level 2 header
# NOTE(review): 'Homeless(1)' below has no space before '(1)', but the
# recoding dict DATA_L2_HDR_DICT uses 'Homeless (1)' -- the two cannot
# both match the spreadsheet header; confirm against a real export.
DATA_L2_HDR_KEYS = ['School Code','School Name','Total Enrollment','Free & Reduced Meal Program: 181/182',
                    'Foster','Homeless(1)','Migrant Program: 135','Direct Certification',
                    'Unduplicated Eligible Free/Reduced Meal Counts','EL Funding Eligible (2)',
                    'Total Unduplicated FRPM/EL Eligible (3)']
# keywords for aggregated rows
DATA_SUM1_KEYS = ['TOTAL - Selected Schools']
DATA_SUM2_KEYS = ['TOTAL LEA']
# these are used for recoding header names/col values where applicable
DATA_L1_HDR_DICT = {'Non-Charter School(s)':'non-charter','Charter School(s)':'charter'}
DATA_L2_HDR_DICT = {'School Code':'school_code','School Name':'school_name','Total Enrollment':'total_enrolled',
                    'Free & Reduced Meal Program: 181/182':'frpm','Foster':'foster','Homeless (1)':'homeless',
                    'Migrant Program: 135':'migrant','Direct Certification':'direct_cert',
                    'Unduplicated Eligible Free/Reduced Meal Counts':'frpm_nodup',
                    'EL Funding Eligible (2)':'el', 'Total Unduplicated FRPM/EL Eligible (3)':'frpm_el_nodup'}
DATA_SUM_DICT = {'TOTAL - Selected Schools':'total','TOTAL LEA':'total'}
# these are used for recoding specific col values
# sentinel school code assigned to aggregate ('total') rows
INVALID_SCHOOL_CODE = 9999999
# school_type value for district-wide (LEA) aggregate rows
ALL_SCHOOL_TYPE = 'lea'
# these are used to identify metadata rows
METADATA_KEYS = ['Academic Year','View','As Of','Gender','School Type','School','User ID',
                 'Created Date','LEA']
# these are used to identify cols corresponding to
# metadata key-value pairs
METADATA_KEY_COLS = [0,2,4]
METADATA_VAL_COLS = [1,3,5]
# -
class mcSchoolDistInput(ABC):
    """
    Base class for school district input.

    Concrete subclasses parse a district data source and expose the
    parsed rows via to_frame() and file-level metadata via metadata().
    """
    # parsed data rows; NOTE(review): class-level defaults shared by all
    # instances until a parser rebinds them as instance attributes
    d_df = pd.DataFrame()
    # key/value metadata extracted from the source
    md_dict = {}
    def __init__(self):
        pass
    @abstractmethod
    def to_frame(self):
        """Return the parsed data as a pandas DataFrame."""
        pass
    @abstractmethod
    def metadata(self):
        """Return the parsed metadata as a dict."""
        pass
#
# Function to extract and return a dataframe from the input
# dataframe and the row and col indices specified. Additionally
# a column for school type is added with the specified value as
# well as a column (called 'index') with the original row indices.
#
def extract_df(df, row_idx, col_idx, school_type):
    """Build a labelled sub-frame from raw rows of *df*.

    Rows *row_idx* supply the data, row *col_idx* supplies the column
    names, *school_type* fills a new 'school_type' column, and the
    original row indices are preserved in an 'index' column.  Fully-NaN
    columns and rows are dropped.
    """
    header = df.loc[col_idx].values
    body = df.loc[row_idx, :].values
    ext_df = pd.DataFrame(data=body, columns=header)
    ext_df['school_type'] = school_type
    ext_df['index'] = row_idx
    # prune fully-empty columns first, then fully-empty rows
    ext_df = ext_df.dropna(axis=1, how='all')
    return ext_df.dropna(axis=0, how='all')
# FIXME: refactor code in here
def parseXL(self,xlfile):
    """Parse a CALPADS-style Excel export into data + metadata.

    Defined at module level with a *self* parameter and bound into
    mcXLSchoolDistInput as its private __parse method.  On success it
    sets self.d_df (one row per school plus 'total' aggregate rows) and
    self.md_dict (report-level key/value metadata); on any failure the
    exception propagates and neither attribute is touched.
    """
    try:
        xl = pd.ExcelFile(xlfile)
        # only the first worksheet is parsed
        tmpdf = xl.parse(xl.sheet_names[0])
        # get the indices for the rows where the L1 headers are present
        data_l1 = tmpdf.index[tmpdf[tmpdf.isin(DATA_L1_HDR_KEYS)].notnull().any(axis=1)].tolist()
        # get indices for rows where the L2 headers are present
        # these will indicate the beginning of data
        data_l2_begin = tmpdf.index[tmpdf[tmpdf.isin(DATA_L2_HDR_KEYS)].notnull().any(axis=1)].tolist()
        # get indices for the rows where the misc headers are present
        # these will indicate the end of data
        data_l2_end = tmpdf.index[tmpdf[tmpdf.isin(DATA_SUM1_KEYS)].notnull().any(axis=1)].tolist()
        # get indices for any other keys that are part of data
        data_other = tmpdf.index[tmpdf[tmpdf.isin(DATA_SUM2_KEYS)].notnull().any(axis=1)].tolist()
        # generate indices of non-data rows
        metadata_idx = list(range(0,data_l1[0]))
        n = len(DATA_L1_HDR_KEYS)
        # TODO: malformed files may have any of the keys missing resulting in
        # empty lists of indices (the index arithmetic below would then raise)
        for i in range(0,n):
            metadata_idx += list(range(data_l1[i]+1,data_l2_begin[i]))
            if i < n-1:
                metadata_idx += list(range(data_l2_end[i]+1,data_l1[i+1]))
        metadata_idx += list(range(data_l2_end[n-1]+1,data_other[0]))
        metadata_idx += list(range(data_other[-1]+1,tmpdf.shape[0]))
        # copy metadata rows to their own dataframe
        tmpdf_md = tmpdf.loc[metadata_idx,:]
        # clean-up: drop fully-empty columns, then rows
        tmpdf_md.dropna(axis=1,how='all',inplace=True)
        tmpdf_md.dropna(axis=0,how='all',inplace=True)
        # purge metadata rows (copied above) from the data df
        tmpdf.drop(metadata_idx,inplace=True)
        # clean-up
        tmpdf.dropna(axis=1,how='all',inplace=True)
        tmpdf.dropna(axis=0,how='all',inplace=True)
        # collect l1 header names
        # needed because we don't know the order in which the l1 headers occur in data
        df_l1 = tmpdf.loc[data_l1]
        df_l1 = df_l1.loc[:,df_l1.notnull().any()]
        l1_hdrs = df_l1.T.unstack().tolist()
        l1_hdrs = [s for s in l1_hdrs if str(s) != 'nan']
        # drop all l1 headers
        # we will be using a single-level index for the final df
        # l1 headers will be used to populate a categorical var instead
        tmpdf.drop(data_l1,inplace=True)
        # create a new dataframe for each school type
        df_list = []
        for i in range(0,n):
            row_idx = list(range(data_l2_begin[i]+1,data_l2_end[i]+1))
            col_idx = data_l2_begin[i]
            school_type = l1_hdrs[i]
            df_list.append(extract_df(tmpdf,row_idx,col_idx,school_type))
            # if this is the last of the school types we need to append
            # the aggregated lea rows. we do this as a separate df containing
            # data_other rows.
            if (i==n-1):
                row_idx = data_other
                df_list.append(extract_df(tmpdf,row_idx,col_idx,np.nan))
        # we have a df with all data for all school types including aggregated
        # rows at this point
        df_full = pd.concat(df_list,axis=0,ignore_index=True)
        # recode column names
        df_full.rename(columns=DATA_L2_HDR_DICT,inplace=True)
        # recode school_type
        df_full['school_type'] = df_full['school_type'].map(DATA_L1_HDR_DICT)
        # recode other fields: aggregate rows get school_name 'total' and a
        # sentinel school_code
        cond = df_full['index'].isin(data_l2_end + data_other)
        df_full.loc[cond,'school_name'] = df_full[cond]['school_code'].map(DATA_SUM_DICT)
        df_full.loc[cond,'school_code'] = INVALID_SCHOOL_CODE
        cond = df_full['index'].isin(data_other)
        df_full.loc[cond,'school_type'] = ALL_SCHOOL_TYPE
        df_full.drop(['index'],axis=1,inplace=True)
        # re-arrange cols to original order
        df_full = df_full[list(DATA_L2_HDR_DICT.values()) + ['school_type']]
        #
        # METADATA
        #
        # add appropriate prefix and suffix to metadata keys
        md_keys = [' ' + s + ':' for s in METADATA_KEYS]
        # get indices for rows where the metadata keywords are present
        md_idx = tmpdf_md.index[tmpdf_md[tmpdf_md.isin(md_keys)].notnull().any(axis=1)].tolist()
        # extract non-null cols only for those rows containing metadata keys
        tmpdf_md = tmpdf_md.loc[md_idx,:]
        tmpdf_md.dropna(axis=1,how='all',inplace=True)
        tmpdf_md.dropna(axis=0,how='all',inplace=True)
        tmpdf_md.columns = list(range(0,tmpdf_md.shape[1]))
        # extract metadata keys: strip, lower-case, snake_case
        md_keys = list(tmpdf_md.loc[:,METADATA_KEY_COLS].unstack().values)
        md_keys = list(map(str.strip,md_keys))
        md_keys = list(map(str.lower,md_keys))
        md_keys = [s.replace(' ','_') for s in md_keys]
        # drop the trailing ':' from each key
        md_keys = [s[:-1] for s in md_keys]
        # extract metadata values (lower-cased when they are strings)
        md_vals = list(tmpdf_md.loc[:,METADATA_VAL_COLS].unstack().values)
        md_vals = [s.lower() if isinstance(s, str) else s for s in md_vals]
        md_dict = dict(zip(md_keys, md_vals))
        # store only at the end when we have successfully completed all steps
        # for both data and metadata
        self.d_df = df_full
        self.md_dict = md_dict
    except Exception as e:
        # NOTE(review): `raise e` re-raises the same exception; a bare
        # `raise` would preserve the original traceback exactly.
        raise e
class mcXLSchoolDistInput(mcSchoolDistInput):
    """
    Implementation for MealsCount Excel format school district input. Expects input
    to be a file stored on the backend.
    """
    def __init__(self, datafile):
        # parse eagerly: a malformed file surfaces here, not at first use
        mcSchoolDistInput.__init__(self)
        self.__datafile = datafile
        try:
            self.__parse(self.__datafile)
        except Exception as e:
            raise e
    def to_frame(self):
        # parsed rows (set by parseXL on success)
        return self.d_df
    def metadata(self):
        # report-level key/value metadata (set by parseXL on success)
        return self.md_dict
    # Bind the module-level parseXL as a private method
    # (name-mangled to _mcXLSchoolDistInput__parse).
    __parse = parseXL
# ### Usage
#
# Below code fragments demonstrate the usage of the above functionality.
# +
# NOTE(review): CWD is unused below -- os.path.join uses DATADIR only,
# so the sample file is resolved relative to the current directory anyway.
CWD = os.getcwd()
DATADIR = "data"
DATAFILE = "calpads_sample_data.xlsx"
# -
# Parse the sample workbook and inspect the resulting frame.
data_in = mcXLSchoolDistInput(os.path.join(DATADIR,DATAFILE))
df = data_in.to_frame()
df.head()
df.tail()
# aggregate rows are tagged with school_name == 'total'
df[df['school_name']=='total']
metadata = data_in.metadata()
print(metadata)
# ### TODO
#
# * Refactor ParseXL functionality
# * Add API for error status ?
# * Profile parser for large datasets ?
|
sandbox/backend_utils.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1-3.1 Intro Python
# ## Functions Arguments & Parameters
# - **Creating a simple Function with a parameter**
# - Exploring Functions with `return` values
# - Creating Functions with multiple parameters
# - Sequence in python
#
# -----
#
# ><font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font>
# - **create functions with a parameter**
# - create functions with a `return` value
# - create functions with multiple parameters
# - use knowledge of sequence in coding tasks
# - Use coding best practices
# #
# <font size="6" color="#00A0B2" face="verdana"> <B>Concept</B></font>
# ## Calling Functions with Arguments
# Functions are used for code tasks that are intended to be reused
#
# []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/621d10f8-23d5-4571-b0fd-aa12b0de98d8/Unit1_Section3.1-function-arguments.vtt","srclang":"en","kind":"subtitles","label":"english"}])
#
# Python allows us to create **User Defined Functions** and provides many **Built-in Functions** such as **`print()`**
# - **`print()`** can be called using arguments (or without) and sends text to standard output, such as the console.
# - **`print()`** uses **Parameters** to define the variable Arguments that can be passed to the Function.
# - **`print()`** defines multiple string/numbers parameters which means we can send a long list of Arguments to **`print()`**, separated by commas.
# - **`print()`** can also be called directly with just its name and empty parentheses and it will return a blank line to standard output
# #
# <font size="6" color="#00A0B2" face="verdana"> <B>Examples</B></font>
# print() accepts any number of comma-separated arguments.
print('Hello World!', 'I am sending string arguments to print ')
student_age = 17
student_name = "<NAME>"
print(student_name,'will be in the class for',student_age, 'year old students.')
print("line 1")
print("line 2")
# line 3 is an empty return - the default when no arguments
print()
print("line 4")
# #
# <font size="6" color="#B24C00" face="verdana"> <B>Task 1</B></font>
# ## Passing Arguments to `print()`
# ### Many Arguments can be passed to print
#
# - update the print statement to use **`print()`** with **8** or more arguments
#[ ] increase the number of arguments used in print() to 8 or more
student_age = 17
student_name = "<NAME>"
print(student_name,'will be in the class for',student_age, 'year old students.')
print()
print()
# solution: a single print() call with more than 8 arguments
print(student_name,'will be in the class for',student_age, 'year old students.',
     student_name, 'loves to learn about math', 'but at the age of', student_age,
     'he still likes to play with playdough.')
# #
# <font size="6" color="#00A0B2" face="verdana"> <B>Concept</B></font>
# ## Create a simple Function
# Creating user defined functions is at the core of computer programming. Functions enable code reuse and make code easier to develop and maintain.
# []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/35458114-6211-4d10-85bc-7c4eb7834c52/Unit1_Section3.1-Simplest_Functions.vtt","srclang":"en","kind":"subtitles","label":"english"}])
# ### basics of a user defined function
# - define a function with **`def`**
# - use indentation (4 spaces)
# - define parameters
# - optional parameters
# - **`return`** values (or none)
# - function scope (basics defaults)
#
# ### `def some_function():`
# use the **`def`** statement when creating a **function**
# - use a function name that **starts with a letter** or underscore (usually a lower-case letter)
# - function names can contain **letters, numbers or underscores**
# - parenthesis **()** follow the function name
# - a colon **:** follows the parenthesis
# - the code for the function is indented under the function definition (use 4 spaces for this course)
#
# ```python
# def some_function():
# #code the function tasks indented here
# ```
# The **end of the function** is denoted by returning to **no indentation**
# #
# <font size="6" color="#00A0B2" face="verdana"> <B>Examples</B></font>
# defines a function named say_hi
def say_hi():
    """Print a greeting followed by a farewell."""
    for line in ("Hello there!", "goodbye"):
        print(line)
# define three_three
def three_three():
    """Print the number 33."""
    print(33)
#PRACTICE
def goodbye_yes():
    """Print the word 'goodbye'."""
    print('goodbye')
# #
# <font size="6" color="#00A0B2" face="verdana"> <B>Concept</B></font>
# ## Call a function by name
# Call a simple function using the function name followed by parenthesis. For instance, calling print is
# **`print()`**
# ##
# <font size="6" color="#00A0B2" face="verdana"> <B>Examples</B></font>
# +
# Program defines and calls the say_hi & three_three functions
# [ ] review and run the code
def say_hi():
    print("Hello there!")
    print("goodbye")
# end of indentation ends the function
# define three_three
def three_three():
    print(33)
# calling the functions
say_hi()
# blank separator line between the two outputs
print()
three_three()
# -
# #
# <font size="6" color="#B24C00" face="verdana"> <B>Task 2</B></font>
# ## Define and call a simple function `yell_it()`
# ### `yell_it()` prints the phrase with "!" concatenated to the end
# - takes no arguments
# - indented function code does the following
#   - define a variable called **`phrase`** and initialize it with a short *phrase*
# - prints **`phrase`** as all upper-case letters followed by "!"
# - call `yell_it` at the bottom of the cell after the function **`def`** (**Tip:** no indentation should be used)
# +
#[ ] define (def) a simple function called yell_it() and call the function
def yell_it():
    """Print a short phrase in all upper-case letters followed by '!'."""
    phrase = "I did it"
    print((phrase + "!").upper())

yell_it()
# -
# #
# <font size="6" color="#00A0B2" face="verdana"> <B>Concept</B></font>
# ## Functions that have Parameters
# []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/c84008fa-2ec9-4e4b-8b6b-15b9063852a1/Unit1_Section3.1-funct-parameter.vtt","srclang":"en","kind":"subtitles","label":"english"}])
# **`print()`** and **`type()`** are examples of built-in functions that have **Parameters** defined
#
# **`type()`** has a parameter for a **Python Object** and sends back the *type* of the object
#
# an **Argument** is a value given for a parameter when calling a function
# - **`type`** is called providing an **Argument** - in this case the string *"Hello"*
# ```python
# type("Hello")
# ```
#
# ## Defining Function Parameters
# - Parameters are defined inside of the parenthesis as part of a function **`def`** statement
# - Parameters are typically copies of objects that are available for use in function code
# ```python
# def say_this(phrase):
# print(phrase)
# ```
#
# ## Function can have default Arguments
# - Default Arguments are used if no argument is supplied
# - Default arguments are assigned when creating the parameter list
# ```python
# def say_this(phrase = "Hi"):
# print(phrase)
# ```
# ##
# <font size="6" color="#00A0B2" face="verdana"> <B>Example</B></font>
# +
# yell_this() yells the string Argument provided
def yell_this(phrase):
    """Print *phrase* upper-cased with '!' appended."""
    shout = phrase.upper() + "!"
    print(shout)

# call function with a string
yell_this("It is time to save the notebook")
# +
# use a default argument
def say_this(phrase = "Hi"):
    """Print *phrase*; prints 'Hi' when called with no argument."""
    print(phrase)

say_this()
say_this("Bye")
# -
# #
# <font size="6" color="#B24C00" face="verdana"> <B>Task 3</B></font>
#
# ## Define `yell_this()` and call with variable argument
# - define variable **`words_to_yell`** as a string gathered from user `input()`
# - Call **`yell_this()`** with **`words_to_yell`** as argument
# - get user input() for the string words_to_yell
# [ ] define yell_this()
# [ ] get user input in variable words_to_yell
# [ ] call yell_this function with words_to_yell as argument
def yell_this(words_to_yell):
    """Print *words_to_yell* upper-cased with a trailing '!'.

    BUG FIX: the original body assigned input() to the misspelled name
    `works_to_yell` (discarding the user's text) and printed the argument
    unchanged, so nothing was ever yelled.  Per the task, the prompt now
    happens at module level and the function only yells its argument.
    """
    print(words_to_yell.upper() + "!")

# get user input in variable words_to_yell, then yell it
words_to_yell = input("enter words to yell:")
yell_this(words_to_yell)
# [Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) © 2017 Microsoft
|
Python Absolute Beginner/Module_2_1_Absolute_Beginner.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="SZIubkln0AI2"
# # Advanced Certification in AIML
# ## A Program by IIIT-H and TalentSprint
# + [markdown] colab_type="text" id="4LNbxek40AI4"
# # Hackathon : Voice commands based food ordering system
# The goal of the hackathon is to train your model on different types of voice data (such as
# clean studio data, noisy data, and finally your own data).
# + [markdown] colab_type="text" id="3e0e3sFh0JZJ"
# ### Setup Steps
# + colab={} colab_type="code" id="DqNBNvC25WNV"
import torch
from torch.autograd import Variable
import numpy as np
import librosa
import os
import warnings
from time import sleep
import sys
warnings.filterwarnings('ignore')
# + [markdown] colab_type="text" id="wqMmxLR38vJ3"
# ## Pretrained Network for deep features
#
#
# The following function contains code to load a pre-trained network to produces deep features for the audio sample. This network is trained with delta MFCC features of mono channel 8000 bit rate audio sample.
# + colab={} colab_type="code" id="NDbuxUiL2zYL"
def get_network():
    """Load the pretrained speech network and keep only its first 17 layers.

    The saved model (``net_speech_89.pt``) is moved to CPU and its leading
    modules are copied into a fresh ``Sequential`` used as a deep-feature
    extractor.
    """
    trimmed = torch.nn.Sequential()
    pretrained = torch.load("net_speech_89.pt").cpu()
    for idx, layer in enumerate(pretrained):
        trimmed.add_module("layer" + str(idx), layer)
        if idx == 16:  # stop after 17 layers (indices 0..16)
            break
    return trimmed
# + colab={"base_uri": "https://localhost:8080/", "height": 336} colab_type="code" id="dmoIgxTG5ZnF" outputId="063ed3b8-dc99-4c48-a9f0-072e9c2d857f"
get_network()
# + [markdown] colab_type="text" id="sZS1NA1sATEf"
# ##Obtaining Features from Audio samples
# Generate features from a audio sample of '.wav' format
# * Generate Delta MFCC features of order 1 and 2
# * Passes them through the above mentioned deep neural net
# * the obtained deep features are returned
#
# Parameters: Filepath (path of audio sample),
# sr (sampling rate, all the samples provided are of 8000 bitrate)
#
# Caution: Do not change the default parameters
# + colab={} colab_type="code" id="eTtb2zAj5k0-"
def get_features(filepath, sr=8000, n_mfcc=30, n_mels=128, frames=15):
    """Extract deep features for one ``.wav`` file.

    Builds a log-mel spectrogram, converts it to MFCCs padded/truncated to a
    fixed number of frames, stacks order-1 and order-2 delta-MFCCs, and runs
    them through the pretrained network from ``get_network()``.

    Caution: do not change the default parameters -- the pretrained network
    expects exactly this feature layout (mono, 8000 bit rate).
    """
    y, sr = librosa.load(filepath, sr=sr)
    # Fixed: removed the dead STFT/power-spectrogram computation -- its result
    # was immediately overwritten by the melspectrogram call below.
    S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels)
    log_S = librosa.power_to_db(S, ref=np.max)
    features = librosa.feature.mfcc(S=log_S, n_mfcc=n_mfcc)
    # Pad with zero frames (or truncate) so every clip yields the same shape.
    if features.shape[1] < frames:
        features = np.hstack((features, np.zeros((n_mfcc, frames - features.shape[1]))))
    elif features.shape[1] > frames:
        features = features[:, :frames]
    # 1st- and 2nd-order delta MFCCs, flattened into one feature vector.
    delta1_mfcc = librosa.feature.delta(features, order=1)
    delta2_mfcc = librosa.feature.delta(features, order=2)
    features = np.hstack((delta1_mfcc.flatten(), delta2_mfcc.flatten()))
    features = features.flatten()[np.newaxis, :]
    features = Variable(torch.from_numpy(features)).float()
    # NOTE(review): the pretrained net is reloaded from disk on every call;
    # hoisting this out would speed up bulk extraction -- behavior kept as-is.
    deep_net = get_network()
    deep_features = deep_net(features)
    return deep_features.data.numpy().flatten()
# + [markdown] colab_type="text" id="NhLFY4n6BwIj"
# ## All the voice samples needed for training are present across the folders "Noisy_data" and "studio_data"
# + colab={"base_uri": "https://localhost:8080/", "height": 101} colab_type="code" id="lMF1AqHZhl1h" outputId="c92fde2d-3caa-494e-c6b5-723a6af8aa8a"
# %ls
# + [markdown] colab_type="text" id="SB-LowDuCMUL"
# ##Stage 1: Loading data (5 Marks)
#
# * Load 'Studio data' and extract features from the data
#
# ### Evaluation Criteria:
# * Complete the code in the load_data function
# * The function should take path of the folder containing audio samples as input
# * It should return features of all the audio samples present in the specified folder into single array (list of lists or 2-d numpy array) and their respective labels should be returned too
# + colab={} colab_type="code" id="qDzCa-532EUj"
import glob
def load_files(folder_path):
    """Load every ``.wav`` sample under *folder_path* and featurize it.

    The integer class label is parsed from the leading ``<label>_`` prefix of
    each file name. Returns ``(features, labels)`` as numpy arrays, in glob
    iteration order.
    """
    labels = []
    features = []
    for wav_path in glob.iglob(folder_path + '/*.wav'):
        # "<label>_rest.wav" -> integer label from the filename prefix
        labels.append(int(wav_path.split('/')[-1].split('_')[0]))
        features.append(get_features(wav_path))
    return np.asarray(features), np.asarray(labels)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="z5CG-d_yhpHX" outputId="616c1d50-ce5e-4e1a-b775-9822d4a81a55"
# !pwd
# + [markdown] colab_type="text" id="tnpuucBwbpl1"
#
# + [markdown] colab_type="text" id="7673ezpxFEfM"
# ####load data and labels from studio_data folder
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="u5CjrlPVPjNs" outputId="b4437e51-54ee-44d8-b964-9e0ed378683a"
studio_recorded_features, studio_recorded_labels = load_files('studio_data')
print(studio_recorded_features.shape, studio_recorded_labels.shape)
# + colab={} colab_type="code" id="sRsizBC6_aIf"
import joblib
studio_recorded_features, studio_recorded_labels = np.asarray(joblib.load('studio_data_features.sav')),np.asarray(joblib.load('studio_data_labels.sav'))
# + colab={} colab_type="code" id="7QNqBhLE3LkE"
from google.colab import files
joblib.dump(studio_recorded_features, 'studio_data_features.sav') # Command to save the model file
joblib.dump(studio_recorded_labels, 'studio_data_labels.sav')
files.download("studio_data_features.sav") # Download the model file to local PC
files.download("studio_data_labels.sav") # Download the model file to local PC
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="mP-4epdLzaC-" outputId="4c1718c4-e520-4dae-a1fb-f56870e31900"
studio_recorded_features.shape, studio_recorded_labels.shape
# + [markdown] colab_type="text" id="BGq6XpvhFynP"
# ## Stage 2: Training classifier on the studio_data (16 Marks)
# * The goal here is to train your model on voice samples collected in a noiseless studio
# setup above
#
# ### Evaluation Criteria:
# * Train the classifier, save the model
# * The score you get: Validation accuracy percentage of 15 (Validation data should be at
# least 20% of the total data)
# * Example: If a team gets, 80 % accuracy on the validation set, then the marks will be
# 80% of 15 marks i.e. 12 marks (will round of the score, in case of non - integer scores) and deploy the (refer colab notebook)
#
# #### Train a classifier on the features obtained from studio_data
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="VU5hdERsFw5o" outputId="6f59e6fa-552d-4c70-a1af-a195f11791bf"
# Hold out 20% of the studio-recorded samples for validation
from sklearn.model_selection import train_test_split
# Split files for training and testing (80:20); fixed random_state for reproducibility
X_train, X_test, y_train, y_test = train_test_split(studio_recorded_features, studio_recorded_labels, test_size=0.2, random_state=43)
# Ensure the training splits are numpy arrays and inspect their shapes
X_train = np.array(X_train)
y_train = np.array(y_train)
X_train.shape, y_train.shape
# + [markdown] colab_type="text" id="1Gyhg3hiecu5"
# ### Use the above data and train using MLPClassifier:
# + colab={"base_uri": "https://localhost:8080/", "height": 151} colab_type="code" id="l4LpU0b3eWer" outputId="038dddd4-dcdd-48d6-e786-78a02815b15f"
# Import Necessary Headers
from sklearn.neural_network import MLPClassifier
# Train the data
clf = MLPClassifier(random_state = 12)
clf.fit(X_train, y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 151} colab_type="code" id="DKVXqe4l51kE" outputId="15033206-7a0d-46de-f658-9030ab36d0fc"
# Import Necessary Headers
from sklearn.neural_network import MLPClassifier
# Train the data
clf = joblib.load('mlp_trained_model_studio.clf')
clf.fit(X_train, y_train)
# + [markdown] colab_type="text" id="5NxlvFcOiu9f"
# ### Use this to predict accuracy for the trained model:
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="dkOPxjEgilhV" outputId="c972ab91-9855-4c7e-f844-9a141493063b"
# Import necessary headers
from sklearn.metrics import accuracy_score
# Save the predicted values in a variable
predicted_values = clf.predict(np.array(X_test))
# Predict Accuracy score (in %)
accuracy_score(np.array(y_test),predicted_values)*100
# + [markdown] colab_type="text" id="OGixO_z6Gf-Y"
# ####Save your model
#
# Hint:
# * In case you are using a scikit-learn model for training, you can use the joblib package to save the model.
# * Manually implemented models as a function or class can be saved using pickle
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ut8aQN5_G7bx" outputId="1d54075d-3639-4d3c-f798-e5c6c769f04a"
import joblib # Import Necessary Headers
file = 'mlp_trained_model_studio.clf' # Specify filename of model file
joblib.dump(clf, file) # Command to save the model file
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="QWUSGj8bj-iQ" outputId="0ef57e28-b2db-4820-d4d2-3bc98b58bf42"
# %ls -F $file # Check if the created model file exists!
# + [markdown] colab_type="text" id="jsCHKXubHAJB"
# #### Download your trained model using the code below
# * given the path of model file the following code downloads it through the browser
# + colab={} colab_type="code" id="BDmWXfPaHJZG"
from google.colab import files
files.download(file) # Download the model file to local PC
# + [markdown] colab_type="text" id="R7ccsM_ZISWj"
# ## Stage 3: Deploy your classifier on the server (3 Marks)
#
# * Deploy your model on the server, check the deployment instructions in the hackathon documentation for details
#
# ### Evaluation Criteria:
#
# * There are two stages in the food ordering application
#
# * Ordering Item
# * Providing the number of servings
#
# * If both the stages are cleared with correct predictions you will get
# complete marks
# * Otherwise, no marks will be awarded
#
#
#
# #### Now deploy the model trained on studio_data on the server to order food correctly.
# #### Deployment instructions are given in the Hackathon documentation
# #### After deploying and checking the application come back here to train on Noisy_data to generalise better in real situations
# + [markdown] colab_type="text" id="WZhvgJQZpAxk"
# ## Stage 4: Load 'Noisy_data', train a Classifier on the same and deploy (3 Marks)
#
# * The goal here is to train your model on voice samples collected in a noisy environment and save the model
#
# ### Evaluation Criteria:
#
# * Load 'Noisy_data'
# * Train the classifier, save the model
#
#
# + [markdown] colab_type="text" id="VFOudw7XGDeQ"
# #### load data and labels from Noisy_data folder
# + colab={} colab_type="code" id="5LLejdkbCat2"
noisy_data, noisy_data_labels = load_files('Noisy_data')
# + colab={} colab_type="code" id="z-J-1NvlhwZD"
noisy_data, noisy_data_labels = np.asarray(joblib.load('noisy_data_features.sav')),np.asarray(joblib.load('noisy_data_labels.sav'))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="_m24s9eFLVCX" outputId="a4d819f8-394c-4410-c4ae-89334204501f"
noisy_data.shape, noisy_data_labels.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="pYAcLvBDkgnL" outputId="aaea4149-740d-4ad8-87f2-17ea8bdab640"
type(noisy_data_labels)
# + [markdown] colab_type="text" id="HYK88qmLgzwc"
# #### Train a classifier on the features obtained from noisy_data
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="0Kb2vC8orK8_" outputId="029db25a-3775-4164-a17c-9c0603e1eeb7"
# Split noisy-environment files for training and testing (80:20)
X_noise_train, X_noise_test, y_noise_train, y_noise_test = train_test_split(noisy_data, noisy_data_labels, test_size=0.2)
# Ensure the training splits are numpy arrays and inspect their shapes
X_noise_train = np.array(X_noise_train)
y_noise_train = np.array(y_noise_train)
X_noise_train.shape, y_noise_train.shape
# Train an MLP classifier on the noisy-environment samples
clf_noise = MLPClassifier(random_state = 60)
clf_noise.fit(X_noise_train, y_noise_train)
# Predict labels for the held-out noisy test split
predicted_noise_values = clf_noise.predict(np.array(X_noise_test))
# Validation accuracy (in %)
accuracy_score(np.array(y_noise_test),predicted_noise_values)*100
# + [markdown] colab_type="text" id="E2jfdJigrTkr"
# ####Save your model
#
# Hint:
# * In case you are using a scikit-learn model for training, you can use the joblib package to save the model.
# * Manually implemented models as a function or class can be saved using pickle
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="GJBkPlNUrhbz" outputId="f4cf4575-f567-41a2-b469-ffc9d2e4be72"
noise_model_file = 'mlp_trained_model_noise.clf' # Specify filename of model file
joblib.dump(clf_noise, noise_model_file) # Command to save the model file
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Q9nlF00Q7iff" outputId="35afd66e-6f47-4a4e-acb1-168db14b1272"
# %ls -F $noise_model_file # Check if the created model file exists!
# + [markdown] colab_type="text" id="7T3v5XQbron7"
# #### Download your trained model using the code below
# * given the path of model file the following code downloads it through the browser
# + colab={} colab_type="code" id="sNhSG65BrqAm"
from google.colab import files
files.download(noise_model_file)
# + [markdown] colab_type="text" id="TwXw75OlrzJ8"
# #### Now deploy the model trained on noisy_data on the server to order food correctly.
# + [markdown] colab_type="text" id="kA6G5tzShN5s"
# ## Stage 5: Use 'Noisy_data' and 'Studio-data' together, train a Classifier on the same and deploy (3 Marks)
#
# * The goal here is to train your model on voice samples collected in a noisy environment 'and' studio-data save the model
#
# ### Evaluation Criteria:
#
# * Use the 'Noisy_data' and 'studio-data' loaded above and train the classifier, save the model
#
#
# + [markdown] colab_type="text" id="wPmAbksHGK0a"
#
#
#
# #### Train a classifier on the features obtained from both the Noisy_data and Studio_data
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="yFclsavsngre" outputId="459f375b-c752-4cae-f22b-6eb73c17678a"
studio_recorded_labels.shape, noisy_data_labels.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="XyZ16AUKGeoN" outputId="db871f55-6f8c-4ce5-ec10-1bff5f9eb8af"
# Concatenate the studio and noisy datasets into one combined corpus
all_features = np.vstack((studio_recorded_features, noisy_data))
# reshape(-1, 1) works for any sample count (was hard-coded to 8423)
all_labels = np.vstack((studio_recorded_labels.reshape(-1, 1), noisy_data_labels))
# Split the combined data for training and testing (80:20)
X_all_train, X_all_test, y_all_train, y_all_test = train_test_split(all_features, all_labels, test_size=0.2)
# Bug fix: the original converted X_noise_train/y_noise_train here, so the
# "combined" classifier was silently trained on the noisy subset only.
X_all_train = np.array(X_all_train)
y_all_train = np.array(y_all_train)
X_all_train.shape, y_all_train.shape
# Train an MLP classifier on the combined data
clf_all = MLPClassifier()
clf_all.fit(X_all_train, y_all_train)
# Validation accuracy (in %) on the held-out combined split
predicted_all_values = clf_all.predict(np.array(X_all_test))
accuracy_score(np.array(y_all_test),predicted_all_values)*100
# + [markdown] colab_type="text" id="Y4IbSzHAsqWT"
# ####Save your model
#
# Hint:
# * In case you are using a scikit-learn model for training, you can use the joblib package to save the model.
# * Manually implemented models as a function or class can be saved using pickle
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="4PZQ-jrPsreD" outputId="d7f06af1-9e79-45ce-e1c7-2be9abed7467"
all_model_file = 'mlp_trained_model_all.clf' # Specify filename of model file
joblib.dump(clf_all, all_model_file) # Command to save the model file
# + [markdown] colab_type="text" id="wJlS0qjmIEYz"
# #### Download your trained model using the code below
# * given the path of model file the following code downloads it through the browser
# + colab={} colab_type="code" id="mUM_6cP-IJy5"
from google.colab import files
files.download(all_model_file)
# + [markdown] colab_type="text" id="jA829HXjIe5Z"
# #### Now deploy the model trained above on the server to order food correctly.
# #### Deployment instructions are given in the Hackathon documentation
# #### After deploying and checking the application, record your teams data from the web application provided in the Hackathon document
# + colab={} colab_type="code" id="nv3I24flWlLq"
# !mkdir teamdata
# + [markdown] colab_type="text" id="TWrJSY-OGyG9"
# #### Replace <YOUR_GROUP_ID> with your group id given in the lab
# + colab={} colab_type="code" id="gB_bSllKWJ5U"
# !wget -r -A .wav https://aiml-sandbox1.talentsprint.com/audio_recorder/<YOUR_GROUP_ID>/team_data/ -nH --cut-dirs=100 -P ./team_data
# + colab={} colab_type="code" id="3XUPRHG50rxW"
# %ls
# + colab={} colab_type="code" id="zU556OeCL0x9"
# !unzip <zip_file_name>
# + [markdown] colab_type="text" id="wH17k1RciuM_"
# ## Stage 6: Collect the voice samples and refine the classifier trained on noisy_data, by now using your team’s data (10 Marks)
#
#
# * The goal here is to refine the model that you trained on voice samples collected
# in a noisy environment
# * You will refine your model trained on noisy_data, save and download it.
# * Deploy your model on the server, check deployment section in the same
# document for details
#
# ### Evaluation Criteria:
# * There are two stages in the food ordering application
#
# * Ordering Item
# * Providing the number of servings
#
# * If both the stages are cleared with correct predictions you will get complete marks
# * Otherwise, no marks will be awarded
#
# #### Enhance the model trained with both the noisy data and studio_data to your team's voice samples
# + colab={} colab_type="code" id="cwKko3-yL-0a"
##YOUR CODE HERE
# + [markdown] colab_type="text" id="zQhiuXMaMRp2"
# #### Now deploy the model trained above on the server to order food correctly.
# #### Deployment instructions are given in the Hackathon documentation
|
HackathonOne/Hackathon_I_Starter_Code.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 07
#
# ## Solving a boundary value problem with shooting method
#
# ### <NAME>, Б01-818
#
# XI.9.7
# $$\epsilon y''=(y')^2,\ \ \ y(0)=1,\ \ \ y(1)=0,\ \ \ 0<\epsilon<<1$$
# $$y'=z$$
# $$\epsilon z'=(z)^2,\ \ \ z(0)=\alpha,\ \ \ 0<\epsilon<<1$$
import unittest
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# +
#logging.basicConfig(level=logging.INFO)
# -
class ODE:
    """Explicit Runge-Kutta integrator for first-order ODE systems.

    The scheme is fully described by a Butcher tableau (nodes ``c_vec``,
    weights ``b_vec``, stage matrix ``a``) passed to :meth:`solve`.
    """

    def __init__(self):
        # Logger for debug traces of intermediate stage values.
        self.log = logging.getLogger("ODE")

    def solve(self, stages, c_vec, b_vec, a, f_vec, u_init, h, t_range):
        """Integrate u' = f(t, u) with fixed step *h* over *t_range*.

        ``f_vec`` is a list of right-hand-side callables (one per component),
        each taking ``(t, u_vec)``. Returns ``(times, states)`` where
        ``states[i]`` is the solution vector at ``times[i]``.
        """
        dim = len(f_vec)
        times = [t_range[0]]
        states = [u_init]
        while times[-1] < t_range[1]:
            t_now, u_now = times[-1], states[-1]
            # k[j][s] holds stage derivative s of component j.
            k = [[0] * stages for _ in range(dim)]
            for s in range(stages):
                # Stage state: u_now advanced by the already-computed stages.
                u_stage = [u_now[j] + h * sum(a[s][m] * k[j][m] for m in range(s))
                           for j in range(dim)]
                self.log.debug(f"Iter[{t_now}]: u_k: {u_stage}")
                for j in range(dim):
                    k[j][s] = f_vec[j](t_now + c_vec[s] * h, u_stage)
            # Combine the stages with the quadrature weights b_vec.
            u_next = [u_now[j] + h * sum(b_vec[s] * k[j][s] for s in range(stages))
                      for j in range(dim)]
            self.log.debug(f"Iter[{t_now}]: k: {k}")
            states.append(u_next)
            times.append(t_now + h)
        return (times, states)
class SM:
    """Shooting method for the two-point BVP y(0)=y0, y(1)=y1.

    The unknown initial slope ``a = y'(0)`` is refined by a Newton iteration
    whose derivative dF/da is approximated with a finite difference of two
    ODE solves.
    """
    def __init__(self):
        self.log = logging.getLogger("SM")
    def stop(self, cur, exp, acc):
        """Return True once the attained boundary value `cur` is within `acc` of the target `exp`."""
        if abs(cur - exp) < acc:
            return True
        return False
    def ode_solve(self, f_vec, u_init, acc):
        """Integrate the system on [0, 1] with a fixed-step Runge-Kutta scheme.

        NOTE(review): the first tableau (3-stage Gauss-Legendre coefficients)
        is immediately overwritten by the classical RK4 tableau below, so only
        RK4 is actually used; the `acc` parameter is unused here.
        """
        c_vec = [1/2-np.sqrt(15)/10, 1/2, 1/2+np.sqrt(15)/10]
        b_vec = [5/18, 4/9, 5/18]
        a = [[5/36,2/9-np.sqrt(15)/15,5/36-np.sqrt(15)/30],
             [5/36+np.sqrt(15)/24,2/9,5/36-np.sqrt(15)/24],
             [5/36+np.sqrt(15)/30,2/9+np.sqrt(15)/15,5/36]]
        t_range = (0, 1)
        h = 0.00001
        ode = ODE()
        # Classical RK4 tableau -- this is the scheme actually applied.
        c_vec = [0, 1/2, 1/2, 1]
        b_vec = [1/6, 2/6, 2/6, 1/6]
        a = [[0,0,0,0],
             [1/2,0,0,0],
             [0,1/2,0,0],
             [0,0,1,0]]
        res = ode.solve(len(c_vec), c_vec, b_vec, a, f_vec, u_init, h, t_range)
        df = pd.DataFrame({"t": res[0], "(y, y')": res[1]})
        self.log.info(df)
        return res
    def solve(self, f_vec, acc, y0, y1):
        """Shoot for the initial slope so the trajectory ends at y(1) = y1.

        Each iteration solves the IVP for slope `a` and `a + delta`, forms the
        mismatch F(a) = y_a(1) - y1, and takes a Newton step on `a`. Every
        attempted trajectory is plotted; returns the last (accepted) solve.
        """
        a = -1          # initial guess for the slope y'(0)
        delta = 0.001   # finite-difference step for dF/da
        res = ()
        y_res = 10*acc  # seed outside tolerance so the loop runs at least once
        while not self.stop(y_res, y1, acc):
            self.log.info(f"Alpha: {a}")
            res = self.ode_solve(f_vec, [y0, a], acc)
            res_delta = self.ode_solve(f_vec, [y0, a+delta], acc)
            plt.plot(res[0], [j[0] for j in res[1]], label=f"y(x)|a={a}")
            plt.grid(True)
            plt.xlabel('x')
            plt.ylabel('y')
            plt.legend()
            y_res = res[1][-1][0]
            # Newton update: a <- a - F(a) / F'(a)
            F = res[1][-1][0] - y1
            F_der = (res_delta[1][-1][0] - res[1][-1][0]) / delta
            a = a - F/F_der
        plt.show()
        return res
# +
e = 0.1
acc = 0.01
y0 = 1
y1 = 0
f1 = lambda t, u_vec: u_vec[1]
f2 = lambda t, u_vec: (u_vec[1]**2)/e
sm = SM()
res = sm.solve([f1, f2], acc, y0, y1)
df = pd.DataFrame({"t": res[0], "(y, y')": res[1]})
print(df)
|
7/sm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import sklearn as sk
import matplotlib as mpl
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
def xml_to_dict(x):
    """Recursively convert an ElementTree element into nested dicts.

    Attributes become top-level keys, non-empty text is stored under
    ``"text_"``, and each child tag maps to a list of converted children
    (lists preserve repeated tags).
    """
    result = dict(x.attrib)
    if x.text:
        result["text_"] = x.text
    for child in x:
        result.setdefault(child.tag, []).append(xml_to_dict(child))
    return result
# Parse the Apple Health export into nested dicts, then into DataFrames.
fn = "data/apple_health_export/export.xml"
tree = ET.parse(fn)
root = tree.getroot()
apple_health = { root.tag: xml_to_dict(root) }
health_data = apple_health["HealthData"]
health_data.keys()
# Show the top-level scalar attributes (isinstance is the idiomatic type
# check -- the original compared type() with !=).
{ key: health_data[key] for key in health_data if not isinstance(health_data[key], list) }
# One DataFrame per record category in the export
e_df = pd.DataFrame.from_dict(health_data["ExportDate"])
m_df = pd.DataFrame.from_dict(health_data["Me"])
r_df = pd.DataFrame.from_dict(health_data["Record"])
a_df = pd.DataFrame.from_dict(health_data["ActivitySummary"])
w_df = pd.DataFrame.from_dict(health_data["Workout"])
|
untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Generate Mock Data
# In this example we generate mock data with a variety of systematic effects including photometric redshifts, source galaxy distributions, and shape noise. We then populate a galaxy cluster object. This notebooks is organised as follows:
# - Imports and configuration setup
# - Generate mock data with different source galaxy options
# - Generate mock data with different field-of-view options
# - Generate mock data with different galaxy cluster options (only available with the Numcosmo and/or CCL backends). Use the `os.environ['CLMM_MODELING_BACKEND']` line below to select your backend.
import os
## Uncomment the following line if you want to use a specific modeling backend among 'ct' (cluster-toolkit), 'ccl' (CCL) or 'nc' (Numcosmo). Default is 'ct'
#os.environ['CLMM_MODELING_BACKEND'] = 'nc'
try: import clmm
except:
import notebook_install
notebook_install.install_clmm_pipeline(upgrade=False)
import clmm
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Make sure we know which version we're using
clmm.__version__
# ## Import mock data module and setup the configuration
from clmm.support import mock_data as mock
from clmm import Cosmology
# Mock data generation requires a defined cosmology
mock_cosmo = Cosmology(H0 = 70.0, Omega_dm0 = 0.27 - 0.045, Omega_b0 = 0.045, Omega_k0 = 0.0)
# Mock data generation requires some cluster information. The default is to work with the NFW profile, using the "200,mean" mass definition. The Numcosmo and CCL backends allow for more flexibility (see last section of this notebook)
cosmo = mock_cosmo
cluster_id = "Awesome_cluster"
cluster_m = 1.e15 # M200,m
cluster_z = 0.3
src_z = 0.8
concentration = 4
ngals = 1000 # number of source galaxies
cluster_ra = 0.0
cluster_dec = 0.0
# ## Generate the mock catalog with different source galaxy options
# - Clean data: no noise, all galaxies at the same redshift
zsrc_min = cluster_z + 0.1
ideal_data = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo, src_z, ngals=ngals)
# - Noisy data: shape noise, all galaxies at the same redshift
noisy_data_src_z = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo, src_z,
shapenoise=0.05, ngals=ngals)
# * Noisy data: shape noise plus measurement error, all galaxies at the same redshift
noisy_data_src_z_e_err = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo, src_z,
shapenoise=0.05, mean_e_err=0.05, ngals=ngals)
# <div class="alert alert-warning">
#
# **WARNING:** Experimental feature. Uncertainties are created by simply drawing random numbers near the value specified by `mean_e_err`. Use at your own risk. This will be improved in future releases.
#
# </div>
# - Noisy data: photo-z errors (and pdfs!), all galaxies at the same redshift
noisy_data_photoz = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo, src_z,
shapenoise=0.05, photoz_sigma_unscaled=0.05, ngals=ngals)
# - Clean data: source galaxy redshifts drawn from a redshift distribution instead of fixed `src_z` value. Options are `chang13` for Chang et al. 2013 or `desc_srd` for the distribution given in the DESC Science Requirement Document. No shape noise or photoz errors.
ideal_with_src_dist = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo, 'chang13', zsrc_min=zsrc_min,
zsrc_max=7.0, ngals=ngals)
# - Noisy data: galaxies following redshift distribution, redshift error, shape noise
allsystematics = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo, 'chang13', zsrc_min=zsrc_min,
zsrc_max=7.0, shapenoise=0.05, photoz_sigma_unscaled=0.05, ngals=ngals)
allsystematics2 = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo, 'desc_srd', zsrc_min=zsrc_min,
zsrc_max=7.0, shapenoise=0.05, photoz_sigma_unscaled=0.05, ngals=ngals)
# Sanity check: checking that no galaxies were originally drawn below zsrc_min, before photoz errors are applied (when relevant)
print('Number of galaxies below zsrc_min:')
print('ideal_data:',np.sum(ideal_data['ztrue']<zsrc_min))
print('noisy_data_src_z:',np.sum(noisy_data_src_z['ztrue']<zsrc_min))
print('noisy_data_photoz:',np.sum(noisy_data_photoz['ztrue']<zsrc_min))
print('ideal_with_src_dist:',np.sum(ideal_with_src_dist['ztrue']<zsrc_min))
print('allsystematics:',np.sum(allsystematics['ztrue']<zsrc_min))
# ### Inspect the catalog data
# - Ideal catalog first entries: no noise on the shape measurement, all galaxies at z=0.8, no redshift errors (z = ztrue)
for n in ideal_data.colnames:
if n!='id':
ideal_data[n].format = "%6.3e"
ideal_data[0:3].pprint(max_width=-1)
# - With photo-z errors
for n in noisy_data_photoz.colnames:
if n!='id':
noisy_data_photoz[n].format = "%6.3e"
noisy_data_photoz[0:3].pprint(max_width=-1)
# - Histogram of the redshift distribution of background galaxies, for the true (originally drawn) redshift and the redshift once photoz errors have been added. By construction no true redshift occurs below zsrc_min, but some 'observed' redshifts (i.e. including photoz errors) might be.
plt.hist(allsystematics['z'], bins=50, alpha=0.3, label='measured z (i.e. including photoz error)');
plt.hist(allsystematics['ztrue'], bins=50, alpha=0.3, label='true z');
plt.axvline(zsrc_min, color='red', label='requested zmin')
plt.xlabel('Source Redshift')
plt.legend()
plt.hist(allsystematics['ztrue'], bins=50, alpha=0.3, label='true z');
plt.hist(allsystematics2['ztrue'], bins=50, alpha=0.3, label='true z');
# pdz for one of the galaxy in the catalog,
galid = 0
plt.plot(allsystematics['pzbins'][galid], allsystematics['pzpdf'][galid])
plt.axvline(allsystematics['z'][galid], label='Observed z', color='red')
plt.axvline(allsystematics['ztrue'][galid], label='True z', color='g')
plt.xlabel('Redshift')
plt.ylabel('Photo-z Probability Distribution')
plt.legend(loc=1)
# Populate in a galaxy cluster object
# At the moment mock data only allow for a cluster centred on (0,0)
cluster_ra = 0.0
cluster_dec = 0.0
gc_object = clmm.GalaxyCluster(cluster_id, cluster_ra, cluster_dec,
cluster_z, allsystematics)
# Plot source galaxy ellipticities
# +
plt.scatter(gc_object.galcat['e1'],gc_object.galcat['e2'])
plt.xlim(-0.2, 0.2)
plt.ylim(-0.2, 0.2)
plt.xlabel('Ellipticity 1',fontsize='x-large')
plt.ylabel('Ellipticity 2',fontsize='x-large')
# -
# ## Generate the mock data catalog with different field-of-view options
# In the examples above, `ngals=1000` galaxies were simulated in a field corresponding to an 8 Mpc/h x 8 Mpc/h (proper distance) square box at the cluster redshift (this is the default). The user may however vary the field size and/or provide a galaxy density (instead of a number of galaxies). This is exemplified below, using the `allsystematics` example.
# - `ngals = 1000` in a 4 x 4 Mpc/h box. Asking for the same number of galaxies in a smaller field of view yields high galaxy density
allsystematics2 = mock.generate_galaxy_catalog(cluster_m, cluster_z, concentration, cosmo,
'chang13', zsrc_min=zsrc_min, zsrc_max=7.0,
shapenoise=0.05, photoz_sigma_unscaled=0.05,
field_size=4, ngals=ngals)
plt.scatter(allsystematics['ra'],allsystematics['dec'], marker='.', label = 'default 8 x 8 Mpc/h FoV')
plt.scatter(allsystematics2['ra'],allsystematics2['dec'],marker='.', label = 'user-defined FoV')
plt.legend()
# - Alternatively, the user may provide a galaxy density (here 1 gal/arcmin2 to roughly match 1000 galaxies, given the configuration) and the number of galaxies to draw will automatically be adjusted to the box size.
allsystematics3 = mock.generate_galaxy_catalog(cluster_m, cluster_z, concentration, cosmo,
'chang13', zsrc_min=zsrc_min, zsrc_max=100.0,
shapenoise=0.05, photoz_sigma_unscaled=0.05,
ngal_density=25)
print(f'Number of drawn galaxies = {len(allsystematics3)}')
allsystematics4 = mock.generate_galaxy_catalog(cluster_m, cluster_z, concentration, cosmo,
'desc_srd', zsrc_min=zsrc_min, zsrc_max=100.0,
shapenoise=0.05, photoz_sigma_unscaled=0.05,
ngal_density=25)
print(f'Number of drawn galaxies = {len(allsystematics4)}')
plt.scatter(allsystematics['ra'],allsystematics['dec'], marker='.', label = 'ngals = 1000')
plt.scatter(allsystematics3['ra'],allsystematics3['dec'],marker='.', label = 'ngal_density = 1 gal / arcmin2')
plt.legend()
# ## Generate mock data with different galaxy cluster options
# WARNING: Available options depend on the modeling backend:
# - Cluster-toolkit allows for other values of the overdensity parameter, but is restricted to working with the mean mass definition
# - Both CCL and Numcosmo allow for different values of the overdensity parameter, but work with both the mean and critical mass definition
# - Numcosmo further allows for the Einasto or Burkert density profiles to be used instead of the NFW profile
#
#
# ### Changing the overdensity parameter (all backend) - `delta_so` keyword (default = 200)
allsystematics_500mean = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo, 'chang13', delta_so=500,
zsrc_min=zsrc_min,
zsrc_max=7.0, shapenoise=0.05, photoz_sigma_unscaled=0.05, ngals=ngals)
# ### Using the critical mass definition (Numcosmo and CCL only) - `massdef` keyword (default = 'mean')
# WARNING: error will be raised if using the cluster-toolkit backend
allsystematics_200critical = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo,'chang13', massdef='critical', zsrc_min=zsrc_min,
zsrc_max=7.0, shapenoise=0.05, photoz_sigma_unscaled=0.05, ngals=ngals)
# ### Changing the halo density profile (Numcosmo only) - `halo_profile_model` keyword (default = 'nfw')
# WARNING: error will be raised if using the cluster-toolkit or CCL backends
allsystematics_200m_einasto = mock.generate_galaxy_catalog(
cluster_m, cluster_z, concentration, cosmo,'chang13', halo_profile_model='einasto', zsrc_min=zsrc_min,
zsrc_max=7.0, shapenoise=0.05, photoz_sigma_unscaled=0.05, ngals=ngals)
|
examples/demo_generate_mock_cluster.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] nbsphinx="hidden" slideshow={"slide_type": "skip"}
# This notebook is part of the $\omega radlib$ documentation: http://wradlib.org/wradlib-docs.
#
# Copyright (c) 2016, $\omega radlib$ developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
# + [markdown] slideshow={"slide_type": "slide"}
# # Adjusting radar-base rainfall estimates by rain gauge observations
# + [markdown] slideshow={"slide_type": "skip"}
# ## Background
#
# There are various ways to correct specific errors and artifacts in radar-based quantitative precipitation estimates (*radar QPE*). Alternatively, you might want to correct your radar QPE regardless of the error source - by using ground truth, or, more specifically, rain gauge observations. Basically, you define the error of your radar QPE at a rain gauge location by the discrepancy between rain gauge observation (considered as "the truth") and radar QPE at that very location. Whether you consider this "discrepancy" as an *additive* or *multiplicative* error is somehow arbitrary - typically, it's a *mix* of both. If you quantify this error at various locations (i.e. rain gauges), you can go ahead and construct correction fields for your radar QPE. You might compute a single correction factor for your entire radar domain (which would e.g. make sense in case of hardware miscalibration), or you might want to compute a spatially variable correction field. This typically implies to interpolate the error in space.
# + [markdown] slideshow={"slide_type": "fragment"}
# $\omega radlib$ provides different error models and different spatial interpolation methods to address the adjustment problem. For details, please refer to $\omega radlib's$ [library reference](http://wradlib.org/wradlib-docs/latest/adjust.html).
# + slideshow={"slide_type": "fragment"}
import wradlib.adjust as adjust
import wradlib.verify as verify
import wradlib.util as util
import numpy as np
import matplotlib.pyplot as pl
try:
get_ipython().magic("matplotlib inline")
except:
pl.ion()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example for the 1-dimensional case
# + [markdown] slideshow={"slide_type": "fragment"}
# Looking at the 1-D (instead of 2-D) case is more illustrative.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Create synthetic data
# + [markdown] slideshow={"slide_type": "fragment"}
# First, we **create synthetic data**:
# - true rainfall,
# - point observations of the truth,
# - radar observations of the truth.
#
# The latter is disturbed by some kind of error, e.g. a combination of systematic and random error.
# + slideshow={"slide_type": "fragment"}
# gage and radar coordinates (1-D "range" axis, in km)
obs_coords = np.array([5, 10, 15, 20, 30, 45, 65, 70, 77, 90])
radar_coords = np.arange(0, 101)
# true rainfall: smooth sinusoidal field plus small uniform noise
truth = np.abs(1.5 + np.sin(0.075 * radar_coords)) + np.random.uniform(
    -0.1, 0.1, len(radar_coords))
# radar error: additive (sinusoidal) plus multiplicative (linear in range)
# components, mimicking e.g. a calibration offset and range-dependent bias
erroradd = 0.7 * np.sin(0.2 * radar_coords + 10.)
errormult = 0.75 + 0.015 * radar_coords
noise = np.random.uniform(-0.05, 0.05, len(radar_coords))
# radar observation = truth disturbed by all three error components
radar = errormult * truth + erroradd + noise
# gage observations are assumed to be perfect
obs = truth[obs_coords]
# add a missing value to observations (just for testing NaN handling)
obs[1] = np.nan
# + [markdown] slideshow={"slide_type": "slide"}
# ### Apply different adjustment methods
# + [markdown] slideshow={"slide_type": "fragment"}
# - additive error, spatially variable (`AdjustAdd`)
# - multiplicative error, spatially variable (`AdjustMultiply`)
# - mixed error, spatially variable (`AdjustMixed`)
# - multiplicative error, spatially uniform (`AdjustMFB`)
# + slideshow={"slide_type": "fragment"}
# number of neighbours to be used
nnear_raws = 3
# adjust the radar observation by additive model
add_adjuster = adjust.AdjustAdd(obs_coords, radar_coords,
nnear_raws=nnear_raws)
add_adjusted = add_adjuster(obs, radar)
# adjust the radar observation by multiplicative model
mult_adjuster = adjust.AdjustMultiply(obs_coords, radar_coords,
nnear_raws=nnear_raws)
mult_adjusted = mult_adjuster(obs, radar)
# adjust the radar observation by AdjustMixed
mixed_adjuster = adjust.AdjustMixed(obs_coords, radar_coords,
nnear_raws=nnear_raws)
mixed_adjusted = mixed_adjuster(obs, radar)
# adjust the radar observation by MFB
mfb_adjuster = adjust.AdjustMFB(obs_coords, radar_coords,
nnear_raws=nnear_raws,
mfb_args = dict(method="median"))
mfb_adjusted = mfb_adjuster(obs, radar)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Plot adjustment results
# + slideshow={"slide_type": "fragment"}
# Overlay truth, raw radar, gauges, and all four adjusted products.
# Enlarge all label fonts
font = {'size' : 15}
pl.rc('font', **font)
pl.figure(figsize=(10,5))
pl.plot(radar_coords, radar, 'k-', linewidth=2., linestyle="dashed", label="Unadjusted radar", )
pl.plot(radar_coords, truth, 'k-', linewidth=2., label="True rainfall", )
pl.plot(obs_coords, obs, 'o', markersize=10.0, markerfacecolor="grey", label="Gage observation")
pl.plot(radar_coords, add_adjusted, '-', color="red", label="Additive adjustment")
pl.plot(radar_coords, mult_adjusted, '-', color="green", label="Multiplicative adjustment")
pl.plot(radar_coords, mfb_adjusted, '-', color="orange", label="Mean Field Bias adjustment")
pl.plot(radar_coords, mixed_adjusted,'-', color="blue", label="Mixed (mult./add.) adjustment")
pl.xlabel("Distance (km)")
pl.ylabel("Rainfall intensity (mm/h)")
leg = pl.legend(prop={'size': 10})
# + [markdown] slideshow={"slide_type": "slide"}
# ### Verification
# + [markdown] slideshow={"slide_type": "fragment"}
# We use the `verify` module to compare the errors of different adjustment approaches.
#
# *Here, we compare the adjustment to the "truth". In practice, we would carry out a cross validation.*
# + slideshow={"slide_type": "fragment"}
# Verification for this example: error metrics of each product against the
# synthetic truth, rendered as one report panel per product.
rawerror = verify.ErrorMetrics(truth, radar)
mfberror = verify.ErrorMetrics(truth, mfb_adjusted)
adderror = verify.ErrorMetrics(truth, add_adjusted)
multerror = verify.ErrorMetrics(truth, mult_adjusted)
mixerror = verify.ErrorMetrics(truth, mixed_adjusted)
# Verification reports
maxval = 4.
# Enlarge all label fonts
font = {'size' : 10}
pl.rc('font', **font)
fig = pl.figure(figsize=(14, 8))
ax = fig.add_subplot(231, aspect=1.)
rawerror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Unadjusted radar")
ax = fig.add_subplot(232, aspect=1.)
adderror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Additive adjustment")
ax = fig.add_subplot(233, aspect=1.)
multerror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Multiplicative adjustment")
ax = fig.add_subplot(234, aspect=1.)
mixerror.report(ax=ax, unit="mm", maxval=maxval)
ax.text(0.2, 0.9 * maxval, "Mixed (mult./add.) adjustment")
# NOTE: a duplicated mixerror.report(...) call was removed here -- it redrew
# the same report onto the same axes and had no effect on the figure.
ax = fig.add_subplot(235, aspect=1.)
mfberror.report(ax=ax, unit="mm", maxval=maxval)
txt = ax.text(0.2, 0.9 * maxval, "Mean Field Bias adjustment")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example for the 2-dimensional case
# + [markdown] slideshow={"slide_type": "fragment"}
# For the 2-D case, we follow the same approach as before:
#
# - create synthetic data: truth, rain gauge observations, radar-based rainfall estimates
# - apply adjustment methods
# - verification
#
# The way these synthetic data are created is totally arbitrary - it's just to show how the methods are applied.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Create 2-D synthetic data
# + slideshow={"slide_type": "fragment"}
# grid axes
xgrid = np.arange(0, 10)
ygrid = np.arange(20, 30)
# number of observations
num_obs = 10
# create grid
gridshape = len(xgrid), len(ygrid)
grid_coords = util.gridaspoints(ygrid, xgrid)
# Synthetic true rainfall
truth = np.abs(10. * np.sin(0.1 * grid_coords).sum(axis=1))
# Creating radar data by perturbing truth with multiplicative and
# additive error
# YOU CAN EXPERIMENT WITH THE ERROR STRUCTURE
radar = 0.6 * truth + 1. * np.random.uniform(low=-1., high=1,
                                             size=len(truth))
# negative rainfall is unphysical -- clip to zero
radar[radar < 0.] = 0.
# indices for creating obs from raw (random placement of gauges)
# NOTE(review): sampling is with replacement, so two gauges may coincide
obs_ix = np.random.uniform(low=0, high=len(grid_coords),
                           size=num_obs).astype('i4')
# creating obs_coordinates
obs_coords = grid_coords[obs_ix]
# creating gauge observations from truth
obs = truth[obs_ix]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Apply different adjustment methods
# + slideshow={"slide_type": "fragment"}
# 2-D adjustment with default neighbourhood settings (contrast with the
# explicit nnear_raws used in the 1-D example above).
# Mean Field Bias Adjustment
mfbadjuster = adjust.AdjustMFB(obs_coords, grid_coords)
mfbadjusted = mfbadjuster(obs, radar)
# Additive Error Model
addadjuster = adjust.AdjustAdd(obs_coords, grid_coords)
addadjusted = addadjuster(obs, radar)
# Multiplicative Error Model
multadjuster = adjust.AdjustMultiply(obs_coords, grid_coords)
multadjusted = multadjuster(obs, radar)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Plot 2-D adjustment results
# + slideshow={"slide_type": "fragment"}
# Two helper functions for repeated plotting tasks
def scatterplot(x, y, title):
    """Draw a scatter plot of estimated vs. true rainfall.

    Adds a grey 1:1 reference line and fixes both axes to the
    module-level ``maxval`` plus a 10% margin.
    """
    upper = maxval + 0.1 * maxval
    diagonal = [0, 1.2 * maxval]
    pl.scatter(x, y)
    pl.plot(diagonal, diagonal, '-', color='grey')
    pl.xlabel("True rainfall (mm)")
    pl.ylabel("Estimated rainfall (mm)")
    pl.xlim(0, upper)
    pl.ylim(0, upper)
    pl.title(title)
def gridplot(data, title):
    """Render *data* on the 2-D analysis grid of the current axes ``ax``.

    Cell edges are offset by half a grid spacing so each mesh cell is
    centred on its coordinate; gauge locations are overlaid as squares
    coloured by their observed value on the same colour scale.
    """
    x_edges = np.append(xgrid, xgrid[-1] + 1.) - 0.5
    y_edges = np.append(ygrid, ygrid[-1] + 1.) - 0.5
    grd = ax.pcolormesh(x_edges, y_edges, data.reshape(gridshape),
                        vmin=0, vmax=maxval)
    ax.scatter(obs_coords[:, 0], obs_coords[:, 1], c=obs.ravel(),
               marker='s', s=50, vmin=0, vmax=maxval)
    # pl.colorbar(grd, shrink=0.5)
    pl.title(title)
# + slideshow={"slide_type": "fragment"}
# Maximum value (used for normalisation of colorscales)
maxval = np.max(np.concatenate((truth, radar, obs, addadjusted)).ravel())
# open figure
fig = pl.figure(figsize=(10, 6))
# gridplot() reads the module-level ``ax``, so each subplot is rebound first
# True rainfall
ax = fig.add_subplot(231, aspect='equal')
gridplot(truth, 'True rainfall')
# Unadjusted radar rainfall
ax = fig.add_subplot(232, aspect='equal')
gridplot(radar, 'Radar rainfall')
# Adjusted radar rainfall (MFB)
ax = fig.add_subplot(234, aspect='equal')
gridplot(mfbadjusted, 'Adjusted (MFB)')
# Adjusted radar rainfall (additive)
ax = fig.add_subplot(235, aspect='equal')
gridplot(addadjusted, 'Adjusted (Add.)')
# Adjusted radar rainfall (multiplicative)
ax = fig.add_subplot(236, aspect='equal')
gridplot(multadjusted, 'Adjusted (Mult.)')
pl.tight_layout()
# + slideshow={"slide_type": "fragment"}
# Scatter verification of each product against the truth (2-D case).
# Open figure
fig = pl.figure(figsize=(6, 6))
# Scatter plot radar vs. observations; gauge pixels highlighted in red
ax = fig.add_subplot(221, aspect='equal')
scatterplot(truth, radar, 'Radar vs. Truth (red: Gauges)')
pl.plot(obs, radar[obs_ix], linestyle="None", marker="o", color="red")
# Adjusted (MFB) vs. radar (for control purposes)
ax = fig.add_subplot(222, aspect='equal')
scatterplot(truth, mfbadjusted, 'Adjusted (MFB) vs. Truth')
# Adjusted (Add) vs. radar (for control purposes)
ax = fig.add_subplot(223, aspect='equal')
scatterplot(truth, addadjusted, 'Adjusted (Add.) vs. Truth')
# Adjusted (Mult.) vs. radar (for control purposes)
ax = fig.add_subplot(224, aspect='equal')
scatterplot(truth, multadjusted, 'Adjusted (Mult.) vs. Truth')
pl.tight_layout()
|
notebooks/multisensor/wradlib_adjust_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Import dependencies
# + jupyter={"source_hidden": true}
import numpy as np
import scipy as sp
import netCDF4 as nC4
import lmoments3 as lm3
# import lmoments3.distr
# import lmoments3.stats
import scipy.signal
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sn
import eofs
from cdo import Cdo
cdo = Cdo(tempdir='/network/aopp/chaos/pred/leach/.cdo_temp_files1')
# uncomment the below if you want to clean up the temporary directory
cdo.cleanTempDir()
import cartopy
from cartopy import crs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cartopy.feature as cfeature
import xarray as xr
import xskillscore as xs
import os
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import sys
import glob
import datetime
import time
import multiprocessing
import tqdm
import cmocean
from shapely.geometry.polygon import LinearRing
import pyarrow as pa
import pyarrow.parquet as pq
sys.path.append('/home/leach/Downloads/')
import ScientificColourMaps6 as SCM6
from IPython.display import display, clear_output
# import my own statistics repository
from mystatsfunctions import LMoments,OLSE
## Matplotlib rcparams setup: house style for all figures in this notebook
matplotlib.rcParams['font.family']='Helvetica'
matplotlib.rcParams['font.size']=11
# matplotlib.rcParams['font.weight']=400
matplotlib.rcParams['image.cmap']='cmo.ice'
# custom colour cycle (hex values without leading '#', as cycler accepts)
matplotlib.rcParams['axes.prop_cycle']=matplotlib.cycler('color',['011936','FF7D00','225560','BFACAA','D72638','788C9B','A33F00','7CAAB0','685655','EB767C'])
matplotlib.rcParams['axes.formatter.limits']=-3,3
# matplotlib.rcParams['axes.labelweight']=300
matplotlib.rcParams['legend.frameon']=False
# boxplots: 5-95% whiskers, no outliers (duplicated showfliers line removed)
matplotlib.rcParams['boxplot.whiskers']=(5,95)
matplotlib.rcParams['boxplot.showfliers']=False
matplotlib.rcParams['boxplot.medianprops.color']='black'
matplotlib.rcParams['errorbar.capsize']=5
matplotlib.rcParams['hist.bins']='auto'
# embed TrueType fonts in PDFs (keeps text editable in vector output)
plt.rcParams['pdf.fonttype'] = 42
# %matplotlib inline
# + jupyter={"source_hidden": true}
## plotting functions:
def add_lat_lon_ticks(ax,lat=True,lon=True):
    """Decorate a cartopy GeoAxes with formatted longitude/latitude ticks.

    Longitude ticks run every 10 degrees from -20 to 40, latitude ticks
    every 10 degrees from 40 to 70; either axis can be disabled via the
    *lon* / *lat* flags.
    """
    if lon:
        ax.set_xticks(np.arange(-20, 50, 10), crs=crs.PlateCarree())
        ax.xaxis.set_major_formatter(
            LongitudeFormatter(zero_direction_label=True))
    if lat:
        ax.set_yticks(np.arange(40, 75, 10), crs=crs.PlateCarree())
        ax.yaxis.set_major_formatter(LatitudeFormatter())
# -
# ## Import Data
# #### Useful definitions
# Defines the PRUDENCE region extents and imports the a land-sea mask.
# + jupyter={"source_hidden": true}
## define EU domain:
# "lon1,lon2,lat1,lat2" in degrees -- presumably a CDO sellonlatbox-style
# argument string; TODO confirm against where EU_rg is used
EU_rg = "-27,45,33,73.5"
## define PRUDENCE region extents:
PD_rg = dict(zip(['BI','IP','FR','CE','SC','AL','MD','EE'],['-10,2,50,59','-10,3,36,44','-5,5,44,50','2,16,48,55','5,30,55,70','5,15,44,48','3,25,36,44','16,30,44,55']))
print(PD_rg)
## get land-sea mask & inversion:
# land (>0.5) and sea (<0.5) masks from the ERA5 land-sea fraction,
# with the excluded class set to missing
lsm = cdo.setctomiss('0',input= '-gtc,0.5 ../../ERA5/ERA5_land_sea_mask.nc')
lsm_inv = cdo.setctomiss('0',input= '-ltc,0.5 ../../ERA5/ERA5_land_sea_mask.nc')
# -
# ### Get the operational forecast, M-climate and ERA5 reanalysis
## list of forecast start dates
# parse the initialisation date (the token before the first '_') from each
# surface-field filename
fcdates = [x.split('/')[-1].split('_')[0] for x in glob.glob('../../OPERATIONAL/EU_FEB_19/sfc/*_sfc.nc')]
# #### mx2t
# operational forecast
# +
## get operational forecasts:
# For each initialisation date, load perturbed (pf) and control (cf) members,
# take the maximum 2m temperature over the heatwave window 25-27 Feb 2019,
# and concatenate: the control is appended as member number 51.
op_mx2t = []
for fcdate in fcdates:
    pf = xr.open_dataset('../../OPERATIONAL/EU_FEB_19/sfc/'+fcdate+'_sfc.nc',chunks={'time':1,'number':10}).mx2t.loc['2019-02-25':'2019-02-27'].max('time')
    cf = xr.open_dataset('../../OPERATIONAL/EU_FEB_19/sfc/ctrl/'+fcdate+'_sfc.nc',chunks={'time':1}).mx2t.loc['2019-02-25':'2019-02-27'].max('time')
    op_mx2t += [xr.concat([pf,cf.expand_dims({'number':[51]})],dim='number').expand_dims({'inidate':[fcdate]})]
op_mx2t = xr.concat(op_mx2t,dim='inidate')
# -
# M-climate (model climatology based on reforecasts)
# +
## get reforecast data:
### need to grab reforecast data for each inidate:
# reforecast initialisation dates (9 per operational start date) whose
# hindcast lead times bracket the heatwave, keyed by operational inidate
rfc_dates = {
    '2019-02-04':['2019-01-'+'{:0>2}'.format(x) for x in [21,24,28,31]]+['2019-02-'+'{:0>2}'.format(x) for x in [4,7,11,14,18]],
    '2019-02-11':['2019-01-'+'{:0>2}'.format(x) for x in [28,31]]+['2019-02-'+'{:0>2}'.format(x) for x in [4,7,11,14,18,21,25]],
    '2019-02-17':['2019-01-'+'{:0>2}'.format(x) for x in [31]]+['2019-02-'+'{:0>2}'.format(x) for x in [4,7,11,14,18,21,25,28]],
    '2019-02-23':['2019-02-'+'{:0>2}'.format(x) for x in [7,11,14,18,21,25,28]]+['2019-03-'+'{:0>2}'.format(x) for x in [4,7]],
}
basedir = '/network/aopp/chaos/pred/leach/OPERATIONAL/EU_FEB_19/sfc/M-climate/2019-02-17/'
# lead times (days from initialisation) corresponding to 25-27 Feb
rfc_days = {
    '2019-02-04':[x-4 for x in [25,26,27]],
    '2019-02-11':[x-11 for x in [25,26,27]],
    '2019-02-17':[x-17 for x in [25,26,27]],
    '2019-02-23':[x-23 for x in [25,26,27]],
}
def get_rfc_data(fcdate):
    """Build the mx2t6 M-climate ensemble for forecast start date *fcdate*.

    For each reforecast initialisation bracketing *fcdate*, select the dates
    in the 20 hindcast years that match the event lead times, take the
    yearly maximum of 6-hourly Tmax via CDO, and stack (time, member) into a
    single 'rfc_number' dimension (220 = 20 years x 11 members per
    initialisation).
    """
    print('getting rfc_data for '+fcdate)
    rfc_data = []
    for i,rfc_date in enumerate(rfc_dates[fcdate]):
        cf_file = basedir+'rfc_init_'+rfc_date+'_cf.nc'
        pf_file = basedir+'rfc_init_'+rfc_date+'_pf.nc'
        year,mon,day = [int(x) for x in rfc_date.split('-')]
        # heatwave-equivalent dates in each of the 20 preceding years
        datetimes = ','.join([(datetime.datetime(year,mon,day)+datetime.timedelta(days=y)-pd.DateOffset(years=x)).strftime('%Y-%m-%d') for x in np.arange(1,21) for y in rfc_days[fcdate]])
        # control member is appended as member number 11
        cf = xr.open_dataset(cdo.yearmax(input="-select,date="+datetimes+" -selname,mx2t6 "+cf_file),chunks = {'time':10}).expand_dims({'number':[11]})
        pf = xr.open_dataset(cdo.yearmax(input="-select,date="+datetimes+" -selname,mx2t6 "+pf_file),chunks = {'time':10})
        rfc_data += [xr.concat([pf,cf],dim='number').stack(rfc_number=['time','number']).assign_coords(rfc_number=np.arange(i*220,(i+1)*220))]
        cf.close()
        pf.close()
    return xr.concat(rfc_data,dim='rfc_number').expand_dims({'inidate':[fcdate]})
# fetch the four M-climates in parallel (one worker per initialisation date)
P1 = multiprocessing.Pool(processes=4)
rfc_mx2t = P1.map(get_rfc_data,list(rfc_dates.keys()))
P1.close()
rfc_mx2t = xr.concat(rfc_mx2t,dim='inidate')
# -
# ERA5
# ERA5 reanalysis Tmax over the same 25-27 Feb window (verification "truth")
ERA5_mx2t = xr.open_dataset('../../ERA5/EU_FEB_19/ERA5_sfc_2019.nc').mx2t.loc['2019-02-25':'2019-02-27'].max('time')
# ### Z500
# operational forecast
# +
## get operational forecasts:
# daily-mean 500 hPa geopotential over the event window; control member
# appended as member number 51, as for mx2t above
op_z500 = []
for fcdate in fcdates:
    pf = xr.open_dataset('../../OPERATIONAL/EU_FEB_19/plev/'+fcdate+'.nc',chunks={'time':1,'number':10}).Z500.loc['2019-02-25':'2019-02-27'].resample(time='1D').mean()
    cf = xr.open_dataset('../../OPERATIONAL/EU_FEB_19/plev/ctrl/'+fcdate+'.nc',chunks={'time':1}).Z500.loc['2019-02-25':'2019-02-27'].resample(time='1D').mean()
    op_z500 += [xr.concat([pf,cf.expand_dims({'number':[51]})],dim='number').expand_dims({'inidate':[fcdate]})]
op_z500 = xr.concat(op_z500,dim='inidate')
# -
# M-climate (model climatology based on reforecasts)
# +
# switch the reforecast base directory to the pressure-level (Z500) files
basedir = '/network/aopp/chaos/pred/leach/OPERATIONAL/EU_FEB_19/plev/M-climate/2019-02-17/'
def get_rfc_data_Z500(fcdate):
    """Build the Z500 M-climate ensemble for forecast start date *fcdate*.

    Same construction as ``get_rfc_data`` but for 500 hPa geopotential:
    yearly means (via CDO) of the selected event-lead-time dates in each
    hindcast year, stacked into a 'rfc_number' dimension of 220 members
    per reforecast initialisation.
    """
    print('getting rfc_data for '+fcdate)
    rfc_data = []
    for i,rfc_date in enumerate(rfc_dates[fcdate]):
        cf_file = basedir+'rfc_init_'+rfc_date+'_Z500_cf.nc'
        pf_file = basedir+'rfc_init_'+rfc_date+'_Z500_pf.nc'
        year,mon,day = [int(x) for x in rfc_date.split('-')]
        datetimes = ','.join([(datetime.datetime(year,mon,day)+datetime.timedelta(days=y)-pd.DateOffset(years=x)).strftime('%Y-%m-%d') for x in np.arange(1,21) for y in rfc_days[fcdate]])
        # control member appended as member number 11
        cf = xr.open_dataset(cdo.yearmean(input="-select,date="+datetimes+" -selname,z "+cf_file),chunks = {'time':10}).expand_dims({'number':[11]})
        pf = xr.open_dataset(cdo.yearmean(input="-select,date="+datetimes+" -selname,z "+pf_file),chunks = {'time':10})
        rfc_data += [xr.concat([pf,cf],dim='number').stack(rfc_number=['time','number']).assign_coords(rfc_number=np.arange(i*220,(i+1)*220))]
        cf.close()
        pf.close()
    return xr.concat(rfc_data,dim='rfc_number').expand_dims({'inidate':[fcdate]})
# fetch the four Z500 M-climates in parallel
P1 = multiprocessing.Pool(processes=4)
rfc_z500 = P1.map(get_rfc_data_Z500,list(rfc_dates.keys()))
P1.close()
rfc_z500 = xr.concat(rfc_z500,dim='inidate')
# -
# ERA5
# ERA5 daily-mean Z500 at 500 hPa over the event window (verification field)
ERA5_z500 = xr.open_dataset('../../ERA5/EU_FEB_19/ERA5_plev_500-850_2019.nc').z.loc['2019-02-25':'2019-02-27'].sel(level=500).resample(time='1D').mean()
# ### Postprocessing
# #### mx2t data to get regional mean values
# For each of the operational forecast / M-climate and ERA5 data.
# +
## derive regional data:
# boolean land mask, with longitudes rotated onto [-180, 180] and sorted
lsm_xr = xr.open_dataset(lsm).lsm.fillna(0).astype(bool)
lsm_xr = lsm_xr.assign_coords(longitude=(((lsm_xr.longitude + 180) % 360) - 180)).sortby('longitude')
# PRUDENCE bounding boxes as a DataFrame with columns lon_1,lon_2,lat_1,lat_2
PD_rg_xr = pd.DataFrame(columns=['lon_1','lon_2','lat_1','lat_2'])
for RG in PD_rg.keys():
    PD_rg_xr.loc[RG] = [float(x) for x in PD_rg[RG].split(',')]
# +
# Land-only, cos(latitude)-weighted regional-mean Tmax for every PRUDENCE
# region, for the operational ensemble, M-climate, and ERA5.
ERA5_mx2t_rg = []
op_mx2t_rg = []
rfc_mx2t_rg = []
for RG in PD_rg_xr.index:
    # region land mask and area weights (latitude slice is high-to-low
    # because the grids store latitude descending)
    lsm_RG = lsm_xr.sel(longitude=slice(PD_rg_xr.loc[RG,'lon_1'],PD_rg_xr.loc[RG,'lon_2']),latitude=slice(PD_rg_xr.loc[RG,'lat_2'],PD_rg_xr.loc[RG,'lat_1']))
    lat_weights = np.cos(np.deg2rad(lsm_RG.latitude))
    # ensembles
    rg_event_data = op_mx2t.sel(longitude=slice(PD_rg_xr.loc[RG,'lon_1'],PD_rg_xr.loc[RG,'lon_2']),latitude=slice(PD_rg_xr.loc[RG,'lat_2'],PD_rg_xr.loc[RG,'lat_1']))
    op_mx2t_rg += [rg_event_data.where(lsm_RG).weighted(lat_weights).mean(['latitude','longitude']).expand_dims({'RG':[RG]})]
    # M-climate
    rg_event_data_rfc = rfc_mx2t.mx2t6.sel(longitude=slice(PD_rg_xr.loc[RG,'lon_1'],PD_rg_xr.loc[RG,'lon_2']),latitude=slice(PD_rg_xr.loc[RG,'lat_2'],PD_rg_xr.loc[RG,'lat_1']))
    rfc_mx2t_rg += [rg_event_data_rfc.where(lsm_RG).weighted(lat_weights).mean(['latitude','longitude']).expand_dims({'RG':[RG]})]
    # ERA5
    rg_ERA5_data = ERA5_mx2t.sel(longitude=slice(PD_rg_xr.loc[RG,'lon_1'],PD_rg_xr.loc[RG,'lon_2']),latitude=slice(PD_rg_xr.loc[RG,'lat_2'],PD_rg_xr.loc[RG,'lat_1']))
    ERA5_mx2t_rg += [rg_ERA5_data.where(lsm_RG).weighted(lat_weights).mean(['latitude','longitude']).expand_dims({'RG':[RG]})]
op_mx2t_rg = xr.concat(op_mx2t_rg,dim='RG').compute()
ERA5_mx2t_rg = xr.concat(ERA5_mx2t_rg,dim='RG').compute()
rfc_mx2t_rg = xr.concat(rfc_mx2t_rg,dim='RG').compute()
# -
# #### Z500 data to get correlations against ERA5
# For each of the operational forecast / M-climate data.
# +
## get correlations over 2019-02-25 to 2019-02-27
# NOTE(review): the variable name says "23_27" but the comment and the data
# slices above use 25-27 Feb -- confirm which window is intended.
# Area-weighted (cos latitude) pattern correlation of each member's
# time-mean Z500 field against the ERA5 time-mean field.
Z500_corr_23_27 = xs.pearson_r(op_z500.mean('time'),ERA5_z500.mean('time'),dim=['latitude','longitude'],weights = xr.broadcast(np.cos(np.deg2rad(ERA5_z500.latitude)),ERA5_z500.mean('time'))[0])
Z500_corr_23_27 = Z500_corr_23_27.assign_coords(inidate=pd.DatetimeIndex(Z500_corr_23_27.inidate)).sortby('inidate').to_pandas()
## and for reforecasts:
Z500_corr_rfc = xs.pearson_r(rfc_z500.z,ERA5_z500.mean('time'),dim=['latitude','longitude'],weights = xr.broadcast(np.cos(np.deg2rad(ERA5_z500.latitude)),ERA5_z500.mean('time'))[0])
Z500_corr_rfc = Z500_corr_rfc.assign_coords(inidate=pd.DatetimeIndex(Z500_corr_rfc.inidate)).sortby('inidate').to_pandas()
# -
# ## Create the figure
# +
## create individual region plots:
fig = plt.figure(figsize=(7.5,7.5))
gs = fig.add_gridspec(2,1,wspace=0.15,hspace=0.05)
# Panel A: lead-time evolution of the regional Tmax forecast distribution
ax = fig.add_subplot(gs[:1,:])
## pick colors for figure
ens_clrs = dict(zip(['ERA5','OP','PI','INC'],['#CBA73D','#39398E','#268071','#6F2C86']))
## choose lead times to show:
fc_lead_times = ['2019-01-'+'{:0>2}'.format(x) for x in [14,21,28,31]] + ['2019-02-'+'{:0>2}'.format(x) for x in [4,7,11,15,17,19,21,23]]
## choose region to show:
for i,RG in enumerate(['BI']):
    lead_times_sorted = sorted(fc_lead_times)
    ## compute kernel density estimates for tmax forecast / fldcor
    kde_bw=0.25
    kde_est_tmax = [sp.stats.gaussian_kde(op_mx2t_rg.sel(RG=RG,inidate=x).values-273.15,kde_bw) for x in lead_times_sorted]
    ## compute y-axis limits based on range of all forecasts
    ylims = [op_mx2t_rg.sel(RG=RG).min()-273.15,op_mx2t_rg.sel(RG=RG).max()-273.15]
    ylims += np.diff(ylims)*np.array([-0.2,0.2])
    line_spacing = 1.1
    lw0=0.8
    lw1=0.7
    # thicker lines for the four highlighted initialisation dates
    lws = [lw0+lw1*(x in ['2019-02-04','2019-02-11','2019-02-17','2019-02-23']) for x in lead_times_sorted]
    choose_plot_type = 'kde'
    if choose_plot_type == 'kde':
        ## plot forecast kdes (scaled and offset so each sits at its inidate)
        [ax.plot(2*24*3600*kde_est_tmax[i].pdf(np.linspace(*ylims,500))/kde_est_tmax[i].pdf(np.linspace(*ylims,500)).max()+pd.to_datetime(x).timestamp(),np.linspace(*ylims,500),c='k',lw=lws[i]) for i,x in enumerate(lead_times_sorted)]
        [ax.plot(pd.to_datetime(x).timestamp(),op_mx2t_rg.sel(RG=RG,inidate=x).mean('number')-273.15,'ok',ms=5,mec='none') for i,x in enumerate(lead_times_sorted)]
        ## add M-climate plot
        choose_rfc_inidate = '2019-02-11'
        kde_mclim = sp.stats.gaussian_kde(rfc_mx2t_rg.sel(RG=RG,inidate=choose_rfc_inidate).values.flatten()-273.15,kde_bw)
        ax.plot(2*24*3600*kde_mclim.pdf(np.linspace(*ylims,500))/kde_mclim.pdf(np.linspace(*ylims,500)).max()+pd.to_datetime('2019-01-07').timestamp(),np.linspace(*ylims,500),c='grey',lw=1.4)
        ax.plot(pd.to_datetime('2019-01-07').timestamp(),rfc_mx2t_rg.sel(RG=RG,inidate=choose_rfc_inidate).mean('rfc_number')-273.15,'o',c='grey',ms=5,mec='none')
    elif choose_plot_type == 'hist':
        ## plot forecast kdes
        [ax.hist(op_mx2t_rg.sel(RG=RG,inidate=x).values-273.15,bins=20,weights=[1/7]*51,histtype='step',orientation='horizontal',bottom=i*line_spacing,color='k',lw=lws[i]) for i,x in enumerate(lead_times_sorted)]
        [ax.plot(i*line_spacing,op_mx2t_rg.sel(RG=RG,inidate=x).mean('number')-273.15,'ok',ms=5,mec='none') for i,x in enumerate(lead_times_sorted)]
        ## add M-climate plot
        ax.hist(rfc_mx2t_rg.sel(RG=RG,inidate=choose_rfc_inidate).values.flatten()-273.15,bins=20,weights=[1/(7*1980/51)]*1980,histtype='step',orientation='horizontal',bottom=-1*line_spacing,color='grey',lw=1.4)
        ax.plot(-1*line_spacing,rfc_mx2t_rg.sel(RG=RG,inidate=choose_rfc_inidate).mean('rfc_number')-273.15,'o',c='grey',ms=5,mec='none')
    ## add ERA5 line
    ax.axhline(ERA5_mx2t_rg.sel(RG=RG)-273.15,color=ens_clrs['ERA5'],ls='--',lw=1,label='ERA5')
    ## add CRPS skill values
    # [ax.text(i*line_spacing+0.05,ylims[1]-0.5,str(round(op_crps_clim.loc[fcdate,RG],2)),va='top',ha='left',fontweight=['normal','bold'][fcdate in ['2019-02-04','2019-02-11','2019-02-17','2019-02-23']]) for i,fcdate in enumerate(lead_times_sorted)]
    ## optional: histogram plot
    # [ax[0].hist(tmax_forecasts_BI[x].flatten(),bottom=i*line_spacing,color='k',alpha=0.2,density=True,orientation='horizontal') for i,x in enumerate(lead_times_sorted)]
    ax.set_yticks(np.arange(-5,30,5))
    ax.set_ylim(*ylims)
    ax.set_ylabel('maximum temperature / \N{DEGREE SIGN}C',va='bottom',labelpad=0)
    ax.set_xlim(pd.to_datetime('2019-01-06 00:00:00').timestamp(),pd.to_datetime('2019-02-28 00:00:00').timestamp())
    ax.patch.set_alpha(0)
    ax.set_xticks([x.timestamp() for x in Z500_corr_23_27.index])
    # ax.set_xticklabels([])
    # shade the heatwave verification window
    ax.fill_between([pd.to_datetime('2019-02-25 00:00:00').timestamp(),pd.to_datetime('2019-02-28 00:00:00').timestamp()],-5,30,color='k',alpha=0.2,lw=0)
    ax.text(0.982,0.5,'heatwave period',va='center',ha='right',transform=ax.transAxes,fontsize='large',rotation=270)
    ## plot layout:
    ax.set_xticklabels('')#["M-climate"]+lead_times_sorted,rotation=-30,ha='left',rotation_mode="anchor")
    # ax.set_xlabel('ENS initialisation date',labelpad=10)
    ## plot a single dot for the legend
    ax.plot([],[],'ok',ms=5,mec='none',label='ENS mean')
    ax.plot([],[],c='k',lw=0.9,label='ENS kde')
    ax.axvline(pd.to_datetime('2019-01-10').timestamp(),lw=1,ls='--')
    ## add in second axis for lead time scale
    axes1 = plt.twiny(ax)
    axes1.set_xlim(pd.to_datetime('2019-01-06 00:00:00').timestamp(),pd.to_datetime('2019-02-28 00:00:00').timestamp())
    axes1.patch.set_alpha(0)
    axes1.set_xticks([x.timestamp() for x in Z500_corr_23_27.index])
    axes1.set_xticklabels([x.days for x in pd.to_datetime('2019-02-26')-Z500_corr_23_27.index[:9]]+[item for sublist in [['',x.days] for x in pd.to_datetime('2019-02-26')-Z500_corr_23_27.index[10::2]] for item in sublist]+[''])
    axes1.set_xlabel('')
    axes1.text(0,1.15,'lead time / days',transform=ax.transAxes,va='bottom',ha='left')
    ax.legend(loc='lower right',bbox_to_anchor=(1,1.125),frameon=False,ncol=5,borderpad=0,labelspacing=0)
    ax.text(1.02,1,'A',transform=ax.transAxes,fontweight='bold',fontsize=20,va='top',ha='left')
## panel B: fraction of the ensemble exceeding each Z500 pattern-correlation level
ax=fig.add_subplot(gs[1,:])
# sort members so row i holds the i-th highest correlation at each inidate
contour_data = Z500_corr_23_27.apply(lambda x:np.sort(x)[::-1],axis=1,raw=True).T
# M-climate (1980 members) interpolated onto 51 quantiles and inserted at
# two dummy dates before the first real initialisation
contour_data[pd.to_datetime('2019-01-10')] = np.interp(np.arange(0,1,1/51)+0.5/51,np.arange(0,1,1/1980)+0.5/1980,Z500_corr_rfc.loc['2019-02-11'].sort_values(ascending=False).values)
contour_data[pd.to_datetime('2019-01-01')] = np.interp(np.arange(0,1,1/51)+0.5/51,np.arange(0,1,1/1980)+0.5/1980,Z500_corr_rfc.loc['2019-02-11'].sort_values(ascending=False).values)
contour_data = contour_data.sort_index(axis=1)
cor_contour = ax.contour(contour_data.columns,contour_data.index/51,contour_data,levels=[0,0.2,0.4,0.5,0.6,0.7,0.8,0.9,0.95,0.97,0.99,0.999],extend='min',colors='k',linewidths=1.4)
cor_contour.collections[0].set_label('500 hPa geopotential height pattern correlation / ')
ax.set_xticks([pd.to_datetime('2019-01-08')]+contour_data.columns[2:].tolist())
ax.set_xticklabels(['M-climate']+list(Z500_corr_23_27.index.strftime('%Y-%m-%d')[:9])+[item for sublist in [['',x] for x in list(Z500_corr_23_27.index.strftime('%Y-%m-%d')[10::2])] for item in sublist]+[''],rotation=-30,ha='left',rotation_mode="anchor")
ax.fill_between(pd.to_datetime(['2019-02-25 00:00:00','2019-02-28 00:00:00']),1/51,1,color='k',alpha=0.2,lw=0)
ax.text(0.982,0.5+0.5/51,'heatwave period',va='center',ha='right',transform=ax.transAxes,fontsize='large',rotation=270)
# place contour labels along an ellipse in the lower-right of the axes
x0 = ax.get_xticks()[-1]
y0 = ax.get_yticks()[-1]
rx = 0.5*(ax.get_xticks()[-1] - ax.get_xticks()[0])
ry = 0.7*(ax.get_yticks()[-1] - ax.get_yticks()[0])
y_vals = np.array([0.98,0.9,0.7,0.5,0.45,0.4,0.34,0.33,0.32,0.3])
x_vals = -rx*np.sqrt(1-((y0-y_vals)/ry)**2)+x0
cor_labels = ax.clabel(cor_contour,fmt='%.2f',manual = [[x_vals[i],y_vals[i]] for i in np.arange(x_vals.size)])
# dotted guides at the four highlighted initialisation dates
for dates in ['2019-02-04','2019-02-11','2019-02-17','2019-02-23']:
    ax.axvline(dates,ls=':',lw=1)
ax.set_xlim(pd.to_datetime('2019-01-06 00:00:00'),pd.to_datetime('2019-02-28 00:00:00'))
ax.axvline(pd.to_datetime('2019-01-10'),lw=1,ls='--')
ax.set_xlabel('initialisation date')
ax.set_ylabel('fraction of ensemble greater')
ax.text(1.02,1,'B',transform=ax.transAxes,fontweight='bold',fontsize=20,va='top',ha='left')
# [fig.savefig('FIGS/Fig2.'+x,dpi=600,bbox_inches='tight') for x in ['png','pdf']]
|
The_forecasts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NAIVE BAYES
#
# **File:** NaiveBayes.ipynb
#
# **Course:** Data Science Foundations: Data Mining in Python
# # IMPORT LIBRARIES
import matplotlib.pyplot as plt # For plotting data
import seaborn as sns # For plotting data
import pandas as pd # For dataframes
from sklearn.naive_bayes import GaussianNB # For naive Bayes classifier
from sklearn.metrics import plot_confusion_matrix # Evaluation measures
# # LOAD AND PREPARE DATA
# Load the training data `trn` and testing data `tst` from the CSV files in the data directory. Separate the data matrix from the class variable.
# +
# Imports the training data
trn = pd.read_csv('data/spambase_trn.csv')
# Separates the attributes X0-X56 into X_trn
# (raw string for the regex: a bare '\d' is an invalid escape sequence and
# raises a DeprecationWarning/SyntaxWarning on modern Python; the matched
# pattern is unchanged)
X_trn = trn.filter(regex=r'\d')
# Separates the class variable into y_trn
y_trn = trn.y
# Imports the testing data
tst = pd.read_csv('data/spambase_tst.csv')
# Separates the attributes X0-X56 into X_tst
X_tst = tst.filter(regex=r'\d')
# Separates the class variable into y_tst
y_tst = tst.y
# Class labels (index 0 = not spam, 1 = spam)
spam = ['Not Spam','Spam']
# -
# Look at the first few rows of the training data.
trn.head()
# # NAIVE BAYES: TRAIN MODEL
# The code below creates a `GaussianNB` object to classify spam vs. not spam using the naive Bayes algorithm.
# Fit a Gaussian naive Bayes classifier on the training attributes
# (fit() returns the estimator itself, so nb is the fitted model).
nb = GaussianNB()
nb.fit(X_trn, y_trn)
# ## Calculate Mean Accuracy on Training Data
# Mean accuracy of the fitted classifier on its own training data.
print("Accuracy on training data: {:.2%}".format(nb.score(X_trn, y_trn)))
# # TEST MODEL
# In this phase, we test the naive Bayes model on the test set `tst`. A good evaluation measure is the `confusion matrix` that gives the fraction of true positives, true negatives, false positives, and false negatives.
#
# ## Visualize the Confusion Matrix
#
# Normalize the scores to display as proportions across rows.
# Row-normalized confusion matrix of the classifier on the test set.
# NOTE(review): plot_confusion_matrix was deprecated and later removed from
# scikit-learn (use ConfusionMatrixDisplay.from_estimator on recent
# versions) -- confirm the pinned sklearn version before upgrading.
plot_confusion_matrix(
    nb, X_tst, y_tst,
    display_labels=spam,
    normalize='true')
# ## Calculate Mean Accuracy on Testing Data
# Mean accuracy of the fitted classifier on the held-out test data.
print("Accuracy on testing data: {:.2%}".format(nb.score(X_tst, y_tst)))
# # CLEAN UP
#
# - If desired, clear the results with Cell > All Output > Clear.
# - Save your work by selecting File > Save and Checkpoint.
# - Shut down the Python kernel and close the file by selecting File > Close and Halt.
|
NaiveBayes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from astropy.table import Table, join, MaskedColumn, vstack
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import scipy
from astropy.time import Time
import pandas as pd
import re
import seaborn as sns
import datetime
from datetime import datetime
from datetime import timedelta
from math import e
from math import pi
from astropy.table import Column
from math import sqrt
import numpy as np
import emcee
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table
import math
import corner
from numpy import exp
from scipy import integrate
from scipy.integrate import quad
import pdb
import powerlaw
import random
# +
# Load the cluster catalogue; extract best-fit log10 mass and log10 age.
AP_data_table=Table.read("APData_csv.csv")
print("AP_data_table:", AP_data_table.colnames)
M=np.array(AP_data_table['logMass-Best'])
Tau= np.array(AP_data_table['logAge-Best'])
def M_lim(Tau):
    """Return the age-dependent (log10) completeness mass limit.

    Clusters with 7 <= log10(age) < 8 are complete above 740 Msun; older
    clusters up to log10(age) = log10(3e8) (~8.48, i.e. 300 Myr) above
    1080 Msun.  The first age in *Tau* falling in either window decides
    the limit, mirroring the original behaviour.

    Parameters
    ----------
    Tau : array_like
        log10 ages of the clusters.

    Returns
    -------
    float or None
        log10 of the limiting mass, or None if no age lies in a window.
    """
    for age in Tau:
        # Boundary ages (exactly 7.0 or 8.0) previously fell through the
        # strict inequalities and yielded an implicit None, which crashed
        # downstream arithmetic; include the boundaries explicitly.
        if 7.0 <= age < 8.0:
            return np.log10(740)
        if 8.0 <= age <= np.log10(300000000):
            return np.log10(1080)
    return None
def lnobs_like(M, Tau):
    """Log of the logistic completeness (observation) probability per cluster.

    The original loop returned on its very first iteration (both branches
    return), so the entire sample was accepted or rejected based on M[0]
    alone.  This version evaluates each log-mass independently: masses
    below the age-dependent limit get -inf; above it the logistic term
    -log(1 + exp(-a_lim*(M - lim))) smoothly approaches 0.

    Parameters
    ----------
    M : array_like
        log10 cluster masses.
    Tau : array_like
        log10 cluster ages (only used to pick the completeness limit).
    """
    a_lim = 5.0
    lim = M_lim(Tau)
    M = np.asarray(M, dtype=float)
    # logaddexp(0, x) = log(1 + exp(x)) computed without overflow
    logistic_term = -np.logaddexp(0.0, -a_lim * (M - lim))
    return np.where(M > lim, logistic_term, -np.inf)
def lnZ(theta, M, Tau):
    """Log of the normalisation constant of the observed Schechter PDF.

    Integrates the Schechter function times the logistic completeness
    function (the same one used in ``lnobs_like``) from the limiting
    linear mass to infinity.

    Parameters
    ----------
    theta : (alpha, M_c)
        Power-law slope and log10 of the characteristic cut-off mass.
    M : unused
        Kept only for interface compatibility with the callers.
    Tau : array_like
        log10 ages, used to pick the completeness limit.
    """
    alpha, M_c = theta
    a_lim = 5.0
    lin_M_c = 10 ** M_c
    lim = M_lim(Tau)
    lin_lim = 10 ** lim
    def f(m):
        if m > lin_lim:
            # Completeness factor is 1 / (1 + exp(-a*(log10(m) - lim))).
            # The original applied **(-1) to exp(...) alone (operator
            # precedence), i.e. 1 + exp(+a*(...)), which diverges above
            # the limit instead of saturating at 1.
            completeness = 1.0 / (1.0 + np.exp(-a_lim * (np.log10(m) - lim)))
            return (m ** alpha) * np.exp(-m / lin_M_c) * completeness
        return 0.0
    ans, err = quad(f, lin_lim, np.inf)
    return np.log(ans)
def lnlike(theta, M, Tau):
    """Total log-likelihood of the log-masses *M* under a Schechter model.

    Sums, over clusters, the unnormalised Schechter log-density in linear
    mass, the completeness term, and minus the log normalisation constant.
    """
    alpha, M_c = theta
    linear_mass = 10 ** M
    linear_cutoff = 10 ** M_c
    per_cluster = (-linear_mass / linear_cutoff
                   + alpha * np.log(linear_mass)
                   + lnobs_like(M, Tau)
                   - lnZ(theta, linear_mass, Tau))
    return np.sum(per_cluster)
def lnprior(theta):
    """Flat (uniform) prior on (alpha, log10 M_c).

    Returns 0.0 inside the box -3 <= alpha <= -1, 3 <= M_c <= 8,
    and -inf everywhere else.
    """
    alpha, M_c = theta
    inside_box = (-3 <= alpha <= -1) and (3 <= M_c <= 8)
    return 0.0 if inside_box else -np.inf
def lnprob(theta, M, Tau):
    """Log posterior: log prior + log likelihood.

    Short-circuits to -inf (without evaluating the likelihood) whenever the
    prior already rules the parameters out.
    """
    prior = lnprior(theta)
    if np.isfinite(prior):
        return prior + lnlike(theta, M, Tau)
    return -np.inf
# Walkers start in a tight Gaussian ball around this (alpha, log10 M_c) guess.
starting_point=np.array([-1.99, 3.93])
ndim, nwalkers = 2, 500
nsteps= 600
burnin=100  # steps discarded before building the posterior sample
pos = starting_point + 1e-2*np.random.randn(nwalkers, ndim)
# args is a 2-element list; emcee unpacks it so lnprob is called as
# lnprob(theta, M, Tau).
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=([M, Tau]))
sampler.run_mcmc(pos, nsteps)
#plot chain: trace of the alpha coordinate for every walker
plt.plot(np.transpose(sampler.chain[:,:,0]))
plt.show()
sampler.chain
# Drop the burn-in and flatten to one (n_samples, ndim) array.
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
# Corner plot with 16/50/84 percentile annotations.
fig = corner.corner(samples, labels=["Alpha", "Log(M_c)"], label_kwargs={"fontsize": 18},
                    quantiles=[0.16, 0.5, 0.84], show_titles=True, title_kwargs={"fontsize": 18})
fig.show()
|
M31_MF_replication/code_drafts/Shecter_Function_Completeness_incoporated.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Lambda Notebook (Python 3)
# language: python
# name: lambda-notebook
# ---
reload_lamb()
# # Compositional DRT in the Lambda Notebook
#
# ### Notebook author: <NAME>
#
# This notebook outlines one way to implement (part of) compositional DRT as developed in Reinhard Muskens, "[Combining Montague semantics and discourse representation,](http://cogprints.org/4715/2/combining.pdf)" Linguistics and Philosophy 19, 1996.
# First, I define a new type $b$, which will be the type of a DRS
# box in the typed lambda calculus.
# Add a type for boxes
drt_types = meta.get_type_system()
type_b = types.BasicType("b") # Type of boxes
drt_types.add_atomic(type_b)
meta.set_type_system(drt_types)
drt_types
# Next, I define a new binding operator, $\text{Box}$, in the metalanguage.
#
# The metalanguage expression $\text{Box}~u_1, u_2, \ldots, u_n~.~\phi(u_1, u_2, \ldots, u_n)$ is equivalent to the more conventional
# linearized box expression $[\;u_1, u_2, \ldots, u_n \mid \phi(u_1, u_2, \ldots, u_n)\;]$.
# +
class DRTBox(meta.BindingOp):
    """Metalanguage binding operator for a DRS box (Muskens 1996).

    ``Box u1,...,un : phi`` corresponds to the linearised box
    ``[ u1,...,un | phi ]``: it introduces the discourse referents
    u1..un and carries the (type-t) conditions phi.  The whole
    expression has the box type ``b``.
    """
    canonical_name = "Box"
    op_name_latex = "\\text{Box}~"
    allow_multivars=True  # A box can introduce more than one
                          # discourse referent.
    allow_novars=True     # A box can also introduce no new
                          # discourse referents.
    # Many of the following methods will be implemented in a
    # future version of meta.BindingOp, so DRTBox will inherit
    # them automatically.
    def __init__(self, var_sequence, body, assignment=None):
        self.derivation = None
        self.type_guessed = False
        self.defer = False
        self.let = False
        self.type = type_b
        new_seq = list()
        # Accept either a meta.Tuple or a plain sequence of variables.
        if isinstance(var_sequence, meta.Tuple):
            var_sequence = var_sequence.tuple()
        for v in var_sequence:
            # (name, type) pairs are promoted to term expressions first.
            if isinstance(v, tuple):
                v = meta.TypedExpr.term_factory(v[0], typ=v[1])
            v = self.ensure_typed_expr(v)
            if not isinstance(v.type, types.BasicType):
                raise types.TypeMismatch(v, v.type, "DRTBox requires atomic non-variable type for universe")
            if not meta.is_var_symbol(v.op):
                raise ValueError("Need variable name (got '%s')" % v.op)
            new_seq.append(v)
        self.var_sequence = new_seq
        # Type-check the body (must be type t) under an assignment extended
        # with the box's own universe.
        self.init_body(self.ensure_typed_expr(body, types.type_t, assignment=self.scope_assignment(assignment)))
        self.op = "%s:" % (self.canonical_name)
        self.args[0] = meta.Tuple(self.var_sequence)
    def scope_assignment(self, assignment=None):
        """Return a copy of ``assignment`` extended so each universe variable
        maps to itself (making it visible while checking the body)."""
        if assignment is None:
            assignment = dict()
        else:
            assignment = assignment.copy()
        for v in self.var_sequence:
            assignment[v.op] = v
        # BUG FIX: the extended assignment was built but never returned, so
        # callers (e.g. __init__'s ensure_typed_expr) received None.
        return assignment
    @property
    def varname(self):
        # No single bound variable: a box binds a whole sequence.
        return None
    @property
    def vartype(self):
        # See varname: not meaningful for a multi-variable binder.
        return None
    @property
    def var_instance(self):
        return meta.Tuple(self.var_sequence)
    def latex_str(self, **kwargs):
        # Render as the linearised box [ u1,...,un | conditions ]; a body of
        # `True` is shown as an empty condition list.
        var_repr = [v.latex_str() for v in self.var_sequence]
        if self.body == meta.true_term:
            return meta.ensuremath("[~%s~\mid~]" % (",".join(var_repr)))
        else:
            return meta.ensuremath("[~%s~\mid~%s~]" % (",".join(var_repr),
                                                       self.body.latex_str()))
    def copy(self):
        return DRTBox(self.var_sequence, self.body)
    def copy_local(self, var_seq, body):
        return DRTBox(var_seq, body)
# Register the operator so the %%lamb magic / parser recognises "Box".
meta.BindingOp.add_op(DRTBox)
# -
DRTBox([te("x_e"), te("y_e")], te("P_<e,t>(x_e)"))
# The next cell demonstrates how to create a box in the Lambda Notebook metalanguage.
#
# The following points are particularly important:
# * The variables introduced by a box must be of type $e$. This differs from Muskens 1996, who defines a new type $\pi$ for _registers_.
# * The _conditions_ in the body of the box must be of type $t$. If a box has multiple conditions, they are linked using conjunction `&`.
# * Boxes can also have empty variable lists if they introduce no new discourse referents.
# * Boxes with no conditions—that is, boxes that _only_ introduce new discourse referents—should have $True$ as their body.
# +
# %%lamb
# This is the denotation of example (1), "A man adores a woman. She abhors him.", in Muskens 1996.
box1 = Box x1_e, x2_e : Man(x1) & Woman(x2) & Adores(x1, x2) & Abhors(x2, x1)
# An example of a box with an empty variable list
box2 = Box : Adores(John_e, Mary_e)
# An example of a box with an "empty" body
box3 = Box x_e, y_e, z_e : True
# -
# Next, I define the semicolon operator that "chains" two boxes together. This is equivalent to sentential conjunction in dynamic semantics and hence will be denoted by '&' in the metalanguage; in Muskens 1996, it is denoted by the semicolon operator. Additionally, I define a reduction operation on boxes that merges them together as described by Muskens's _Merging Lemma_.
# +
class BinaryJoinExpr(meta.BinaryOpExpr):
    # Dynamic conjunction of two boxes (Muskens's semicolon operator),
    # written '&' in the metalanguage and rendered ';' in LaTeX.
    def __init__(self, arg1, arg2):
        super().__init__(type_b, "&", arg1, arg2, op_name_latex = ";")
    def reducible(self):
        # Only literal boxes can be merged; other type-b expressions stay as-is.
        return all(isinstance(x, DRTBox) for x in self.args)
    def reduce(self):
        # Merging Lemma: [u|phi] ; [v|psi]  ==  [u,v | phi & psi],
        # provided none of v occurs free in phi.
        b1 = self.args[0]; b2 = self.args[1]
        b1_free_vars = b1.body.free_variables()
        # Only merge if none of the variables introduced by the second
        # argument are free in the body of the first
        if all(x.op not in b1_free_vars for x in b2.var_sequence):
            combined_vars = b1.var_sequence + b2.var_sequence
            combined_body = meta.BinaryAndExpr(b1.body, b2.body).simplify_all()
            return meta.derived(DRTBox(combined_vars, combined_body), self, desc="Merging Lemma")
        else:
            # Merge blocked: return an (unreduced) join of the two boxes.
            return BinaryJoinExpr(b1, b2)
# Add the new operation to the metalanguage
def and_factory(arg1, arg2):
arg1 = meta.TypedExpr.ensure_typed_expr(arg1)
arg2 = meta.TypedExpr.ensure_typed_expr(arg2)
ts = meta.get_type_system()
if ts.eq_check(arg1.type, types.type_t):
return meta.BinaryAndExpr(arg1, arg2)
elif ts.eq_check(arg1.type, type_b):
return BinaryJoinExpr(arg1, arg2)
else:
raise types.TypeMismatch(arg1, arg2, "Unknown types for operator &")
meta.binary_symbols_to_op_exprs['&'] = and_factory
# -
# The following cell shows the semicolon operator in action.
# %%lamb
box1 = Box x1_e, x2_e : True
box2 = Box : Man(x1_e)
box3 = Box : Woman(x2_e)
box4 = box1 & box2 & box3
# The last box, which contains several boxes linked by the semicolon operator, can be reduced with the Merging Lemma; note that the compositional system will automatically apply this operation by default.
box4.reduce_all()
# We now have all the machinery needed to define some simple lexical entries from Muskens 1996.
# %%lamb
||man|| = L u_e : (Box : Man(u))
||runs|| = L u_e : (Box : Runs(u))
||fluffy|| = L p_<e,b> : p(Fluffy_e)
||loves|| = L p_<<e,b>,b> : L u_e : p(L v_e : (Box : Loves(u, v)))
||cat|| = L u_e : (Box : Cat(u))
# The next entry is the indefinite article "a" with the subscript 1;
# Later, we will see a more elegant way to handle indexed lexical entries.
||a1|| = L p_<e,b> : L q_<e,b> : (Box u1 : True_t) & p(u1) & q(u1)
# The indefinite article "a" with the subscript 2
||a2|| = L p_<e,b> : L q_<e,b> : (Box u2 : True_t) & p(u2) & q(u2)
# Composition now works as expected:
(fluffy * runs).trace()
r = ((a1 * cat) * (loves * (a2 * man)))
r
r.tree()
r[0].content.derivation # show the reduction / simplification of the last step
# Finally, the current solution of defining a separate lexical entry for each index that a word like "a" or "himself" can take is cumbersome. The `indexed_item` function defined in the next cell is one way around this problem. The first argument of `indexed_item` is a string defining the name of the lexical item, and the second is a lambda calculus expression defining its content. Wherever something should depend on the value of an index, such as in the name of a discourse referent introduced by "a", use the `#` character.
# +
def indexed_item(name, raw_string):
    """Build a family of indexed lexical entries.

    `raw_string` is a metalanguage expression in which every '#' stands for
    the index; the returned callable maps an index n to a lang.Item named
    ``name + str(n)`` whose content has each '#' replaced by n.
    """
    new_name = name + "{0}"
    # '#' is the index placeholder; turn it into a str.format slot.
    ex_string = raw_string.replace("#", "{0}")
    return lambda n: lang.Item(new_name.format(n), te(ex_string.format(n)))
# Indexed indefinite article: a(n) introduces discourse referent u_n.
a = indexed_item("a", "L p_<e,b> : L q_<e,b> : (Box u# : True_t) & p(u#) & q(u#)")
# Indexed reflexive: himself(n) picks up referent u_n.
himself = indexed_item("himself", "L p_<e,b> : p(u#)")
# -
# The following cells show how these indexed items can be used in composition.
((a(1) * man) * (loves * himself(1)))
(a(3) * cat) * (loves * (a(5) * man))
# #### TODO:
#
# * Operations that take boxes to conditions, like **not**, **or**, and $\implies$
# * Other composition operations, like Muskens's $T_3$ SEQUENCING and $T_4$ QUANTIFYING-IN
# * Referent accessibility
|
notebooks/fragments/Compositional DRT.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Rate limiting
#
# Requests are rate limited to prevent a single user from monopolising the resources.
#
# Response headers show we are allowed 55000 requests over an hour (3600 seconds): an average 15 requests per second.
#
# ## Exercises 7
#
# This script queries the ping endpoint 25 times, printing the count, the HTTP Status Code, and the X-RateLimit-Remaining header each time.
#
# 1\. Increase the number of loops, do you start to get 429 errors?
#
# 2\. Can you add in a step to make it wait a few seconds every iteration? Or every 100 iterations?
# +
# Exercise 7.1
# Query the Ensembl REST ping endpoint repeatedly, printing the remaining
# request allowance each time, and pause periodically to respect the limit.
import requests, sys, time

server = "http://rest.ensembl.org/"
con = "application/json"
ext_ping = "/info/ping?"
x = 0
while x < 10:
    # BUG FIX: the original dangling "if x" was a syntax error.  Per
    # exercise 7.2, wait a few seconds every 100 iterations so we stay
    # under the X-RateLimit allowance.
    if x > 0 and x % 100 == 0:
        time.sleep(3)
    # submit the query
    ping = requests.get(server+ext_ping, headers={ "Accept" : con})
    x += 1
    print ("count:", x, "status:", ping.status_code, "remaining:", ping.headers['X-RateLimit-Remaining'])
# -
# [Next page: Exercises 7 – answers](7_Rate_limiting_answers.ipynb)
|
Python3/7_Rate_limiting.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://www.accelebrate.com/blog/using-defaultdict-python/
from collections import defaultdict
# Any missing key gets the factory's value: unknown people default to 'Vanilla'.
ice_cream = defaultdict(lambda: 'Vanilla')
# +
ice_cream['Sarah'] = 'Chunky Monkey'
ice_cream['Abdul'] = 'Butter Pecan'
print (ice_cream['Sarah'])
# 'Joe' was never assigned, so the default factory supplies 'Vanilla'.
print (ice_cream['Joe'])
# -
from collections import defaultdict
food_list = 'spam spam spam spam spam spam eggs spam'.split()
# int() returns 0, so each new key starts at zero and we can increment freely.
food_count = defaultdict(int) # default value is 0
for food in food_list:
    food_count[food] += 1
food_count
|
temp_defaultdict_tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# % PACKAGES INCLUDED HERE
# % DO NOT NEED TO CHANGE
# \documentclass[conference]{IEEEtran}
# %\IEEEoverridecommandlockouts
# % The preceding line is only needed to identify funding in the first footnote. If that is unneeded, please comment it out.
# \usepackage{cite}
# \usepackage{amsmath,amssymb,amsfonts}
# \usepackage{algorithmic}
# \usepackage{graphicx}
# \usepackage{textcomp}
# \def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
# T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
# \begin{document}
# + active=""
# % TITLE GOES HERE
#
# \title{Paper Title*\\}
#
# + active=""
# % AUTHOR NAMES GOES HERE
#
# \author{\IEEEauthorblockN{1\textsuperscript{st} Given Name Surname}
# \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
# \textit{name of organization (of Aff.)}\\
# City, Country \\
# email address}
# \and
# \IEEEauthorblockN{2\textsuperscript{nd} Given Name Surname}
# \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
# \textit{name of organization (of Aff.)}\\
# City, Country \\
# email address}
# \and
# \IEEEauthorblockN{3\textsuperscript{rd} Given Name Surname}
# \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
# \textit{name of organization (of Aff.)}\\
# City, Country \\
# email address}
# \and
# \IEEEauthorblockN{4\textsuperscript{th} Given Name Surname}
# \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
# \textit{name of organization (of Aff.)}\\
# City, Country \\
# email address}
# \and
# \IEEEauthorblockN{5\textsuperscript{th} Given Name Surname}
# \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
# \textit{name of organization (of Aff.)}\\
# City, Country \\
# email address}
# \and
# \IEEEauthorblockN{6\textsuperscript{th} Given Name Surname}
# \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
# \textit{name of organization (of Aff.)}\\
# City, Country \\
# email address}
# }
#
# \maketitle
# + active=""
# % ABSTRACT
#
# \begin{abstract}
# This document is a model and instructions for \LaTeX.
# This and the IEEEtran.cls file define the components of your paper [title, text, heads, etc.]. *CRITICAL: Do Not Use Symbols, Special Characters, Footnotes,
# or Math in Paper Title or Abstract.
# \end{abstract}
#
# + active=""
# % KEYWORDS
#
# \begin{IEEEkeywords}
# component, formatting, style, styling, insert
# \end{IEEEkeywords}
# + active=""
# % INTRODUCTION SECTION
# \section{Introduction}
#
# Start typing here \cite{b1}.
# + active=""
# % BACKGROUND SECTION
# \section{Background}
#
# Start typing here \cite{b2}.
# + active=""
# % METHODS SECTION
# \section{Methods}
#
# Start typing here \cite{b3}.
# + active=""
# % RESULTS SECTION
# \section{Results}
#
# Start typing here \cite{b4}.
# + active=""
# % DISCUSSION SECTION
# \section{Discussion}
#
# Start typing here \cite{b5}.
# + active=""
# % REFERENCES
# % THIS IS CREATED AUTOMATICALLY
# \bibliographystyle{IEEEtran}
# \bibliography{References} % change if another name is used for References file
# + active=""
# \end{document}
|
Paper/Paper_Template.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pyremo-dev
# language: python
# name: pyremo-dev
# ---
# +
import glob
import xarray as xr
import pyremo as pr
# -
# ## Monthly
# Collect the monthly REMO output files for the year 2000 and sort them so
# open_mfdataset concatenates them in time order.
files = glob.glob("/work/ch0636/g300046/from_Mistral/remo_results_056000/2000/e*m*.nc")
files.sort()
files
# +
import xarray as xr

# data_vars/coords="minimal" avoids broadcasting variables along the
# concatenation dimension when merging the monthly files.
ds = xr.open_mfdataset(files, data_vars="minimal", coords="minimal")
# -
ds = ds.copy()
# Extract 2m temperature plus its grid-mapping variable, fix the time axis,
# and write a single-year NetCDF file.
da = ds.TEMP2.to_dataset()
da = da.merge(ds.rotated_latitude_longitude)
da = pr.parse_dates(da, use_cftime=True)
da.TEMP2.encoding["_FillValue"] = 1.0e20
da.to_netcdf("remo_EUR-11_TEMP2_2000.nc")
# ## Hourly
# +
from dask.distributed import Client

# Local dask cluster for parallel extraction/IO.
client = Client()
# -
client
# +
from pyremo.archive import RemoArchive

archive = RemoArchive("/work/ch0636/g300046/from_Mistral/remo_results_056000")
# -
# Code 167 is the TEMP2 variable in the REMO code table used here.
files = archive._extract_code(167, time_range=("1999-12", "2000-01"), parallel=True)
files
ds = xr.open_mfdataset(files, preprocess=pr.parse_dates, data_vars="minimal")
ds
ds.TEMP2.encoding["_FillValue"] = 1.0e20
# Coordinates should not carry a _FillValue attribute in the output file.
for c in ds.coords:
    ds[c].encoding["_FillValue"] = None
# Write a short hourly test slice (first three days of 2000).
ds.sel(time=slice("2000-01-01T00:00:00", "2000-01-03T00:00:00")).to_netcdf(
    "remo_EUR-11_TEMP2_1hr.nc"
)
|
notebooks/prepare-test-data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="W-1zl5XdYInf"
# # Question Answer Application using BERT
# -
# This Code Template is for a Question Answer Application using BERT, which stands for Bidirectional Encoder Representations from Transformers.
# + [markdown] id="gVq-TuylYRDW"
# ## Required Packages
# + id="aQl0MMrOGIup"
# !pip install transformers
# + id="4mzZJJII62wG"
from transformers import BertForQuestionAnswering
from transformers import BertTokenizer
import torch
import textwrap
# + [markdown] id="aIb7HYgt7G-r"
# ### Initialization
# Enter the text from which we want to ask the question.
# + id="3ov4qMBm7XKY"
Text="We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models (Peters et al., 2018a; Radford et al., 2018), BERT is designed to pretrain deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be finetuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial taskspecific architecture modifications. BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement)."
# + [markdown] id="1WThOUtpYvG-"
# ### Model
#
# The BERT model was proposed in BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding by <NAME>, <NAME>, <NAME> and <NAME>. It’s a bidirectional transformer pretrained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia.
#
# Refer [API](https://huggingface.co/transformers/model_doc/bert.html) for the parameters
# + [markdown] id="Kyg45Fbm9ZD-"
# As our reference text, I've taken the Abstract of the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf).
#
# + id="-Mnv95sX-U9K"
model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
tokenizer = BertTokenizer.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
# + [markdown] id="BSL8qvfRBh77"
# # Steps involved in the answer_question Function
#
# The function answer_question() accepts the text from the user and applies the tokenizer to the input text. A segment_id is allocated for every input token. It finds the tokens with the highest start and end scores, gets the string versions of the input tokens, selects the remaining tokens, and joins them with whitespace, recombining subword tokens with the previous token.
# + id="rH8NbBlsfxZ_"
def answer_question(question, answer_text):
    '''
    Takes a `question` string and an `answer_text` string (which contains the
    answer), and identifies the words within the `answer_text` that are the
    answer. Prints them out.

    Relies on the module-level `tokenizer` and `model` (BERT fine-tuned on
    SQuAD) being loaded.
    '''
    # Encode question + passage as one sequence: [CLS] question [SEP] text [SEP]
    input_ids = tokenizer.encode(question, answer_text)
    print('Query has {:,} tokens.\n'.format(len(input_ids)))
    # First [SEP] marks the end of the question segment.
    sep_index = input_ids.index(tokenizer.sep_token_id)
    num_seg_a = sep_index + 1
    num_seg_b = len(input_ids) - num_seg_a
    # Segment (token type) ids: 0 for the question tokens, 1 for the passage.
    segment_ids = [0]*num_seg_a + [1]*num_seg_b
    assert len(segment_ids) == len(input_ids)
    outputs = model(torch.tensor([input_ids]),
                    token_type_ids=torch.tensor([segment_ids]),
                    return_dict=True)
    start_scores = outputs.start_logits
    end_scores = outputs.end_logits
    # The answer span runs from the highest-scoring start token to the
    # highest-scoring end token (no constraint that start <= end here).
    answer_start = torch.argmax(start_scores)
    answer_end = torch.argmax(end_scores)
    tokens = tokenizer.convert_ids_to_tokens(input_ids)
    answer = tokens[answer_start]
    for i in range(answer_start + 1, answer_end + 1):
        # WordPiece continuation tokens start with '##' and are glued onto
        # the previous token without a space.
        if tokens[i][0:2] == '##':
            answer += tokens[i][2:]
        else:
            answer += ' ' + tokens[i]
    print('Answer: "' + answer + '"')
# + id="y4VPq6FdjxyX" colab={"base_uri": "https://localhost:8080/"} outputId="79614afa-984e-47fa-a935-928c5453ad69"
wrapper = textwrap.TextWrapper(width=80)
bert_abstract =Text
print(wrapper.fill(bert_abstract))
# + [markdown] id="tEB654YCknYv"
# -----------------------------
# Ask BERT what its name stands for (the answer is in the first sentence of the abstract).
# + id="wfntqRCBegGj" colab={"base_uri": "https://localhost:8080/"} outputId="f254d4c7-3e97-4a8f-c383-915bcc2e68eb"
question = "What does the 'B' in BERT stand for?"
answer_question(question, bert_abstract)
|
Natural Language Processing/NLP/QuestionAnswer_BERT.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Installing Python
# <img src="https://www.python.org/static/community_logos/python-logo.png" align="left"> This chapter contains some instructions about how to install Python on your personal computer.
#
# There are many ways to install Python on your computer (and while most of them work, some of them are less practical than others for scientific applications). Unless you know what you are doing (i.e.: you have been doing this before), please follow these instructions strictly.
#
#
# ```{admonition} Important! What to do if you **already** have python installed on your laptop
# :class: warning, dropdown
#
# **If you already have anaconda / conda / miniconda installed**:
#
# You can keep your installation if it works for you. If you want to start from scratch, uninstall anaconda and start again as explained below.
#
# ---
#
# **If you don't know what I'm talking about:**
#
# Then you should probably follow the instructions below.
# ```
# ```{admonition} Important! For Windows 7 users
# :class: warning, dropdown
#
# I'm sorry Windows 7 is [not supported since January 2020](https://en.wikipedia.org/wiki/Windows_7). It seems that you can't install `mambaforge` on Windows 7. Regardless of python, I strongly recommend to update to a version with security updates.
# ```
# ## Install Miniconda
# ### On Windows
#
# Go to the miniforge download website: https://github.com/conda-forge/miniforge#mambaforge
#
# Download the installers for **Mambaforge** and your operating system (Windows, Mac OS or Linux).
# Unless you have an uncommon architecture (unlikely), you should choose the `x86_64` files.
#
# <img src="../img/download_options.png" width="400"> <br>
#
# Double-click the `.exe` file.
#
# Follow the instructions on the screen.
#
# If you are unsure about any setting, accept the defaults. You can change them later. At this stage, I recommend:
#
# <img src="../img/install_options.png" width="400">
#
# ### On Mac OS and Linux
#
# For these platforms, no need to download the files yourself. Open a terminal and do:
#
# ```none
# curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"
# bash Mambaforge-$(uname)-$(uname -m).sh
# ```
#
# or, if this doesn't work:
#
# ```none
# wget "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"
# bash Mambaforge-$(uname)-$(uname -m).sh
# ```
#
# The first command downloads the tool, and the second installs it. Follow the prompts on the installer screens.
#
# If you are unsure about any setting, accept the defaults **except for the last question asking you if you wish to run `conda init`: here, answer "yes".**
#
# To make the changes take effect, close and then re-open your terminal window.
#
# ### Testing your installation
#
#
# To see if everything worked well, open a terminal (Mac and Linux). On Windows, open the `miniforge prompt` (from the Start menu, search for and open "miniforge prompt"):
#
# <img src="../img/miniforge.png" width="400"> <br>
#
# and type in:
#
# ```none
# mamba list
# ```
#
# You should see a long list of package names.
#
# If you type:
#
# ```none
# python
# ```
#
# A new python prompt should appear, with something like:
#
# ```none
# Python 3.9.7 | packaged by conda-forge | (default, Sep 29 2021, 19:15:42) [MSC v.1916 64 bit (AMD64)] on win32
# Type "help", "copyright", "credits" or "license" for more information.
# >>>
# ```
#
# You can type ``exit()`` to get out of the python interpreter.
#
#
# ```{admonition} Optional: a short explanation of what we've just done
# :class: note, dropdown
#
# Although the instructions above should work in almost all situations and should get you ready for the class,
# I should explain what we have just done in case you are interested or if you google:
#
# We have just installed [miniconda](https://docs.conda.io/en/latest/miniconda.html) which is a minimal installer for the larger project Anaconda. Anaconda is a scientific Python installation which, in my opinion, has too many options and tools that you won't need.
#
# **But** we haven't installed `miniconda` only: we have also set some options per default automatically:
# - we now use [conda-forge](https://conda-forge.org/) as default channels to download the python packages we will use later in the lecture (instead of anaconda channels).
# - we now use [mamba](https://mamba.readthedocs.io) as the default python package install tool (instead of `conda`). `mamba install` behaves exactly like `conda install`, but is significantly faster.
# ```
# ## Learning summary
# - you know how to install python + mamba on your computer. What this means (especially mamba) will be learned later.
# - you know how to open a python interpreter from the miniforge prompt (or the terminal in Linux / Mac OS) and close it with `exit()`.
# - you are ready for the introduction lectures!
|
book/week_01/01-Installation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predicting Housing Prices using Tensorflow + Cloud AI Platform
#
# This notebook will show you how to create a tensorflow model, train it on the cloud in a distributed fashion across multiple CPUs or GPUs and finally deploy the model for online prediction. We will demonstrate this by building a model to predict housing prices.
#
import pandas as pd
import tensorflow as tf
# + slideshow={"slide_type": "-"}
print(tf.__version__)
# -
# ## Tensorflow APIs
# <img src="assets/TFHierarchy.png" width="50%">
#
# Tensorflow is a hierarchical framework. The further down the hierarchy you go, the more flexibility you have, but that more code you have to write. A best practice is to start at the highest level of abstraction. Then if you need additional flexibility for some reason drop down one layer.
#
# For this tutorial we will be operating at the highest level of Tensorflow abstraction, using the Estimator API.
# ## Steps
#
# 1. Load raw data
#
# 2. Write Tensorflow Code
#
# 1. Define Feature Columns
#
# 2. Define Estimator
#
# 3. Define Input Function
#
# 4. Define Serving Function
#
# 5. Define Train and Eval Function
#
# 3. Package Code
#
# 4. Train
#
# 5. Deploy Model
#
# 6. Get Predictions
# ### 1) Load Raw Data
#
# This is a publicly available dataset on housing prices in Boston area suburbs circa 1978. It is hosted in a Google Cloud Storage bucket.
#
# For datasets too large to fit in memory you would read the data in batches. Tensorflow provides a queueing mechanism for this which is documented [here](https://www.tensorflow.org/guide).
#
# In our case the dataset is small enough to fit in memory so we will simply read it into a pandas dataframe.
# +
#downlad data from GCS and store as pandas dataframe
data_train = pd.read_csv(
filepath_or_buffer='https://storage.googleapis.com/spls/gsp418/housing_train.csv',
names=["CRIM","ZN","INDUS","CHAS","NOX","RM","AGE","DIS","RAD","TAX","PTRATIO","MEDV"])
data_test = pd.read_csv(
filepath_or_buffer='https://storage.googleapis.com/spls/gsp418/housing_test.csv',
names=["CRIM","ZN","INDUS","CHAS","NOX","RM","AGE","DIS","RAD","TAX","PTRATIO","MEDV"])
# -
data_train.head()
# #### Column Descriptions:
#
# 1. CRIM: per capita crime rate by town
# 2. ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
# 3. INDUS: proportion of non-retail business acres per town
# 4. CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
# 5. NOX: nitric oxides concentration (parts per 10 million)
# 6. RM: average number of rooms per dwelling
# 7. AGE: proportion of owner-occupied units built prior to 1940
# 8. DIS: weighted distances to five Boston employment centers
# 9. RAD: index of accessibility to radial highways
# 10. TAX: full-value property-tax rate per $10,000
# 11. PTRATIO: pupil-teacher ratio by town
# 12. MEDV: Median value of owner-occupied homes
# ### 2) Write Tensorflow Code
# #### 2.A Define Feature Columns
#
# Feature columns are your Estimator's data "interface." They tell the estimator in what format they should expect data and how to interpret it (is it one-hot? sparse? dense? continuous?). https://www.tensorflow.org/api_docs/python/tf/feature_column
#
#
#
# +
FEATURES = ["CRIM", "ZN", "INDUS", "NOX", "RM",
"AGE", "DIS", "TAX", "PTRATIO"]
LABEL = "MEDV"
feature_cols = [tf.feature_column.numeric_column(k)
for k in FEATURES] #list of Feature Columns
# -
# #### 2.B Define Estimator
#
# An Estimator is what actually implements your training, eval and prediction loops. Every estimator has the following methods:
#
# - fit() for training
# - eval() for evaluation
# - predict() for prediction
# - export_savedmodel() for writing model state to disk
#
# Tensorflow has several canned estimator that already implement these methods (DNNClassifier, LogisticClassifier etc..) or you can implement a custom estimator. Instructions on how to implement a custom estimator [here](https://www.tensorflow.org/guide/estimator) and see an example [here](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/blogs/timeseries/rnn_cloudmle.ipynb).
#
# For simplicity we will use a canned estimator. To instantiate an estimator simply pass it what Feature Columns to expect and specify a directory for it to output to.
#
# Notice we wrap the estimator with a function. This is to allow us to specify the 'output_dir' at runtime, instead of having to hardcode it here
def generate_estimator(output_dir):
    """Build a 2-hidden-layer (10x10) DNN regressor writing checkpoints to
    `output_dir`; wrapped in a function so the directory is chosen at runtime."""
    return tf.estimator.DNNRegressor(feature_columns=feature_cols,
                                     hidden_units=[10, 10],
                                     model_dir=output_dir)
# #### 2.C Define Input Function
#
# Now that you have an estimator and it knows what type of data to expect and how to interpret, you need to actually pass the data to it! This is the job of the input function.
#
# The input function returns a (features, label) tuple
# - features: A python dictionary. Each key is a feature column name and its value is the tensor containing the data for that Feature
# - label: A Tensor containing the label column
def generate_input_fn(data_set):
    """Return an input_fn closure over `data_set`.

    The input_fn yields (features, labels): a dict mapping each name in
    FEATURES to a constant tensor of that column, and a constant tensor of
    the LABEL column.
    """
    def input_fn():
        feature_tensors = {
            name: tf.constant(data_set[name].values) for name in FEATURES
        }
        label_tensor = tf.constant(data_set[LABEL].values)
        return feature_tensors, label_tensor
    return input_fn
# #### 2.D Define Serving Input Function
#
# To predict with the model, we need to define a serving input function which will be used to read inputs from a user at prediction time.
#
# Why do we need a separate serving function? Don't we input the same features during training as in serving?
#
# Yes, but we may be *receiving* data in a different format during serving. The serving input function performs transformations necessary to get the data provided at prediction time into the format compatible with the Estimator API.
#
# returns a (features, inputs) tuple
# - features: A dict of features to be passed to the Estimator
# - inputs: A dictionary of inputs the predictions server should expect from the user
def serving_input_fn():
    """Map prediction-time inputs (scalars per feature) to the rank-2
    tensors the Estimator expects."""
    #feature_placeholders are what the caller of the predict() method will have to provide
    feature_placeholders = {
        column.name: tf.placeholder(column.dtype, [None])
        for column in feature_cols
    }
    #features are what we actually pass to the estimator
    features = {
      # Inputs are rank 1 so that we can provide scalars to the server
      # but Estimator expects rank 2, so we expand dimension
      key: tf.expand_dims(tensor, -1)
      for key, tensor in feature_placeholders.items()
    }
    return tf.estimator.export.ServingInputReceiver(
        features, feature_placeholders
    )
# ### 3) Package Code
#
# You've now written all the tensorflow code you need!
#
# To make it compatible with Cloud AI Platform we'll combine the above tensorflow code into a single python file with two simple changes
#
# 1. Add some boilerplate code to parse the command line arguments required for gcloud.
# 2. Use the learn_runner.run() function to run the experiment
#
# We also add an empty \__init__\.py file to the folder. This is just the python convention for identifying modules.
# + language="bash"
# mkdir trainer
# touch trainer/__init__.py
# +
# %%writefile trainer/task.py
# trainer/task.py — standalone training module submitted to Cloud AI Platform.
import argparse
import pandas as pd
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
print(tf.__version__)
# Silence TF's verbose INFO/WARN logging during training.
tf.logging.set_verbosity(tf.logging.ERROR)
# Housing train/test CSVs hosted on GCS; files have no header row, so the
# column names are supplied explicitly.
data_train = pd.read_csv(
  filepath_or_buffer='https://storage.googleapis.com/spls/gsp418/housing_train.csv',
  names=["CRIM","ZN","INDUS","CHAS","NOX","RM","AGE","DIS","RAD","TAX","PTRATIO","MEDV"])
data_test = pd.read_csv(
  filepath_or_buffer='https://storage.googleapis.com/spls/gsp418/housing_test.csv',
  names=["CRIM","ZN","INDUS","CHAS","NOX","RM","AGE","DIS","RAD","TAX","PTRATIO","MEDV"])
# Subset of columns used as model inputs; MEDV is the regression target.
FEATURES = ["CRIM", "ZN", "INDUS", "NOX", "RM",
            "AGE", "DIS", "TAX", "PTRATIO"]
LABEL = "MEDV"
feature_cols = [tf.feature_column.numeric_column(k)
                  for k in FEATURES] #list of Feature Columns
def generate_estimator(output_dir):
    """Return a DNNRegressor (hidden layers 10, 10) checkpointing to output_dir."""
    return tf.estimator.DNNRegressor(
        model_dir=output_dir,
        hidden_units=[10, 10],
        feature_columns=feature_cols,
    )
def generate_input_fn(data_set):
    """Build an input_fn yielding (features dict, labels tensor) from *data_set*."""
    def input_fn():
        feature_tensors = dict(
            (name, tf.constant(data_set[name].values)) for name in FEATURES)
        label_tensor = tf.constant(data_set[LABEL].values)
        return feature_tensors, label_tensor
    return input_fn
def serving_input_fn():
    """Serving input fn: rank-1 client placeholders expanded to rank-2 features."""
    # The prediction client feeds one rank-1 placeholder per feature column.
    feature_placeholders = {
        col.name: tf.placeholder(col.dtype, [None]) for col in feature_cols
    }
    # The Estimator consumes rank-2 tensors, so append a trailing axis.
    features = {}
    for name, placeholder in feature_placeholders.items():
        features[name] = tf.expand_dims(placeholder, -1)
    return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# Train for at most 3000 steps over the training data.
train_spec = tf.estimator.TrainSpec(
                    input_fn=generate_input_fn(data_train),
                    max_steps=3000)
# Export the latest SavedModel (named 'Servo') so it can be deployed for serving.
exporter = tf.estimator.LatestExporter('Servo', serving_input_fn)
# Single evaluation pass over the test data; the exporter runs after evaluation.
eval_spec=tf.estimator.EvalSpec(
                input_fn=generate_input_fn(data_test),
                steps=1,
                exporters=exporter)
######START CLOUD ML ENGINE BOILERPLATE######
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # Input Arguments
  parser.add_argument(
      '--output_dir',
      help='GCS location to write checkpoints and export models',
      required=True
  )
  parser.add_argument(
      '--job-dir',
      help='this model ignores this field, but it is required by gcloud',
      default='junk'
  )
  args = parser.parse_args()
  arguments = args.__dict__
  # --output_dir is consumed here; --job-dir is accepted only to satisfy gcloud.
  output_dir = arguments.pop('output_dir')
  ######END CLOUD ML ENGINE BOILERPLATE######
  #initiate training job
  tf.estimator.train_and_evaluate(generate_estimator(output_dir), train_spec, eval_spec)
# -
# ### 4) Train
# Now that our code is packaged we can invoke it using the gcloud command line tool to run the training.
#
# Note: Since our dataset is so small and our model is so simple, the overhead of provisioning the cluster is longer than the actual training time. Accordingly you'll notice the single VM cloud training takes longer than the local training, and the distributed cloud training takes longer than single VM cloud. For larger datasets and more complex models this will reverse
# #### Set Environment Vars
# We'll create environment variables for our project name GCS Bucket and reference this in future commands.
#
# If you do not have a GCS bucket, you can create one using [these](https://cloud.google.com/storage/docs/creating-buckets) instructions.
GCS_BUCKET = 'gs://BUCKET_NAME' #CHANGE THIS TO YOUR BUCKET
PROJECT = 'PROJECT_ID' #CHANGE THIS TO YOUR PROJECT ID
REGION = 'us-central1' #OPTIONALLY CHANGE THIS
import os
# Export the settings so the bash cells below can read them via $GCS_BUCKET etc.
for _name, _value in (('GCS_BUCKET', GCS_BUCKET),
                      ('PROJECT', PROJECT),
                      ('REGION', REGION)):
    os.environ[_name] = _value
# #### Run local
# It's a best practice to first run locally on a small dataset to check for errors. Note you can ignore the warnings in this case, as long as there are no errors.
# + language="bash"
# gcloud ai-platform local train \
# --module-name=trainer.task \
# --package-path=trainer \
# -- \
# --output_dir='./output'
# -
# #### Run on cloud (1 cloud ML unit)
# Here we specify which GCS bucket to write to and a job name.
# Job names submitted to the Cloud AI Platform must be project unique, so we append the system date/time. Update the cell below to point to a GCS bucket you own.
# + language="bash"
# JOBNAME=housing_$(date -u +%y%m%d_%H%M%S)
#
# gcloud ai-platform jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=trainer.task \
# --package-path=./trainer \
# --job-dir=$GCS_BUCKET/$JOBNAME/ \
# --runtime-version 1.15 \
# -- \
# --output_dir=$GCS_BUCKET/$JOBNAME/output
#
# -
# #### Run on cloud (10 cloud ML units)
# Because we are using the TF Estimators interface, distributed computing just works! The only change we need to make to run in a distributed fashion is to add the [--scale-tier](https://cloud.google.com/ml/pricing#ml_training_units_by_scale_tier) argument. Cloud AI Platform then takes care of distributing the training across devices for you!
#
# + language="bash"
# JOBNAME=housing_$(date -u +%y%m%d_%H%M%S)
#
# gcloud ai-platform jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=trainer.task \
# --package-path=./trainer \
# --job-dir=$GCS_BUCKET/$JOBNAME \
# --runtime-version 1.15 \
# --scale-tier=STANDARD_1 \
# -- \
# --output_dir=$GCS_BUCKET/$JOBNAME/output
# -
# #### Run on cloud GPU (3 cloud ML units)
# Also works with GPUs!
#
# "BASIC_GPU" corresponds to one Tesla K80 at the time of this writing, hardware subject to change. 1 GPU is charged as 3 cloud ML units.
# + language="bash"
# JOBNAME=housing_$(date -u +%y%m%d_%H%M%S)
#
# gcloud ai-platform jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=trainer.task \
# --package-path=./trainer \
# --job-dir=$GCS_BUCKET/$JOBNAME \
# --runtime-version 1.15 \
# --scale-tier=BASIC_GPU \
# -- \
# --output_dir=$GCS_BUCKET/$JOBNAME/output
# -
# #### Run on 8 cloud GPUs (24 cloud ML units)
# To train across multiple GPUs you use a [custom scale tier](https://cloud.google.com/ml/docs/concepts/training-overview#job_configuration_parameters).
#
# You specify the number and types of machines you want to run on in a config.yaml, then reference that config.yaml via the --config config.yaml command line argument.
#
# Here I am specifying a master node with machine type complex_model_m_gpu and one worker node of the same type. Each complex_model_m_gpu has 4 GPUs so this job will run on 2x4=8 GPUs total.
#
# WARNING: The default project quota is 10 cloud ML units, so unless you have requested a quota increase you will get a quota exceeded error. This command is just for illustrative purposes.
# %%writefile config.yaml
trainingInput:
scaleTier: CUSTOM
masterType: complex_model_m_gpu
workerType: complex_model_m_gpu
workerCount: 1
# + language="bash"
# JOBNAME=housing_$(date -u +%y%m%d_%H%M%S)
#
# gcloud ai-platform jobs submit training $JOBNAME \
# --region=$REGION \
# --module-name=trainer.task \
# --package-path=./trainer \
# --job-dir=$GCS_BUCKET/$JOBNAME \
# --runtime-version 1.15 \
# --config config.yaml \
# -- \
# --output_dir=$GCS_BUCKET/$JOBNAME/output
# -
# ### 5) Deploy Model For Predictions
#
# Cloud AI Platform has a prediction service that will wrap our tensorflow model with a REST API and allow remote clients to get predictions.
#
# You can deploy the model from the Google Cloud Console GUI, or you can use the gcloud command line tool. We will use the latter method. Note this will take up to 5 minutes.
# + language="bash"
# gcloud config set ai_platform/region global
# + language="bash"
# MODEL_NAME="housing_prices"
# MODEL_VERSION="v1"
# MODEL_LOCATION=output/export/Servo/$(ls output/export/Servo | tail -1)
#
# #gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME} #Uncomment to overwrite existing version
# #gcloud ai-platform models delete ${MODEL_NAME} #Uncomment to overwrite existing model
# gcloud ai-platform models create ${MODEL_NAME} --regions $REGION
# gcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --staging-bucket=$GCS_BUCKET --runtime-version=1.15
# -
# ### 6) Get Predictions
#
# There are two flavors of the AI Platform Prediction Service: Batch and online.
#
# Online prediction is more appropriate for latency sensitive requests as results are returned quickly and synchronously.
#
# Batch prediction is more appropriate for large prediction requests that you only need to run a few times a day.
#
# The prediction services expect prediction requests in standard JSON format so first we will create a JSON file with a couple of housing records.
#
# %%writefile records.json
{"CRIM": 0.00632,"ZN": 18.0,"INDUS": 2.31,"NOX": 0.538, "RM": 6.575, "AGE": 65.2, "DIS": 4.0900, "TAX": 296.0, "PTRATIO": 15.3}
{"CRIM": 0.00332,"ZN": 0.0,"INDUS": 2.31,"NOX": 0.437, "RM": 7.7, "AGE": 40.0, "DIS": 5.0900, "TAX": 250.0, "PTRATIO": 17.3}
# Now we will pass this file to the prediction service using the gcloud command line tool. Results are returned immediately!
# !gcloud ai-platform predict --model housing_prices --json-instances records.json
# ### Conclusion
#
# #### What we covered
# 1. How to use Tensorflow's high level Estimator API
# 2. How to deploy tensorflow code for distributed training in the cloud
# 3. How to deploy the resulting model to the cloud for online prediction
#
# #### What we didn't cover
# 1. How to leverage larger than memory datasets using Tensorflow's queueing system
# 2. How to create synthetic features from our raw data to aid learning (Feature Engineering)
# 3. How to improve model performance by finding the ideal hyperparameters using Cloud AI Platform's [HyperTune](https://cloud.google.com/ml-engine/docs/how-tos/using-hyperparameter-tuning) feature
#
# This lab is a great start, but adding in the above concepts is critical in getting your models to production ready quality. These concepts are covered in Google's 1-week on-demand Tensorflow + Cloud ML course: https://www.coursera.org/specializations/machine-learning-tensorflow-gcp
|
blogs/housing_prices/cloud-ml-housing-prices.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dirichlet Process Mixture Models
#
# The goal of this notebook is to implement DPMM on an appropriate dataset.
#
# First explain what a DPMM is
#
# ### Questions
# - How does the clustering actually work?
# - Can we use it for high-dimensional data?
# - At what dimension does it start to break down?
# - Can we use a dimensionality reduction technique, then DPMM?
# - How is the clustering different to t-SNE?
#
#
# #### Inconsistency of Number of Components Papers:
# - Do the results in these papers hold true? [Posterior Distribution for the Number of Clusters in Dirichlet Process Mixture Models](https://arxiv.org/abs/1905.09959) and [Finite mixture models are typically inconsistent
# for the number of components](http://approximateinference.org/2017/accepted/CaiEtAl2017.pdf) and [Inconsistency of Pitman–Yor Process Mixtures
# for the Number of Components](http://jmlr.org/papers/volume15/miller14a/miller14a.pdf)
# ## What is a DPMM?
#
# A Dirichlet process mixture model places a Dirichlet process prior — defined by a concentration parameter and a base distribution — over the mixture components, so the effective number of clusters is inferred from the data rather than fixed in advance.
# ### DPMM in sklearn
#
# - [DPMM](https://ogrisel.github.io/scikit-learn.org/sklearn-tutorial/modules/generated/sklearn.mixture.DPGMM.html)
# - Example: https://scikit-learn.org/stable/auto_examples/mixture/plot_gmm.html#sphx-glr-download-auto-examples-mixture-plot-gmm-py
# - mixtures: https://scikit-learn.org/stable/modules/mixture.html
#
#
# +
import pandas as pd
import numpy as np
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
# for fancy python printing
from IPython.display import Markdown, display
def printmd(string):
    """Render *string* as Markdown in the notebook output cell."""
    rendered = Markdown(string)
    display(rendered)
import warnings
warnings.filterwarnings('ignore')
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 150
# -
def plot_results(X, Y_, means, covariances, index, title):
    """Scatter-plot points colored by cluster with one covariance ellipse per used component.

    X           : (n_samples, 2) data array.
    Y_          : per-sample integer cluster assignments.
    means       : per-component mean vectors.
    covariances : per-component 2x2 covariance matrices.
    index       : 0-based subplot row (figure laid out as 2 rows x 1 column).
    title       : subplot title.
    """
    splot = plt.subplot(2, 1, 1 + index)
    for i, (mean, covar, color) in enumerate(zip(
            means, covariances, color_iter)):
        # Eigendecomposition gives the ellipse axis lengths (v) and orientation (w).
        v, w = linalg.eigh(covar)
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180. * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
# +
# Number of samples per component
n_samples = 500
# Generate random sample, two components
#np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# +
# NOTE(review): the original cell re-bound X to `data`, a name defined only
# later in the Pyro section — on a fresh kernel run this raised NameError and
# otherwise clobbered the sample generated in the previous cell. Removed so
# the mixtures are fit on the generated sample X.

# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
             'Gaussian Mixture')

# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=100,
                                        covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
             'Bayesian Gaussian Mixture with a Dirichlet process prior')

plt.show()
# -
# ## Pyro
# +
import torch
from torch.distributions import constraints
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from tqdm import tqdm
from pyro.distributions import *
import pyro
from pyro.optim import Adam
from pyro.infer import SVI, Trace_ELBO, Predictive
assert pyro.__version__.startswith('1')
pyro.enable_validation(True) # can help with debugging
pyro.set_rng_seed(0)
# +
data = torch.cat((MultivariateNormal(-8 * torch.ones(2), torch.eye(2)).sample([50]),
MultivariateNormal(8 * torch.ones(2), torch.eye(2)).sample([50]),
MultivariateNormal(torch.tensor([1.5, 2]), torch.eye(2)).sample([50]),
MultivariateNormal(torch.tensor([-0.5, 1]), torch.eye(2)).sample([50])))
plt.scatter(data[:, 0], data[:, 1])
plt.title("Data Samples from Mixture of 4 Gaussians")
plt.show()
N = data.shape[0];
# -
# First, let’s define the “stick-breaking” function that generates our weights, given our samples of 𝛽:
def mix_weights(beta):
    """Stick-breaking: turn T-1 Beta draws into T mixture weights that sum to 1.

    Weight t is beta_t times the product of (1 - beta_s) for s < t; the final
    weight is the leftover stick, prod(1 - beta).
    """
    remaining = torch.cumprod(1 - beta, dim=-1)
    sticks = F.pad(beta, (0, 1), value=1)
    leftover = F.pad(remaining, (1, 0), value=1)
    return sticks * leftover
# Next, let’s define our model. It may be helpful to refer the definition of the stick-breaking model presented in the first part of this tutorial.
#
# Note that all 𝛽𝑖 samples are conditionally independent, so we model them using a pyro.plate of size T-1; we do the same for all samples of our cluster parameters 𝜇𝑖. We then construct a Categorical distribution whose parameters are the mixture weights using our sampled 𝛽 values (line 9) below, and sample the cluster assignment 𝑧𝑛 for each data point from that Categorical. Finally, we sample our observations from a multivariate Gaussian distribution whose mean is exactly the cluster parameter corresponding to the assignment 𝑧𝑛 we drew for the point 𝑥𝑛. This can be seen in the Pyro code below:
def model(data):
    """Generative model: truncated stick-breaking DP mixture of 2-D Gaussians.

    Relies on notebook globals T (truncation level), alpha (DP concentration)
    and N (number of observations).
    """
    # Stick-breaking fractions beta_t ~ Beta(1, alpha), t = 1..T-1.
    with pyro.plate("beta_plate", T-1):
        beta = pyro.sample("beta", Beta(1, alpha))
    # Cluster locations drawn from a broad Gaussian base measure.
    with pyro.plate("mu_plate", T):
        mu = pyro.sample("mu", MultivariateNormal(torch.zeros(2), 5 * torch.eye(2)))
    # Each point picks a cluster z_n, then is emitted from that cluster's Gaussian.
    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(mix_weights(beta)))
        pyro.sample("obs", MultivariateNormal(mu[z], torch.eye(2)), obs=data)
# Now, it’s time to define our guide and perform inference.
#
# The variational family 𝑞(𝛽,𝜃,𝑧) that we are optimizing over during variational inference is given by:
#
# 𝑞(𝛽,𝜃,𝑧)=∏𝑡=1𝑇−1𝑞𝑡(𝛽𝑡)∏𝑡=1𝑇𝑞𝑡(𝜃𝑡)∏𝑛=1𝑁𝑞𝑛(𝑧𝑛)
# Note that since we are unable to computationally model the infinite clusters posited by the model, we truncate our variational family at 𝑇 clusters. This does not affect our model; rather, it is a simplification made in the inference stage to allow tractability.
#
# The guide is constructed exactly according to the definition of our variational family 𝑞(𝛽,𝜃,𝑧) above. We have 𝑇−1 conditionally independent Beta distributions for each 𝛽 sampled in our model, 𝑇 conditionally independent multivariate Gaussians for each cluster parameter 𝜇𝑖, and 𝑁 conditionally independent Categorical distributions for each cluster assignment 𝑧𝑛.
#
# Our variational parameters (pyro.param) are therefore the 𝑇−1 many positive scalars that parameterize the second parameter of our variational Beta distributions (the first shape parameter is fixed at 1, as in the model definition), the 𝑇 many two-dimensional vectors that parameterize our variational multivariate Gaussian distributions (we do not parameterize the covariance matrices of the Gaussians, though this should be done when analyzing a real-world dataset for more flexibility), and the 𝑁 many 𝑇-dimensional vectors that parameterize our variational Categorical distributions:
def guide(data):
    """Mean-field variational family for the truncated DP Gaussian mixture.

    Variational parameters:
      kappa: second shape parameter of the T-1 Beta factors (first fixed at 1).
      tau:   T cluster-mean locations.
      phi:   N per-point categorical assignment probabilities (simplex rows).
    """
    kappa = pyro.param('kappa', lambda: Uniform(0, 2).sample([T-1]), constraint=constraints.positive)
    tau = pyro.param('tau', lambda: MultivariateNormal(torch.zeros(2), 3 * torch.eye(2)).sample([T]))
    phi = pyro.param('phi', lambda: Dirichlet(1/T * torch.ones(T)).sample([N]), constraint=constraints.simplex)
    with pyro.plate("beta_plate", T-1):
        q_beta = pyro.sample("beta", Beta(torch.ones(T-1), kappa))
    with pyro.plate("mu_plate", T):
        q_mu = pyro.sample("mu", MultivariateNormal(tau, torch.eye(2)))
    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(phi))
# +
T = 6  # truncation level of the variational approximation
optim = Adam({"lr": 0.05})
svi = SVI(model, guide, optim, loss=Trace_ELBO())
losses = []  # per-step ELBO losses, appended by train()
def train(num_iterations):
    """Reset Pyro's param store, then run SVI for num_iterations steps, logging each loss."""
    pyro.clear_param_store()
    for step in tqdm(range(num_iterations)):
        losses.append(svi.step(data))
def truncate(alpha, centers, weights):
    """Drop components whose weight is below 1/(100*alpha) and renormalize the rest.

    Returns (kept_centers, kept_weights) with kept_weights summing to 1.
    """
    cutoff = alpha**-1 / 100.
    keep = weights > cutoff
    kept_weights = weights[keep]
    return centers[keep], kept_weights / kept_weights.sum()
# Fit with a small concentration (alpha = 0.1): favors fewer active clusters.
alpha = 0.1
train(1000)
# We make a point-estimate of our model parameters using the posterior means of tau and phi for the centers and weights
Bayes_Centers_01, Bayes_Weights_01 = truncate(alpha,
                      pyro.param("tau").detach(),
                      torch.mean(pyro.param("phi").detach(), dim=0))
# Refit with a larger concentration (alpha = 1.5): permits more active clusters.
alpha = 1.5
train(1000)
# We make a point-estimate of our model parameters using the posterior means of tau and phi for the centers and weights
Bayes_Centers_15, Bayes_Weights_15 = truncate(alpha, pyro.param("tau").detach(), torch.mean(pyro.param("phi").detach(), dim=0))
# Side-by-side scatter: the data (blue) with inferred cluster centers (red).
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.scatter(data[:, 0], data[:, 1], color="blue")
plt.scatter(Bayes_Centers_01[:, 0], Bayes_Centers_01[:, 1], color="red")
plt.subplot(1, 2, 2)
plt.scatter(data[:, 0], data[:, 1], color="blue")
plt.scatter(Bayes_Centers_15[:, 0], Bayes_Centers_15[:, 1], color="red")
plt.tight_layout()
plt.show();
# -
# #### Sun Spots
# +
# Yearly sunspot counts from SILSO (semicolon-separated; keep first two columns).
df = pd.read_csv('http://www.sidc.be/silso/DATA/SN_y_tot_V2.0.csv', sep=';', names=['time', 'sunspot.year'], usecols=[0, 1])
data = torch.tensor(df['sunspot.year'].values, dtype=torch.float32)
N = data.shape[0]
plt.hist(df['sunspot.year'].values, bins=40)
plt.title("Number of Years vs. Sunspot Counts")
plt.xlabel("Sunspot Count")
plt.ylabel("Number of Years")
plt.show();
# +
def train(num_iterations):
    """Clear Pyro's param store, then run num_iterations SVI steps, recording each loss."""
    pyro.clear_param_store()
    for _ in tqdm(range(num_iterations)):
        current_loss = svi.step(data)
        losses.append(current_loss)
def model(data):
    """Generative model: truncated stick-breaking DP mixture of Poissons over counts."""
    # Stick-breaking fractions beta_t ~ Beta(1, alpha).
    with pyro.plate("beta_plate", T-1):
        beta = pyro.sample("beta", Beta(1, alpha))
    # Per-cluster Poisson rates drawn from a Gamma(3, 0.05) base measure.
    with pyro.plate("lambda_plate", T):
        lmbda = pyro.sample("lambda", Gamma(3, 0.05))
    # Each observation picks a cluster z, then is emitted from Poisson(lambda_z).
    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(mix_weights(beta)))
        pyro.sample("obs", Poisson(lmbda[z]), obs=data)
def guide(data):
    """Mean-field guide: Beta sticks (kappa), Gamma rates (tau_0, tau_1), Categorical phi."""
    kappa = pyro.param('kappa', lambda: Uniform(0, 2).sample([T-1]), constraint=constraints.positive)
    tau_0 = pyro.param('tau_0', lambda: Uniform(0, 5).sample([T]), constraint=constraints.positive)
    tau_1 = pyro.param('tau_1', lambda: LogNormal(-1, 1).sample([T]), constraint=constraints.positive)
    phi = pyro.param('phi', lambda: Dirichlet(1/T * torch.ones(T)).sample([N]), constraint=constraints.simplex)
    with pyro.plate("beta_plate", T-1):
        q_beta = pyro.sample("beta", Beta(torch.ones(T-1), kappa))
    with pyro.plate("lambda_plate", T):
        q_lambda = pyro.sample("lambda", Gamma(tau_0, tau_1))
    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(phi))
T = 20  # truncation level
alpha = 1.1  # DP concentration
n_iter = 1500
optim = Adam({"lr": 0.05})
svi = SVI(model, guide, optim, loss=Trace_ELBO())
losses = []
train(n_iter)
# Grid of count values at which the fitted mixture density is evaluated below.
samples = torch.arange(0, 300).type(torch.float)
tau0_optimal = pyro.param("tau_0").detach()
tau1_optimal = pyro.param("tau_1").detach()
kappa_optimal = pyro.param("kappa").detach()
# We make a point-estimate of our latent variables using the posterior means of tau and kappa for the cluster params and weights
Bayes_Rates = (tau0_optimal / tau1_optimal)  # mean of Gamma(tau_0, tau_1)
Bayes_Weights = mix_weights(1. / (1. + kappa_optimal))  # mean of Beta(1, kappa)
def mixture_of_poisson(weights, rates, samples):
    """Evaluate the Poisson-mixture density at each value in *samples*."""
    log_probs = Poisson(rates).log_prob(samples.unsqueeze(-1))
    component_densities = log_probs.exp() * weights
    return component_densities.sum(dim=-1)
likelihood = mixture_of_poisson(Bayes_Weights, Bayes_Rates, samples)
plt.title("Number of Years vs. Sunspot Counts")
# Overlay the estimated mixture density on the normalized histogram.
plt.hist(data, bins=60, density=True, lw=0, alpha=0.75);
plt.plot(samples, likelihood, label="Estimated Mixture Density")
plt.legend()
plt.show();
# -
# #### ELBO Plots
#
# Below are plots of the behavior of the loss function (negative Trace_ELBO) over the SVI iterations during inference using Pyro, as well as a plot of the autocorrelations of the ELBO ‘time series’ versus iteration number. We can see that around 500 iterations, the loss stops decreasing significantly, so we can assume it takes around 500 iterations to achieve convergence. The autocorrelation plot reaches an autocorrelation very close to 0 around a lag of 500, further corroborating this hypothesis. Note that these are heuristics and do not necessarily imply convergence.
# +
# Left panel: the loss trace; right panel: autocorrelation of the loss series.
elbo_plot = plt.figure(figsize=(15, 5))
elbo_ax = elbo_plot.add_subplot(1, 2, 1)
elbo_ax.set_title("ELBO Value vs. Iteration Number for Pyro BBVI on Sunspot Data")
elbo_ax.set_ylabel("ELBO")
elbo_ax.set_xlabel("Iteration Number")
elbo_ax.plot(np.arange(n_iter), losses)
autocorr_ax = elbo_plot.add_subplot(1, 2, 2)
# Detrend by the mean before computing lagged autocorrelations.
autocorr_ax.acorr(np.asarray(losses), detrend=lambda x: x - x.mean(), maxlags=750, usevlines=False, marker=',')
autocorr_ax.set_xlim(0, 500)
autocorr_ax.axhline(0, ls="--", c="k", lw=1)
autocorr_ax.set_title("Autocorrelation of ELBO vs. Lag for Pyro BBVI on Sunspot Data")
autocorr_ax.set_xlabel("Lag")
autocorr_ax.set_ylabel("Autocorrelation")
elbo_plot.tight_layout()
elbo_plot.show()
|
_posts/notebooks/dirichlet-process-mixture-models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # The Series Data Structure
# +
import pandas as pd
# pd.Series?
# -
animals = ['Tiger', 'Bear', 'Moose']
pd.Series(animals)
numbers = [1, 2, 3]
pd.Series(numbers)
animals = ['Tiger', 'Bear', None]
pd.Series(animals)
numbers = [1, 2, None]
pd.Series(numbers)
import numpy as np
np.nan == None
np.nan == np.nan
np.isnan(np.nan)
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
s.index
s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])
s
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey'])
s
# # Querying a Series
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
s.iloc[3]
s.loc['Golf']
s[3]
s['Golf']
sports = {99: 'Bhutan',
100: 'Scotland',
101: 'Japan',
102: 'South Korea'}
s = pd.Series(sports)
s[0] #This won't call s.iloc[0] as one might expect, it generates an error instead
s = pd.Series([100.00, 120.00, 101.00, 3.00])
s
total = 0
for item in s:
total+=item
print(total)
# +
import numpy as np
total = np.sum(s)
print(total)
# -
#this creates a big series of random numbers
s = pd.Series(np.random.randint(0,1000,10000))
s.head()
len(s)
# Baseline: summing with a hand-written Python loop (timed by %%timeit).
# %%timeit -n 100
summary = 0
for item in s:
    summary+=item
# Vectorized sum: the loop runs in numpy's C implementation instead.
# %%timeit -n 100
summary = np.sum(s)
s+=2 #adds two to each item in s using broadcasting
s.head()
# NOTE(review): Series.iteritems() and Series.set_value() are deprecated and
# removed in modern pandas (use s.items() and s.at[label]); kept as-is because
# this cell deliberately demonstrates the slow element-wise path.
for label, value in s.iteritems():
    s.set_value(label, value+2)
s.head()
# Timed comparison: label-by-label assignment via .loc ...
# %%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
for label, value in s.iteritems():
    s.loc[label]= value+2
# ... versus vectorized broadcasting, which is far faster.
# %%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
s+=2
s = pd.Series([1, 2, 3])
s.loc['Animal'] = 'Bears'
s
original_sports = pd.Series({'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'})
cricket_loving_countries = pd.Series(['Australia',
'Barbados',
'Pakistan',
'England'],
index=['Cricket',
'Cricket',
'Cricket',
'Cricket'])
all_countries = original_sports.append(cricket_loving_countries)
original_sports
cricket_loving_countries
all_countries
all_countries.loc['Cricket']
# # The DataFrame Data Structure
import pandas as pd
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df.head()
df.loc['Store 2']
type(df.loc['Store 2'])
df.loc['Store 1']
df.loc['Store 1', 'Cost']
df.T
df.T.loc['Cost']
df['Cost']
df.loc['Store 1']['Cost']
df.loc[:,['Name', 'Cost']]
df.drop('Store 1')
df
copy_df = df.copy()
copy_df = copy_df.drop('Store 1')
copy_df
# +
# copy_df.drop?
# -
del copy_df['Name']
copy_df
df['Location'] = None
df
# # Dataframe Indexing and Loading
costs = df['Cost']
costs
costs+=2
costs
df
# !cat olympics.csv
df = pd.read_csv('olympics.csv')
df.head()
df = pd.read_csv('olympics.csv', index_col = 0, skiprows=1)
df.head()
df.columns
# +
# Rename the Olympics medal columns: a leading '01'/'02'/'03' marks
# Gold/Silver/Bronze (the first 4 characters are dropped), and a leading
# '№' character is replaced by '#'.
for col in df.columns:
    if col[:2]=='01':
        df.rename(columns={col:'Gold' + col[4:]}, inplace=True)
    if col[:2]=='02':
        df.rename(columns={col:'Silver' + col[4:]}, inplace=True)
    if col[:2]=='03':
        df.rename(columns={col:'Bronze' + col[4:]}, inplace=True)
    if col[:1]=='№':
        df.rename(columns={col:'#' + col[1:]}, inplace=True)
df.head()
# -
# # Querying a DataFrame
df['Gold'] > 0
only_gold = df.where(df['Gold'] > 0)
only_gold.head()
only_gold['Gold'].count()
df['Gold'].count()
only_gold = only_gold.dropna()
only_gold.head()
only_gold = df[df['Gold'] > 0]
only_gold.head()
len(df[(df['Gold'] > 0) | (df['Gold.1'] > 0)])
df[(df['Gold.1'] > 0) & (df['Gold'] == 0)]
# # Indexing Dataframes
df.head()
df['country'] = df.index
df = df.set_index('Gold')
df.head()
df = df.reset_index()
df.head()
df = pd.read_csv('census.csv')
df.head()
df['SUMLEV'].unique()
df=df[df['SUMLEV'] == 50]
df.head()
columns_to_keep = ['STNAME',
'CTYNAME',
'BIRTHS2010',
'BIRTHS2011',
'BIRTHS2012',
'BIRTHS2013',
'BIRTHS2014',
'BIRTHS2015',
'POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']
df = df[columns_to_keep]
df.head()
df = df.set_index(['STNAME', 'CTYNAME'])
df.head()
df.loc['Michigan', 'Washtenaw County']
df.loc[ [('Michigan', 'Washtenaw County'),
('Michigan', 'Wayne County')] ]
# # Missing values
df = pd.read_csv('log.csv')
df
# +
# df.fillna?
# -
df = df.set_index('time')
df = df.sort_index()
df
# A (time, user) MultiIndex keeps rows that share the same timestamp distinct.
df = df.reset_index()
df = df.set_index(['time', 'user'])
df
# Forward-fill: propagate the last valid observation into the NaN gaps.
# NOTE(review): fillna(method='ffill') is deprecated in recent pandas — the
# modern spelling is df.ffill(); kept as-is to match the course environment.
df = df.fillna(method='ffill')
df.head()
|
Week+2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Model Selection & Evaluation
#
# <hr>
#
# ### Agenda
# 1. Cross Validation
# 2. Hyperparameter Tuning
# 3. Model Evaluation
# 4. Model Persistance
# 5. Validation Curves
# 6. Learning Curves
#
# <hr>
# ### 1. Cross Validation
# * Simple models underfit.
# * Accuracy for training data & validation data is not much different.
# * But, accuracy isn't that great.
# * This situation is of low variance & high bias
# * On moving towards complex models, accuracy improves.
# * But, gap between accuracy on training data & validation data increases
# * This situation is of high variance & low bias
# <img src="https://www.researchgate.net/profile/Ljubomir_Jacic2/post/How_does_model_complexity_impact_the_bias-variance_tradeoff/attachment/59d6233579197b807798188f/AS%3A306150770184192%401450003439733/image/biasvariance.png" width="400px">
# * We need to compare across models to find the best model.
# * We need to compare across all hyper-parameters for a particular model.
# * The data that is used for training should not be used for validation.
# * The validation accuracy is the one that we report
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_digits
digits = load_digits()
import matplotlib.pyplot as plt
# %matplotlib inline
plt.imshow(digits.images[0],cmap='gray')
dt = DecisionTreeClassifier(max_depth=10)
from sklearn.model_selection import train_test_split
trainX, testX, trainY, testY = train_test_split(digits.data, digits.target)
dt.fit(trainX,trainY)
dt.score(testX,testY)
dt.score(trainX,trainY)
# * Decreasing the complexity of model
dt = DecisionTreeClassifier(max_depth=7)
dt.fit(trainX,trainY)
dt.score(testX,testY)
dt.score(trainX,trainY)
# * Observation : With decrease in complexity the gap in training & validation accuracy also decreased
# #### Cross Validation API
# * Splits data into k parts.
# * Use k - 1 parts for training the model
# * Use kth part for validation
# * Repeat the above steps multiple times to get a generalized behaviour
from sklearn.model_selection import cross_val_score
# k-fold cross-validation accuracy of the depth-7 tree (default fold count).
scores = cross_val_score(dt, digits.data, digits.target)
scores
# Mean of the per-fold scores -- the figure usually reported.
scores.mean()
# #### Cross-validate Function : Scores for multiple metrics
from sklearn.model_selection import cross_validate
scoring = ['precision_macro', 'recall_macro', 'accuracy']
# cross_validate returns fit/score times plus one test-score array per metric.
cross_validate(dt, digits.data, digits.target, scoring=scoring, cv=5)
# #### Stratification for dealing with imbalanced Classes
# * StratifiedKFold
# - Class frequencies are preserved in data splitting
import numpy as np
# Toy imbalanced label set: 12 ones and 6 zeros (2:1 ratio).
Y = np.append(np.ones(12),np.zeros(6))
X = np.ones((18,3))
from sklearn.model_selection import StratifiedKFold
skf = StratifiedKFold(n_splits=3)
# Each fold preserves the 2:1 class ratio of the full label set.
list(skf.split(X,Y))
# Inspect the labels picked by one of the index sets above.
Y[[ 4, 5, 6, 7, 8, 9, 10, 11, 14, 15, 16, 17]]
# ### 2. Hyperparameter Tuning
# * Model parameters are learnt by learning algorithms based on data
# * Hyper-parameters needs to be configured
# * Hyper-parameters are data dependent & many times need experiments to find the best
# * sklearn provides GridSerach for finding the best hyper-parameters
#
# ##### Exhaustive GridSearch
# * Searches sequentially for all the configued params
# * For all possible combinations
trainX, testX, trainY, testY = train_test_split(digits.data, digits.target)
dt = DecisionTreeClassifier()
from sklearn.model_selection import GridSearchCV
# Try max_depth in {5, 10, 15, 20, 25}, scoring each with 5-fold CV.
grid_search = GridSearchCV(dt, param_grid={'max_depth':range(5,30,5)}, cv=5)
# NOTE(review): the search is fitted on the full dataset rather than on
# trainX/trainY, so the train/test split above is unused here.
grid_search.fit(digits.data,digits.target)
grid_search.best_params_
grid_search.best_score_
grid_search.best_estimator_
# #### RandomizedSearch
# * Unlike GridSearch, not all parameters are tried & tested
# * But rather a fixed number of parameter settings is sampled from the specified distributions.
#
# ##### Comparing GridSearch and RandomSearchCV
# +
from time import time
#randint is an intertor for generating numbers between range specified
from scipy.stats import randint
# -
X = digits.data
Y = digits.target
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": randint(1,11),
"min_samples_split": randint(2, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
param_dist
rf = RandomForestClassifier(n_estimators=20)
# +
n_iter_search = 20
random_search = RandomizedSearchCV(rf, param_distributions=param_dist,
n_iter=n_iter_search, cv=5)
start = time()
random_search.fit(X, Y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
# -
random_search.best_score_
# +
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(rf, param_grid=param_grid, cv=5)
start = time()
grid_search.fit(X, Y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_['params'])))
# -
grid_search.best_score_
# * GridSearch & RandomizedSearch can fine tune hyper-parameters of transformers as well when part of pipeline
# ### 3. Model Evaluation
# * Three different ways to evaluate quality of model prediction
# - score method of estimators, a default method is configured .i.e r2_score for regression, accuracy for classification
# - Model evalutaion tools like cross_validate or cross_val_score also returns accuracy
# - Metrices module is rich with various prediction error calculation techniques
trainX, testX, trainY, testY = train_test_split(X,Y)
rf.fit(trainX, trainY)
# * Technique 1 - Using score function
rf.score(testX,testY)
# * Technique 2 - Using cross_val_score as discussed above
cross_val_score(rf,X,Y,cv=5)
# #### Cancer prediction sample for understanding metrices
from sklearn.datasets import load_breast_cancer
dt = DecisionTreeClassifier()
cancer_data = load_breast_cancer()
trainX, testX, trainY, testY = train_test_split(cancer_data.data, cancer_data.target)
dt.fit(trainX,trainY)
pred = dt.predict(testX)
# #### Technique 3 - Using metrices
# ##### Classfication metrices
# * Accuracy Score - Correct classification vs ( Correct classification + Incorrect Classification )
from sklearn import metrics
metrics.accuracy_score(y_pred=pred, y_true=testY)
# * Confusion Matrix - Shows details of classification including TP,FP,TN,FN
#   - True Positive (TP), Actual class is 1 & prediction is also 1
#   - True Negative (TN), Actual class is 0 & prediction is also 0
#   - False Positive (FP), Actual class is 0 & prediction is 1
#   - False Negative (FN), Actual class is 1 & prediction is 0
metrics.confusion_matrix(y_pred=pred, y_true=testY, labels=[0,1])
# <img src="https://github.com/awantik/machine-learning-slides/blob/master/confusion_matrix.png?raw=true" width="400px">
# * Precision Score
#   - Ability of a classifier not to label a sample positive when it is negative
#   - Calculated as TP/(TP+FP)
#   - We don't want a non-spam mail to be marked as spam
metrics.precision_score(y_pred=pred, y_true=testY)
# * Recall Score
#   - Ability of a classifier to find all positive samples
#   - It's ok to predict a patient's tumor to be cancer so that it undergoes more tests
#   - But it is not ok to miss a cancer patient without further analysis
metrics.recall_score(y_pred=pred, y_true=testY)
# * F1 score
#   - Harmonic mean of precision & recall
metrics.f1_score(y_pred=pred, y_true=testY)
# * ROC & AUC
# ##### House Price Prediction - Understanding matrices
# House-price regression used to illustrate the regression metrics.
from sklearn.datasets import california_housing
house_data = california_housing.fetch_california_housing()
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(house_data.data, house_data.target)
# NOTE(review): predictions are made on the training data itself, so the
# metrics below measure fit quality, not generalization.
pred = lr.predict(house_data.data)
# #### Metrics for Regression
# * mean squared error
#   - Mean of squares of the difference between expected value & actual value
metrics.mean_squared_error(y_pred=pred, y_true=house_data.target)
# * mean absolute error
#   - Mean of abs of the difference between expected value & actual value
metrics.mean_absolute_error(y_pred=pred, y_true=house_data.target)
# * r2 score
#   - Returns accuracy of the model on a scale where 1 is a perfect fit
#   - It measures goodness of fit for regression models
#   - Calculated as = (variance explained by the model)/(Total variance)
#   - High r2 means the target is close to the prediction
#
#
# <img src="https://github.com/awantik/machine-learning-slides/blob/master/Capture.PNG?raw=true" width="400px">
metrics.r2_score(y_pred=pred, y_true=house_data.target)
# ### Metrices for Clustering
# * Two forms of evaluation
# * supervised, which uses a ground truth class values for each sample.
# - completeness_score
# - homogeneity_score
# * unsupervised, which measures the quality of model itself
# - silhoutte_score
# - calinski_harabaz_score
# ##### completeness_score
# - A clustering result satisfies completeness if all the data points that are members of a given class are elements of the same cluster.
# - Accuracy is 1.0 if data belonging to same class belongs to same cluster, even if multiple classes belongs to same cluster
from sklearn.metrics.cluster import completeness_score
completeness_score( labels_true=[10,10,11,11],labels_pred=[1,1,0,0])
# * The acuracy is 1.0 because all the data belonging to same class belongs to same cluster
completeness_score( labels_true=[11,22,22,11],labels_pred=[1,0,1,1])
# * The accuracy is .3 because class 1 - [11,22,11], class 2 - [22]
print(completeness_score([10, 10, 11, 11], [0, 0, 0, 0]))
# ##### homogeneity_score
# - A clustering result satisfies homogeneity if all of its clusters contain only data points which are members of a single class.
from sklearn.metrics.cluster import homogeneity_score
homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3])
homogeneity_score([0, 0, 0, 0], [1, 1, 0, 0])
# * Same class data is broken into two clusters
# #### silhoutte_score
# * The Silhouette Coefficient is calculated using the mean intra-cluster distance (a) and the mean nearest-cluster distance (b) for each sample.
# * The Silhouette Coefficient for a sample is (b - a) / max(a, b). To clarify, b is the distance between a sample and the nearest cluster that the sample is not a part of.
# ##### Selecting the number of clusters with silhouette analysis on KMeans clustering
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1)
plt.scatter(X[:,0],X[:,1],s=10)
range_n_clusters = [2, 3, 4, 5, 6]
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
# Fit k-means for each candidate k and report the mean silhouette coefficient
# (range [-1, 1]; higher is better).
for n_cluster in range_n_clusters:
    kmeans = KMeans(n_clusters=n_cluster)
    kmeans.fit(X)
    labels = kmeans.predict(X)
    print (n_cluster, silhouette_score(X,labels))
# * The best number of clusters is 2
# #### calinski_harabaz_score
# * The score is defined as the ratio between the within-cluster dispersion and the between-cluster dispersion.
# +
# NOTE(review): this function was renamed ``calinski_harabasz_score`` in
# newer scikit-learn releases; the spelling below only works on old versions.
from sklearn.metrics import calinski_harabaz_score
for n_cluster in range_n_clusters:
    kmeans = KMeans(n_clusters=n_cluster)
    kmeans.fit(X)
    labels = kmeans.predict(X)
    print (n_cluster, calinski_harabaz_score(X,labels))
# -
# ### 4. Model Persistance
# * Model training is an expensive process
# * It is desireable to save the model for future reuse
# * using pickle & joblib this can be achieved
# Serialize the fitted estimator to a byte string and restore it.
import pickle
s = pickle.dumps(dt)
pickle.loads(s)
type(s)
# * joblib is a better extension of pickle for large numpy-backed models
# * Doesn't convert into a string; writes directly to a file
# NOTE(review): ``sklearn.externals.joblib`` was deprecated and later removed
# from scikit-learn; modern code should ``import joblib`` directly.
from sklearn.externals import joblib
joblib.dump(dt, 'dt.joblib')
# * Loading the file back into a model
dt = joblib.load('dt.joblib')
dt
# ### 5. Validation Curves
# * To validate a model, we need a scoring function.
# * Create a grid of possible hyper-prameter configuration.
# * Select the hyper-parameter which gives the best score
# +
from sklearn.model_selection import validation_curve
param_range = np.arange(1, 50, 2)
train_scores, test_scores = validation_curve(RandomForestClassifier(),
digits.data,
digits.target,
param_name="n_estimators",
param_range=param_range,
cv=3,
scoring="accuracy",
n_jobs=-1)
# +
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, label="Training score", color="black")
plt.plot(param_range, test_mean, label="Cross-validation score", color="dimgrey")
plt.title("Validation Curve With Random Forest")
plt.xlabel("Number Of Trees")
plt.ylabel("Accuracy Score")
plt.tight_layout()
plt.legend(loc="best")
plt.show()
# -
# ### 6. Learning Curves
# * Learning curves shows variation in training & validation score on increasing the number of samples
from sklearn.model_selection import learning_curve
|
Code/9. Model Selection _ Evaluation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Filter participant ratings into low/high valence and arousal trial-id sets.
# NOTE: this notebook targets Python 2 (print statements below).
import pandas as pd
w = pd.io.excel.read_excel('participant_ratings_raw.xlsx',header=None)
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; newer code
# should use .to_numpy() instead.
trials = w.as_matrix()
# -
trials[0]
# +
# Trial-id buckets for each affect extreme; thresholds chosen by the author.
# Presumably columns 4 and 5 hold the valence and arousal ratings and there
# are 1280 trials in total -- confirm against the spreadsheet.
Valence_Lo=[]
Valence_Hi=[]
Arousal_Lo=[]
Arousal_Hi=[]
count = 0
valence = 0    # NOTE(review): unused
arousal = 0.0  # NOTE(review): unused
for row in range(1280):
    id = row   # NOTE(review): shadows the builtin ``id``
    count+=1
    # valence
    if trials[row][4] <= 3.9:
        Valence_Lo.append(id)
    if trials[row][4] >= 7.0:
        Valence_Hi.append(id)
    # arousal
    if trials[row][5] <= 3.8:
        Arousal_Lo.append(id)
    if trials[row][5] >= 6.9:
        Arousal_Hi.append(id)
print count
# -
print Valence_Lo
# print Valence_Hi
# print Arousal_Lo
# print Arousal_Hi
print len(Valence_Lo)
print len(Valence_Hi)
print len(Arousal_Lo)
print len(Arousal_Hi)
|
Filtered-Trials/Filtered Trials.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from src.utils import *
from collections import Counter
from math import ceil
puzzle_input = parse_puzzle_input(14)
puzzle_input[:3]
sample_input = [
'NNCB',
'',
'CH -> B',
'HH -> N',
'CB -> H',
'NH -> C',
'HB -> C',
'HC -> B',
'HN -> C',
'NN -> C',
'BH -> H',
'NC -> B',
'NB -> B',
'BN -> B',
'BB -> N',
'BC -> B',
'CC -> N',
'CN -> C'
]
def parse_polymer_info(puzzle_input):
    """Split the raw puzzle lines into (insertion rules, template).

    Line 0 is the polymer template; lines 2+ are 'XY -> Z' rules.
    Returns a (rules dict, template string) tuple.
    """
    rules = dict(line.split(' -> ') for line in puzzle_input[2:])
    return rules, puzzle_input[0]
def split_template_to_pairs(template):
    """Return the overlapping two-character windows of *template*, in order."""
    return [left + right for left, right in zip(template, template[1:])]
# +
def generate_insertion_list(pair_list, pair_insertion_dict):
    """Map each pair to the character to insert after its first element.

    Returns a list parallel to *pair_list*; entries are ``None`` for pairs
    with no insertion rule.  (Idiom fix: the original membership-test loop
    is exactly ``dict.get`` with its default of ``None``.)
    """
    return [pair_insertion_dict.get(pair) for pair in pair_list]
# -
def insert_polymers(template, insertion_list):
    """Interleave *template* characters with the matching inserted characters.

    ``insertion_list[i]`` (if truthy) is placed after ``template[i]``; ``None``
    entries insert nothing.  Builds the result with a join instead of
    repeated string concatenation.
    """
    pieces = []
    for position, char in enumerate(template):
        pieces.append(char)
        if position < len(insertion_list) and insertion_list[position]:
            pieces.append(insertion_list[position])
    return ''.join(pieces)
# +
def simulate_step(polymer, pair_insertion_dict):
    """Apply one round of pair insertion to *polymer* and return the result."""
    pairs = split_template_to_pairs(polymer)
    insertions = generate_insertion_list(pairs, pair_insertion_dict)
    return insert_polymers(polymer, insertions)
# -
def part_1_answer(puzzle_input, steps):
    """Grow the polymer for *steps* insertion rounds and return the count
    difference between the most and least common element.

    Bug fix: the loop previously ran ``range(steps + 1)`` and therefore
    performed one extra insertion round beyond the requested number of
    steps (part 2 below correctly uses ``range(steps)``).
    """
    pair_insertion_dict, polymer = parse_polymer_info(puzzle_input)
    for _ in range(steps):
        polymer = simulate_step(polymer, pair_insertion_dict)
    count_chars = Counter(polymer)
    return count_chars.most_common()[0][1] - count_chars.most_common()[-1][1]
part_1_answer(sample_input, 10)
part_1_answer(puzzle_input, 10)
# ## Part 2
def template_to_dict(template):
    """Count the occurrences of each overlapping two-character pair in *template*."""
    counts = {}
    # Inline pairing: window i covers template[i-1:i+1].
    for i in range(1, len(template)):
        pair = template[i - 1] + template[i]
        counts[pair] = counts.get(pair, 0) + 1
    return counts
def parse_polymer_info(puzzle_input):
    """Parse rules and template for the pair-count representation.

    Each rule 'XY -> Z' maps the pair 'XY' to the set of its two child
    pairs {'XZ', 'ZY'}.  Returns (rules dict, template pair-count dict).
    """
    rules = {}
    for line in puzzle_input[2:]:
        pair, inserted = line.split(' -> ')
        rules[pair] = {pair[0] + inserted, inserted + pair[1]}
    return rules, template_to_dict(puzzle_input[0])
def simulate_step(pair_dict, pair_insertion_dict):
    """Advance the pair-count dictionary by one insertion round.

    Every occurrence of a pair is replaced by its two child pairs, so each
    child inherits the parent's full count.
    """
    next_counts = {}
    for pair, count in pair_dict.items():
        for child in pair_insertion_dict[pair]:
            next_counts[child] = next_counts.get(child, 0) + count
    return next_counts
def char_counter(pair_dict):
    """Recover per-character counts from the pair-count dictionary.

    Every character except the polymer's two endpoints appears in exactly
    two pairs, so each pair contributes half a count per character.  The
    endpoint correction then adds one each for 'N' and 'B' -- hard-coded
    to a template that starts with 'N' and ends with 'B' (true for the
    sample 'NNCB'; confirm for the real puzzle input).
    """
    counts = {}
    for pair, count in pair_dict.items():
        for char in pair:
            counts[char] = counts.get(char, 0) + count / 2
    for endpoint in ('N', 'B'):
        counts[endpoint] = int(counts.get(endpoint, 0)) + 1
    return counts
def part_2_answer(puzzle_input, steps):
    """Run *steps* insertion rounds on pair counts and return the count
    difference between the most and least common element."""
    rules, pair_counts = parse_polymer_info(puzzle_input)
    for _ in range(steps):
        pair_counts = simulate_step(pair_counts, rules)
    ranked = Counter(char_counter(pair_counts)).most_common()
    return int(ranked[0][1] - ranked[-1][1])
part_2_answer(sample_input, 40)
part_2_answer(puzzle_input, 40)
|
notebooks/day_14.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial: optimal binning with continuous target
# ## Basic
# To get us started, let's load a well-known dataset from the UCI repository and transform the data into a ``pandas.DataFrame``.
import pandas as pd
# NOTE(review): ``load_boston`` was deprecated in scikit-learn 1.0 and removed
# in 1.2 (ethical concerns with the dataset); this cell requires an older
# scikit-learn.
from sklearn.datasets import load_boston
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
# We choose a variable to discretize and the continuous target.
variable = "LSTAT"
x = df[variable].values
y = data.target
# Import and instantiate a ``ContinuousOptimalBinning`` object. We pass the variable name and its data type.
from optbinning import ContinuousOptimalBinning
optb = ContinuousOptimalBinning(name=variable, dtype="numerical")
# We fit the optimal binning object with arrays ``x`` and ``y``.
optb.fit(x, y)
# You can check if an optimal solution has been found via the ``status`` attribute:
optb.status
# You can also retrieve the optimal split points via the ``splits`` attribute:
optb.splits
# #### The binning table
# The optimal binning algorithms return a binning table; a binning table displays the binned data and several metrics for each bin. Class ``ContinuousOptimalBinning`` returns an object ``ContinuousBinningTable`` via the ``binning_table`` attribute.
binning_table = optb.binning_table
type(binning_table)
# The `binning_table` is instantiated, but not built. Therefore, the first step is to call the method `build`, which returns a ``pandas.DataFrame``.
binning_table.build()
# Let's describe the columns of this binning table:
#
# - Bin: the intervals delimited by the optimal split points.
# - Count: the number of records for each bin.
# - Count (%): the percentage of records for each bin.
# - Sum: the target sum for each bin.
# - Mean: the target mean for each bin.
#
# The last row shows the total number of records, sum and mean.
# You can use the method ``plot`` to visualize the histogram and mean curve. Note that the Bin ID corresponds to the binning table index.
binning_table.plot()
# ##### Mean transformation
# Now that we have checked the binned data, we can transform our original data into mean values. You can check the correctness of the transformation using pandas ``value_counts`` method, for instance.
x_transform_mean = optb.transform(x)
pd.Series(x_transform_mean).value_counts()
# ## Advanced
# Many of the advanced options have been covered in the previous tutorials with a binary target. **Check it out!** In this section, we focus on the mean monotonicity trend and the mean difference between bins.
# #### Mean monotonicity
# The monotonic_trend option permits forcing a monotonic trend to the mean curve. The default setting “auto” should be the preferred option, however, some business constraints might require to impose different trends. The default setting “auto” chooses the monotonic trend most likely to minimize the L1-norm from the options “ascending”, “descending”, “peak” and “valley” using a machine-learning-based classifier.
variable = "INDUS"
x = df[variable].values
y = data.target
optb = ContinuousOptimalBinning(name=variable, dtype="numerical",
monotonic_trend="auto")
optb.fit(x, y)
binning_table = optb.binning_table
binning_table.build()
binning_table.plot()
# A smoother curve, keeping the valley monotonicity, can be achieved by using ``monotonic_trend="convex"``.
optb = ContinuousOptimalBinning(name=variable, dtype="numerical",
monotonic_trend="convex")
optb.fit(x, y)
binning_table = optb.binning_table
binning_table.build()
binning_table.plot()
# For example, we can force the variable INDUS (proportion of non-retail business acres per town) to be monotonically descending with respect to the house-price.
optb = ContinuousOptimalBinning(name=variable, dtype="numerical",
monotonic_trend="descending")
optb.fit(x, y)
binning_table = optb.binning_table
binning_table.build()
binning_table.plot()
# #### Mininum mean difference between consecutive bins
# Now, we note that the mean difference between consecutive bins is not significant enough. Therefore, we decide to set ``min_mean_diff=2.0``:
optb = ContinuousOptimalBinning(name=variable, dtype="numerical",
monotonic_trend="descending", min_mean_diff=2.0)
optb.fit(x, y)
binning_table = optb.binning_table
binning_table.build()
binning_table.plot()
|
doc/source/tutorials/tutorial_continuous.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py3]
# language: python
# name: conda-env-py3-py
# ---
# +
import cmocean.cm as cm
import importlib
import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
from salishsea_tools import river_201803 as rivers
from salishsea_tools import river_201702 as old_rivers
# %matplotlib inline
# -
cmap = plt.get_cmap(cm.deep)
cmap.set_bad('darkgreen')
cmap_bwr = plt.get_cmap(cm.oxy)
cmap_bwr.set_bad('black')
# +
# 201702b bathymetry
# Build the 2-D bottom-depth field for the 201702 grid: for each water column
# take the depth of the w-level at the bottom-most wet cell (mbathy), then
# mask out land with the (inverted) surface tmask.
mesh = nc.Dataset('/home/sallen/MEOPAR/grid/mesh_mask201702.nc')
mbathy = mesh.variables['mbathy'][0,:,:]          # bottom wet-level index per column
gdepw = mesh.variables['gdepw_0'][0,:,:,:]        # 3-D w-point depths
deptht = mesh.variables['gdept_1d'][0,:]          # 1-D t-point depths (unused below)
surface_tmask = mesh.variables['tmask'][0,0,:,:]  # surface wet/dry mask
surface_tmask_7 = np.abs(surface_tmask-1)         # inverted for masked-array convention
mesh.close()
NEMO_bathy_7 = np.zeros(mbathy.shape)
# NOTE(review): this double loop could be vectorized with fancy indexing
# (gdepw[mbathy, jj, ii]); kept as-is for clarity.
for i in range(NEMO_bathy_7.shape[1]):
    for j in range(NEMO_bathy_7.shape[0]):
        level = mbathy[j,i]
        NEMO_bathy_7[j,i] = gdepw[level,j,i]
NEMO_bathy_7 = np.ma.masked_array(NEMO_bathy_7, mask = surface_tmask_7)
# +
# 201803p bathymetry
# Same construction as the 201702 block above, for the 201803 mesh mask;
# produces NEMO_bathy_8 / surface_tmask_8 for comparison plots below.
mesh = nc.Dataset('/data/sallen/results/MEOPAR/mudflats/201803b/mesh_mask.nc')
mbathy = mesh.variables['mbathy'][0,:,:]          # bottom wet-level index per column
gdepw = mesh.variables['gdepw_0'][0,:,:,:]        # 3-D w-point depths
deptht = mesh.variables['gdept_1d'][0,:]          # 1-D t-point depths (unused below)
surface_tmask = mesh.variables['tmask'][0,0,:,:]  # surface wet/dry mask
surface_tmask_8 = np.abs(surface_tmask-1)         # inverted for masked-array convention
mesh.close()
NEMO_bathy_8 = np.zeros(mbathy.shape)
for i in range(NEMO_bathy_8.shape[1]):
    for j in range(NEMO_bathy_8.shape[0]):
        level = mbathy[j,i]
        NEMO_bathy_8[j,i] = gdepw[level,j,i]
NEMO_bathy_8 = np.ma.masked_array(NEMO_bathy_8, mask = surface_tmask_8)
# -
# ## Rivers
# Rivers
importlib.reload(rivers)
# Rivers
def plot_rivers(ax1, ax2, imin, imax, jmin, jmax, surface_tmask_7, surface_tmask_8):
    """Overlay river grid points from both river configurations.

    ax1 gets the old (river_201702) points, ax2 the new (river_201803)
    points.  A magenta square means the river point falls on water in the
    corresponding (inverted, 1 = land) mask; a yellow circle plus a printed
    warning means it falls on land.  Only points inside the
    (imin:imax, jmin:jmax) window are drawn; the +0.5 centers markers in
    the pcolormesh cells.
    """
    for region in rivers.prop_dict:
        for river in rivers.prop_dict[region]:
            ii = rivers.prop_dict[region][river]['i']
            jj = rivers.prop_dict[region][river]['j']
            if ii < imax and ii >= imin and jj < jmax and jj >= jmin:
                if surface_tmask_8[ii, jj] == 0:
                    ax2.plot(jj+0.5, ii+0.5, 'ms')
                else:
                    ax2.plot(jj+0.5, ii+0.5, 'yo')
                    print (ii, jj, 'on land new')
    for region in old_rivers.prop_dict:
        for river in old_rivers.prop_dict[region]:
            ii = old_rivers.prop_dict[region][river]['i']
            jj = old_rivers.prop_dict[region][river]['j']
            if ii < imax and ii >= imin and jj < jmax and jj >= jmin:
                if surface_tmask_7[ii, jj] == 0:
                    ax1.plot(jj+0.5, ii+0.5, 'ms')
                else:
                    ax1.plot(jj+0.5, ii+0.5, 'yo')
                    print (ii, jj, 'on land old')
def do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8, vmax=20, bigger=10):
    """Compare old vs. new bathymetry over a grid window in a 2x2 figure.

    Top row: bathymetry (old left, new right) with river points overlaid
    by plot_rivers.  Bottom row: the (inverted) land masks; red stars mark
    points that turned from water to land in the new mask, yellow circles
    mark any depth change.  *vmax* caps the depth colour scale; *bigger*
    sets the figure size.  Returns the (figure, axes) pair.
    """
    fig, ax = plt.subplots(2, 2, figsize=(bigger, bigger))
    ax[0, 0].pcolormesh(range(jmin,jmax), range(imin,imax), NEMO_bathy_7[imin:imax, jmin:jmax], cmap=cmap, vmax=vmax)
    ax[0, 1].pcolormesh(range(jmin,jmax), range(imin,imax), NEMO_bathy_8[imin:imax, jmin:jmax], cmap=cmap, vmax=vmax)
    ax[1, 0].pcolormesh(range(jmin,jmax), range(imin,imax), surface_tmask_7[imin:imax, jmin:jmax], cmap=cm.dense)
    plot_rivers(ax[0, 0], ax[0, 1], imin, imax, jmin, jmax, surface_tmask_7, surface_tmask_8)
    ax[1, 1].pcolormesh(range(jmin,jmax), range(imin,imax), surface_tmask_8[imin:imax, jmin:jmax], cmap=cm.dense)
    for ii in range(imin, imax):
        for jj in range(jmin, jmax):
            # mask convention is 1 = land, so 8 > 7 means water became land
            if surface_tmask_8[ii, jj] > surface_tmask_7[ii, jj]:
                ax[1, 0].plot(jj+0.5, ii+0.5, 'r*')
            if NEMO_bathy_8[ii, jj] != NEMO_bathy_7[ii, jj]:
                ax[1, 1].plot(jj+0.5, ii+0.5, 'yo')
    return fig, ax
imin, imax = 800, 898
jmin, jmax = 0, 100
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 800, 898
jmin, jmax = 100, 200
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 800, 898
jmin, jmax = 200, 301
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 800, 898
jmin, jmax = 300, 398
# empty
imin, imax = 700, 800
jmin, jmax = 0, 100
# empty
imin, imax = 700, 800
jmin, jmax = 100, 200
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 700, 800
jmin, jmax = 200, 300
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 700, 800
jmin, jmax = 300, 398
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 600, 700
jmin, jmax = 0, 100
#empty
imin, imax = 600, 700
jmin, jmax = 100, 200
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 600, 700
jmin, jmax = 200, 300
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 600, 700
jmin, jmax = 300, 398
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8, vmax=40);
imin, imax = 500, 600
jmin, jmax = 0, 100
#empty
imin, imax = 500, 601
jmin, jmax = 100, 200
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 500, 600
jmin, jmax = 200, 300
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 500, 600
jmin, jmax = 300, 398
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 400, 500
jmin, jmax = 0, 100
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 400, 500
jmin, jmax = 100, 200
#empty
imin, imax = 400, 500
jmin, jmax = 200, 300
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 380, 500
jmin, jmax = 280, 398
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 300, 400
jmin, jmax = 0, 100
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 290, 400
jmin, jmax = 100, 200
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 300, 400
jmin, jmax = 190, 300
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 300, 400
jmin, jmax = 290, 398
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 200, 300
jmin, jmax = 0, 100
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 200, 300
jmin, jmax = 100, 200
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 200, 300
jmin, jmax = 190, 300
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 200, 300
jmin, jmax = 300, 398
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8,surface_tmask_8);
imin, imax = 100, 200
jmin, jmax = 0, 100
#empty
imin, imax = 100, 200
jmin, jmax = 100, 205
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 100, 200
jmin, jmax = 200, 300
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 120, 220
jmin, jmax = 250, 350
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
# Need to go through my patches carefully. Snohomish Bay here is a mess.
# Fixed in p3
imin, imax = 0, 100
jmin, jmax = 0, 100
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
# This one needs fixing. I have a cutoff!
# Fixed in beta.
# ## Rivers Done to Here
imin, imax = 0, 100
jmin, jmax = 90, 200
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
# This one needs fixing. I have a cutoff!
# Not in alpha
imin, imax = 0, 100
jmin, jmax = 200, 300
do_plots(imin, imax, jmin, jmax, NEMO_bathy_7, surface_tmask_7, NEMO_bathy_8, surface_tmask_8);
imin, imax = 0, 100
jmin, jmax = 300, 398
# empty
|
bathymetry/LookAt201803Bathymetry.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="n_5oRe0SXilM"
# # Introduction to machine learning & Data Analysis
#
# Basic introduction on how to perform typical machine learning tasks with Python.
#
# Prepared by <NAME> & <NAME>,
# Science IT Support, University Of Bern, 2021
#
# This work is licensed under <a href="https://creativecommons.org/share-your-work/public-domain/cc0/">CC0</a>.
#
# # Part 1.
# + [markdown] id="9SxiIczg1s1k"
# # What is Machine Learning?
# + [markdown] id="ll5e8N9SVwVa"
# ## Why Machine Learning?
# + [markdown] id="q6tHZQCywhGB"
#
#
# 1.
# 2.
# 3.
#
#
#
#
# + [markdown] id="5PbjhPxLmsI4"
# ## Learning from data
# + [markdown] id="Xsd1MyT9eIdW"
# Unlike classical algorithms, created by a human to analyze some data:
# + [markdown] id="XtoqE5XO3L1j"
# <img src="https://github.com/neworldemancer/DSF5/raw/master/figures/alg_1.png" width="60%"/>
# + [markdown] id="cu4Uq4k_ePoo"
# in machine learning the data itself is used to define the algorithm:
# + [markdown] id="e2xIgm223vfa"
# <img src="https://github.com/neworldemancer/DSF5/raw/master/figures/alg_2.png" width="60%"/>
# + [markdown] id="LvAyI1uzfBUT"
# Machine learning is learning from data.
# If
# * performance on a task **T**
# * improves according to measure **P**
# * with experience **E**
#
# we say that the model learns from data.
#
# + [markdown] id="U9YsgnbD32dk"
# <img src="https://github.com/neworldemancer/DSF5/raw/master/figures/alg_3.png" width="60%"/>
# + [markdown] id="to8vOJeC1xjE"
#
# The boundary is a bit fuzzy.
#
# In fact when we create algorithms, the problem in hand (namely the data related to the problem), drives us to choose one or another algorithm. And we then tune it, to perform well on a task in hand.
#
# Machine Learning (ML) formalized this procedure, allowing us to automate (part) of this process.
#
# In this course you will get acquainted with the basics of Machine Learning — where the approach to handling the data (the algorithm) is defined, or as we say "learned" from data in hand.
# + [markdown] id="ROFPolZpm21t"
# ## Classification vs Regression.
# + [markdown] id="70_dMCX340Rm"
# The two main tasks handled by (supervised) ML is regression and classification.
# In regression we aim at modeling the relationship between the system's response (dependent variable) and one or more explanatory variables (independent variables).
#
# Examples of regression would be predicting the temperature for each day of the year, or expenses of the household as a function of the number of children and adults.
#
# In classification the aim is to identify what class does a data-point belong to. For example, the species or the iris plant based on the size of its petals, or whether an email is spam or not based on its content.
# + [markdown] id="qBXGs0xRERuv"
# ## Performance measures
#
# + [markdown] id="Lx37P09Vkepw"
# 1. Regression:
# * Mean Square Error: $\textrm{MSE}=\frac{1}{n}\sum_i(y_i - \hat y(\bar x_i))^2$
# * Mean Absolute Error: $\textrm{MAE}=\frac{1}{n}\sum_i|y_i - \hat y(\bar x_i)|$
# * Median Absolute Deviation: $\textrm{MAD}=\textrm{median}(|y_i - \hat y(\bar x_i)|)$
# * Fraction of the explained variance: $R^2=1-\frac{\sum_i(y_i - \hat y(\bar x_i))^2}{\sum_i(y_i - \bar y_i)^2}$, where $\bar y=\frac{1}{n}\sum_i y_i$
#
# 2. Classification:
# * Confusion matrix
#
# + [markdown] id="ZSH3blOw36jz"
# <img src="https://github.com/neworldemancer/DSF5/raw/master/figures/confusion_mtr.png" width="46%"/>
# + [markdown] id="MK2gGVJyfdUJ"
# * Accuracy $=\frac{\textrm{TP} + \textrm{TN}}{\textrm{TP} + \textrm{FP} + \textrm{FN} + \textrm{TN}}$
# * Precision $=\frac{\textrm{TP}}{\textrm{TP} + \textrm{FP}}$
# * Recall $=\frac{\textrm{TP}}{\textrm{TP} + \textrm{FN}}$
# * F1 $=\frac{2}{\frac{1}{\textrm{Precision}} + \frac{1}{\textrm{Recall}} } = 2\frac{\textrm{Precision} \cdot \textrm{Recall}}{\textrm{Precision} + \textrm{Recall}} = \frac{2 \textrm{TP}}{2 \textrm{TP} + \textrm{FP} + \textrm{FN}} = \frac{\textrm{TP}}{\textrm{TP} + \frac{\textrm{FP} + \textrm{FN}}{2}}$
# * F2 $=\frac{5}{\frac{1}{\textrm{Precision}} + \frac{4}{\textrm{Recall}} } = 5\frac{\textrm{Precision} \cdot \textrm{Recall}}{4 \cdot \textrm{Precision} + \textrm{Recall}} = \frac{5 \textrm{TP}}{5 \textrm{TP} + \textrm{FP} + 4 \cdot \textrm{FN}} = \frac{\textrm{TP}}{\textrm{TP} + \frac{\textrm{FP} + 4 \cdot\textrm{FN}}{5}}$
# * Threat score (TS), or Intersection over Union: $\mathrm{IoU}=\frac{\mathrm{TP}}{\mathrm{TP}+\mathrm{FN}+\mathrm{FP}}$
#
#
# During model optimization the used measure in most cases must be differentiable. To this end usually some measure of similarities of distributions are employed (e.g. cross-entropy).
# + [markdown] id="AD6zwuTHiYKA"
# ## Actual aim: Generalization
# + [markdown] id="lNsD3FQS4JP7"
# <img src="https://github.com/neworldemancer/DSF5/raw/master/figures/Bias_variance_1.png" width="35%"/>
#
# <img src="https://github.com/neworldemancer/DSF5/raw/master/figures/Bias_variance_2.png" width="60%"/>
# + [markdown] id="QoONru7ji3QD"
# To measure model performance in an unbiassed way, we need to use different data than the data that the model was trained on. For this we use the 'train-test' split: e.g. 20% of all available dataset is reserved for model performance test, and the remaining 80% is used for actual model training.
# + [markdown] id="NVSRftm8X1m1"
# # Load libraries
# + id="hVJn0ilgOS8F"
# Scikit-learn (formerly scikits.learn and also known as sklearn) is a free
# software machine learning library for the Python programming language.
# It features various classification, regression and clustering algorithms,
# and is designed to interoperate with the Python numerical and scientific
# libraries NumPy and SciPy. (from wiki)
from sklearn import linear_model
from sklearn import tree
from sklearn import ensemble
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.inspection import permutation_importance
# common visualization module
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
# numeric library
import numpy as np
import os
import pandas as pd
from time import time as timer
import tarfile
import tensorflow as tf
# %matplotlib inline
from matplotlib import animation
from IPython.display import HTML
# + id="Rg0LDjc5nECH"
pip install dtreeviz
# + id="3gFMn8yunHDI"
from dtreeviz.trees import dtreeviz # remember to load the package
# + id="8Y7aMevU3Ug8"
# Download and unpack the course material archive once; the extracted
# 'data' directory acts as the cache marker, so re-runs are skipped.
if not os.path.exists('data'):
    path = os.path.abspath('.')+'/colab_material.tgz'
    # get_file downloads the archive from GitHub into `path`
    tf.keras.utils.get_file(path, 'https://github.com/neworldemancer/DSF5/raw/master/colab_material.tgz')
    tar = tarfile.open(path, "r:gz")
    tar.extractall()
    tar.close()
# + [markdown] id="pclZR6uFklf_"
# # Datasets
# + [markdown] id="s_wxOrdWko8W"
# In this course we will use several synthetic and real-world datasets to illustrate the behavior of the models and exercise our skills.
# + [markdown] id="8UQgU5I-lEll"
# ## 1. Synthetic linear
# + id="jGfWOWRjlWPa"
def get_linear(n_d=1, n_points=10, w=None, b=None, sigma=5):
    """Generate a noisy linear dataset y = x @ w + b + noise.

    Parameters
    ----------
    n_d : int
        Number of input features.
    n_points : int
        Number of samples to draw.
    w : array-like of shape (n_d,) or None
        Slopes; drawn uniformly from [0.1, 10) when None.
    b : float or None
        Intercept; drawn uniformly from [-10, 10) when None.
    sigma : float
        Standard deviation of the additive Gaussian noise.

    Returns
    -------
    x : ndarray of shape (n_points, n_d), y : ndarray of shape (n_points,)
    """
    x = np.random.uniform(0, 10, size=(n_points, n_d))
    # FIX: the previous `w = w or ...` / `b = b or ...` were buggy — a numpy
    # array `w` raises "truth value of an array is ambiguous", and an
    # explicit b=0 was silently replaced by a random intercept.  Test
    # identity against None instead.
    if w is None:
        w = np.random.uniform(0.1, 10, n_d)
    if b is None:
        b = np.random.uniform(-10, 10)
    y = np.dot(x, w) + b + np.random.normal(0, sigma, size=n_points)
    print('true slopes: w =', w, '; b =', b)
    return x, y
# + id="5RLYxGy_nBZG"
# 1D example: sample a noisy line and scatter-plot it.
x, y = get_linear(n_d=1, sigma=1)
plt.plot(x[:, 0], y, '*')
plt.show()
# + id="10ODDOp4nX4S"
# 2D example: two input features, shown as a 3D scatter plot.
n_d = 2
x, y = get_linear(n_d=n_d, n_points=100)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x[:,0], x[:,1], y, marker='x', color='b',s=10)
plt.show()
# + [markdown] id="FJ5rjq7fIe8Q"
# ## 2. House prices
# + [markdown] id="A-45usskInlD"
# Subset of the Ames Houses dataset: http://jse.amstat.org/v19n3/decock.pdf
# + id="dVv2ID96IyN0"
def house_prices_dataset(return_df=False, return_df_xy=False, price_max=400000, area_max=40000):
    """Load the Ames housing subset as numeric (x, y) arrays.

    param: return_df - if True, additionally return the full raw DataFrame
           return_df_xy - if True (and return_df is False), additionally
                          return the cleaned (df_X, df_Y) DataFrames
           price_max - drop houses with SalePrice >= this value (<=0 disables the cut)
           area_max - drop houses with LotArea >= this value (<=0 disables the cut)
    returns: (x, y), (x, y, df) or (x, y, (df_X, df_Y)) depending on the flags;
             x is float32 of shape (n_samples, len(useful_fields)),
             y is the sale price in dollars
    """
    path = 'data/AmesHousing.csv'
    # keep_default_na=False: the literal string 'NA' is a meaningful
    # category in this dataset (e.g. "no alley"), not a missing value
    df = pd.read_csv(path, na_values=('NaN', ''), keep_default_na=False, )
    # strip spaces and slashes from the column names for easier access
    rename_dict = {k:k.replace(' ', '').replace('/', '') for k in df.keys()}
    df.rename(columns=rename_dict, inplace=True)
    # hand-picked subset of columns used as model features
    useful_fields = ['LotArea',
                     'Utilities', 'OverallQual', 'OverallCond',
                     'YearBuilt', 'YearRemodAdd', 'ExterQual', 'ExterCond',
                     'HeatingQC', 'CentralAir', 'Electrical',
                     '1stFlrSF', '2ndFlrSF','GrLivArea',
                     'FullBath', 'HalfBath',
                     'BedroomAbvGr', 'KitchenAbvGr', 'KitchenQual', 'TotRmsAbvGrd',
                     'Functional','PoolArea',
                     'YrSold', 'MoSold'
                     ]
    target_field = 'SalePrice'
    df.dropna(axis=0, subset=useful_fields+[target_field], inplace=True)
    # ordinal encodings for categorical columns (worst -> best order);
    # NOTE(review): entries for columns not in useful_fields appear to be
    # ignored by DataFrame.replace — confirm against pandas docs
    cleanup_nums = {'Street': {'Grvl': 0, 'Pave': 1},
                    'LotFrontage': {'NA':0},
                    'Alley': {'NA':0, 'Grvl': 1, 'Pave': 2},
                    'LotShape': {'IR3':0, 'IR2': 1, 'IR1': 2, 'Reg':3},
                    'Utilities': {'ELO':0, 'NoSeWa': 1, 'NoSewr': 2, 'AllPub': 3},
                    'LandSlope': {'Sev':0, 'Mod': 1, 'Gtl': 3},
                    'ExterQual': {'Po':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'ExterCond': {'Po':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'BsmtQual': {'NA':0, 'Po':1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex':5},
                    'BsmtCond': {'NA':0, 'Po':1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex':5},
                    'BsmtExposure':{'NA':0, 'No':1, 'Mn': 2, 'Av': 3, 'Gd': 4},
                    'BsmtFinType1':{'NA':0, 'Unf':1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ':5, 'GLQ':6},
                    'BsmtFinType2':{'NA':0, 'Unf':1, 'LwQ': 2, 'Rec': 3, 'BLQ': 4, 'ALQ':5, 'GLQ':6},
                    'HeatingQC': {'Po':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'CentralAir': {'N':0, 'Y': 1},
                    'Electrical': {'':0, 'NA':0, 'Mix':1, 'FuseP':2, 'FuseF': 3, 'FuseA': 4, 'SBrkr': 5},
                    'KitchenQual': {'Po':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'Functional': {'Sal':0, 'Sev':1, 'Maj2': 2, 'Maj1': 3, 'Mod': 4, 'Min2':5, 'Min1':6, 'Typ':7},
                    'FireplaceQu': {'NA':0, 'Po':1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex':5},
                    'PoolQC': {'NA':0, 'Fa': 1, 'TA': 2, 'Gd': 3, 'Ex':4},
                    'Fence': {'NA':0, 'MnWw': 1, 'GdWo': 2, 'MnPrv': 3, 'GdPrv':4},
                    }
    df_X = df[useful_fields].copy()
    df_X.replace(cleanup_nums, inplace=True)  # map ordered categorical values to numeric codes
    df_Y = df[target_field].copy()
    x = df_X.to_numpy().astype(np.float32)
    y = df_Y.to_numpy().astype(np.float32)
    # optional outlier cuts on price and lot area (feature column 0 == LotArea)
    if price_max>0:
        idxs = y<price_max
        x = x[idxs]
        y = y[idxs]
    if area_max>0:
        idxs = x[:,0]<area_max
        x = x[idxs]
        y = y[idxs]
    return (x, y, df) if return_df else ((x, y, (df_X, df_Y)) if return_df_xy else (x,y))
# + id="YqWU0eHts1RM"
# Load the cleaned dataset together with the raw DataFrame for inspection.
x, y, df = house_prices_dataset(return_df=True)
print(x.shape, y.shape)
df.head()
# + id="YDtzVS-1Mxxe"
# summary statistics of all columns
df.describe()
# + id="91nj7znzMEpA"
# sale price vs. lot area (feature column 0)
plt.plot(x[:, 0], y, '.')
plt.xlabel('area, sq.ft')
plt.ylabel('price, $');
plt.show()
# + [markdown] id="q7CNxkPdNB4L"
# ## 3. Blobs
# + id="j8wXhleONKgZ"
# Three well-separated Gaussian blobs; color marks the class label.
x, y = make_blobs(n_samples=1000, centers=[[0,0], [5,5], [10, 0]])
colors = "ygr"
for i, color in enumerate(colors):
    idx = y == i
    plt.scatter(x[idx, 0], x[idx, 1], c=color, edgecolor='gray', s=25)
plt.show()
# + id="NKcmdcZf0VO8"
# Same blobs, sheared with an affine map so the classes overlap more.
x, y = make_blobs(n_samples=1000, centers=[[0,0], [5,5], [10, 0]])
transformation = [[0.4, 0.2], [-0.4, 1.2]] # affine transformation matrix
x = np.dot(x, transformation) # applied to point coordinates to make blobs less separable
colors = "ygr"
for i, color in enumerate(colors):
    idx = y == i
    plt.scatter(x[idx, 0], x[idx, 1], c=color, edgecolor='gray', s=25)
plt.show()
# + [markdown] id="8S1jwU4cXQX4"
# ## 4. MNIST
# + [markdown] id="e2u82UQ5XQX4"
# The MNIST database of handwritten digits has a training set of 60,000 examples, and a test set of 10,000 examples. The digits have been size-normalized and centered in a fixed-size image.
# It is a good database for people who want to try learning techniques and pattern recognition methods on real-world data while spending minimal efforts on preprocessing and formatting (taken from http://yann.lecun.com/exdb/mnist/). Each example is a 28x28 grayscale image and the dataset can be readily downloaded from Tensorflow.
# + id="JaNaGGOkXQX5"
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# + [markdown] id="dlUY5gl8XQX7"
# Let's check few samples:
# + id="qtYtGEDdXQX8"
# Show an n x n grid of random training digits with their labels overlaid.
n = 3
fig, ax = plt.subplots(n, n, figsize=(2*n, 2*n))
# flatten the 2D array of axes into a flat list for easy zipping
ax = [ax_xy for ax_y in ax for ax_xy in ax_y]
for axi, im_idx in zip(ax, np.random.choice(len(train_images), n**2)):
    im = train_images[im_idx]
    im_class = train_labels[im_idx]
    axi.imshow(im, cmap='gray')
    axi.text(1, 4, f'{im_class}', color='r', size=16)
    axi.grid(False)
# FIX: pass spacing as keywords — positional arguments to tight_layout
# (pad, h_pad, w_pad) are deprecated/removed in recent Matplotlib versions.
plt.tight_layout(pad=0, h_pad=0, w_pad=0)
plt.show()
# + [markdown] id="ITfbaOgfYNsq"
# ## 5. Fashion MNIST
# + [markdown] id="jgzzOS7YYTru"
# `Fashion-MNIST` is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. (from https://github.com/zalandoresearch/fashion-mnist)
# + id="RcV2gzmuYljJ"
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# + [markdown] id="SPw6-GoPbT6U"
# Let's check few samples:
# + id="tHFd0sFHY4Li"
# Show an n x n grid of random training images with their integer labels.
n = 3
fig, ax = plt.subplots(n, n, figsize=(2*n, 2*n))
# flatten the 2D array of axes into a flat list for easy zipping
ax = [ax_xy for ax_y in ax for ax_xy in ax_y]
for axi, im_idx in zip(ax, np.random.choice(len(train_images), n**2)):
    im = train_images[im_idx]
    im_class = train_labels[im_idx]
    axi.imshow(im, cmap='gray')
    axi.text(1, 4, f'{im_class}', color='r', size=16)
    axi.grid(False)
# FIX: pass spacing as keywords — positional arguments to tight_layout
# (pad, h_pad, w_pad) are deprecated/removed in recent Matplotlib versions.
plt.tight_layout(pad=0, h_pad=0, w_pad=0)
plt.show()
# + id="b2LkoWfZEi4g"
# Human-readable names for the 10 Fashion-MNIST classes (index == label).
fmnist_class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                      'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# + [markdown] id="iHEA0tCLagoV"
# Each of the training and test examples is assigned to one of the following labels:
#
# | Label | Description |
# | --- | --- |
# | 0 | T-shirt/top |
# | 1 | Trouser |
# | 2 | Pullover |
# | 3 | Dress |
# | 4 | Coat |
# | 5 | Sandal |
# | 6 | Shirt |
# | 7 | Sneaker |
# | 8 | Bag |
# | 9 | Ankle boot |
# + [markdown] id="RHRXds9U9134"
# # `scikit-learn` interface
# + [markdown] id="I2toQKrAzH_U"
# In this course we will primarily use the `scikit-learn` module.
# You can find extensive documentation with examples in the [user guide](https://scikit-learn.org/stable/user_guide.html)
#
# The module contains A LOT of different machine learning methods, and here we will cover only few of them. What is great about `scikit-learn` is that it has a uniform and consistent interface.
#
# All the different ML approaches are implemented as classes with a set of same main methods:
#
# 1. `fitter = ...`: Create fitter object.
# 2. `fitter.fit(x, y[, sample_weight])`: Fit the model to predict a list of target values `y` from a list of samples `x`.
# 3. `y_pred = fitter.predict(X)`: Predict using the trained model.
# 4. `s = fitter.score(x, y[, sample_weight])`: Obtain a relevant performance measure of the trained model.
#
# This allows one to easily replace one approach with another and find the best one for the problem at hand, by simply using a regression/classification object of another class, while the rest of the code can remain the same.
#
# + [markdown] id="xqLR5-eQ2vtz"
# It is useful to know that generally in scikit-learn the input data is represented as a matrix $X$ of dimensions `n_samples x n_features` , whereas the supervised labels/values are stored in a matrix $Y$ of dimensions `n_samples x n_target` .
# + [markdown] id="K4qgOdz7Yyeb"
# # 1.Linear models
# + [markdown] id="Hh6lII-Hz8u-"
# In many cases the scalar value of interest - dependent variable - is (or can be approximated as) linear combination of the independent variables.
#
# In linear regression the estimator is searched in the form: $$\hat{y}(\bar{x} | \bar{w}) = w_0 + w_1 x_1 + ... + w_p x_p$$
#
# The parameters $\bar{w} = (w_1,..., w_p)$ and $w_0$ are designated as `coef_` and `intercept_` in `sklearn`.
#
# Reference: https://scikit-learn.org/stable/modules/linear_model.html
# + [markdown] id="Vlf6_berQ1vq"
# ## 1. Linear regression
# + [markdown] id="zatxRr8bOuTs"
# [LinearRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html) fits a linear model with coefficients $\bar{w} = (w_1,..., w_p)$ and $w_0$ to minimize the residual sum of squares between the observed targets in the dataset, and the targets predicted by the linear approximation.
#
# Mathematically it solves a problem of the form: $$\bar{w} = \min_{w_i} || \bar{X} \cdot \bar{w} - y||_2^2$$
# + id="sqh7XwGkNg6r"
x, y = get_linear(n_d=1, sigma=3, n_points=30) # p==1, 1D input
plt.scatter(x, y);
plt.show()
# + id="IFawJfQJOKX3"
# fit ordinary least squares to the 1D data
reg = linear_model.LinearRegression()
reg.fit(x, y)
# + id="diHNLTNMOek5"
# fitted slope(s) and intercept
w, w0 = reg.coef_, reg.intercept_
print(w, w0)
# + id="hyeHY3bxPYSF"
# overlay the fitted line on top of the data points
plt.scatter(x, y, marker='*', label='data points')
x_f = np.linspace(x.min(), x.max(), 10)
y_f = w0 + w[0] * x_f
plt.plot(x_f, y_f, label='fit', c='r')
plt.legend()
plt.show()
# + id="dNX-5gYOIi40"
# residual standard deviation (close to RMSE for an unbiased fit)
np.std(y - reg.predict(x)) # or use metrics.mean_squared_error(..., squared=False)
# + id="ID0Hdzx0NvxF"
# R2
reg.score(x, y)
# + [markdown] id="7rg2_DZCHgJE"
# Let's try 2D input.
# Additionally, here we will split the whole dataset into training and test subsets using the `train_test_split` function:
# + id="oK5MILosSI7d"
n_d = 2
x, y = get_linear(n_d=n_d, n_points=1000, sigma=5)
# train test split: hold out 20% of the points for evaluation
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
reg = linear_model.LinearRegression()
reg.fit(x_train, y_train)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_train[:,0], x_train[:,1], y_train, marker='x', s=40)
ax.scatter(x_test[:,0], x_test[:,1], y_test, marker='+', s=80)
# evaluate the fitted plane on a 10x10 grid covering the data range
xx0 = np.linspace(x[:,0].min(), x[:,0].max(), 10)
xx1 = np.linspace(x[:,1].min(), x[:,1].max(), 10)
xx0, xx1 = [a.flatten() for a in np.meshgrid(xx0, xx1)]
xx = np.stack((xx0, xx1), axis=-1)
yy = reg.predict(xx)
ax.plot_trisurf(xx0, xx1, yy, alpha=0.25, linewidth=0)
plt.show()
# + id="kW5GiLlhS3Y8"
# residual standard deviation on train vs test
print('train mse =', np.std(y_train - reg.predict(x_train)))
print('test mse =', np.std(y_test - reg.predict(x_test)))
# + id="O_Fb1zb5S3ZG"
# R2
print('train R2 =', reg.score(x_train, y_train))
print('test R2 =', reg.score(x_test, y_test))
# + [markdown] id="zI6s2Amob48j"
# ## EXERCISE 1.
# + [markdown] id="zRi8SPiMb9FM"
# Use linear regression to fit house prices dataset.
# + id="vaQVHyvPcHW2"
# 1. make train/test split
# 2. fit the model
# 3. evaluate MSE, MAD, and R2 on train and test datasets
# 4. plot y vs predicted y for test and train parts
# + [markdown] id="zZX9MQlORLfY"
# ## 2. Logistic regression
# + [markdown] id="yRUwQD5UR0Vf"
# Logistic regression, despite its name, is a linear model for classification rather than regression. In this model, the probabilities describing the possible outcomes of a single trial are modeled using a logistic function.
#
# In logistic regression the probability $p$ of a point belonging to a class is modeled as: $$\frac{p}{1-p} = e^{w_0 + w_1 x_1 + ... + w_p x_p}$$
#
# The binary class $\ell_2$-penalized logistic regression minimizes the following cost function:
# $$\min_{w, c} \sum_{i=1}^n \log(\exp(- y_i (X_i^T w + c)) + 1) + \lambda \frac{1}{2}w^T w$$.
# + id="1BnNRNDj-zbE"
# routine for coloring 2d space according to class prediction
def plot_prediction_2d(x_min, x_max, y_min, y_max, classifier, ax=None):
    """Shade the rectangle [x_min, x_max] x [y_min, y_max] by predicted class.

    A dense point grid is generated, `classifier.predict` is evaluated on
    every node, and the resulting class labels are drawn as a filled contour
    plot on `ax` (or on the current axes when `ax` is None).
    """
    step = .02  # grid resolution in data units
    grid_x, grid_y = np.meshgrid(np.arange(x_min, x_max, step),
                                 np.arange(y_min, y_max, step))
    # Flatten the grid into an (n_points, 2) matrix of xy pairs, predict a
    # class per point, then restore the grid shape for contour plotting.
    flat_xy = np.stack([grid_x.flatten(),
                        grid_y.flatten()], axis=-1)
    labels = classifier.predict(flat_xy).reshape(grid_x.shape)
    # Put the result into a color contour plot on the requested axes.
    target_ax = ax or plt.gca()
    target_ax.contourf(grid_x,
                       grid_y,
                       labels,
                       cmap='Pastel1', alpha=0.5)
# + id="4bJHWawkq0ev"
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
x, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
x = np.dot(x, transformation)  # shear the blobs so the classes overlap
# Fit logistic regression with both multi-class strategies and compare
# their decision surfaces.
for multi_class in ('multinomial', 'ovr'):
    # do fit
    clf = linear_model.LogisticRegression(solver='sag', max_iter=100,
                                          multi_class=multi_class)
    clf.fit(x, y)
    # print the training scores
    print("training accuracy : %.3f (%s)" % (clf.score(x, y), multi_class))
    # get range for visualization
    x_0 = x[:, 0]
    x_1 = x[:, 1]
    x_min = x_0.min() - 1
    x_max = x_0.max() + 1
    y_min = x_1.min() - 1
    y_max = x_1.max() + 1
    plt.figure(figsize=(10,10))
    plot_prediction_2d(x_min, x_max, y_min, y_max, classifier=clf)
    plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
    plt.axis('tight')
    # Plot also the training points
    colors = 'rbg'
    for i, color in zip(clf.classes_, colors):
        idx = np.where(y == i)
        plt.scatter(x_0[idx], x_1[idx], c=color, cmap=plt.cm.Paired,
                    edgecolor='gray', s=30, linewidth=0.2)
    # Plot the three one-against-all classifiers
    coef = clf.coef_
    intercept = clf.intercept_
    def plot_hyperplane(c, color):
        # boundary of class c: coef[c] . x + intercept[c] == 0, solved for x1
        def line(x0):
            return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
        plt.plot([x_min, x_max], [line(x_min), line(x_max)],
                 ls="--", color=color)
    for i, color in zip(clf.classes_, colors):
        plot_hyperplane(i, color)
    plt.show()
# + [markdown] id="AQ69XKdbZcA3"
# ## EXERCISE 2.
# + id="__9jcqXzZaQp"
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# + [markdown] id="aRBuV1m_E4Ll"
#
# + [markdown] id="304Ul40adUT2"
# We will reshape 2-d images to 1-d arrays for use in scikit-learn:
# + id="DtD8C8_4a7dP"
# Flatten each 28x28 image into a 784-long feature vector.
n_train = len(train_labels)
x_train = train_images.reshape((n_train, -1))
y_train = train_labels
n_test = len(test_labels)
x_test = test_images.reshape((n_test, -1))
y_test = test_labels
# + [markdown] id="UJj7ofWD_Wp2"
# Now use a multinomial logistic regression classifier, and measure the accuracy:
# + id="CeIKcMeV_rmk"
# 1. Create classifier
# 2. fit the model
# 3. evaluate accuracy on train and test datasets
# + [markdown] id="7-CGSS2OZKHD"
#
# # 2. Trees & Forests
# + [markdown] id="Bxtv48o-F1Ku"
# ## 1. Decision Tree
# + [markdown] id="l582Sr0_WGXj"
# Decision Trees are a non-parametric supervised learning method used for classification and regression. The goal is to create a model that predicts the value of a target variable by learning **simple** decision rules inferred from the data features.
#
# They are fast to train, easily interpretable, capture non-linear dependencies, and require small amount of data.
# + id="fz5raIG_WQfg"
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
# Fit trees of increasing depth, keeping each fitted tree for later cells.
dtcs = []
for depth in (1, 2, 3, 4):
    # do fit
    dtc = tree.DecisionTreeClassifier(max_depth=depth)
    dtcs.append(dtc)
    dtc.fit(X, y)
    # print the training scores
    print("training score : %.3f (depth=%d)" % (dtc.score(X, y), depth))
    # get range for visualization
    x_0 = X[:, 0]
    x_1 = X[:, 1]
    x_min = x_0.min() - 1
    x_max = x_0.max() + 1
    y_min = x_1.min() - 1
    y_max = x_1.max() + 1
    # left panel: decision surface; right panel: the fitted tree itself
    fig, ax = plt.subplots(1, 2, figsize=(14,7), dpi=300)
    plot_prediction_2d(x_min, x_max, y_min, y_max, classifier=dtc, ax=ax[0])
    ax[0].set_title("Decision surface of DTC (%d)" % depth)
    # Plot also the training points
    colors = "rbg"
    for i, color in zip(dtc.classes_, colors):
        idx = np.where(y == i)
        ax[0].scatter(x_0[idx], x_1[idx], c=color,
                      edgecolor='black', s=20, linewidth=0.2)
    with plt.style.context('classic'):
        tree.plot_tree(dtc, ax=ax[1]);
    # FIX: positional tight_layout args (pad, h_pad) are deprecated/removed
    # in recent Matplotlib; pass them as keywords instead.
    plt.tight_layout(pad=0.5, h_pad=0)
    plt.show()
# + [markdown] id="0IgYhWghyUhL"
# Given fraction of samples of class $i$ as $p_i$ the Gini index is:
# $$G = 1 - \sum_i {p_i^2}, $$
#
# + id="mYpitlKczjki"
# human-readable text dump of the depth-3 tree fitted above
text_representation = tree.export_text(dtcs[2])
print(text_representation)
# + id="hRsuFUM2nlP5"
# richer dtreeviz rendering of every fitted tree
for i, dtc in enumerate(dtcs):
    viz = dtreeviz(dtc, X, y, feature_names=['x[0]', 'x[1]'])
    viz.scale=1.2
    display(viz)
# + [markdown] id="ZJK1XADt94uK"
# Additionally we can directly inspect relevance of the input features for the classification (impurity based):
# + id="MxEFpn2P9Uek"
# impurity-based importance of the two input features
plt.plot(dtcs[2].feature_importances_, '.')
plt.xlabel('feature')
plt.ylabel('importance')
plt.ylim(0, 1);
# + [markdown] id="EHZ-hHGuY5aG"
# ## 2. Random Forest
# + [markdown] id="zETTKyFmwTae"
# The `sklearn.ensemble` provides several ensemble algorithms. RandomForest is an averaging algorithm based on randomized decision trees. This means a diverse set of classifiers is created by introducing randomness in the classifier construction. The prediction of the ensemble is given as the averaged prediction of the individual classifiers.
#
# Individual decision trees typically exhibit high variance and tend to overfit.
# In random forests:
# * each tree in the ensemble is built from a sample drawn with replacement (i.e., a bootstrap sample) from the training set.
# * when splitting each node during the construction of a tree, the best split is found either from all input features or a random subset.
#
# The injected randomness in forests yield decision trees with somewhat decoupled prediction errors. By taking an average of those predictions, some errors can cancel out. Random forests achieve a reduced variance by combining diverse trees, sometimes at the cost of a slight increase in bias. In practice the variance reduction is often significant, hence yielding an overall better model.
#
# + id="s4hfPUqSZCbH"
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)  # shear the blobs so the classes overlap
# Fit forests of increasing size and visualize the decision surface.
for n_est in (1, 4, 50):
    # do fit
    rfc = ensemble.RandomForestClassifier(max_depth=4, n_estimators=n_est,)
    rfc.fit(X, y)
    # print the training scores
    print("training score : %.3f (n_est=%d)" % (rfc.score(X, y), n_est))
    # get range for visualization
    x_0 = X[:, 0]
    x_1 = X[:, 1]
    x_min = x_0.min() - 1
    x_max = x_0.max() + 1
    y_min = x_1.min() - 1
    y_max = x_1.max() + 1
    plt.figure(figsize=(10,10))
    plot_prediction_2d(x_min, x_max, y_min, y_max, classifier=rfc)
    # Plot also the training points
    colors = 'rbg'
    for i, color in enumerate(colors):
        idx = np.where(y == i)
        plt.scatter(x_0[idx], x_1[idx], c=color,
                    edgecolor='black', s=20, linewidth=0.2)
    # current axis limits (kept from the original; not used further here)
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    plt.show()
# + id="yY6EVEestysr"
plt.figure(dpi=300)
# draw one individual tree from the fitted forest
with plt.style.context('classic'):
    tree.plot_tree(rfc.estimators_[20]);
# + id="x-YP_GTWsLZI"
# dtreeviz rendering of the same tree (feature names are just indices here)
viz = dtreeviz(rfc.estimators_[20], X, y, feature_names=[f'{i}' for i in np.arange(len(X[0]))])
viz.scale=1.2
display(viz)
# + [markdown] id="TLruRG-g_pyD"
# For a forest we can also evaluate the feature importance:
# + id="Ks42i4mq-RM-"
# impurity-based importances: mean +/- std across the individual trees
importances = np.array([e.feature_importances_ for e in rfc.estimators_])
#plt.plot(importances.T, '.')
plt.bar(['0', '1'], importances.mean(axis=0), yerr=importances.std(axis=0))
plt.xlabel('feature')
plt.ylabel('importance')
plt.ylim(0, 1);
# + [markdown] id="jTNL6_57A0rq"
# Alternatively we can use permutation to study feature importance.
# It evaluates decrease in performance (model's `.score` or specified in `scoring` parameter) for each variable when its values is shuffled between samples.
# It can be time-consuming as requires re-evaluation for each feature:
# + id="UrD0XYtI_8mQ"
# permutation importance: score drop when each feature's values are shuffled
p_importances = permutation_importance(rfc, X, y, n_repeats=10, n_jobs=-1)
plt.bar(['0', '1'],
        p_importances.importances_mean,
        yerr=p_importances.importances_std)
plt.xlabel('feature')
plt.ylabel('importance')
plt.ylim(0, 1);
# + [markdown] id="puQNgKN0wS7H"
# ## 3. Boosted Decision Trees
# + [markdown] id="TkL6_R05wez3"
# Another approach to ensemble tree modeling is Boosted Decision Trees. In a boosting framework, the trees are created sequentially: each new tree reduces the error of the ensemble by fitting the residuals of the previous trees.
#
# Usually shallow trees are used in a boosting framework. Boosting primarily reduces bias, possibly at the cost of increased variance. Interpretability of this model is low.
#
# To avoid overfitting the learning rate and subsampling parameter can be tuned.
# + id="_qVKJ-YawFo9"
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
# Fit boosted ensembles of increasing size and visualize each surface.
for n_est in (1, 4, 50):
    # do fit
    dtc = ensemble.GradientBoostingClassifier(max_depth=1, n_estimators=n_est,
                                              learning_rate=0.1, subsample=0.5)
    dtc.fit(X, y)
    # print the training scores
    print("training score : %.3f (n_est=%d)" % (dtc.score(X, y), n_est))
    # get range for visualization
    x_0 = X[:, 0]
    x_1 = X[:, 1]
    x_min = x_0.min() - 1
    x_max = x_0.max() + 1
    y_min = x_1.min() - 1
    y_max = x_1.max() + 1
    plt.figure(figsize=(10,10))
    # FIX: plot the surface of the boosted classifier `dtc`; previously this
    # passed `rfc` (the random forest from the section above), so the plots
    # never showed the boosted model at all.
    plot_prediction_2d(x_min, x_max, y_min, y_max, classifier=dtc)
    plt.title(f'Decision surface of DTC ({n_est})')
    plt.axis('tight')
    # Plot also the training points
    colors = 'rbg'
    for i, color in enumerate(colors):
        idx = np.where(y == i)
        plt.scatter(x_0[idx], x_1[idx], c=color,
                    edgecolor='black', s=20, linewidth=0.2)
    # current axis limits (kept from the original; not used further here)
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    plt.show()
# + [markdown] id="vhhycm2S6wbz"
# ## EXERCISE 3 : Random forest classifier for FMNIST
#
# + id="I20uBLiMoURH"
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# flatten each 28x28 image into a 784-long feature vector for scikit-learn
n = len(train_labels)
x_train = train_images.reshape((n, -1))
y_train = train_labels
n_test = len(test_labels)
x_test = test_images.reshape((n_test, -1))
y_test = test_labels
# + [markdown] id="Jd59TNBWfHgX"
# Classify fashion MNIST images with Random Forest classifier.
# + id="8waJCx33peIG"
# 1. Create classifier. As the number of features is big (784), use bigger tree
# depth (max_depth parameter), try in range 10-500.
# 2. What is the maximum number of leaves in tree of depth n?
# To reduce variance we should avoid leaves with too few samples. You could
# limit the total number of tree leaves (max_leaf_nodes parameter) to 10-1000.
# Alternatively you can use min_samples_split & min_samples_leaf
# 3. Try different number of estimators (n_estimators)
# 4. Fit the model
# 5. Inspect training and test accuracy
# + [markdown] id="0dG6U6s3T95t"
# ## EXERCISE 4: Random forest regression
#
# + id="DlypA1MaT95u"
# cleaned features/target plus their DataFrame views; 80/20 train-test split
X, y, (df_x, df_y) = house_prices_dataset(return_df_xy=True)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# + [markdown] id="FYTwkoSpT95u"
# Predict the house prices.
# + id="V4wobzB_T95u"
# 1. Create regressor. (ensemble.RandomForestRegressor)
# Try different number of estimators (n_estimators)
# 2. Fit the model
# 3. Inspect training and test accuracy
# 4. Try to improve performance by adjusting hyperparameters.
# How does it compare to linear model?
# 5. Use dtreeviz to visualize a tree from the ensemble
# 6. Study the feature importance
|
Course_1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Massachusetts Eviction Data and Housing Court Statistics
# import code block
from selenium import webdriver
from bs4 import BeautifulSoup
import zipfile
import pandas as pd
from io import StringIO
import requests
import csv
import time
import random
import matplotlib.pyplot as plt
from urllib.request import urlopen
import re
from pandas import ExcelWriter
# ## Get all the links for dates and store them in a list
# +
# Fetch the index page listing the weekly eviction-filings reports.
html = urlopen('https://masslandlords.net/policy/eviction-data/?ct=t%28Event+Webinar+2021-01-22+T-21+v1%29')
bs = BeautifulSoup(html, 'html.parser')
# collect all of the dates urls into one list
# NOTE(review): the trailing '-*' in the pattern means "zero or more dashes",
# not a wildcard; the match works because the pattern is an unanchored search
urls = bs.find_all('a',{'href':re.compile('https://masslandlords.net/policy/eviction-data/filings-week-ending-*')})
# clean urls of unnecessary attribute data - only need href
urls = [url['href'] for url in urls]
# we want the urls for the first 6 weeks of data, let's verify we have them by printing them out
# Should be weeks 10/24 to 11/28 in 2020
print(urls[0:6])
# -
# ## Define some functions so we don't have to rewrite code to scrape each week data
# +
def getSoup(url):
    """Download *url* and return the text of its monospace data block.

    The weekly eviction figures are published as preformatted text inside
    <section id="main-content"> / <p class="monospace">; only that text is
    returned, the rest of the page is discarded.
    """
    page = BeautifulSoup(urlopen(url), 'html.parser')
    main = page.find('section', id='main-content')
    return main.find('p', class_='monospace').get_text()
def getSections(soup):
    """Split the scraped page text into its 16 logical sections.

    The page body uses '--' as the top-level delimiter, giving six chunks
    (a..f).  The third chunk holds eleven sub-tables separated by
    triple-CRLF runs; these are unpacked individually so each one can be
    converted to a DataFrame on its own.

    returns: tuple (a, b, c1..c11, d, e, f) of raw text sections
    """
    a, b, c, d, e, f = soup.split('--')
    # trim exactly one leading CRLF and one trailing triple-CRLF from the
    # middle chunk before splitting it into its eleven sub-tables
    c = re.sub('^\r\n', '', c, count=1)
    c = re.sub('\r\n\r\n\r\n$', '', c, count=1)
    c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11 = c.split('\r\n\r\n\r\n')
    return a, b, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, d, e, f
def convertToDf(htmlTable,cNumSection):
    """Convert one plain-text table block into a pandas DataFrame.

    param: htmlTable - raw text of one table ("block"): rows separated by
           CRLF, columns by runs of spaces
           cNumSection - int index of the 'c' sub-section being converted;
           it selects which row holds the column headers and where the data
           rows start
    returns: pandas DataFrame with string cell values

    NOTE: the per-section indices below work for most weeks, but small
    formatting differences between weeks may shift a table by a column;
    adjust the values here if a table comes out misaligned.
    """
    # (first data row, header row) per section.  The original code listed
    # sections 8 and 3/5/7/9 in separate branches with values identical to
    # the default, and carried an unused endIndex variable; both removed.
    if cNumSection in [1,2,4,10,11]:
        startIndex, columnIndex = 2, 1
    elif cNumSection == 6:
        startIndex, columnIndex = 4, 3
    else:
        # sections 3, 5, 7, 8, 9 and anything else
        startIndex, columnIndex = 1, 0
    # remove a single newline at the beginning of the table "block"
    htmlTable = re.sub('^\r\n', '', htmlTable, count=1)
    # split the table into rows, then each row into columns on runs of spaces
    rows = htmlTable.split('\r\n')
    rows = [re.split(' +', row) for row in rows]
    # load the data into a dataframe
    df = pd.DataFrame(rows[startIndex:], columns=rows[columnIndex])
    return df
def createCsv(dataFrames, sheetNames, currUrlIndex):
    """Write the week's DataFrames into one Excel workbook, one table per sheet.

    param: dataFrames - list of DataFrames to write
           sheetNames - sheet name for each DataFrame (same length/order)
           currUrlIndex - index into the module-level `urls` list; the output
                          file name is derived from that week's URL
    NOTE(review): despite the name this writes .xlsx, not .csv, and the
    output directory below is a hard-coded local path - adjust per machine.
    NOTE(review): repeated sheet names are intended to land two tables on one
    sheet (hence the startrow=6 offset), but pandas may instead create
    renamed sheets for duplicates - confirm against the produced workbook.
    """
    # name file: strip slashes from the URL and keep the tail
    # ("week-ending-YYYY-MM-DD...") starting at character 50
    name=re.sub('\/', '', urls[currUrlIndex])
    name=name[50:]
    print(name)
    # Create a Pandas Excel writer using XlsxWriter as the engine.
    # NOTE: Update the below path to your computer's
    writer = pd.ExcelWriter(r'/Users/gonzo/Documents/AP/AP-Evictions-Tracker/'+name+".xlsx")
    for i in range(len(dataFrames)):
        # frames 1, 3, 5 reuse the previous frame's sheet name and are
        # pushed down 6 rows so the two tables do not overlap
        if i in [1,3,5]:
            dataFrames[i].to_excel(writer, sheet_name=sheetNames[i],startrow=6 , startcol=0,index=False)
        else:
            dataFrames[i].to_excel(writer, sheet_name=sheetNames[i],startrow=0 , startcol=0,index=False)
    #close the Pandas Excel writer and output the Excel file
    writer.save()
# -
# # week-ending-2020-10-24
#
soup = getSoup(urls[0])
a, b, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, d, e, f = getSections(soup)
# +
# NOTE: section 'd' is formatted differently than the 'c' sections, so it is
# parsed by hand here instead of through convertToDf().
# Strip the single leading newline and the trailing triple newline, then keep
# the second double-newline-separated "block" (the county table).
d = re.sub('^\r\n', '', d, count=1)
d = re.sub('\r\n\r\n\r\n$', '', d, count=1)
county_table = re.sub('^\r\n', '', d.split('\r\n\r\n')[1], count=1)
# Rows are separated by CRLF; columns by runs of spaces.
county_rows = [re.split(' +', row) for row in county_table.split('\r\n')]
countyRentersPercent = pd.DataFrame(county_rows[1:],
                                    columns=['County', 'Households', '% Renters'])
# +
# Create dataframes for the tables in 'c' (the section number selects the
# header/row offsets inside convertToDf).
courts = convertToDf(c1, 1)
partyTypeNum = convertToDf(c2, 2)
partyTypePercent = convertToDf(c3, 3)
plaintiffRepNum = convertToDf(c4, 4)
plaintiffRepPercent = convertToDf(c5, 5)
defendantRepNum = convertToDf(c6, 6)
defendantRepPercent = convertToDf(c7, 7)
numAdultsHouseholds = convertToDf(c8, 8)
initiatingAction = convertToDf(c9, 9)
ratePer100k = convertToDf(c10, 10)
ratePer100kRenters = convertToDf(c11, 11)
# Show every table for visual inspection.
for frame in (courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent):
    display(frame)
# -
# ## Write to CSV
# +
dataFrames = [courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent]
sheetNames = ['Courts', 'Party Type', 'Party Type', 'Plaintiff Representation',
              'Plaintiff Representation', 'Defendant Representation',
              'Defendant Representation', 'Number of Adults in Households',
              'Initiating Action', 'Municipality rate per 100k',
              'County rate per 100k', 'County Households % Renters']
createCsv(dataFrames, sheetNames, 0)
# -
# # week-ending-2020-10-31
# +
soup = getSoup(urls[1])
a, b, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, d, e, f = getSections(soup)
# NOTE: section 'd' is formatted differently than the 'c' sections, so it is
# parsed by hand here instead of through convertToDf().
# Strip the single leading newline and the trailing triple newline, then keep
# the second double-newline-separated "block" (the county table).
d = re.sub('^\r\n', '', d, count=1)
d = re.sub('\r\n\r\n\r\n$', '', d, count=1)
county_table = re.sub('^\r\n', '', d.split('\r\n\r\n')[1], count=1)
# Rows are separated by CRLF; columns by runs of spaces.
county_rows = [re.split(' +', row) for row in county_table.split('\r\n')]
countyRentersPercent = pd.DataFrame(county_rows[1:],
                                    columns=['County', 'Households', '% Renters'])
# Create dataframes for the tables in 'c' (the section number selects the
# header/row offsets inside convertToDf).
courts = convertToDf(c1, 1)
partyTypeNum = convertToDf(c2, 2)
partyTypePercent = convertToDf(c3, 3)
plaintiffRepNum = convertToDf(c4, 4)
plaintiffRepPercent = convertToDf(c5, 5)
defendantRepNum = convertToDf(c6, 6)
defendantRepPercent = convertToDf(c7, 7)
numAdultsHouseholds = convertToDf(c8, 8)
initiatingAction = convertToDf(c9, 9)
ratePer100k = convertToDf(c10, 10)
ratePer100kRenters = convertToDf(c11, 11)
# Show every table for visual inspection.
for frame in (courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent):
    display(frame)
# +
dataFrames = [courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent]
sheetNames = ['Courts', 'Party Type', 'Party Type', 'Plaintiff Representation',
              'Plaintiff Representation', 'Defendant Representation',
              'Defendant Representation', 'Number of Adults in Households',
              'Initiating Action', 'Municipality rate per 100k',
              'County rate per 100k', 'County Households % Renters']
createCsv(dataFrames, sheetNames, 1)
# -
# # week-ending-2020-11-07
#
# +
soup = getSoup(urls[2])
a, b, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, d, e, f = getSections(soup)
# NOTE: section 'd' is formatted differently than the 'c' sections, so it is
# parsed by hand here instead of through convertToDf().
# Strip the single leading newline and the trailing triple newline, then keep
# the second double-newline-separated "block" (the county table).
d = re.sub('^\r\n', '', d, count=1)
d = re.sub('\r\n\r\n\r\n$', '', d, count=1)
county_table = re.sub('^\r\n', '', d.split('\r\n\r\n')[1], count=1)
# Rows are separated by CRLF; columns by runs of spaces.
county_rows = [re.split(' +', row) for row in county_table.split('\r\n')]
countyRentersPercent = pd.DataFrame(county_rows[1:],
                                    columns=['County', 'Households', '% Renters'])
# Create dataframes for the tables in 'c' (the section number selects the
# header/row offsets inside convertToDf).
courts = convertToDf(c1, 1)
partyTypeNum = convertToDf(c2, 2)
partyTypePercent = convertToDf(c3, 3)
plaintiffRepNum = convertToDf(c4, 4)
plaintiffRepPercent = convertToDf(c5, 5)
defendantRepNum = convertToDf(c6, 6)
defendantRepPercent = convertToDf(c7, 7)
numAdultsHouseholds = convertToDf(c8, 8)
initiatingAction = convertToDf(c9, 9)
ratePer100k = convertToDf(c10, 10)
ratePer100kRenters = convertToDf(c11, 11)
# Show every table for visual inspection.
for frame in (courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent):
    display(frame)
# +
dataFrames = [courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent]
sheetNames = ['Courts', 'Party Type', 'Party Type', 'Plaintiff Representation',
              'Plaintiff Representation', 'Defendant Representation',
              'Defendant Representation', 'Number of Adults in Households',
              'Initiating Action', 'Municipality rate per 100k',
              'County rate per 100k', 'County Households % Renters']
createCsv(dataFrames, sheetNames, 2)
# -
# # week-ending-2020-11-14
#
# +
soup = getSoup(urls[3])
a, b, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, d, e, f = getSections(soup)
# NOTE: section 'd' is formatted differently than the 'c' sections, so it is
# parsed by hand here instead of through convertToDf().
# Strip the single leading newline and the trailing triple newline, then keep
# the second double-newline-separated "block" (the county table).
d = re.sub('^\r\n', '', d, count=1)
d = re.sub('\r\n\r\n\r\n$', '', d, count=1)
county_table = re.sub('^\r\n', '', d.split('\r\n\r\n')[1], count=1)
# Rows are separated by CRLF; columns by runs of spaces.
county_rows = [re.split(' +', row) for row in county_table.split('\r\n')]
countyRentersPercent = pd.DataFrame(county_rows[1:],
                                    columns=['County', 'Households', '% Renters'])
# Create dataframes for the tables in 'c' (the section number selects the
# header/row offsets inside convertToDf).
courts = convertToDf(c1, 1)
partyTypeNum = convertToDf(c2, 2)
partyTypePercent = convertToDf(c3, 3)
plaintiffRepNum = convertToDf(c4, 4)
plaintiffRepPercent = convertToDf(c5, 5)
defendantRepNum = convertToDf(c6, 6)
defendantRepPercent = convertToDf(c7, 7)
numAdultsHouseholds = convertToDf(c8, 8)
initiatingAction = convertToDf(c9, 9)
ratePer100k = convertToDf(c10, 10)
ratePer100kRenters = convertToDf(c11, 11)
# Show every table for visual inspection.
for frame in (courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent):
    display(frame)
# +
dataFrames = [courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent]
sheetNames = ['Courts', 'Party Type', 'Party Type', 'Plaintiff Representation',
              'Plaintiff Representation', 'Defendant Representation',
              'Defendant Representation', 'Number of Adults in Households',
              'Initiating Action', 'Municipality rate per 100k',
              'County rate per 100k', 'County Households % Renters']
createCsv(dataFrames, sheetNames, 3)
# -
# # week-ending-2020-11-21
# +
soup = getSoup(urls[4])
a, b, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, d, e, f = getSections(soup)
# NOTE: section 'd' is formatted differently than the 'c' sections, so it is
# parsed by hand here instead of through convertToDf().
# Strip the single leading newline and the trailing triple newline, then keep
# the second double-newline-separated "block" (the county table).
d = re.sub('^\r\n', '', d, count=1)
d = re.sub('\r\n\r\n\r\n$', '', d, count=1)
county_table = re.sub('^\r\n', '', d.split('\r\n\r\n')[1], count=1)
# Rows are separated by CRLF; columns by runs of spaces.
county_rows = [re.split(' +', row) for row in county_table.split('\r\n')]
countyRentersPercent = pd.DataFrame(county_rows[1:],
                                    columns=['County', 'Households', '% Renters'])
# Create dataframes for the tables in 'c' (the section number selects the
# header/row offsets inside convertToDf).
courts = convertToDf(c1, 1)
partyTypeNum = convertToDf(c2, 2)
partyTypePercent = convertToDf(c3, 3)
plaintiffRepNum = convertToDf(c4, 4)
plaintiffRepPercent = convertToDf(c5, 5)
defendantRepNum = convertToDf(c6, 6)
defendantRepPercent = convertToDf(c7, 7)
numAdultsHouseholds = convertToDf(c8, 8)
initiatingAction = convertToDf(c9, 9)
ratePer100k = convertToDf(c10, 10)
ratePer100kRenters = convertToDf(c11, 11)
# Show every table for visual inspection.
for frame in (courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent):
    display(frame)
# +
dataFrames = [courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent]
sheetNames = ['Courts', 'Party Type', 'Party Type', 'Plaintiff Representation',
              'Plaintiff Representation', 'Defendant Representation',
              'Defendant Representation', 'Number of Adults in Households',
              'Initiating Action', 'Municipality rate per 100k',
              'County rate per 100k', 'County Households % Renters']
createCsv(dataFrames, sheetNames, 4)
# -
# # week-ending-2020-11-28
# +
soup = getSoup(urls[5])
a, b, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, d, e, f = getSections(soup)
# NOTE: section 'd' is formatted differently than the 'c' sections, so it is
# parsed by hand here instead of through convertToDf().
# Strip the single leading newline and the trailing triple newline, then keep
# the second double-newline-separated "block" (the county table).
d = re.sub('^\r\n', '', d, count=1)
d = re.sub('\r\n\r\n\r\n$', '', d, count=1)
county_table = re.sub('^\r\n', '', d.split('\r\n\r\n')[1], count=1)
# Rows are separated by CRLF; columns by runs of spaces.
county_rows = [re.split(' +', row) for row in county_table.split('\r\n')]
countyRentersPercent = pd.DataFrame(county_rows[1:],
                                    columns=['County', 'Households', '% Renters'])
# Create dataframes for the tables in 'c' (the section number selects the
# header/row offsets inside convertToDf).
courts = convertToDf(c1, 1)
partyTypeNum = convertToDf(c2, 2)
partyTypePercent = convertToDf(c3, 3)
plaintiffRepNum = convertToDf(c4, 4)
plaintiffRepPercent = convertToDf(c5, 5)
defendantRepNum = convertToDf(c6, 6)
defendantRepPercent = convertToDf(c7, 7)
numAdultsHouseholds = convertToDf(c8, 8)
initiatingAction = convertToDf(c9, 9)
ratePer100k = convertToDf(c10, 10)
ratePer100kRenters = convertToDf(c11, 11)
# Show every table for visual inspection.
for frame in (courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent):
    display(frame)
# +
dataFrames = [courts, partyTypeNum, partyTypePercent, plaintiffRepNum,
              plaintiffRepPercent, defendantRepNum, defendantRepPercent,
              numAdultsHouseholds, initiatingAction, ratePer100k,
              ratePer100kRenters, countyRentersPercent]
sheetNames = ['Courts', 'Party Type', 'Party Type', 'Plaintiff Representation',
              'Plaintiff Representation', 'Defendant Representation',
              'Defendant Representation', 'Number of Adults in Households',
              'Initiating Action', 'Municipality rate per 100k',
              'County rate per 100k', 'County Households % Renters']
createCsv(dataFrames, sheetNames, 5)
# -
|
MassLandlordsScraper.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # More on Widgets: Using Interact
#
# The `interact` function [`ipywidgets.interact`](https://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html) is the quickest way to create user interface controls in code. It is the easiest way to get up and running!
#
import ipywidgets as widgets
# ## Basic `interact`
#
# At the most basic level, `interact` autogenerates controls for function arguments, and then calls the function with those arguments when you manipulate the controls interactively. To use `interact`, you need to define a function that you want to explore.
def f(x):
    """Return `x` multiplied by 3."""
    return x * 3
# When you pass this function as the first argument to `interact` along with an integer keyword argument (`x=10`), a slider is generated and bound to the function parameter.
widgets.interact(f, x=10);
# If you pass `True` or `False`, `interact` will generate a checkbox:
widgets.interact(f, x=True);
# If you pass a string, `interact` will generate a `Text` field.
widgets.interact(f, x='Hello!');
# `interact` can also be used as a decorator. This allows you to define a function and interact with it in a single shot. As this example shows, `interact` also works with functions that have multiple arguments.
# `interact` used as a decorator: defines g and immediately builds controls
# for it (a checkbox for x=True, a float slider for y=1.0).
@widgets.interact(x=True, y=1.0)
def g(x, y):
    # Return both widget values as a tuple so they are displayed together.
    return (x, y)
# ### Fixing arguments with `fixed`
#
# There are times when you may want to explore a function using `interact`, but fix one or more of its arguments to specific values. This can be accomplished by wrapping values with the `fixed` function.
def h(p, q):
    """Return the two arguments packed into a tuple."""
    return p, q
widgets.interact(h, p=5, q=widgets.fixed(20));
# ### Widget abbreviations
#
# When you pass an integer-valued keyword argument of `10` (`x=10`) to `interact`, it generates an integer-valued slider control with a range of `[-10,+3*10]`. In this case, `10` is an *abbreviation* for an actual slider widget:
#
# ```python
# IntSlider(min=-10,max=30,step=1,value=10)
# ```
#
# In fact, we can get the same result if we pass this `IntSlider` as the keyword argument for `x`:
widgets.interact(
f, x=widgets.IntSlider(min=-10, max=30, step=1, value=10)
);
# This examples clarifies how `interact` proceses its keyword arguments:
#
# 1. If the keyword argument is a `Widget` instance with a `value` attribute, that widget is used. Any widget with a `value` attribute can be used, even custom ones.
# 2. Otherwise, the value is treated as a *widget abbreviation* that is converted to a widget before it is used.
#
# The following table gives an overview of different widget abbreviations:
#
# <table class="table table-condensed table-bordered">
# <tr><td><strong>Keyword argument</strong></td><td><strong>Widget</strong></td></tr>
# <tr><td>`True` or `False`</td><td>Checkbox</td></tr>
# <tr><td>`'Hi there'`</td><td>Text</td></tr>
# <tr><td>`value` or `(min,max)` or `(min,max,step)` if integers are passed</td><td>IntSlider</td></tr>
# <tr><td>`value` or `(min,max)` or `(min,max,step)` if floats are passed</td><td>FloatSlider</td></tr>
# <tr><td>`['orange','apple']` or `[('one', 1), ('two', 2)]`</td><td>Dropdown</td></tr>
# </table>
# Note that a dropdown is used if a list or a list of tuples is given (signifying discrete choices), and a slider is used if a tuple is given (signifying a range).
# You have seen how the checkbox and textarea widgets work above. Here, more details about the different abbreviations for sliders and dropdowns are given.
#
# If a 2-tuple of integers is passed `(min,max)`, an integer-valued slider is produced with those minimum and maximum values (inclusively). In this case, the default step size of `1` is used.
widgets.interact(f, x=(0, 4));
# A `FloatSlider` is generated if any of the values are floating point. The step size can be changed by passing a third element in the tuple.
widgets.interact(f, x=(0., 10, 1));
# For both integer and float-valued sliders, you can pick the initial value of the widget by passing a default keyword argument to the underlying Python function. Here we set the initial value of a float slider to `5.5`.
# Decorator form with a (min, max, step) float tuple; the function's default
# argument (x=5.5) sets the slider's initial value.
@widgets.interact(x=(0.0, 20.0, 0.5))
def h(x=5.5):
    return x
# Dropdown menus are constructed by passing a list of strings. In this case, the strings are both used as the names in the dropdown menu UI and passed to the underlying Python function.
widgets.interact(f, x=['apples','oranges']);
# If you want a dropdown menu that passes non-string values to the Python function, you can pass a list of tuples of the form `('label', value)`. The first items are the names in the dropdown menu UI and the second items are values that are the arguments passed to the underlying Python function.
widgets.interact(f, x=[('one', 10), ('two', 20)]);
# ## `interactive`
#
# `interactive` is useful when you want to reuse the widgets that are produced or access the data that is bound to the UI controls.
#
# Note that unlike `interact`, the return value of the function will not be displayed automatically, but you can display a value inside the function with `IPython.display.display`.
# +
from IPython.display import display
def f(a, b):
    """Display the sum of `a` and `b`, then return it."""
    total = a + b
    display(total)
    return total
# -
# Unlike `interact`, `interactive` returns a `Widget` instance rather than immediately displaying the widget.
w = widgets.interactive(f, a=10, b=20)
w
# The widget is an `interactive`, a subclass of `VBox`, which is a container for other widgets.
type(w)
# The children of the `interactive` are two integer-valued sliders and an output widget, produced by the widget abbreviations above.
w.children
# To actually display the widgets, you can use IPython's `display` function.
display(w)
# At this point, the UI controls work just like they would if `interact` had been used. You can manipulate them interactively and the function will be called. However, the widget instance returned by `interactive` also gives you access to the current keyword arguments and return value of the underlying Python function.
#
# Here are the current keyword arguments. If you rerun this cell after manipulating the sliders, the values will have changed.
w.kwargs
# Here is the current return value of the function.
w.result
# ## Basic interactive plot
#
# Though the examples so far in this notebook had very basic output, more interesting possibilities are straightforward.
#
# The function below plots a straight line whose slope and intercept are given by its arguments.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def f(m, b):
    """Plot the line y = m*x + b on figure 2 over x in [-10, 10]."""
    plt.figure(2)
    xs = np.linspace(-10, 10, num=1000)
    plt.plot(xs, m * xs + b)
    plt.ylim(-5, 5)
    plt.show()
# -
# The interactive below displays a line whose slope and intercept is set by the sliders. Note that if the variable containing the widget, `interactive_plot`, is the last thing in the cell it is displayed.
interactive_plot = widgets.interactive(f, m=(-2.0, 2.0), b=(-3, 3, 0.5))
interactive_plot
# ## Disabling continuous updates
#
# When interacting with long running functions, or even with short functions whose results take some time to display, realtime feedback is a burden instead of being helpful. You might have noticed the output of some of the widgets above "flickering" as you adjusted the controls. By default, `interact` and `interactive` call the function for every update of the widget's value.
#
# There are two ways to mitigate this. You can either only execute on demand, or restrict execution to mouse release events.
# ### `interact_manual`
#
# The `interact_manual` function provides a variant of interaction that allows you to restrict execution so it is only done on demand. A button is added to the interact controls that allows you to trigger an execute event.
# +
def slow_function(i):
    """Sleep for 1 second then print the argument."""
    import time
    print('Sleeping...')
    time.sleep(1)
    print(i)
widgets.interact_manual(slow_function,i=widgets.FloatSlider(min=1e4, max=1e6, step=1e4));
# -
# You can do the same thing with `interactive` by using the a `dict` as the second argument, as shown below.
foo = widgets.interactive(
slow_function, {'manual': True},
i=widgets.FloatSlider(min=1e4, max=1e6, step=1e4)
)
foo
# ### `continuous_update`
#
# If you are using slider widgets, you can set the `continuous_update` kwarg to `False`. `continuous_update` is a keyword argument of slider widgets that restricts executions to mouse release events.
#
# In ipywidgets 7, the `Text` and `Textarea` controls also have a `continuous_update` argument.
#
# The first example below provides the `continuous_update` argument when the widget is created.
widgets.interact(
slow_function,
i=widgets.FloatSlider(min=1e4, max=1e6, step=5e4, continuous_update=False)
);
# ## Some details on widgets
#
# A Python widget is an object that represents a control on the front end, like a slider. A single control can be displayed multiple times - they all represent the same python object.
# +
slider = widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='$\phi$',
)
slider
# -
slider
# The control attributes, like its value, are automatically synced between the frontend and the kernel.
slider.value
slider.value = 8
# You can trigger actions in the kernel when a control value changes by "observing" the value. Here we set a global variable when the slider value changes.
square = slider.value * slider.value
def handle_change(change):
    """Slider observer: store the square of the new slider value in the
    module-level variable `square`."""
    global square
    new_value = change.new
    square = new_value * new_value
slider.observe(handle_change, 'value')
square
# You can link control attributes and lay them out together.
text = widgets.FloatText(description='Value')
widgets.link((slider, 'value'), (text, 'value'))
widgets.VBox([slider, text])
# ## For more information:
#
# For more extended examples of `interact` and `interactive`, see [the example in the ipywidgets source repository](https://github.com/jupyter-widgets/ipywidgets/blob/master/docs/source/examples/Index.ipynb).
|
3-more-on-widgets.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# + [markdown] editable=true
# ## Hyperparameter Tuning in SageMaker
# + editable=true papermill={"duration": 1.250042, "end_time": "2021-06-04T00:19:10.040468", "exception": false, "start_time": "2021-06-04T00:19:08.790426", "status": "completed"} tags=[]
# !pip install torchvision
import sagemaker
from sagemaker.tuner import (
IntegerParameter,
CategoricalParameter,
ContinuousParameter,
HyperparameterTuner,
)
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()
prefix = "sagemaker/DEMO-pytorch-cifar"
role = sagemaker.get_execution_role()
# + editable=true papermill={"duration": 349.34758, "end_time": "2021-06-04T00:24:59.422559", "exception": false, "start_time": "2021-06-04T00:19:10.074979", "status": "completed"} tags=[]
# Download CIFAR-10 into ./data, pointing the torchvision mirror list at the
# SageMaker sample-files S3 bucket instead of the default mirrors.
from torchvision.datasets import CIFAR10
from torchvision import transforms

local_dir = 'data'
CIFAR10.mirrors = ["https://sagemaker-sample-files.s3.amazonaws.com/datasets/image/CIFAR10/"]
# Instantiating with download=True fetches and unpacks the archive; the
# dataset object itself is discarded — only the files on disk are needed.
CIFAR10(
    local_dir,
    download=True,
    transform=transforms.Compose(
        [transforms.ToTensor()]
    )
)
# + editable=true papermill={"duration": 2.242278, "end_time": "2021-06-04T00:25:01.708498", "exception": false, "start_time": "2021-06-04T00:24:59.466220", "status": "completed"} tags=[]
# TODO: Upload the data to an S3 bucket. You can use the sagemaker_session object, boto3 or the AWS CLI
# Upload the local ./data directory to s3://<default bucket>/<prefix>.
inputs = sagemaker_session.upload_data(path="data", bucket=bucket, key_prefix=prefix)
print("input spec (in this case, just an S3 path): {}".format(inputs))
# + editable=true papermill={"duration": 0.137601, "end_time": "2021-06-04T00:25:02.795452", "exception": true, "start_time": "2021-06-04T00:25:02.657851", "status": "failed"} tags=[]
# Configure a SageMaker PyTorch training job; cifar.py is the entry-point
# training script (lives alongside this notebook, not shown here).
from sagemaker.pytorch import PyTorch

estimator = PyTorch(
    entry_point="cifar.py",
    role=role,
    py_version='py36',
    framework_version="1.8",
    instance_count=1,
    instance_type="ml.m5.large"
)
# + editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# Search space: continuous learning rate, categorical batch size, and a small
# integer range of epochs.
hyperparameter_ranges = {
    "lr": ContinuousParameter(0.001, 0.1),
    "batch-size": CategoricalParameter([32, 64, 128, 256, 512]),
    "epochs": IntegerParameter(2, 4)
}#TODO: Initialise your hyperparameters
# + editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# The tuner minimizes the average test loss, scraped from each training job's
# logs with the regex below.
objective_metric_name = "average test loss"
objective_type = "Minimize"
metric_definitions = [{"Name": "average test loss", "Regex": "Test set: Average loss: ([0-9\\.]+)"}]
# + editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# Run at most 4 training jobs total, 2 in parallel.
tuner = HyperparameterTuner(
    estimator,
    objective_metric_name,
    hyperparameter_ranges,
    metric_definitions,
    max_jobs=4,
    max_parallel_jobs=2,
    objective_type=objective_type,
)#TODO: Create your HyperparameterTuner Object
# + editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# Launch the tuning job; "training" is the input channel name the training
# script reads from.
tuner.fit({"training": inputs})#TODO: Train your model
# + editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
predictor = tuner.deploy(initial_instance_count=1, instance_type="ml.t2.medium")
# + [markdown] editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Query the Endpoint
# + editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
import gzip
import numpy as np
import random
import os
file = 'data/cifar-10-batches-py/data_batch_1'
def unpickle(file):
    """Load and return one pickled object from the path `file`,
    decoding keys/values as bytes (CIFAR-10 batch format)."""
    import pickle
    with open(file, 'rb') as handle:
        return pickle.load(handle, encoding='bytes')
data=unpickle(file)
data=np.reshape(data[b'data'][0], (3, 32, 32))
# + editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# Query the deployed endpoint with one image.
# NOTE(review): axis=1 turns the (3, 32, 32) image into shape (3, 1, 32, 32) —
# confirm this matches the input layout the deployed model expects.
response = predictor.predict(np.expand_dims(data, axis=1))
print("Raw prediction result:")
print(response)
print()

# Pair each of the 10 CIFAR-10 class indexes with its predicted score.
labeled_predictions = list(zip(range(10), response[0]))
print("Labeled predictions: ")
print(labeled_predictions)
print()

# Sort descending by score so the most likely class comes first.
labeled_predictions.sort(key=lambda label_and_prob: 1.0 - label_and_prob[1])
print("Most likely answer: {}".format(labeled_predictions[0]))
# + [markdown] editable=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ### Cleanup
#
# After you have finished with this exercise, remember to delete the prediction endpoint to release the instance associated with it
# -
tuner.delete_endpoint()
|
hpo-deploy-3/hpo_deploy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# # Extracting time course from source_estimate object
#
#
# Load a SourceEstimate object from stc files and
# extract the time course of activation in
# individual labels, as well as in a complex label
# formed through merging two labels.
#
#
#
# +
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)

import os

import mne
from mne.datasets import sample
import matplotlib.pyplot as plt

print(__doc__)

data_path = sample.data_path()
os.environ['SUBJECTS_DIR'] = data_path + '/subjects'
meg_path = data_path + '/MEG/sample'

# read the source estimate from the stc files
stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')

# read the auditory-cortex labels for both hemispheres
aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')

# restrict the source estimate to each label (and to the merged label)
stc_lh = stc.in_label(aud_lh)
stc_rh = stc.in_label(aud_rh)
stc_bh = stc.in_label(aud_lh + aud_rh)

# centers of mass transformed to MNI coordinates (hemi 0 = left, 1 = right)
vtx, _, t_lh = stc_lh.center_of_mass('sample')
mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0]
vtx, _, t_rh = stc_rh.center_of_mass('sample')
mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0]

# plot the mean activation of each label selection
plt.figure()
plt.axes([.1, .275, .85, .625])
handles = []
for label_stc, color in ((stc_lh, 'b'), (stc_rh, 'g'), (stc_bh, 'r')):
    handles.append(plt.plot(stc.times, label_stc.data.mean(0), color)[0])
hl, hr, hb = handles
plt.xlabel('Time (s)')
plt.ylabel('Source amplitude (dSPM)')
plt.xlim(stc.times[0], stc.times[-1])

# legend including the center-of-mass MNI coordinates
labels = ['LH: center of mass = %s' % mni_lh.round(2),
          'RH: center of mass = %s' % mni_rh.round(2),
          'Combined LH & RH']
plt.figlegend([hl, hr, hb], labels, 'lower center')
plt.suptitle('Average activation in auditory cortex labels', fontsize=20)
plt.show()
|
0.14/_downloads/plot_label_activation_from_stc.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercice 3
# ## Exercice 3.1
import os
import glob
def sequence_parser(fasta):
    """Parse a single-record FASTA file.

    input: path to a FASTA file
    output: tuple (descriptor, sequence) — descriptor is the first line of
    the file (trailing newline kept, matching readlines()), sequence is the
    remaining lines stripped of trailing whitespace and concatenated.
    """
    with open(fasta, 'r') as handle:
        descriptor = handle.readline()
        sequence = ''.join(line.rstrip() for line in handle)
    return descriptor, sequence
descr_salmo,sequence_salmo =sequence_parser('data/salmonella_spi1_region.fna')
print(descr_salmo)
sequence_salmo[:1000]
# ## Exercice 3.2
#
# First, we need to copy and paste the NEB file in a text editor, and put the extension .fna
fasta_phage = 'data/lambda.fna'
descr_phage,sequence_phage = sequence_parser(fasta_phage)
sequence_phage[:1000]
def restriction_sites(seq, recog_seq):
    """Return the 0-based positions of every occurrence of a recognition site.

    Parameters
    ----------
    seq : str
        Genome sequence to scan.
    recog_seq : str
        Enzyme recognition sequence.

    Returns
    -------
    list of int
        Index of the first base of each match, in ascending order.
    """
    recog_index = []
    seq_length = len(seq)
    recog_length = len(recog_seq)
    # + 1 so a site ending exactly at the last base is found; the original
    # range stopped one position short and missed such sites.
    for i in range(0, seq_length - recog_length + 1):
        if recog_seq == seq[i:i + recog_length]:
            recog_index.append(i)
    return recog_index
# ?restriction_sites
# +
HindIII = 'AAGCTT'
EcoRI = 'GAATTC'
KpnI = 'GGTACC'
HIII = restriction_sites(sequence_phage,HindIII)
EcI = restriction_sites(sequence_phage,EcoRI)
KpI = restriction_sites(sequence_phage,KpnI)
print(HIII)
print(EcI)
print(KpI)
# -
sequence_phage[37583:37583+len(HindIII)] == HindIII
# # Exercice 4
#
# ## exercice 4.1
seq = 'ATGACTACGT'
block_size = 4
def seq_block(seq, block_size):
    """Split seq into consecutive blocks of exactly block_size characters.

    Trailing characters that do not fill a complete block are dropped.
    """
    n_complete = len(seq) // block_size
    return [seq[j * block_size:(j + 1) * block_size]
            for j in range(n_complete)]
seq_block(seq,block_size)
# +
def gc_block(seq, block_size):
    """Return the GC fraction of each complete block_size-long block of seq.

    Trailing bases that do not fill a complete block are ignored.
    Case-insensitive: the sequence is uppercased before counting.
    """
    gc = []
    # str.upper() returns a new string; the original discarded the result,
    # so lowercase 'g'/'c' were never counted.
    seq = seq.upper()
    for i in range(0, len(seq), block_size):
        block = seq[i:i + block_size]
        if len(block) == block_size:
            gc.append((block.count('G') + block.count('C')) / block_size)
    return tuple(gc)
# -
gc_block(seq,block_size)
def gc_map(seq, block_size, gc_thresh):
    """Return seq with low-GC blocks lowercased and high-GC blocks uppercased.

    Blocks of block_size bases whose GC fraction is below gc_thresh are
    rendered in lowercase; all other blocks in uppercase.  Trailing bases
    that do not fill a complete block are dropped (seq_block discards them).
    """
    # str.upper() is not in-place; the original discarded the result, so
    # mixed-case input leaked its original casing into the output.
    seq = seq.upper()
    bl = ''
    gc_content = gc_block(seq, block_size)
    seq_blocks = seq_block(seq, block_size)
    for i, frac in enumerate(gc_content):
        if frac < gc_thresh:
            bl += seq_blocks[i].lower()
        else:
            bl += seq_blocks[i]
    return bl
gc_map(seq, block_size, 0.4)
salmo_map = gc_map(sequence_salmo,1000,0.45)
20000//60
# +
length_salmo =len(sequence_salmo)
print(length_salmo)
salmo_ = sequence_salmo.split(' ',60)
#if os.path.isfile('salmo_map.txt'):
# raise RuntimeError('salmo_map.txt already exists.')
#with open('salmo_map.txt','w') as f:
# f.write(salmo_map)
# -
# ## Exercice 4.2
#
#we work with codons so we might need codons conversion
import bootcamp_utils
seq = 'GGATGATGATGTAAAAC'
seq.find('TAA')
def all_start(seq):
    """Return every index at which the start codon ATG occurs in seq.

    Overlapping occurrences are included, because each search resumes one
    position past the previous hit.
    """
    positions = []
    offset = -1
    while True:
        offset = seq.find('ATG', offset + 1)
        if offset < 0:
            return positions
        positions.append(offset)
def first_stop1(seq):
    """Return the index one past the first frame-0 stop codon in seq.

    Walks the sequence codon by codon from position 0.  If no stop codon
    is found, the returned value is one codon past the last position
    examined (it can therefore exceed len(seq)).
    """
    stop_codons = ('TAA', 'TAG', 'TGA')
    pos = 0
    while pos < len(seq) - 2:
        if seq[pos:pos + 3] in stop_codons:
            break
        pos += 3
    # One past the last base of the stop codon.
    return pos + 3
# +
def first_stop(seq):
    """Return the index one past the first stop codon found in seq.

    Searches (frame-insensitively) for the earliest occurrence of TAA,
    TAG or TGA and returns the index just past its last base, or -1 when
    no stop codon is present.  The original body was left unfinished
    (`first =` with no value) and did not even compile; this completes it
    in the spirit of the surrounding cells (the `min(1,2,3)` scratch cell
    below suggests taking the minimum of the three find() results).
    """
    hits = [seq.find(codon) for codon in ('TAA', 'TAG', 'TGA')]
    found = [h for h in hits if h != -1]
    if not found:
        return -1
    # + 3 so the returned index covers the whole codon, matching first_stop1.
    return min(found) + 3
# -
print(first_stop(seq))
min(1,2,3)
# +
def longest_ORF1(seq):
    """Collect candidate ORF slices starting at each ATG in seq.

    Despite the name, this returns every candidate slice rather than the
    single longest one.
    """
    ORF = []
    start = all_start(seq)
    stop = 0
    for i, id_start in enumerate(start):
        # first_stop is called on the suffix seq[id_start:], so `stop` is
        # RELATIVE to id_start, yet below it is mixed with absolute
        # indices.  NOTE(review): both (stop - id_start) % 3 and
        # seq[id_start:stop] look suspect for that reason -- confirm the
        # intended semantics before relying on this function.
        stop = first_stop(seq[id_start:])
        print(stop, id_start)  # debugging output
        if (stop - id_start) % 3 == 0:
            ORF.append(seq[id_start:stop])
    return ORF
# -
longest_ORF1(seq)
# +
def all_stop(seq):
    """Return indices one past every occurrence of each stop codon in seq.

    Scans (frame-insensitively) for TAA, TAG and TGA and records the index
    just past the last base of each hit.  The result preserves the
    original's ordering: TAA hits first, then TGA hits, then TAG hits.
    """
    taa = []
    tag = []
    tga = []
    # TAA occurrences.  The original advanced the loop with
    # seq.find('ATG', ...), searching for the wrong codon after the first
    # TAA hit, so all but the first TAA could be missed or wrong.
    i = seq.find('TAA')
    while i >= 0:
        taa.append(i + 3)
        i = seq.find('TAA', i + 1)
    # TAG occurrences.
    j = seq.find('TAG')
    while j >= 0:
        tag.append(j + 3)
        j = seq.find('TAG', j + 1)
    # TGA occurrences.
    k = seq.find('TGA')
    while k >= 0:
        tga.append(k + 3)
        k = seq.find('TGA', k + 1)
    return taa + tga + tag
# -
a= all_start(seq)
a
b = all_stop(seq)
b
def find_ORF(seq):
    """Return all slices from a start-codon index to a stop end-index
    whose length is a multiple of three.

    NOTE(review): stop positions are not required to lie AFTER the
    start, so pairs with j <= i also pass the modulo test and contribute
    empty strings -- confirm whether that is intended.
    """
    ORF = []
    start = all_start(seq)
    stop = all_stop(seq)
    # Cartesian pairing of every start with every stop position.
    for i in start:
        for j in stop:
            if (j - i) % 3 == 0:
                ORF.append(seq[i:j])
    return tuple(ORF)
find_ORF(seq)
# +
def longestORF(seq):
    """Return a one-element list holding the longest ORF found in seq.

    Scans every candidate produced by find_ORF and keeps the longest.
    Returns [""] when no ORF exists.
    """
    temp = [""]
    ORF = find_ORF(seq)
    for i in ORF:
        print(len(i))  # debugging output
        if len(i) > len(temp[0]):
            temp[0] = i
    return temp
# +
#long_ORF_salmo = longestORF(sequence_salmo) #I can't do it, my code is not optimized at all because I get all the
#a = all_start(sequence_salmo)
# -
len(a)
longestORF(seq)
def DNA_prot(seq):
    """Translate a DNA sequence codon-by-codon into a protein string.

    Uses the bootcamp_utils.codons lookup table.  Incomplete trailing
    bases (len(seq) % 3 != 0) are ignored.  NOTE(review): assumes the
    codon table also maps stop codons -- confirm against bootcamp_utils.
    """
    prot_seq = ""
    # Stop at len(seq) - 2 so the final complete codon is included; the
    # original's len(seq) - 3 silently dropped the last codon (e.g. the
    # trailing GGC of 'ATGAGGTTCTTATCTTCAGGGGGAGGC' was never translated).
    for i in range(0, len(seq) - 2, 3):
        prot_seq += bootcamp_utils.codons[seq[i:i + 3]]
    return prot_seq
prot_seq = DNA_prot('ATGATGATGGAATAA')
prot_seq2 = DNA_prot('ATGAGGTTCTTATCTTCAGGGGGAGGC')
prot_seq2
|
.ipynb_checkpoints/Exercice3-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
import keras
from setuptools.command.setopt import option_base
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import InputLayer
from tensorflow.python.keras.layers import MaxPooling2D, MaxPool2D
from tensorflow.python.keras.layers import Conv2D, Dense, Flatten,BatchNormalization,Dropout
from tensorflow.python.keras.optimizers import Adam
from keras.optimizers import RMSprop
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
train = pd.read_csv("../input/digit-recognizer/train.csv")
test_orj = pd.read_csv("../input/digit-recognizer/test.csv")
print("Training set has {0[0]} rows and {0[1]} columns".format(train.shape))
print("Test set has {0[0]} rows and {0[1]} columns".format(test_orj.shape))
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# put labels into y_train variable
Y_train = train["label"]
# Drop 'label' column
X_train = train.drop(labels = ["label"],axis = 1)
# + _uuid="35e207a11a147650860ec4889b1515799a529885"
import seaborn as sns
import matplotlib.pyplot as plt
# visualize number of digits classes
plt.figure(figsize=(15,7))
sns.countplot(Y_train)
plt.title("Number of digit classes")
Y_train.value_counts()
# + _uuid="5b4dd7b0a5005464e77b980c4e6e2a63df55f150"
# plot one sample digit together with its label
print(type(X_train))
# DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
# supported replacement.
img = X_train.iloc[1].to_numpy()
img = img.reshape((28, 28))
plt.imshow(img, cmap='gray')
# Title the plot with the label of the row actually shown (the original
# displayed row 1's image but titled it with row 0's label).
plt.title(train.iloc[1, 0])
plt.axis("off")
plt.show()
# + _uuid="2f0cdbd5e088c07d477b766f91ebcb69125ee147"
# Normalize the data
X_train = X_train / 255.0
test = test_orj.copy() / 255.0
# Reshape
X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1)
# Label Encoding
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
Y_train = to_categorical(Y_train, num_classes = 10)
# Split the train and the validation set for the fitting
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=2)
print("x_train shape",X_train.shape)
print("x_val shape",X_val.shape)
print("y_train shape",Y_train.shape)
print("y_test shape",Y_val.shape)
# + _uuid="4453d627f2b6fed7afddfbb2eb8e1bed2711d464"
Y_train[0]
# + _uuid="0f2ca4953cb0065569ed3923cc6f903d38fce271"
from keras import backend as K
def recall(y_true, y_pred):
    """Recall metric (batch-wise average).

    Computes recall -- the fraction of relevant items that were selected
    -- for multi-label classification.  Only a batch-wise average is
    computed, not an epoch-level value.
    """
    # Element-wise product keeps only positions predicted positive where
    # the label is positive; round/clip binarize soft predictions.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() guards against division by zero when a batch contains
    # no positive labels.
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
# Select optimizer
sgd = keras.optimizers.SGD(lr=1e-4, momentum=0.9)
rms_prop = keras.optimizers.RMSprop(lr=1e-4)
adam = keras.optimizers.adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
adamax = keras.optimizers.Adamax(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
adadelta = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.0001)
# + _uuid="f9561e3b0b02fd0abb68a1d4fe5f4d33e0e6d1e8"
batch_size = 86
num_classes = 10
epochs = 100
input_shape = (28, 28, 1)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),padding='same',activation='relu',input_shape=input_shape))
model.add(Conv2D(32,kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,kernel_size=(3, 3),padding='same', activation='relu'))
model.add(Conv2D(64,kernel_size=(3, 3),activation='relu'))
model.add(MaxPool2D((2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
# + _uuid="ed0626815a145d30148252b50d0080b4f517fea6"
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=adam,
metrics=['accuracy', recall])
# + _uuid="2df66047b68371d20c8efa9852791fad1981c2ed"
datagenn = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
datagenn.fit(X_train)
# + _uuid="9497324027111394e3d1c48550ff140a5c964319"
history = model.fit_generator(datagenn.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data = (X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction],)
result = model.evaluate(x=X_train, y=Y_train)
print('Accuracy:', result[1])
# + _uuid="3726e09f97f6cee21063897ab2033ab0077c202d"
plt.plot(history.history['loss'])
plt.title("Model Loss")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(['Train', 'Test'])
plt.show()
# + _uuid="d1f996725cb2ee0513fce930917275a468a58b2a"
plt.plot(history.history['acc'])
plt.title('Model Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(['Train','Test'])
plt.show()
# + _uuid="6cf1f1c8264b5d615ec1501ad7c96f1a53cab06d"
# confusion matrix
import seaborn as sns
from sklearn.metrics import confusion_matrix
# Predict the values from the validation dataset
Y_pred = model.predict(X_val)
# Convert predictions classes to one hot vectors
Y_pred_classes = np.argmax(Y_pred,axis = 1)
# Convert validation observations to one hot vectors
Y_true = np.argmax(Y_val,axis = 1)
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
f,ax = plt.subplots(figsize=(8, 8))
sns.heatmap(confusion_mtx, annot=True, linewidths=0.01,cmap="Greens",linecolor="gray", fmt= '.1f',ax=ax)
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.title("Confusion Matrix")
plt.show()
# + _uuid="f8b5b42f52afc340748bce360cb45090b0d9853a"
pred_digits_test=np.argmax(model.predict(test),axis=1)
image_id_test=[]
for i in range (len(pred_digits_test)):
image_id_test.append(i+1)
d={'ImageId':image_id_test,'Label':pred_digits_test}
answer=pd.DataFrame(d)
answer.to_csv('result_0601_1401.csv',index=False)
plt.figure(figsize=(10,5))
sns.countplot(pred_digits_test)
plt.title("(Test data) Number of Digits classes")
|
2 digit recognizer/mnist-for-beginners-tensorflow-dnn-cnn-keras.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp scraper
# -
# # Scraper
#
# > This module scrapes property listings. (Note: although originally described as scraping https://www.point2homes.com for land on Vancouver Island, the code below actually targets commercial listings on https://www.thailand-property.com.)
#hide
from nbdev.showdoc import *
from lxml.etree import HTML
from selenium import webdriver
from selenium.webdriver.common.by import By
import xmltodict as xd
from time import sleep
from lxml import etree
import pandas as pd
import re
def get_text(xpath):
    """Return the tag-stripped text content of an lxml element.

    The element is serialized to HTML, markup tags are removed with a
    regex, then newlines are dropped and double spaces removed.
    """
    return re.sub('<[^<]+?>', '', etree.tostring(xpath, method='html', with_tail=False).decode('ascii')).replace('\n', '').replace('  ', '')
def extract_properties(l):
    """Extract one listing's fields from its lxml 'description-block' element.

    Returns a dict with price, name, location, size, description and link.
    NOTE(review): every XPath assumes the node exists ([0] indexing), so a
    listing missing any field raises IndexError -- confirm the markup is
    always complete.
    """
    price = l.xpath(".//div[@class='price']")[0].text.replace('\n', '')
    name = get_text(l.xpath(".//h3[@class='name']")[0])
    # The location text lives in the second child of the location div.
    location = l.xpath(".//div[@class='location']")[0].getchildren()[1].text
    size = get_text(l.xpath(".//div[@class='accommodation']")[0])
    description = get_text(l.xpath(".//div[@class='description-text']")[0])
    link = l.xpath(".//a")[0].attrib['href']
    properties = {
        'price': price,
        'name': name,
        'location': location,
        'size': size,
        'description': description,
        'link': link,
    }
    return properties
def read_land_column(lands):
    """Map each scraped listing element to its property dict."""
    return [extract_properties(listing) for listing in lands]
url = "https://www.thailand-property.com/commercial-property-for-sale?page=1"
browser = webdriver.Firefox()
browser.get(url)
land_data = []
while(1):
# Read Page
page_source = HTML(browser.page_source)
# Extract Land Column
lands = page_source.xpath("//div[@class='description-block']")
# Append data
land_data = land_data + read_land_column(lands)
if not lands:
print("Likely at the last page. Ending scraping. Goodbye.")
break
# Report in.
print(f"Successfully Scraped: {len(lands)}")
# Go to next page
url = browser.current_url.split('=')
url[-1] = str(int(browser.current_url.split('=')[-1])+1)
url = '='.join(url)
browser.get(url)
sleep(10)
# +
def extract_numbers(s):
try:
return float(''.join(re.findall("[-+]?\d*\.\d+|\d+", s)) or 0)
except:
return 0
def sub_billion(s):
    """Expand an 'x.y billion' price string into a plain digit string.

    NOTE(review): the first branch relies on s containing a '.' and pads
    with zeros based on the digit count after the dot; ANY failure (bare
    except) falls back to replacing the literal word 'billion' with nine
    zeros.  Confirm the intended input formats -- the bare excepts hide
    all error types.
    """
    try:
        return str(int(extract_numbers(''.join(s.split('.'))))) + '0' * (9 - len(str(int(extract_numbers(s.split('.')[1])))))
    except:
        return s.replace('billion','0'*9).strip()
# -
df[df['price'].str.contains('billion')]['price'].apply(sub_billion)
df = pd.DataFrame(land_data)
pd.set_option('display.max_colwidth', -1)
df.to_csv("raw_commercial_thailand.csv",index=False)
def explode(s):
    """Parse a listing size string into its components.

    Returns a dict that may contain 'm2' (lot size), 'beds' and 'baths',
    depending on which tokens are present in s.
    """
    parsed = {}
    if 'm2' in s:
        # Tokens preceding the 'm2' marker: "... <beds> <baths> <area> m2".
        tokens = s.split('m2')[0].split(' ')
        parsed['m2'] = tokens[-2]
        if len(tokens) > 3:
            parsed['beds'] = tokens[0]
            parsed['baths'] = tokens[1]
    else:
        tokens = s.split(' ')
        if len(tokens) > 1:
            parsed['beds'] = tokens[0]
            parsed['baths'] = tokens[1]
    return parsed
from pandas.io.json import json_normalize
df['sizes'] = df['size'].apply(explode)
df = df.merge(json_normalize(df['sizes']), left_index=True, right_index=True).drop(['size','sizes'],axis=1)
df
df['price_float (USD)'] = df['price'].apply(sub_billion).apply(extract_numbers) * 0.031 # Baht to USD
df['lot_size_float (ACRE)'] = df['m2'].apply(extract_numbers) / 4046.86 # Square meters to acres
df['size'] = df['m2'] + ' m2'
df
df.to_csv("Thailand_Commercial2.csv",index=False)
df.sort_values('price_float (USD)').head(20).replace(0,np.inf)
df['']
len(str(int(extract_numbers('1.533 billion'.split('.')[1]))))
'0' * (9 - (len('1.5'.split('.')) - 1))
df[df['price'].str.contains('billion')]['price'].apply(sub_billion)
# # Working with the data
# +
df = pd.read_csv("Thailand_Commercial.csv")
df['price_float (USD)'] = df['price'].apply(extract_numbers) * 0.031 # Baht to USD
df['lot_size_float (ACRE)'] = df['m2'].apply(extract_numbers) / 4046.86 # Square meters to acres
# -
df
t[0].split(' ')
d
beds
baths
t
df['size']
df
df
# ### Ideas
# * For each property give an absolute profit value given a sale at market value
# * Time since discount
df['Dollar per Acre'] = df['price_float (USD)'] / df['lot_size_float (ACRE)']
df = df.sort_values('Dollar per Acre')
df['Dollar per Acre'] = df['Dollar per Acre'].apply(lambda x: str(round(x, 2)))
df
df.head(100).to_csv('ThailandTop100.csv', index=False)
df['Dollar per Acre'] = df['Dollar per Acre'].apply(float)
df_regions = df.head(100)[df.head(100)['location'].str.contains(', ')].copy(deep=True)
df_regions['region'] = df_regions['location'].apply(lambda x: x.split(',')[1])
df_regions
a = df_regions.groupby('region').sum()[['price_float (USD)', 'lot_size_float (ACRE)']]
b = df_regions.groupby('region').count()[['location']]
c = df_regions.groupby('region').mean()[['Dollar per Acre']]
info = a.merge(b, left_index=True, right_index=True).merge(c, left_index=True, right_index=True)[['location','price_float (USD)', 'lot_size_float (ACRE)', 'Dollar per Acre']]
info.columns = ['Discounted Properties', 'Total Price (USD)', 'Total Size (ACRE)', 'Mean Dollar Per Acre on Discounted Properties']
info
df.head()
df['Dollar per Acre'].mean() * 0.01
df_discounted = df[df['Dollar per Acre'] < df['Dollar per Acre'].mean() * 0.01]
len(df_discounted)
len(df)
df['Dollar per Acre'].mean()
# ### We can filter by discount per region, that might be more interesting.
# We can also make a panel widget that allows us to slide our discount threshold.
# Additionally, we can render a map showing the regions. We can encode dollar per acre per property on the map.
|
05_thailand-2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # World development indicators data set
#
#
#
# ## Introduction
# Globalization increased interdependence between countries in terms of trade and technology. Globalization also allowed international companies to expand to multiple countries. Globalization also seems to have caused an increase in energy consumption by countries. This in turn causes CO2 emissions to go up in these countries.
# In recent climate change conferences developing countries are considered to be big polluters. Developing economies have energy needs to support their economies, which leads to pollution. Developed countries were asked to innovate ways to curb the growing emissions. In this article I choose to look at three countries — the USA, India and China — to look into the globalization index and energy consumption.
#
# I also believe that increased energy consumption and industrialization caused an increase in the GDP of countries. There are many indicators of quality of life. In this article I want to use GDP as one of the indicators to identify the increase in wealth of the citizens of these countries.
#
# ## Questions to explore
# I want to understand how globalization has shifted energy needs from developed countries to developing countries. If globalization helped developing economies support their citizens they could be paying a price in pollution which could be impacting the health.
#
# My hypothesis is that globalization increased the energy consumption of developing economies and decreased the energy consumption of developed countries.
#
# The second quesion I want to pursue is improvement in quality of life in developing countries as a result of globalization.
# This is very broad question we can answer this with GDP, mortality rate, education and health metrics.
#
# My hypothesis is globalization improved the quality of life developing countries.
#
# ## Data set
# The data set to use for this analysis is [world development indicators](https://datacatalog.worldbank.org/dataset/world-development-indicators). This data has close to 1600 indicators. Energy consumption and infrastructure metrics are tracked on a per country level. More information on the indicators can be found [here](https://data.worldbank.org/indicator).
# This data is public and uses CC-BY 4.0 licenses. It is permitted to download, redistribute the data with proper attribution
#
#
# KOF Globalisation Index is used to track the spread of globalization of each contries. The data set is avilable for download [here](https://ethz.ch/content/dam/ethz/special-interest/dual/kof-dam/documents/Medienmitteilungen/Globalisierungsindex/KOFGI_2020_public.xlsx) The inde tracts 23 variables for each country and computes an index. The index is computed as described [here](https://ethz.ch/content/dam/ethz/special-interest/dual/kof-dam/documents/Medienmitteilungen/Globalisierungsindex/KOFGI_2020_method.pdf) Description of the variables is provided [here](https://ethz.ch/content/dam/ethz/special-interest/dual/kof-dam/documents/Medienmitteilungen/Globalisierungsindex/KOFGI_2020_variables.pdf) In this article I use the final index rather than individual values of the globalization index. I couldn't find an attached license to data but its available for download publicly.
#
# Copy of the data can be downloaded by python code below. The script assumes an active network connection to pull the data from its sources.
#
# ## Analysis
#
# The analysis in this case is mostly exploratory. I want to consider data from year 1970 onwards. There are enegry indicators for each country in the list. In the total energy consumption the indicator of interest is amount of energy imported. I want to look at energy consumption of countries like India, China and compare that to developed countries like USA countries. For the second question I am going to use GDP of economies.
import pandas as pd
import os
import requests
import matplotlib.pyplot as plt
import zipfile
# Next section of code downloads the data from the data source. World Data indicators is downloaded in compressed format. After downlaod is complete data is extracted to data folder. Globalization data set is downloaded in excel format.
#
# The total download time varies based on download speeds. You can retry this method multiple times if download fails.
# +
data_folder = 'data'
compressed_file = os.path.join(data_folder, 'wdi_data.zip')
wdi_download_url = 'http://databank.worldbank.org/data/download/WDI_csv.zip'
koi_globalization_url = 'https://ethz.ch/content/dam/ethz/special-interest/dual/kof-dam/documents/Medienmitteilungen/Globalisierungsindex/KOFGI_2020_public.xlsx'
def download_data_from_world_bank_source():
    """Download the compressed WDI data set into the data folder.

    Issues a HEAD request first to report the download size, creates the
    data folder if needed, then writes the archive to compressed_file.
    """
    h = requests.head(wdi_download_url, allow_redirects=True)
    header = h.headers
    content_length = header.get('content-length', None)
    print('Size of file to download {}'.format(content_length))
    if not os.path.exists(data_folder):
        # os.makedirs: the original called the nonexistent os.makedir,
        # which raised AttributeError whenever the folder was missing.
        os.makedirs(data_folder)
    resp = requests.get(wdi_download_url, allow_redirects=True)
    # Context manager guarantees the file handle is closed.
    with open(compressed_file, 'wb') as out:
        out.write(resp.content)
    print("Downloaded data to {}".format(compressed_file))
def download_data_from_KOI_source():
    """Download the KOF globalisation index spreadsheet to the working dir."""
    if not os.path.exists(data_folder):
        # os.makedirs fixes the original's call to the nonexistent os.makedir.
        os.makedirs(data_folder)
    resp = requests.get(koi_globalization_url, allow_redirects=True)
    # Context manager guarantees the file handle is closed.
    with open('KOFGI_2020_public.xlsx', 'wb') as out:
        out.write(resp.content)
    print("Downloaded data to {}".format('KOFGI_2020_public.xlsx'))
def extract_data():
    """Unzip the downloaded WDI archive into the data folder."""
    with zipfile.ZipFile(compressed_file, 'r') as zip_data:
        zip_data.extractall(data_folder)
    print("Extracted file {} to folder {}".format(compressed_file, data_folder))
download_data_from_KOI_source()
download_data_from_world_bank_source()
extract_data()
# load data from
# -
# World bank data has multiple files in the compressed folder. Here is a list of files
# * WDICountry-Series.csv
# * WDICountry.csv
# * WDIData.csv
# * WDIFootNote.csv
# * WDISeries-Time.csv
# * WDISeries.csv
#
# The file I am interested inis WDIData.csv.
#
# # Analysis
#
# In the following sections, I perform a basic exploratory analysis of the data. I start with plotting the globalization index for the three countries, reading the data previously downloaded. Data for globalization is plotted first, then GDP and energy consumption. Globalization data is read into a pandas data frame. The year column of the dataframe is marked as the index for the frame, which is useful when plotting the data as a time series.
wdidata_df_orig = pd.read_csv('data/WDIData.csv')
globalization_df = pd.read_excel('data/KOFGI_2020_public.xlsx')
country_code = ['IND', 'CHN', 'USA']
globalization_df.set_index('year', inplace=True)
fig, ax = plt.subplots(figsize=(7,7))
globalization_df[globalization_df['code'].isin(country_code)].groupby('code')['KOFGI'].plot(legend=True, title='Globalization Index over years', )
ax.set_xlabel('Year')
ax.set_ylabel('KOF Globalization Index')
# From the graph above its evident that globalization increased significantly in the two developoing economies, albeit its slowing down in recent years. There seems to be a correlation between the time series data of three countries.
#
# # Energy Consumption and GDP graphs.
#
# To analyze world data indicators I used Electric power consumption (kWh per capita) and GDP (current US$ of the countries. World data indicators have years as columns instead of rows. To make is easy to parse and plot i used pandas melt function to pivot columns into rows. The section of code below walks through these stepa and plots data.
# emission columns. There are multiple indicators for this
greenhouse_gases = 'Total greenhouse gas emissions (kt of CO2 equivalent)'
Electric_power_consumption = 'EG.USE.ELEC.KH.PC'
energy_oil_percapita = 'EG.USE.PCAP.KG.OE'
energy_kw_percapita = 'EG.USE.ELEC.KH.PC'
gdp_per_capita = 'NY.GDP.MKTP.CD'
gdp_growth_percent = 'NY.GDP.MKTP.KD.ZG'
wdidata_df_orig.head(1)
encergy_consumption_df = wdidata_df_orig[(wdidata_df_orig['Indicator Code'].isin([gdp_per_capita, gdp_growth_percent, energy_kw_percapita]) ) & (wdidata_df_orig['Country Code'].isin(country_code))]
encergy_consumption_df.head()
pivot_df = pd.melt(encergy_consumption_df, id_vars=['Country Code', 'Indicator Code'], value_vars=[str(x) for x in range(1970,2020)])
pivot_df.set_index('variable', inplace=True)
# ## Pivoting table
# This is how the pivoted data looks like. Notice year is in one coulmn instead of several columns in the original row.
pivot_df.head()
fig, ax = plt.subplots(figsize=(7,7))
pivot_df[pivot_df['Indicator Code']== gdp_per_capita].groupby('Country Code')['value'].plot(legend=True, title='GDP per country.', )
ax.set_xlabel('Year')
ax.set_ylabel('GDP (current US$)')
fig, ax = plt.subplots(figsize=(7,7))
pivot_df[pivot_df['Indicator Code']== 'EG.USE.ELEC.KH.PC'].groupby('Country Code')['value'].plot(legend=True, title='Electric power consumption', )
ax.set_xlabel('Year')
ax.set_ylabel('Electric power consumption (kWh per capita)')
# # Findings
#
# The three plots above are very similar for all the three countries. Developed country like USA is ina different band than developing economies. This might not be true for all developed economies that need to be ivestigated.
# * Power consumption.
# All three countries seem to be consuming more energy as time progressed. Energy consumption seems to be related to globalization index. Overall the trends are the same. USA started seeinga decrease in energy consumption towards the end of the curve. I didn't have time to investigate the reasons here but according to paper presented, this could be related to enery efficiency gains.
#
# * GDP
# Overall gdp for all three coutnries increaed over the years. China seemed to have gained more than India overall. This again seems to be related to GLobalizations.
#
# # Next steps.
#
# In order to conclusively prove the above correlation, I want to build a Pearson correlation between these time series at a minimum. There are more advanced causality models for time series data that I came across in the literature and need time to understand. It will be worthwhile to investigate this further.
|
Final Report.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Stepwise Regression
#
# ### Forward selection with adjusted R-squared:
# +
# code from http://planspace.org/20150423-forward_selection_with_statsmodels/
import statsmodels.formula.api as smf
def forward_selection(data, response):
    """Linear model designed by forward selection on adjusted R-squared.

    Parameters
    ----------
    data : pandas.DataFrame
        All possible predictors plus the response column.
    response : str
        Name of the response column in data.

    Returns
    -------
    model : statsmodels fitted OLS model
        An "optimal" model with an intercept, built by greedily adding
        the predictor that most improves adjusted R-squared until no
        candidate improves the score.
    """
    remaining = set(data.columns)
    remaining.remove(response)  # remove dependent variable
    selected = []  # to hold selected independent variables
    current_score, best_new_score = 0.0, 0.0  # set scores to 0 before iterations
    while remaining and current_score == best_new_score:  # while there are still independent vars to test
        scores_with_candidates = []
        for candidate in remaining:  # each possible ind. var.
            formula = "{} ~ {} + 1".format(response,
                                           ' + '.join(selected + [candidate]))  # add to already selected ind. vars
            score = smf.ols(formula, data).fit().rsquared_adj  # run the reg. and get the adj. rsquared
            scores_with_candidates.append((score, candidate))  # append the adj. rsquared and ind. var. name
        scores_with_candidates.sort()  # sort scores low to high
        best_new_score, best_candidate = scores_with_candidates.pop()  # assign and remove highest score and name
        if current_score < best_new_score:  # if the new score is better than the old
            remaining.remove(best_candidate)  # remove ind. var. from remaining
            selected.append(best_candidate)  # add ind. var. to final selection
            current_score = best_new_score  # make this score the new one to beat
    # if all variables were tested or the score did not improve
    formula = "{} ~ {} + 1".format(response,
                                   ' + '.join(selected))  # format the formula string for smf
    model = smf.ols(formula, data).fit()  # fit and return the final model
    return model
# -
# The `statsmodel` library prefers `pandas` data frames over `numpy` arrays as in `scikit-learn`, but luckily `pandas` has some nice methods that can read in data directly from the web. Let's grab a dataset from a Princeton class used for linear regression. The data consists "of observations on six variables for 52 tenure-track professors in a small college."
# +
import pandas as pd
url = "http://data.princeton.edu/wws509/datasets/salary.dat"
data = pd.read_csv(url, sep='\\s+')
# -
# data description: http://data.princeton.edu/wws509/datasets/#salary
#
# - sx = Sex, coded 1 for female and 0 for male
# - rk = Rank, coded
# - 1 for assistant professor,
# - 2 for associate professor, and
# - 3 for full professor
# - yr = Number of years in current rank
# - dg = Highest degree, coded 1 if doctorate, 0 if masters
# - yd = Number of years since highest degree was earned
# - sl = Academic year salary, in dollars.
#
# Used in: <NAME> (1985). Applied Linear Regression, Second Edition. New York: John Wiley and Sons. Page 194.
data
model = forward_selection(data, 'sl')
model.model.formula
model.rsquared_adj
model.summary()
# ### scikit-learn's F Regression
# +
import numpy as np
cols_to_transform = ["sx", "rk", "dg"]
df_with_dummies = pd.get_dummies(data, columns = cols_to_transform )
np.array(df_with_dummies.drop("sl", 1))
print(df_with_dummies.drop("sl", 1))
# -
from sklearn.feature_selection import f_regression
f_regression(np.array(df_with_dummies.drop("sl", 1)), np.array(data["sl"]), center=True)
|
machine-learning/examples/stepwise.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # mpl-scatter-density
#
# [mpl-scatter-density](https://github.com/astrofrog/mpl-scatter-density) speeds up density graph. [matplotlib](https://matplotlib.org/) is very slow when it comes to draw millions of points. [datashader](https://github.com/bokeh/datashader) is one alternative but was meant for zooming/dezooming. This package provides a simple functionality. The example comes the documentation.
# %matplotlib inline
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# ## example
# +
import numpy as np
import mpl_scatter_density
import matplotlib.pyplot as plt
import matplotlib.colors as col
# Generate fake data
N = 10000000
x = np.random.normal(4, 2, N)
y = np.random.normal(3, 1, N)
# Make the plot - note that for the projection option to work, the
# mpl_scatter_density module has to be imported above.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection='scatter_density')
try:
ax.scatter_density(x, y)
ax.set_xlim(-5, 10)
ax.set_ylim(-5, 10)
except AttributeError as e:
print('issue with more recent version')
print(e)
# -
# The corresponding *matplotib* function does not exist as is. The module [seaborn](https://seaborn.pydata.org/examples/multiple_joint_kde.html) provides density visualization but it is not designed for such big sample.
# +
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Generate fake data
N = 10000
x = np.random.normal(4, 2, N)
y = np.random.normal(3, 1, N)
#import seaborn as sns
import matplotlib.pyplot as plt
f, ax = plt.subplots(figsize=(8, 8))
ax = sns.kdeplot(x, y, shade=True, shade_lowest=False)
# -
|
_doc/notebooks/2016/pydata/im_mpl_scatter_density.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The *shutil* module include high-level file operations.
# # Copying files
import glob
import shutil
print('Before: ', glob.glob('shutil.*'))
shutil.copyfile('shutil.ipynb', 'shutil.copy.ipynb')
print('After:' , glob.glob('shutil.*'))
# +
import glob
import os
import shutil
os.mkdir('examples')
print('Before:', glob.glob('examples/*'))
shutil.copy('shutil.ipynb', 'examples')
print('After:', glob.glob('examples/*'))
# -
import os
import shutil
import time
def show_file_info(filename):
    """Print the mode, creation, access and modification info for *filename*."""
    info = os.stat(filename)
    rows = [
        (' mode ', oct(info.st_mode)),
        (' created: ', time.ctime(info.st_ctime)),
        (' accessed: ', time.ctime(info.st_atime)),
        (' Modified: ', time.ctime(info.st_mtime)),
    ]
    for label, value in rows:
        print(label, value)
os.mkdir('examples')
print('Source:')
show_file_info('shutil.ipynb')
shutil.copy2('shutil.ipynb', 'examples')
print('Dest:')
show_file_info('examples/shutil.ipynb')
|
Python-Standard-Library/FileSystem/examples/shutil.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# import SentimentIntensityanalyzer class from vaderSentiment.vadersentiment module.
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# function to print sentiments of the sentence.
# -
# Creating a pandas dataframe
data = pd.read_csv("dataset.csv")
data.head()
# # Data visualization
data.info()
data.describe() # describing the data
sns.heatmap(data.isnull(), yticklabels = False, cbar = False, cmap="Blues")
# to check whether there is any missing data
# +
data['neg'] = ''
data['neu'] = ''
data['pos'] = ''
data['compound'] = ''
data['sentiment'] = ''
# Create a SentimentIntensityAnalyzer object.
sid_obj = SentimentIntensityAnalyzer()
# polarity_scores method of SentimentIntensityAnalyzer
# object gives a sentiment dictionary.
# which contains pos, neg, neu, and compound scores.
# Score every tweet with VADER; polarity_scores returns a dict with
# 'neg', 'neu', 'pos' and 'compound' entries, written column-by-column
# back into the DataFrame.
for i in range(len(data)):
    sentiment_dict = sid_obj.polarity_scores(data["tweetcaption"][i])
    data.loc[i,'neg'] = sentiment_dict['neg']
    data.loc[i,'neu'] = sentiment_dict['neu']
    data.loc[i,'pos'] = sentiment_dict['pos']
    data.loc[i,'compound'] = sentiment_dict['compound']
    # Label from the compound score: >= 0.05 -> Positive,
    # <= -0.05 -> Negative, otherwise Neutral.
    if sentiment_dict['compound'] >= 0.05 :
        data.loc[i,'sentiment'] = "Positive"
    elif sentiment_dict['compound'] <= - 0.05 :
        data.loc[i,'sentiment'] = "Negative"
    else :
        data.loc[i,'sentiment'] = "Neutral"
# -
data.head(10)
# +
# visulalizing the number of positive negative and nuetral sentiments in the data
x = data['sentiment']
sns.countplot(x)
plt.show()
# -
# ### Analizing the tweets
# +
data['length'] = data["tweetcaption"].apply(len)
# -
data
# Analysing the frequency of tweets
data['length'].plot(bins=250, kind='hist')
#Checking the length of tweets (mean ,std ,min )
data.describe()
positive = data[data['sentiment']=="Positive"]
positive
negative = data[data['sentiment']=="Negative"]
negative
neutral = data[data['sentiment']=="Neutral"]
neutral
# ## Story Generation and Visualization from Tweets from wordcloud
# #A wordcloud is a visualization wherein the most frequent words appear in large size and the less frequent words appear in smaller sizes.
# +
sentences = data['tweetcaption'].tolist()
sentences_as_one_string = " ".join(sentences)
from wordcloud import WordCloud
wordcloud = WordCloud().generate(sentences_as_one_string)
plt.figure(figsize=(20,20))
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
# -
# # Building Functions
# punctuations
import string
string.punctuation
# stopwords
from nltk.corpus import stopwords
stopwords.words('english')
# +
from sklearn.feature_extraction.text import CountVectorizer
# +
# defining a function to clean up all the messages
# The function performs the following:
#(1) remove punctuation
#(2) remove stopwords
def message_cleaning(message):
    """Remove punctuation from *message* and drop English stopwords.

    Returns the surviving words as a list, in their original order.
    """
    # Drop punctuation characters, then reassemble the text.
    depunctuated = ''.join(ch for ch in message if ch not in string.punctuation)
    # Keep only tokens that are not English stopwords (case-insensitive check).
    english_stopwords = stopwords.words('english')
    return [token for token in depunctuated.split()
            if token.lower() not in english_stopwords]
# -
# applying the newly added function to the data
tweets_df_clean = data['tweetcaption'].apply(message_cleaning)
print(tweets_df_clean[0]) # showing the cleaned up version
print(data['tweetcaption'][0]) # showing the original version
#TOKENIZATION
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(analyzer = message_cleaning)
tweets_countvectorizer = CountVectorizer(analyzer = message_cleaning, dtype = 'uint8').fit_transform(data["tweetcaption"])
tweets_countvectorizer.shape
tweets_countvectorizer.todense()
X = tweets_countvectorizer
Y = data["sentiment"]
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
# # Using Naive Bayes model
# +
from sklearn.naive_bayes import MultinomialNB
NB_classifier = MultinomialNB()
NB_classifier.fit(X_train, Y_train)
# -
from sklearn.metrics import classification_report, confusion_matrix
# Predicting the Test set results
prediction_NB = NB_classifier.predict(X_test)
cm_NB = confusion_matrix(Y_test, prediction_NB)
cm_NB
#classification_report
print(classification_report(Y_test, prediction_NB))
# ## Using SVC model
from sklearn.svm import SVC
from sklearn import svm
from sklearn.metrics import accuracy_score
# +
svc = svm.SVC(kernel='linear', C=1, probability=True).fit(X_train, Y_train)
prediction_svc = svc.predict(X_test)
# -
cm_svc = confusion_matrix(Y_test, prediction_svc)
cm_svc
#classification_report
print(classification_report(Y_test, prediction_svc))
# ## Using random forest model
from sklearn.ensemble import RandomForestClassifier
# +
rf = RandomForestClassifier(n_estimators=400, random_state=11).fit(X_train, Y_train)
prediction_rf = rf.predict(X_test)
# -
cm_rf = confusion_matrix(Y_test, prediction_rf)
cm_rf
#classification_report
print(classification_report(Y_test, prediction_rf))
|
CONTRIBUTION/Jupyter notebooks/Abhinay_beerukuri.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="XHFnthirwlfn"
# ### 1.1 Loading libraries
# -
import scipy.misc
import random
import numpy as np
import scipy
# ### 1.2 Load data
# +
xs = []
ys = []
# Read data.txt: each line is "<image filename> <steering angle in degrees>".
with open("driving_dataset/data.txt") as f:
    for line in f:
        xs.append("driving_dataset/" + line.split()[0])
        # The NVIDIA paper uses the inverse of the turning radius, but the
        # steering wheel angle is proportional to the inverse of the turning
        # radius, so the steering wheel angle in radians is used as the output.
        # FIX: scipy.pi (a numpy alias) was removed in SciPy 1.12 -- use np.pi.
        ys.append(float(line.split()[1]) * np.pi / 180)
# get number of images
num_images = len(xs)

# Sequential 70/30 train/validation split (no shuffling of the video frames).
train_xs = xs[:int(len(xs) * 0.7)]
train_ys = ys[:int(len(xs) * 0.7)]
val_xs = xs[-int(len(xs) * 0.3):]
val_ys = ys[-int(len(xs) * 0.3):]
num_train_images = len(train_xs)
num_val_images = len(val_xs)
# +
# scipy.misc.imresize(scipy.misc.imread(train_xs[(train_batch_pointer + i) % num_train_images])[-150:], [66, 200]) / 255.0
# you can break the whole line into parts like this
# here (train_batch_pointer + i) % num_train_images => "% num_train_images" is used to make sure that the
# (train_batch_pointer + i) values should not cross number of train images.
# lets explain whats happening with the first images
# NOTE(review): scipy.misc.imread and scipy.misc.imresize were deprecated in
# SciPy 1.0 and removed in later releases; this cell only runs on an older
# SciPy (with Pillow installed) -- confirm the pinned environment.
image_read = scipy.misc.imread(train_xs[0])
print("original image size",image_read.shape)
print("After taking the last 150 rows i.e lower part of the images where road is present, ",image_read[-150:].shape)
# Crop to the bottom 150 rows: the road region of the camera frame.
image_read = image_read[-150:]
resized_image = scipy.misc.imresize(image_read, [66, 200])
print("After resizing the images into 66*200, ",resized_image.shape)
# 200/66 = 455/150 = 3.0303 => we are keeping aspect ratio when we are resizing it
# -
scipy.misc.imresize(scipy.misc.imread(train_xs[0])[-150:], [66, 200])
# ### 1.3 EDA
print("Approx %d minutes of total data"%int(num_images/(30*60)))
print("Approx %d minutes of train data"%int(num_train_images/(30*60)))
print("Approx %d minutes of test data"%int(num_val_images/(30*60)))
# #### 1.3.1 Histogram
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="vgcc6iQobKHi"
# read images and steering angles from driving_dataset folder
from __future__ import division
import os
import numpy as np
import random
from scipy import pi
from itertools import islice
DATA_FOLDER = './driving_dataset/' # change this to your folder
TRAIN_FILE = os.path.join(DATA_FOLDER, 'data.txt')
split =0.7
LIMIT = None
X = []
y = []
with open(TRAIN_FILE) as fp:
for line in islice(fp, LIMIT):
path, angle = line.strip().split()
full_path = os.path.join(DATA_FOLDER, path)
X.append(full_path)
# converting angle from degrees to radians
y.append(float(angle) * pi / 180 )
y = np.array(y)
print("Completed processing data.txt")
split_index = int(len(y)*0.7)
train_y = y[:split_index]
test_y = y[split_index:]
# +
import numpy;
# PDF of train and test 'y' values.
import matplotlib.pyplot as plt
plt.figure(figsize=(10,8))
# FIX: the `normed` argument was removed from plt.hist in Matplotlib 3.1;
# `density=True` is the equivalent normalised-histogram option.
plt.hist(train_y, bins=50, density=True, color='green', histtype ='step');
plt.hist(test_y, bins=50, density=True, color='red', histtype ='step');
plt.show()
# -
# ### 1.4 Baseline Models
# +
#Model 0: Base line Model: y_test_pred = mean(y_train_i)
train_mean_y = np.mean(train_y)
print('Test_MSE(MEAN):%f' % np.mean(np.square(test_y-train_mean_y)) )
print('Test_MSE(ZERO):%f' % np.mean(np.square(test_y-0.0)) )
# -
# ### 1.5 NN model
# +
#points to the end of the last batch
train_batch_pointer = 0
val_batch_pointer = 0
def LoadTrainBatch(batch_size):
    """Return the next `batch_size` (image, steering-angle) training pairs.

    Each image is cropped to its bottom 150 rows (the road region), resized
    to 66x200 and scaled to [0, 1]. Advances the module-level
    `train_batch_pointer`; indices wrap with modulo so the dataset cycles
    indefinitely.
    NOTE(review): scipy.misc.imread/imresize were removed from SciPy; this
    requires an old SciPy with Pillow -- confirm the environment.
    """
    global train_batch_pointer
    x_out = []
    y_out = []
    for i in range(0, batch_size):
        x_out.append(scipy.misc.imresize(scipy.misc.imread(train_xs[(train_batch_pointer + i) % num_train_images])[-150:], [66, 200]) / 255.0)
        y_out.append([train_ys[(train_batch_pointer + i) % num_train_images]])
    train_batch_pointer += batch_size
    return x_out, y_out
def LoadValBatch(batch_size):
    """Return the next `batch_size` (image, steering-angle) validation pairs.

    Mirrors LoadTrainBatch: crop to the bottom 150 rows, resize to 66x200,
    scale to [0, 1]. Advances the module-level `val_batch_pointer` and wraps
    indices with modulo so the validation set cycles indefinitely.
    """
    global val_batch_pointer
    x_out = []
    y_out = []
    for i in range(0, batch_size):
        x_out.append(scipy.misc.imresize(scipy.misc.imread(val_xs[(val_batch_pointer + i) % num_val_images])[-150:], [66, 200]) / 255.0)
        y_out.append([val_ys[(val_batch_pointer + i) % num_val_images]])
    val_batch_pointer += batch_size
    return x_out, y_out
# +
import tensorflow as tf
import scipy
def weight_variable(shape):
    """Create a weight Variable initialised from a truncated normal (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """Create a bias Variable initialised to the constant 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W, stride):
    """2-D convolution with equal x/y stride and no padding ('VALID')."""
    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='VALID')
x = tf.placeholder(tf.float32, shape=[None, 66, 200, 3])
y_ = tf.placeholder(tf.float32, shape=[None, 1])
x_image = x
#first convolutional layer
W_conv1 = weight_variable([5, 5, 3, 24])
b_conv1 = bias_variable([24])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, 2) + b_conv1)
#second convolutional layer
W_conv2 = weight_variable([5, 5, 24, 36])
b_conv2 = bias_variable([36])
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2, 2) + b_conv2)
#third convolutional layer
W_conv3 = weight_variable([5, 5, 36, 48])
b_conv3 = bias_variable([48])
h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 2) + b_conv3)
#fourth convolutional layer
W_conv4 = weight_variable([3, 3, 48, 64])
b_conv4 = bias_variable([64])
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4, 1) + b_conv4)
#fifth convolutional layer
W_conv5 = weight_variable([3, 3, 64, 64])
b_conv5 = bias_variable([64])
h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5, 1) + b_conv5)
#FCL 1
W_fc1 = weight_variable([1152, 1164])
b_fc1 = bias_variable([1164])
h_conv5_flat = tf.reshape(h_conv5, [-1, 1152])
h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
#FCL 2
W_fc2 = weight_variable([1164, 100])
b_fc2 = bias_variable([100])
h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)
#FCL 3
W_fc3 = weight_variable([100, 50])
b_fc3 = bias_variable([50])
h_fc3 = tf.nn.relu(tf.matmul(h_fc2_drop, W_fc3) + b_fc3)
h_fc3_drop = tf.nn.dropout(h_fc3, keep_prob)
#FCL 3
W_fc4 = weight_variable([50, 10])
b_fc4 = bias_variable([10])
h_fc4 = tf.nn.relu(tf.matmul(h_fc3_drop, W_fc4) + b_fc4)
h_fc4_drop = tf.nn.dropout(h_fc4, keep_prob)
#Output
W_fc5 = weight_variable([10, 1])
b_fc5 = bias_variable([1])
y = tf.identity(tf.matmul(h_fc4_drop, W_fc5) + b_fc5) #scale the atan output
# +
import os
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
import driving_data
import model
from datetime import datetime
start = datetime.now()
LOGDIR = './save'
sess = tf.InteractiveSession()
L2NormConst = 0.001
train_vars = tf.trainable_variables()
loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))) + tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
sess.run(tf.initialize_all_variables())
# create a summary to monitor cost tensor
tf.summary.scalar("loss", loss)
# merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()
saver = tf.train.Saver(write_version = saver_pb2.SaverDef.V1)
# op to write logs to Tensorboard
logs_path = './logs'
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
epochs = 30
batch_size = 100
# train over the dataset about 30 times
# Train over the dataset for `epochs` passes, checkpointing along the way.
# FIX: removed the dead `prev_i`/`prev_loss` assignments -- they were never
# read anywhere.
for epoch in range(epochs):
    epoch_time = datetime.now()
    for i in range(int(driving_data.num_images/batch_size)):
        xs, ys = driving_data.LoadTrainBatch(batch_size)
        # keep_prob 0.5 enables dropout during training only.
        train_step.run(feed_dict={model.x: xs, model.y_: ys, model.keep_prob: 0.5})
        if i % 10 == 0:
            # Every 10 steps, measure the loss on a validation batch
            # (keep_prob 1.0 disables dropout for evaluation).
            xs, ys = driving_data.LoadValBatch(batch_size)
            loss_value = loss.eval(feed_dict={model.x:xs, model.y_: ys, model.keep_prob: 1.0})
            #print("Epoch: %d, Step: %d, Loss: %g" % (epoch, epoch * batch_size + i, loss_value))
        # write logs at every iteration
        summary = merged_summary_op.eval(feed_dict={model.x:xs, model.y_: ys, model.keep_prob: 1.0})
        summary_writer.add_summary(summary, epoch * driving_data.num_images/batch_size + i)
        if i % batch_size == 0:
            # Checkpoint periodically; i == 0 always matches, so `filename`
            # below is defined after at least one save per epoch.
            if not os.path.exists(LOGDIR):
                os.makedirs(LOGDIR)
            checkpoint_path = os.path.join(LOGDIR, "model.ckpt")
            filename = saver.save(sess, checkpoint_path)
    print("Epoch: %d, Step: %d, Loss: %g" % (epoch, epoch * batch_size + i, loss_value))
    print("\nTime taken to run this epoch: ")
    print(datetime.now() - epoch_time)
print("Model saved in file: %s" % filename)
print("Run the command line:\n" \
      "--> tensorboard --logdir=./logs " \
      "\nThen open http://0.0.0.0:6006/ into your web browser")
print("\nTime taken to train the model: ")
print(datetime.now() - start)
sess.close()
# -
# * To run this model run run_dataset.py in command prompt.
|
Self_driving_car-manu.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Modelling Optimal Strategies for a Single Car
# Some examples of using the Python functions defined in *modelling_utilities.py* to optimise race strategy. Here, we are optimising the strategy for a car running on its own.
import modelling_utilities as mu
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# ## Plotting the laptimes from a F1 Grand Prix
year = 2019
round_num = 2
driver_name = '<NAME>'
results_df = mu.download_laptimes(year, round_num, driver_name)
mu.plot_laptimes(results_df)
# ## Finding & plotting optimal strategies with known tyre deg variables
# The code below plots the optimal strategy based off a quadratic tyre deg function.
# **Mandatory Inputs**
# +
laps_complete = 3 # Full racing laps already completed
total_race_laps = 52
pitstop_time = 24 # Total extra time in seconds to make a pitstop
current_tyre_description = 'Soft' # Either 'Soft', 'Medium' or 'Hard'
current_tyre_age = 3 # Total laps the current tyre has already done
need_to_use_different_tyre = True # Boolean, depending on whether you still need to use a different compound of tyre before the end of the race
# The following parameters define the shape of the quadratic tyre deg curve
tyre_deg_curve = mu.tyre_deg_curve_quadratic
# Quadratic tyre-degradation parameters.
k = 0.6   # each harder compound degrades k times as fast as the previous one
d = 0.6   # pace deficit (seconds/lap) per step of harder compound
# FIX: these assignments previously ended with trailing commas, which made
# every value a one-element tuple instead of a number and would break the
# arithmetic inside find_optimum_strategy.
soft_tyre_deg_quadratic = 0.012
soft_tyre_deg_linear = -0.01
medium_tyre_pace_deficit = d
medium_tyre_deg_quadratic = 0.012*k
medium_tyre_deg_linear = -0.01*k
hard_tyre_pace_deficit = 2*d
hard_tyre_deg_quadratic = 0.012*(k**2)
hard_tyre_deg_linear = -0.01*(k**2)
# -
# **Optional Inputs**
max_pitstops = 3 # Here you can choose a maximum number of pitstops. The find_optimium_strategy function has a absolute maximum of 3 that it can handle, and by default this parameter is set to be 3.
base_laptime = 76.52 # The race laptime (measured in seconds) from a brand new Soft tyre with 1 lap of fuel remaining. By default, this is 0. (N.B. this parameter only affects the size of the laptimes - the choice of optimal strategy remains the same regardless of this parameter.)
fuel_laptime_correction = 0.06 # The improvement in laptime, measured in seconds per lap (assumed to be linear), from decreasing fuel load. This is assuming all other variables (including tyre deg) are constant. By default, this is 0. (N.B. this parameter only affects the size of the laptimes - the choice of optimal strategy remains the same regardless of this parameter.)
detailed_logs = True # Boolean, whether or not to return extra detail on where the function is up to in the batching process. Mainly used for diagnostics and/or understanding any processes taking a long time.
batch_size = 50000 # Controls the number of potential strategies that are computed over in a single batch. If you have issues with memory, then reduce this batch size.
optimal_strategy = mu.find_optimum_strategy(laps_complete, total_race_laps, pitstop_time, current_tyre_description,
current_tyre_age, need_to_use_different_tyre, tyre_deg_curve,
base_laptime, fuel_laptime_correction, max_pitstops, batch_size, detailed_logs,
soft_tyre_deg_quadratic = soft_tyre_deg_quadratic, soft_tyre_deg_linear = soft_tyre_deg_linear,
medium_tyre_pace_deficit = medium_tyre_pace_deficit, medium_tyre_deg_quadratic = medium_tyre_deg_quadratic, medium_tyre_deg_linear = medium_tyre_deg_linear,
hard_tyre_pace_deficit = hard_tyre_pace_deficit, hard_tyre_deg_quadratic = hard_tyre_deg_quadratic, hard_tyre_deg_linear = hard_tyre_deg_linear
)
for i in range(0,len(optimal_strategy.index)):
results_df = pd.DataFrame({'lap_number': optimal_strategy.iloc[i]['lap_number_list'],
'tyre_stint_number': optimal_strategy.iloc[i]['tyre_stint_number_list'],
'tyre_description': optimal_strategy.iloc[i]['tyre_description_list'],
'tyre_status': optimal_strategy.iloc[i]['tyre_status_list'],
'lap_time': optimal_strategy.iloc[i]['lap_times_adjusted']})
mu.plot_laptimes(results_df, 'Optimal Strategy #{0}'.format(i))
# Note here that there are in fact 3 strategies that minimise the total race time; all 3 are plotted.
# ## Finding optimal strategies across multiple tyre deg variables
# The following code plots a heatmap of optimal strategies via a grid search across multiple tyre deg parameters. The idea here is to identify how close we are to the boundary of a different race strategy being optimal.
# +
laps_complete = 3
total_race_laps = 52
pitstop_time = 24
current_tyre_description = 'Soft'
current_tyre_age = 3
need_to_use_different_tyre = True
max_pitstops = 2
base_laptime = 76.52
fuel_laptime_correction = 0.06
detailed_logs = False
batch_size = 50000
# The following parameters will define the shape of the quadratic tyre deg curve
tyre_deg_curve = mu.tyre_deg_curve_quadratic
param1_range = np.arange(0.1, 0.2, 0.01) # Parameter 1 will be called 'k' later, and is the multiplicative factor by which each harder step of tyre compound has less degradation.
param2_range = np.arange(0.8, 1.3, 0.1) # Parameter 2 will be called 'd' later, and is the difference in seconds per lap between each step of tyre compound, without any degradation factor.
# -
# Cross join to get all the possible combinations of parameters
parameter_grid = np.transpose([np.tile(param1_range, len(param2_range)), np.repeat(param2_range, len(param1_range))])
optimal_number_pitstops = []
optimal_tyre_choice = []
# Get the optimal strategy for these different parameter ranges
for i in range(0,len(parameter_grid)):
row = parameter_grid[i]
k = row[0]
d = row[1]
results_df = mu.find_optimum_strategy(laps_complete, total_race_laps, pitstop_time, current_tyre_description,
current_tyre_age, need_to_use_different_tyre, tyre_deg_curve,
base_laptime, fuel_laptime_correction, max_pitstops, batch_size, detailed_logs,
soft_tyre_deg_quadratic = 0.012, soft_tyre_deg_linear = -0.01,
medium_tyre_pace_deficit = d, medium_tyre_deg_quadratic = 0.012*k, medium_tyre_deg_linear = -0.01*k,
hard_tyre_pace_deficit = 2*d, hard_tyre_deg_quadratic = 0.012*(k**2), hard_tyre_deg_linear = -0.01*(k**2))
if results_df["pitstop_1_lap"][0] == -1:
optimal_number_pitstops.append(0)
optimal_tyre_choice.append(str(results_df["pitstop_0_tyre"][0]))
elif results_df["pitstop_2_lap"][0] == -1:
optimal_number_pitstops.append(1)
optimal_tyre_choice.append(str(results_df["pitstop_0_tyre"][0]) + " " + str(results_df["pitstop_1_tyre"][0]))
else:
optimal_number_pitstops.append(2)
alphabetical_tyres = [str(results_df["pitstop_1_tyre"][0]),str(results_df["pitstop_2_tyre"][0])]
alphabetical_tyres.sort()
optimal_tyre_choice.append(str(results_df["pitstop_0_tyre"][0]) + " " + alphabetical_tyres[0] + " " + alphabetical_tyres[1])
print("Finished optimising for k = {:.2f} and d = {:.1f}".format(k,d))
parameter_df = pd.DataFrame(parameter_grid, columns = ['k','d'])
parameter_df = parameter_df.round({'k': 2, 'd': 1})
parameter_df["optimal_number_pitstops"] = optimal_number_pitstops
parameter_df["optimal_tyre_choice"] = optimal_tyre_choice
pivot_tab = parameter_df.pivot_table('optimal_number_pitstops', index = 'k', columns = 'd')
annotations = parameter_df.pivot_table('optimal_tyre_choice', index = 'k', columns = 'd', aggfunc = lambda x: ' '.join(x))
sns.heatmap(pivot_tab, annot = annotations, annot_kws={"size": 7}, fmt = '', cmap = 'Blues', cbar = False, linewidths=.3)
plt.xlabel("d")
plt.ylabel("k")
plt.title("Optimal strategy for various tyre parameters")
plt.show()
# **End**
|
build/jupyter_notebooks/parameter_sweep_optimisation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Timeseries
#
# Pandas started out in the financial world, so naturally it has strong timeseries support.
#
# The first half of this post will look at pandas' capabilities for manipulating time series data.
# The second half will discuss modelling time series data with statsmodels.
# +
# %matplotlib inline
import os
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style='ticks', context='talk')
if int(os.environ.get("MODERN_PANDAS_EPUB", 0)):
import prep # noqa
# -
# Let's grab some stock data for Goldman Sachs using the [`pandas-datareader`](http://pandas-datareader.readthedocs.io/en/latest/) package, which spun off of pandas:
gs = web.DataReader("GS", data_source='yahoo', start='2006-01-01',
end='2010-01-01')
gs.head().round(2)
# There isn't a special data-container just for time series in pandas, they're just `Series` or `DataFrame`s with a `DatetimeIndex`.
# ## Special Slicing
#
# Looking at the elements of `gs.index`, we see that `DatetimeIndex`es are made up of `pandas.Timestamp`s:
gs.index[0]
# A `Timestamp` is mostly compatible with the `datetime.datetime` class, but much more amenable to storage in arrays.
#
# Working with `Timestamp`s can be awkward, so Series and DataFrames with `DatetimeIndexes` have some special slicing rules.
# The first special case is *partial-string indexing*. Say we wanted to select all the days in 2006. Even with `Timestamp`'s convenient constructors, it's a pain:
gs.loc[pd.Timestamp('2006-01-01'):pd.Timestamp('2006-12-31')].head()
# Thanks to partial-string indexing, it's as simple as
gs.loc['2006'].head()
# Since label slicing is inclusive, this slice selects any observation where the year is 2006.
# The second "convenience" is `__getitem__` (square-bracket) fall-back indexing. I'm only going to mention it here, with the caveat that you should never use it.
# DataFrame `__getitem__` typically looks in the column: `gs['2006']` would search `gs.columns` for `'2006'`, not find it, and raise a `KeyError`. But DataFrames with a `DatetimeIndex` catch that `KeyError` and try to slice the index.
# If it succeeds in slicing the index, the result like `gs.loc['2006']` is returned.
# If it fails, the `KeyError` is re-raised.
# This is confusing because in pretty much every other case `DataFrame.__getitem__` works on columns, and it's fragile because if you happened to have a column `'2006'` you *would* get just that column, and no fall-back indexing would occur. Just use `gs.loc['2006']` when slicing DataFrame indexes.
#
# ## Special Methods
# ### Resampling
# Resampling is similar to a `groupby`: you split the time series into groups (5-day buckets below), apply a function to each group (`mean`), and combine the result (one row per group).
gs.resample("5d").mean().head()
gs.resample("W").agg(['mean', 'sum']).head()
# You can up-sample to convert to a higher frequency.
# The new points are filled with NaNs.
gs.resample("6H").mean().head()
# ### Rolling / Expanding / EW
# These methods aren't unique to `DatetimeIndex`es, but they often make sense with time series, so I'll show them here.
# +
gs.Close.plot(label='Raw')
gs.Close.rolling(28).mean().plot(label='28D MA')
gs.Close.expanding().mean().plot(label='Expanding Average')
gs.Close.ewm(alpha=0.03).mean().plot(label='EWMA($\\alpha=.03$)')
plt.legend(bbox_to_anchor=(1.25, .5))
plt.tight_layout()
plt.ylabel("Close ($)")
sns.despine()
# -
# Each of `.rolling`, `.expanding`, and `.ewm` return a deferred object, similar to a GroupBy.
roll = gs.Close.rolling(30, center=True)
roll
m = roll.agg(['mean', 'std'])
ax = m['mean'].plot()
ax.fill_between(m.index, m['mean'] - m['std'], m['mean'] + m['std'],
alpha=.25)
plt.tight_layout()
plt.ylabel("Close ($)")
sns.despine()
# ## Grab Bag
#
# ### Offsets
#
# These are similar to `dateutil.relativedelta`, but works with arrays.
gs.index + pd.DateOffset(months=3, days=-2)
# ### Holiday Calendars
#
# There are a whole bunch of special calendars, useful for traders, probably.
from pandas.tseries.holiday import USColumbusDay
USColumbusDay.dates('2015-01-01', '2020-01-01')
# ### Timezones
#
# Pandas works with `pytz` for nice timezone-aware datetimes.
# The typical workflow is
#
# 1. localize timezone-naive timestamps to some timezone
# 2. convert to desired timezone
#
# If you already have timezone-aware Timestamps, there's no need for step one.
# tz naive -> tz aware, then convert to the desired timezone (UTC)
gs.tz_localize('US/Eastern').tz_convert('UTC').head()
# ## Modeling Time Series
#
# The rest of this post will focus on time series in the econometric sense.
# My intended reader for this section isn't all that clear, so I apologize upfront for any sudden shifts in complexity.
# I'm roughly targeting material that could be presented in a first or second semester applied statistics course.
# What follows certainly isn't a replacement for that.
# Any formality will be restricted to footnotes for the curious.
# I've put a whole bunch of resources at the end for people eager to learn more.
#
# We'll focus on modelling Average Monthly Flights. Let's download the data.
# If you've been following along in the series, you've seen most of this code before, so feel free to skip.
# +
import os
import io
import glob
import zipfile
from utils import download_timeseries
import statsmodels.api as sm
def download_many(start, end):
    """Download one monthly timeseries file for every month in [start, end].

    Delegates the actual fetch to ``utils.download_timeseries`` for each
    monthly ``Period``.
    """
    months = pd.period_range(start, end=end, freq='M')
    # We could easily parallelize this loop.
    # (Dropped the unused `enumerate` index from the original.)
    for month in months:
        download_timeseries(month)
def time_to_datetime(df, columns):
    """Combine the date column with HHMM time columns into full datetimes.

    For each column name in *columns*, the float-encoded time is joined
    with ``df['fl_date']``, in place:

        2014-01-01, 1149.0 -> 2014-01-01T11:49:00

    Unparseable values become NaT (``errors='coerce'``).

    FIXES vs. original: removed an unreachable ``return datetime_part``
    (undefined name) left inside the closure, and made the ``'.0'`` strip an
    explicit regex -- pandas >= 2.0 defaults ``str.replace`` to literal
    matching, which silently left the suffix in place.

    Parameters
    ----------
    df : DataFrame with an 'fl_date' column of date strings.
    columns : list of column names holding HHMM-encoded times.

    Returns
    -------
    The same DataFrame, with *columns* converted to datetime64.
    """
    def converter(col):
        # NaNs force the column to float dtype, so times read back like
        # '1149.0'; strip the trailing '.0' and zero-pad to 4-digit HHMM.
        timepart = (col.astype(str)
                    .str.replace(r'\.0$', '', regex=True)
                    .str.pad(4, fillchar='0'))
        return pd.to_datetime(df['fl_date'] + ' ' +
                              timepart.str.slice(0, 2) + ':' +
                              timepart.str.slice(2, 4),
                              errors='coerce')
    df[columns] = df[columns].apply(converter)
    return df
def read_one(fp):
    """Read one monthly on-time-performance CSV and tidy it.

    Lower-cases the column names, drops the spurious trailing column,
    parses the HHMM time columns into datetimes via ``time_to_datetime``,
    and converts 'fl_date' itself to datetime64.
    """
    frame = pd.read_csv(fp, encoding='latin1')
    frame = frame.rename(columns=str.lower).drop('unnamed: 6', axis=1)
    time_columns = ['dep_time', 'arr_time', 'crs_arr_time', 'crs_dep_time']
    frame = time_to_datetime(frame, time_columns)
    frame = frame.assign(fl_date=lambda d: pd.to_datetime(d['fl_date']))
    return frame
# +
# Build (or load from cache) the HDF5 store of all monthly timeseries data.
store = 'data/ts.hdf5'
if not os.path.exists(store):
    download_many('2000-01-01', '2016-01-01')
    zips = glob.glob(os.path.join('data', 'timeseries', '*.zip'))
    # FIX: the comprehension iterated an undefined name `csvs`; iterate the
    # zip files bound to `zips` just above instead.
    dfs = [read_one(fp) for fp in zips]
    df = pd.concat(dfs, ignore_index=True)
    df['origin'] = df['origin'].astype('category')
    df.to_hdf(store, 'ts', format='table')
else:
    df = pd.read_hdf(store, 'ts')
# -
with pd.option_context('display.max_rows', 100):
print(df.dtypes)
# We can calculate the historical values with a resample.
daily = df.fl_date.value_counts().sort_index()
y = daily.resample('MS').mean()
y.head()
# Note that I use the `"MS"` frequency code there.
# Pandas defaults to end of month (or end of year).
# Append an `'S'` to get the start.
ax = y.plot()
ax.set(ylabel='Average Monthly Flights')
sns.despine()
import statsmodels.formula.api as smf
import statsmodels.tsa.api as smt
import statsmodels.api as sm
# One note of warning: I'm using the development version of statsmodels (commit `de15ec8` to be precise).
# Not all of the items I've shown here are available in the currently-released version.
#
# Think back to a typical regression problem, ignoring anything to do with time series for now.
# The usual task is to predict some value $y$ using some a linear combination of features in $X$.
#
# $$y = \beta_0 + \beta_1 X_1 + \ldots + \beta_p X_p + \epsilon$$
#
# When working with time series, some of the most important (and sometimes *only*) features are the previous, or *lagged*, values of $y$.
#
# Let's start by trying just that "manually": running a regression of `y` on lagged values of itself.
# We'll see that this regression suffers from a few problems: multicollinearity, autocorrelation, non-stationarity, and seasonality.
# I'll explain what each of those are in turn and why they're problems.
# Afterwards, we'll use a second model, seasonal ARIMA, which handles those problems for us.
#
# First, let's create a dataframe with our lagged values of `y` using the `.shift` method, which shifts the index `i` periods, so it lines up with that observation.
# Build the design matrix: column 'y' plus its first five lags L1..L5.
# ``shift(i)`` moves the series down i periods, so row t holds y_{t-i};
# the leading rows with incomplete lag history are dropped by ``dropna``.
X = (pd.concat([y.shift(i) for i in range(6)], axis=1,
               keys=['y'] + ['L%s' % i for i in range(1, 6)])
     .dropna())
X.head()
# We can fit the lagged model using statsmodels (which uses [patsy](http://patsy.readthedocs.org) to translate the formula string to a design matrix).
mod_lagged = smf.ols('y ~ trend + L1 + L2 + L3 + L4 + L5',
data=X.assign(trend=np.arange(len(X))))
res_lagged = mod_lagged.fit()
res_lagged.summary()
# There are a few problems with this approach though.
# Since our lagged values are highly correlated with each other, our regression suffers from [multicollinearity](https://en.wikipedia.org/wiki/Multicollinearity).
# That ruins our estimates of the slopes.
sns.heatmap(X.corr());
# Second, we'd intuitively expect the $\beta_i$s to gradually decline to zero.
# The immediately preceding period *should* be most important ($\beta_1$ is the largest coefficient in absolute value), followed by $\beta_2$, and $\beta_3$...
# Looking at the regression summary and the bar graph below, this isn't the case (the cause is related to multicollinearity).
# Bar chart of the lag coefficients (intercept and trend excluded).
ax = res_lagged.params.drop(['Intercept', 'trend']).plot.bar(rot=0)
plt.ylabel('Coefficient')  # BUG FIX: label typo, was 'Coefficeint'
sns.despine()
# Finally, our degrees of freedom drop since we lose two for each variable (one for estimating the coefficient, one for the lost observation as a result of the `shift`).
# At least in (macro)econometrics, each observation is precious and we're loath to throw them away, though sometimes that's unavoidable.
# ### Autocorrelation
# Another problem our lagged model suffered from is [autocorrelation](https://en.wikipedia.org/wiki/Autocorrelation) (also know as serial correlation).
# Roughly speaking, autocorrelation is when there's a clear pattern in the residuals of your regression (the observed minus the predicted).
# Let's fit a simple model of $y = \beta_0 + \beta_1 T + \epsilon$, where `T` is the time trend (`np.arange(len(y))`).
# `Results.resid` is a Series of residuals: y - ŷ
mod_trend = sm.OLS.from_formula(
'y ~ trend', data=y.to_frame(name='y')
.assign(trend=np.arange(len(y))))
res_trend = mod_trend.fit()
# Residuals (the observed minus the expected, or $\hat{e_t} = y_t - \hat{y_t}$) are supposed to be [white noise](https://en.wikipedia.org/wiki/White_noise).
# That's [one of the assumptions](https://en.wikipedia.org/wiki/Gauss–Markov_theorem) many of the properties of linear regression are founded upon.
# In this case there's a correlation between one residual and the next: if the residual at time $t$ was above expectation, then the residual at time $t + 1$ is *much* more likely to be above average as well ($e_t > 0 \implies E_t[e_{t+1}] > 0$).
#
# We'll define a helper function to plot the residuals time series, and some diagnostics about them.
def tsplot(y, lags=None, figsize=(10, 8)):
    """Plot a series with its ACF and PACF diagnostics.

    Top row: the series itself (full width). Bottom row: autocorrelation
    (left) and partial autocorrelation (right).

    Returns the (time-series, ACF, PACF) axes.
    """
    fig = plt.figure(figsize=figsize)
    grid = (2, 2)
    ax_ts = plt.subplot2grid(grid, (0, 0), colspan=2)
    ax_acf = plt.subplot2grid(grid, (1, 0))
    ax_pacf = plt.subplot2grid(grid, (1, 1))

    y.plot(ax=ax_ts)
    smt.graphics.plot_acf(y, lags=lags, ax=ax_acf)
    smt.graphics.plot_pacf(y, lags=lags, ax=ax_pacf)
    for axis in (ax_acf, ax_pacf):
        # Start past lag 0 (trivially 1.0) so the interesting lags dominate.
        axis.set_xlim(1.5)
    sns.despine()
    plt.tight_layout()
    return ax_ts, ax_acf, ax_pacf
# Calling it on the residuals from the linear trend:
tsplot(res_trend.resid, lags=36);
# The top subplot shows the time series of our residuals $e_t$, which should be white noise (but it isn't).
# The bottom shows the [autocorrelation](https://www.otexts.org/fpp/2/2#autocorrelation) of the residuals as a correlogram.
# It measures the correlation between a value and its lagged self, e.g. $corr(e_t, e_{t-1}), corr(e_t, e_{t-2}), \ldots$.
# The partial autocorrelation plot in the bottom-right shows a similar concept.
# It's partial in the sense that the value for $corr(e_t, e_{t-k})$ is the correlation between those two periods, after controlling for the values at all shorter lags.
#
# Autocorrelation is a problem in regular regressions like above, but we'll use it to our advantage when we setup an ARIMA model below.
# The basic idea is pretty sensible: if your regression residuals have a clear pattern, then there's clearly some structure in the data that you aren't taking advantage of.
# If a positive residual today means you'll likely have a positive residual tomorrow, why not incorporate that information into your forecast, and lower your forecasted value for tomorrow?
# That's pretty much what ARIMA does.
# It's important that your dataset be stationary, otherwise you run the risk of finding [spurious correlations](http://www.tylervigen.com/spurious-correlations).
# A common example is the relationship between number of TVs per person and life expectancy.
# It's not likely that there's an actual causal relationship there.
# Rather, there could be a third variable that's driving both (wealth, say).
# [Granger and Newbold (1974)](http://wolfweb.unr.edu/homepage/zal/STAT758/Granger_Newbold_1974.pdf) had some stern words for the econometrics literature on this.
#
# > We find it very curious that whereas virtually every textbook on econometric methodology contains explicit warnings of the dangers of autocorrelated errors, this phenomenon crops up so frequently in well-respected applied work.
#
# (:fire:), but in that academic passive-aggressive way.
#
# The typical way to handle non-stationarity is to difference the non-stationary variable until it is stationary.
y.to_frame(name='y').assign(Δy=lambda x: x.y.diff()).plot(subplots=True)
sns.despine()
# Our original series actually doesn't look *that* bad.
# It doesn't look like nominal GDP say, where there's a clearly rising trend.
# But we have more rigorous methods for detecting whether a series is non-stationary than simply plotting and squinting at it.
# One popular method is the Augmented Dickey-Fuller test.
# It's a statistical hypothesis test that roughly says:
#
# $H_0$ (null hypothesis): $y$ is non-stationary, needs to be differenced
#
# $H_A$ (alternative hypothesis): $y$ is stationary, doesn't need to be differenced
#
# I don't want to get into the weeds on exactly what the test statistic is, and what the distribution looks like.
# This is implemented in statsmodels as [`smt.adfuller`](http://www.statsmodels.org/dev/generated/statsmodels.tsa.stattools.adfuller.html).
# The return type is a bit busy for me, so we'll wrap it in a `namedtuple`.
# +
from collections import namedtuple
ADF = namedtuple("ADF", "adf pvalue usedlag nobs critical icbest")
# -
ADF(*smt.adfuller(y))._asdict()
# So we failed to reject the null hypothesis that the original series was non-stationary.
# Let's difference it.
ADF(*smt.adfuller(y.diff().dropna()))._asdict()
# This looks better.
# It's not statistically significant at the 5% level, but who cares what statisticians say anyway.
#
# We'll fit another OLS model of $\Delta y = \beta_0 + \beta_1 L \Delta y_{t-1} + e_t$
data = (y.to_frame(name='y')
.assign(Δy=lambda df: df.y.diff())
.assign(LΔy=lambda df: df.Δy.shift()))
mod_stationary = smf.ols('Δy ~ LΔy', data=data.dropna())
res_stationary = mod_stationary.fit()
tsplot(res_stationary.resid, lags=24);
# So we've taken care of multicollinearity, autocorrelation, and stationarity, but we still aren't done.
# ## Seasonality
# We have strong monthly seasonality:
smt.seasonal_decompose(y).plot();
# There are a few ways to handle seasonality.
# We'll just rely on the `SARIMAX` method to do it for us.
# For now, recognize that it's a problem to be solved.
# ## ARIMA
#
# So, we've sketched the problems with regular old regression: multicollinearity, autocorrelation, non-stationarity, and seasonality.
# Our tool of choice, `smt.SARIMAX`, which stands for Seasonal ARIMA with eXogenous regressors, can handle all these.
# We'll walk through the components in pieces.
#
# ARIMA stands for AutoRegressive Integrated Moving Average.
# It's a relatively simple yet flexible way of modeling univariate time series.
# It's made up of three components, and is typically written as $\mathrm{ARIMA}(p, d, q)$.
# ### [AutoRegressive](https://www.otexts.org/fpp/8/3)
#
# The idea is to predict a variable by a linear combination of its lagged values (*auto*-regressive as in regressing a value on its past *self*).
# An AR(p), where $p$ represents the number of lagged values used, is written as
#
# $$y_t = c + \phi_1 y_{t-1} + \phi_2 y_{t-2} + \ldots + \phi_p y_{t-p} + e_t$$
#
# $c$ is a constant and $e_t$ is white noise.
# This looks a lot like a linear regression model with multiple predictors, but the predictors happen to be lagged values of $y$ (though they are estimated differently).
# ### Integrated
#
# Integrated is like the opposite of differencing, and is the part that deals with stationarity.
# If you have to difference your dataset 1 time to get it stationary, then $d=1$.
# We'll introduce one bit of notation for differencing: $\Delta y_t = y_t - y_{t-1}$ for $d=1$.
# ### [Moving Average](https://www.otexts.org/fpp/8/4)
#
# MA models look somewhat similar to the AR component, but it's dealing with different values.
#
# $$y_t = c + e_t + \theta_1 e_{t-1} + \theta_2 e_{t-2} + \ldots + \theta_q e_{t-q}$$
#
# $c$ again is a constant and $e_t$ again is white noise.
# But now the coefficients are the *residuals* from previous predictions.
# ### Combining
#
# Putting that together, an ARIMA(1, 1, 1) process is written as
#
# $$\Delta y_t = c + \phi_1 \Delta y_{t-1} + \theta_t e_{t-1} + e_t$$
#
# Using *lag notation*, where $L y_t = y_{t-1}$, i.e. `y.shift()` in pandas, we can rewrite that as
#
# $$(1 - \phi_1 L) (1 - L)y_t = c + (1 + \theta L)e_t$$
#
# That was for our specific $\mathrm{ARIMA}(1, 1, 1)$ model. For the general $\mathrm{ARIMA}(p, d, q)$, that becomes
#
# $$(1 - \phi_1 L - \ldots - \phi_p L^p) (1 - L)^d y_t = c + (1 + \theta L + \ldots + \theta_q L^q)e_t$$
#
# We went through that *extremely* quickly, so don't feel bad if things aren't clear.
# Fortunately, the model is pretty easy to use with statsmodels (using it *correctly*, in a statistical sense, is another matter).
mod = smt.SARIMAX(y, trend='c', order=(1, 1, 1))
res = mod.fit()
tsplot(res.resid[2:], lags=24);
res.summary()
# There's a bunch of output there with various tests, estimated parameters, and information criteria.
# Let's just say that things are looking better, but we still haven't accounted for seasonality.
#
# A seasonal ARIMA model is written as $\mathrm{ARIMA}(p,d,q)×(P,D,Q)_s$.
# Lowercase letters are for the non-seasonal component, just like before. Upper-case letters are a similar specification for the seasonal component, where $s$ is the periodicity (4 for quarterly, 12 for monthly).
#
# It's like we have two processes, one for non-seasonal component and one for seasonal components, and we multiply them together with regular algebra rules.
#
# The general form of that looks like (quoting the [statsmodels docs](http://www.statsmodels.org/dev/examples/notebooks/generated/statespace_sarimax_stata.html) here)
#
# $$\phi_p(L)\tilde{\phi}_P(L^S)\Delta^d\Delta_s^D y_t = A(t) + \theta_q(L)\tilde{\theta}_Q(L^s)e_t$$
#
# where
#
# - $\phi_p(L)$ is the non-seasonal autoregressive lag polynomial
# - $\tilde{\phi}_P(L^S)$ is the seasonal autoregressive lag polynomial
# - $\Delta^d\Delta_s^D$ is the time series, differenced $d$ times, and seasonally differenced $D$ times.
# - $A(t)$ is the trend polynomial (including the intercept)
# - $\theta_q(L)$ is the non-seasonal moving average lag polynomial
# - $\tilde{\theta}_Q(L^s)$ is the seasonal moving average lag polynomial
#
# I don't find that to be very clear, but maybe an example will help.
# We'll fit a seasonal ARIMA$(1,1,2)×(0, 1, 2)_{12}$.
#
# So the nonseasonal component is
#
# - $p=1$: period autoregressive: use $y_{t-1}$
# - $d=1$: one first-differencing of the data (one month)
# - $q=2$: use the previous two non-seasonal residuals, $e_{t-1}$ and $e_{t-2}$, to forecast
#
# And the seasonal component is
#
# - $P=0$: Don't use any previous seasonal values
# - $D=1$: Difference the series 12 periods back: `y.diff(12)`
# - $Q=2$: Use the two previous seasonal residuals
# Seasonal ARIMA(1,1,2)x(0,1,2)_12 with a constant trend, as specified
# in the text above. simple_differencing=False keeps the differencing
# inside the state-space representation rather than pre-differencing.
mod_seasonal = smt.SARIMAX(y, trend='c',
                           order=(1, 1, 2), seasonal_order=(0, 1, 2, 12),
                           simple_differencing=False)
res_seasonal = mod_seasonal.fit()
res_seasonal.summary()
# Skip the first 12 residuals — presumably start-up values from the
# seasonal difference (D=1, s=12); confirm against the model's burn-in.
tsplot(res_seasonal.resid[12:], lags=24);
# Things look much better now.
#
# One thing I didn't really talk about is order selection. How to choose $p, d, q, P, D$ and $Q$.
# R's forecast package does have a handy `auto.arima` function that does this for you.
# Python / statsmodels don't have that at the minute.
# The alternative seems to be experience (boo), intuition (boo), and good-old grid-search.
# You can fit a bunch of models for a bunch of combinations of the parameters and use the [AIC](https://en.wikipedia.org/wiki/Akaike_information_criterion) or [BIC](https://en.wikipedia.org/wiki/Bayesian_information_criterion) to choose the best.
# [Here](https://www.otexts.org/fpp/8/7) is a useful reference, and [this](http://stackoverflow.com/a/22770973) StackOverflow answer recommends a few options.
#
# ## Forecasting
# Now that we fit that model, let's put it to use.
# First, we'll make a bunch of one-step ahead forecasts.
# At each point (month), we take the history up to that point and make a forecast for the next month.
# So the forecast for January 2014 has available all the data up through December 2013.
pred = res_seasonal.get_prediction(start='2001-03-01')
pred_ci = pred.conf_int()
ax = y.plot(label='observed')
pred.predicted_mean.plot(ax=ax, label='Forecast', alpha=.7)
ax.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.2)
ax.set_ylabel("Monthly Flights")
plt.legend()
sns.despine()
# There are a few places where the observed series slips outside the 95% confidence interval.
# The series seems especially unstable before 2005.
#
# Alternatively, we can make *dynamic* forecasts as of some month (January 2013 in the example below).
# That means the forecast from that point forward only use information available as of January 2013.
# The predictions are generated in a similar way: a bunch of one-step forecasts.
# Only instead of plugging in the *actual* values beyond January 2013, we plug in the *forecast* values.
pred_dy = res_seasonal.get_prediction(start='2002-03-01', dynamic='2013-01-01')
pred_dy_ci = pred_dy.conf_int()
# +
ax = y.plot(label='observed')
pred_dy.predicted_mean.plot(ax=ax, label='Forecast')
ax.fill_between(pred_dy_ci.index,
pred_dy_ci.iloc[:, 0],
pred_dy_ci.iloc[:, 1], color='k', alpha=.25)
ax.set_ylabel("Monthly Flights")
# Highlight the forecast area
ax.fill_betweenx(ax.get_ylim(), pd.Timestamp('2013-01-01'), y.index[-1],
alpha=.1, zorder=-1)
ax.annotate('Dynamic $\\longrightarrow$', (pd.Timestamp('2013-02-01'), 550))
plt.legend()
sns.despine()
# -
# ## Resources
#
# This is a collection of links for those interested.
#
# ### Time series modeling in Python
#
# + [Statsmodels Statespace Notebooks](http://www.statsmodels.org/dev/examples/index.html#statespace)
# + [Statsmodels VAR tutorial](http://www.statsmodels.org/dev/vector_ar.html#var)
# - [ARCH Library by <NAME>](https://github.com/bashtage/arch)
#
# ### General Textbooks
#
# - [Forecasting: Principles and Practice](https://www.otexts.org/fpp/): A great introduction
# - [Stock and Watson](http://wps.aw.com/aw_stock_ie_3/178/45691/11696965.cw/): Readable undergraduate resource, has a few chapters on time series
# - [Greene's Econometric Analysis](http://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm): My favorite PhD level textbook
# - [Hamilton's Time Series Analysis](http://www.amazon.com/Time-Analysis-James-Douglas-Hamilton/dp/0691042896): A classic
# - [Lutkehpohl's New Introduction to Multiple Time Series Analysis](http://www.amazon.com/New-Introduction-Multiple-Time-Analysis/dp/3540262393): Extremely dry, but useful if you're implementing this stuff
#
# ## Conclusion
#
# Congratulations if you made it this far, this piece just kept growing (and I still had to cut stuff).
# The main thing cut was talking about how `SARIMAX` is implemented on top of using statsmodels' statespace framework.
# The statespace framework, developed mostly by <NAME> over the past couple years, is really nice.
# You can pretty easily [extend it](http://www.statsmodels.org/dev/examples/notebooks/generated/statespace_local_linear_trend.html) with custom models, but still get all the benefits of the framework's estimation and results facilities.
# I'd recommend reading the [notebooks](http://www.statsmodels.org/dev/examples/index.html#statespace).
# We also didn't get to talk at all about <NAME>'s work on VARs, but maybe some other time.
#
# As always, [feedback is welcome](https://twitter.com/tomaugspurger).
|
.ipynb_checkpoints/modern_7_timeseries-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
# Demo: run a CartPole episode with random actions, redrawing each
# rendered frame in the notebook output until the episode ends (or
# 100 steps elapse), then print the number of steps taken.
import IPython
from matplotlib import pyplot as plt
import gym

env = gym.make('CartPole-v1')  # initialize the environment
env.reset()  # reset to the initial state
env.render(mode='rgb_array')

t = 0
# Take one arbitrary first action from the initial state.
action = env.action_space.sample()
# NOTE(review): assumes the classic 4-tuple step API (gym < 0.26),
# where step() returns (obs, reward, done, info) — confirm gym version.
state, _, done, _ = env.step(action)
env.render(mode='rgb_array')

# Set up the figure for frame-by-frame display.
plt.figure()
plt.clf()
plt.title('Example extracted screen')

while True:
    action = env.action_space.sample()
    # Advance the environment by one step.
    state, _, done, _ = env.step(action)
    # Render the post-step frame as an RGB array for visualization.
    current_screen = env.render(mode='rgb_array')
    # Redraw the current frame in the notebook output.
    plt.pause(0.7)  # pause a bit so that plots are updated
    IPython.display.clear_output(wait=True)
    IPython.display.display(plt.gcf())
    plt.title('Action: {}'.format(action))
    plt.imshow(current_screen, interpolation='none')
    t = t + 1
    if done:
        break
    if t > 100:
        break

plt.show()
env.close()
print(t)
# + pycharm={"name": "#%%\n"}
# Scratch check: look up the label corresponding to a given text entry.
text_input = ['1', '233']
Label_list = ['11', '1233']
# BUG FIX: ``text_input['1']`` raised TypeError (list indices must be
# integers, not str). Use the entry's *position* to index the parallel
# label list. TODO(review): confirm this matches the original intent.
text_i = text_input.index('1')
prob_max_i = Label_list[text_i]
# + pycharm={"name": "#%%\n"}
|
notebook/test01.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # **Machine Learning using scikit-learn**
# This notebook demonstrates the usage of pipelines to promote best practices for Machine Learning in Python. As a reminder, below are the following best practices we should follow when performing Machine Learning in Python:
#
# 1. Perform all pre-processing steps within cross-validation
# 2. Measure model performance (and model selection) using cross-validation
# 3. Follow literate programming practices to make code readable and make collaboration easier
#
# ## Problem Formulation
#
# In this example, we will use <NAME>'s Palmer Penguins dataset, available here: https://github.com/allisonhorst/palmerpenguins.
#
# The dataset contains data on 344 penguins. There are three different species of penguins in the dataset, collected from three separate islands in the Palmer Archipelago, Antarctica.
# +
#tables and visualizations
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#machine learning
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, StandardScaler
from sklearn import config_context
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay
# -
# ## Load Data
#
# Here we first load the data into python using pandas and read it in as a pandas dataframe which is the format which we will use throughout the example.
penguins = pd.read_csv('https://raw.githubusercontent.com/allisonhorst/palmerpenguins/master/inst/extdata/penguins.csv')
display(penguins.head())
penguins.info()
penguins.isna().sum()
# ## Data cleaning and EDA
#
# We can now explore our data. We leave this exercise to the reader. For now, we can observe that there are a few NA values which will likely need imputation. We'll wait for this step so that we can put it within our training loop. For now, we'll just drop all of the sex NAs out of the dataframe.
penguins = penguins.dropna(subset=['sex'])
penguins.shape
# +
class_column = 'sex'
random_seed = 2435
X_train, X_test, y_train, y_test = train_test_split(penguins.drop(columns=class_column), penguins[class_column],
test_size=0.25, random_state=random_seed, stratify=penguins[class_column])
# -
# Quick sanity check to make sure that everything seems correct:
# +
# X Train
print('On X train: ')
print('X train dimensions: ', X_train.shape)
display(X_train.head())
# X test
print('\nOn X test: ')
print('X test dimensions: ', X_test.shape)
display(X_test.head())
# +
# X Train
print('On y train: ')
print('y train dimensions: ', y_train.shape)
display(y_train.head())
# X test
print('\nOn y test: ')
print('y test dimensions: ', y_test.shape)
display(y_test.head())
# -
# ## Establish the training pipeline
#
# We can now establish the training pipeline for our models. Since this is a process we would need to repeat several times, it's good to essentially functionalize the process so we do not need to re-write redundant code. Here, we can impute some values that were missing, and encode any categorical values. Note that these pipelines will change according to the model and methodology you choose - additionally, the pipelines will also change depending on the data types of the columns in your dataset.
#individual pipelines for differing datatypes
cat_pipeline = Pipeline(steps=[('cat_impute', SimpleImputer(missing_values=np.nan, strategy='most_frequent')),
('onehot_cat', OneHotEncoder(drop='if_binary'))])
num_pipeline = Pipeline(steps=[('impute_num', SimpleImputer(missing_values=np.nan, strategy='mean')),
('scale_num', StandardScaler())])
#establish preprocessing pipeline by columns
preproc = ColumnTransformer([('cat_pipe', cat_pipeline, make_column_selector(dtype_include=object)),
('num_pipe', num_pipeline, make_column_selector(dtype_include=np.number))],
remainder='passthrough')
# +
#generate the whole modeling pipeline with preprocessing
pipe = Pipeline(steps=[('preproc', preproc),
('mdl', LogisticRegression(penalty='elasticnet', solver='saga', tol=0.01))])
#visualization for steps
with config_context(display='diagram'):
display(pipe)
# -
# ## Cross-validation with hyperparameter tuning
#
# Now that we have our pipelines, we can now use this as part of cross validation and hyperparameter tuning.
tuning_grid = {'mdl__l1_ratio' : np.linspace(0,1,5),
'mdl__C': np.logspace(-1, 6, 3) }
grid_search = GridSearchCV(pipe, param_grid = tuning_grid, cv = 5, return_train_score=True)
tuning_grid
grid_search.fit(X_train, y_train)
print(grid_search.best_score_)
grid_search.best_params_
pd.DataFrame(grid_search.cv_results_)
# ## Final fit
#
# The final fit here is already present in the generated model due to the way we set our parameters in the grid search. If we want to look at the performance, we can do so. Here is a non-helpful description of the best model:
grid_search.best_estimator_
# ## Variable importance
#
# Now we assess the importance in the selected model to reveal any potential insights.
grid_search.classes_
vip = grid_search.best_estimator_['mdl'].coef_[0]
vip
# +
# Variable-importance plot for the fitted logistic regression:
# pair each coefficient with its (preprocessed) feature name, then plot
# magnitudes sorted descending, colored by coefficient sign.

#get names in correct preproc order
# NOTE(review): ``get_feature_names()`` was removed in scikit-learn 1.2;
# newer versions require ``get_feature_names_out()`` — confirm the
# pinned sklearn version before upgrading.
cat_names = grid_search.best_estimator_.named_steps['preproc'].transformers_[0][1].named_steps['onehot_cat'].get_feature_names()
num_names = grid_search.best_estimator_.named_steps['preproc'].transformers_[1][2]

#create df with vip info (categorical names first, matching preproc order)
coef_info = pd.DataFrame({'feat_names':np.hstack([cat_names, num_names]), 'vip': vip})

#get sign and magnitude information
coef_info = coef_info.assign(coef_mag = abs(coef_info['vip']),
                             coef_sign = np.sign(coef_info['vip']))

#sort and plot (largest-magnitude coefficients first; hue encodes sign)
coef_info = coef_info.set_index('feat_names').sort_values(by='coef_mag', ascending=False)
sns.barplot(y=coef_info.index, x='coef_mag', hue='coef_sign', data=coef_info, orient='h', dodge=False);
# -
# ## Performance metrics on test data
#
#
# Here, we can see the performance of the model, which is pretty nice! We can also look into different scores specifically for more insight into the performance.
# + tags=[]
print(classification_report(y_test, grid_search.best_estimator_.predict(X_test)))
# +
cm = confusion_matrix(y_test, grid_search.best_estimator_.predict(X_test))
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
display_labels=grid_search.classes_)
disp.plot()
plt.show()
# -
# ## Try it yourself!
#
# Now that we've seen the power of pipelines in sklearn, let's now try implementing our own pipelines.
# +
# Try implementing a pipeline where we use median imputation for numeric columns instead of mean imputation.
#individual pipelines for differing datatypes
cat_pipeline = Pipeline(steps=[('cat_impute', SimpleImputer(missing_values=np.nan, strategy='most_frequent')),
('onehot_cat', OneHotEncoder(drop='if_binary'))])
num_pipeline = Pipeline(steps=[('impute_num', SimpleImputer(missing_values=np.nan, strategy='median')),
('scale_num', StandardScaler())])
#establish preprocessing pipeline by columns
preproc = ColumnTransformer([('cat_pipe', cat_pipeline, make_column_selector(dtype_include=object)),
('num_pipe', num_pipeline, make_column_selector(dtype_include=np.number))],
remainder='passthrough')
# -
# With this new pipeline, now train a Random Forest model. Refer to the documentation for the parameters for the random forest classifier here: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
#
# Is the performance better?
# +
#generate the whole modeling pipeline with preprocessing
pipe = Pipeline(steps=[('preproc', preproc),
('mdl', RandomForestClassifier())])
#visualization for steps
with config_context(display='diagram'):
display(pipe)
# -
# Now perform cross validation and modify the n_estimators parameter to values of [100, 200,500] and max_depth parameter to values of [10,15,50] for the random forest classifier for hyperparameter tuning.
# Random-forest hyperparameter grid. BUG FIX: the exercise text above
# asks for max_depth in [10, 15, 50]; the code had [10, 15, 20].
tuning_grid = {'mdl__n_estimators': [100, 200, 500],
               'mdl__max_depth': [10, 15, 50]}
grid_search = GridSearchCV(pipe, param_grid = tuning_grid, cv = 5, return_train_score=True)
tuning_grid
grid_search.fit(X_train, y_train)
print(grid_search.best_score_)
grid_search.best_params_
pd.DataFrame(grid_search.cv_results_)
grid_search.best_estimator_
grid_search.classes_
vip = grid_search.best_estimator_['mdl'].feature_importances_
vip
# +
#get names in correct preproc order
cat_names = grid_search.best_estimator_.named_steps['preproc'].transformers_[0][1].named_steps['onehot_cat'].get_feature_names()
num_names = grid_search.best_estimator_.named_steps['preproc'].transformers_[1][2]
#create df with vip info
coef_info = pd.DataFrame({'feat_names':np.hstack([cat_names, num_names]), 'vip': vip})
#get sign and magnitude information
coef_info = coef_info.assign(coef_mag = abs(coef_info['vip']),
coef_sign = np.sign(coef_info['vip']))
#sort and plot
coef_info = coef_info.set_index('feat_names').sort_values(by='coef_mag', ascending=False)
sns.barplot(y=coef_info.index, x='coef_mag', hue='coef_sign', data=coef_info, orient='h', dodge=False);
# -
print(classification_report(y_test, grid_search.best_estimator_.predict(X_test)))
# +
cm = confusion_matrix(y_test, grid_search.best_estimator_.predict(X_test))
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
display_labels=grid_search.classes_)
disp.plot()
plt.show()
# -
# # GBM
# +
from sklearn.ensemble import GradientBoostingClassifier
#generate the whole modeling pipeline with preprocessing
pipe = Pipeline(steps=[('preproc', preproc),
('mdl', GradientBoostingClassifier())])
#visualization for steps
with config_context(display='diagram'):
display(pipe)
# -
tuning_grid = {'mdl__n_estimators' : [100, 200 ,500],
'mdl__max_depth': [10, 15, 20],
'mdl__learning_rate': [1.0, 0.5],
'mdl__random_state' : [0]}
grid_search = GridSearchCV(pipe, param_grid = tuning_grid, cv = 5, return_train_score=True)
tuning_grid
grid_search.fit(X_train, y_train)
print(grid_search.best_score_)
grid_search.best_params_
pd.DataFrame(grid_search.cv_results_)
grid_search.best_estimator_
# # Light-GBM
import lightgbm
from lightgbm import LGBMClassifier
# +
pipe = Pipeline(steps=[('preproc', preproc),
('mdl', LGBMClassifier())])
#visualization for steps
with config_context(display='diagram'):
display(pipe)
# -
tuning_grid = {'mdl__num_leaves' : [10, 20, 30],
'mdl__random_state' : [0]}
grid_search = GridSearchCV(pipe, param_grid = tuning_grid, cv = 5, return_train_score=True)
tuning_grid
grid_search.fit(X_train, y_train)
print(grid_search.best_score_)
grid_search.best_params_
|
sklearn-demo-solutions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cvxpy as cp
import seaborn as sns
import emm
import warnings
import sklearn as sk
warnings.filterwarnings('ignore')
# Plot styles
plt.style.use('seaborn-notebook')
import matplotlib.pyplot as plt
SMALL_SIZE = 15
MEDIUM_SIZE = 16
BIGGER_SIZE = 22
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# +
processed_dir = "../assets/data/processed/pima_diabetes/"
# Get data into dataframe
df = pd.read_csv(processed_dir + "diabetes.csv",index_col=0)
# Replace 0 with nan
nan_cols = ['Glucose', 'BloodPressure','SkinThickness','Insulin','BMI']
df[nan_cols]=df[nan_cols].replace({'0':np.nan,0:np.nan})
# -
fig, ax = plt.subplots(4,2, figsize=(16,16))
sns.distplot(df.Age, bins = 20, ax=ax[0,0])
sns.distplot(df.Pregnancies, bins = 20, ax=ax[0,1])
sns.distplot(df.Glucose, bins = 20, ax=ax[1,0])
sns.distplot(df.BloodPressure, bins = 20, ax=ax[1,1])
sns.distplot(df.SkinThickness, bins = 20, ax=ax[2,0])
sns.distplot(df.Insulin, bins = 20, ax=ax[2,1])
sns.distplot(df.DiabetesPedigreeFunction, bins = 20, ax=ax[3,0])
sns.distplot(df.BMI, bins = 20, ax=ax[3,1])
plt.show()
marginals_mean = df.groupby('Outcome').mean()
m_0 = marginals_mean.loc[0]
m_1 = marginals_mean.loc[1]
display(marginals_mean.T)
print(marginals_mean.T.round(2).to_latex())
df[df['Outcome']==1].shape[0] / df.shape[0]
# +
from emm.losses import LeastSquaresLoss
from emm.losses import CorpusKLLoss
from emm.losses import EqualityLoss
# NOTE(review): these CorpusKLLoss objects are only referenced by the
# commented-out KL variant of `margs` below; the active configuration
# uses LeastSquaresLoss instead.
loss_gl0 = CorpusKLLoss(m_0['Glucose'])
loss_gl1 = CorpusKLLoss(m_1['Glucose'])
loss_bmi0 = CorpusKLLoss(m_0['BMI'])
loss_bmi1 =CorpusKLLoss(m_1['BMI'])
loss_age0 = CorpusKLLoss(m_0['Age'])
loss_age1 = CorpusKLLoss(m_1['Age'])
# Alternative (KL-based) marginal configuration, kept for reference:
# margs = {0 : [emm.reweighting.marginal('Glucose', loss_gl0.fun, loss_gl0),
#               emm.reweighting.marginal('BMI', loss_gl1.fun, loss_gl1),
#               emm.reweighting.marginal('Age', loss_bmi0.fun, loss_bmi0)],
#          1 : [emm.reweighting.marginal('Glucose', loss_bmi1.fun, loss_bmi1),
#               emm.reweighting.marginal('BMI', loss_age0.fun, loss_age0),
#               emm.reweighting.marginal('Age', loss_age1.fun, loss_age1)]
#          }
# Active configuration: match the per-class means of Glucose/BMI/Age via
# least squares.  Class 1 gets scale=100, i.e. a much heavier weight on
# matching its marginals than class 0.
margs = {0 : [emm.reweighting.marginal('Glucose', 'mean', LeastSquaresLoss(m_0['Glucose']),True),
              emm.reweighting.marginal('BMI', 'mean', LeastSquaresLoss(m_0['BMI']),True),
              emm.reweighting.marginal('Age', 'mean', LeastSquaresLoss(m_0['Age']),True)],
         1 : [emm.reweighting.marginal('Glucose', 'mean', LeastSquaresLoss(m_1['Glucose'], scale=100),True),
              emm.reweighting.marginal('BMI', 'mean', LeastSquaresLoss(m_1['BMI'], scale=100),True),
              emm.reweighting.marginal('Age', 'mean', LeastSquaresLoss(m_1['Age'], scale =100),True)]
         }
# Alternative (hard-equality) configuration, kept for reference:
# margs = {0 : [emm.reweighting.marginal('Glucose', 'mean', EqualityLoss(m_0['Glucose'])),
#               emm.reweighting.marginal('BMI', 'mean', EqualityLoss(m_0['BMI'])),
#               emm.reweighting.marginal('Age', 'mean', EqualityLoss(m_0['Age']))],
#          1 : [emm.reweighting.marginal('Glucose', 'mean', EqualityLoss(m_1['Glucose'])),
#               emm.reweighting.marginal('BMI', 'mean', EqualityLoss(m_1['BMI'])),
#               emm.reweighting.marginal('Age', 'mean', EqualityLoss(m_1['Age']))]
#          }
# Target keeps the labels; the corpus to be reweighted has them dropped.
target = df
corpus = df.drop(columns = 'Outcome')
# Hyper-parameter grids for the downstream classifiers evaluated on the
# reweighted corpus.
from sklearn.linear_model import LogisticRegression
param_grid_log = {"classifier" : [LogisticRegression()],
                  "classifier__C" : np.logspace(-3,3,5)}
from sklearn.tree import DecisionTreeClassifier
param_grid_tree = {'classifier' : [DecisionTreeClassifier()],
                   'classifier__max_depth' : [5,10,20,50]}
from sklearn.ensemble import RandomForestClassifier
param_grid_rf = {'classifier' : [RandomForestClassifier()],
                 'classifier__bootstrap': [True],
                 'classifier__max_depth': [5,10,20,50],
                 'classifier__max_features' : ['sqrt'],
                 'classifier__n_estimators': [200,300,400]
                 }
param_grid = [param_grid_log, param_grid_tree, param_grid_rf]
# Imputation is needed because the zeros were converted to NaN above.
from sklearn.impute import SimpleImputer
pipe = [("imputer", SimpleImputer())]
# Reweight the corpus to match `margs` (entropy-regularised) and fit all
# model grids; returns the weighted corpus, JS distances and model metrics.
(rw_corpus, js, ml) = emm.metrics.multiple_models(target,corpus,margs,
    param_grid, regularizer = emm.regularizers.EntropyRegularizer(),
    lam=0.01, verbose=True, pipeline_steps = pipe,
    scoring = sk.metrics.roc_auc_score, metrics=[sk.metrics.accuracy_score, sk.metrics.roc_auc_score, sk.metrics.confusion_matrix])
# +
fig, ax = plt.subplots(2,3, figsize=(16,8))
sns.distplot(df[df['Outcome']==0].Glucose, bins = 'auto', ax=ax[0,0])
sns.distplot(df[df['Outcome']==0].BMI, bins = 'auto', ax=ax[0,1])
sns.distplot(df[df['Outcome']==0].Age, bins = 'auto', ax=ax[0,2])
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==0].Glucose, rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[0,0])
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==0].BMI, rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[0,1])
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==0].Age, rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[0,2])
sns.distplot(df[df['Outcome']==1].Glucose, bins = 'auto', ax=ax[1,0])
sns.distplot(df[df['Outcome']==1].BMI, bins = 'auto', ax=ax[1,1])
sns.distplot(df[df['Outcome']==1].Age, bins = 'auto', ax=ax[1,2])
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==1].Glucose, rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[1,0])
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==1].BMI, rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[1,1])
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==1].Age, rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[1,2])
plt.tight_layout()
plt.show()
# -
ax
# +
fig, ax = plt.subplots( figsize=(10, 12) , nrows=3, ncols=2)
ax = ax.flatten()
sns.distplot(df[df['Outcome']==0].Glucose, bins = 'auto', ax=ax[0], label='Target', kde_kws={'linewidth':3})
sns.distplot(df[df['Outcome']==0].BMI, bins = 'auto', ax=ax[2], label='Target', kde_kws={'linewidth':3})
sns.distplot(df[df['Outcome']==0].Age, bins = 'auto', ax=ax[4],label='Target', kde_kws={'linewidth':3})
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==0].Glucose, rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[0],label='Reweighted corpus', kde_kws={'linewidth':3})
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==0].BMI, rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[2], label='Reweighted corpus', kde_kws={'linewidth':3})
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==0].Age, rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[4],label='Reweighted corpus', kde_kws={'linewidth':3})
ax[4].legend()
sns.distplot(df[df['Outcome']==1].Glucose, bins = 'auto', ax=ax[1], kde_kws={'linewidth':3})
sns.distplot(df[df['Outcome']==1].BMI, bins = 'auto', ax=ax[3], kde_kws={'linewidth':3})
sns.distplot(df[df['Outcome']==1].Age, bins = 'auto', ax=ax[5], kde_kws={'linewidth':3})
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==1].Glucose, rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[1], kde_kws={'linewidth':3})
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==1].BMI, rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[3], kde_kws={'linewidth':3})
emm.utils.weighted_hist(rw_corpus[rw_corpus['Outcome']==1].Age, rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[5], kde_kws={'linewidth':3})
fig.set_facecolor('w')
#plt.suptitle('Marginal feature distribution for target and reweighted corpus')
ax[0].set_title('Label 0', fontsize=24)
ax[1].set_title('Label 1', fontsize=24)
plt.tight_layout()
plt.savefig('PIMA_dist_reweighted.jpg')
plt.show()
# -
ml
js
print(pd.DataFrame(js).to_latex())
# Baseline for the JS distance: duplicate the corpus once per class with
# uniform weights, i.e. "no reweighting at all".  (The same computation is
# repeated near the end of this notebook.)
m = len(corpus)
base_test = pd.concat([corpus.copy(), corpus.copy()], ignore_index=True)
base_test['weights'] = np.ones(2*m) /(2*m)
base_test['Outcome'] = np.concatenate([np.zeros(m), np.ones(m)],axis=0)
emm.compute_js_distance(target,base_test)
# +
fig, ax = plt.subplots(figsize=(10, 12) , nrows=3, ncols=2, sharey=True)
ax = ax.flatten()
sns.ecdfplot(df[df['Outcome']==0].Glucose, ax=ax[0], label='Target')
sns.ecdfplot(df[df['Outcome']==0].BMI, ax=ax[2], label='Target')
sns.ecdfplot(df[df['Outcome']==0].Age, ax=ax[4], label='Target')
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==0].Glucose, weights=rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[0], label='Reweighted corpus')
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==0].BMI, weights=rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[2], label='Reweighted corpus')
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==0].Age, weights=rw_corpus[rw_corpus['Outcome']==0].weights, ax=ax[4], label='Reweighted corpus')
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==0].Glucose, ax=ax[0], label='Unweighted corpus')
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==0].BMI, ax=ax[2], label='Unweighted corpus')
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==0].Age, ax=ax[4], label='Unweighted corpus')
ax[0].legend()
sns.ecdfplot(df[df['Outcome']==1].Glucose, ax=ax[1])
sns.ecdfplot(df[df['Outcome']==1].BMI, ax=ax[3])
sns.ecdfplot(df[df['Outcome']==1].Age, ax=ax[5])
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==1].Glucose, weights=rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[1])
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==1].BMI, weights=rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[3])
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==1].Age, weights=rw_corpus[rw_corpus['Outcome']==1].weights, ax=ax[5])
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==1].Glucose, ax=ax[1])
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==1].BMI, ax=ax[3])
sns.ecdfplot(x=rw_corpus[rw_corpus['Outcome']==1].Age, ax=ax[5])
fig.set_facecolor('w')
#plt.suptitle('Conditional feature CDF for target, reweighted corpus, unweighted corpus')
plt.tight_layout()
plt.savefig('PIMA_cdf_reweighted.jpg')
plt.show()
# -
print(emm.utils.weighted_mean(rw_corpus.drop(columns=['Outcome', 'weights']), rw_corpus['weights']))
target['Outcome'].value_counts()
corpus.mean()
base_test = pd.concat([corpus.copy(), corpus.copy()], ignore_index=True)
base_test['weights'] = np.ones(2*m) /(2*m)
base_test['Outcome'] = np.concatenate([np.zeros(m), np.ones(m)],axis=0)
emm.compute_js_distance(target,base_test)
marginals_mean
|
notebooks/pima_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PageRank
# #### Author : <NAME>
#
# In this notebook, we will show how to use multi-GPU features in cuGraph to compute the PageRank of each user in Twitter's dataset.
#
# Please be aware that your system may be different, and you may need to modify the code or install packages to run the below examples. If you think you have found a bug or an error, please file an issue in [cuGraph](https://github.com/rapidsai/cugraph/issues)
#
# This notebook was run on 2 NVIDIA Tesla V100 GPUs (connected with nvlink) using RAPIDS 0.9.0 and CUDA 10.0.
#
# ## Introduction
# Pagerank is measure of the relative importance of a vertex based on the relative importance of its neighbors. PageRank was invented by Google Inc. and is (was) used to rank its search results. PageRank uses the connectivity information of a graph to rank the importance of each vertex. See [Wikipedia](https://en.wikipedia.org/wiki/PageRank) for more details on the algorithm.
#
# CuGraph's multi-GPU features leverage Dask. RAPIDS has other projects based on Dask such as dask-cudf and dask-cuda. These products will also be used in this example. Check out [RAPIDS.ai](https://rapids.ai/) to learn more about these technologies.
#
# ---
#
# To compute the Pagerank with cuGraph we use:<br>
#
# ```python
# cugraph.dask.pagerank.pagerank(edge_list, alpha=0.85, max_iter=30)
# ```
# Parameters
#
# * *edge_list* : `dask_cudf.DataFrame`<br>
# Contain the connectivity information as an edge list. Source 'src' and destination 'dst' columns must be of type 'int32'. Edge weights are not used for this algorithm. Indices must be in the range [0, V-1], where V is the global number of vertices. The input edge list should be provided in dask-cudf DataFrame with one partition per GPU.
# * *alpha* : `float`<br>
# The damping factor alpha represents the probability to follow an outgoing edge, standard value is 0.85. Thus, 1.0-alpha is the probability to “teleport” to a random vertex. Alpha should be greater than 0.0 and strictly lower than 1.0.
# * *max_iter* : `int`<br>
# The maximum number of iterations before an answer is returned. If this value is lower or equal to 0 cuGraph will use the default value, which is 30. In this notebook, we will use 20 to compare against published results.<br>
#
# Returns
#
# * *PageRank* : `dask_cudf.DataFrame`<br>
# Dask GPU DataFrame containing two columns of size V: the vertex identifiers and the corresponding PageRank values.
# ## Data
# We will be analyzing 41.7 million user profiles and 1.47 billion social relations from the Twitter dataset. The CSV file is 26GB and was collected in :<br>
# *What is Twitter, a social network or a news media? <NAME>, <NAME>, <NAME>, and <NAME>. 2010.*<br>
#
# ---
#
# The fastest way to obtain the dataset is to run :
# ```bash
# sh ./get_data.sh
# ```
#
# Please refer to the README for further information and more options on how to obtain this dataset.
#
# ## Multi-GPU PageRank with cuGraph
# ### Basic setup
# +
# Let's check out our hardware setup
# !nvidia-smi
# GPUs should be connected with NVlink
# !nvidia-smi nvlink --status
# For best performance, we can limit the number of available devices
# For this dataset, we can use only 2 Tesla V100(32GB) or 4 Tesla P100 (16GB)
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
# List available devices
# !echo Available GPUs: $CUDA_VISIBLE_DEVICES
# -
# Import needed libraries
import time
from dask.distributed import Client, wait
import dask_cudf
from dask_cuda import LocalCUDACluster
import cugraph.dask.pagerank as dcg
# ### Setup multi-GPU and Dask
#
# Before we get started, we need to setup a Dask local cluster of workers to execute our work and a client to coordinate and schedule work for that cluster. As we see below, we can initiate a `cluster` and `client` using only 2 lines of code.
cluster = LocalCUDACluster(threads_per_worker=1)
client = Client(cluster)
# ### Read the data from disk
# cuGraph depends on dask-cudf for data loading and the initial DataFrame creation. The CSV data file contains an edge list, which represents the connection of a vertex to another. The source to destination pairs is what is known as Coordinate Format (COO). In this test case, the data is just two columns.
# +
# File path, assuming Notebook directory
input_data_path = r"twitter-2010.csv"
# Helper function to set the reader chunk size to automatically get one partition per GPU
chunksize = dcg.get_chunksize(input_data_path)
# Start timer
t_start = time.time()
# Multi-GPU CSV reader
e_list = dask_cudf.read_csv(input_data_path, chunksize = chunksize, delimiter=' ', names=['src', 'dst'], dtype=['int32', 'int32'])
# Wait for the lazy reader
tmp = wait(client.compute(e_list.to_delayed()))
# Print time
print(time.time()-t_start, "s")
# -
# ### Call the Multi-GPU PageRank algorithm
#
# +
# Start timer
t_start = time.time()
# Get the pagerank scores
pr_ddf = dcg.pagerank(e_list, max_iter=20)
# Print time
print(time.time()-t_start, "s")
# -
# It was that easy! PageRank should only take a few seconds to run on this 26GB input with 2 Tesla V100 GPUs.<br>
# Check out how it compares to published results in the [Annex](#annex_cell).
# ### Further analysis on the Pagerank result
#
# We can now identify the most influential users in the network.<br>
# Notice that the PageRank result can fit in one GPU. Hence, we can gather it in a regular `cudf.DataFrame`. We will then sort by PageRank value and print the *Top 3*.
# +
# Start timer
t_start = time.time()
# Dask Data Frame to regular cuDF Data Frame
pr_df = pr_ddf.compute()
# Sort, descending order
pr_sorted_df = pr_df.sort_values('pagerank',ascending=False)
# Print time
print(time.time()-t_start, "s")
# Print the Top 3
print(pr_sorted_df.head(3))
# -
# We can now use the [map](https://s3.us-east-2.amazonaws.com/rapidsai-data/cugraph/benchmark/twitter-2010-ids.csv.gz) to convert Vertex ID into to Twitter's numeric ID. The user name can also be retrieved using the [TwitterID](https://tweeterid.com/) web app.<br>
# The table below shows more information on our *Top 3*. Notice that this ranking is much better at capturing network influence compared to the number of followers, for instance. Further analysis of this dataset was published [here](https://doi.org/10.1145/1772690.1772751).
#
# | Vertex ID | Twitter ID | User name | Description |
# | --------- | --------- | -------- | ---------- |
# | 21513299 | 813286 | barackobama | US President (2009-2017) |
# | 23933989 | 14224719 | 10DowningStreet | UK Prime Minister office |
# | 23933986 | 15131310 | WholeFoods | Food store from Austin |
#
#
# ### Close the multi-GPU environment
client.close()
cluster.close()
# ## Annex
# <a id='annex_cell'></a>
# An experiment comparing various products for this workflow was published in *GraphX: Graph Processing in a Distributed Dataflow Framework, OSDI, 2014*. They used 16 m2.4xlarge worker nodes on Amazon EC2. There was a total of 128 CPU cores and 1TB of memory in this 2014 setup.
#
# 
# ___
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# ___
|
intermediate_notebooks/examples/cugraph/multi_gpu_pagerank.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="nepPEqtbvQIL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bf237377-3414-470c-9222-c713dea1c564"
# Load the Drive helper and mount
from google.colab import drive
# This will prompt for authorization.
drive.mount('/content/drive')
# + colab_type="code" id="a1utTHKbYuDJ" colab={}
#Import the libraries
import math
#import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# + colab_type="code" id="wtFDdWVbwOf5" colab={}
import numpy as np
import tensorflow as tf
tf.random.set_seed(4)
np.random.seed(4)
# + colab_type="code" id="rxSbXccdZGkV" colab={}
# Loading Data: per-instance resource-utilisation time series.
cols=['Timestamp','Memory_Allocated','Memory_Used','CPU_Allocated','CPU_Used','Network_Bandwidth_Utilization','Storage_Space_Utilization',
      'Group_ID','Instance']
# NOTE(review): header=1 combined with explicit `names` makes pandas treat
# row 1 as the header and discard the rows above it -- confirm the first
# data row is not being silently dropped.
df = pd.read_csv("/content/drive/My Drive/REVA/Interview/inputData.csv", sep=',',
                header=1,
                names=cols,
                engine="python",
                encoding="latin1")
# + colab_type="code" id="YvAmbDuVU7Y8" colab={"base_uri": "https://localhost:8080/", "height": 535} outputId="5c6425cc-17cf-4e37-a353-a6e395ad0b27"
#Visualize the closing price history
plt.figure(figsize=(16,8))
plt.title('CPU_Used History')
plt.plot(df['CPU_Used'])
plt.ylabel('CPU_Used',fontsize=18)
plt.show()
# + colab_type="code" id="aQs9csOVZRWM" colab={}
df1=df.reset_index()['CPU_Used']
# + colab_type="code" id="bQjApQ145Ixi" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b1fad679-3406-4094-f1ad-1f0e8c87d7b4"
df1.shape
# + colab_type="code" id="2-eC-GOwZokc" colab={}
### LSTM are sensitive to the scale of the data. so we apply MinMax scaler
# + colab_type="code" id="9Bwdix_1ZszT" colab={}
import numpy as np
# + colab_type="code" id="zGfIlhixZu6U" colab={"base_uri": "https://localhost:8080/", "height": 230} outputId="342429cc-3832-4e85-ee28-776ef6b13836"
df1
# + colab_type="code" id="NhYQt4cgZw1c" colab={}
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler(feature_range=(0,1))
df1=scaler.fit_transform(np.array(df1).reshape(-1,1))
# + colab_type="code" id="D1yJdAVBZyh0" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="a4178620-9f6c-4df7-957c-8c7050041c69"
print(df1)
# + colab_type="code" id="P8pT_wMXZ0r8" colab={}
##splitting dataset into train and test split
# Chronological 65/35 split -- no shuffling, so the test set is strictly
# "future" data relative to the training set (required for time series).
training_size=int(len(df1)*0.65)
test_size=len(df1)-training_size
train_data,test_data=df1[0:training_size,:],df1[training_size:len(df1),:1]
# + colab_type="code" id="HuX8p9r9aGtM" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6e643858-3263-41c0-ab87-7e186cde2bdf"
training_size,test_size
# + colab_type="code" id="6HDUZK3TaIWF" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="32c6d597-78fc-453a-9d9a-a1004c03c07f"
train_data
# + colab_type="code" id="zPlMOcEuaM80" colab={}
import numpy
# Turn a univariate series into (window, next-value) supervised pairs.
def create_dataset(dataset, time_step=1):
    """Build supervised-learning pairs from an (n, 1) series array.

    Each sample in X is a window of `time_step` consecutive values taken
    from column 0 of `dataset`; the matching entry in Y is the value that
    immediately follows the window.
    """
    samples, targets = [], []
    last_start = len(dataset) - time_step - 1
    for start in range(last_start):
        window = dataset[start:(start + time_step), 0]
        samples.append(window)
        targets.append(dataset[start + time_step, 0])
    return numpy.array(samples), numpy.array(targets)
# + colab_type="code" id="GRqE18gOhB5-" colab={}
# reshape into X=t,t+1,t+2,t+3 and Y=t+4
time_step = 100
X_train, y_train = create_dataset(train_data, time_step)
X_test, ytest = create_dataset(test_data, time_step)
# + colab_type="code" id="gLyHCmYx7OsG" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="cf58c2b1-1c97-44f7-e5ad-8472904e59d9"
print(X_train)
# + colab_type="code" id="65mHGb3UhHxk" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="c6a59641-ace7-41f5-d16d-0001d08f2a53"
print(X_train.shape), print(y_train.shape)
# + colab_type="code" id="361mu-DRhKXD" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="c7cf4b93-86b9-417c-dc91-5f4a739d5057"
print(X_test.shape), print(ytest.shape)
# + colab_type="code" id="cGCVUYrxhNGz" colab={}
# reshape input to be [samples, time steps, features] which is required for LSTM
X_train =X_train.reshape(X_train.shape[0],X_train.shape[1] , 1)
X_test = X_test.reshape(X_test.shape[0],X_test.shape[1] , 1)
# + colab_type="code" id="iiG-crnohPDL" colab={}
### Create the Stacked LSTM model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
# + colab_type="code" id="gyk6Z9KOKWj9" colab={}
def fit_lstm(X_train, test, raw, scaler, batch_size, nb_epoch, neurons):
    """Fit a stateful LSTM and record train/test RMSE after every epoch.

    Parameters
    ----------
    X_train : 2-D array in supervised layout -- leading columns are the
        lagged inputs, the last column is the target.
    test : test split in the same supervised layout.
    raw : the original (unscaled) series, used to rebuild the raw windows
        that ``evaluate`` needs.
    scaler : fitted scaler, used by ``evaluate`` to invert normalisation.
    batch_size, nb_epoch, neurons : LSTM hyper-parameters.

    Returns a DataFrame with columns 'X_train' and 'test', one RMSE row
    per epoch.

    NOTE(review): relies on an ``evaluate`` helper that is not defined in
    this notebook -- confirm where it comes from before using this.
    """
    X, y = X_train[:, 0:-1], X_train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    # prepare a single-layer stateful LSTM
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    # Fit one epoch at a time so state can be reset between epochs and the
    # RMSE curve recorded.  The original body referenced the undefined
    # names `train`, `raw_train` and `train_rmse`; they are unified here
    # with the names actually in scope so the function can run.
    X_train_rmse, test_rmse = list(), list()
    for i in range(nb_epoch):
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
        # evaluate model on train data
        raw_X_train = raw[-(len(X_train)+len(test)+1):-len(test)]
        X_train_rmse.append(evaluate(model, raw_X_train, X_train, scaler, 0, batch_size))
        model.reset_states()
        # evaluate model on test data
        raw_test = raw[-(len(test)+1):]
        test_rmse.append(evaluate(model, raw_test, test, scaler, 0, batch_size))
        model.reset_states()
    # Original called bare DataFrame(); use the pandas import from the
    # top of the notebook.
    history = pd.DataFrame()
    history['X_train'], history['test'] = X_train_rmse, test_rmse
    return history
# + colab_type="code" id="WB5eMuAfhRuE" colab={}
# Three stacked LSTM layers (50 units each) over 100-step windows of one
# feature, followed by a single linear output for one-step-ahead regression.
model=Sequential()
model.add(LSTM(50,return_sequences=True,input_shape=(100,1)))
model.add(LSTM(50,return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
# NOTE(review): 'accuracy' is not a meaningful metric for a continuous MSE
# regression target -- the accuracy curves plotted later are uninformative.
model.compile(loss='mean_squared_error',optimizer='adam',metrics=['accuracy'])
# + colab_type="code" id="IEYZBDirhTAz" colab={"base_uri": "https://localhost:8080/", "height": 301} outputId="461a2e57-a849-4c10-9016-96eeb382c344"
model.summary()
# + colab_type="code" id="TlvzFuE-hZW8" colab={"base_uri": "https://localhost:8080/", "height": 727} outputId="80d35996-cf2c-4f2b-f935-3a6a9903e3e9"
history = model.fit(X_train,y_train,validation_data=(X_test,ytest),epochs=20,batch_size=64,verbose=1)
# + colab_type="code" id="Cz37G0vIaehI" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2147dc88-a563-4939-80ab-1c395dfdaf5f"
print(history.history.keys())
# + colab_type="code" id="WhZZioMYapvO" colab={"base_uri": "https://localhost:8080/", "height": 324} outputId="6d101dcd-c218-42b3-dca3-e4f988d85ed1"
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + colab_type="code" id="BsWxAej4a4Qx" colab={"base_uri": "https://localhost:8080/", "height": 324} outputId="cad0dceb-b6a5-456b-be69-21ab8736883e"
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + colab_type="code" id="5zU8ZsUOa4nH" colab={}
# + colab_type="code" id="T8UbucIky5X9" colab={}
import tensorflow as tf
# + colab_type="code" id="D3LeiOC_y8QB" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="74801ddb-da0b-4d3e-b1f9-52556f7f79e4"
tf.__version__
# + colab_type="code" id="3fvmCzCgy-rR" colab={}
### Lets Do the prediction and check performance metrics
train_predict=model.predict(X_train)
test_predict=model.predict(X_test)
# + colab_type="code" id="-UnUiX__zA4B" colab={}
##Transformback to original form
train_predict=scaler.inverse_transform(train_predict)
test_predict=scaler.inverse_transform(test_predict)
# + colab_type="code" id="EkOo9F0zzDMh" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2521a054-d134-44d3-bbe0-423debec8a3c"
### Calculate RMSE performance metrics
import math
from sklearn.metrics import mean_squared_error
math.sqrt(mean_squared_error(y_train,train_predict))
# + colab_type="code" id="DZtrVjTuzHhL" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="462b1f49-91c5-40c8-b1bd-3c4f27dd1ba9"
### Plotting
# shift train predictions for plotting
look_back=100
trainPredictPlot = numpy.empty_like(df1)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_predict)+look_back, :] = train_predict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(df1)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(train_predict)+(look_back*2)+1:len(df1)-1, :] = test_predict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(df1))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
# + colab_type="code" id="5-nP4k7AzKbx" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7a662bd5-4ea4-4395-aaf0-ecf41013ca2e"
len(test_data)
# + colab_type="code" id="aEwt7D0IHDZR" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e040d450-94f7-45f5-f89b-6a8717ed515c"
test_data.shape
# + colab_type="code" id="yTlG5UEgzNHq" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f4dafdff-b9dc-4bff-ceb1-25a08b7c9482"
x_input=test_data[854:].reshape(1,-1)
x_input.shape
# + id="BJ-u26J41sAi" colab_type="code" colab={}
|
Forecasting_LSTM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Diva-python3.6
# language: python
# name: diva-python3.6
# ---
import os
import numpy as np
from mpl_toolkits.basemap import Basemap
# Output locations for the Leaflet-style JS point arrays.
outputdir = "/home/ctroupin/Presentations/PresentationsLatex/20180122_Eudat_Porto/data/"
outputfile = "grid.js"
outputfile2 = "grid2.js"
# Mediterranean-area bounding box and the two grid resolutions.
lonmin, lonmax = -5, 36.
latmin, latmax = 30., 46.
dlon, dlat = 1., 1.
dlon2, dlat2 = .25, .25
# Mercator projection used only for the land/sea mask lookup.
m = Basemap(projection='merc',
            llcrnrlon=lonmin-1., llcrnrlat=latmin-1.,
            urcrnrlon=lonmax+1., urcrnrlat=latmax+1.,
            lat_ts=0.5 * (latmin + latmax), resolution='l')


def write_grid(filepath, varname, dlon_step, dlat_step):
    """Write the sea-only nodes of a regular lon/lat grid as a JS array.

    Builds a grid over [lonmin, lonmax) x [latmin, latmax) at the given
    steps, drops points that fall on land (Basemap's is_land on the
    projected coordinates), and writes the remainder to *filepath* as
    ``var <varname> = [[lat, lon], ...]``.  Factored out of two
    copy-pasted cells; the file contents are identical to the originals.
    """
    lon = np.arange(lonmin, lonmax, dlon_step)
    lat = np.arange(latmin, latmax, dlat_step)
    llon, llat = np.meshgrid(lon, lat)
    with open(filepath, "w") as f:
        f.write("var " + varname + " = [\n")
        for longrid, latgrid in zip(llon.flatten(), llat.flatten()):
            x, y = m(longrid, latgrid)
            if not(m.is_land(x, y)):
                f.write("[" + str(latgrid) + ", " + str(longrid) + "],\n")
        f.write("]")


# ## Broad grid
write_grid(os.path.join(outputdir, outputfile), "grid", dlon, dlat)

# ## Fine grid
write_grid(os.path.join(outputdir, outputfile2), "finegrid", dlon2, dlat2)
# -
|
200+ beamer 模板合集/Eudat_Porto(简洁)/python/grid_json.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from network import deblur
from datagen import flow,image_resize
from config import param_maps
import cv2
import numpy as np
from tensorflow.keras.preprocessing.image import load_img,img_to_array
from helpers import SnapshotCallbackBuilder,TensorBoard,LOSSES,PSNRLoss
# Build the deblurring network (network.deblur is the model factory; note
# it is shadowed by the local deblur() helper defined below) and load the
# trained weights.
MODEL = deblur(upsample=param_maps["upsample_config"],rdb_depth=param_maps["Depth"])
MODEL.load_weights("fas_blur.h5")
# +
def deblur(img):
    """Run the pre-trained deblurring MODEL on the image at path *img*.

    The network works best at its training scale, so the caller is
    expected to resize first (see ``predict``).  The restored image is
    written to "outs1.jpg" and returned as an RGB uint8 array.
    """
    read = cv2.imread(img)
    height, width = read.shape[0], read.shape[1]
    # Both dimensions must be even for the network's down/up-sampling.
    if height % 2 != 0:
        height = height - 1
    if width % 2 != 0:
        width = width - 1
    # load_img yields an RGB PIL image; convert explicitly with
    # img_to_array (the original divided the PIL object directly) and
    # normalise pixel values into [-1, 1] as used in training.
    image = img_to_array(load_img(img, target_size=(height, width)))
    image = image / 127.5 - 1.0
    image = np.expand_dims(image, axis=0)
    prediction = MODEL.predict(image)
    # Undo the normalisation: [-1, 1] -> [0, 255].  Clip out-of-range
    # values rather than np.abs() them (abs folded negative overshoot
    # back into bright pixels), and store as uint8 -- the original cast
    # to uint16, which is the wrong depth for an 8-bit JPEG.
    out = (prediction[0] + 1.0) * 127.5
    out = np.clip(out, 0, 255).astype(np.uint8)
    # The array is RGB (from load_img) but cv2.imwrite expects BGR, so
    # swap channels when saving; the original wrote R and B swapped.
    cv2.imwrite("outs1.jpg", out[:, :, ::-1])
    return out
def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
    """Proportionally resize *image* to a target width or height.

    If both *width* and *height* are None the image is returned
    unchanged; if only one is given, the other dimension is derived from
    the input's aspect ratio; if both are given, *width* wins.
    *inter* selects the cv2 interpolation method.
    """
    orig_h, orig_w = image.shape[:2]
    if width is None and height is None:
        return image
    if width is not None:
        scale = width / float(orig_w)
        target_dim = (width, int(orig_h * scale))
    else:
        scale = height / float(orig_h)
        target_dim = (int(orig_w * scale), height)
    return cv2.resize(image, target_dim, interpolation = inter)
# -
def predict(image_file):
    """Resize *image_file* to the network's working scale and deblur it."""
    image = image_file
    read = cv2.imread(image)
    # Scale to the height the model was trained on, keeping aspect ratio.
    read = image_resize(read,height = param_maps["scale"])
    # "outs1.jpg" is used as a scratch file: deblur() reads it back in and
    # then overwrites it with the restored image.
    cv2.imwrite("outs1.jpg",read)
    deb = deblur("outs1.jpg")
predict("input.jpg")
|
Inference.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Least-squares parameter estimation of AR models
#
# The model structure considered is the (Auto-Regressive) model
# $$ A(\text{q})y(k) = e(k+n), $$
# where $e(k)$ is a zero-mean white noise sequence.
#
# The model can be written
# \begin{align*}
# (\text{q}^n + a_1\text{q}^{n-1} + \cdots + a_n)y(k) &= \text{q}^n e(k)\\
# y(k+n) + a_1 y(k+n-1) + \cdots + a_n y(k) &= e(k+n)\\
# y(k+1) + a_1y(k) + \cdots + a_n y(k-n+1) &= e(k+1)
# \end{align*}
# The one-step-ahead predictor for this model becomes
# \begin{align*}
# \hat{y}(k+1) &= -a_1 y(k) - a_2 y(k-1) - \cdots - a_n y(k-n+1)
# & = \underbrace{\begin{bmatrix} -y(k) & \cdots & -y(k-n+1)\end{bmatrix}}_{\varphi^{T}(k+1)} \underbrace{\begin{bmatrix} a_1\\\vdots\\a_n\end{bmatrix}}_{\theta}\\
# &= \varphi^{T}(k+1)\theta.
# \end{align*}
# Note that the white noise term $e(k+1)$ by definition cannot be predicted from knowledge of previous values in the sequence (which we don't know) nor from previous output values $y(t), \; t \le k$ (which could have been used to estimate $\hat{e}(k)$). Therefore $e(k+1)$ is predicted by its mean value which is zero. Note also that if our model with $\theta = \theta^*$ is perfect ($\theta^*$ contains the true parameters for the system which generated the data), then the prediction error equals the white noise disturbance: $\epsilon(k+1) = y(k+1) - \varphi^{T}(k+1)\theta^* = e(k+1)$. Therefore, we can check how good a model is by testing how closely the prediction errors resemble a white noise sequence.
#
# The system of equations in the unknown system parameters $\theta$ (the normal equations) is
# $ \Phi \theta = y, $
# where
# \begin{align*}
# \Phi &= \begin{bmatrix} \varphi^{T}(n+1)\\\varphi^{T}(n+2)\\\vdots\\\varphi^{T}(N)\end{bmatrix},\\
# y &= \begin{bmatrix} y(n+1)\\y(n+2)\\\vdots\\y(N)\end{bmatrix}.
# \end{align*}
#
# The least-squares solution to this system of equations is, by definition, the solution $\hat{\theta}$ which minimizes the sum of squares of the residuals $\epsilon = y-\Phi\theta$, i.e. the solution that minimizes the criterion
# $ J(\theta) = \epsilon^{T}\epsilon = \sum_i \epsilon_i^2. $
# It is given by
# $ \hat{\theta}_{LS} = \underbrace{(\Phi^{T}\Phi)^{-1}\Phi^{T}}_{\Phi^+} y, $
# where $\Phi^+$ is called the *Moore-Penrose inverse* of the (typically) non-square, tall matrix $\Phi$.
#
#
# ## Example
# For a first-order model,
# $$ y(k+1) + ay(k) = e(k+1) $$ we have only one parameter $\theta = a$ to estimate. The one-step-ahead predictor is
# $$\hat{y}_{k+1} = -a y_k = -y_k a = -\varphi_{k+1} \theta, \qquad \varphi_{k+1} = -y_k$$
# The normal equations become
# $$ \begin{bmatrix} \varphi_2^T\\\varphi_3^T\\\vdots\\\varphi_N^T\end{bmatrix}\theta = \begin{bmatrix}y_2\\y_3\\\vdots\\y_N\end{bmatrix} $$
# $$ \begin{bmatrix} -y_1\\-y_2\\\vdots\\-y_{N-1}\end{bmatrix}a = \begin{bmatrix}y_2\\y_3\\\vdots\\y_N\end{bmatrix} $$
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import control.matlab as cm
# %matplotlib notebook
# ### Simulate some data
# True first-order AR parameter; identification should recover this value.
a0 = -0.5; # TRUE VALUE
# Discrete-time transfer function H(z) = z / (z + a0), sample time 1.
H = cm.tf([1,0],[1, a0], 1)
N = 100
e = np.random.randn(N+1) # Gaussian white noise
e_v = np.random.randn(N+1) # Gaussian white noise for validation
# Simulate the AR process driven by each noise sequence.
y, t, x = cm.lsim(H, e)
y_v, t_v, x_v = cm.lsim(H, e_v)
plt.figure(figsize = (8,3))
plt.stem(t, y);
plt.title('Data for identification')
plt.figure(figsize = (8,3))
plt.stem(t_v, y_v);
plt.title('Data for validation')
# ## Identify first order model
# Build the regressor matrix and target vector of the normal equations.
# The one-step predictor is y_hat(k+1) = -a*y(k), so phi(k+1) = -y(k).
Phi = -y[1:-1].reshape(N - 1, 1)  # column of regressors -y(1)..-y(N-1)
yy = y[2:]                        # targets y(2)..y(N)
# rcond=None selects the current (non-deprecated) default cutoff for
# small singular values in numpy's least-squares solver; omitting it
# triggers a FutureWarning on modern NumPy.
theta_ls = np.linalg.lstsq(Phi, yy, rcond=None)
theta_ls
# ## Validation
# The validation is always done on input-output data that were **not** used in the parameter estimation. It is also recommended to use a k-step-ahead predictor. See section about validation further down.
# +
# Keep the first N validation samples and form one-step-ahead predictions.
y_v = y_v[:N]
ahat = theta_ls[0][0]       # estimated parameter a
ypred = -ahat*y_v[:-1]      # y_hat(k+1) = -a_hat * y(k)
tpred = np.arange(1, N)     # time indices of the predicted samples
plt.figure(figsize=(8,3))
plt.step(range(N), y_v)
plt.plot(tpred, ypred, 'ro')
# Calculate the Root Mean Square Error (RMSE) and fit (in %)
err = y_v[tpred] - ypred
# Average over the number of prediction errors (N-1), not N: the first
# sample has no prediction. np.mean uses len(err), fixing the original
# off-by-one of dividing the sum of N-1 squares by N. Reusing err also
# avoids recomputing y_v[tpred] - ypred.
RMSE = np.sqrt(np.mean(np.square(err)))
fit = 100 * (1 - np.linalg.norm(err)/np.linalg.norm(y_v - np.mean(y_v)))
plt.title("RMSE = %f, fit = %f %%" % (RMSE, fit))
# Check whiteness of residual. The autocorrelation function should be close
# to a pulse
def estimated_autocorrelation(x):
    """
    Estimate the autocorrelation function of a 1-D sequence.

    Returns an array the same length as ``x`` whose k-th element is the
    sample autocorrelation at lag k, each lag normalized by the number of
    terms that contributed to it; element 0 is 1 by construction.

    http://stackoverflow.com/q/14297012/190597
    http://en.wikipedia.org/wiki/Autocorrelation#Estimation
    """
    n = len(x)
    variance = x.var()
    x = x - x.mean()
    # Single C-level pass; the last n entries of the full correlation are
    # the non-negative lags 0..n-1. (The original re-derived the same
    # values in an O(n^2) Python-level assert, which was both quadratic
    # and silently stripped under `python -O`; it has been removed.)
    r = np.correlate(x, x, mode='full')[-n:]
    # Lag k is a sum of n-k products, hence the decreasing divisor.
    return r / (variance * np.arange(n, 0, -1))
# Whiteness check: for a good model the residual autocorrelation should be
# close to a unit pulse (1 at lag 0, near 0 elsewhere).
autocorr = estimated_autocorrelation(np.ravel(err))
plt.figure(figsize=(8,3))
plt.stem(autocorr[:20]);
# -

# Display the raw prediction errors for inspection.
err
|
system-identification/notebooks/AR-example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.019081, "end_time": "2021-08-17T08:02:25.309623", "exception": false, "start_time": "2021-08-17T08:02:25.290542", "status": "completed"} tags=[]
# # **Training Notebook for Santander Dataset with AutoML**
# + [markdown] papermill={"duration": 0.017484, "end_time": "2021-08-17T08:02:25.345619", "exception": false, "start_time": "2021-08-17T08:02:25.328135", "status": "completed"} tags=[]
# ## **1. Required Libraries & Setup**
# + papermill={"duration": 1.161429, "end_time": "2021-08-17T08:02:26.524938", "exception": false, "start_time": "2021-08-17T08:02:25.363509", "status": "completed"} tags=[]
# General Data Manipulation Libraries
import numpy as np; print('Numpy Version:', np.__version__)
import pandas as pd; print('Pandas Version:', pd.__version__)
# Model & Helper Libraries
from sklearn.model_selection import train_test_split
# + [markdown] papermill={"duration": 0.018622, "end_time": "2021-08-17T08:02:26.562840", "exception": false, "start_time": "2021-08-17T08:02:26.544218", "status": "completed"} tags=[]
# ## **2. Results with Different Classifier models**
# + papermill={"duration": 11.379397, "end_time": "2021-08-17T08:02:37.961017", "exception": false, "start_time": "2021-08-17T08:02:26.581620", "status": "completed"} tags=[]
# Load Data
input_dir = '/kaggle/input/santander-customer-transaction-prediction/'
df_train = pd.read_csv(input_dir + 'train.csv')

# Train-Validation Split
# NOTE(review): "var_colums" is misspelled ("var_columns") — left unchanged
# to avoid touching code. It selects every column except the row id and target.
var_colums = [c for c in df_train.columns if c not in ['ID_code','target']]
X = df_train.loc[:, var_colums]
y = df_train.loc[:, 'target']

# We are performing a 80-20 split for Training and Validation
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=11)
X_train.shape, X_valid.shape, y_train.shape, y_valid.shape
# + [markdown] papermill={"duration": 0.018366, "end_time": "2021-08-17T08:02:37.998362", "exception": false, "start_time": "2021-08-17T08:02:37.979996", "status": "completed"} tags=[]
# ### **2.1 Using LightGBM**
# + papermill={"duration": 1.156459, "end_time": "2021-08-17T08:02:39.175864", "exception": false, "start_time": "2021-08-17T08:02:38.019405", "status": "completed"} tags=[]
# Import LightGBM
import lightgbm as lgb
# + papermill={"duration": 0.029061, "end_time": "2021-08-17T08:02:39.224235", "exception": false, "start_time": "2021-08-17T08:02:39.195174", "status": "completed"} tags=[]
# Create Dataset for LightGBM
lgb_train = lgb.Dataset(X_train.values, label=y_train.values)
# Validation Dataset, linked to the training Dataset via reference=.
lgb_eval = lgb.Dataset(X_valid.values, y_valid.values, reference=lgb_train)
# + papermill={"duration": 0.033099, "end_time": "2021-08-17T08:02:39.276967", "exception": false, "start_time": "2021-08-17T08:02:39.243868", "status": "completed"} tags=[]
clf = lgb.LGBMClassifier()
# Hyperparameters applied to the classifier via set_params below.
params = { 'boosting_type': 'gbdt',
'objective': 'binary',
'num_leaves': 40,
'learning_rate': 0.05,
'feature_fraction': 0.5,
'metric': 'auc',
'bagging_fraction': 0.5,
'is_unbalance': 'true',
'n_estimators': 200
}
clf.set_params(**params)
clf.get_params()  # echo the effective parameters
# + papermill={"duration": 14.568441, "end_time": "2021-08-17T08:02:53.865153", "exception": false, "start_time": "2021-08-17T08:02:39.296712", "status": "completed"} tags=[]
# Fit the classifier configured above. (Previously this cell re-created
# `clf` with a fresh lgb.LGBMClassifier(), silently discarding every
# parameter set via clf.set_params(**params) in the preceding cell.)
clf.fit(X_train, y_train,eval_set=[(X_valid.values, y_valid.values)], eval_metric='auc', early_stopping_rounds=5, verbose=True)
# + papermill={"duration": 0.263031, "end_time": "2021-08-17T08:02:54.162111", "exception": false, "start_time": "2021-08-17T08:02:53.899080", "status": "completed"} tags=[]
# Predictions
y_pred=clf.predict(X_valid)  # hard 0/1 class labels
np.unique(y_pred, return_counts=True)  # class balance of the predictions
# + [markdown] papermill={"duration": 0.033144, "end_time": "2021-08-17T08:02:54.230453", "exception": false, "start_time": "2021-08-17T08:02:54.197309", "status": "completed"} tags=[]
# **View Accuracy**
# + papermill={"duration": 0.049842, "end_time": "2021-08-17T08:02:54.313155", "exception": false, "start_time": "2021-08-17T08:02:54.263313", "status": "completed"} tags=[]
# view accuracy
from sklearn.metrics import accuracy_score
# Compute once and reuse; the original computed the score twice and never
# used the first result.
accuracy = accuracy_score(y_valid, y_pred)
print('LightGBM Model accuracy score: {0:0.4f}'.format(accuracy))
# + [markdown] papermill={"duration": 0.033326, "end_time": "2021-08-17T08:02:54.380183", "exception": false, "start_time": "2021-08-17T08:02:54.346857", "status": "completed"} tags=[]
# **Classification Metrics**
# + papermill={"duration": 0.109066, "end_time": "2021-08-17T08:02:54.522479", "exception": false, "start_time": "2021-08-17T08:02:54.413413", "status": "completed"} tags=[]
# Per-class precision/recall/F1 on the validation split.
from sklearn.metrics import classification_report
print(classification_report(y_valid, y_pred))
# + [markdown] papermill={"duration": 0.035215, "end_time": "2021-08-17T08:02:54.591483", "exception": false, "start_time": "2021-08-17T08:02:54.556268", "status": "completed"} tags=[]
# **Confusion-matrix**
# + papermill={"duration": 0.694777, "end_time": "2021-08-17T08:02:55.320477", "exception": false, "start_time": "2021-08-17T08:02:54.625700", "status": "completed"} tags=[]
from sklearn.metrics import confusion_matrix
import seaborn as sns

cm = confusion_matrix(y_valid, y_pred)
# visualize confusion matrix with seaborn heatmap
# sklearn's confusion_matrix puts TRUE labels on rows and PREDICTED labels
# on columns, ordered [0, 1] — so cm[0, 0] counts true negatives. The
# original DataFrame labels were transposed and listed the classes in the
# wrong order, mislabeling every cell of the heatmap.
cm_matrix = pd.DataFrame(data=cm,
index=['Actual Negative:0', 'Actual Positive:1'],
columns=['Predicted Negative:0', 'Predicted Positive:1'])
sns.heatmap(cm_matrix, annot=True, fmt='d', cmap='YlGnBu')
# + [markdown] papermill={"duration": 0.034906, "end_time": "2021-08-17T08:02:55.392129", "exception": false, "start_time": "2021-08-17T08:02:55.357223", "status": "completed"} tags=[]
# ### **2.2 Using AutoML H2O**
# + papermill={"duration": 30.764508, "end_time": "2021-08-17T08:03:26.191752", "exception": false, "start_time": "2021-08-17T08:02:55.427244", "status": "completed"} tags=[]
# Install Dependencies
# !pip install requests
# !pip install tabulate
# !pip install "colorama>=0.3.8"
# !pip install future
# + papermill={"duration": 7.513434, "end_time": "2021-08-17T08:03:33.743258", "exception": false, "start_time": "2021-08-17T08:03:26.229824", "status": "completed"} tags=[]
# Install and import h2o
# !pip install -f http://h2o-release.s3.amazonaws.com/h2o/latest_stable_Py.html h2o
import h2o
# + papermill={"duration": 7.068229, "end_time": "2021-08-17T08:03:40.851677", "exception": false, "start_time": "2021-08-17T08:03:33.783448", "status": "completed"} tags=[]
from h2o.automl import H2OAutoML
# Start (or attach to) a local H2O cluster with a 16 GB memory cap.
h2o.init(max_mem_size='16G')
# + papermill={"duration": 8.942966, "end_time": "2021-08-17T08:03:49.853833", "exception": false, "start_time": "2021-08-17T08:03:40.910867", "status": "completed"} tags=[]
# Load the training CSV directly into an H2OFrame (parsed by the cluster).
df_h2o = h2o.import_file('/kaggle/input/santander-customer-transaction-prediction/train.csv')
type(df_h2o)
# + papermill={"duration": 0.076039, "end_time": "2021-08-17T08:03:49.997118", "exception": false, "start_time": "2021-08-17T08:03:49.921079", "status": "completed"} tags=[]
# For binary classification, response should be a factor
df_h2o["target"] = df_h2o["target"].asfactor()

# Setting up Response and Predictor Columns
y_col = "target"
x_col = df_h2o.columns
x_col.remove(y_col)       # drop the response from the predictor list
x_col.remove("ID_code")   # drop the row identifier (no predictive value)
# + papermill={"duration": 3367.14497, "end_time": "2021-08-17T08:59:57.207861", "exception": false, "start_time": "2021-08-17T08:03:50.062891", "status": "completed"} tags=[]
# Search up to 20 models or one hour, whichever comes first;
# balance_classes rebalances the skewed target during training.
aml = H2OAutoML(max_models=20, max_runtime_secs=3600, seed=11, verbosity="info", balance_classes = True) #max_runtime_secs
aml.train(x=x_col,y=y_col, training_frame=df_h2o)

# View the AutoML Leaderboard
lb = aml.leaderboard
print(lb.head(rows=lb.nrows)) # Print all rows instead of default (10 rows)
# + papermill={"duration": 0.433601, "end_time": "2021-08-17T08:59:57.783237", "exception": false, "start_time": "2021-08-17T08:59:57.349636", "status": "completed"} tags=[]
# Get model ids for all models in the AutoML Leaderboard
model_ids = list(aml.leaderboard['model_id'].as_data_frame().iloc[:,0])
# Get the "All Models" Stacked Ensemble model
se = h2o.get_model([mid for mid in model_ids if "StackedEnsemble_AllModels" in mid][0])
# Get the Stacked Ensemble metalearner model
metalearner = h2o.get_model(se.metalearner()['name'])
# + papermill={"duration": 0.701285, "end_time": "2021-08-17T08:59:58.618901", "exception": false, "start_time": "2021-08-17T08:59:57.917616", "status": "completed"} tags=[]
# Plot the metalearner's standardized coefficients — the relative
# contribution of each base model to the stacked ensemble.
metalearner.std_coef_plot()
|
santander-customer-transaction-training-2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Improving function approximation by adjusting tuning curves
#
# This tutorial shows how adjusting the tuning curves of neurons
# can help implement specific functions with Nengo.
# As an example we will try to to compute
# the Heaviside step function,
# which is 1 for all $x > 0$ and 0 otherwise.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import nengo
# -
# ## The naive approach
#
# As a first pass, we can try to implement the Heaviside step function
# using an ensemble with default parameters.
n_neurons = 150  # neurons per ensemble, used by every model below
duration = 2.    # simulation run time in seconds
# +
def stimulus_fn(t):
    """Linear ramp from -1 (at t=0) to 1 (at t=duration) driving the models."""
    return 2. * t / duration - 1.


def heaviside(x):
    """Heaviside step function: 1 (True) for x > 0, else 0 (False)."""
    return x > 0
# -
# Ensemble with default parameters decoding the Heaviside function.
with nengo.Network() as model_naive:
    stimulus = nengo.Node(stimulus_fn)  # ramp input from -1 to 1
    ens = nengo.Ensemble(n_neurons=n_neurons, dimensions=1)
    output = nengo.Node(size_in=1)  # collects the decoded value
    nengo.Connection(stimulus, ens)
    # Decoders of this connection are solved for the heaviside target.
    nengo.Connection(ens, output, function=heaviside)
    p_naive = nengo.Probe(output, synapse=0.005)  # low-pass filtered readout

with nengo.Simulator(model_naive) as sim_naive:
    sim_naive.run(duration)
# Compare the decoded output against the ideal step function.
t = sim_naive.trange()
plt.figure()
plt.plot(t, sim_naive.data[p_naive], label="naive")
plt.plot(t, heaviside(stimulus_fn(t)), '--', c='black', label="ideal")
plt.xlabel("t")
plt.ylabel("Output")
plt.legend(loc='best');
# We see that this approach does work,
# but there is room for improvement.
# ## Investigating the tuning curves
#
# Let us take a look at
# the tuning curves of the neurons in the ensemble.
# Each neuron's firing rate as a function of the represented input value.
plt.figure()
plt.plot(*nengo.utils.ensemble.tuning_curves(ens, sim_naive))
plt.xlabel("Input")
plt.ylabel("Firing rate [Hz]");
# About half of these neurons are tuned to fire more for smaller values.
# But these values are not relevant
# for the Heaviside step function,
# since the output is always 0
# when input is less than 0.
# We can change all neurons to be tuned
# to fire more for larger values
# by setting all the encoders to be positive.
# Same model, but all encoders fixed to +1 so every neuron fires more
# strongly for larger inputs.
with nengo.Network() as model_pos_enc:
    stimulus = nengo.Node(stimulus_fn)
    ens = nengo.Ensemble(n_neurons=n_neurons, dimensions=1,
                         encoders=nengo.dists.Choice([[1.]]))  # all-positive encoders
    output = nengo.Node(size_in=1)
    nengo.Connection(stimulus, ens)
    nengo.Connection(ens, output, function=heaviside)
    p_pos_enc = nengo.Probe(output, synapse=0.005)

with nengo.Simulator(model_pos_enc) as sim_pos_enc:
    sim_pos_enc.run(duration)
# The resulting tuning curves:
# Tuning curves of the positive-encoder ensemble.
plt.figure()
plt.plot(*nengo.utils.ensemble.tuning_curves(ens, sim_pos_enc))
plt.xlabel("Input")
plt.ylabel("Firing rate [Hz]");

# And the resulting decoded Heaviside step function:

# Overlay naive and positive-encoder decodes against the ideal step.
t = sim_pos_enc.trange()
plt.figure()
plt.plot(t, sim_naive.data[p_naive], label="naive")
plt.plot(t, sim_pos_enc.data[p_pos_enc], label="pos. enc.")
plt.plot(t, heaviside(stimulus_fn(t)), '--', c='black', label="ideal")
plt.xlabel("t")
plt.ylabel("Output")
plt.legend(loc='best');
# Compared to the naive approach,
# the representation of outputs lower than 1 is less noisy,
# but otherwise there is little improvement.
# Even though the tuning curves are all positive,
# they are still covering a lot of irrelevant parts of the input space.
# Because all outputs should be 0 when input is less than 0,
# there is no need to have neurons tuned to inputs less than 0.
# Let's shift all the intercepts to the range $(0.0, 1.0)$.
# ## Intercept distributions
#
# Not only can the range of intercepts be important,
# but also the distribution of intercepts.
# Let us take a look at the Heaviside step function:
# Visualize the target function itself over the represented range [-1, 1].
xs = np.linspace(-1, 1, 100)
plt.figure()
plt.plot(xs, heaviside(xs))
plt.ylim(-0.1, 1.1)
# This function is mostly constant,
# except for the large jump at 0.
# The constant parts are easy to approximate
# and do not need a lot of neural resources,
# but the highly nonlinear jump
# requires more neural resources
# for an accurate representation.
#
# Let us compare the thresholding of a scalar in three ways:
#
# 1. With a uniform distribution of intercepts (the default)
# 2. With all intercepts at 0 (where we have the nonlinearity)
# 3. With an exponential distribution
#
# The last approach is in between
# the two extremes of a uniform distribution
# and placing all intercepts at 0.
# It will distribute most intercepts close to 0,
# but some intercepts will still be at larger values.
# +
threshold = 0.  # location of the step / lowest useful intercept

# Three ensembles, identical except for how their intercepts are chosen.
with nengo.Network() as model_dists:
    stimulus = nengo.Node(stimulus_fn)
    # 1) intercepts drawn uniformly from [threshold, 1)
    ens_uniform = nengo.Ensemble(
        n_neurons=n_neurons, dimensions=1,
        encoders=nengo.dists.Choice([[1]]),
        intercepts=nengo.dists.Uniform(threshold, 1.))
    # 2) every intercept exactly at the threshold
    ens_fixed = nengo.Ensemble(
        n_neurons=n_neurons, dimensions=1,
        encoders=nengo.dists.Choice([[1]]),
        intercepts=nengo.dists.Choice([threshold]))
    # 3) intercepts concentrated near the threshold (exponential, scale 0.15)
    ens_exp = nengo.Ensemble(
        n_neurons=n_neurons, dimensions=1,
        encoders=nengo.dists.Choice([[1]]),
        intercepts=nengo.dists.Exponential(0.15, threshold, 1.))

    out_uniform = nengo.Node(size_in=1)
    out_fixed = nengo.Node(size_in=1)
    out_exp = nengo.Node(size_in=1)

    nengo.Connection(stimulus, ens_uniform)
    nengo.Connection(stimulus, ens_fixed)
    nengo.Connection(stimulus, ens_exp)
    nengo.Connection(ens_uniform, out_uniform, function=heaviside)
    nengo.Connection(ens_fixed, out_fixed, function=heaviside)
    nengo.Connection(ens_exp, out_exp, function=heaviside)

    p_uniform = nengo.Probe(out_uniform, synapse=0.005)
    p_fixed = nengo.Probe(out_fixed, synapse=0.005)
    p_exp = nengo.Probe(out_exp, synapse=0.005)
# -

with nengo.Simulator(model_dists) as sim_dists:
    sim_dists.run(duration)
# Let us look at the tuning curves first.
# +
# Plot the tuning curves of the three intercept strategies side by side.
plt.figure(figsize=(12, 4))
panels = [
    (ens_uniform, "Uniform intercepts"),
    (ens_fixed, "Fixed intercepts"),
    (ens_exp, "Exponential intercept distribution"),
]
for idx, (ensemble, label) in enumerate(panels, start=1):
    plt.subplot(1, 3, idx)
    plt.plot(*nengo.utils.ensemble.tuning_curves(ensemble, sim_dists))
    plt.xlabel("Input")
    plt.ylabel("Firing rate [Hz]")
    plt.title(label)
plt.tight_layout()
# -
# Now let us look at how these three ensembles
# approximate the thresholding function.
# Overlay the three decoded thresholds (plus the naive baseline) vs. ideal.
t = sim_dists.trange()
plt.figure()
plt.plot(t, sim_naive.data[p_naive], label='naive', c='gray')
plt.plot(t, sim_dists.data[p_uniform], label='Uniform intercepts')
plt.plot(t, sim_dists.data[p_fixed], label='Fixed intercepts')
plt.plot(t, sim_dists.data[p_exp], label='Exp. intercept dist.')
plt.plot(t, heaviside(stimulus_fn(t)), '--', c='black', label="ideal")
plt.xlabel('t')
plt.ylabel('Output')
plt.legend(loc='best')
# We see that the fixed intercepts
# produce slightly higher decoded values close to the threshold,
# but the slope is lower than for uniform intercepts.
# The best approximation of the thresholding
# is done with the exponential intercept distribution.
# Here we get a quick rise to 1 at the threshold
# and a fairly constant representation of 1
# for inputs sufficiently above 0.
# All three distributions perfectly represent values below 0.
#
# Nengo provides the `ThresholdingEnsemble` preset
# to make it easier to assign intercepts
# according to the exponential distribution,
# and also adjusts the encoders and evaluation points accordingly.
# Final model: the ThresholdingEnsembles preset sets intercepts, encoders,
# and evaluation points for thresholding at 0.
with nengo.Network() as model_final:
    stimulus = nengo.Node(stimulus_fn)
    with nengo.presets.ThresholdingEnsembles(0.):
        ens = nengo.Ensemble(n_neurons=n_neurons, dimensions=1)
    output = nengo.Node(size_in=1)
    nengo.Connection(stimulus, ens)
    nengo.Connection(ens, output, function=heaviside)
    p_final = nengo.Probe(output, synapse=0.005)

with nengo.Simulator(model_final) as sim_final:
    sim_final.run(duration)
# Decoded output of the preset-based model against the ideal step.
t = sim_final.trange()
plt.figure()
plt.plot(t, sim_final.data[p_final], label="final")
plt.plot(t, heaviside(stimulus_fn(t)), '--', c='black', label="ideal")
plt.xlabel("t")
plt.ylabel("Output")
plt.legend(loc='best');
# ## The takeaway
#
# Adjusting ensemble parameters in the right way
# can sometimes help in implementing functions more accurately in neurons.
# Think about how your function maps from
# the input vector space to the output vector space,
# and consider ways to modify ensemble parameters
# to better cover important parts
# of the input vector space.
|
docs/examples/advanced/functions-and-tuning-curves.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generative Adversarial Networks (GANs)
#
# So far in CS231N, all the applications of neural networks that we have explored have been **discriminative models** that take an input and are trained to produce a labeled output. This has ranged from straightforward classification of image categories to sentence generation (which was still phrased as a classification problem, our labels were in vocabulary space and we’d learned a recurrence to capture multi-word labels). In this notebook, we will expand our repetoire, and build **generative models** using neural networks. Specifically, we will learn how to build models which generate novel images that resemble a set of training images.
#
# ### What is a GAN?
#
# In 2014, [Goodfellow et al.](https://arxiv.org/abs/1406.2661) presented a method for training generative models called Generative Adversarial Networks (GANs for short). In a GAN, we build two different neural networks. Our first network is a traditional classification network, called the **discriminator**. We will train the discriminator to take images, and classify them as being real (belonging to the training set) or fake (not present in the training set). Our other network, called the **generator**, will take random noise as input and transform it using a neural network to produce images. The goal of the generator is to fool the discriminator into thinking the images it produced are real.
#
# We can think of this back and forth process of the generator ($G$) trying to fool the discriminator ($D$), and the discriminator trying to correctly classify real vs. fake as a minimax game:
# $$\underset{G}{\text{minimize}}\; \underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$
# where $x \sim p_\text{data}$ are samples from the input data, $z \sim p(z)$ are the random noise samples, $G(z)$ are the generated images using the neural network generator $G$, and $D$ is the output of the discriminator, specifying the probability of an input being real. In [Goodfellow et al.](https://arxiv.org/abs/1406.2661), they analyze this minimax game and show how it relates to minimizing the Jensen-Shannon divergence between the training data distribution and the generated samples from $G$.
#
# To optimize this minimax game, we will aternate between taking gradient *descent* steps on the objective for $G$, and gradient *ascent* steps on the objective for $D$:
# 1. update the **generator** ($G$) to minimize the probability of the __discriminator making the correct choice__.
# 2. update the **discriminator** ($D$) to maximize the probability of the __discriminator making the correct choice__.
#
# While these updates are useful for analysis, they do not perform well in practice. Instead, we will use a different objective when we update the generator: maximize the probability of the **discriminator making the incorrect choice**. This small change helps to alleviate problems with the generator gradient vanishing when the discriminator is confident. This is the standard update used in most GAN papers, and was used in the original paper from [Goodfellow et al.](https://arxiv.org/abs/1406.2661).
#
# In this assignment, we will alternate the following updates:
# 1. Update the generator ($G$) to maximize the probability of the discriminator making the incorrect choice on generated data:
# $$\underset{G}{\text{maximize}}\; \mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$
# 2. Update the discriminator ($D$), to maximize the probability of the discriminator making the correct choice on real and generated data:
# $$\underset{D}{\text{maximize}}\; \mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$
#
# ### What else is there?
# Since 2014, GANs have exploded into a huge research area, with massive [workshops](https://sites.google.com/site/nips2016adversarial/), and [hundreds of new papers](https://github.com/hindupuravinash/the-gan-zoo). Compared to other approaches for generative models, they often produce the highest quality samples but are some of the most difficult and finicky models to train (see [this github repo](https://github.com/soumith/ganhacks) that contains a set of 17 hacks that are useful for getting models working). Improving the stability and robustness of GAN training is an open research question, with new papers coming out every day! For a more recent tutorial on GANs, see [here](https://arxiv.org/abs/1701.00160). There is also some even more recent exciting work that changes the objective function to Wasserstein distance and yields much more stable results across model architectures: [WGAN](https://arxiv.org/abs/1701.07875), [WGAN-GP](https://arxiv.org/abs/1704.00028).
#
#
# GANs are not the only way to train a generative model! For other approaches to generative modeling check out the [deep generative model chapter](http://www.deeplearningbook.org/contents/generative_models.html) of the Deep Learning [book](http://www.deeplearningbook.org). Another popular way of training neural networks as generative models is Variational Autoencoders (co-discovered [here](https://arxiv.org/abs/1312.6114) and [here](https://arxiv.org/abs/1401.4082)). Variational autoencoders combine neural networks with variational inference to train deep generative models. These models tend to be far more stable and easier to train but currently don't produce samples that are as pretty as GANs.
#
# Example pictures of what you should expect (yours might look slightly different):
#
# 
#
# ## Setup
# +
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'  # no smoothing of pixel art
plt.rcParams['image.cmap'] = 'gray'  # MNIST digits render as grayscale
# A bunch of utility functions
def show_images(images):
    """Display a batch of flattened square images on a near-square grid."""
    flat = np.reshape(images, [images.shape[0], -1])  # (batch_size, D)
    grid_dim = int(np.ceil(np.sqrt(flat.shape[0])))   # grid is grid_dim x grid_dim
    side = int(np.ceil(np.sqrt(flat.shape[1])))       # pixel side length of one image

    fig = plt.figure(figsize=(grid_dim, grid_dim))
    gs = gridspec.GridSpec(grid_dim, grid_dim)
    gs.update(wspace=0.05, hspace=0.05)

    for cell_idx, img in enumerate(flat):
        ax = plt.subplot(gs[cell_idx])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(img.reshape([side, side]))
    return
def preprocess_img(x):
    """Map pixel values from [0, 1] to the tanh range [-1, 1]."""
    return x * 2 - 1.0
def deprocess_img(x):
    """Map generator outputs from [-1, 1] back to pixel range [0, 1]."""
    return 0.5 * (x + 1.0)
def rel_error(x, y):
    """Maximum elementwise relative error between x and y (guarded against /0)."""
    diff = np.abs(x - y)
    scale = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(diff / scale)
def count_params():
    """Count the number of parameters in the current TensorFlow graph."""
    shapes = [v.get_shape().as_list() for v in tf.global_variables()]
    return np.sum([np.prod(s) for s in shapes])
def get_session():
    """Return a TF session that allocates GPU memory on demand."""
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    return tf.Session(config=session_config)
# Pre-computed reference values consumed by the test_* sanity checks below.
answers = np.load('gan-checks-tf.npz')
# -
# ## Dataset
# GANs are notoriously finicky with hyperparameters, and also require many training epochs. In order to make this assignment approachable without a GPU, we will be working on the MNIST dataset, which is 60,000 training and 10,000 test images. Each picture contains a centered image of white digit on black background (0 through 9). This was one of the first datasets used to train convolutional neural networks and it is fairly easy -- a standard CNN model can easily exceed 99% accuracy.
#
#
# **Heads-up**: Our MNIST wrapper returns images as vectors. That is, they're size (batch, 784). If you want to treat them as images, we have to resize them to (batch,28,28) or (batch,28,28,1). They are also type np.float32 and bounded [0,1].
class MNIST(object):
    """Minibatch iterator over the (flattened, [0, 1]-scaled) MNIST training set."""

    def __init__(self, batch_size, shuffle=False):
        """
        Construct an iterator object over the MNIST data

        Inputs:
        - batch_size: Integer giving number of elements per minibatch
        - shuffle: (optional) Boolean, whether to shuffle the data on each epoch
        """
        train, _ = tf.keras.datasets.mnist.load_data()
        X, y = train
        X = X.astype(np.float32)/255        # scale pixels to [0, 1]
        X = X.reshape((X.shape[0], -1))     # flatten images to (N, 784)
        self.X, self.y = X, y
        self.batch_size, self.shuffle = batch_size, shuffle

    def __iter__(self):
        N, B = self.X.shape[0], self.batch_size
        idxs = np.arange(N)
        if self.shuffle:
            np.random.shuffle(idxs)
        # BUG FIX: gather the batches through the (possibly shuffled) index
        # array. Previously idxs was shuffled but never used — the data was
        # always sliced in file order, so shuffle=True had no effect.
        return iter((self.X[idxs[i:i+B]], self.y[idxs[i:i+B]])
                    for i in range(0, N, B))
# show a batch
# (instantiating MNIST downloads the dataset on first use)
mnist = MNIST(batch_size=16)
show_images(mnist.X[:16])
# ## LeakyReLU
# In the cell below, you should implement a LeakyReLU. See the [class notes](http://cs231n.github.io/neural-networks-1/) (where alpha is small number) or equation (3) in [this paper](http://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf). LeakyReLUs keep ReLU units from dying and are often used in GAN methods (as are maxout units, however those increase model size and therefore are not used in this notebook).
#
# HINT: You should be able to use `tf.maximum`
def leaky_relu(x, alpha=0.01):
    """Compute the leaky ReLU activation function.

    Inputs:
    - x: TensorFlow Tensor with arbitrary shape
    - alpha: leak parameter for leaky ReLU

    Returns:
    TensorFlow Tensor with the same shape as x
    """
    # For 0 < alpha < 1, max(x, alpha*x) equals x when x >= 0 and alpha*x
    # when x < 0 — exactly the leaky ReLU. (This was an unimplemented TODO
    # stub that returned None.)
    return tf.maximum(x, alpha * x)
# Test your leaky ReLU implementation. You should get errors < 1e-10
# +
def test_leaky_relu(x, y_true):
    """Run leaky_relu on reference inputs and report the max error vs y_true."""
    tf.reset_default_graph()
    with get_session() as sess:
        y_tf = leaky_relu(tf.constant(x))
        y = sess.run(y_tf)
        print('Maximum error: %g'%rel_error(y_true, y))

test_leaky_relu(answers['lrelu_x'], answers['lrelu_y'])
# -
# ## Random Noise
# Generate a TensorFlow `Tensor` containing uniform noise from -1 to 1 with shape `[batch_size, dim]`.
def sample_noise(batch_size, dim):
    """Generate random uniform noise from -1 to 1.

    Inputs:
    - batch_size: integer giving the batch size of noise to generate
    - dim: integer giving the dimension of the noise to generate

    Returns:
    TensorFlow Tensor containing uniform noise in [-1, 1] with shape [batch_size, dim]
    """
    # A stateful random op: every sess.run draws a fresh sample, which the
    # sanity check below relies on. (This was an unimplemented TODO stub.)
    return tf.random_uniform([batch_size, dim], minval=-1, maxval=1)
# Make sure noise is the correct shape and type:
# +
def test_sample_noise():
    """Sanity-check sample_noise: shape, tensor type, freshness, and range."""
    batch_size = 3
    dim = 4
    tf.reset_default_graph()
    with get_session() as sess:
        z = sample_noise(batch_size, dim)
        # Check z has the correct shape
        assert z.get_shape().as_list() == [batch_size, dim]
        # Make sure z is a Tensor and not a numpy array
        assert isinstance(z, tf.Tensor)
        # Check that we get different noise for different evaluations
        z1 = sess.run(z)
        z2 = sess.run(z)
        assert not np.array_equal(z1, z2)
        # Check that we get the correct range
        assert np.all(z1 >= -1.0) and np.all(z1 <= 1.0)
        print("All tests passed!")

test_sample_noise()
# -
# ## Discriminator
# Our first step is to build a discriminator. You should use the layers in `tf.layers` to build the model.
# All fully connected layers should include bias terms. For initialization, just use the default initializer used by the `tf.layers` functions.
#
# Architecture:
# * Fully connected layer with input size 784 and output size 256
# * LeakyReLU with alpha 0.01
# * Fully connected layer with output size 256
# * LeakyReLU with alpha 0.01
# * Fully connected layer with output size 1
#
# The output of the discriminator should thus have shape `[batch_size, 1]`, and contain real numbers corresponding to the scores that each of the `batch_size` inputs is a real image.
def discriminator(x):
    """Compute discriminator score for a batch of input images.

    Inputs:
    - x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]

    Returns:
    TensorFlow Tensor with shape [batch_size, 1], containing the score
    for an image being real for each input image.
    """
    with tf.variable_scope("discriminator"):
        # 784 -> 256 -> 256 -> 1 with LeakyReLU(0.01) after each hidden
        # layer. tf.layers.dense includes a bias term and uses its default
        # initializer, as the spec requires; total parameter count is
        # 267,009, matching test_discriminator below.
        h1 = leaky_relu(tf.layers.dense(x, 256), alpha=0.01)
        h2 = leaky_relu(tf.layers.dense(h1, 256), alpha=0.01)
        logits = tf.layers.dense(h2, 1)
    return logits
# Test to make sure the number of parameters in the discriminator is correct:
# +
def test_discriminator(true_count=267009):
    """Build the discriminator once and verify its total parameter count."""
    tf.reset_default_graph()
    with get_session() as sess:
        y = discriminator(tf.ones((2, 784)))
        cur_count = count_params()
        if cur_count != true_count:
            print('Incorrect number of parameters in discriminator. {0} instead of {1}. Check your achitecture.'.format(cur_count,true_count))
        else:
            print('Correct number of parameters in discriminator.')

test_discriminator()
# -
# ## Generator
# Now to build a generator. You should use the layers in `tf.layers` to construct the model. All fully connected layers should include bias terms. Note that you can use the tf.nn module to access activation functions. Once again, use the default initializers for parameters.
#
# Architecture:
# * Fully connected layer with input size tf.shape(z)[1] (the number of noise dimensions) and output size 1024
# * `ReLU`
# * Fully connected layer with output size 1024
# * `ReLU`
# * Fully connected layer with output size 784
# * `TanH` (To restrict every element of the output to be in the range [-1,1])
def generator(z):
    """Generate images from a random noise vector.

    Architecture (per the spec above): FC(noise_dim->1024) -> ReLU
    -> FC(1024->1024) -> ReLU -> FC(1024->784) -> TanH, so every output
    pixel lies in [-1, 1] to match the preprocessed real images.

    Inputs:
    - z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
    Returns:
    TensorFlow Tensor of generated images, with shape [batch_size, 784].
    """
    with tf.variable_scope("generator"):
        h1 = tf.layers.dense(z, 1024, activation=tf.nn.relu)
        h2 = tf.layers.dense(h1, 1024, activation=tf.nn.relu)
        img = tf.layers.dense(h2, 784, activation=tf.nn.tanh)
    return img
# Test to make sure the number of parameters in the generator is correct:
# +
def test_generator(true_count=1858320):
    """Sanity-check the generator by counting its trainable parameters.

    Inputs:
    - true_count: expected parameter count for the documented architecture
      (1858320 for the fully connected version with a 4-dim noise input).
    """
    tf.reset_default_graph()
    with get_session() as sess:
        y = generator(tf.ones((1, 4)))
        cur_count = count_params()
        if cur_count != true_count:
            # Fixed typo in the message: "achitecture" -> "architecture".
            print('Incorrect number of parameters in generator. {0} instead of {1}. Check your architecture.'.format(cur_count,true_count))
        else:
            print('Correct number of parameters in generator.')
test_generator()
# -
# # GAN Loss
#
# Compute the generator and discriminator loss. The generator loss is:
# $$\ell_G = -\mathbb{E}_{z \sim p(z)}\left[\log D(G(z))\right]$$
# and the discriminator loss is:
# $$ \ell_D = -\mathbb{E}_{x \sim p_\text{data}}\left[\log D(x)\right] - \mathbb{E}_{z \sim p(z)}\left[\log \left(1-D(G(z))\right)\right]$$
# Note that these are negated from the equations presented earlier as we will be *minimizing* these losses.
#
# **HINTS**: Use [tf.ones_like](https://www.tensorflow.org/api_docs/python/tf/ones_like) and [tf.zeros_like](https://www.tensorflow.org/api_docs/python/tf/zeros_like) to generate labels for your discriminator. Use [tf.nn.sigmoid_cross_entropy_with_logits](https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits) to help compute your loss function. Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing.
def gan_loss(logits_real, logits_fake):
    """Compute the GAN loss.
    Inputs:
    - logits_real: Tensor, shape [batch_size, 1], output of discriminator
        Unnormalized score that the image is real for each real image
    - logits_fake: Tensor, shape[batch_size, 1], output of discriminator
        Unnormalized score that the image is real for each fake image
    Returns:
    - D_loss: discriminator loss scalar
    - G_loss: generator loss scalar

    Expectations are approximated by minibatch means. Per the hint, the
    discriminator loss averages its two components separately, then sums.
    """
    # Discriminator: real images should be labeled 1, fakes labeled 0.
    loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits_real),
                                                logits=logits_real))
    loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits_fake),
                                                logits=logits_fake))
    D_loss = loss_real + loss_fake
    # Generator: fool the discriminator into labeling fakes as 1, i.e.
    # minimize -E[log D(G(z))].
    G_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(logits_fake),
                                                logits=logits_fake))
    return D_loss, G_loss
# Test your GAN loss. Make sure both the generator and discriminator loss are correct. You should see errors less than 1e-5.
# +
def test_gan_loss(logits_real, logits_fake, d_loss_true, g_loss_true):
    """Evaluate gan_loss on reference logits and report the max relative error."""
    tf.reset_default_graph()
    with get_session() as sess:
        losses = gan_loss(tf.constant(logits_real), tf.constant(logits_fake))
        d_loss, g_loss = sess.run(losses)
    # Compare each computed loss against its reference value.
    print("Maximum error in d_loss: %g" % rel_error(d_loss_true, d_loss))
    print("Maximum error in g_loss: %g" % rel_error(g_loss_true, g_loss))
test_gan_loss(
    answers['logits_real'],
    answers['logits_fake'],
    answers['d_loss_true'],
    answers['g_loss_true'],
)
# -
# # Optimizing our loss
# Make an `AdamOptimizer` with a 1e-3 learning rate, beta1=0.5 to mininize G_loss and D_loss separately. The trick of decreasing beta was shown to be effective in helping GANs converge in the [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498) paper. In fact, with our current hyperparameters, if you set beta1 to the Tensorflow default of 0.9, there's a good chance your discriminator loss will go to zero and the generator will fail to learn entirely. In fact, this is a common failure mode in GANs; if your D(x) learns to be too fast (e.g. loss goes near zero), your G(z) is never able to learn. Often D(x) is trained with SGD with Momentum or RMSProp instead of Adam, but here we'll use Adam for both D(x) and G(z).
# TODO: create an AdamOptimizer for D_solver and G_solver
def get_solvers(learning_rate=1e-3, beta1=0.5):
    """Create solvers for GAN training.
    Inputs:
    - learning_rate: learning rate to use for both solvers
    - beta1: beta1 parameter for both solvers (first moment decay)
    Returns:
    - D_solver: instance of tf.train.AdamOptimizer with correct learning_rate and beta1
    - G_solver: instance of tf.train.AdamOptimizer with correct learning_rate and beta1
    """
    # Two separate optimizer instances so each network keeps its own Adam
    # moment accumulators (they are minimized on disjoint var_lists).
    D_solver = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1)
    G_solver = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1)
    return D_solver, G_solver
# ## Putting it all together
# Now just a bit of Lego Construction.. Read this section over carefully to understand how we'll be composing the generator and discriminator
# +
tf.reset_default_graph()
# number of images for each batch
batch_size = 128
# our noise dimension
noise_dim = 96
# placeholder for images from the training dataset (flattened 28x28 MNIST)
x = tf.placeholder(tf.float32, [None, 784])
# random noise fed into our generator (sampled inside the graph)
z = sample_noise(batch_size, noise_dim)
# generated images
G_sample = generator(z)
# Run the discriminator twice — on real and on generated images — sharing
# one set of weights via variable-scope reuse.
with tf.variable_scope("") as scope:
    # scale images to be -1 to 1, matching the generator's tanh output range
    logits_real = discriminator(preprocess_img(x))
    # Re-use discriminator weights on new inputs
    scope.reuse_variables()
    logits_fake = discriminator(G_sample)
# Get the list of variables for the discriminator and generator, so each
# optimizer below only updates its own network's weights.
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'discriminator')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'generator')
# get our solver
D_solver, G_solver = get_solvers()
# get our loss
D_loss, G_loss = gan_loss(logits_real, logits_fake)
# setup training steps
D_train_step = D_solver.minimize(D_loss, var_list=D_vars)
G_train_step = G_solver.minimize(G_loss, var_list=G_vars)
# Collect any UPDATE_OPS (e.g. batch-norm statistics) per network; empty
# for this fully connected model but used by the DCGAN later.
D_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'discriminator')
G_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'generator')
# -
# # Training a GAN!
# Well that wasn't so hard, was it? After the first epoch, you should see fuzzy outlines, clear shapes as you approach epoch 3, and decent shapes, about half of which will be sharp and clearly recognizable as we pass epoch 5. In our case, we'll simply train D(x) and G(z) with one batch each every iteration. However, papers often experiment with different schedules of training D(x) and G(z), sometimes doing one for more steps than the other, or even training each one until the loss gets "good enough" and then switching to training the other.
# a giant helper function
def run_a_gan(sess, G_train_step, G_loss, D_train_step, D_loss, G_extra_step, D_extra_step,\
              show_every=2, print_every=1, batch_size=128, num_epoch=10):
    """Train a GAN for a certain number of epochs.

    NOTE(review): also reads the module-level globals `G_sample` (sampled
    generator output) and `x` (real-image placeholder) created in the graph
    setup cell, plus `MNIST`, `show_images` and `plt`.

    Inputs:
    - sess: A tf.Session that we want to use to run our data
    - G_train_step: A training step for the Generator
    - G_loss: Generator loss
    - D_train_step: A training step for the Discriminator
    - D_loss: Discriminator loss
    - G_extra_step: A collection of tf.GraphKeys.UPDATE_OPS for generator
    - D_extra_step: A collection of tf.GraphKeys.UPDATE_OPS for discriminator
    - show_every: show sample generated images every this many epochs
    - print_every: print losses every this many epochs
    - batch_size: minibatch size used by the MNIST loader
    - num_epoch: number of passes over the training set
    Returns:
    Nothing
    """
    # MNIST loader yields (images, labels) minibatches; labels are unused.
    mnist = MNIST(batch_size=batch_size, shuffle=True)
    for epoch in range(num_epoch):
        # every so often, show a sample result from the current generator
        if epoch % show_every == 0:
            samples = sess.run(G_sample)
            fig = show_images(samples[:16])
            plt.show()
            print()
        for (minibatch, minbatch_y) in mnist:
            # One D step on real data, then one G step; the noise z is
            # sampled inside the graph, so the G step needs no feed_dict.
            _, D_loss_curr = sess.run([D_train_step, D_loss], feed_dict={x: minibatch})
            _, G_loss_curr = sess.run([G_train_step, G_loss])
        # print loss every so often.
        # We want to make sure D_loss doesn't go to 0
        if epoch % print_every == 0:
            print('Epoch: {}, D: {:.4}, G:{:.4}'.format(epoch,D_loss_curr,G_loss_curr))
    print('Final images')
    samples = sess.run(G_sample)
    fig = show_images(samples[:16])
    plt.show()
# #### Train your GAN! This should take about 10 minutes on a CPU, or less than a minute on GPU.
# Open a fresh session, initialize all weights, and train the vanilla GAN.
with get_session() as sess:
    sess.run(tf.global_variables_initializer())
    run_a_gan(sess,G_train_step,G_loss,D_train_step,D_loss,G_extra_step,D_extra_step)
# # Least Squares GAN
# We'll now look at [Least Squares GAN](https://arxiv.org/abs/1611.04076), a newer, more stable alternative to the original GAN loss function. For this part, all we have to do is change the loss function and retrain the model. We'll implement equation (9) in the paper, with the generator loss:
# $$\ell_G = \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[\left(D(G(z))-1\right)^2\right]$$
# and the discriminator loss:
# $$ \ell_D = \frac{1}{2}\mathbb{E}_{x \sim p_\text{data}}\left[\left(D(x)-1\right)^2\right] + \frac{1}{2}\mathbb{E}_{z \sim p(z)}\left[ \left(D(G(z))\right)^2\right]$$
#
#
# **HINTS**: Instead of computing the expectation, we will be averaging over elements of the minibatch, so make sure to combine the loss by averaging instead of summing. When plugging in for $D(x)$ and $D(G(z))$ use the direct output from the discriminator (`score_real` and `score_fake`).
def lsgan_loss(scores_real, scores_fake):
    """Compute the Least Squares GAN loss.
    Inputs:
    - scores_real: Tensor, shape [batch_size, 1], output of discriminator
        The score for each real image
    - scores_fake: Tensor, shape[batch_size, 1], output of discriminator
        The score for each fake image
    Returns:
    - D_loss: discriminator loss scalar
    - G_loss: generator loss scalar

    Implements equation (9) of the LSGAN paper with expectations replaced
    by minibatch means, using the raw discriminator scores directly.
    """
    # D: push real scores toward 1 and fake scores toward 0.
    D_loss = 0.5 * tf.reduce_mean(tf.square(scores_real - 1)) \
             + 0.5 * tf.reduce_mean(tf.square(scores_fake))
    # G: push fake scores toward 1.
    G_loss = 0.5 * tf.reduce_mean(tf.square(scores_fake - 1))
    return D_loss, G_loss
# Test your LSGAN loss. You should see errors less than 1e-7.
# +
def test_lsgan_loss(score_real, score_fake, d_loss_true, g_loss_true):
    """Evaluate lsgan_loss on reference scores and report the max relative error."""
    with get_session() as sess:
        losses = lsgan_loss(tf.constant(score_real), tf.constant(score_fake))
        d_loss, g_loss = sess.run(losses)
    # Compare each computed loss against its reference value.
    print("Maximum error in d_loss: %g" % rel_error(d_loss_true, d_loss))
    print("Maximum error in g_loss: %g" % rel_error(g_loss_true, g_loss))
test_lsgan_loss(
    answers['logits_real'],
    answers['logits_fake'],
    answers['d_loss_lsgan_true'],
    answers['g_loss_lsgan_true'],
)
# -
# Create new training steps so we instead minimize the LSGAN loss:
# Rebuild the losses and train ops so training minimizes the LSGAN
# objective instead. NOTE(review): logits_real/logits_fake here are the
# discriminator's raw scores, which is exactly what lsgan_loss expects.
D_loss, G_loss = lsgan_loss(logits_real, logits_fake)
D_train_step = D_solver.minimize(D_loss, var_list=D_vars)
G_train_step = G_solver.minimize(G_loss, var_list=G_vars)
# _Run the following cell to train your model!_
with get_session() as sess:
    sess.run(tf.global_variables_initializer())
    run_a_gan(sess, G_train_step, G_loss, D_train_step, D_loss, G_extra_step, D_extra_step)
# # Deep Convolutional GANs
# In the first part of the notebook, we implemented an almost direct copy of the original GAN network from Ian Goodfellow et al. However, this network architecture allows no real spatial reasoning. It is unable to reason about things like "sharp edges" in general because it lacks any convolutional layers. Thus, in this section, we will implement some of the ideas from [DCGAN](https://arxiv.org/abs/1511.06434), where we use convolutional networks as our discriminators and generators.
#
# #### Discriminator
# We will use a discriminator inspired by the TensorFlow MNIST classification [tutorial](https://www.tensorflow.org/get_started/mnist/pros), which is able to get above 99% accuracy on the MNIST dataset fairly quickly. *Be sure to check the dimensions of x and reshape when needed*, fully connected blocks expect [N,D] Tensors while conv2d blocks expect [N,H,W,C] Tensors. Please use `tf.layers` to define the following architecture:
#
# Architecture:
# * Conv2D: 32 Filters, 5x5, Stride 1, padding 0
# * Leaky ReLU(alpha=0.01)
# * Max Pool 2x2, Stride 2
# * Conv2D: 64 Filters, 5x5, Stride 1, padding 0
# * Leaky ReLU(alpha=0.01)
# * Max Pool 2x2, Stride 2
# * Flatten
# * Fully Connected with output size 4 x 4 x 64
# * Leaky ReLU(alpha=0.01)
# * Fully Connected with output size 1
#
# Once again, please use biases for all convolutional and fully connected layers, and use the default parameter initializers. Note that a padding of 0 can be accomplished with the 'VALID' padding option.
def discriminator(x):
    """Compute discriminator score for a batch of input images.

    DCGAN discriminator: two conv -> LeakyReLU -> max-pool stages
    followed by two fully connected layers, per the spec above.

    Inputs:
    - x: TensorFlow Tensor of flattened input images, shape [batch_size, 784]
    Returns:
    TensorFlow Tensor with shape [batch_size, 1], containing the score
    for an image being real for each input image.
    """
    with tf.variable_scope("discriminator"):
        # Conv layers need [N, H, W, C]; restore the 28x28 grayscale image.
        img = tf.reshape(x, [-1, 28, 28, 1])
        # Conv 32@5x5 valid -> LeakyReLU -> 2x2 max pool: 28 -> 24 -> 12
        h = tf.layers.conv2d(img, filters=32, kernel_size=5, strides=1, padding='valid')
        h = tf.nn.leaky_relu(h, alpha=0.01)
        h = tf.layers.max_pooling2d(h, pool_size=2, strides=2)
        # Conv 64@5x5 valid -> LeakyReLU -> 2x2 max pool: 12 -> 8 -> 4
        h = tf.layers.conv2d(h, filters=64, kernel_size=5, strides=1, padding='valid')
        h = tf.nn.leaky_relu(h, alpha=0.01)
        h = tf.layers.max_pooling2d(h, pool_size=2, strides=2)
        # Flatten [N, 4, 4, 64] -> [N, 1024] and score with two FC layers.
        h = tf.layers.flatten(h)
        h = tf.layers.dense(h, 4 * 4 * 64)
        h = tf.nn.leaky_relu(h, alpha=0.01)
        logits = tf.layers.dense(h, 1)
    return logits
test_discriminator(1102721)
# #### Generator
# For the generator, we will copy the architecture exactly from the [InfoGAN paper](https://arxiv.org/pdf/1606.03657.pdf). See Appendix C.1 MNIST. Please use `tf.layers` for your implementation. You might find the documentation for [tf.layers.conv2d_transpose](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d_transpose) useful. The architecture is as follows.
#
# Architecture:
# * Fully connected with output size 1024
# * `ReLU`
# * BatchNorm
# * Fully connected with output size 7 x 7 x 128
# * `ReLU`
# * BatchNorm
# * Resize into Image Tensor of size 7, 7, 128
# * Conv2D^T (transpose): 64 filters of 4x4, stride 2
# * `ReLU`
# * BatchNorm
# * Conv2d^T (transpose): 1 filter of 4x4, stride 2
# * `TanH`
#
# Once again, use biases for the fully connected and transpose convolutional layers. Please use the default initializers for your parameters. For padding, choose the 'same' option for transpose convolutions. For Batch Normalization, assume we are always in 'training' mode.
def generator(z):
    """Generate images from a random noise vector.

    DCGAN generator (InfoGAN appendix C.1): two FC + ReLU + BatchNorm
    stages, reshape to 7x7x128, then two stride-2 transpose convolutions
    upsampling 7x7 -> 14x14 -> 28x28, finishing with tanh.

    Inputs:
    - z: TensorFlow Tensor of random noise with shape [batch_size, noise_dim]
    Returns:
    TensorFlow Tensor of generated images, with shape [batch_size, 784].
    """
    with tf.variable_scope("generator"):
        h = tf.layers.dense(z, 1024, activation=tf.nn.relu)
        # Always in 'training' mode per the spec, so BN uses batch statistics.
        h = tf.layers.batch_normalization(h, training=True)
        h = tf.layers.dense(h, 7 * 7 * 128, activation=tf.nn.relu)
        h = tf.layers.batch_normalization(h, training=True)
        # Resize into an image tensor before the transpose convolutions.
        h = tf.reshape(h, [-1, 7, 7, 128])
        h = tf.layers.conv2d_transpose(h, filters=64, kernel_size=4, strides=2,
                                       padding='same', activation=tf.nn.relu)
        h = tf.layers.batch_normalization(h, training=True)
        h = tf.layers.conv2d_transpose(h, filters=1, kernel_size=4, strides=2,
                                       padding='same', activation=tf.nn.tanh)
        # Flatten back to [batch_size, 784] to match the discriminator input.
        img = tf.reshape(h, [-1, 784])
    return img
test_generator(6595521)
# We have to recreate our network since we've changed our functions.
# +
tf.reset_default_graph()
batch_size = 128
# our noise dimension
noise_dim = 96
# placeholder for images from the training dataset (flattened 28x28 MNIST)
x = tf.placeholder(tf.float32, [None, 784])
z = sample_noise(batch_size, noise_dim)
# generated images
G_sample = generator(z)
# Share one set of discriminator weights between real and fake inputs.
with tf.variable_scope("") as scope:
    # scale images to be -1 to 1, matching the generator's tanh output range
    logits_real = discriminator(preprocess_img(x))
    # Re-use discriminator weights on new inputs
    scope.reuse_variables()
    logits_fake = discriminator(G_sample)
# Get the list of variables for the discriminator and generator
D_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,'discriminator')
G_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,'generator')
D_solver,G_solver = get_solvers()
D_loss, G_loss = gan_loss(logits_real, logits_fake)
D_train_step = D_solver.minimize(D_loss, var_list=D_vars)
G_train_step = G_solver.minimize(G_loss, var_list=G_vars)
# UPDATE_OPS matter here: the DCGAN generator uses batch normalization.
D_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS,'discriminator')
G_extra_step = tf.get_collection(tf.GraphKeys.UPDATE_OPS,'generator')
# -
# ### Train and evaluate a DCGAN
# This is the one part of A3 that significantly benefits from using a GPU. It takes 3 minutes on a GPU for the requested five epochs. Or about 50 minutes on a dual core laptop on CPU (feel free to use 3 epochs if you do it on CPU).
# Initialize all weights and train the DCGAN for 5 epochs.
with get_session() as sess:
    sess.run(tf.global_variables_initializer())
    run_a_gan(sess,G_train_step,G_loss,D_train_step,D_loss,G_extra_step,D_extra_step,num_epoch=5)
# ## INLINE QUESTION 1
#
# We will look at an example to see why alternating minimization of the same objective (like in a GAN) can be tricky business.
#
# Consider $f(x,y)=xy$. What does $\min_x\max_y f(x,y)$ evaluate to? (Hint: minmax tries to minimize the maximum value achievable.)
#
# Now try to evaluate this function numerically for 6 steps, starting at the point $(1,1)$,
# by using alternating gradient (first updating y, then updating x) with step size $1$.
# You'll find that writing out the update step in terms of $x_t,y_t,x_{t+1},y_{t+1}$ will be useful.
#
# Record the six pairs of explicit values for $(x_t,y_t)$ in the table below.
# ### Your answer:
#
# $y_0$ | $y_1$ | $y_2$ | $y_3$ | $y_4$ | $y_5$ | $y_6$
# ----- | ----- | ----- | ----- | ----- | ----- | -----
# 1 | | | | | |
# $x_0$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$
# 1 | | | | | |
#
# ## INLINE QUESTION 2
# Using this method, will we ever reach the optimal value? Why or why not?
# ### Your answer:
# ## INLINE QUESTION 3
# If the generator loss decreases during training while the discriminator loss stays at a constant high value from the start, is this a good sign? Why or why not? A qualitative answer is sufficient
# ### Your answer:
|
GANs-TensorFlow.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nwb_data
# language: python
# name: nwb_data
# ---
# # Individual participant characteristics
# Creates table of individual participant characteristics (good/total electrodes, hemisphere implanted, recording days used, etc.)
# %pip install natsort
# %pip install seaborn
# %pip install nilearn
# %pip install dandi
# %pip install git+https://github.com/catalystneuro/brunton-lab-to-nwb.git
import pandas as pd
from plot_utils import load_data_characteristics, plot_ecog_descript
# ## Generate table
# +
# Load per-participant recording characteristics and combine them with
# demographics (hard-coded below) into a summary table.
rec_days, hemi, surf_tot, surf_good, depth_tot, depth_good, _, part, _, _ = load_data_characteristics()
ages = [44, 20, 33, 19, 31, 37, 26, 33, 20, 34, 34, 22]  # not found in data files
gender = ['M', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'M', 'M', 'F', 'M']  # not found in data files
# Format electrode counts as "good / total" display strings.
surf_elecs = [str(val_good)+' / '+str(val_tot) for val_good, val_tot in zip(surf_good, surf_tot)]
depth_elecs = [str(val_good)+' / '+str(val_tot) for val_good, val_tot in zip(depth_good, depth_tot)]
# Transpose (.T) so each row is one participant.
pd.DataFrame([part, gender, ages, rec_days, hemi, surf_elecs, depth_elecs],
             index=['Participant', 'Gender', 'Age (years)', 'Recording days used', 'Hemisphere implanted',
                    'Surface electrodes: # good / total', 'Depth electrodes: # good / total']).T
# -
|
figs_stream/Table_part_characteristics.ipynb
|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from fastai2.basics import *
from fastai2.callback.all import *
from fastai2.text.all import *
# +
# all_slow
# -
# # Integration test on Wikitext-2
#
# > Training a Language Model on WT2
# ## Data
path = untar_data(URLs.WIKITEXT_TINY)
# The dataset comes with the articles in two csv files, so we read it and concatenate them in one dataframe.
df_train = pd.read_csv(path/'train.csv', header=None)
df_valid = pd.read_csv(path/'test.csv', header=None)
df_all = pd.concat([df_train, df_valid])
df_all.head()
# We could tokenize it based on spaces to compare (as is usually done) but here we'll use the standard fastai tokenizer.
df_tok,count = tokenize_df(df_all, [0])
df_tok.head()
# The tokenize function returns a new dataframe with the tokenized texts and a counter with the frequency of each word. We use that counter to create a vocab, then a `Numericalize` transform.
# NOTE(review): `Numericalize(make_vocab(count))` rebuilds the vocab; it
# could simply reuse the `vocab` computed on the previous line.
vocab = make_vocab(count)
tfm = Numericalize(make_vocab(count))
# Split by position: the first len(df_train) rows are the training set.
splits = [list(range_of(df_train)), list(range(len(df_train), len(df_all)))]
dsrc = DataSource(df_tok, [[attrgetter("text"), tfm]], splits=splits, dl_type=LMDataLoader)
# batch size and sequence length for the language-model data loader
bs,sl = 104,72
dbch = dsrc.databunch(bs=bs, seq_len=sl, after_batch=Cuda)
dbch.show_batch()
# ## Model
# AWD-LSTM language model with heavier dropout than the library defaults.
config = awd_lstm_lm_config.copy()
config.update({'input_p': 0.6, 'output_p': 0.4, 'weight_p': 0.5, 'embed_p': 0.1, 'hidden_p': 0.2})
model = get_language_model(AWD_LSTM, len(vocab), config=config)
opt_func = partial(Adam, wd=0.1, eps=1e-7)
# Mixed precision with gradient clipping, plus AR/TAR regularization.
cb_funcs = [partial(MixedPrecision, clip=0.1), partial(RNNTrainer, alpha=2, beta=1)]
learn = Learner(dbch, model, loss_func=CrossEntropyLossFlat(), opt_func=opt_func, cb_funcs=cb_funcs, metrics=[accuracy, Perplexity()])
learn.fit_one_cycle(1, 5e-3, moms=(0.8,0.7,0.8), div=10)
# Full training
# +
#learn.fit_one_cycle(90, 5e-3, moms=(0.8,0.7,0.8), div=10)
# -
|
nbs/35_tutorial.wikitext.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="NzVo3MVIiuyJ"
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import cv2
# + id="w8-S1_68jF78"
import tensorflow as tf
from tensorflow import keras
# + id="3LL-3dTlkqVx"
# Dataset roots: one subdirectory per class under each split directory.
train_dir='/media/adarshsrivastava/DATA/Face_mask/Train'
test_dir='/media/adarshsrivastava/DATA/Face_mask/Test'
validation_dir='/media/adarshsrivastava/DATA/Face_mask/Validation'
# + id="5ssXDdlCoY2e"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Only rescaling to [0, 1]; no data augmentation is applied.
train_datagen=ImageDataGenerator(rescale=1./255)
test_datagen=ImageDataGenerator(rescale=1./255)
validation_datagen=ImageDataGenerator(rescale=1./255)
# + colab={"base_uri": "https://localhost:8080/"} id="KJYVFKTPo87g" outputId="6131b6dc-18d8-4e1e-9cde-c06d2ab557ba"
# class_mode='binary' yields integer labels (0/1), matching the
# sparse_categorical_crossentropy loss used below.
train_generator=train_datagen.flow_from_directory(
    train_dir,
    target_size=(128,128),
    batch_size=80,
    class_mode='binary'
)
validation_generator=validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(128,128),
    batch_size=25,
    class_mode="binary"
)
# + colab={"base_uri": "https://localhost:8080/"} id="B3sWxpA-g9kl" outputId="d85a4fa1-5313-4b69-c1f8-8d07fed06b6d"
from tensorflow.keras.applications import VGG19
# Transfer learning: ImageNet-pretrained VGG19 base, classifier head removed.
# NOTE(review): the base is not frozen, so all VGG19 weights are fine-tuned
# below — confirm this is intended (it is slow and data-hungry).
conv_base=VGG19(
    weights='imagenet',
    include_top=False,
    input_shape=(128,128,3)
)
# + colab={"base_uri": "https://localhost:8080/"} id="uaKqPtiC336i" outputId="16dab141-0ccc-4c0c-d8e0-254ca7a5d7e7"
conv_base.summary()
# + id="pxP0uw2A38PC"
from tensorflow.keras import layers
from tensorflow.keras import models
# + id="IzYn3pUP4J29"
# Classifier head: flatten VGG19 features, one hidden dense layer, and a
# 2-way softmax output (one unit per class).
model=models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256,activation='relu'))
model.add(layers.Dense(2,activation='softmax'))
# + colab={"base_uri": "https://localhost:8080/"} id="85qNTlut5dR-" outputId="832cd601-0930-48c6-9bd6-7fe823dda4b9"
model.summary()
# + id="iuAfUMn95hPJ"
from tensorflow.keras import optimizers
# + id="xEUP3bFN5ocp"
# Sparse loss pairs the generators' integer labels with the softmax output.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=optimizers.RMSprop(1e-4),
              metrics=['acc'])
# + id="8o9EEuKW6AT6"
# Save a checkpoint after every epoch.
# NOTE(review): the ".h" extension is unusual for Keras — presumably ".h5"
# was intended; verify the saved files load correctly.
checkpoint_cb=keras.callbacks.ModelCheckpoint("maskdetection2-{epoch:02d}.h")
# + colab={"base_uri": "https://localhost:8080/"} id="tAZAJRTI6Ssk" outputId="e27d05db-fae3-4448-d110-6a71654cd227"
history=model.fit(train_generator,
                  steps_per_epoch=125,
                  epochs=5,
                  validation_data=validation_generator,
                  validation_steps=32,
                  callbacks=[checkpoint_cb]
)
# + colab={"base_uri": "https://localhost:8080/", "height": 324} id="PytdzmqZ6z82" outputId="d7316ac3-8ae9-480e-a802-1b10d6a31546"
# Plot loss/accuracy curves per epoch.
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid()
plt.gca().set_ylim(0,1)
plt.show()
# + id="NN2mpCZz7UNV"
hist_df=pd.DataFrame(history.history)
# + id="DrN_JPiA2Zhv"
# Persist the training history for later analysis.
hist_csv_file='histfacemask.csv'
with open(hist_csv_file,mode='w') as f:
    hist_df.to_csv(f)
# + colab={"base_uri": "https://localhost:8080/"} id="3J683zpl7rE_" outputId="f2852ca2-0f07-4260-f97b-c6d6540406c5"
test_generator=test_datagen.flow_from_directory(
    test_dir,
    target_size=(128,128),
    batch_size=32,
    class_mode='binary'
)
# + colab={"base_uri": "https://localhost:8080/"} id="IPo46-Hf8UTH" outputId="409fc554-db35-47ef-f741-e8284c022f65"
model.evaluate(test_generator,steps=31)
# + id="dC05LdO4uswY"
# Real-time webcam demo: detect faces with a Haar cascade and classify
# each face crop with the trained model.
# NOTE(review): face_clsfr is never used — `classifier` below loads the
# same cascade file; one of the two could be removed.
face_clsfr=cv2.CascadeClassifier('/media/adarshsrivastava/DATA/Face_mask/haarcascade_frontalface_default.xml')
labels_dict={0:'with_mask',1:'without_mask'}
color_dict={0:(0,255,0),1:(0,0,255)}
size = 4  # downscale factor used to speed up face detection
webcam=cv2.VideoCapture(0) #Use camera 0
# We load the xml file
classifier = cv2.CascadeClassifier('/media/adarshsrivastava/DATA/Face_mask/haarcascade_frontalface_default.xml')
while True:
    (rval, im) = webcam.read()
    im=cv2.flip(im,1,1) #Flip to act as a mirror
    # Resize the image to speed up detection
    mini = cv2.resize(im, (im.shape[1]//size, im.shape[0]//size))
    # detect MultiScale / faces (coordinates are in the mini image's scale)
    faces = classifier.detectMultiScale(mini)
    # Draw rectangles around each face
    for f in faces:
        (x, y, w, h) = [v * size for v in f] #Scale the shape size back up
        #Save just the rectangle faces in SubRecFaces
        face_img = im[y:y+h, x:x+w]
        # Preprocess exactly like training: 128x128 and scaled to [0, 1].
        resized=cv2.resize(face_img,(128,128))
        normalized=resized/255.0
        reshaped=np.reshape(normalized,(1,128,128,3))
        reshaped = np.vstack([reshaped])
        result=model.predict(reshaped)
        # argmax over the 2-way softmax gives the predicted class index.
        label=np.argmax(result,axis=1)[0]
        cv2.rectangle(im,(x,y),(x+w,y+h),color_dict[label],2)
        cv2.rectangle(im,(x,y-40),(x+w,y),color_dict[label],-1)
        cv2.putText(im, labels_dict[label], (x, y-10),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,255,255),2)
    # Show the image
    cv2.imshow('LIVE',im)
    key = cv2.waitKey(10)
    # if Esc key is pressed then break out of the loop
    if key == 27: #The Esc key
        break
# Stop video
webcam.release()
# Close all started windows
cv2.destroyAllWindows()
# + id="--05LOLTkeVy"
|
Notebook/Face_Mask_nb.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gathering data from the web - Problems
# + [markdown] colab_type="text" id="rYYTxll6wm6i"
# **Author:** <NAME> ([Personal Website](https://www.tiesdekok.com)) <br>
# **Last updated:** June 2020
# **Python version:** Python 3.6+
# **License:** MIT License
# **Recommended environment: `limpergPython`**
# -
import os
recommendedEnvironment = 'limpergPython'
# Warn (rather than crash) if the expected conda environment is not active.
# os.environ.get avoids a KeyError when CONDA_DEFAULT_ENV is unset, e.g.
# when Jupyter was not started from any conda environment.
if os.environ.get('CONDA_DEFAULT_ENV') != recommendedEnvironment:
    print('Warning: it does not appear you are using the {0} environment, did you run "conda activate {0}" before starting Jupyter?'.format(recommendedEnvironment))
# <div style='border-style: solid; padding: 10px; border-color: black; border-width:5px; text-align: left; margin-top:20px; margin-bottom: 20px;'>
# <span style='color:black; font-size: 30px; font-weight:bold;'>Introduction</span>
# </div>
# + [markdown] colab_type="text" id="XDCbIPT4wm6l"
# <div style='border-style: solid; padding: 5px; border-color: darkred; border-width:5px; text-align: center; margin-left: 100px; margin-right:100px;'>
# <span style='color:black; font-size: 20px; font-weight:bold;'> Make sure to open up the respective tutorial notebook(s)! <br> That is what you are expected to use as primary reference material. </span>
# </div>
# -
# ### Relevant tutorial notebooks:
#
# 1) [`0_python_basics.ipynb`](https://nbviewer.jupyter.org/github/TiesdeKok/LearnPythonforResearch/blob/master/0_python_basics.ipynb)
#
#
# 2) [`2_handling_data.ipynb`](https://nbviewer.jupyter.org/github/TiesdeKok/LearnPythonforResearch/blob/master/2_handling_data.ipynb)
#
#
# 3) [`4_web_scraping.ipynb`](https://nbviewer.jupyter.org/github/TiesdeKok/LearnPythonforResearch/blob/master/4_web_scraping.ipynb)
# <div style='border-style: solid; padding: 10px; border-color: black; border-width:5px; text-align: center; margin-top:20px; margin-bottom: 20px;'>
# <span style='color:black; font-size: 30px; font-weight:bold;'>Part 1 </span>
# </div>
#
# <div style='border-style: solid; padding: 5px; border-color: darkred; border-width:5px; text-align: center; margin-left: 100px; margin-right:100px;'>
# <span style='color:black; font-size: 15px; font-weight:bold;'> Note: feel free to add as many cells as you'd like to answer these problems, you don't have to fit it all in one cell. </span>
# </div>
# The goal of these problems is to get hands-on experience with gathering data from the Web using `Requests` and `Requests-HTML`.
# The tasks below are split up into two sections:
#
# 1. API tasks
#
# 2. Web scraping tasks
# ## Import required packages
import requests
from requests_html import HTMLSession
import pandas as pd
import numpy as np
# ### Also run the code below, it solves a couple of minor problems that you don't need to worry about
from IPython.display import HTML
import time
def show_image(url):
    """Render the image at *url* inline, appending a timestamp query string
    so the browser does not serve a cached copy."""
    cache_buster = int(time.time())
    markup = '<img src="{}?{}"></img>'.format(url, cache_buster)
    return HTML(markup)
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# <div style='border-style: solid; padding: 10px; border-color: black; border-width:5px; text-align: left; margin-top:20px; margin-bottom: 20px;'>
# <span style='color:black; font-size: 30px; font-weight:bold;'>API Problem</span>
# </div>
# ## 1) Use the `genderize.io` API with the `requests` library
# Use this API: https://genderize.io/
# **NOTE:** it might be that this API is down if you get a "too many requests message"
# ### 1a) Use the API to automatically guess the gender of your first name
# ### 1b) Write a function that takes any first name as input and that uses the API to return the predicted gender and probability
# ### 1c) Create a list of names, and use the `guess_gender` function to predict the gender of each name. Include a 1 second pause after each guess.
# **Hint:** *use the `time` library for the pause*
# <div style='border-style: solid; padding: 10px; border-color: black; border-width:5px; text-align: left; margin-top:20px; margin-bottom: 20px;'>
# <span style='color:black; font-size: 30px; font-weight:bold;'>Web Scraping Problem</span>
# </div>
# ## 2) Create a webscraper that can collect information for a Foster Faculty member
# Your goal is to create a webscraper that can extract the following information from a Foster Faculty staff page (such as this one: https://foster.uw.edu/faculty-research/directory/david-burgstahler/ ):
#
# * Name
# * URL to profile image
# * Title of first selected publication
# **Hint 1:** use the `requests-html` library
# **Hint 2:** if you get an error mentioning SSL --> add `, verify=False` to the `session.get()` command like so: `session.get(.... , verify=False)`
# ### 2a) Use `requests-html` to extract the above three pieces of information from the Faculty page of <NAME>
# url = https://foster.uw.edu/faculty-research/directory/david-burgstahler/
#
# ---
#
# **Tip** you can show a picture from a URL in the notebook by using the provided `show_image(url)` function
# ### 2b) Create a function that takes a URL for a Staff page and extracts the three pieces of information and returns it as a dictionary
# Make sure to test your function by feeding it with the URL for various staff members! A full list is available here:
# https://foster.uw.edu/faculty-research/academic-departments/accounting/faculty/
#
# **Warning:** make sure that the function can deal with faculty members that do not have a picture or any selected publication, test if with (for example):
# https://foster.uw.edu/faculty-research/directory/jane-jollineau/
# <div style='border-style: solid; padding: 10px; border-color: black; border-width:5px; text-align: left; margin-top:20px; margin-bottom: 20px;'>
# <span style='color:black; font-size: 30px; font-weight:bold;'>Part 2: Advanced Functionality</span>
# </div>
# <div style='border-style: solid; padding: 10px; border-color: black; border-width:5px; text-align: left; margin-top:20px; margin-bottom: 20px;'>
# <span style='color:black; font-size: 30px; font-weight:bold;'>API Problem</span>
# </div>
# ## 3) Get current picture of traffic camera using the `wsdot` API and `requests`
# ### 3a) Get access key
# Go to " http://wsdot.com/traffic/api " in your browser.
# At the bottom of the page type a random email address in the text field (e.g. <EMAIL>) and copy the access key and assign it to a Python variable.
# ### 3b) Retrieve current picture of traffic camera for the `NE 45th St` camera
#
# See: https://www.wsdot.com/traffic/seattle/default.aspx?cam=1032#cam
#
# The `CAMERAID` of the `NE 45th St` camera is: **1032**
#
# ---
#
# **Tip** you can show a picture from a URL in the notebook by using the provided `show_image(url)` function
#
# 
# <div style='border-style: solid; padding: 5px; border-color: darkred; border-width:5px; text-align: center; margin-left: 100px; margin-right:100px;'>
# <span style='color:black; font-size: 15px; font-weight:bold;'> Note: use the API, don't scrape the webpage! </span>
# </div>
# You can retrieve the current picture of a traffic camera using the API described here:
# http://wsdot.com/traffic/api/HighwayCameras/HighwayCamerasREST.svc/help/operations/GetCameraAsJson
# ### 3C) Save the image to your computer
# There are many ways to do this, but for a pure `requests` solution see: [link](https://kite.com/python/answers/how-to-download-an-image-using-requests-in-python#:~:text=Use%20requests.,write%2Dand%2Dbinary%20mode.)
# <div style='border-style: solid; padding: 10px; border-color: black; border-width:5px; text-align: left; margin-top:20px; margin-bottom: 20px;'>
# <span style='color:black; font-size: 30px; font-weight:bold;'>Web Scraping Problem</span>
# </div>
# ## 4) Create a webscraper that creates an Excel sheet with information for all Foster (UW) Faculty members in Accounting
# ### 4a) Create a list of URLs for all the Foster faculty members in Accounting
# This information is here: https://foster.uw.edu/faculty-research/academic-departments/accounting/faculty/
#
# **Hint 1:** use the `requests-html` library
# **Hint 2:** if you get an error mentioning SSL --> add `, verify=False` to the `session.get()` command like so: `session.get(.... , verify=False)`
# ### 4b) Apply the function you created in step 2b to all the URLs you gathered in step 4a and save it all (including the URL) to a Pandas DataFrame
#
# Bonus points if you track the progress using the `tqdm` library:
#
# ```python
# from tqdm.notebook import tqdm
#
# for i in tqdm(range(100)):
# time.sleep(0.5)
# ```
# ## 5) Create a function that retrieves all the sport events in Seattle for a given date range
# https://visitseattle.org/ maintains an event calendar for events in Seattle.
#
# You can find the sports events at this page:
# https://visitseattle.org/?s&frm=events&event_type%5B0%5D=sports
#
# **Task:** create a function that takes a starting date and an end date and returns the following information about the sports events:
#
# * Title
# * Link
# * Location
# * Date info
#
# 
# <div style='border-style: solid; padding: 10px; border-color: black; border-width:5px; text-align: left; margin-top:20px; margin-bottom: 20px;'>
# <span style='color:black; font-size: 30px; font-weight:bold;'>Part 3: "Ties, I am bored, please give me a challenge"</span>
# </div>
# **Note:** You don't have to complete part 3 if you are handing in the problems for credit.
#
# ------
# ## Use `Selenium` to scrape the blog content of on the ARC platform:
#
# The steps to get started with `Selenium` are provided here: https://nbviewer.jupyter.org/github/TiesdeKok/LearnPythonforResearch/blob/master/4_web_scraping.ipynb#Selenium
#
# The Accounting Resources Centre has a Blog section where academics can post articles related to Accounting and Accounting research.
#
# The link is here: https://arc.eaa-online.org/blog
#
# **Task:** create an Excel sheet that contains all the Blog posts with the following information:
#
# 1. Title
# 2. Author
# 3. Number of likes
# 4. Number of views
#
# For the sake of the challenge I encourage you to try and automate clicking the "more" button at the bottom.
#
|
problems/day_3/web_gathering_problems.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ZugerTrophy Run Review
# ### An exercise in the 'Split', 'Apply', 'Combine' approach where you split by the route then apply the max, avg. etc. and then combine as a new table. See this video: https://youtu.be/Wb2Tp35dZ-I
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, date, time, timedelta
# Load the run log; literal 'NA' strings become NaN
df = pd.read_csv('ZugerTrophy.csv', na_values = ['NA'])
df.head()
df.dtypes
#remove unnecessary columns from the dataframe (ranking columns are not needed
#for the personal-best analysis below)
df.drop(['Rank_Overall', 'Rank 40-49 (40+)', 'Rank Men'], axis=1, inplace=True)
df.head()
# +
#strip out the distance to a separate column by splitting the Route string on spaces
#split_df = df['Route'].str.split('(.\d\.\d+|14)', expand=True)
split_df = df['Route'].str.split(' ', expand=True)
split_df.tail(6)
# +
#check that the split is ok in the middle of the file
#split_df.loc[87:91]
# -
#create a categorical dtype to be used for the route lengths
#(German: kurz=short, mittel=medium, lang=long; ordered so sorting follows length)
from pandas.api.types import CategoricalDtype
ordered_cat = CategoricalDtype(['kurz', 'mittel', 'lang'], ordered=True)
#add back key columns from split df to the original df
#(split token layout assumed: [0]=route name, [1]=length class, [2]=distance, [4]=exercise)
df['Distance'] = pd.to_numeric(split_df[2])
#df['Exercise'] = split_df[4].replace('[\- ]', '', regex=True)
df['Exercise'] = split_df[4]
#strip stray tab/space characters picked up from the source file
df['Exercise'] = df['Exercise'].replace('[\\t\t\t ]', '', regex=True)
df['Route'] = df['Route'].replace('[\t\t]', '', regex=True)
df['Route1'] = split_df[0]
df['Route2'] = split_df[1].astype(ordered_cat)
df.head()
# +
#adjust the dtypes
df['Date'] = pd.to_datetime(df['Date'], format='%d.%m.%Y')
df['Run_Time'] = pd.to_timedelta(df['Run_Time'], unit='s')
# to_timedelta on the pace column, but prefix '00:' so the strings match the
# %H:%M:%S format that to_timedelta expects
df['Pace'] = pd.to_timedelta('00:'+df['Pace'])
df.dtypes
# -
#this would remove the walking routes - but I prefer to split it out
#NOTE(review): 'filter' shadows the builtin of the same name
patternDel = "Walking"
filter = df['Route'].str.contains(patternDel)
df = df[~filter]
df
#create a dataframe groupby object with 'key' as the Route. This is 'splitting' the dataset
g = df.groupby('Route')
g
#let's have a look at the dataframe
for Route, Route_df in g:
    print(Route)
    print(Route_df)
#Look at a specific route in the dataframe
g.get_group('06_Ägerital kurz 2.4 - Running')
#get my records. This is where you 'apply' a function such as 'min' and 'combine' a new dataframe
#(column-wise min per route: fastest Run_Time / best Pace = personal best)
g1 = g.min()
g1
#Paces = [5.09, 5.24, 5.14, 5.49, 7.43, 6.02, 4.30, 6.19, 5.02, 5.21, 6.19, 5.42, 5.32, 6.11, 4.39, 5.20, 5.31, 4.55, 4.46]
#Routes = ['Steinhausen kurz 2.8', 'Steinhausen lang 12.3', 'Steinhausen mittel 8.4', 'Baar kurz 2.7', 'Baar lang 14.6', '02_Baar mittel 6.9', '03_Ennetsee kurz 2.2', '03_Ennetsee lang 14', '03_Ennetsee mittel 5.1', '04_Menzingen kurz 3.6', '04_Menzingen lang 14.0', '04_Menzingen mittel 8.1', '05_Zug kurz 2.1', '05_Zug lang 16.8', '05_Zug mittel 5.9', '06_Ägerital kurz 2.4', '06_Ägerital lang 17.2', '06_Ägerital mittel 3.9', 'Winter-Challenge Zug 5.9']
# Build the personal-best table from the per-route minima
Routes = g1['Route1']
Routes2 = g1['Route2']
Distance = g1['Distance']
Time = g1['Run_Time']
Paces = g1['Pace']
pbs = {'Route1':Routes, 'Version':Routes2, 'Distance':Distance, 'Run_Time':Time, 'Pace':Paces}
df = pd.DataFrame(pbs)
df
#df1 = df.sort_values(by='Pace', ascending=True).reset_index(drop=True)
# Sort by route name, then by the ordered length category (kurz < mittel < lang)
df1 = df.sort_values(['Route1', 'Version'], ascending=(True, True)).reset_index(drop=True)
df1
# +
#plot the times (one bar per route, personal-best Run_Time)
plt.figure(figsize=(16, 10))
# Make a dataset:
bars = (df1.Route1)
y_pos = np.arange(len(bars))
# Create bars
plt.bar(y_pos, df1.Run_Time)
# Create names on the x-axis
plt.xticks(y_pos, bars)
plt.xticks(rotation=70)
# Add labels
plt.xlabel("Route")
plt.ylabel("Time")
plt.title("Timing")
# Show graphic and save
plt.savefig('Timing')
plt.show()
# +
#plot the pace (one bar per route, personal-best Pace)
plt.figure(figsize=(16, 10))
# Make a dataset:
bars = (df1.Route1)
y_pos = np.arange(len(bars))
# Create bars
plt.bar(y_pos, df1.Pace)
# Create names on the x-axis
plt.xticks(y_pos, bars)
plt.xticks(rotation=70)
# Add labels
plt.xlabel("Route")
plt.ylabel("Pace")
plt.title("Pacing")
# Show graphic
plt.savefig('Pacing')
plt.show()
# +
# # %matplotlib inline
# plt.plot(g.index.to_pydatetime().dropna(), g.NbFluxEntrant.dropna())
|
ZugerTrophy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from statsmodels.discrete.discrete_model import Logit
import statsmodels.api as sm
from keras.models import Sequential
from keras.layers import Dense, Activation
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
pd.set_option("display.max_rows", 999)
pd.set_option("display.max_columns", 999)
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# data = pd.concat([pd.read_csv('./data/house_train.csv'), pd.read_csv('./data/house_test.csv')])
data = pd.read_csv('./data/house_train.csv')
data.head(20)
# Per-column missing-value counts, alphabetical by column name
data.isnull().sum().sort_index()
# +
# numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
# newdf = df.select_dtypes(include=numerics)
# NOTE(review): _get_numeric_data is private pandas API; select_dtypes (commented
# above) is the public equivalent
data._get_numeric_data().isnull().sum().sort_values(ascending=False)
# -
data.LotFrontage.hist()
data.LotFrontage.describe()
# Approximate garage age (data presumably from 2017 -- confirm)
(2017 - data.GarageYrBlt).hist()
(2017 - data.GarageYrBlt).describe()
data.SalePrice.hist()
data.SalePrice.describe()
# +
# Build the classification target: is the sale price in the top quartile?
# (214000 is presumably the 75th percentile from describe() above -- confirm)
df = pd.DataFrame(index=data.index)
df['is_price_top75'] = data.SalePrice >= 214000
df['SaleCondition'] = data.SaleCondition
# -
|
keras/logistic_regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: msms_rt_ssvm_39
# language: python
# name: msms_rt_ssvm_39
# ---
# # Illustrate how the max-margin changes between 2D and 3D encoding fingerprints.
#
# As an example, we use:
# - Molecule: KWGRBVOPPLSCSI-WPRPVWTQSA-N
# - Spectrum: EA34486399
# - Dataset: EA_002
# - Sample index: 0
# - MS2-scorer: SIRIUS
#
# We inspect how the 3D fingerprints can be used to break the ties of the top-scoring candidates. The molecule belongs to the "Benzene and substituted derivatives" ClassyFire class. For this class we observe a improvement of 7.5%p top-1.
# +
import sqlite3
import numpy as np
import pickle
import gzip
import matplotlib.pyplot as plt
import pandas as pd
import os
# RDKIT
from rdkit.Chem import MolFromSmiles, Draw, MolFromInchi, MolToSmiles, AssignStereochemistry
from rdkit.Chem.rdDepictor import Compute2DCoords
from rdkit.Chem.Draw import IPythonConsole, rdMolDraw2D
from rdkit import __version__ as rdkit__version
print(rdkit__version)
from IPython.display import SVG
mb_db_fn = "/home/bach/Documents/doctoral/projects/lcms2struct_experiments/data/massbank.sqlite"
# -
def inspect_stereoisomers(
        spec_id: str, dataset: str, ms2scorer: str, spl_idx: int,
        mb_db_fn="/home/bach/Documents/doctoral/projects/lcms2struct_experiments/data/massbank.sqlite"
):
    """Collect the scores of all stereoisomers of a spectrum's ground-truth structure.

    Loads the aggregated marginal scores for both the 2D and the 3D fingerprint
    setup, finds the candidate set belonging to ``spec_id`` and returns, for
    every candidate sharing the constitution (first InChIKey block) of the
    correct structure, the MS2-score and the 2D / 3D max-margin scores.

    :param spec_id: spectrum accession, e.g. "BS02391126"
    :param dataset: dataset identifier used in the result directory name, e.g. "BS_000"
    :param ms2scorer: MS2 scoring method identifier, e.g. "cfmid4__norm"
    :param spl_idx: index of the evaluation sample (split)
    :param mb_db_fn: path to the MassBank SQLite DB used to resolve InChIKeys and SMILES
    :return: pandas.DataFrame with columns
        [inchikey, is_true_structure, smiles, ms2_score, max_margin_2D, max_margin_3D]
    """
    # Result files holding the aggregated marginals for the 3D and 2D encodings
    fns = {
        l: os.path.join(
            "massbank__with_stereo",
            "ds=%s__lloss_mode=mol_feat_fps__mol_feat=FCFP__binary__all__%s__mol_id=cid__ms2scorer=%s__ssvm_flavor=default" %
            (dataset, l, ms2scorer),
            "combined__cand_agg_id=inchikey__marg_agg_fun=average",
            "marginals__spl=%d.pkl.gz" % spl_idx
        )
        for l in ["3D", "2D"]
    }

    # Load the marginals
    mm = {}
    for l, fn in fns.items():
        with gzip.open(fn) as f:
            mm[l] = pickle.load(f)

    # Get the correct index in the ms feature sequence.
    # NOTE(review): mm[l] is subscripted with this position below, which assumes
    # its keys coincide with list positions -- confirm the marginals' key layout.
    idx = {
        l: [m["spectrum_id"].get("spectrum_id") for m in mm[l].values()].index(spec_id)
        for l in ["3D", "2D"]
    }
    assert idx["2D"] == idx["3D"], "MS feature index should be the same"

    # Extract the candidate sets for the ms feature
    cands = {l: mm[l][idx[l]] for l in ["3D", "2D"]}

    # Resolve the ground-truth InChIKey of the spectrum from the DB
    conn = sqlite3.connect(mb_db_fn)
    try:
        ikey = conn.execute(
            "SELECT inchikey FROM scored_spectra_meta ssm"
            "   INNER JOIN molecules m ON m.cid = ssm.molecule"
            "   WHERE accession is ?", (spec_id,)
        ).fetchone()[0]
        ikey1 = ikey.split("-")[0]  # first InChIKey block: molecular constitution

        # Sanity checks: correct structure present in both candidate sets, same size
        assert cands["2D"]["label"][cands["2D"]["index_of_correct_structure"]] == ikey
        assert cands["3D"]["label"][cands["3D"]["index_of_correct_structure"]] == ikey
        assert cands["2D"]["n_cand"] == cands["3D"]["n_cand"]

        df = []
        for i in range(cands["2D"]["n_cand"]):
            if cands["2D"]["label"][i].split("-")[0] == ikey1:
                # Same constitution -> candidate is a stereoisomer of the truth
                assert cands["3D"]["label"][i].split("-")[0] == ikey1
                # MS2-score must agree between the 2D and 3D setups
                assert cands["3D"]["ms_score"][i] == cands["2D"]["ms_score"][i]

                smi = conn.execute("SELECT smiles_iso FROM molecules WHERE inchikey IS ?", (cands["3D"]["label"][i], )).fetchone()[0]

                df.append([
                    cands["3D"]["label"][i],
                    cands["3D"]["label"][i] == ikey,
                    smi,
                    cands["3D"]["ms_score"][i],
                    cands["2D"]["score"][i],
                    cands["3D"]["score"][i]
                ])
    finally:
        # BUGFIX: the connection used to be left open (resource leak), even when
        # an assertion or DB error aborted the function.
        conn.close()

    df = pd.DataFrame(df, columns=["inchikey", "is_true_structure", "smiles", "ms2_score", "max_margin_2D", "max_margin_3D"])

    return df
# Example spectra: rank each ground-truth structure's stereoisomers by 3D margin
df = inspect_stereoisomers("BS02391126", "BS_000", "cfmid4__norm", 0)
df.sort_values(by="max_margin_3D", ascending=False)
df = inspect_stereoisomers("BS64681001", "BS_000", "cfmid4__norm", 3)
df.sort_values(by="max_margin_3D", ascending=False)
df = inspect_stereoisomers("PR75447353", "PR_002", "cfmid4__norm", 12)
df.sort_values(by="max_margin_3D", ascending=False)
# Filenames of the aggregated margins (LQB_000 dataset, MetFrag scorer, split 0)
fn__2D = "massbank__with_stereo/ds=LQB_000__lloss_mode=mol_feat_fps__mol_feat=FCFP__binary__all__2D__mol_id=cid__ms2scorer=metfrag__norm__ssvm_flavor=default/combined__cand_agg_id=inchikey__marg_agg_fun=average/marginals__spl=0.pkl.gz"
fn__3D = "massbank__with_stereo/ds=LQB_000__lloss_mode=mol_feat_fps__mol_feat=FCFP__binary__all__3D__mol_id=cid__ms2scorer=metfrag__norm__ssvm_flavor=default/combined__cand_agg_id=inchikey__marg_agg_fun=average/marginals__spl=0.pkl.gz"
# Load the margins
# BUGFIX: the loading step was missing here, so mm__2D / mm__3D were referenced
# before assignment (cf. the analogous UT_000 section further below).
with gzip.open(fn__2D) as f:
    mm__2D = pickle.load(f)

with gzip.open(fn__3D) as f:
    mm__3D = pickle.load(f)

# Find the index corresponding to the spectrum "BS57272571"
idx__2D = [m["spectrum_id"].get("spectrum_id") for m in mm__2D.values()].index("BS57272571")
idx__3D = [m["spectrum_id"].get("spectrum_id") for m in mm__3D.values()].index("BS57272571")
assert idx__2D == idx__3D
# Extract the ranked candidate lists for the spectrum.
cands__2D = mm__2D[idx__2D]
cands__3D = mm__3D[idx__3D]
# Collect the MS2-scores, Max-margins (2D) and Max-margins (3D) for the stereoisomers corresponding to the ground truth structure
# +
conn = sqlite3.connect(mb_db_fn)
# NOTE(review): "<KEY>" looks like a redacted placeholder -- fill in the
# ground-truth InChIKey of spectrum BS57272571 before running this cell.
ikey = "<KEY>"
ikey1 = ikey.split("-")[0] # first inchikey part specifying the constitution
# Correct structure must be present in both candidate sets, which must agree in size
assert cands__2D["label"][cands__2D["index_of_correct_structure"]] == ikey
assert cands__3D["label"][cands__3D["index_of_correct_structure"]] == ikey
assert cands__2D["n_cand"] == cands__3D["n_cand"]
df = []
for i in range(cands__2D["n_cand"]):
    # Same first InChIKey block -> candidate is a stereoisomer of the truth
    if cands__2D["label"][i].split("-")[0] == ikey1:
        assert cands__3D["label"][i].split("-")[0] == ikey1
        assert cands__3D["ms_score"][i] == cands__2D["ms_score"][i]
        smi = conn.execute("SELECT smiles_iso FROM molecules WHERE inchikey IS ?", (cands__3D["label"][i], )).fetchone()[0]
        df.append([cands__3D["label"][i], cands__3D["label"][i] == ikey, smi, cands__3D["ms_score"][i], cands__2D["score"][i], cands__3D["score"][i]])
df = pd.DataFrame(df, columns=["inchikey", "is_true_structure", "smiles", "ms2_score", "max_margin_2D", "max_margin_3D"])
# df
df.sort_values(by="max_margin_3D", ascending=False)
# -
# Filenames of the aggregated margins (UT_000 dataset, MetFrag scorer, split 4)
fn__2D = "massbank__with_stereo/ds=UT_000__lloss_mode=mol_feat_fps__mol_feat=FCFP__binary__all__2D__mol_id=cid__ms2scorer=metfrag__norm__ssvm_flavor=default/combined__cand_agg_id=inchikey__marg_agg_fun=average/marginals__spl=4.pkl.gz"
fn__3D = "massbank__with_stereo/ds=UT_000__lloss_mode=mol_feat_fps__mol_feat=FCFP__binary__all__3D__mol_id=cid__ms2scorer=metfrag__norm__ssvm_flavor=default/combined__cand_agg_id=inchikey__marg_agg_fun=average/marginals__spl=4.pkl.gz"
# Load the margins
# +
with gzip.open(fn__2D) as f:
    mm__2D = pickle.load(f)

with gzip.open(fn__3D) as f:
    mm__3D = pickle.load(f)
# -
# Find the index corresponding to the spectrum "UT29257495"
idx__2D = [m["spectrum_id"].get("spectrum_id") for m in mm__2D.values()].index("UT29257495")
idx__3D = [m["spectrum_id"].get("spectrum_id") for m in mm__3D.values()].index("UT29257495")
assert idx__2D == idx__3D
# Extract the ranked candidate lists for the spectrum.
cands__2D = mm__2D[idx__2D]
cands__3D = mm__3D[idx__3D]
# Collect the MS2-scores, Max-margins (2D) and Max-margins (3D) for the stereoisomers corresponding to the ground truth structure
# +
conn = sqlite3.connect(mb_db_fn)
# NOTE(review): "<KEY>" looks like a redacted placeholder -- fill in the
# ground-truth InChIKey of spectrum UT29257495 before running this cell.
ikey = "<KEY>"
ikey1 = ikey.split("-")[0] # first inchikey part specifying the constitution
assert cands__2D["label"][cands__2D["index_of_correct_structure"]] == ikey
assert cands__3D["label"][cands__3D["index_of_correct_structure"]] == ikey
assert cands__2D["n_cand"] == cands__3D["n_cand"]
df = []
for i in range(cands__2D["n_cand"]):
    # Same first InChIKey block -> candidate is a stereoisomer of the truth
    if cands__2D["label"][i].split("-")[0] == ikey1:
        assert cands__3D["label"][i].split("-")[0] == ikey1
        assert cands__3D["ms_score"][i] == cands__2D["ms_score"][i]
        smi = conn.execute("SELECT smiles_iso FROM molecules WHERE inchikey IS ?", (cands__3D["label"][i], )).fetchone()[0]
        df.append([cands__3D["label"][i], cands__3D["label"][i] == ikey, smi, cands__3D["ms_score"][i], cands__2D["score"][i], cands__3D["score"][i]])
df = pd.DataFrame(df, columns=["inchikey", "is_true_structure", "smiles", "ms2_score", "max_margin_2D", "max_margin_3D"])
# df
df.sort_values(by="max_margin_3D", ascending=False)
# -
df["max_margin_3D"].rank(ascending=False)
# Map ranks 1..9 linearly onto a 100..0 score scale
np.interp(df["max_margin_3D"].rank(ascending=False).tolist(), [1, 9], [100, 0])
df["max_margin_3D"].tolist()
# +
# Draw.MolDrawing(MolFromSmiles("CC([C@H](C1=CC=CC=C1)O)NC"))
# Draw all stereoisomers in a grid; the true structure is marked with '*'
legends = []
for _, row in df.iterrows():
    # l = "InChIKey (1): %s\nInChIKey (2): %s" % (__s for __s in row["inchikey"].split("-")[:2])
    l = "InChIKey (1):\t%s\nInChIKey (2):\t%s" % (row["inchikey"].split("-")[0], row["inchikey"].split("-")[1])
    legends.append(l)
    if row["is_true_structure"]:
        legends[-1] += " *"
mols = [MolFromSmiles(s) for s in df["smiles"]]
_ = [Compute2DCoords(mol) for mol in mols]
Draw.MolsToGridImage(
    mols,
    molsPerRow=1,
    subImgSize=(100, 100),
    useSVG=True,
    legends=legends
)
# -
# Round-trip an InChI to canonical isomeric SMILES
MolToSmiles(MolFromInchi("InChI=1S/C10H15NO/c1-8(11-2)10(12)9-6-4-3-5-7-9/h3-8,10-12H,1-2H3/t8-,10-/m0/s1"))
# Compare several equivalent representations of the same stereoisomer
Draw.MolsToGridImage(
    [
        MolFromInchi("InChI=1S/C10H15NO/c1-8(11-2)10(12)9-6-4-3-5-7-9/h3-8,10-12H,1-2H3/t8-,10-/m0/s1"),
        MolFromSmiles("C[C@@H]([C@@H](C1=CC=CC=C1)O)NC", sanitize=True),
        MolFromSmiles("CN[C@@H](C)[C@H](O)C1=CC=CC=C1", sanitize=True),
        MolFromSmiles("c1([C@H]([C@@H](NC)C)O)ccccc1")
    ],
    molsPerRow=1,
    subImgSize=(100, 100),
    useSVG=True,
)
MolFromSmiles("C[C@@H]([C@@H](C1=CC=CC=C1)O)NC")
# Double-bond (cis/trans) stereochemistry example
m = MolFromSmiles("C/C=C/C=C\\C")
Compute2DCoords(m)
AssignStereochemistry(m, False, True, True)
m
# Render a single molecule as a standalone SVG
mol = MolFromSmiles("CN[C@@H](C)[C@H](O)C1=CC=CC=C1")
Compute2DCoords(mol)
d2d = rdMolDraw2D.MolDraw2DSVG(200, 200)
d2d.DrawMolecule(mol)
d2d.FinishDrawing()
SVG(d2d.GetDrawingText())
|
results_processed/publication/massbank/ssvm_lib=v2__exp_ver=4/exp_03__c__example_margin_2D_vs_3D.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" id="doTswgRIwJsj"
# General libraries
import os
import numpy as np
import pandas as pd
import random
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
# Deep learning libraries
import keras.backend as K
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, SeparableConv2D, MaxPool2D, LeakyReLU, Activation
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
import tensorflow as tf
# Setting seeds for reproducibility
seed = 232
np.random.seed(seed)
# NOTE(review): tf.set_random_seed is the TF1 API; in TF2 this is
# tf.random.set_seed -- confirm the TensorFlow version in use.
tf.set_random_seed(seed)
# + id="7pXjO-l8wJsn" outputId="118fc427-1aee-4424-dc66-8287dab71c8b"
# input_path = '../input/chest_xray/chest_xray/'
input_path = '../input/chest-xray-pneumonia//chest_xray/chest_xray/'
# Show one NORMAL (top row) and one PNEUMONIA (bottom row) sample per split
fig, ax = plt.subplots(2, 3, figsize=(15, 7))
ax = ax.ravel()
plt.tight_layout()
for i, _set in enumerate(['train', 'val', 'test']):
    set_path = input_path+_set
    ax[i].imshow(plt.imread(set_path+'/NORMAL/'+os.listdir(set_path+'/NORMAL')[0]), cmap='gray')
    ax[i].set_title('Set: {}, Condition: Normal'.format(_set))
    ax[i+3].imshow(plt.imread(set_path+'/PNEUMONIA/'+os.listdir(set_path+'/PNEUMONIA')[0]), cmap='gray')
    ax[i+3].set_title('Set: {}, Condition: Pneumonia'.format(_set))
# + id="fxlj-_2OwJsn"
# Distribution of our datasets (class counts per split)
for _set in ['train', 'val', 'test']:
    n_normal = len(os.listdir(input_path + _set + '/NORMAL'))
    n_infect = len(os.listdir(input_path + _set + '/PNEUMONIA'))
    print('Set: {}, normal images: {}, pneumonia images: {}'.format(_set, n_normal, n_infect))
def process_data(img_dims, batch_size):
    """Build the train/test generators and an in-memory copy of the test set.

    Reads from the module-level ``input_path`` directory layout
    (``train``/``test`` with ``NORMAL`` and ``PNEUMONIA`` subfolders).

    :param img_dims: target side length; images are resized to (img_dims, img_dims)
    :param batch_size: batch size for both generators
    :return: (train_gen, test_gen, test_data, test_labels) where test_data is a
        float32 array scaled to [0, 1] and test_labels is 0=NORMAL, 1=PNEUMONIA
    """
    # Only the training pipeline augments; both rescale pixel values to [0, 1].
    augmenting_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.3, vertical_flip=True)
    plain_datagen = ImageDataGenerator(rescale=1./255)

    # Shared flow settings for both directory iterators.
    flow_kwargs = dict(
        target_size=(img_dims, img_dims),
        batch_size=batch_size,
        class_mode='binary',
        shuffle=True,
    )
    train_gen = augmenting_datagen.flow_from_directory(directory=input_path+'train', **flow_kwargs)
    test_gen = plain_datagen.flow_from_directory(directory=input_path+'test', **flow_kwargs)

    # Load the whole test set into memory in a fixed order so that a single
    # predict() call can be compared against test_labels (confusion matrix).
    test_data = []
    test_labels = []
    for cond, label in (('/NORMAL/', 0), ('/PNEUMONIA/', 1)):
        for fname in os.listdir(input_path + 'test' + cond):
            image = plt.imread(input_path+'test'+cond+fname)
            image = cv2.resize(image, (img_dims, img_dims))
            # Stack the grayscale image into 3 identical channels for the CNN input.
            image = np.dstack([image, image, image])
            test_data.append(image.astype('float32') / 255)
            test_labels.append(label)

    return train_gen, test_gen, np.array(test_data), np.array(test_labels)
# + id="rq0NbQycwJsp"
# Hyperparameters
img_dims = 150   # images resized to 150x150
epochs = 10
batch_size = 32
# Getting the data
train_gen, test_gen, test_data, test_labels = process_data(img_dims, batch_size)
# + id="vxWJl2tIwJsp"
# Binary pneumonia classifier: 5 conv blocks (depthwise-separable from block 2
# onwards) followed by a fully connected head with heavy dropout.
# Input layer
inputs = Input(shape=(img_dims, img_dims, 3))

# First conv block
x = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
x = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = MaxPool2D(pool_size=(2, 2))(x)

# Second conv block
x = SeparableConv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = SeparableConv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPool2D(pool_size=(2, 2))(x)

# Third conv block
x = SeparableConv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = SeparableConv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPool2D(pool_size=(2, 2))(x)

# Fourth conv block
x = SeparableConv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = SeparableConv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Dropout(rate=0.2)(x)

# Fifth conv block
x = SeparableConv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = SeparableConv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(x)
x = BatchNormalization()(x)
x = MaxPool2D(pool_size=(2, 2))(x)
x = Dropout(rate=0.2)(x)

# FC layer (dropout tapers 0.7 -> 0.5 -> 0.3 towards the output)
x = Flatten()(x)
x = Dense(units=512, activation='relu')(x)
x = Dropout(rate=0.7)(x)
x = Dense(units=128, activation='relu')(x)
x = Dropout(rate=0.5)(x)
x = Dense(units=64, activation='relu')(x)
x = Dropout(rate=0.3)(x)

# Output layer: single sigmoid unit for the binary NORMAL/PNEUMONIA decision
output = Dense(units=1, activation='sigmoid')(x)

# Creating model and compiling
model = Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Callbacks
checkpoint = ModelCheckpoint(filepath='best_weights.hdf5', save_best_only=True, save_weights_only=True)
# BUGFIX: mode was 'max', but val_loss is minimized -- with 'max' the callback
# mis-detects plateaus (a falling loss counts as "no improvement").
lr_reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=2, verbose=2, mode='min')
# NOTE(review): early_stop is defined but not passed to the fit call below --
# add it to the callbacks list if early stopping is intended.
early_stop = EarlyStopping(monitor='val_loss', min_delta=0.1, patience=1, mode='min')
# + id="BYf2NNFQwJsq"
# Fitting the model (validates against the test generator each epoch)
hist = model.fit_generator(
            train_gen, steps_per_epoch=train_gen.samples // batch_size,
            epochs=epochs, validation_data=test_gen,
            validation_steps=test_gen.samples // batch_size, callbacks=[checkpoint, lr_reduce])
# + id="gFQX_Ze1wJsr"
# Plot training curves for accuracy and loss
# NOTE(review): the history key is 'acc' in older Keras and 'accuracy' in
# newer versions -- adjust if a KeyError is raised here.
fig, ax = plt.subplots(1, 2, figsize=(10, 3))
ax = ax.ravel()
for i, met in enumerate(['acc', 'loss']):
    ax[i].plot(hist.history[met])
    ax[i].plot(hist.history['val_' + met])
    ax[i].set_title('Model {}'.format(met))
    ax[i].set_xlabel('epochs')
    ax[i].set_ylabel(met)
    ax[i].legend(['train', 'val'])
# + id="wKT_G0IzwJsr" outputId="3a848df9-b57b-417f-ffc9-28881304dc35"
from sklearn.metrics import accuracy_score, confusion_matrix
# Evaluate on the in-memory test set; sigmoid outputs rounded at 0.5
preds = model.predict(test_data)
acc = accuracy_score(test_labels, np.round(preds))*100
cm = confusion_matrix(test_labels, np.round(preds))
tn, fp, fn, tp = cm.ravel()
print('CONFUSION MATRIX ------------------')
print(cm)
print('\nTEST METRICS ----------------------')
precision = tp/(tp+fp)*100
recall = tp/(tp+fn)*100
print('Accuracy: {}%'.format(acc))
print('Precision: {}%'.format(precision))
print('Recall: {}%'.format(recall))
print('F1-score: {}'.format(2*precision*recall/(precision+recall)))
print('\nTRAIN METRIC ----------------------')
print('Train acc: {}'.format(np.round((hist.history['acc'][-1])*100, 2)))
|
Chest_X-Ray.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Examples 3; Extra bits
#
# Some additional things included in deepSI which were not discussed in the other examples.
#
# * Hyperparameter search for linear systems
# * Creating your estimator, a more in-depth look into how estimators work
# * CNN and images.
# * Input design.
# ## 1. Hyperparameter search for linear systems
#
# You can also optimize the hyperparameters using deepSI with a grid search, as is very commonly done in linear SysID.
import deepSI
train, test = deepSI.datasets.Silverbox()
# Grid search over the input/output model orders (na, nb) of a linear IO model
Sys = deepSI.fit_systems.Sklearn_io_linear
bestfit, sys, best_sys_choices, best_fit_choices = \
    deepSI.fit_systems.grid_search(Sys,train,sim_val=test,sys_dict_choices=dict(na=[1,2,3,4,5,6],nb=[1,2,3,4,5,6]) )
# Grid search over the state order nx and the fitting horizon SS_f of a linear state-space model
Sys = deepSI.fit_systems.SS_linear
bestfit, sys, best_sys_choices, best_fit_choices = \
    deepSI.fit_systems.grid_search(Sys,train,sim_val=test,\
        sys_dict_choices=dict(nx=[1,2,3,4,5,6]),\
        fit_dict_choices=dict(SS_f=[1,2,3,5,10,20,40]))
# ## 2. Creating your estimator, a more in-depth look into how estimators work
#
#
# +
#to be made
# -
# ## 3. CNN and images.
# +
from deepSI.fit_systems import SS_encoder_CNN_video
# CNN encoder for image (video frame) outputs; ny gives the image shape
sys = SS_encoder_CNN_video()
sys.init_nets(nu=None, ny=(50,50)) #image shape = (height, width), single channel
print(sys.fn) #normal network
print(sys.encoder) #4 blocks of downscaling
print(sys.hn) #4 blocks of downscaling
# Same, but with an explicit channel dimension
sys = SS_encoder_CNN_video()
sys.init_nets(nu=None, ny=(3,50,50)) #image shape = (nchannels,height, width)
print(sys.fn) #normal network
print(sys.encoder) #4 blocks of downscaling
print(sys.hn) #4 blocks of downscaling
# -
sys.parameters_with_names
# ## 4. Input design.
#
# +
#deepSI.exp_design.multisine()
#deepSI.exp_design.filtered_signal()
# -
|
examples/Examples 3; Extra bits.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:PromoterArchitecturePipeline] *
# language: python
# name: conda-env-PromoterArchitecturePipeline-py
# ---
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from scipy import stats

# Root of the promoter-annotation output used by this analysis.
location = '../../../data/output/non-overlapping_includingbidirectional_all_genes_newannotation'
# Prefix identifying the promoter + 5'UTR annotation variant (Araport11 TSS-centred).
promoter_pref='Araport11_TSS_outward_promoter5UTR'
file_names='non-overlapping_includingbidirectional_all_genes_newannotation'
# Gene categories (constitutive / variable / control) from Czechowski et al. 2005.
Czechowski_gene_categories = f'../../../data/output/{file_names}/genes/promoters_5UTR_czechowski_constitutive_variable_random.txt'
# Per-window %GC content produced by the rolling-window pipeline.
GC_content_tsv = f'../../../data/output/{file_names}/rolling_window/GC_content_rw_{promoter_pref}/{promoter_pref}_GCcontent_rw.tsv'
promoter_bed = f'{location}/FIMO/promoters.bed'
promoter_5UTR_bed = f'{location}/FIMO/promoters_5UTR.bed'
foldername_prefix = 'GC_content_rw'
#bp covered chromatin files (per-window open-chromatin coverage from bedtools)
root_chrom_bp_covered = f'../../../data/output/{file_names}/rolling_window/OpenChromatin_rw_{promoter_pref}/{promoter_pref}_root_bpcovered_rw.bed'
shoot_chrom_bp_covered = f'../../../data/output/{file_names}/rolling_window/OpenChromatin_rw_{promoter_pref}/{promoter_pref}_shoot_bpcovered_rw.bed'
rootshootintersect_chrom_bp_covered = f'../../../data/output/{file_names}/rolling_window/OpenChromatin_rw_{promoter_pref}/{promoter_pref}_rootshootintersect_bpcovered_rw.bed'
#make directory for the plots to be exported to
dirName = f'{location}/rolling_window/GC_content_rw_{promoter_pref}/plots'
try:
    # Create target Directory
    os.mkdir(dirName)
    print("Directory " , dirName , " created")
except FileExistsError:
    print("Directory " , dirName , " already exists")
# +
# Load the gene categories (two columns: AGI identifier, category name).
promoters = pd.read_csv(Czechowski_gene_categories, sep='\t', header=None)
cols = ['AGI','gene_type']
promoters.columns = cols

#Read in GC content table to get window data (ignore GC content variable)
GC_content = pd.read_table(GC_content_tsv, sep='\t', header=None)
GC_content
cols2 = ['name', 'percentage_GC_content']
GC_content.columns = cols2

# Parse the composite window name ("AGI_window:chr:start-stop") into columns.
#Make AGI column
GC_content['AGI'] = GC_content.name.str.split('_',expand=True)[0]
#make window number column
GC_content = GC_content.assign(window_number=GC_content.name.str.extract(r'_(.*?)\:'))
#make chr column
GC_content = GC_content.assign(chr=GC_content.name.str.split(':',n=3,expand=True)[2])
#make start column
GC_content = GC_content.assign(start=GC_content.name.str.split(':',n=3,expand=True)[3].str.split('-',expand=True)[0])
#make stop column
GC_content = GC_content.assign(stop=GC_content.name.str.split(':',n=3,expand=True)[3].str.split('-',expand=True)[1])
#make df columns integers
GC_content = GC_content.astype({'stop':'int','start':'int','chr':'int'})
#add window length column
GC_content = GC_content.assign(window_length=GC_content.stop - GC_content.start)
# #merge to limit to genes of interest
# GC_content = pd.merge(promoters, GC_content, how ='left', on='AGI')
# GC_content
# -
#allow colour codes in seaborn
sns.set(color_codes=True)
sns.set_style("ticks")
# +
# dist_plot = GC_content['percentage_GC_content']
# #create figure with no transparency
# dist_plot_fig = sns.distplot(dist_plot).get_figure()
# -
#remove windows with fewer than 100 promoters extending to that location
# NOTE(review): result is bound to `openchrom` but later cells use GC_content /
# windows_coords' own >49 filter — this variable appears unused; confirm.
openchrom = GC_content[GC_content['window_number'].map(GC_content['window_number'].value_counts()) > 99]
def add_coverage(df, coverage_bed, suffix):
    """Merge per-window % base-pair coverage from a bedtools coverage bed file into df.

    The coverage file is left-joined onto df on (chr, start, stop); any column
    name that clashes on the coverage side gets `_{suffix}` appended.
    Returns the merged DataFrame (df itself is not mutated).
    """
    bed_columns = ['chr', 'start', 'stop', 'name', 'no._of_overlaps',
                   'no._of_bases_covered', 'window_length', 'fraction_bases_covered']
    coverage = pd.read_table(coverage_bed, sep='\t', header=None, names=bed_columns)
    # express the covered fraction as a percentage
    coverage['percentage_bases_covered'] = coverage['fraction_bases_covered'] * 100
    # keep only the join keys, the window name and the new percentage column
    coverage = coverage[['chr', 'start', 'stop', 'name', 'percentage_bases_covered']]
    # cast the join keys to integers on both sides so the merge keys align
    df = df.astype({'stop': 'int', 'start': 'int', 'chr': 'int'})
    coverage = coverage.astype({'stop': 'int', 'start': 'int', 'chr': 'int'})
    # left join keeps every row of df; clashing names from the coverage side
    # are tagged with the suffix
    return pd.merge(df, coverage, how='left', on=['chr', 'start', 'stop'],
                    suffixes=('', f'_{suffix}'))
def rep_sample(df, col, n, random_state):
    """Return a reproducible sample of n rows spread evenly over the categories of `col`.

    Each category contributes n // n_categories rows; the remainder is handed
    out one extra row at a time to the first categories in groupby (sorted key)
    order, so the totals sum to exactly n.
    Adapted from: https://stackoverflow.com/questions/39457762/python-pandas-conditionally-select-a-uniform-sample-from-a-dataframe
    """
    n_categories = df[col].nunique()
    base_size = n // n_categories
    remainder = n - base_size * n_categories
    # every category gets base_size rows; the first `remainder` categories get one extra
    sizes = np.full(n_categories, base_size, dtype=int)
    sizes[:remainder] += 1
    # sample each category with the shared random_state so results are reproducible
    pieces = [group.sample(sizes[idx], random_state=random_state)
              for idx, (_, group) in enumerate(df.groupby(col))]
    return pd.concat(pieces)
def windows_coords(output_prefix,variable_of_interest,variable_of_interest_name,variable_of_interest_df,promoter_bed,promoter_5UTR_bed,
                   window_offset,chromatin_tissue_variable='percentage_bases_covered_rootshootintersect_chrom',
                   chromatin_tissue_variable_name='% open chromatin root and shoot intersect',x_range=False,estimator='median',ci=95, n_boot=10000,
                   genetype=False, genetype2=False, genetype3=False):
    """Plot sliding-window profiles of open chromatin and a variable of interest
    centred on the Araport11 TSS, and save the figure as a PDF.

    The centre of each rolling window is mapped to a coordinate relative to the
    Araport11 TSS (the end of the promoter in promoter_bed): negative positions
    are upstream (promoter), positive are downstream (5'UTR). Two stacked
    lineplots are drawn (A: chromatin coverage, B: the variable of interest),
    optionally split by gene_type:
      - no genetype args: all promoters;
      - genetype only: that single category;
      - genetype + genetype2: those two categories, down-sampled to equal sizes;
      - all three: three categories, down-sampled to equal sizes.
    estimator is 'mean' or 'median'; ci/n_boot control the bootstrap band.
    Returns the merged per-window DataFrame used for plotting (positions and
    promoter lengths negated to read as "upstream of ATG").
    """
    #read in bed file of promoters (chr/start/stop/AGI/strand plus GFF-style extras)
    promoter_df = pd.read_table(promoter_bed, sep='\t', header=None)
    col = ['chr','start','stop','AGI','dot1', 'strand','source','type','dot2','attributes']
    promoter_df.columns = col
    promoter_5UTR_df = pd.read_table(promoter_5UTR_bed, sep='\t', header=None)
    promoter_5UTR_df.columns = col
    #add promoter length column
    promoter_df['promoter_length'] = promoter_df.stop-promoter_df.start
    #temporarily merge promoter_df with promoter_5UTR_bed
    temp_merged = pd.merge(promoter_df,promoter_5UTR_df,how='left',on='AGI', suffixes = ('','_promsUTR'))
    #add 5UTR length column: (promoter+5'UTR extent) minus the promoter length
    temp_merged['five_UTR_length'] = (temp_merged.stop_promsUTR-temp_merged.start_promsUTR) - temp_merged.promoter_length
    #filter columns
    temp_merged = temp_merged[['chr','start','stop','AGI','dot1', 'strand','source','type','dot2','attributes','promoter_length','five_UTR_length']]
    #rename temp_merged back to promoter_df
    promoter_df = temp_merged.copy()
    #merge promoter_df with variable_of_interest_df on AGI
    merged = pd.merge(variable_of_interest_df, promoter_df, on='AGI', how='left',suffixes=('','_wholeprom'))
    #remove NaN rows (windows with no value for the variable of interest)
    merged = merged[merged[variable_of_interest].notnull()]
    #make columns integers
    merged = merged.astype({'stop_wholeprom':'int','start_wholeprom':'int','start':'int','stop':'int'})
    #split merged into 2 dfs by strand
    pos = merged[merged.strand == '+'].copy()
    neg = merged[merged.strand == '-'].copy()
    #add variable of interest position column where position is the middle of the window using the Araport TSS (end of promoter bed file) as a reference
    #this will lead to positive positions being in the 5'UTR and negative in the promoter region
    pos['position'] = (pos.stop_wholeprom) - (pos.start+0.5*(pos.stop-pos.start))
    neg['position'] = (neg.start+0.5*(neg.stop-neg.start)) - neg.start_wholeprom
    merged2 = pd.merge(pos,neg,how='outer')
    merged2 = merged2.astype({'position': 'int64'})
    #make window number a float so the </> comparisons below work
    variable_of_interest_df = variable_of_interest_df.astype({'window_number': 'float'})
    #calculate promoter and 5UTR window length based on window cutoff
    number_of_windows = len(variable_of_interest_df.window_number.unique())
    #promoter window number plus 1 because window 0 is excluded
    promoter_window_number = len(variable_of_interest_df[variable_of_interest_df.window_number < 0].window_number.unique()) + 1
    #5UTR window number plus 1
    five_UTR_window_number = len(variable_of_interest_df[variable_of_interest_df.window_number > 0].window_number.unique()) + 1
    #max_promoter_length: windows overlap by window_offset bp, so each adds (length - offset)
    window_length = variable_of_interest_df.window_length.max()
    max_promoter_length = promoter_window_number*(window_length-window_offset)
    max_5UTR_length = five_UTR_window_number*(window_length-window_offset)
    #make plotted columns floats
    merged2 = merged2.astype({f'{variable_of_interest}':'float64',f'{chromatin_tissue_variable}':'float64'})
    #change estimator: seaborn accepts the string 'mean' directly; 'median' is passed as np.median
    if estimator == 'mean':
        new_estimator=estimator
    if estimator == 'median':
        new_estimator = np.median
    #set number of subplots so can easily change all output possibilities, where subplotA is the top
    subplots = 2
    f, axes = plt.subplots(subplots, figsize=(10,8))
    OpenChromplot = axes[subplots-subplots]
    variableofinterestplot = axes[subplots-(subplots-1)]
    #check the plot axes variables are there. If they are not, assign None to them
    try:
        OpenChromplot
    except NameError:
        OpenChromplot = None
    try:
        variableofinterestplot
    except NameError:
        variableofinterestplot = None
    if genetype!=False:
        #filter so only genetype subset present
        merged2 = merged2[merged2.gene_type.notnull()]
        #make window number a float
        merged2 = merged2.astype({'window_number':'float64'})
        #remove windows with fewer than 50 promoters extending to that location if looking at specific genetypes
        merged2 = merged2[merged2['window_number'].map(merged2['window_number'].value_counts()) > 49]
        #recalculate promoter and 5UTR window length based on window cutoff
        number_of_windows = len(merged2.window_number.unique())
        #promoter window number plus 1 because window 0 is excluded
        promoter_window_number = len(merged2[merged2.window_number < 0].window_number.unique()) + 1
        #5UTR window number plus 1
        five_UTR_window_number = len(merged2[merged2.window_number > 0].window_number.unique()) + 1
        #redefine max_promoter_length
        window_length = merged2.window_length.max()
        max_promoter_length = promoter_window_number*(window_length-window_offset)
        max_5UTR_length = five_UTR_window_number*(window_length-window_offset)
    #make all values of interest negative as upstream from ATG
    merged_positive = merged2.copy()
    merged2[['promoter_length','position']] = -merged2[['promoter_length','position']]
    if genetype==False:
        #if openchromplot variable present, add that plot
        if OpenChromplot !=None:
            #Open chromatin lineplot
            sns.lineplot(y=merged2[chromatin_tissue_variable],x=merged2.position,ax=OpenChromplot,estimator=new_estimator,ci=ci, n_boot=n_boot)
            #set titles and axes labels
            OpenChromplot.set_title(f'A: All promoters {chromatin_tissue_variable_name}', weight='bold')
            OpenChromplot.set_ylabel(f'{estimator} % open chromatin')
            OpenChromplot.set_xlabel('')
            OpenChromplot.set_xticklabels([])
        #if variableofinterestplot variable present, add that plot
        if variableofinterestplot !=None:
            #variable of interest lineplot
            sns.lineplot(y=merged2[variable_of_interest], x=merged2.position, ax=variableofinterestplot,estimator=new_estimator,ci=ci, n_boot=n_boot)
            #set titles and axes labels
            variableofinterestplot.set_title(f'B: All promoters sliding windows {variable_of_interest_name}', weight='bold')
            variableofinterestplot.set_ylabel(f'{estimator} {variable_of_interest_name}')
            variableofinterestplot.set_xlabel('position relative to Araport 11 TSS')
    elif genetype2==False:
        #single gene category: filter so only genetype subset present
        merged2 = merged2[merged2.gene_type.notnull()]
        #if openchromplot variable present, add that plot
        if OpenChromplot !=None:
            #Open chromatin lineplot
            sns.lineplot(y=merged2[merged2.gene_type == genetype][chromatin_tissue_variable],x=merged2[merged2.gene_type == genetype].position,
                         ax=OpenChromplot,estimator=new_estimator,ci=ci, n_boot=n_boot)
            #set titles and axes labels
            OpenChromplot.set_title(f'A: {genetype} {chromatin_tissue_variable_name}', weight='bold')
            OpenChromplot.set_ylabel(f'{estimator} % open chromatin')
            OpenChromplot.set_xlabel('')
            OpenChromplot.set_xticklabels([])
        #if variableofinterestplot variable present, add that plot
        if variableofinterestplot !=None:
            #variable of interest lineplot
            sns.lineplot(y=merged2[merged2.gene_type == genetype][variable_of_interest], x=merged2[merged2.gene_type == genetype].position,
                         ax=variableofinterestplot, estimator=new_estimator,ci=ci, n_boot=n_boot)
            #set titles and axes labels
            variableofinterestplot.set_title(f'B: {genetype} {variable_of_interest_name}', weight='bold')
            variableofinterestplot.set_ylabel(f'{estimator} {variable_of_interest_name}')
            variableofinterestplot.set_xlabel('position relative to Araport 11 TSS')
    elif genetype3==False:
        #two gene categories: filter so only genetype subset present
        merged2 = merged2[merged2.gene_type.notnull()]
        ## make a subselection of categories so all sample sizes are equal
        # first select only the relevant genetypes
        merged2 = merged2[merged2.gene_type.isin([genetype,genetype2])]
        # make each promoter unique
        merged2_unique = merged2.drop_duplicates('AGI')
        # identify sample size of the minimum category
        minimum_sample_size = merged2_unique.gene_type.value_counts().min()
        # print this
        print(f'sample size in each category = {minimum_sample_size}')
        # multiply this by the number of categories
        total_sample_size = minimum_sample_size * len(merged2_unique.gene_type.unique())
        #select equal sample sizes of each category with a random state of 1 so it's reproducible
        equal_samplesizes = rep_sample(merged2_unique, 'gene_type',total_sample_size,random_state = 1)
        # now filter out genes which were not selected using the minimum sample size
        to_remove = merged2_unique[~merged2_unique.AGI.isin(equal_samplesizes.AGI)]
        merged2 = merged2[~merged2.AGI.isin(to_remove.AGI)]
        #if openchromplot variable present, add that plot
        if OpenChromplot !=None:
            #Open chromatin lineplot, one line per genetype
            sns.lineplot(y=merged2[merged2.gene_type == genetype][chromatin_tissue_variable],x=merged2[merged2.gene_type == genetype].position,
                         ax=OpenChromplot,estimator=new_estimator,label=genetype,ci=ci, n_boot=n_boot)
            sns.lineplot(y=merged2[merged2.gene_type == genetype2][chromatin_tissue_variable],x=merged2[merged2.gene_type == genetype2].position,
                         ax=OpenChromplot,estimator=new_estimator,label=genetype2,ci=ci, n_boot=n_boot)
            #set titles & axes names
            OpenChromplot.set_title(f'A: {chromatin_tissue_variable_name}', weight='bold')
            OpenChromplot.set_ylabel(f'{estimator} % open chromatin')
            OpenChromplot.set_xlabel('')
            OpenChromplot.set_xticklabels([])
        #if variableofinterestplot variable present, add that plot
        if variableofinterestplot !=None:
            #lineplot variable of interest, one line per genetype
            l1=sns.lineplot(y=merged2[merged2.gene_type == genetype][variable_of_interest], x=merged2[merged2.gene_type == genetype].position,
                            ax=variableofinterestplot,label=genetype,estimator=new_estimator,ci=ci, n_boot=n_boot)
            l2=sns.lineplot(y=merged2[merged2.gene_type == genetype2][variable_of_interest], x=merged2[merged2.gene_type == genetype2].position,
                            ax=variableofinterestplot,label=genetype2,estimator=new_estimator,ci=ci, n_boot=n_boot)
            #set titles & axes names
            variableofinterestplot.set_title(f'B: {variable_of_interest_name}', weight='bold')
            variableofinterestplot.set_ylabel(f'{estimator} {variable_of_interest_name}')
            variableofinterestplot.set_xlabel('position relative to Araport 11 TSS')
        #set y axis as maximum mean window % bp covered of all genetype subset
        #axes[2].set_ylim([0,merged2.groupby('window_number').percentage_bases_covered.median().max()+20])
        #gene_type labels
        gene_type_labels = [genetype, genetype2]
        # Create the legend
        axes[0].legend()
    else:
        #three gene categories: filter so only genetype subset present
        merged2 = merged2[merged2.gene_type.notnull()]
        ## make a subselection of categories so all sample sizes are equal
        # make each promoter unique
        merged2_unique = merged2.drop_duplicates('AGI')
        # identify sample size of the minimum category
        minimum_sample_size = merged2_unique.gene_type.value_counts().min()
        # print this
        print(f'sample size in each category = {minimum_sample_size}')
        # multiply this by the number of categories
        total_sample_size = minimum_sample_size * len(merged2_unique.gene_type.unique())
        #select equal sample sizes of each category with a random state of 1 so it's reproducible
        equal_samplesizes = rep_sample(merged2_unique, 'gene_type',total_sample_size,random_state = 1)
        # now filter out genes which were not selected using the minimum sample size
        to_remove = merged2_unique[~merged2_unique.AGI.isin(equal_samplesizes.AGI)]
        merged2 = merged2[~merged2.AGI.isin(to_remove.AGI)]
        #if openchromplot variable present, add that plot
        if OpenChromplot !=None:
            #Open chromatin lineplot, one line per genetype
            sns.lineplot(y=merged2[merged2.gene_type == genetype][chromatin_tissue_variable],x=merged2[merged2.gene_type == genetype].position,
                         ax=OpenChromplot,estimator=new_estimator,label=genetype,ci=ci, n_boot=n_boot)
            sns.lineplot(y=merged2[merged2.gene_type == genetype2][chromatin_tissue_variable],x=merged2[merged2.gene_type == genetype2].position,
                         ax=OpenChromplot,estimator=new_estimator,label=genetype2,ci=ci, n_boot=n_boot)
            sns.lineplot(y=merged2[merged2.gene_type == genetype3][chromatin_tissue_variable],x=merged2[merged2.gene_type == genetype3].position,
                         ax=OpenChromplot,estimator=new_estimator,label=genetype3,ci=ci, n_boot=n_boot)
            #set titles & axes names
            OpenChromplot.set_title(f'A: {chromatin_tissue_variable_name}', weight='bold')
            OpenChromplot.set_ylabel(f'{estimator} % open chromatin')
            OpenChromplot.set_xlabel('')
            OpenChromplot.set_xticklabels([])
        #if variableofinterestplot variable present, add that plot
        if variableofinterestplot !=None:
            #lineplot, one line per genetype
            l1=sns.lineplot(y=merged2[merged2.gene_type == genetype][variable_of_interest], x=merged2[merged2.gene_type == genetype].position,
                            ax=variableofinterestplot, label=genetype,estimator=new_estimator,ci=ci, n_boot=n_boot)
            l2=sns.lineplot(y=merged2[merged2.gene_type == genetype2][variable_of_interest], x=merged2[merged2.gene_type == genetype2].position,
                            ax=variableofinterestplot,label=genetype2,estimator=new_estimator,ci=ci, n_boot=n_boot)
            l3=sns.lineplot(y=merged2[merged2.gene_type == genetype3][variable_of_interest], x=merged2[merged2.gene_type == genetype3].position,
                            ax=variableofinterestplot,label=genetype3,estimator=new_estimator,ci=ci, n_boot=n_boot)
            #set titles & axes names
            variableofinterestplot.set_title(f'B: {variable_of_interest_name}', weight='bold')
            variableofinterestplot.set_ylabel(f'{estimator} {variable_of_interest_name}')
            variableofinterestplot.set_xlabel('position relative to Araport 11 TSS')
        #gene_type labels
        gene_type_labels = [genetype, genetype2, genetype3]
        # Create the legend
        axes[0].legend()
    #set x axis range if specified, otherwise span the full promoter+5'UTR extent
    if x_range==False:
        x_range_final = [-max_promoter_length, max_5UTR_length]
    else:
        x_range_final = x_range
    #for all subplots:
    for n in axes:
        #remove grids
        n.grid(False)
        n.set_xlim(x_range_final)
    #use a tight layout
    f.tight_layout()
    plt.savefig(f'../../../data/output/{file_names}/rolling_window/{foldername_prefix}_{promoter_pref}/plots/{output_prefix}_{estimator}_openchromatin_sliding_window.pdf', format='pdf')
    return merged2
# +
# NOTE: legacy distplot-based implementation, superseded by the kdeplot version defined further down; kept commented out for reference.
# def plot_length(df,output_prefix, genetype=False, genetype2=False, genetype3=False):
# ###NEED TO SHARE AXES!
# """function to plot length distribution of promoters and 5'UTRs"""
# #make columns integar
# #df = df.astype({'promoter_length': 'int','five_UTR_length':'int'})
# #make promoter length positive (square then take square root)
# df.promoter_length = (df.promoter_length**2)**(1/2)
# #make integar
# #make subplots
# subplots = 2
# f, axes = plt.subplots(subplots, figsize=(10,8))
# promoterlengths = axes[subplots-subplots]
# fiveUTRlengths = axes[subplots-(subplots-1)]
# #find max length
# max_prom_length = df.promoter_length.max()
# max_5UTR_length = df.five_UTR_length.max()
# #choose the largest value
# x_range = max(max_prom_length, max_5UTR_length)
# if genetype==False:
# #prom length plot
# sns.distplot(df.promoter_length, ax=promoterlengths)
# #5'UTR length plot
# sns.distplot(df.five_UTR_length, ax=fiveUTRlengths,axlabel='length (bp)')
# #set titles
# promoterlengths.set_title('A: promoter lengths using Araport11 TSS annotation')
# fiveUTRlengths.set_title('B: 5\'UTR lengths using Araport11 TSS annotation')
# #set y label
# promoterlengths.set_ylabel('Density')
# fiveUTRlengths.set_ylabel('Density')
# #remove x labels from the top graph
# promoterlengths.set_xlabel('')
# promoterlengths.set_xticklabels([])
# elif genetype2==False:
# #prom length plot
# sns.distplot(df[df.gene_type == genetype].promoter_length, ax=promoterlengths)
# #5'UTR length plot
# sns.distplot(df[df.gene_type == genetype].five_UTR_length, ax=fiveUTRlengths,axlabel='length (bp)')
# #set titles
# promoterlengths.set_title('A: promoter lengths using Araport11 TSS annotation')
# fiveUTRlengths.set_title('B: 5\'UTR lengths using Araport11 TSS annotation')
# #set y label
# promoterlengths.set_ylabel('Density')
# fiveUTRlengths.set_ylabel('Density')
# #remove x labels from the top graph
# promoterlengths.set_xlabel('')
# promoterlengths.set_xticklabels([])
# elif genetype3==False:
# #prom length plots
# sns.distplot(df[df.gene_type == genetype].promoter_length, hist=None,ax=promoterlengths,label=genetype)
# sns.distplot(df[df.gene_type == genetype2].promoter_length, hist=None,ax=promoterlengths,label=genetype2)
# #5'UTR length plots
# sns.distplot(df[df.gene_type == genetype].five_UTR_length,hist=None, ax=fiveUTRlengths,axlabel='length (bp)',label=genetype)
# sns.distplot(df[df.gene_type == genetype2].five_UTR_length,hist=None, ax=fiveUTRlengths,axlabel='length (bp)',label=genetype2)
# #set titles
# promoterlengths.set_title('A: promoter lengths using Araport11 TSS annotation')
# fiveUTRlengths.set_title('B: 5\'UTR lengths using Araport11 TSS annotation')
# #set y label
# promoterlengths.set_ylabel('Density')
# fiveUTRlengths.set_ylabel('Density')
# #remove x labels from the top graph
# promoterlengths.set_xlabel('')
# promoterlengths.set_xticklabels([])
# plt.legend()
# else:
# #prom length plots
# sns.distplot(df[df.gene_type == genetype].promoter_length, hist=None,ax=promoterlengths,label=genetype)
# sns.distplot(df[df.gene_type == genetype2].promoter_length, hist=None,ax=promoterlengths,label=genetype2)
# sns.distplot(df[df.gene_type == genetype3].promoter_length, hist=None,ax=promoterlengths,label=genetype3)
# #5'UTR length plots
# sns.distplot(df[df.gene_type == genetype].five_UTR_length,hist=None, ax=fiveUTRlengths,axlabel='length (bp)',label=genetype)
# sns.distplot(df[df.gene_type == genetype2].five_UTR_length,hist=None, ax=fiveUTRlengths,axlabel='length (bp)',label=genetype2)
# sns.distplot(df[df.gene_type == genetype3].five_UTR_length,hist=None, ax=fiveUTRlengths,axlabel='length (bp)',label=genetype3)
# #set titles
# promoterlengths.set_title('A: promoter lengths using Araport11 TSS annotation')
# fiveUTRlengths.set_title('B: 5\'UTR lengths using Araport11 TSS annotation')
# #set y label
# promoterlengths.set_ylabel('Density')
# fiveUTRlengths.set_ylabel('Density')
# #remove x labels from the top graph
# promoterlengths.set_xlabel('')
# promoterlengths.set_xticklabels([])
# plt.legend()
# for n in axes:
# #remove grids
# n.grid(False)
# n.set_xlim(0,x_range)
# #tight layout
# plt.tight_layout()
# #save figure
# plt.savefig(f'../../../data/output/{file_names}/rolling_window/{foldername_prefix}_{promoter_pref}/plots/{output_prefix}_promoter_lengths.pdf', format='pdf')
# -
def plot_length(df,output_prefix, genetype=False, genetype2=False, genetype3=False):
    ###NEED TO SHARE AXES!
    """Plot kernel-density distributions of promoter and 5'UTR lengths and save as PDF.

    df must contain promoter_length and five_UTR_length columns (and gene_type
    when any genetype argument is given). With no genetype arguments all rows
    are plotted together; with one, two or three genetype names, one density
    curve per category is drawn. NOTE: df.promoter_length is made positive
    in place, so the caller's DataFrame is modified.
    """
    #make columns integer
    #df = df.astype({'promoter_length': 'int','five_UTR_length':'int'})
    #make promoter length positive (square then take square root, i.e. absolute value)
    df.promoter_length = (df.promoter_length**2)**(1/2)
    #make integer
    #make subplots (A: promoter lengths on top, B: 5'UTR lengths below)
    subplots = 2
    f, axes = plt.subplots(subplots, figsize=(10,8))
    promoterlengths = axes[subplots-subplots]
    fiveUTRlengths = axes[subplots-(subplots-1)]
    #find max length
    max_prom_length = df.promoter_length.max()
    max_5UTR_length = df.five_UTR_length.max()
    #choose the largest value so both subplots share the same x extent
    x_range = max(max_prom_length, max_5UTR_length)
    if genetype==False:
        #prom length plot
        sns.kdeplot(df.promoter_length, ax=promoterlengths)
        #5'UTR length plot
        sns.kdeplot(df.five_UTR_length, ax=fiveUTRlengths,)
        #set titles
        promoterlengths.set_title('A: promoter lengths using Araport11 TSS annotation')
        fiveUTRlengths.set_title('B: 5\'UTR lengths using Araport11 TSS annotation')
        # #set axes labels
        # promoterlengths.set_axis_labels("Density (a.u.)", 'length (bp)')
        # fiveUTRlengths.set_axis_labels("Density (a.u.)", 'length (bp)')
        fiveUTRlengths.set_xlabel('length (bp)')
        #remove x labels from the top graph
        promoterlengths.set_xlabel('')
        promoterlengths.set_xticklabels([])
    elif genetype2==False:
        #prom length plot for the single category
        sns.kdeplot(df[df.gene_type == genetype].promoter_length, ax=promoterlengths)
        #5'UTR length plot
        sns.kdeplot(df[df.gene_type == genetype].five_UTR_length, ax=fiveUTRlengths)
        #set titles
        promoterlengths.set_title('A: promoter lengths using Araport11 TSS annotation')
        fiveUTRlengths.set_title('B: 5\'UTR lengths using Araport11 TSS annotation')
        # #set axes labels
        # promoterlengths.set_axis_labels("Density (a.u.)", 'length (bp)')
        # fiveUTRlengths.set_axis_labels("Density (a.u.)", 'length (bp)')
        #remove x labels from the top graph
        promoterlengths.set_xlabel('')
        promoterlengths.set_xticklabels([])
        fiveUTRlengths.set_xlabel('length (bp)')
    elif genetype3==False:
        #prom length plots, one curve per category
        sns.kdeplot(df[df.gene_type == genetype].promoter_length, ax=promoterlengths,label=genetype)
        sns.kdeplot(df[df.gene_type == genetype2].promoter_length, ax=promoterlengths,label=genetype2)
        #5'UTR length plots
        sns.kdeplot(df[df.gene_type == genetype].five_UTR_length,ax=fiveUTRlengths,label=genetype)
        sns.kdeplot(df[df.gene_type == genetype2].five_UTR_length, ax=fiveUTRlengths,label=genetype2)
        #set titles
        promoterlengths.set_title('A: promoter lengths using Araport11 TSS annotation')
        fiveUTRlengths.set_title('B: 5\'UTR lengths using Araport11 TSS annotation')
        # #set axes labels
        # promoterlengths.set_axis_labels("Density (a.u.)", 'length (bp)')
        # fiveUTRlengths.set_axis_labels("Density (a.u.)", 'length (bp)')
        fiveUTRlengths.set_xlabel('length (bp)')
        #remove x labels from the top graph
        promoterlengths.set_xlabel('')
        promoterlengths.set_xticklabels([])
        plt.legend()
    else:
        #prom length plots, one curve per category
        sns.kdeplot(df[df.gene_type == genetype].promoter_length,ax=promoterlengths,label=genetype)
        sns.kdeplot(df[df.gene_type == genetype2].promoter_length, ax=promoterlengths,label=genetype2)
        sns.kdeplot(df[df.gene_type == genetype3].promoter_length,ax=promoterlengths,label=genetype3)
        #5'UTR length plots
        sns.kdeplot(df[df.gene_type == genetype].five_UTR_length,ax=fiveUTRlengths,label=genetype)
        sns.kdeplot(df[df.gene_type == genetype2].five_UTR_length, ax=fiveUTRlengths,label=genetype2)
        sns.kdeplot(df[df.gene_type == genetype3].five_UTR_length, ax=fiveUTRlengths,label=genetype3)
        #set titles
        promoterlengths.set_title('A: promoter lengths using Araport11 TSS annotation')
        fiveUTRlengths.set_title('B: 5\'UTR lengths using Araport11 TSS annotation')
        #set y label
        fiveUTRlengths.set_xlabel('length (bp)')
        #remove x labels from the top graph
        promoterlengths.set_xlabel('')
        promoterlengths.set_xticklabels([])
        plt.legend()
    for n in axes:
        #remove grids
        n.grid(False)
        n.set_xlim(0,x_range)
    #tight layout
    plt.tight_layout()
    #save figure
    plt.savefig(f'../../../data/output/{file_names}/rolling_window/{foldername_prefix}_{promoter_pref}/plots/{output_prefix}_promoter_lengths.pdf', format='pdf')
def add_genetype(df, gene_categories):
    """Attach the gene_type category to each AGI in df.

    gene_categories is a two-column tab-separated file (AGI, gene_type).
    The categories form the left side of the join, so every categorised gene
    appears in the result even when df has no data for it.
    """
    categories = pd.read_table(gene_categories, sep='\t', header=None,
                               names=['AGI', 'gene_type'])
    # left join onto the category list, matching rows of df by AGI
    return pd.merge(categories, df, on='AGI', how='left')
GC_content.columns
#add root chromatin coverage data
GC_content = add_coverage(GC_content, root_chrom_bp_covered,'root_chrom')
#add shoot chromatin coverage data
GC_content = add_coverage(GC_content, shoot_chrom_bp_covered,'shoot_chrom')
#add rootshootintersect chromatin coverage data
GC_content = add_coverage(GC_content, rootshootintersect_chrom_bp_covered,'rootshootintersect_chrom')
GC_content
#all promoters in genome, mean profile
# NOTE(review): this mean-estimator result is immediately overwritten by the
# median run below; only its saved PDF differs.
all_proms = windows_coords('Araport11_allproms','percentage_GC_content','% GC content',GC_content,promoter_bed,promoter_5UTR_bed,50,estimator='mean')
#all promoters in genome, median profile
all_proms = windows_coords('Araport11_allproms','percentage_GC_content','% GC content',GC_content,promoter_bed,promoter_5UTR_bed,50,estimator='median')
all_proms
#length distributions for all promoters (plot_length returns None; it saves a PDF)
lengths = plot_length(all_proms,'Araport11_allproms')
# ## Now do constitutive and variable promoter from Czechowski et al 2005
GC_prom_types = add_genetype(GC_content, Czechowski_gene_categories)
#constitutive vs variable profiles (control category excluded)
rolling_rootshootintersect = windows_coords('Araport11_Czechowski_genetypenocontrol','percentage_GC_content','% GC content',GC_prom_types,promoter_bed,promoter_5UTR_bed,
                                            50,estimator='median', genetype='constitutive', genetype2='variable',ci=95, n_boot=10000)
GC_prom_types
GC_prom_types_length = add_genetype(all_proms, Czechowski_gene_categories)
plot_length(GC_prom_types_length,'Araport11_Czechowski_genetypenocontrol', genetype='constitutive', genetype2='variable')
#constitutive vs variable vs control profiles
rolling_incl_control = windows_coords('Araport11_Czechowski_genetype','percentage_GC_content','% GC content',GC_prom_types,
                                      promoter_bed,promoter_5UTR_bed, 50,estimator='median',
                                      genetype='constitutive', genetype2='variable', genetype3='control',ci=95, n_boot=10000)
plot_length(GC_prom_types_length,'Araport11_Czechowski_genetype', genetype='constitutive', genetype2='variable', genetype3='control')
|
src/plotting/rolling_window/GC_content_rw_plots_centred_around_Araport11TSS.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
#setwd("/Users/alan/stats")
# -
# Install required packages (only needed once per machine).
install.packages("psych")
install.packages("aod")
install.packages("QuantPsyc")
install.packages("lsr")
# psych: describe(); lsr: cohensD(); aod/QuantPsyc loaded for the course toolkit.
library(psych)
library(aod)
library(QuantPsyc)
library(lsr)
# Read the homework data set; header = T keeps the column names.
data <- read.table("Stats1.13.HW.11.txt", header = T)
describe(data)
# Treat subject id as a categorical factor rather than a number.
data$id = factor(data$id)
# +
print("1. Using a t-test, compare verbal scores before and after training in the fixed condition. Is the difference pre-test to post-test significant?")
# Subset to the fixed-feedback condition, then run a paired (dependent) t-test.
data.f <- subset(data,data$cond == "fixed")
t.test(data.f$verbal.post,data.f$verbal.pre,paired=T) # yes
# +
print("2. What are the degrees of freedom for the comparison between pre-test and post-test for the spatial scores?")
t.test(data.f$spatial.post,data.f$spatial.pre,paired=T) # 49
# +
print("3. Run a Wilcoxon test for the same comparison (pre-test to post-test on spatial scores, fixed condition). Which of the two tests gives the highest p-value for the comparison?")
# Non-parametric analogue of the paired t-test.
wilcox.test(data.f$spatial.pre,data.f$spatial.post,paired=T) # wilcoxan is higher.
# +
print("4. What is the effect size (Cohen's d) for the difference between pre-test and post-test spatial scores for the malleable condition? (round to two decimal places)")
data.m <- subset(data,data$cond == "malleable")
# Cohen's d for dependent t-tests
# d = Mean of difference scores / Standard deviation of difference scores
round(cohensD(data.m$spatial.post, data.m$spatial.pre, method="paired"),2) # 0.45
# I answered the question correctly; the given answer is wrong (fixed vs. malleable)
# +
print("5. Which of the three tasks shows the largest improvements from pre-test to post-test, in the fixed condition?") # verbal
t.test(data.f$verbal.post,data.f$verbal.pre,paired=T)
t.test(data.f$spatial.post,data.f$spatial.pre,paired=T)
t.test(data.f$intel.post,data.f$intel.pre,paired=T)
# +
print("6. Which of the three tasks shows the largest improvements from pre-test to post-test, in the malleable condition?") # verbal
t.test(data.m$verbal.post,data.m$verbal.pre,paired=T)
t.test(data.m$spatial.post,data.m$spatial.pre,paired=T)
t.test(data.m$intel.post,data.m$intel.pre,paired=T)
# +
print("7. Conduct Mann-Whitney comparisons between all tasks at pre-test. Which task(s) differ significantly from the other two in pre-test scores?") # all?
# paired=F gives the unpaired (Mann-Whitney / rank-sum) form of the Wilcoxon test.
wilcox.test(data$verbal.pre,data$spatial.pre,paired=F)
wilcox.test(data$verbal.pre,data$intel.pre,paired=F)
wilcox.test(data$spatial.pre,data$intel.pre,paired=F)
# -
print("8. Which feedback condition led to the largest improvements overall?")
# malleable: 1.38 vs. 1.06 for fixed
# Composite scores: sum the three tasks, then compare pre vs. post effect sizes.
pre.m = data.m$verbal.pre + data.m$spatial.pre + data.m$intel.pre
post.m = data.m$verbal.post + data.m$spatial.post + data.m$intel.post
cohensD(pre.m, post.m, method="paired")
pre.f = data.f$verbal.pre + data.f$spatial.pre + data.f$intel.pre
post.f = data.f$verbal.post + data.f$spatial.post + data.f$intel.post
cohensD(pre.f, post.f, method="paired")
print("9. Which task is largely driving this effect?") # verbal
# Per-task effect sizes in each condition.
cohensD(data.m$verbal.pre, data.m$verbal.post, method="paired")
cohensD(data.m$spatial.pre, data.m$spatial.post, method="paired")
cohensD(data.m$intel.pre, data.m$intel.post, method="paired")
cohensD(data.f$verbal.pre, data.f$verbal.post, method="paired")
cohensD(data.f$spatial.pre, data.f$spatial.post, method="paired")
cohensD(data.f$intel.pre, data.f$intel.post, method="paired")
print("10. Based on the present data, are you convinced that malleable feedback is beneficial to performance when engaging in a cognitive training program?") # it depends.
# cohen's D is high for verbal. Not so different for others.
|
Stats HW11.R.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import re
import base64
import requests
import yaml
from collections import Counter
# ## This notebook illustrates how to obtain the training data for a specific repo (issues with labels), in this case [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
#
# Note there are several ways to get the training data: (1) BigQuery and (2) Scraping or the API. This notebook shows how to obtain training data from BigQuery. In production scenarios we will likely use the API or scraping as not all data is contained in BigQuery.
# # Part 1 Acquire Data
#
# ### Using [BigQuery](https://console.cloud.google.com/bigquery?sq=1073071082706:e0106dd7b95a4acaabe5ee94d8bffdf9)
#
# ```sql
#
# #standardSQL
#
# SELECT *
# FROM (
# SELECT
# updated_at
# , MAX(updated_at) OVER (PARTITION BY url) as last_time
# , FORMAT("%T", ARRAY_CONCAT_AGG(labels)) as labels
# , repo, url, title, body, len_labels
# FROM(
# SELECT
# TIMESTAMP(REGEXP_REPLACE(JSON_EXTRACT(payload, '$.issue.updated_at'), "\"", "")) as updated_at
# , REGEXP_EXTRACT(JSON_EXTRACT(payload, '$.issue.url'), r'https://api.github.com/repos/(.*)/issues') as repo
# , JSON_EXTRACT(payload, '$.issue.url') as url
# -- extract the title and body removing parentheses, brackets, and quotes
# , LOWER(TRIM(REGEXP_REPLACE(JSON_EXTRACT(payload, '$.issue.title'), r"\\n|\(|\)|\[|\]|#|\*|`|\"", ' '))) as title
# , LOWER(TRIM(REGEXP_REPLACE(JSON_EXTRACT(payload, '$.issue.body'), r"\\n|\(|\)|\[|\]|#|\*|`|\"", ' '))) as body
# , REGEXP_EXTRACT_ALL(JSON_EXTRACT(payload, "$.issue.labels"), ',"name\":"(.+?)","color') as labels
# , ARRAY_LENGTH(REGEXP_EXTRACT_ALL(JSON_EXTRACT(payload, "$.issue.labels"), ',"name\":"(.+?)","color')) as len_labels
# FROM `githubarchive.month.20*`
# WHERE
# _TABLE_SUFFIX BETWEEN '1601' and '1912'
# and type="IssuesEvent"
# )
# WHERE
# repo = 'kubernetes/kubernetes'
# GROUP BY updated_at, repo, url, title, body, len_labels
# )
# WHERE last_time = updated_at and len_labels >= 1
# ```
#
# #### Data from the above query is available on GCS: [https://storage.googleapis.com/issue_label_bot/k8s_issues/000000000000.csv](https://storage.googleapis.com/issue_label_bot/k8s_issues/000000000000.csv)
# +
import ast  # stdlib: safe parsing of the stringified label lists

df = pd.read_csv('https://storage.googleapis.com/issue_label_bot/k8s_issues/000000000000.csv')

# The `labels` column is serialized as a Python list literal (e.g. "['kind/bug']").
# Use ast.literal_eval instead of eval: it parses only literals, so malformed or
# malicious strings in the downloaded CSV cannot execute arbitrary code.
df.labels = df.labels.apply(ast.literal_eval)

# change data type of last_time to a proper datetime for time-based filtering
df['last_time'] = pd.to_datetime(df.last_time)

df.head()
# -
# ### Clean Data
#
# - Remove labels that are deprecated and [no longer specified here](https://raw.githubusercontent.com/kubernetes/test-infra/master/label_sync/labels.yaml).
# - Remove bot commands that contain the label name (otherwise this will be data leakage).
# +
def get_current_labels(url="https://raw.githubusercontent.com/kubernetes/test-infra/master/label_sync/labels.yaml"):
    """
    Fetch the names of the currently valid issue labels (labels get deprecated
    over time, so the historical data must be filtered against this list).
    See: https://kubernetes.slack.com/archives/C1TU9EB9S/p1561570627363100
    """
    response = requests.get(url)
    spec = yaml.safe_load(response.content)
    label_entries = spec['default']['labels']
    return [entry['name'] for entry in label_entries]
# remove deprecated labels: keep only labels that are still in the live label set
current_labels = get_current_labels()
df.labels = df.labels.apply(lambda x: [l for l in x if l in current_labels])
# Strip prow bot commands from the issue body so label names do not leak into
# the training features (data leakage).
# Fix: use a raw string -- '\S' in a plain literal is an invalid escape
# sequence (SyntaxWarning since Python 3.12); the runtime pattern is identical.
# NOTE(review): the alternation '/status/triage/|priority' looks suspicious --
# as written it matches the literal '/status/triage/' or bare 'priority',
# not the '/status', '/triage' and '/priority' commands; confirm intent.
df['body'] = df.body.apply(lambda x: re.sub(r'(/sig|/kind|/status/triage/|priority) \S+', '', str(x)))
# -
# ### Filter Issues
#
# Remove Issues that:
# - Do not have any labels
# - Do not contain at least one label that has a minimum frequency of 25
#
#
#
#
#
#
# +
# remove labels that do not occur at least 25 times across the corpus
c = Counter()
for row in df.labels:
    c.update(row)
min_threshold = 25
min_threshold_labels = [k for k in c if c[k] >= min_threshold]
# NOTE(review): the membership test below is O(len(list)) per label; a set
# would be faster, though correctness is unaffected.
df['labels'] = df.labels.apply(lambda x: [l for l in x if l in min_threshold_labels])
# filter out issues without any labels
df = df[df.labels.apply(lambda x: x != [])]
# -
# remove extraneous columns, keeping only the training-relevant fields
df = df[['last_time', 'repo', 'title', 'body', 'labels']]
print(f'Number of labeled issues after filtering and cleaning: {df.shape[0]:,}')
df.head()
|
Issue_Embeddings/notebooks/07_Get_Repo_TrainingData_BigQuery.ipynb
|
# ---
# jupyter:
# jupytext:
# formats: notebooks//ipynb,python_scripts//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Solution for Exercise 01
#
# The goal is to compare the performance of our classifier to some baseline classifier that would ignore the input data and instead make constant predictions:
# %%
import pandas as pd
# Load the adult census data set straight from OpenML.
df = pd.read_csv("https://www.openml.org/data/get_csv/1595261/adult-census.csv")
# %%
# Separate the prediction target from the features; also drop "fnlwgt"
# (a census sampling weight, not a predictive attribute).
target_name = "class"
target = df[target_name].to_numpy()
data = df.drop(columns=[target_name, "fnlwgt"])
# Keep only numeric columns (dtype kind "i" = integer, "f" = float).
numerical_columns = [c for c in data.columns
                     if data[c].dtype.kind in ["i", "f"]]
data_numeric = data[numerical_columns]
# %%
from sklearn.model_selection import cross_val_score
from sklearn.dummy import DummyClassifier
# Baseline 1: always predict the high-revenue class.
high_revenue_clf = DummyClassifier(strategy="constant", constant=" >50K")
scores = cross_val_score(high_revenue_clf, data_numeric, target)
print(f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %%
# Baseline 2: always predict the low-revenue class.
low_revenue_clf = DummyClassifier(strategy="constant", constant=" <=50K")
scores = cross_val_score(low_revenue_clf, data_numeric, target)
print(f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %%
# Baseline 3: always predict the most frequent class seen in training.
most_freq_revenue_clf = DummyClassifier(strategy="most_frequent")
scores = cross_val_score(most_freq_revenue_clf, data_numeric, target)
print(f"{scores.mean():.3f} +/- {scores.std():.3f}")
# %% [markdown]
# So 81% accuracy is significantly better than 76% which is the score of a baseline model that would always predict the most frequent class which is the low revenue class: `" <=50K"`.
#
# In this dataset, we can see that the target classes are imbalanced: almost 3/4 of the records are people with a revenue below 50K:
# %%
# Class counts, and the same imbalance expressed as a proportion.
df["class"].value_counts()
# %%
(target == " <=50K").mean()
|
notebooks/02_basic_preprocessing_exercise_01_solution.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using Interact
#
# In this lecture we will begin to learn about creating dashboard-type GUI with iPython widgets!
# The `interact` function (`ipywidgets.interact`) automatically creates user interface (UI) controls for exploring code and data interactively. It is the easiest way to get started using IPython's widgets.
# +
# Start with some imports!
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# -
# <div class="alert alert-success">
# Please Note! The widgets in this notebook won't show up on NbViewer or GitHub renderings. To view the widgets and interact with them, you will need to download this notebook and run it with a Jupyter Notebook server.
#
# </div>
# ## Basic `interact`
# At the most basic level, `interact` auto-generates UI controls for function arguments, and then calls the function with those arguments when you manipulate the controls interactively. To use `interact`, you need to define a function that you want to explore. Here is a function that prints its only argument `x`.
# Very basic function
def f(x):
    """Identity function: return the argument unchanged."""
    return x
# When you pass this function as the first argument to `interact` along with an integer keyword argument (`x=10`), a slider is generated and bound to the function parameter. Note that the semicolon here just prevents an **out** cell from showing up.
# Generate a slider to interact with (integer abbreviation -> IntSlider)
interact(f, x=10,);
# When you move the slider, the function is called, which prints the current value of `x`.
#
# If you pass `True` or `False`, `interact` will generate a check-box:
# Booleans generate check-boxes
interact(f, x=True);
# If you pass a string, `interact` will generate a text area.
# Strings generate text areas
interact(f, x='Hi there!');
# `interact` can also be used as a decorator. This allows you to define a function and interact with it in a single shot. As this example shows, `interact` also works with functions that have multiple arguments.
# Using a decorator!
@interact(x=True, y=1.0)
def g(x, y):
    """Return both arguments as a tuple (checkbox + float-slider demo)."""
    return (x, y)
# ## Fixing arguments using `fixed`
# There are times when you may want to explore a function using `interact`, but fix one or more of its arguments to specific values. This can be accomplished by wrapping values with the `fixed` function.
# Again, a simple function
def h(p, q):
    """Return both arguments as a tuple."""
    return (p, q)
# When we call `interact`, we pass `fixed(20)` for q to hold it fixed at a value of `20`.
interact(h, p=5, q=fixed(20));
# Notice that a slider is only produced for `p` as the value of `q` is fixed.
# ## Widget abbreviations
# When you pass an integer-valued keyword argument of `10` (`x=10`) to `interact`, it generates an integer-valued slider control with a range of `[-10,+3\times10]`. In this case, `10` is an *abbreviation* for an actual slider widget:
#
# ```python
# IntSlider(min=-10,max=30,step=1,value=10)
# ```
#
# In fact, we can get the same result if we pass this `IntSlider` as the keyword argument for `x`:
# Can call the IntSlider directly for explicit control of range, step and initial value
interact(f, x=widgets.IntSlider(min=-10,max=30,step=1,value=10));
# This examples clarifies how `interact` processes its keyword arguments:
#
# 1. If the keyword argument is a `Widget` instance with a `value` attribute, that widget is used. Any widget with a `value` attribute can be used, even custom ones.
# 2. Otherwise, the value is treated as a *widget abbreviation* that is converted to a widget before it is used.
#
# The following table gives an overview of different widget abbreviations:
#
# <table class="table table-condensed table-bordered">
# <tr><td><strong>Keyword argument</strong></td><td><strong>Widget</strong></td></tr>
# <tr><td>`True` or `False`</td><td>Checkbox</td></tr>
# <tr><td>`'Hi there'`</td><td>Text</td></tr>
# <tr><td>`value` or `(min,max)` or `(min,max,step)` if integers are passed</td><td>IntSlider</td></tr>
# <tr><td>`value` or `(min,max)` or `(min,max,step)` if floats are passed</td><td>FloatSlider</td></tr>
# <tr><td>`['orange','apple']` or `{'one':1,'two':2}`</td><td>Dropdown</td></tr>
# </table>
# Note that a dropdown is used if a list or a dict is given (signifying discrete choices), and a slider is used if a tuple is given (signifying a range).
#
# You have seen how the checkbox and text area widgets work above. Here, more details about the different abbreviations for sliders and drop-downs are given.
#
# If a 2-tuple of integers is passed `(min,max)`, an integer-valued slider is produced with those minimum and maximum values (inclusively). In this case, the default step size of `1` is used.
# Min,Max slider with Tuples: a 2-tuple of ints -> IntSlider with default step 1
interact(f, x=(0,4));
# If a 3-tuple of integers is passed `(min,max,step)`, the step size can also be set.
# (min, max, step)
interact(f, x=(0,8,2));
# A float-valued slider is produced if the elements of the tuples are floats. Here the minimum is `0.0`, the maximum is `10.0` and step size is `0.1` (the default).
interact(f, x=(0.0,10.0));
# The step size can be changed by passing a third element in the tuple.
interact(f, x=(0.0,10.0,0.01));
# For both integer and float-valued sliders, you can pick the initial value of the widget by passing a default keyword argument to the underlying Python function. Here we set the initial value of a float slider to `5.5`.
@interact(x=(0.0,20.0,0.5))
def h(x=5.5):
    """Identity; the default 5.5 sets the slider's initial position."""
    return x
# Dropdown menus are constructed by passing a list of strings. In this case, the strings are both used as the names in the drop-down menu UI and passed to the underlying Python function.
interact(f, x=['apples','oranges']);
# If you want a drop-down menu that passes non-string values to the Python function, you can pass a dictionary. The keys in the dictionary are used for the names in the drop-down menu UI and the values are the arguments that are passed to the underlying Python function.
interact(f, x={'one': 10, 'two': 20});
# ## Using function annotations with `interact`
# You can also specify widget abbreviations using [function annotations](https://docs.python.org/3/tutorial/controlflow.html#function-annotations).
#
# Define a function with a checkbox widget abbreviation for the argument `x`.
def f(x:True): # Python 3 only -- annotation acts as the widget abbreviation
    return x
# Then, because the widget abbreviation has already been defined, you can call `interact` with a single argument.
interact(f);
# ## interactive
# In addition to `interact`, IPython provides another function, `interactive`, that is useful when you want to reuse the widgets that are produced or access the data that is bound to the UI controls.
#
# Note that unlike `interact`, the return value of the function will not be displayed automatically, but you can display a value inside the function with `IPython.display.display`.
#
# Here is a function that returns the sum of its two arguments and displays them. The display line may be omitted if you don’t want to show the result of the function.
# +
from IPython.display import display
def f(a, b):
    """Display and return the sum of the two arguments."""
    display(a + b)
    return a+b
# -
# Unlike `interact`, `interactive` returns a `Widget` instance rather than immediately displaying the widget.
w = interactive(f, a=10, b=20)
# The widget is an `interactive`, a subclass of `VBox`, which is a container for other widgets.
type(w)
# The children of the `interactive` are two integer-valued sliders and an output widget, produced by the widget abbreviations above.
w.children
# To actually display the widgets, you can use IPython's `display` function.
display(w)
# At this point, the UI controls work just like they would if `interact` had been used. You can manipulate them interactively and the function will be called. However, the widget instance returned by `interactive` also give you access to the current keyword arguments and return value of the underlying Python function.
#
# Here are the current keyword arguments. If you rerun this cell after manipulating the sliders, the values will have changed.
w.kwargs
# Here is the current return value of the function.
w.result
# # Conclusion
#
# You should now have a basic understanding of how to use Interact in Jupyter Notebooks!
|
16-Bonus Material - Introduction to GUIs/01-Interact.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Path Tutorial
#
#
# Defining paths in your Matplotlib visualization.
#
# The object underlying all of the :mod:`matplotlib.patches` objects is
# the :class:`~matplotlib.path.Path`, which supports the standard set of
# moveto, lineto, curveto commands to draw simple and compound outlines
# consisting of line segments and splines. The ``Path`` is instantiated
# with a (N, 2) array of (x, y) vertices, and a N-length array of path
# codes. For example to draw the unit rectangle from (0, 0) to (1, 1), we
# could use this code:
#
# +
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
# Vertices of the unit square; the final CLOSEPOLY vertex is required but its
# coordinates are ignored (the path closes back to the MOVETO point).
verts = [
    (0., 0.), # left, bottom
    (0., 1.), # left, top
    (1., 1.), # right, top
    (1., 0.), # right, bottom
    (0., 0.), # ignored
]
# One drawing code per vertex: pen down at the first point, three line
# segments, then close the polygon.
codes = [
    Path.MOVETO,
    Path.LINETO,
    Path.LINETO,
    Path.LINETO,
    Path.CLOSEPOLY,
]
path = Path(verts, codes)
fig, ax = plt.subplots()
patch = patches.PathPatch(path, facecolor='orange', lw=2)
ax.add_patch(patch)
# Widen the limits so the unit square is not flush against the axes edges.
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
plt.show()
# -
# The following path codes are recognized
#
# ============== ================================= ====================================================================================================================
# Code Vertices Description
# ============== ================================= ====================================================================================================================
# ``STOP`` 1 (ignored) A marker for the end of the entire path (currently not required and ignored)
# ``MOVETO`` 1 Pick up the pen and move to the given vertex.
# ``LINETO`` 1 Draw a line from the current position to the given vertex.
# ``CURVE3`` 2 (1 control point, 1 endpoint) Draw a quadratic Bézier curve from the current position, with the given control point, to the given end point.
# ``CURVE4`` 3 (2 control points, 1 endpoint) Draw a cubic Bézier curve from the current position, with the given control points, to the given end point.
# ``CLOSEPOLY`` 1 (point itself is ignored) Draw a line segment to the start point of the current polyline.
# ============== ================================= ====================================================================================================================
#
#
# .. path-curves:
#
#
# Bézier example
# ==============
#
# Some of the path components require multiple vertices to specify them:
# for example CURVE 3 is a `bézier
# <https://en.wikipedia.org/wiki/B%C3%A9zier_curve>`_ curve with one
# control point and one end point, and CURVE4 has three vertices for the
# two control points and the end point. The example below shows a
# CURVE4 Bézier spline -- the bézier curve will be contained in the
# convex hull of the start point, the two control points, and the end
# point
#
#
# +
# Control polygon of a cubic Bezier: start P0, control points P1/P2, end P3.
verts = [
    (0., 0.), # P0
    (0.2, 1.), # P1
    (1., 0.8), # P2
    (0.8, 0.), # P3
]
# MOVETO to P0, then one CURVE4 segment consuming the next three vertices.
codes = [
    Path.MOVETO,
    Path.CURVE4,
    Path.CURVE4,
    Path.CURVE4,
]
path = Path(verts, codes)
fig, ax = plt.subplots()
patch = patches.PathPatch(path, facecolor='none', lw=2)
ax.add_patch(patch)
# Overlay the control polygon and label its vertices.
xs, ys = zip(*verts)
ax.plot(xs, ys, 'x--', lw=2, color='black', ms=10)
ax.text(-0.05, -0.05, 'P0')
ax.text(0.15, 1.05, 'P1')
ax.text(1.05, 0.85, 'P2')
ax.text(0.85, -0.05, 'P3')
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(-0.1, 1.1)
plt.show()
# -
# .. compound_paths:
#
# Compound paths
# ==============
#
# All of the simple patch primitives in matplotlib, Rectangle, Circle,
# Polygon, etc, are implemented with simple path. Plotting functions
# like :meth:`~matplotlib.axes.Axes.hist` and
# :meth:`~matplotlib.axes.Axes.bar`, which create a number of
# primitives, e.g., a bunch of Rectangles, can usually be implemented more
# efficiently using a compound path. The reason ``bar`` creates a list
# of rectangles and not a compound path is largely historical: the
# :class:`~matplotlib.path.Path` code is comparatively new and ``bar``
# predates it. While we could change it now, it would break old code,
# so here we will cover how to create compound paths, replacing the
# functionality in bar, in case you need to do so in your own code for
# efficiency reasons, e.g., you are creating an animated bar plot.
#
# We will make the histogram chart by creating a series of rectangles
# for each histogram bar: the rectangle width is the bin width and the
# rectangle height is the number of datapoints in that bin. First we'll
# create some random normally distributed data and compute the
# histogram. Because numpy returns the bin edges and not centers, the
# length of ``bins`` is 1 greater than the length of ``n`` in the
# example below::
#
# # histogram our data with numpy
# data = np.random.randn(1000)
# n, bins = np.histogram(data, 100)
#
# We'll now extract the corners of the rectangles. Each of the
# ``left``, ``bottom``, etc, arrays below is ``len(n)``, where ``n`` is
# the array of counts for each histogram bar::
#
# # get the corners of the rectangles for the histogram
# left = np.array(bins[:-1])
# right = np.array(bins[1:])
# bottom = np.zeros(len(left))
# top = bottom + n
#
# Now we have to construct our compound path, which will consist of a
# series of ``MOVETO``, ``LINETO`` and ``CLOSEPOLY`` for each rectangle.
# For each rectangle, we need 5 vertices: 1 for the ``MOVETO``, 3 for
# the ``LINETO``, and 1 for the ``CLOSEPOLY``. As indicated in the
# table above, the vertex for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices::
#
# nverts = nrects*(1+3+1)
# verts = np.zeros((nverts, 2))
# codes = np.ones(nverts, int) * path.Path.LINETO
# codes[0::5] = path.Path.MOVETO
# codes[4::5] = path.Path.CLOSEPOLY
# verts[0::5, 0] = left
# verts[0::5, 1] = bottom
# verts[1::5, 0] = left
# verts[1::5, 1] = top
# verts[2::5, 0] = right
# verts[2::5, 1] = top
# verts[3::5, 0] = right
# verts[3::5, 1] = bottom
#
# All that remains is to create the path, attach it to a
# :class:`~matplotlib.patches.PathPatch`, and add it to our axes::
#
# barpath = path.Path(verts, codes)
# patch = patches.PathPatch(barpath, facecolor='green',
# edgecolor='yellow', alpha=0.5)
# ax.add_patch(patch)
#
#
# +
import numpy as np
import matplotlib.patches as patches
import matplotlib.path as path
fig, ax = plt.subplots()
# Fixing random state for reproducibility
np.random.seed(19680801)
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# 5 path entries per bar: 1 MOVETO + 3 LINETO + 1 CLOSEPOLY (that last
# vertex is ignored but keeps codes and vertices aligned).
nverts = nrects*(1+3+1)
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0::5] = path.Path.MOVETO
codes[4::5] = path.Path.CLOSEPOLY
# Fill the four real corners of every bar using strided slices.
verts[0::5, 0] = left
verts[0::5, 1] = bottom
verts[1::5, 0] = left
verts[1::5, 1] = top
verts[2::5, 0] = right
verts[2::5, 1] = top
verts[3::5, 0] = right
verts[3::5, 1] = bottom
# A single compound path renders all the bars as one patch.
barpath = path.Path(verts, codes)
patch = patches.PathPatch(barpath, facecolor='green',
                          edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
plt.show()
# -
|
matplotlib/tutorials_jupyter/advanced/path_tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [ATM 623: Climate Modeling](../index.ipynb)
#
# [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany
#
# # Lecture 12: Examining the transient and equilibrium CO$_2$ response in the CESM
# ## Warning: content out of date and not maintained
#
# You really should be looking at [The Climate Laboratory book](https://brian-rose.github.io/ClimateLaboratoryBook) by <NAME>, where all the same content (and more!) is kept up to date.
#
# ***Here you are likely to find broken links and broken code.***
# ### About these notes:
#
# This document uses the interactive [`Jupyter notebook`](https://jupyter.org) format. The notes can be accessed in several different ways:
#
# - The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware
# - The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb)
# - A complete snapshot of the notes as of May 2017 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/Notes/index.html).
#
# [Also here is a legacy version from 2015](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html).
#
# Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab
# ## Contents
#
# ...
# I have run two sets of experiments with the CESM model:
#
# - The fully coupled model:
# - pre-industrial control
# - 1%/year CO2 ramp scenario for 80 years
# - The slab ocean model:
# - pre-industrial control with prescribed q-flux
# - 2xCO2 scenario run out to equilibrium
# Our main first task is to compute the two canonical measures of climate sensitivity for this model:
#
# - Equilibrium Climate Sensitivity (ECS)
# - Transient Climate Response (TCR)
# From the IPCC AR5 WG1 report, Chapter 9, page 817:
#
# > Equilibrium climate sensitivity (ECS) is the equilibrium change in global and annual mean surface air temperature after doubling the atmospheric concentration of CO2 relative to pre-industrial levels.
#
# > The transient climate response (TCR) is the change in global and annual mean surface temperature from an experiment in which the CO2 concentration is increased by 1% yr–1, and calculated using the difference between the start of the experiment and a 20-year period centred on the time of CO2 doubling.
# ### First, a quick demonstration that 1%/year compounded increase reaches doubling after 70 years
# Demonstrate that 1%/year compounded growth roughly doubles after 70 years.
# Same repeated multiplication as before, expressed as a while loop.
startingamount = 1.
amount = startingamount
year = 0
while year < 70:
    amount *= 1.01
    year += 1
amount
# TCR is always smaller than ECS due to the transient effects of ocean heat uptake.
#
# We are going to **estimate** the ECS of the fully coupled model by using the equilibrium response of the Slab Ocean Model.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
# ## Load the concatenated output from the CAM output (atmosphere)
# Map short experiment names to the CESM case names on the server.
casenames = {'cpl_control': 'cpl_1850_f19',
             'cpl_CO2ramp': 'cpl_CO2ramp_f19',
             'som_control': 'som_1850_f19',
             'som_2xCO2': 'som_1850_2xCO2',
            }
# The path to the THREDDS server, should work from anywhere
basepath = 'http://thredds.atmos.albany.edu:8080/thredds/dodsC/CESMA/'
# For better performance if you can access the roselab_rit filesystem (e.g. from JupyterHub)
#basepath = '/roselab_rit/cesm_archive/'
# Build the directory path holding each case's concatenated output.
casepaths = {}
for name in casenames:
    casepaths[name] = basepath + casenames[name] + '/concatenated/'
# make a dictionary of all the CAM atmosphere output
# decode_times=False keeps the raw numeric time coordinate (used as days below).
atm = {}
for name in casenames:
    path = casepaths[name] + casenames[name] + '.cam.h0.nc'
    print('Attempting to open the dataset ', path)
    atm[name] = xr.open_dataset(path, decode_times=False)
# ### A plot of the prescribed CO2 concentrations in the coupled simulations
# Model time is numeric days (decode_times=False above), so convert to years.
days_per_year = 365
fig, ax = plt.subplots()
# co2vmr is a volume mixing ratio; *1E6 converts it to parts per million.
for name in ['cpl_control', 'cpl_CO2ramp']:
    ax.plot(atm[name].time/days_per_year, atm[name].co2vmr*1E6, label=name)
ax.set_title('CO2 volume mixing ratio (CESM coupled simulations)')
ax.set_xlabel('Years')
ax.set_ylabel('pCO2 (ppm)')
ax.grid()
ax.legend();
# Issues to think about:
#
# - Why do we talk about fractional changes in CO2, such as "doubling atmospheric CO2", and "1%/year compounded CO2 increase?
# - Why not instead talk about changes in absolute amounts of CO2?
#
# The answer is closely related to the fact that the **radiative forcing** associated with CO2 increase is approximately **logarithmic** in CO2 amount. So a doubling of CO2 represents roughly the same radiative forcing *regardless of the initial CO2 concentration*.
# ## Compute and plot time series of global, annual mean near-surface air temperature in all four simulations
# The surface air temperature, which we will use for our sensitivity metrics
atm['cpl_control'].TREFHT
# The area weighting needed for global averaging
# (gw varies with latitude only -- presumably the model's latitude weights;
#  TODO confirm against the CAM output metadata)
gw = atm['som_control'].gw
print(gw)
def global_mean(field, weight=gw):
    '''Return the area-weighted global average of the input field.

    `weight` defaults to the latitude weights `gw` loaded above; dividing by
    the latitude-mean of the weights normalizes the weighted average.
    '''
    weighted = field * weight
    numerator = weighted.mean(dim=('lat', 'lon'))
    normalization = weight.mean(dim='lat')
    return numerator / normalization
# Loop through the four simulations and produce the global mean timeseries,
# keyed by the short experiment name.
TREFHT_global = {}
for name in casenames:
    TREFHT_global[name] = global_mean(atm[name].TREFHT)
# #### Make some pretty timeseries plots, including an **approximate** running annual average
# Two stacked panels: coupled runs on top, slab-ocean runs below.
fig, axes = plt.subplots(2,1,figsize=(10,8))
for name in casenames:
    if 'cpl' in name:
        ax = axes[0]
        ax.set_title('Fully coupled ocean')
    else:
        ax = axes[1]
        ax.set_title('Slab ocean')
    field = TREFHT_global[name]
    # approximate annual mean: unweighted centered 12-month rolling window
    field_running = field.rolling(time=12, center=True).mean()
    # thin line = monthly values; thick line in the same color = running mean
    line = ax.plot(field.time / days_per_year,
                   field,
                   label=name,
                   linewidth=0.75,
                   )
    ax.plot(field_running.time / days_per_year,
            field_running,
            color=line[0].get_color(),
            linewidth=2,
            )
for ax in axes:
    ax.legend();
    ax.set_xlabel('Years')
    ax.set_ylabel('Temperature (K)')
    ax.grid();
    ax.set_xlim(0,100)
fig.suptitle('Global mean surface air temperature in CESM simulations', fontsize=16);
# Issues to think about here include:
#
# - Why is the annual average here only approximate? *(think about the calendar)*
# - Why is there an annual cycle in the global average temperature? (planet is coldest during NH winter)
# - Different character of the temperature **variability** in the coupled vs. slab model
# - Much more rapid warming in the Slab Ocean Model
# ## Now we can work on computing ECS and TCR
# Climatology windows: average the last 10 years (120 months) of the slab
# runs and the last 20 years (240 months) of the coupled runs.
nyears_slab = 10
nyears_cpl = 20
clim_slice_slab = slice(-(nyears_slab*12),None)
clim_slice_cpl = slice(-(nyears_cpl*12),None)
# extract the last 10 years from the slab ocean control simulation
T0_slab = TREFHT_global['som_control'].isel(time=clim_slice_slab).mean(dim='time')
T0_slab
# and the last 20 years from the coupled control
T0_cpl = TREFHT_global['cpl_control'].isel(time=clim_slice_cpl).mean(dim='time')
T0_cpl
# extract the last 10 years from the slab 2xCO2 simulation
T2x_slab = TREFHT_global['som_2xCO2'].isel(time=clim_slice_slab).mean(dim='time')
T2x_slab
# extract the last 20 years from the coupled CO2 ramp simulation
T2x_cpl = TREFHT_global['cpl_CO2ramp'].isel(time=clim_slice_cpl).mean(dim='time')
T2x_cpl
# ECS: equilibrium warming of the slab-ocean model under doubled CO2.
ECS = T2x_slab - T0_slab
# TCR: coupled-model warming at the end of the 1%/yr CO2 ramp vs. control.
TCR = T2x_cpl - T0_cpl
print('The Equilibrium Climate Sensitivity is {:.3} K.'.format(float(ECS)))
print('The Transient Climate Response is {:.3} K.'.format(float(TCR)))
# ## Some CMIP climate sensitivity results to compare against
# <img src='http://www.climatechange2013.org/images/figures/WGI_AR5_Fig9-43.jpg' width=800>
# <img src='../images/AR5_Table9.5.png'>
# Comparing against the multi-model mean of the ECS and TCR, our model is apparently slightly less sensitive than the CMIP5 mean.
# ## Let's make some maps to compare spatial patterns of transient vs. equilibrium warming
# Here is a helper function that takes a 2D lat/lon field and renders it as a nice contour map with accompanying zonal average line plot.
# The map projection capabilities come from the cartopy package. There are many possible projections
import cartopy.crs as ccrs
def make_map(field):
    """Render *field* (a 2D lat/lon xarray.DataArray) as a filled contour map.

    Lays out a Robinson-projection map with a horizontal colorbar beneath it
    and a zonal-mean line plot in the right-hand column.

    Returns
    -------
    (figure, (map_axes, line_axes, colorbar_axes), contour_set)
    """
    fig = plt.figure(figsize=(14, 6))
    grid = (10, 3)
    # Map fills the upper-left cells, colorbar sits on the bottom row
    # beneath it, and the zonal-mean profile takes the right column.
    ax_map = plt.subplot2grid(grid, (0, 0), colspan=grid[1] - 1,
                              rowspan=grid[0] - 1, projection=ccrs.Robinson())
    ax_bar = plt.subplot2grid(grid, (grid[0] - 1, 0), colspan=grid[1] - 1)
    ax_line = plt.subplot2grid(grid, (0, grid[1] - 1), rowspan=grid[0] - 1)
    contours = ax_map.contourf(field.lon, field.lat, field,
                               transform=ccrs.PlateCarree())
    ax_map.set_global()
    ax_map.coastlines()
    plt.colorbar(contours, cax=ax_bar, orientation='horizontal')
    ax_line.plot(field.mean(dim='lon'), field.lat)
    ax_line.set_ylabel('Latitude')
    ax_line.grid()
    return fig, (ax_map, ax_line, ax_bar), contours
# Plot a single time slice of surface air temperature just as example
fig, axes, cx = make_map(atm['cpl_control'].TREFHT.isel(time=0))
# ### Make maps of the surface air temperature anomaly due to CO2 doubling in both the slab and coupled models
# +
# Climatological (time-mean) surface air temperature fields for each run,
# using the same averaging windows defined earlier for the global means.
Tmap_cpl_2x = atm['cpl_CO2ramp'].TREFHT.isel(time=clim_slice_cpl).mean(dim='time')
Tmap_cpl_control = atm['cpl_control'].TREFHT.isel(time=clim_slice_cpl).mean(dim='time')
DeltaT_cpl = Tmap_cpl_2x - Tmap_cpl_control
Tmap_som_2x = atm['som_2xCO2'].TREFHT.isel(time=clim_slice_slab).mean(dim='time')
Tmap_som_control = atm['som_control'].TREFHT.isel(time=clim_slice_slab).mean(dim='time')
DeltaT_som = Tmap_som_2x - Tmap_som_control
# +
fig, axes, cx = make_map(DeltaT_cpl)
fig.suptitle('Surface air temperature anomaly (coupled transient)', fontsize=16);
axes[1].set_xlim(0,7) # ensure the line plots have same axes
cx.set_clim([0, 8]) # ensure the contour maps have the same color intervals
fig, axes,cx = make_map(DeltaT_som)
fig.suptitle('Surface air temperature anomaly (equilibrium SOM)', fontsize=16);
axes[1].set_xlim(0,7) # same limits as the transient panel for comparison
cx.set_clim([0, 8])
# -
# Lots of interesting phenomena to think about here, including:
#
# - Polar amplification of surface warming
# - Reduction in equator-to-pole temperature gradients
# - Much larger polar amplification in SOM than in transient -- especially over the Southern Ocean (the *delayed warming of the Southern Ocean*)
# - North Atlantic *warming hole* present in transient but not in equilibrium SOM.
# - Land-ocean warming contrast: larger in transient, but still present in equilibrium
# ## Homework assignment
#
# Continue to compare the transient and equilibrium responses in these CESM simulations.
#
# Specifically, I would like you to examine the following:
#
# 1. Top-of-atmosphere energy budget:
# - Calculate the global mean **net TOA energy flux** for all four simulations.
# - Which ones are close to zero in the global average, and which are not?
# - Make spatial maps of the **change in ASR** and the **change in OLR** after doubling CO2 (both transient and equilibrium).
# - Repeat for the **clear-sky** component of those changes.
# - Comment on what you found in your maps:
# - Are there any discernible spatial patterns in ASR and OLR changes?
# - What about the clear sky components?
# - Can you relate any of these results to the surface warming maps we created above?
#
# 2. The hydrological cycle:
# - precipitation
# - evaporation
# - P-E
# - For each of these quantities, plot the anomalies two different ways:
# - Absolute changes
# - Normalized changes in %/K (normalized by the global mean warming)
# - Comment on which of these two perspectives seems more useful, and why.
#
# 3. Land - ocean warming contrast:
# - Make line plots of the zonal average surface air temperature change **over land only** and **over ocean only**.
#
# For all your results, please make an effort to point out any interesting or surprising results.
# ____________
# ## Version information
# ____________
# %load_ext version_information
# %version_information numpy, matplotlib, xarray, cartopy
# ____________
#
# ## Credits
#
# The author of this notebook is [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany.
#
# It was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php)
#
# Development of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to <NAME>. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation.
# ____________
# ## Appendix: for later reference, here is how you can open the other output types
#
# The following will open the rest of the CESM output (land, sea ice, river routing, ocean).
#
# These are not needed for the above homework assignment, but may be useful later on.
# +
# # make a dictionary of all the CLM land model output
# land = {}
# for name in casenames:
# path = casepaths[name] + casenames[name] + '.clm2.h0.nc'
# print('Attempting to open the dataset ', path)
# land[name] = xr.open_dataset(path)
# +
# # make a dictionary of all the sea ice model output
# ice = {}
# for name in casenames:
# path = casepaths[name] + casenames[name] + '.cice.h.nc'
# print('Attempting to open the dataset ', path)
# ice[name] = xr.open_dataset(path)
# +
# # make a dictionary of all the river transport output
# rtm = {}
# for name in casenames:
# path = casepaths[name] + casenames[name] + '.rtm.h0.nc'
# print('Attempting to open the dataset ', path)
# rtm[name] = xr.open_dataset(path)
# +
# ocn = {}
# for name in casenames:
# if 'cpl' in name:
# path = casepaths[name] + casenames[name] + '.pop.h.nc'
# print('Attempting to open the dataset ', path)
# ocn[name] = xr.open_dataset(path)
# -
|
Lectures/Lecture12 -- CESM climate sensitivity.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# name: python3
# ---
# + [markdown] id="OTYxS9G6FYmU"
#
#
# ---
#
# ## Modules Required
#
# ---
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="OLtjFJdl4uT2" outputId="257e0332-987d-4d84-e179-1cd13fd4f7d2"
# %load_ext autoreload
# + id="3yx5uvr_4uT3"
# %autoreload 2
# + id="2HHg-AcUtt-Q"
import pandas as pd
import numpy as np
from sklearn.feature_selection import chi2 , f_classif , mutual_info_classif , SelectKBest
import zipfile as zp
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score,confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import itertools
import matplotlib.pyplot as plt
from timeit import default_timer as timer
import seaborn as sns
from config import global_list,global_fs,clfs,feature_size
from train import different_size, compute_accuracy,feature_methods,compute_roc_auc
from visualisation import style_time,style_accuracy_row_wise,style_accuracy_col_wise,make_bar_graph
# + [markdown] id="FG2p4qfyFqo2"
#
#
# ---
#
#
# ## Importing Data
#
#
#
# ---
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="UWxX85YPvz_i" outputId="f35be945-eb96-469c-8751-8a0b9d466366"
# Load the fused AndroZoo + CICMalDroid feature matrix
# (permissions + actions + services columns).
df = pd.read_csv('/Users/arvindchandrasekarreddy/Desktop/PythonProjects/malware_detection/Evaluating dataset /Fusion_AndroZoo_CICMalDroid/permissions+actions+services.csv' , index_col= 0 )
# Keep an untouched copy so misclassified samples can be looked up later.
df_copy = df.copy( deep=True )
labels = df['class_label']
# Drop the target columns so df holds features only.
df.drop(['class_label','sub_class'] , axis = 1 , inplace=True )
#df.drop(['class_label'] , axis = 1 , inplace=True )
display(df.head(5))
# + colab={"base_uri": "https://localhost:8080/"} id="UiGTLyYIwjn7" outputId="32e787c7-b748-4a37-9750-fb31579a5ecd"
print( 'shape of df (after removing the class ) = ' , df.shape)
print( 'shape of df_copy = ' , df_copy.shape)
# + [markdown] id="hTlgsZjDGODe"
#
#
# ---
#
# ## Model Building
#
# ---
#
#
# + id="dfb_Z8Gn8UTV"
# Grid search: every classifier x every feature-selection method x every
# candidate feature-set size (clfs/feature_size come from config.py);
# each combination's result record is collected into main_list.
main_list = []
fs_methods = feature_methods( df,labels )
for index,each_clf in enumerate( clfs ):
    for index_fs,each_fs in enumerate( fs_methods ):
        for each_size in feature_size:
            local_d = different_size(each_size ,each_clf ,index , index_fs,each_fs,labels )
            main_list.append(local_d)
# + [markdown] id="3aU8PDFDBNbY"
# ---
# ## Visualisations
#
# ---
#
#
#
#
# + [markdown] id="btorZFAgOu2c"
#
#
# >### Visualising when Feature selection is increased in a serial Fashion
#
#
# + id="Bv24KTAEDa4A"
#main_list
# + id="e0LWie1XDX1u"
# Collect the grid-search records into a DataFrame for inspection.
multiples_of_10 = pd.DataFrame(main_list)
multiples_of_10
# -
style_accuracy_col_wise(multiples_of_10)
# + id="zGur0fyIJwE4"
# Persist the results so the analysis can be re-run from CSV.
multiples_of_10.to_csv('/Users/arvindchandrasekarreddy/Desktop/PythonProjects/malware_detection/Evaluating dataset /results_fusion_datasets/permissions+actions+services_results.csv')
# + id="lz-lt4mFF3oH"
# Reload previously saved result tables (paths relative to the runtime).
multiples_of_10 = pd.read_csv('multiples_of_10.csv')
set_5000 = pd.read_csv('5000_results.csv' , index_col= 0 )
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="zTt0hKB3L6op" outputId="55abab55-da19-4617-ce4a-d1d4b17ab0c4"
#multiples_of_10.pivot( index=['clf','fs'] , columns= 'size' , values='model_time' ).columns
style_time(multiples_of_10)
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="X7c4fu8xMZ1P" outputId="a7de8fd1-bbd1-4973-f966-f8ef50b668b7"
style_accuracy_col_wise(multiples_of_10)
# + colab={"base_uri": "https://localhost:8080/"} id="PBGF-_KbcHVk" outputId="825e00ab-9af5-4230-819f-d33aafd4617c"
#trying to reproduce results
# NOTE(review): this rebinds `chi2`, shadowing sklearn's chi2 imported at
# the top of the notebook -- any later SelectKBest(chi2, ...) call will
# receive the wrong object.
chi2 , fmeasure , mutual = fs_methods
X_train, X_test, y_train, y_test = train_test_split(fmeasure[:,:6400] , labels , random_state = 0 );
# NOTE(review): a DecisionTreeClassifier is passed with the label 'rf' --
# confirm whether a different label was intended.
compute_accuracy( DecisionTreeClassifier( random_state = 0 ) , 'rf' ,X_train,y_train, X_test, y_test )
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="DkUwCig-M7Lv" outputId="9d5eb89e-6f2a-463e-92b1-544373518249"
style_accuracy_row_wise(multiples_of_10)
# + [markdown] id="1UMbB7FbPBD7"
#
#
# > ### Finding accuracy for all the features
#
#
# + [markdown] id="0LV685aTH-Hp"
#
#
# ---
#
#
# ## ML on raw data without Feature selection
#
# ---
#
#
# + id="fk1_gss3IIW3"
# Baseline: train each classifier on the full, unreduced feature matrix.
No_feature_selection = []
X_train, X_test, y_train, y_test = train_test_split(df , labels , random_state = 0 );
for each,clf in enumerate( clfs ):
    result = compute_accuracy(clf , global_list[each] ,X_train, y_train,X_test,y_test)
    No_feature_selection.append(result)
# + id="InWr5khlYuBc"
# Index by classifier name and order by accuracy for plotting.
df_No_feature_selection = pd.DataFrame(No_feature_selection)
df_No_feature_selection = df_No_feature_selection.set_index('clf')
df_No_feature_selection = df_No_feature_selection.sort_values('accuracy')
# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="EOYt5aeCbL9h" outputId="be492f5c-c397-4fb7-b125-333fdf8b5664"
make_bar_graph( df_No_feature_selection['accuracy'] , 5 , 5 );
# + colab={"base_uri": "https://localhost:8080/"} id="r5HeQZMuAYkO" outputId="2e64dd0d-d1d2-4cf7-b878-a049aae4f851"
# Best accuracy reached at each feature-set size, maximized over all
# (classifier, feature-selection) combinations.
multiples_of_10.pivot( index=[ 'clf' , 'fs'] , columns= 'size' , values='accuracy' ).apply( np.max ).sort_values()
# + id="Ln_QEaqhdqTE"
make_bar_graph( multiples_of_10.pivot( index=[ 'clf' , 'fs'] , columns= 'size' , values='accuracy' ).apply( np.max ).sort_values() , 8 ,8 );
# + [markdown] id="4lQtG4iSqwdX"
#
#
# ---
#
#
# ## Result Evaluation
#
# ---
#
#
# + [markdown] id="dfq1grCVKtCg"
#
#
# > ### **APPLYING RANDOM FOREST**
#
#
# + id="YgedhHjQrvNj"
from sklearn.naive_bayes import GaussianNB  # NOTE(review): imported but never used in this cell
# Reduce to the 400 best features by chi-squared score.
# NOTE(review): an earlier cell rebinds `chi2` to a feature matrix
# ("chi2 , fmeasure , mutual = fs_methods"); if that cell has run first,
# SelectKBest receives the wrong object -- confirm execution order.
chi_400 = SelectKBest(chi2, k = 400).fit_transform( df , labels )
# Carry the row indices through the split so misclassified samples can be
# traced back to their original rows later.
indices = df.index
X_train, X_test, y_train, y_test, idx1, idx2 = train_test_split(chi_400, labels, indices)
# + colab={"base_uri": "https://localhost:8080/"} id="R6H-so-B2gFi" outputId="8220c773-927a-49d1-8a6b-968d6ddd0378"
#X_train, X_test, y_train, y_test = train_test_split(chi_400 , labels );
# Renamed from `gnb_clf`: the estimator is a RandomForestClassifier, not a
# GaussianNB -- the old name was misleading.
rf_clf = RandomForestClassifier()
model = rf_clf.fit(X_train , y_train)
prediction = model.predict(X_test)
print(format((accuracy_score( prediction , y_test) * 100),'.2f'))
# + [markdown] id="JMfV54-aK0pZ"
#
#
# > ### *CONFUSION MATRIX FOR RANDOM FOREST*
#
# + colab={"base_uri": "https://localhost:8080/"} id="vQacmerpK0Li" outputId="e93e9d03-9f9e-43bb-f5aa-76e94bb5ebbb"
confusion_matrix( prediction , y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="LxgHP-Q_MEhP" outputId="1204c0d5-fd57-4c3f-9f91-d4e411b2b727"
# Class balance of the held-out test set.
y_test.groupby(y_test).size()
# + [markdown] id="7WQH4kfhLhVi"
#
#
# > ### **Trying to figure out which observations are being flagged wrongly**
#
# + id="OyuQEAkL2mKw"
# Align true labels and predictions with the original sample indices (idx2)
# so individual misclassified rows can be inspected.
res = pd.DataFrame( data = np.c_[ list(y_test) , list(prediction)] , columns= ['y_test','prediction'] , index = list(idx2) )
res = res.astype( { 'y_test' : int , 'prediction' : int } )
# + colab={"base_uri": "https://localhost:8080/", "height": 414} id="zWWzAT_k5C1z" outputId="f29ea22a-2c63-486b-85ef-33b742b3f594"
# False negatives, i.e. malware wrongly classified as benign.
res[ (res.y_test == 1) & (res.prediction == 0) ]
# Check that the wrongly flagged samples are present in df_copy.
df_copy.loc[ res[ (res.y_test == 1) & (res.prediction == 0) ].index ].index
# + [markdown] id="BZQtYzFPMtqc"
#
#
# > ***Visualising the number of vt_detections for wrongly flagged instances***
#
#
# + id="QGuKbHbn_4oQ"
# VirusTotal metadata for known-bad samples, keyed by sha256.
bad_ass = pd.read_csv('/content/sub_set_bad_3272.csv', index_col=0 , usecols = ['sha256','vt_detection'] )
# + id="J222M51aBpbc"
# Misclassified-malware indices that also appear in the known-bad metadata.
intersected = set(df_copy.loc[res[ (res.y_test == 1) & (res.prediction == 0) ].index].index).intersection(set(bad_ass.index))
# + colab={"base_uri": "https://localhost:8080/", "height": 445} id="cLgeNIsEDPEq" outputId="ae8bad04-622b-4a50-d8f5-911d1689f2b0"
# Renamed from `worng_flagged` (typo): the malware samples the model missed.
wrong_flagged = bad_ass.loc[list(intersected)]
wrong_flagged
# + id="P3DnNaWFIAOA"
vt_scan_reports = pd.read_csv('/content/vt_scan_reports.csv' , index_col=0 , names=['vt_detection'] , skiprows = 1 )
# + colab={"base_uri": "https://localhost:8080/", "height": 514} id="1rhMrDN3IIpY" outputId="59d6b256-a1ca-4ac7-b363-d2dbd9f79883"
#plt.figure( figsize=(8,8) )
# Compare vt_detection distributions: full scan set vs. missed samples.
f , (ax1,ax2) = plt.subplots(1,2,figsize=(18,8) , sharex=True)
sns.countplot( data = vt_scan_reports,y = 'vt_detection',ax=ax1)
sns.countplot( data= wrong_flagged , y = 'vt_detection' , ax=ax2)
# + [markdown] id="MX0NNoExNWLa"
#
#
# 1. The scan report from VirusTotal proves that the positive rate is not up to the mark
# 2. May need to scan all the SHA keys using VirusTotal before we proceed to action
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 355} id="_5WvxyylN4nu" outputId="2ac50b97-5c99-432f-fffa-dc8afbb1f099"
# Samples with fewer than 5 VirusTotal detections (borderline/weak labels).
vt_scan_reports[vt_scan_reports.vt_detection < 5]
# + [markdown] id="KGTK6AEthvQf"
#
# + [markdown] id="Gmtl7zX9DJio"
# ## Final
# + id="WSzWEbBIDRSM"
# Reload the grid-search run performed with fixed random_state=0.
multiples_of_10_RANDOM = pd.read_csv('/content/multiples_of_10_RANDOM=0.csv' , index_col=0 )
# + colab={"base_uri": "https://localhost:8080/", "height": 447} id="YBNn5EEJDk8G" outputId="f43cf952-05f5-4d76-b6d9-d8b05301a3d3"
# Accuracy per (classifier, feature-selection method) at each feature size.
multiples_of_10_RANDOM.pivot( index = [ 'clf' , 'fs' ] , columns = 'size' , values = 'accuracy' )
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="8s0boVkODxX0" outputId="f3d848ed-4fda-4cd3-861c-76c329e52d60"
style_time(multiples_of_10_RANDOM)
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="aYJNe1GvD5IM" outputId="57e1f8f4-0d6d-472c-fdda-76c348c1d25f"
style_accuracy_col_wise(multiples_of_10_RANDOM)
# + id="Gkk4o66kJB67"
# + [markdown] id="OkF2hBQ4JsOg"
# ## Performance Analysis for best performing models
# + id="q4g1w2jYsbEE"
# ROC/AUC for the baseline (all features) and the two best feature subsets.
# NOTE(review): `fmeasure` comes from the earlier
# "chi2 , fmeasure , mutual = fs_methods" cell, which must have run first.
raw_results = compute_roc_auc( RandomForestClassifier( random_state=0 ) , 'rf' ,df , labels )
best_results = compute_roc_auc( RandomForestClassifier( random_state=0 ) , 'rf' ,fmeasure[ : , : 3600 ] , labels )
second_best_results = compute_roc_auc( RandomForestClassifier( random_state=0 ) , 'rf' ,fmeasure[ : , : 900 ] , labels )
# + colab={"base_uri": "https://localhost:8080/", "height": 294} id="dGix2DT2tjZ0" outputId="a695c06e-7bec-4a88-f418-08ff1ac463d7"
# One ROC curve per model variant. The three panels were originally
# copy-pasted code; a loop removes the duplication without changing output.
f , (ax1,ax2,ax3) = plt.subplots(1,3,figsize = (12,4))
roc_panels = [
    (ax1, 'Raw Data(17077)', raw_results),
    (ax2, 'Random Forest(3600)', best_results),
    (ax3, 'Random Forest(900)', second_best_results),
]
for ax, title, res in roc_panels:
    ax.set_title(title)
    ax.plot(res['fprRD'], res['tprRD'], 'b', label = 'AUC = %0.4f' % res['roc_aucRD'])
    ax.legend(loc = 'lower right')
    ax.plot([0, 1], [0, 1],'r--')  # chance diagonal
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
    ax.set_ylabel('True Positive Rate')
    ax.set_xlabel('False Positive Rate')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="4BYa8ON7xBf8" outputId="7ce0cb8c-9ba1-4c4e-c346-b7ba9da8b79a"
# Confusion-matrix heatmaps for the same three model variants; the three
# near-identical subplot blocks are collapsed into one loop.
plt.figure( figsize= (9,3) )
tick_labels = ['Benign(0)','Malware(1)']
heatmap_specs = [
    (131, 'Raw Data(17077)', raw_results, 'Greys'),
    (132, 'Random Forest(3600)', best_results, 'Greens'),
    (133, 'Random Forest(900)', second_best_results, 'Oranges'),
]
for position, title, res, cmap in heatmap_specs:
    ax = plt.subplot(position)
    sns.heatmap(res['Conf'], cbar = False, annot = True, fmt='.1f',
                xticklabels=tick_labels, yticklabels=tick_labels, cmap = cmap)
    ax.set_title(title)
|
Scripts/main/Malware_detection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tensorflow]
# language: python
# name: conda-env-tensorflow-py
# ---
# + id="y-KtZcaWB02O" colab_type="code" colab={}
from unittest import mock
import os
import numpy as np
import tensorflow as tf
import json
import matplotlib.pyplot as plt
from tensorflow.python.estimator import estimator
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverHook, meta_graph
from tensorflow.python.platform import tf_logging as logging
from google.colab import drive, auth
# + id="CCzCaKLPB02f" colab_type="code" colab={}
#################################### SETUP #####################################
def setup():
    """Mount Google Drive (for saving sample images) and authenticate the Colab user."""
    drive.mount('/content/gdrive')
    auth.authenticate_user()

def upload_credentials():
    """Pass the user's GCS credentials to the TPU so it can access the bucket."""
    # Upload credentials to TPU.
    with tf.Session(TF_MASTER) as sess:
        with open('/content/adc.json', 'r') as f:
            auth_info = json.load(f)
        tf.contrib.cloud.configure_gcs(sess, credentials=auth_info)
# + id="r2ycJsJCB02o" colab_type="code" colab={}
################################# DATA INPUT ###################################
def parser(serialized_example):
    """Parses a single example into image and label tensors.

    Returns (input_image, result_image): the transformed image fed to the
    generator and the original image it should reconstruct. Both are
    48x48xCHANNELS float32 tensors normalized to [-1, 1] (stored data is
    RGBA uint8; only the first CHANNELS channels are kept).
    """
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'image_transformed': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64)  # label is unused
        })
    result_image = tf.decode_raw(features['image_raw'], tf.uint8)
    result_image.set_shape([48 * 48 * 4])
    result_image = tf.reshape(result_image, [48, 48, 4])[:,:,:CHANNELS]
    # Normalize the values of the image from [0, 255] to [-1.0, 1.0]
    result_image = tf.cast(result_image, dtype=tf.float32) / 127.5 - 1
    input_image = tf.decode_raw(features['image_transformed'], tf.uint8)
    input_image.set_shape([48 * 48 * 4])
    input_image = tf.reshape(input_image, [48, 48, 4])[:,:,:CHANNELS]
    # Normalize the values of the image from [0, 255] to [-1.0, 1.0]
    input_image = tf.cast(input_image, dtype=tf.float32) / 127.5 - 1
    return input_image, result_image
def make_input_fn(is_training=True):
    """Return a TPUEstimator input_fn yielding ({'image_input', 'image_result'}, None).

    Both tensors are float32 in [-1, 1]. The training variant repeats the
    dataset indefinitely and can optionally add Gaussian noise to the
    generator inputs (ADD_NOISE_TO_EXAMPLE).
    """
    def input_fn(params):
        batch_size = params['batch_size']  # supplied by TPUEstimator
        dataset = tf.data.TFRecordDataset(data_file).map(parser).cache().shuffle(batch_size)
        if is_training:
            dataset = dataset.repeat()
        # drop_remainder=True: fixed batch shapes are required on TPU.
        input_images, result_images = dataset.prefetch(batch_size).batch(batch_size, drop_remainder=True).make_one_shot_iterator().get_next()
        if ADD_NOISE_TO_EXAMPLE:
            input_images += tf.random_normal(shape=tf.shape(input_images), mean=0.0, stddev=0.1, dtype=tf.float32)
        features = {
            'image_input': input_images,
            'image_result': result_images,
        }
        return features, None
    return input_fn
def predict_input_fn(params):
    """PREDICT input_fn: batches of transformed images from the training file (targets dropped)."""
    batch_size = params['batch_size']
    dataset = tf.data.TFRecordDataset(data_file).map(parser).cache().shuffle(batch_size)
    input_images, _ = dataset.prefetch(batch_size).batch(batch_size, drop_remainder=True).make_one_shot_iterator().get_next()
    return {'image_input': input_images}, None

# Alternative prediction source: the 'margonem' character sprites.
margonem_data_file = 'gs://tputestingmnist/datasets/characters_margonem_conditional_7.tfrecords'

def margonem_predict_input_fn(params):
    """Same as predict_input_fn but reads from margonem_data_file."""
    batch_size = params['batch_size']
    dataset = tf.data.TFRecordDataset(margonem_data_file).map(parser).cache().shuffle(batch_size)
    input_images, _ = dataset.prefetch(batch_size).batch(batch_size, drop_remainder=True).make_one_shot_iterator().get_next()
    return {'image_input': input_images}, None

# def noise_input_fn(params):
#     noise_dataset = tf.data.Dataset.from_tensors(tf.constant(np.random.uniform(-1, 1, (params['batch_size'], LATENT_DIM)), dtype=tf.float32))
#     return {'random_noise': noise_dataset.make_one_shot_iterator().get_next()}, None
# + id="P6i0DewZB02v" colab_type="code" colab={}
############################### DATA SAVEING ###################################
def images_to_zero_one(images):
    """Map generator output from [-1, 1] back to displayable [0, 1] floats."""
    rescaled = np.asarray(images) * 0.5 + 0.5
    return np.clip(rescaled, 0.0, 1.0)
def save_imgs(epoch, images):
    """Save an R x C grid of generated images to Google Drive as '{epoch}.png'.

    Parameters
    ----------
    epoch : value used only to name the output file.
    images : sequence of at least R*C images in the generator's [-1, 1] range.
    """
    # makedirs(..., exist_ok=True) replaces the old exists()+mkdir pair:
    # it is race-free and also creates missing parent directories
    # (os.mkdir raised if GOOGLE_DRIVE_DIR's parent did not exist).
    os.makedirs(GOOGLE_DRIVE_DIR, exist_ok=True)
    # Rescale images to 0 - 1
    images = images_to_zero_one(images)
    fig, axs = plt.subplots(R, C, figsize=(20,20))
    for i in range(R):
        for j in range(C):
            axs[i,j].imshow(images[C*i + j])
            axs[i,j].axis('off')
    fig.savefig(os.path.join(GOOGLE_DRIVE_DIR, '{}.png'.format(epoch)))
    plt.close()
# + id="khVvoOzKB024" colab_type="code" colab={}
################################## MODEL #######################################
def _leaky_relu(x):
    # LeakyReLU with negative slope ALPHA (module-level constant).
    return tf.nn.leaky_relu(x, alpha=ALPHA)

def _relu(x):
    return tf.nn.relu(x)

def _batch_norm(x, is_training, name):
    # is_training switches between batch statistics and moving averages.
    return tf.layers.batch_normalization(x, momentum=0.8, epsilon=1e-5,
                                         training=is_training, name=name)

def _dense(x, neurons, name, activation=None):
    return tf.layers.dense(x, neurons, name=name, activation=activation,
                           kernel_initializer=tf.truncated_normal_initializer(stddev=0.02))

def _conv2d(x, filters, stride, name, activation=None):
    # KERNEL_SIZE x KERNEL_SIZE convolution with 'same' padding.
    return tf.layers.conv2d(x, filters, [KERNEL_SIZE, KERNEL_SIZE],
                            strides=[stride, stride], activation=activation,
                            padding='same', name=name,
                            kernel_initializer=tf.truncated_normal_initializer(stddev=0.02))

def _deconv2d(x, filters, stride, name, activation=None):
    return tf.layers.conv2d_transpose(x, filters, [KERNEL_SIZE, KERNEL_SIZE],
                                      strides=[stride, stride], activation=activation,
                                      padding='same', name=name,
                                      kernel_initializer=tf.truncated_normal_initializer(stddev=0.02))

def _dropout(x, name):
    return tf.nn.dropout(x, rate=DROPOUT_PROB, name=name)

def convolution_block(x, filters, resize_factor, is_training, index,
                      activation=_leaky_relu, dropout=False, batch_norm=True):
    """Downsampling block: strided conv -> optional batch norm -> optional dropout."""
    x = _conv2d(x, filters=filters, stride=resize_factor, activation=activation, name='conv_{}'.format(index))
    if batch_norm:
        x = _batch_norm(x, is_training, name='bn_conv_{}'.format(index))
    if dropout:
        # NOTE(review): name says 'deconv' inside a conv block -- probably a
        # copy-paste from deconvolution_block; harmless as it is only a name.
        x = _dropout(x, name='drop_deconv_{}'.format(index))
    return x

def deconvolution_block(x, filters, resize_factor, is_training, index,
                        activation=_relu, dropout=False, batch_norm=True):
    """Upsampling block: strided transposed conv -> optional batch norm -> optional dropout."""
    x = _deconv2d(x, filters=filters, stride=resize_factor, activation=activation, name='deconv_{}'.format(index))
    if batch_norm:
        x = _batch_norm(x, is_training, name='bn_deconv_{}'.format(index))
    if dropout:
        x = _dropout(x, name='drop_deconv_{}'.format(index))
    return x
class Pix2Pix:
    """Conditional GAN for 48x48 sprites: convolutional discriminator plus a
    U-Net-style generator with skip connections. tf.AUTO_REUSE lets each
    network be built multiple times in the graph while sharing weights."""

    @staticmethod
    def discriminator(x, is_training=True, scope='Discriminator'):
        """Score a channel-concatenated (input, target-or-fake) image pair;
        returns one unnormalized logit per example."""
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            x = convolution_block(x, 64, 2, is_training, 11, batch_norm=False)
            x = convolution_block(x, 128, 2, is_training, 12)
            x = convolution_block(x, 256, 2, is_training, 13)
            x = convolution_block(x, 512, 2, is_training, 14)
            x = tf.layers.Flatten()(x)
            x = _dense(x, neurons=1, name='d_dense')
            return x

    @staticmethod
    def generator(image, is_training=True, scope='Generator'):
        """U-Net generator: 4 downsampling blocks, then 4 upsampling blocks
        with skip connections; tanh output keeps pixels in [-1, 1]."""
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            x1 = convolution_block(image, 64, 2, is_training, 11, dropout=False, batch_norm=False)
            x2 = convolution_block(x1, 128, 2, is_training, 12, dropout=False)
            x3 = convolution_block(x2, 256, 2, is_training, 13, dropout=False)
            x4 = convolution_block(x3, 512, 2, is_training, 14, dropout=False)
            x5 = deconvolution_block(x4, 512, 2, is_training, 21)
            # Skip connections: concatenate matching encoder features.
            x6 = deconvolution_block(tf.concat([x3, x5], axis=3), 256, 2, is_training, 22)
            x7 = deconvolution_block(tf.concat([x2, x6], axis=3), 128, 2, is_training, 23)
            x8 = deconvolution_block(tf.concat([x1, x7], axis=3), CHANNELS, 2, is_training, 24, activation=tf.tanh, batch_norm=False)
            return x8
# + id="Gcr7UF8SB029" colab_type="code" colab={}
################################ MODEL FUN #####################################
def make_model_fn(model):
    """Build a TPUEstimator model_fn for the given Pix2Pix model.

    The returned model_fn handles all three modes: PREDICT (generator only),
    TRAIN (joint discriminator + generator step) and EVAL (loss and
    discriminator-accuracy metrics).
    """
    def model_fn(features, labels, mode, params):
        # PREDICT #
        if mode == tf.estimator.ModeKeys.PREDICT:
            image_input = features['image_input']
            generated_images = model.generator(image_input, is_training=False)
            predictions = {'generated_images': generated_images, 'image_input': image_input}
            return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)
        image_result = features['image_result']
        image_input = features['image_input']
        generated_images = model.generator(image_input, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
        # Discriminator loss: real pairs (input, target) vs fake pairs
        # (input, generated), concatenated along the channel axis.
        d_on_data_logits = tf.squeeze(model.discriminator(tf.concat([image_input, image_result], axis=3)))
        d_on_data_labels = tf.ones_like(d_on_data_logits)
        d_on_g_logits = tf.squeeze(model.discriminator(tf.concat([image_input, generated_images], axis=3)))
        d_on_g_labels = tf.zeros_like(d_on_g_logits)
        d_loss = tf.contrib.gan.losses.wargs.modified_discriminator_loss(
            discriminator_real_outputs=d_on_data_logits,
            discriminator_gen_outputs=d_on_g_logits,
            reduction=tf.losses.Reduction.NONE,
            label_smoothing=0.2
        )
        # Generator loss (non-saturating GAN loss on the fake logits).
        g_loss = tf.contrib.gan.losses.wargs.modified_generator_loss(
            discriminator_gen_outputs=d_on_g_logits,
            reduction=tf.losses.Reduction.NONE
        )
        # TRAIN #
        if mode == tf.estimator.ModeKeys.TRAIN:
            d_loss = tf.reduce_mean(d_loss)
            g_loss = tf.reduce_mean(g_loss)
            d_optimizer = tf.train.AdamOptimizer(learning_rate=D_LR, beta1=0.5)
            g_optimizer = tf.train.AdamOptimizer(learning_rate=G_LR, beta1=0.5)
            # CrossShardOptimizer aggregates gradients across TPU cores.
            d_optimizer = tf.contrib.tpu.CrossShardOptimizer(d_optimizer)
            g_optimizer = tf.contrib.tpu.CrossShardOptimizer(g_optimizer)
            # Run batch-norm moving-average updates before the optimizer steps;
            # each optimizer only touches its own network's variables.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                d_step = d_optimizer.minimize(d_loss, var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                                                 scope='Discriminator'))
                g_step = g_optimizer.minimize(g_loss, var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                                                 scope='Generator'))
                # Global step is incremented manually since both minimize()
                # calls run within one training step.
                increment_step = tf.assign_add(tf.train.get_or_create_global_step(), 1)
                joint_op = tf.group([d_step, g_step, increment_step])
                return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=g_loss, train_op=joint_op)
        # EVAL #
        elif mode == tf.estimator.ModeKeys.EVAL:
            def _eval_metric_fn(d_loss, g_loss, d_real_labels, d_gen_lanels, d_real_logits, d_gen_logits):
                # Accuracy metrics threshold the sigmoid of the logits at 0.5.
                return {
                    'discriminator_loss': tf.metrics.mean(d_loss),
                    'generator_loss': tf.metrics.mean(g_loss),
                    'discriminator_real_accuracy': tf.metrics.accuracy(labels=d_real_labels, predictions=tf.math.round(tf.sigmoid(d_real_logits))),
                    'discriminator_gen_accuracy': tf.metrics.accuracy(labels=d_gen_lanels, predictions=tf.math.round(tf.sigmoid(d_gen_logits))),
                }
            return tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=tf.reduce_mean(g_loss),
                                                   eval_metrics=(_eval_metric_fn, [d_loss, g_loss, d_on_data_labels,
                                                                                   d_on_g_labels, d_on_data_logits, d_on_g_logits]))
    return model_fn
# + id="-5rb586jB03F" colab_type="code" colab={}
################################ ESTIMATORS ####################################
def make_estimators(model, only_cpu=False):
    """Build (TPU estimator for train/eval, CPU estimator for predict).

    When only_cpu is True the TPU estimator is skipped and None is returned
    in its place; both estimators share the same RunConfig and model_fn.
    """
    model_fn = make_model_fn(model)
    config = tf.contrib.tpu.RunConfig(
        master=TF_MASTER,
        save_checkpoints_steps=EVAL_EPOCHS,
        save_checkpoints_secs=None,
        save_summary_steps=EVAL_EPOCHS,
        model_dir=MODEL_DIR,
        keep_checkpoint_max=3,
        tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=EVAL_EPOCHS))
    if not only_cpu:
        # TPU-based estimator used for TRAIN and EVAL
        est = tf.contrib.tpu.TPUEstimator(
            model_fn=model_fn,
            use_tpu=True,
            config=config,
            train_batch_size=BATCH_SIZE,
            eval_batch_size=BATCH_SIZE)
    else:
        est = None
    # CPU-based estimator used for PREDICT (generating images)
    cpu_est = tf.contrib.tpu.TPUEstimator(
        model_fn=model_fn,
        use_tpu=False,
        config=config,
        predict_batch_size=EXAMPLES)
    return est, cpu_est
# + id="7nIuQqdWB03O" colab_type="code" colab={}
################################# TRAINING #####################################
def train(est, cpu_est):
    """Train in EVAL_EPOCHS-sized chunks up to EPOCHS global steps.

    After each chunk: run one evaluation on the TPU estimator, then generate
    and save a grid of sample images with the CPU estimator. Training resumes
    from the last checkpoint in MODEL_DIR if one exists.
    """
    current_step = estimator._load_global_step_from_checkpoint_dir(MODEL_DIR)
    tf.logging.info('Starting training')
    while current_step < EPOCHS:
        next_checkpoint = int(min(current_step + EVAL_EPOCHS, EPOCHS))
        est.train(input_fn=make_input_fn(), max_steps=next_checkpoint)
        current_step = next_checkpoint
        tf.logging.info('Finished training step %d' % current_step)
        # Evaluation
        metrics = est.evaluate(input_fn=make_input_fn(False), steps=1)
        tf.logging.info('Finished evaluating')
        tf.logging.info(metrics)
        # Render some generated images (input and output side by side).
        generated_iter = cpu_est.predict(input_fn=predict_input_fn)
        images = [np.concatenate([p['image_input'], p['generated_images']], axis=1) for p in generated_iter]
        save_imgs('predict', images)
        tf.logging.info('Finished generating images')
# + id="STjYktznB03T" colab_type="code" colab={}
def do_experiment():
    """End-to-end run: mount Drive, authenticate, build estimators, train."""
    setup()
    upload_credentials()
    estimators = make_estimators(Pix2Pix())
    train(*estimators)
# + id="mS5ajICgBpCO" colab_type="code" colab={}
def predict_on_image(image, cpu_est):
    """Run the generator on a single image and display input/output side by side.

    Parameters
    ----------
    image : array of shape (48, 48, >=CHANNELS). Assumed to already be in the
        generator's [-1, 1] range -- TODO confirm: parser() normalizes
        training data that way, so raw [0, 255]/[0, 1] input would need the
        same rescaling first.
    cpu_est : CPU-based TPUEstimator returned by make_estimators().
    """
    def image_predict_input_fn(params):
        batch_size = params['batch_size']
        # Pad a full batch with zeros; only slot 0 holds the real image.
        # Use CHANNELS (not a hard-coded 4) to match the model's input depth.
        images = np.zeros((batch_size, 48, 48, CHANNELS), dtype=np.float32)
        images[0] = np.asarray(image)[:, :, :CHANNELS]
        # from_tensors() already yields one batch-shaped element, so no extra
        # .batch() call: the original re-batched this single element with
        # drop_remainder=True, which left the dataset empty, and unpacked a
        # single tensor into two values.
        dataset = tf.data.Dataset.from_tensors(images)
        input_images = dataset.make_one_shot_iterator().get_next()
        return {'image_input': input_images}, None

    generated_iter = cpu_est.predict(input_fn=image_predict_input_fn)
    images = [np.concatenate([p['image_input'], p['generated_images']], axis=1) for p in generated_iter]
    # Map from [-1, 1] to [0, 1] so imshow renders the result correctly.
    plt.imshow(images_to_zero_one(images[0]))
# + id="y8nJTGZfCI5h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 368} outputId="b104a1bb-18a2-40e0-ea08-90ddb2a9c53d"
setup()
upload_credentials()
model = Pix2Pix()
# Only the CPU estimator is needed for the interactive prediction below.
_, cpu_est = make_estimators(model, only_cpu=True)
# + id="WDvqThn2CPjf" colab_type="code" colab={}
# Fixed: this cell was syntactically invalid --
#   plt.imread()                  (missing the file path argument)
#   predict_on_image(, cpu_est)   (missing the first argument)
# Load the image explicitly and pass it through the model.
image = plt.imread('input.png')  # TODO(review): point at a real 48x48 RGBA file
predict_on_image(image, cpu_est)
# + id="1vdQ83GWB03c" colab_type="code" colab={}
R, C = 4, 4  # grid of generated examples: R rows x C columns
EXAMPLES = R * C  # predict batch size (one image per grid cell)
CHANNELS = 4  # image channels (RGBA)
LATENT_DIM = 128  # generator latent vector size
ADD_NOISE_TO_EXAMPLE = False
DROPOUT_PROB = 0.5
ALPHA = 0.2  # NOTE(review): presumably a LeakyReLU slope -- confirm in the Pix2Pix model
BATCH_SIZE = 1024
EPOCHS = 100000  # total global steps (train() compares steps, not epochs, against this)
EVAL_EPOCHS = 5000  # steps between eval / checkpoint / summary rounds
G_LR = 0.0002  # generator learning rate
D_LR = 0.0001  # discriminator learning rate
KERNEL_SIZE = 4
RUN_NAME = 'Pix2Pix_2'
margonem_data_file = 'gs://tputestingmnist/datasets/characters_margonem_conditional_7.tfrecords'
data_file = 'gs://tputestingmnist/datasets/characters_conditional_7.tfrecords'
MODEL_DIR = 'gs://tputestingmnist/{}/'.format(RUN_NAME)  # GCS checkpoint/summary dir
GOOGLE_DRIVE_DIR = '/content/gdrive/My Drive/Programowanie/PixelGen/{}'.format(RUN_NAME)
TF_MASTER = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])  # Colab-provided TPU address
# try:
#   do_experiment()
# except Exception as e:
#   print (e)
#   pass
# + id="3eImay3kpN2C" colab_type="code" colab={}
|
notebooks/pix2pix.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# *Contents*
# ===
# - [A database of estates](#A-database-of-estates)
# - [Dictionaries: definition and declaration](#Dictionaries:-definition-and-declaration)
# - [Access](#Access)
# - [Operations](#Operations)
# - [Loops](#Loops)
# A database of estates
# ===
# The elements of a list often have a meaning attached to them. Let's suppose we want to build a database of estates, each defined by
#
# - coordinates as (lon, lat) in decimal degrees
# - extension (sqm)
# - presence of land
# - price, in thousands of Euro.
#
# We can use a list to contain these records; in turn, each estate will be a list of data. Also suppose that, initially, the database is empty, and that it is populated with a new record every time an estate is registered (for example, through the form of a web application).
# +
estates = []  # empty database
e1 = [[12.3456, -56.7890], 2000, False, 10]  # estate registration
estates += [e1]  # adding the estate to the database
e2 = [[-8.9101, 60.1234], 12000, False, 125]
estates += [e2]
e3 = [[45.6789, 10.3456], 100, True, 350]
estates += [e3]

# Each record is positional: [coordinates, extension, has_land, price].
for i, e in enumerate(estates):
    print('\nEstate {}'.format(i))  # '\n' is 'new line'
    print('Coordinates: {}'.format(e[0]))
    # Fixed: the unit is square metres -- 'sqm' as in the text above, not 'smq'.
    print('Extension (sqm): {}'.format(e[1]))
    print('Land: {}'.format('Yes' if e[2] else 'No'))
    print('Price: {}K Euro'.format(e[3]))
# -
# When the features describing an estate become numerous, however, managing them with a list grows troublesome. In fact, to access a specific feature, we need to know its position in the list.
# Dictionaries: definition and declaration
# ---
#
# A *dictionary* is a special Python container where every element has a *key* (a unique identifier) and a *value*. As opposed to a list, a dictionary provides its elements with a semantics, that is, a unique meaning.
# Fixed: the original example repeated `key1` twice, contradicting the rule
# stated just below that a key can't be repeated.  (Illustrative pseudo-code:
# the names are not defined, so this cell is not meant to be executed.)
dictionary_name = {key1: value1, key2: value2}
# The code in the cell above is a general example of declaration.
#
# - we use curly brackets {} to declare a dictionary
# - there can be as many (key, value) pairs as needed
# - a key can't be repeated (each estate has *just one* price, one extension, ...)
# - values can be of any type (including lists and other dictionaries).
#
# Going back to our examples, we can implement the estate database as a list of dictionaries.
# +
estates = []
# Same database as before, but each estate is now a dictionary, so every
# feature is addressed by name instead of by position.
e1 = {'coordinates':[12.3456, -56.7890], 'extension':2000, 'has_land':False, 'price':10}
estates += [e1]
e2 = {'coordinates':[-8.9101, 60.1234], 'extension':12000, 'has_land':False, 'price':125}
estates += [e2]
e3 = {'coordinates':[45.6789, 10.3456], 'extension':100, 'has_land':True, 'price':350}
estates += [e3]
# -

e1
# Access
# ---
# Now we can easily access an estate's feature through the corresponding key.
e1['price']  # feature access by key

# Same report as the list-based version, but each feature is read by name.
for i, e in enumerate(estates):
    print('\nEstate {}'.format(i))
    print('Coordinates: {}'.format(e['coordinates']))
    # Fixed: 'smq' -> 'sqm' (square metres), matching the text above.
    print('Extension (sqm): {}'.format(e['extension']))
    # Fixed: 'Sì' was a leftover from the Italian version of this notebook;
    # the English version prints 'Yes' (as the list-based cell already does).
    print('Land: {}'.format('Yes' if e['has_land'] else 'No'))
    print('Price: {}K Euro'.format(e['price']))
# Such construction has several advantages; here is an example of use in combination with a list comprehension.
[h['price'] for h in estates]
# Operations
# ---
# As we did with lists, we'll have a look at the basic operations we can apply to a dictionary.
#
# Let's change the application domain. An estate agency rents apartments in a seaside town. Each apartment has a name and a price, and the agency wants to create and update a database to keep track of such data.
#
# How many dictionaries do we need?
# +
prices = {}  # database: empty dictionary (apartment name -> weekly price)
prices['Villa Bianca'] = 500  # insertion
prices['Red pine'] = 250
prices['The hovel'] = 100
prices
# -

print('The weekly price of Villa Bianca is {} Euro.'.format(prices['Villa Bianca']))

# The agency has a hard time renting *The hovel*...

# +
prices['The hovel'] = 80  # modification: assigning to an existing key overwrites its value
print('Special offer! The weekly price of The hovel is {} Euro.'.format(prices['The hovel']))
# -
# Loops
# ---
# Key-value pairs can be accessed through the *items* function. Keys and values can also be accessed individually.
# Iterate over (key, value) pairs via items().
for apt, price in prices.items():#key/value
    print('The weekly price of {} is {} Euro'.format(apt, price))
# Iterate over keys only (equivalent to `for apt in prices:`).
for apt in prices.keys():#keys
    print('Apt name:', apt)
# Iterate over values only.
for price in prices.values():#values
    print('Weekly price:', price)
# <script>
# $(document).ready(function(){
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('div.prompt').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#999; background:#fff;">
# Created with Jupyter, delivered by Fastly, rendered by Rackspace.
# </footer>
|
eng/07. Dictionaries.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/env python
# coding: utf-8
# Install the IBM Watson SDK, the word-cloud library and the conversation
# analytics toolkit (older/alternative package names kept commented out).
# #!pip install "watson-developer-cloud"
get_ipython().system('pip install "ibm-watson"')
# #!pip install --upgrade watson-developer-cloud
get_ipython().system('pip install wordcloud')
# #!pip install --wordcloud
# -

get_ipython().system('pip install conversation_analytics_toolkit')
# Standard library
import json
import time  # used by verifica_workspace's retry wait

# Third-party
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd
from IPython.core.display import display, HTML
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator

# IBM Watson / conversation analytics toolkit
import ibm_watson
import conversation_analytics_toolkit
from conversation_analytics_toolkit import wa_assistant_skills
from conversation_analytics_toolkit import transformation
from conversation_analytics_toolkit import filtering2 as filtering
from conversation_analytics_toolkit import analysis
from conversation_analytics_toolkit import visualization
from conversation_analytics_toolkit import selection as vis_selection
from conversation_analytics_toolkit import wa_adaptor
from conversation_analytics_toolkit import transcript
from conversation_analytics_toolkit import flows
from conversation_analytics_toolkit import keyword_analysis
from conversation_analytics_toolkit import sentiment_analysis
# Show long utterance text without truncation in DataFrame output.
pd.options.display.max_colwidth = 150
# Workspace credentials, taken from the Watson Assistant service page.
# NOTE(review): hard-coded API keys/IDs should not live in a notebook --
# load them from environment variables or a secrets store instead.
WAS_WORKSPACE = "ba8da1b8-596f-4627-b935-79b286b55a7d"
WAS_API_KEY = "<KEY>"
WAS_URL = "https://api.us-south.assistant.watson.cloud.ibm.com"
# Authenticate against the Watson Assistant environment created above.
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
authenticator = IAMAuthenticator(WAS_API_KEY)
assistant = AssistantV1(
    version='2020-04-01',
    authenticator=authenticator
)
assistant.set_service_url(WAS_URL)
original_workspace_id = WAS_WORKSPACE
# Check whether a connection to the workspace can be established, waiting
# until the skill has finished training.
def verifica_workspace(check_workspace_id):
    """Poll the Watson Assistant workspace until its status is 'Available'.

    Parameters
    ----------
    check_workspace_id : str
        ID of the workspace/skill to check.

    Fixed: the original body called ``time.sleep`` but ``time`` was never
    imported in this notebook, so the retry path raised NameError; ``time``
    is now imported with the other dependencies at the top of the file.
    NOTE(review): this polls forever if the workspace never becomes
    'Available' -- consider a maximum number of attempts.
    """
    wksp_notready = True
    while wksp_notready:
        print('Testando workspace...' + check_workspace_id)
        workspace = assistant.get_workspace(workspace_id=check_workspace_id).get_result()
        print('Workspace Status: {0}'.format(workspace['status']))
        if workspace['status'] == 'Available':
            wksp_notready = False
            print('Pronto para uso!')
        else:
            print('Em treinamento...aguarde 20 segundos e tente novamente')
            time.sleep(20)
# Print test results highlighted in red.
def printred(str_temp, isbold):
    """Print *str_temp* in red (optionally bold) on an ANSI terminal.

    Fixed: the original body called ``colored`` from the ``termcolor``
    package, which is never imported anywhere in this notebook (NameError
    on first use).  Emitting the equivalent ANSI SGR escape sequences
    directly removes the missing dependency while producing the same
    colored output.
    """
    RED = '\033[31m'
    BOLD = '\033[1m'
    RESET = '\033[0m'
    if isbold:
        print(BOLD + RED + str_temp + RESET)
    else:
        print(RED + str_temp + RESET)
# NOTE(review): this first call lacks .get_result(), so `original_workspace`
# holds the raw SDK response rather than the workspace dict -- it is never
# used afterwards; confirm whether it can be removed.
original_workspace = assistant.get_workspace(workspace_id=original_workspace_id, export=True)
verifica_workspace(original_workspace_id)
# Fetch the workspace (skill) definition, including all content.
workspace=assistant.get_workspace(
    workspace_id=WAS_WORKSPACE,
    export=True
).get_result()
limit_number_of_records=5000
# Filter the logs from May/2021 to June/2021.
query_filter = "response_timestamp>=2021-05-10,response_timestamp<2021-06-30"
# Read and store the logs into a pandas DataFrame.
df_logs = wa_adaptor.read_logs(assistant, WAS_WORKSPACE, limit_number_of_records, query_filter)
# Step 1 - Prepare the skill.
# WAS_WORKSPACE holds the chatbot skill ID.
skill_id = WAS_WORKSPACE
assistant_skills = wa_assistant_skills.WA_Assistant_Skills()
assistant_skills.add_skill(skill_id, workspace)
# Step 2 - Extraction and transformation into the toolkit's canonical format.
df_logs_canonical = transformation.to_canonical_WA_v2(df_logs, assistant_skills, skill_id_field=None, include_nodes_visited_str_types=True, include_context=True)
# Preview the log to be analyzed (shallow copy: underlying data is shared).
df_logs_to_analyze = df_logs_canonical.copy(deep=False)
with pd.option_context('display.max_rows', 5, 'display.max_columns', None):
    display(df_logs_to_analyze.head(33))
# Conversation indicators and metrics for the chatbot.
#
# Start with a flow visualization to measure how conversations progress at
# each turn of the dialog.
title = "Todas as Conversas"
turn_based_path_flows = analysis.aggregate_flows(df_logs_to_analyze, mode="turn-based", on_column="turn_label", max_depth=400, trim_reroutes=False)
# increase the width of the Jupyter output cell
display(HTML("<style>.container { width:95% !important; }</style>"))
config = {
    'commonRootPathName': title, # label for the first root node
    'height': 700, # control the visualization height. Default 600
    'nodeWidth': 250,
    'maxChildrenInNode':10, # control the number of immediate children to show (and collapse rest into *others* node). Default 5
    'linkWidth' : 360, # control the width between pathflow layers. Default 360
    # sortByAttribute controls the chart sorting.
    # Options: flowRatio, dropped_offRatio, flows, dropped_off, rerouted
    'sortByAttribute': 'flowRatio',
    'title': title,
    'mode': "turn-based"
}
jsondata = json.loads(turn_based_path_flows.to_json(orient='records'))
visualization.draw_flowchart(config, jsondata, python_selection_var="selection")
# Daily activity: bucket log rows by the day of their response timestamp.
ano_mes = pd.DatetimeIndex(df_logs_to_analyze['response_timestamp']).to_period('D')
# Interactions per day (every log row counts).
grafico_interacoes = df_logs_to_analyze.groupby(ano_mes).count()[['conversation_id']]
# Accesses per day (distinct conversation IDs).
grafico_pessoas = df_logs_to_analyze.groupby(ano_mes).agg({"conversation_id": pd.Series.nunique})
grafico_pessoas.plot.line(legend=False)
plt.title("Quantidade de Acessos ao Francobot")
plt.xlabel("Dia")
plt.ylabel("Quantidade de Acessos")
grafico_interacoes.plot.line(legend=False)
plt.title("Quantidade de Interações com o Francobot")
plt.xlabel("Dia")
plt.ylabel("Quantidade de Interações")
# Word cloud of the texts users sent to FrancoBot.
nltk.download('stopwords')
text = df_logs_to_analyze.loc[df_logs_to_analyze['request_text'] != '']['request_text']
stopwords = nltk.corpus.stopwords.words('portuguese')
# Drop chatbot menu/navigation words that would otherwise dominate the cloud.
newStopWords = ['Sair', 'Encerramento','Oi', 'Fim', 'Começar']
stopwords.extend(newStopWords)
wordcloud = WordCloud(stopwords=stopwords).generate(' '.join(text))
# Render the generated cloud.
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
|
NB_FRANCOBOT.ipynb
|