code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from UQpy.StochasticProcess import BSRM
from UQpy.RunModel import RunModel
import numpy as np
from scipy.stats import skew
import matplotlib.pyplot as plt
plt.style.use('seaborn')
# The input parameters necessary for the generation of the stochastic processes are given below:
# +
n_sim = 10 # Num of samples (stochastic-process realisations)
n = 1 # Num of dimensions
# Input parameters
T = 600 # Total duration; 1/T = dw, the frequency resolution
nt = 12000 # Num. of discretized time points
F = 1 / T * nt / 2 # Cutoff frequency (Hz); equals 1/(2*dt), the Nyquist frequency of the time grid
nf = 6000 # Num of discretized frequency points
# # Generation of Input Data(Stationary)
dt = T / nt # time step
t = np.linspace(0, T - dt, nt) # time grid
df = F / nf # frequency step
f = np.linspace(0, F - df, nf) # frequency grid
# -
# Defining the Power Spectral Density($S$)
# +
# Power spectral density S(f): Gaussian-shaped, amplitude 32/sqrt(2*pi).
S = 32 * 1 / np.sqrt(2 * np.pi) * np.exp(-1 / 2 * f ** 2)
# Generating the 2 dimensional mesh grid
fx = f
fy = f
Fx, Fy = np.meshgrid(f, f)
# Real/imaginary parts of the bispectrum over the (Fx, Fy) grid.
b = 95 * 2 * 1 / (2 * np.pi) * np.exp(2 * (-1 / 2 * (Fx ** 2 + Fy ** 2)))
# FIX: the original did `B_Real = b; B_Imag = b`, aliasing all three names to the
# SAME array, so the edge-zeroing below silently mutated `b` as well. The final
# values happened to coincide, but the aliasing is fragile; use independent copies.
B_Real = b.copy()
B_Imag = b.copy()
# Zero the zero-frequency row/column of both parts.
B_Real[0, :] = 0
B_Real[:, 0] = 0
B_Imag[0, :] = 0
B_Imag[:, 0] = 0
# -
# Defining the Bispectral Density($B$)
B_Complex = B_Real + 1j * B_Imag
B_Ampl = np.absolute(B_Complex)
# Make sure that the input parameters are in order to prevent aliasing
# +
# Upper bound on the admissible time step to avoid aliasing.
# NOTE(review): t_u = pi/F — this treats the cutoff as 2*pi*F rad/s; confirm units.
t_u = 2*np.pi/2/F
if dt>t_u:
    print('Error')  # only warns; the script continues even if aliasing would occur
# -
# Draw n_sim realisations of the non-Gaussian process defined by (S, B_Complex).
BSRM_object = BSRM(n_sim, S, B_Complex, dt, df, nt, nf)
samples = BSRM_object.samples
fig, ax = plt.subplots()
plt.title('Realisation of the BiSpectral Representation Method')
plt.plot(t, samples[0])
ax.yaxis.grid(True)
ax.xaxis.grid(True)
plt.show()
# Compare empirical moments against the theoretical targets implied by S and B.
print('The mean of the samples is ', np.mean(samples), 'whereas the expected mean is 0.000')
print('The variance of the samples is ', np.var(samples), 'whereas the expected variance is ', np.sum(S)*df*2)
print('The skewness of the samples is ', np.mean(skew(samples, axis=0)), 'whereas the expected skewness is ', np.sum(B_Real)*df**2*6/(np.sum(S)*df*2)**(3/2))
# +
import time
# NOTE(review): rebinds `t` (previously the time grid). Harmless here since
# plotting is already done, but rename if the grid is needed afterwards.
t = time.time()
z = RunModel(cpu=4, model_type=None, model_script='UQpy_Model.sh', input_script='UQpy_Input.sh',
             output_script='UQpy_Output.sh', samples=BSRM_object.samples, dimension=2)
t_run = time.time()-t
print(t_run)
# -
| example/StochasticProcess/BSRM_matlab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os, importlib, sys, time, h5py
import numpy as np
from scipy import linalg, stats, ndimage
from scipy.sparse import diags
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica'], 'size':13})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
import ldfa.ldfa as ldfa
import miccs.dynamiccs as dmx
import miccs.inference as inf
importlib.reload(dmx)
importlib.reload(inf)
importlib.reload(ldfa)
save_dir = "../data/simulation"
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
np.random.seed(0)  # fixed seed for reproducible simulations
# # load data
data_dir = "../data"
file_name = ['v4_lfp_beta.npy', 'pfc_lfp_beta.npy']
rec_time = [0, 0.5]  # recording window in seconds (not used in this chunk)
# After .T the arrays are indexed (time, channel, trial), per the shape reads below.
data_real = [np.load('%s/%s'%(data_dir, fname)).T for fname in file_name]
dims = [data_real[0].shape[1], data_real[1].shape[1]]  # channel count per population
num_time = data_real[0].shape[0]
obs_trial = data_real[0].shape[2]
# ## cross precision
# ## cross precision
num_blobs = 2  # number of ground-truth interaction blobs
times = np.array([[17, 10, 6], [30, 37, 9]])  # (start in series 1, start in series 2, duration)
H0 = np.zeros((times.shape[0], num_time, num_time))
# Each blob is a near-diagonal band (|offset difference| < 2) inside its window.
for i, (time_0, time_1, duration) in enumerate(times):
    H0[i, time_0:time_0+duration, time_1:time_1+duration] \
        = np.abs(np.arange(duration)-np.arange(duration)[:,None]) < 2
import matplotlib.gridspec as gridspec
plt.figure(figsize=(4,3))
gridspec.GridSpec(1,4); plt.subplot2grid((1,4), (0,0), colspan=3, rowspan=1)
dmx.imshow(np.sum(H0, 0), time=[0,50], identity=True, vmax=1)
plt.ylabel(r'series 1 at time $t$'); plt.xlabel(r'series 2 at time $s$')
ax = plt.subplot2grid((1,4), (0,3))
ax.axis('off')
H0_islag = H0.copy()[(times[:,0]-times[:,1]) != 0]  # keep only blobs off the main diagonal
dmx.imshow(np.sum(H0_islag, 0))
H0_lag = np.full((times.shape[0], 2*num_time-1), False)
# Project each lagged blob onto the t+s (anti-diagonal) axis.
for i, H0_i in enumerate(H0_islag):
    H0_lag[i] = np.isin(np.arange(2*num_time-1),
                        np.unique(np.sum(np.where(H0_i), 0)))
dmx.imshow(np.sum(H0_lag, 0)[None,:], aspect=2, extent=[0,50,0,1])
# ## latent precision
# ## latent precision
rhos = [0.105, 0.142]  # squared-exponential length-scale parameters per population
# Auto-precision per population: inverse of an SE kernel plus an identity ridge
# (the +1*I keeps the inversion well conditioned).
precs_auto = [linalg.inv(
    np.exp(-np.square((np.arange(num_time)[:,None]-np.arange(num_time))*rho))
    + 1 * np.eye(num_time))
    for rho in rhos]
snr = 0.6  # strength of the injected cross-dependence
# Joint latent precision: -snr*sum(H0) on the cross blocks, with matching mass
# added to the diagonal blocks (presumably to keep the matrix positive
# definite — TODO confirm).
prec_latent = np.array(np.block([
    [precs_auto[0]+snr*np.eye(num_time)*np.sum(H0,(0,2)), -snr*np.sum(H0,0)],
    [-snr*np.sum(H0,0).T, precs_auto[1]+snr*np.eye(num_time)*np.sum(H0,(0,1))]
]))
cov_latent = linalg.inv(prec_latent)
sd_latent = np.sqrt(np.diag(cov_latent))
corr_latent = cov_latent / sd_latent / sd_latent[:,None]  # covariance -> correlation
chol_latent = linalg.cholesky(corr_latent)  # used later to draw correlated latents
plt.figure(figsize=(12,3))
plt.subplot(1,4,1)
plt.title(r'$logdet(\Sigma) = %.1f$'%(np.linalg.slogdet(corr_latent)[1]))
plt.plot([num_time, num_time], [0, 2*num_time], linewidth = 0.3, color='black')
plt.plot([0, 2*num_time], [num_time, num_time], linewidth = 0.3, color='black')
dmx.imshow(corr_latent, vmax=1)
plt.subplot(1,4,2); dmx.imshow(corr_latent[:num_time,num_time:], identity=True, time=[0,50], vmax=1)
plt.subplot(1,4,3)
plt.plot([num_time, num_time], [0, 2*num_time], linewidth = 0.3, color='black')
plt.plot([0, 2*num_time], [num_time, num_time], linewidth = 0.3, color='black')
dmx.imshow(np.linalg.inv(corr_latent), vmax=1.5)
plt.subplot(1,4,4); dmx.imshow(np.linalg.inv(corr_latent)[:num_time,num_time:], identity=True, time=[0,50], vmax=1.5)
lgv_latent = np.linalg.slogdet(corr_latent)[1]
# ## latent direction
# ## latent direction
dirs = [np.random.normal(np.zeros(dim), 1) for dim in dims]
dirs = [dir_i / np.sqrt(np.sum(dir_i**2)) for dir_i in dirs]  # unit-norm loading per population
# ## dummy datasets
num_trial = 1000
lambdas_eps = np.square(np.linspace(0, np.sqrt(4), 7)[:0:-1])  # noise scales, descending, 0 excluded
lambdas_eps
dummies = []
# Bootstrap trials from the real data and add Gaussian noise whose per-time
# covariance is lambda_eps * Cov(data) (via its Cholesky factor).
for iter_eps, lambda_eps in enumerate(lambdas_eps):
    mchols = [np.array([linalg.cholesky(lambda_eps*np.cov(dat_t, bias=True)) for dat_t in dat])
              for dat in data_real]
    dummies.append(
        [dat[:,:,np.random.choice(obs_trial, num_trial, replace=True)]
         + np.matmul(mchol.transpose([0,2,1]), np.random.normal(size=(num_time,dim,num_trial)))
         for dat, dim, mchol in zip(data_real, dims, mchols)])
# +
# for iter_eps, data in enumerate(datasets):
# io.savemat('%s/data_sim_%d.mat'%(save_dir, iter_eps), {'pop_0': data[0], 'pop_1': data[1]})
# +
Omegas_dmm = np.zeros(lambdas_eps.shape+(2*num_time, 2*num_time))
Sigmas_dmm = np.zeros(lambdas_eps.shape+(2*num_time, 2*num_time))
# Baseline fit on the noise-only dummies (all penalties 0).
for iter_eps, dummy in enumerate(dummies):
    start_eps = time.time()
    m_x = [np.mean(dat, -1) for dat in dummy]
    S_xt = [np.mean([np.cov(dat_t, bias=True) for dat_t in dat], 0)
            for m, dat in zip(m_x, dummy)]  # `m` is unused in this expression
    # NOTE(review): np.linalg.eig does not sort eigenvalues — (w[0], v[:,0]) is
    # the FIRST eigenpair, not necessarily the leading one; confirm intent.
    eig_S_xt = [(w[0], v[:,0]) for w, v in [np.linalg.eig(S) for S in S_xt]]
    weight_init = [eig_S_xt[0][1].copy() for _ in np.arange(num_time)] \
        + [eig_S_xt[1][1].copy() for _ in np.arange(num_time)]
    Omega_dmm, Sigma_dmm, latent_dmm, weight_dmm =\
        dmx.fit(dummy, 0, 0, 0, 0, num_time, weight_init = weight_init)
    Omegas_dmm[iter_eps] = Omega_dmm
    Sigmas_dmm[iter_eps] = Sigma_dmm
    sys.__stdout__.write("%d-th simulation finished, lapse: %.3fsec.\n"
                         %(iter_eps+1, time.time()-start_eps))
    sys.__stdout__.flush()
# -
show_eps = np.array([1,2,4,5]).astype(int)  # which noise levels to display
lgvs_dmm = np.array([-np.linalg.slogdet(W)[1] for W in Omegas_dmm])  # logdet(Sigma) = -logdet(Omega)
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Omegas_dmm[show_eps], lgvs_dmm[show_eps])):
    plt.subplot(1,4,i+1)
    plt.plot([num_time, num_time], [0, 2*num_time], linewidth = 0.3, color='black')
    plt.plot([0, 2*num_time], [num_time, num_time], linewidth = 0.3, color='black')
    dmx.imshow(np.linalg.inv(W))
    plt.title(r'$\log\det(\Phi_\mathcal{T}) = %.1f$'
              %(lgv_dmm))
dummy[0].shape  # NOTE(review): relies on `dummy` leaking from the loop above
# +
Sigmas_avg = np.zeros(lambdas_eps.shape+(2*num_time, 2*num_time))
# Baseline: correlation of channel-averaged signals per noise level.
for iter_eps, dummy in enumerate(dummies):
    start_eps = time.time()
    latent_avg = [np.mean(dumm, 1) for dumm in dummy]  # average over channels -> (num_time, num_trial)
    Sigmas_avg[iter_eps] = np.corrcoef(latent_avg[0], latent_avg[1])
    sys.__stdout__.write("%d-th simulation finished, lapse: %.3fsec.\n"
                         %(iter_eps+1, time.time()-start_eps))
    sys.__stdout__.flush()
# -
lgvs_avg = np.array([np.linalg.slogdet(S)[1] for S in Sigmas_avg])
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Sigmas_avg[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1)
    plt.plot([num_time, num_time], [0, 2*num_time], linewidth = 0.3, color='black')
    plt.plot([0, 2*num_time], [num_time, num_time], linewidth = 0.3, color='black')
    dmx.imshow(W)
    plt.title(r'$\log\det(\Phi_\mathcal{T}) = %.1f$'
              %(lgv_dmm))
# ## simulation
# +
datasets = []
# Inject the shared latent signal along `dirs` into each dummy dataset, scaled
# to the dummy's own 1-D projected standard deviation.
for iter_eps, dummy in enumerate(dummies):
    ldummy = [dir_i @ (dumm - np.mean(dumm, -1, keepdims=True))
              for dir_i, dumm in zip(dirs, dummy)]  # 1-D projection -> (num_time, num_trial)
    cov_ldummy = np.cov(ldummy[0], ldummy[1])
    sd_ldummy = np.sqrt(np.diag(cov_ldummy))
    lv = (chol_latent.T @ np.random.normal(0, 1, (2*num_time, num_trial)))  # correlated latent draws
    datasets.append(
        [dummy[0] + dirs[0][None,:,None] * (sd_ldummy[:num_time,None,None]*lv[:num_time,None,:]),
         dummy[1] + dirs[1][None,:,None] * (sd_ldummy[num_time:,None,None]*lv[num_time:,None,:])])
    # datasets.append(
    #     [dummy[0] + dirs[0][None,:,None] * (sd_ldummy[:num_time,None,None]*lv[:num_time,None,:]-ldummy[0][:,None,:]),
    #      dummy[1] + dirs[1][None,:,None] * (sd_ldummy[num_time:,None,None]*lv[num_time:,None,:]-ldummy[1][:,None,:])])
# -
# Export each simulated dataset for the MATLAB (DKCCA) comparison.
from scipy import io  # FIX: scipy.io is never imported at the top (only linalg, stats, ndimage)
for iter_eps, data in enumerate(datasets):
    io.savemat('%s/data_sim_%d.mat'%(save_dir, iter_eps), {'pop_0': data[0], 'pop_1': data[1]})
# # parameters
# # parameters
offset_cross = 10  # half-bandwidth (time bins) of the cross-precision band
offset_auto = 5  # half-bandwidth of the auto-precision band
lambdas= np.vstack([
    np.exp(np.linspace(np.log(0.2), np.log(0.002), 11)), #np.full((11,), 0.06), ## lambdas_cross
    np.full((11,), 0), ## lambdas_auto
    np.full((11,), 0), ## lambdas_diag
    np.zeros(11)]) ## lambdas_ridge
lambda_cross, lambda_auto, lambda_diag, lambda_ridge = lambdas.T[2]  # pick the 3rd setting
lambda_cross, lambda_auto, lambda_diag, lambda_ridge
# Band masks |t-s| < offset (equivalent to the direct construction used at the
# LDFA-H section further down).
time_graph_cross = (np.abs(np.subtract(*np.where(
    np.full([num_time,num_time],True)))
    .reshape([num_time,num_time])) < offset_cross)
time_graph_auto = (np.abs(np.subtract(*np.where(
    np.full([num_time,num_time],True)))
    .reshape([num_time,num_time])) < offset_auto)
# Per-entry glasso penalty multipliers assembled blockwise; after the -1 shift,
# in-band entries carry their lambda and out-of-band entries come out as -1
# (presumably meaning "excluded" downstream — confirm in dmx.fit).
lambda_glasso = np.array(np.block(
    [[(1+lambda_auto)*(time_graph_auto-np.eye(num_time)) + (1+lambda_diag)*np.eye(num_time), (1+lambda_cross)*time_graph_cross],
     [(1+lambda_cross)*time_graph_cross, (1+lambda_auto)*(time_graph_auto-np.eye(num_time)) + (1+lambda_diag)*np.eye(num_time)]])) -1
dmx.imshow(lambda_glasso)
max_dfa = 1000  # iteration caps for the solvers
max_lasso = 1000
max_glasso = 1000
ths = 1e-5  # convergence threshold
# # DynaMICCS
# +
Omegas_dmx = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
Sigmas_dmx = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
for i, data in enumerate(datasets):
    # initialization by CCA
    start_dmx = time.time()
    # Pool all time points/trials, whiten each population, and take the leading
    # canonical pair as the initial loading for every time bin.
    S_xt = np.cov(*[dat.transpose([1,0,2]).reshape([d,-1])
                    for dat, d in zip(data, dims)])
    S_1 = S_xt[:dims[0],:dims[0]]
    S_12 = S_xt[:dims[0],dims[0]:]
    S_2 = S_xt[dims[0]:,dims[0]:]
    U_1= linalg.inv(linalg.sqrtm(S_1))
    U_2 = linalg.inv(linalg.sqrtm(S_2))
    u, s, vh = np.linalg.svd(U_1 @ S_12 @ U_2)
    weight_init = [(U_1 @ u[:,0]).copy() for _ in np.arange(num_time)] \
        + [ (U_2 @ vh[0]).copy() for _ in np.arange(num_time)]
    # fit DynaMICCS
    Omega_est, Sigma_est, latent_est, weight_est =\
        dmx.fit(data, lambda_diag, lambda_cross, offset_cross,
                lambda_auto, offset_auto,
                weight_init = weight_init)
    Omegas_dmx[i] = Omega_est
    Sigmas_dmx[i] = Sigma_est
    sys.__stdout__.write('%d-th simulation on DynaMICCS finished, lapse: %.3fs \n'
                         %(i+1, time.time()-start_dmx))
    sys.__stdout__.flush()
# -
# Cross blocks of the estimated precision (first row) and covariance (second row).
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Omegas_dmx[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1)
    dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.2, time=[0,50])
    # plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Sigmas_dmx[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1); dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.5, time=[0,50])
    # plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
# # LDFA-H
# +
Omegas_cdfa = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
Sigmas_cdfa = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
for i, data in enumerate(datasets):
    start_cdfa = time.time()
    # fit dfa (one latent factor; [0] below selects that factor's estimates)
    Omega_est, Sigma_est, params_est =\
        ldfa.fit([dat.T for dat in data], 1,
                 lambda_cross, offset_cross, lambda_auto, offset_auto,
                 ths_glasso=1e-8, ths_lasso=1e-8)
    Omegas_cdfa[i] = Omega_est[0]
    Sigmas_cdfa[i] = Sigma_est[0]
    sys.__stdout__.write('%d-th simulation on LDFA-H finished, lapse: %.3fs \n'
                         %(i+1, time.time()-start_cdfa))
    sys.__stdout__.flush()
# -
# Cross blocks of the LDFA-H precision/covariance estimates.
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Omegas_cdfa[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1)
    dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.2, time=[0,50])
    plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Sigmas_cdfa[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1); dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.5, time=[0,50])
    plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
time_graph_cross = (np.abs(np.arange(num_time)-np.arange(num_time)[:,None])<offset_cross)  # simpler band mask; overwrites the earlier construction
# +
Omegas_ldfa_1 = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
Sigmas_ldfa_1 = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
for i, data in enumerate(datasets):
    start_cdfa = time.time()
    # Scan all (t, s) pairs inside the band for the largest leading canonical
    # correlation, then use that pair's CCA loadings to initialize LDFA-H.
    CCA_ts = np.zeros((num_time, num_time))
    for t, s in zip(*np.where(time_graph_cross)):
        S_ts = np.cov(data[0][t], data[1][s], bias=True)
        S_1 = S_ts[:dims[0],:dims[0]]
        S_12 = S_ts[:dims[0],dims[0]:]
        S_2 = S_ts[dims[0]:,dims[0]:]
        U_1= linalg.inv(linalg.sqrtm(S_1))
        U_2 = linalg.inv(linalg.sqrtm(S_2))
        u, l, vh = np.linalg.svd(U_1 @ S_12 @ U_2)
        CCA_ts[t,s] = l[0]  # leading canonical correlation at (t, s)
    t, s = np.where(CCA_ts == np.max(CCA_ts))
    S_ts = np.cov(data[0][t[0]], data[1][s[0]], bias=True)
    S_1 = S_ts[:dims[0],:dims[0]]
    S_12 = S_ts[:dims[0],dims[0]:]
    S_2 = S_ts[dims[0]:,dims[0]:]
    U_1= linalg.inv(linalg.sqrtm(S_1))
    U_2 = linalg.inv(linalg.sqrtm(S_2))
    u, l, vh = np.linalg.svd(U_1 @ S_12 @ U_2)
    weights = [(U_1 @ u[:,0]).copy(), (U_2 @ vh[0]).copy()]
    # fit dfa (NOTE(review): argument list differs from the ldfa.fit call in the
    # previous section — no leading factor count here; confirm against ldfa's API)
    Omega_est, Sigma_est, params_est =\
        ldfa.fit(data, lambda_cross, offset_cross,
                 lambda_auto, offset_auto, ths_glasso=1e-8, ths_lasso=1e-8,
                 beta_init = weights)
    Omegas_ldfa_1[i] = Omega_est
    Sigmas_ldfa_1[i] = Sigma_est
    sys.__stdout__.write('%d-th simulation on LDFA-H finished, lapse: %.3fs \n'
                         %(i+1, time.time()-start_cdfa))
    sys.__stdout__.flush()
# -
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Omegas_ldfa_1[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1)
    dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.2, time=[0,50])
    # plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Sigmas_ldfa_1[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1); dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.5, time=[0,50])
    # plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
# # APC (Averaged Pairwise Correlation)
# +
# APC: average the (2T x 2T) correlation matrix over all channel pairs (j, k).
Sigmas_apc = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
for i, data in enumerate(datasets):
    start_apc = time.time()
    # fit dfa
    for j in np.arange(dims[0]):
        for k in np.arange(dims[1]):
            Sigmas_apc[i] += np.corrcoef(data[0][:,j,:], data[1][:,k,:])
    Sigmas_apc[i] /= dims[0] * dims[1]
    sys.__stdout__.write('%d-th simulation on APC finished, lapse: %.3fs \n'
                         %(i+1, time.time()-start_apc))
    sys.__stdout__.flush()
# -
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Sigmas_apc[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1); dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.5, time=[0,50])
    # plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
# # CAS (Correlation of Averaged Signals)
# +
# CAS: correlation of the channel-averaged signals of the two populations.
Sigmas_cas = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
for i, data in enumerate(datasets):
    start_cas = time.time()
    # FIX: the original nested loops over (j, k) recomputed and overwrote this
    # exact same correlation dims[0]*dims[1] times — the indices were never
    # used. Computing it once is equivalent and far cheaper.
    Sigmas_cas[i] = np.corrcoef(np.mean(data[0],1),
                                np.mean(data[1],1))
    sys.__stdout__.write('%d-th simulation on CAS finished, lapse: %.3fs \n'
                         %(i+1, time.time()-start_cas))
    sys.__stdout__.flush()
# -
# CAS cross-correlation blocks per displayed noise level.
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Sigmas_cas[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1); dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.5, time=[0,50])
    # plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
# # CCA
# +
Sigmas_cca = np.zeros((len(lambdas_eps),2*num_time,2*num_time))
for i, data in enumerate(datasets):
    # initialization by CCA
    start_cca = time.time()
    # Whiten each population on pooled data, take the leading canonical pair,
    # then correlate the 1-D projected time series.
    S_xt = np.cov(*[dat.transpose([1,0,2]).reshape([d,-1])
                    for dat, d in zip(data, dims)])
    S_1 = S_xt[:dims[0],:dims[0]]
    S_12 = S_xt[:dims[0],dims[0]:]
    S_2 = S_xt[dims[0]:,dims[0]:]
    U_1= linalg.inv(linalg.sqrtm(S_1))
    U_2 = linalg.inv(linalg.sqrtm(S_2))
    u, s, vh = np.linalg.svd(U_1 @ S_12 @ U_2)
    weights = [(U_1 @ u[:,0]).copy(), (U_2 @ vh[0]).copy()]
    Sigmas_cca[i] = np.corrcoef(*[np.tensordot(w, dat, (0,1))
                                  for w, dat in zip(weights, data)])
    sys.__stdout__.write('%d-th simulation on CCA finished, lapse: %.3fs \n'
                         %(i+1, time.time()-start_cca))
    sys.__stdout__.flush()
# -
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Sigmas_cca[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1); dmx.imshow(W[:num_time,num_time:], identity=True, vmax=0.5, time=[0,50])
    # plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
# # DKCCA
# Load the MATLAB DKCCA latents exported by the companion script and correlate them.
from scipy import io  # FIX: scipy.io is never imported at the top — required for loadmat
Sigmas_dkcca = np.zeros((len(lambdas_eps),num_time,num_time))
for iter_eps, data in enumerate(datasets):
    latent_dkcca = io.loadmat('%s/result_DKCCA_%d.mat'%(save_dir,iter_eps))['latent_DKCCA']
    latent_dkcca = np.concatenate([latent_dkcca[0,0].T, latent_dkcca[1,0].T], 0)
    # NOTE(review): the target slot is (num_time, num_time), so the concatenated
    # latents must have num_time rows in total — confirm against the .mat files.
    Sigmas_dkcca[iter_eps] = np.corrcoef(latent_dkcca)
# Display |corr| between the two DKCCA latent halves (25 bins each).
plt.figure(figsize=(12, 3))
for i, (l, W, lgv_dmm) in enumerate(zip(
        lambdas_eps[show_eps], Sigmas_dkcca[show_eps], lgvs_avg[show_eps])):
    plt.subplot(1,4,i+1); dmx.imshow(np.abs(W)[:25,25:], identity=True, vmax=0.5, time=[0,50])
    # plt.title(r'$\Delta = %.1f$'%(lgv_latent-lgv_dmm))
| example/3.1 LDFA-H versus existing methods in addressing noise auto-correlation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# cd /media/sf_datasets/Smarter\ Devices/BLUED_extracted/BLUED-TK
import numpy as np
import os
import pandas as pd
from sklearn import svm
from sklearn.model_selection import train_test_split as tts
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
# List the per-file event exports (sorted for a reproducible ordering).
path = 'events/'
list_of_files = os.listdir(path)
list_of_files.sort()
print (list_of_files)
# +
inputs=70  # number of waveform features per event
#reading the dataframe
df=pd.read_table('events/peaks.csv',index_col=0,sep='\t')
total_events=len(df)  # event count BEFORE single-event devices are removed below
#converting the string to numpy array
def makeArray(text):
    """Parse a stringified numpy array like '[1.0 2.0 3.0]' into a 1-D float array.

    FIX: the original stripped only the opening bracket and relied on
    np.fromstring silently stopping at the trailing ']' (which raises a
    DeprecationWarning); strip both brackets explicitly.
    """
    text = text.replace("[", " ").replace("]", " ").strip()
    return np.fromstring(text, sep=' ')
df.features=df.features.apply(makeArray)  # parse stringified feature arrays
#finding the single event devices
freq=df.groupby('label').count()
# freq=freq.sort_values(by='features')
single_freq=freq[freq.features==1].index  # labels occurring exactly once
# single_freq
#eliminating all the single event devices (a single example cannot be split into train/test)
for i in single_freq:
    df=(df[df['label']!=i])
#df=df[df.label!=111]
# +
# Build the dense feature/label arrays from the filtered dataframe.
# FIX: allocate with len(df) AFTER removing single-event devices. The original
# used total_events (the pre-removal count), which left uninitialized trailing
# rows in feature_matrix and a length mismatch with label_matrix in
# train_test_split whenever any single-event device was dropped.
feature_matrix=np.empty((len(df),inputs))
#label_matrix=np.zeros((total_events,outputs))
ll=np.empty((len(df)))
count=0
for i in df.index:
    feature_matrix[count]=df.loc[i].features
    ll[count]=df.loc[i].label
    count+=1
device_list=list(df.label.unique())
label_matrix = np.array(df.label)
# for i in range(0,len(ll)):
#     label_matrix[i,device_list.index(ll[i])]=1
print(feature_matrix.shape)
print(label_matrix.shape)
m_f=feature_matrix.mean(axis=0)
v_f=feature_matrix.var(axis=0)
# NOTE(review): this divides by the variance, not the standard deviation —
# z-scoring would use np.sqrt(v_f). Left as-is pending confirmation.
feature_matrix=(feature_matrix-m_f)/v_f
feature_train, feature_test, label_train, label_test = tts(feature_matrix, label_matrix, test_size=0.3, random_state=42)
#feature_matrix = preprocessing.StandardScaler().fit(feature_matrix).transform(feature_matrix)
print (label_matrix)
# -
y = []            # labels accumulator for the commented-out raw-waveform pipeline below
x = np.zeros(60)  # placeholder first row; vstack-ed with waveforms when that cell is enabled
# +
# #stacking all the data into a single array for test train split
# for file in list_of_files:
# data = np.loadtxt(path+file,delimiter =',')
# '''for i in range(len(data)):
# for j in range(1:181):
# if(int(data[i,j])<int(data[i,j-1])):
# continue
# on_data ='''
# if file.split("_")[1] == 'labels':
# data = list(data)
# #data = np.matrix(data).T
# #print (data.shape)
# # print (data.shape)
# #= np.vstack((np.matrix(y),data))
# y = y+data
# else:
# for i in range(len(data)):
# norm = np.mean(np.matrix(data[i,:30]))
# data[i,30:]-=norm
# x = np.vstack((x,np.matrix(data[i,30:])))
# print (x.shape)
# #print(data[:,120:].shape)
# #x = np.vstack((x,data[:,120:]))
# +
# y = np.array(y)
# print (y.shape)
# x = x[1:]
# print (x.shape,y.shape)
# x_train, x_test, y_train, y_test = tts(x, y, test_size=0.25, random_state=42)
# scalerx = preprocessing.StandardScaler().fit(x_train)
# x_train = scalerx.transform(x_train)
# x_test = scalerx.transform(x_test)
# -
# Plot one waveform and its label. NOTE(review): depends on the commented-out
# loading cell above; with the placeholders (x = np.zeros(60), y = []),
# x[p] is a scalar and y[p] raises IndexError.
p = 50
plt.plot(x[p].reshape(-1,1))
plt.show()
print (y[p])
# for i in range(len(y_train)):
# if y_train[i] == 211.0:
# #print (y_train[i],x_train[i])
# plt.plot(x_train[i])
# plt.show()
'''C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=3, test_size=0.2, random_state=42)
grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv)
grid.fit(x,y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = svm.SVC(C=C, gamma=gamma)
clf.fit(x_train, y_train)
classifiers.append((C, gamma, clf))
print (x_train.shape)'''
# +
# Load the P/Q/S feature export; split off labels before converting to a matrix.
dataa = pd.read_csv('events/peaks_PQS.txt',sep=',')
y = dataa['labels']
y1 = dataa[:]  # keep a full copy including the labels column
del dataa['labels']
# FIX: DataFrame.as_matrix() was removed in pandas 1.0; .values is the
# long-standing equivalent and works on all pandas versions.
x = dataa.values
# -
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import sklearn.utils
from sklearn.model_selection import train_test_split as tts
# +
#reading the dataframe
df=pd.read_table('events/peaks_PQS.txt',index_col=0,sep=',')
cols=[str(i) for i in range(70)]
cols+=['p','q','s','labels']
df.columns=cols
#finding the single event devices
freq=df.groupby('labels').count()
single_freq=freq[freq['0']==1].index
print(freq)
# df=df.ix[:,'p':]
#print(df)
# comment to remove p,q,s
#del df['72'],df['71'],df['70']
# #eliminating refrigerator
# #df=(df[df['label']!=111])
# #eliminating monitor
# # df=(df[df['label']!=140])
# # print(len(df))
# # printing the values by count
# # df.groupby('label').count().sort_values(by='features')
#eliminating all the single event devices
for i in single_freq:
df=(df[df['labels']!=i])
print("Events left after removal of single events %d"%len(df))
# Remove irrelevant events: keep only events with |p| > 20.
uf,lf=df[df.p>20],df[df.p<-20]
_df=pd.concat([uf, lf])  # DataFrame.append is deprecated (removed in pandas 2.0); concat is equivalent
# FIX: the message previously said |p|>30 while the filter threshold is 20.
print("Events left |p|>20 %d"%len(_df))
df=_df
device_list=df['labels'].unique()
print('output devices %s'%len(device_list))
# +
#convert the dataframe to matrix
feature_matrix=df.as_matrix()
labels=np.array(df['labels'])
print(len(set(labels)))
inputs=feature_matrix.shape[1]
outputs=len(device_list)
total_events=len(df)
print('Total events is %s'%(total_events))
# +
# Standardize. NOTE(review): divides by the variance, not the standard
# deviation — z-scoring would use np.sqrt(var); confirm intent.
feature_matrix=(feature_matrix-feature_matrix.mean(axis=0))/feature_matrix.var(axis=0)
feature_train, feature_test, label_train, label_test = tts(feature_matrix, labels, test_size=0.1, random_state=42,stratify=labels)
print('feature_train.shape=%s label_train.shape=%s'%(feature_train.shape,label_train.shape))
print('inputs=%d output=%d'%(inputs,outputs))
# -
# Count occurrences of device label 140 (the monitor, per the commented-out
# filtering above) in the train and test splits.
train_count = 0
test_count = 0
for i in label_train:
    # FIX: the original read `if i == :` — a SyntaxError. 140 matches the
    # parallel test-split loop below.
    if i == 140:
        train_count+=1
print (train_count)
for i in label_test:
    if i == 140:
        test_count+=1
print(test_count)
# Linear SVM scored with 10-fold cross-validation. Note cross_val_score fits
# CLONES of clf; clf itself is left unfitted here.
clf = svm.SVC(C=1.0,kernel='linear',cache_size=1000,decision_function_shape='ovr',shrinking=True,probability=True)
#clf.fit(feature_train, label_train)
scores = cross_val_score(clf,feature_matrix ,labels, cv=10)
scores.mean()
# +
# NOTE(review): clf.fit above is commented out and cross_val_score does not fit
# clf in place, so this predict raises NotFittedError unless the fit line is
# re-enabled.
result = clf.predict(feature_test)
result = np.matrix(result).T
#print (result)
# -
# Manual accuracy over the held-out split.
index = 0
count = 0
while index<len(result):
    if(result[index,0] == label_test[index]):
        count+=1
    index +=1
#result = clf.predict(feature_test)
#result = np.matrix(result).T
eff = count/len(label_test)  # test accuracy in [0, 1]
print (eff*100)
| BLUED-TK/svm_on_ipeaks.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xeus-cling-cpp14
// ---
// 
// <center> <h1>xframe is a dataframe for C++, based on xtensor and xtl</h1> </center>
// +
#include <string>
#include <iostream>
#include "xtensor/xrandom.hpp"
#include "xtensor/xmath.hpp"
#include "xframe/xio.hpp"
#include "xframe/xvariable.hpp"
#include "xframe/xvariable_view.hpp"
#include "xframe/xvariable_masked_view.hpp"
#include "xframe/xreindex_view.hpp"
// -
// Let's first define some useful type aliases so we can reduce the amount of typing
// Type aliases used throughout: string-labelled coordinate axes over
// double-valued (optional) variables.
using coordinate_type = xf::xcoordinate<xf::fstring>;
using variable_type = xf::xvariable<double, coordinate_type>;
using data_type = variable_type::data_type;
// # 1. Variables
// ## 1.1. Creating variable
//
// In the following we define a 2D variable called `dry_temperature`. A variable in `xframe` is the composition of a tensor data and a coordinate system. It is the equivalent of `DataArray` from <a href=http://xarray.pydata.org/en/stable/data-structures.html>xarray</a>. The tensor data can be any valid `xtensor` expression whose `value_type` is `xoptional`. Common types are `xarray_optional`, `xtensor_optional` and `xoptional_assembly`, which allows to create an optional expression from existing regular tensor expressions.
// 

data_type dry_temperature_data = xt::eval(xt::random::rand({6, 3}, 15., 25.));
dry_temperature_data(0, 0).has_value() = false;
dry_temperature_data(2, 1).has_value() = false;
dry_temperature_data
// Once the data is defined, we can define the coordinate system. A coordinate system is a mapping of dimension names with label axes. Although it is possible to create an axe from a vector of labels, then the coordinate system from a map containing axes and dimension names, and finally the variable from this coordinate system and the previously created data, `xframe` makes use of the initialize-list syntax so everything can be created in place with a very expressive syntax:
auto time_axis = xf::axis({"2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04", "2018-01-05", "2018-01-06"});
auto dry_temperature = variable_type(
dry_temperature_data,
{
{"date", time_axis},
{"city", xf::axis({"London", "Paris", "Brussels"})}
}
);
dry_temperature
// ## 1.2. Indexing and selecting data
//
// Like <a href=http://xarray.pydata.org/en/stable/indexing.html>xarray</a>, `xframe` supports four different kinds of indexing as described below:
// **Dimension lookup:** Positional - **Index lookup:** By integer
dry_temperature(3, 0)
// **Dimension lookup:** Positional - **Index lookup:** By label
dry_temperature.locate("2018-01-04", "London")
// **Dimension lookup:** By name - **Index lookup:** By integer
dry_temperature.iselect({{"date", 3}, {"city", 0}})
// **Dimension lookup:** By name - **Index lookup:** By label
dry_temperature.select({{"date", "2018-01-04"}, {"city", "London"}})
// Contrary to <a href=http://xarray.pydata.org/en/stable/indexing.html>xarray</a>, these methods return a single value, they do not allow to create views of the variable by selecting many data points. This feature is possible with `xframe` though, by using the free function counterparts of the methods described above, and will be covered in a next section.
// ## 1.3. Maths and broadcasting
//
// Variable support all the common mathematics operations and functions; like <a href=https://xtensor.readthedocs.io/en/latest/expression.html>xtensor</a>, these operations are lazy and return expressions. `xframe` supports operations on variables with different dimensions and labels thanks to broadcasting. This one is performed according the dimension names rather than the dimension positions as shown below.
//
// Let's first define a variable containing the relative humidity for cities:
// +
data_type relative_humidity_data = xt::eval(xt::random::rand({3}, 50.0, 70.0));
auto relative_humidity = variable_type(
relative_humidity_data,
{
{"city", xf::axis({"Paris", "London", "Brussels"})}
}
);
relative_humidity
// -
// We will use it and the previously defined `dry_temperature` variable (that we show again below) to compute the water vapour pressure
dry_temperature
auto water_vapour_pressure = 0.01 * relative_humidity * 6.1 * xt::exp((17.27 * dry_temperature) / (237.7 + dry_temperature));
water_vapour_pressure
// The relative humidity has been broadcasted so its values are repeated for each date.
// When the labels of variables involved in an operation are not the same, the result contains the *intersection* of the label sets:
// +
data_type coeff_data = xt::eval(xt::random::rand({6, 3}, 0.7, 0.9));
dry_temperature_data(0, 0).has_value() = false;
dry_temperature_data(2, 1).has_value() = false;
auto coeff = variable_type(
coeff_data,
{
{"date", time_axis},
{"city", xf::axis({"London", "New York", "Brussels"})}
}
);
coeff
// -
auto res = coeff * dry_temperature;
res
// ## 1.4. Higher dimension variables
//
// The following code creates and displays a three-dimensional variable.
data_type pressure_data = {{{ 1., 2., 3. },
{ 4., 5., 6. },
{ 7., 8., 9. }},
{{ 1.3, 1.5, 1.},
{ 2., 2.3, 2.4},
{ 3.1, 3.8, 3.}},
{{ 8.5, 8.2, 8.6},
{ 7.5, 8.6, 9.7},
{ 4.5, 4.4, 4.3}}};
auto pressure = variable_type(
pressure_data,
{
{"x", xf::axis(3)},
{"y", xf::axis(3, 6, 1)},
{"z", xf::axis(3)},
}
);
pressure
// # 2. Views
// ## 2.1. Multiselection
//
// Views can be used to select many data points in a variable. The syntax is similar to the one used for selecting a single data point, except that it uses free functions instead of methods of variable.
dry_temperature
// **Dimension lookup:** Positional - **Index lookup:** By integer
auto v1 = ilocate(dry_temperature, xf::irange(0, 5, 2), xf::irange(1, 3));
v1
// **Dimension lookup:** Positional - **Index lookup:** By label
auto v2 = locate(dry_temperature, xf::range("2018-01-01", "2018-01-06", 2), xf::range("Paris", "Brussels"));
v2
// **Dimension lookup:** By name - **Index lookup:** By integer
auto v3 = iselect(dry_temperature, {{"city", xf::irange(1, 3)}, {"date", xf::irange(0, 5, 2)}});
v3
// **Dimension lookup:** By name - **Index lookup:** By label
auto v4 = select(dry_temperature,
{{"city", xf::range("Paris", "Brussels")},
{"date", xf::range("2018-01-01", "2018-01-06", 2)}});
v4
// ## 2.2. Keeping and dropping labels
//
// The previous selection made use of ranges (label range from `xframe` and index range from `xtensor`), however it is also possible to select data points by explicitly specifying a list of labels to keep or to drop.
// **Dimension lookup:** Positional - **Index lookup:** By integer
auto v5 = ilocate(dry_temperature, xf::ikeep(0, 2, 4), xf::idrop(0));
v5
// **Dimension lookup:** By name - **Index lookup:** By integer
auto v6 = locate(dry_temperature, xf::keep("2018-01-01", "2018-01-03", "2018-01-05"), xf::drop("London"));
v6
// **Dimension lookup:** By name - **Index lookup:** By integer
auto v7 = iselect(dry_temperature, {{"city", xf::idrop(0)}, {"date", xf::ikeep(0, 2, 4)}});
v7
// **Dimension lookup:** By name - **Index lookup:** By label
auto v8 = select(dry_temperature,
{{"city", xf::drop("London")},
{"date", xf::keep("2018-01-01", "2018-01-03", "2018-01-05")}});
v8
// ## 2.3 Masking views
//
// Masking views allow selecting data points based on conditions expressed on labels. These conditions can be complicated boolean expressions.
pressure
auto masked_pressure = xf::where(
pressure,
not_equal(pressure.axis<int>("x"), 2) && pressure.axis<int>("z") < 2
);
masked_pressure
// When assigning to a masking view, masked values are not changed. Like other views, a masking view is a proxy on its underlying expression, no copy is made, so changing an unmasked value actually changes the corresponding value in the underlying expression.
masked_pressure = 1.;
masked_pressure
pressure
// ## 2.4 Reindexing views
//
// Reindexing views give variables new set of coordinates to corresponding dimensions. Like other views, no copy is involved. Asking for values corresponding to new labels not found in the original set of coordinates returns missing values. In the next example, we reindex the `city` dimension.
dry_temperature
auto temp = reindex(dry_temperature, {{"city", xf::axis({"London", "New York", "Brussels"})}});
temp
// The `reindex_like` is a shortcut that allows to reindex a variable given the set of coordinates of another variable
auto dry_temp2 = variable_type(
dry_temperature_data,
{
{"date", time_axis},
{"city", xf::axis({"London", "New York", "Brussels"})}
}
);
auto temp2 = reindex_like(dry_temperature, dry_temp2);
temp2
| notebooks/xframe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''base'': conda)'
# metadata:
# interpreter:
# hash: 9fe336409bf8d1c21a3bedb40a72035b066e16950eddbec83b6c8b74e32600ee
# name: python3
# ---
# # United States - Crime Rates - 1960 - 2014
# ### Introduction:
#
# This time you will create a data set of US crime rates and analyze it with pandas time-series tools.
#
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
import pandas as pd
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv).
# ### Step 3. Assign it to a variable called crime.
# Load the raw crime table (one row per year, 1960-2014).
crime = pd.read_csv('US_Crime_Rates_1960_2014.csv')
crime.head()
# ### Step 4. What is the type of the columns?
crime.dtypes
# ##### Have you noticed that the type of Year is int64. But pandas has a different type to work with Time Series. Let's see it now.
#
# ### Step 5. Convert the type of the column Year to datetime64
# Parse the integer years into datetime64 so we can use time-series resampling.
crime.Year = pd.to_datetime(crime.Year, format='%Y')
crime.info()
crime.head()
# ### Step 6. Set the Year column as the index of the dataframe
crime = crime.set_index('Year')
crime.head()
# ### Step 7. Delete the Total column
# BUG FIX: DataFrame.drop returns a new frame; the original call discarded the
# result, so 'Total' was never actually removed. Assign it back.
crime = crime.drop('Total', axis=1)
crime.head()
# ### Step 8. Group the year by decades and sum the values
#
# #### Pay attention to the Population column number, summing this column is a mistake
decades = crime.resample('10AS').sum()
# Population is a level (head count), not an event count, so summing it over a
# decade is meaningless: take the decade maximum instead.
# BUG FIX: select the 'Population' column explicitly; the original assigned a
# whole DataFrame to a single column.
decades['Population'] = crime['Population'].resample('10AS').max()
decades.head()
# ### Step 9. What is the most dangerous decade to live in the US?
# idxmax() gives, per crime column, the decade start with the highest value.
decades.idxmax()
| 04_Apply/US_Crime_Rates/Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="ubFUlqz8cj1L"
# # Tutorial Part 6: Introduction to Graph Convolutions
#
# In this tutorial we will learn more about "graph convolutions." These are one of the most powerful deep learning tools for working with molecular data. The reason for this is that molecules can be naturally viewed as graphs.
#
# 
#
# Note how standard chemical diagrams of the sort we're used to from high school lend themselves naturally to visualizing molecules as graphs. In the remainder of this tutorial, we'll dig into this relationship in significantly more detail. This will let us get a deeper understanding of how these systems work.
#
# ## Colab
#
# This tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.
#
# [](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/06_Introduction_to_Graph_Convolutions.ipynb)
#
# ## Setup
#
# To run DeepChem within Colab, you'll need to run the following installation commands. This will take about 5 minutes to run to completion and install your environment. You can of course run this tutorial locally if you prefer. In that case, don't run these cells since they will download and install Anaconda on your local machine.
# + colab={"base_uri": "https://localhost:8080/", "height": 156} colab_type="code" id="EoCLxSnBcj1N" outputId="d0555806-a13b-4522-c845-c36a7f910fca"
# !curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import conda_installer
conda_installer.install()
# !/root/miniconda/bin/conda info -e
# + colab={"base_uri": "https://localhost:8080/", "height": 211} colab_type="code" id="3Jv2cmnW91CF" outputId="bd523c54-3038-4654-89ad-356ad1e207ca"
# !pip install --pre deepchem
# + [markdown] colab_type="text" id="BX2erW0ncj1W"
# # What are Graph Convolutions?
#
# Consider a standard convolutional neural network (CNN) of the sort commonly used to process images. The input is a grid of pixels. There is a vector of data values for each pixel, for example the red, green, and blue color channels. The data passes through a series of convolutional layers. Each layer combines the data from a pixel and its neighbors to produce a new data vector for the pixel. Early layers detect small scale local patterns, while later layers detect larger, more abstract patterns. Often the convolutional layers alternate with pooling layers that perform some operation such as max or min over local regions.
#
# Graph convolutions are similar, but they operate on a graph. They begin with a data vector for each node of the graph (for example, the chemical properties of the atom that node represents). Convolutional and pooling layers combine information from connected nodes (for example, atoms that are bonded to each other) to produce a new data vector for each node.
#
# # Training a GraphConvModel
#
# Let's use the MoleculeNet suite to load the Tox21 dataset. To featurize the data in a way that graph convolutional networks can use, we set the featurizer option to `'GraphConv'`. The MoleculeNet call returns a training set, a validation set, and a test set for us to use. It also returns `tasks`, a list of the task names, and `transformers`, a list of data transformations that were applied to preprocess the dataset. (Most deep networks are quite finicky and require a set of data transformations to ensure that training proceeds stably.)
# + colab={"base_uri": "https://localhost:8080/", "height": 89} colab_type="code" id="JMi2V8Jncj1W" outputId="56ab5eb6-07be-4d8f-c19b-88d1f73f2f46"
import deepchem as dc
tasks, datasets, transformers = dc.molnet.load_tox21(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = datasets
# + [markdown] colab_type="text" id="QfMW0Y4Kcj1Z"
# Let's now train a graph convolutional network on this dataset. DeepChem has the class `GraphConvModel` that wraps a standard graph convolutional architecture underneath the hood for user convenience. Let's instantiate an object of this class and train it on our dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 245} colab_type="code" id="Y9n3jTNHcj1a" outputId="2caab2e5-5e5a-4f97-a440-753692341d35"
n_tasks = len(tasks)
model = dc.models.GraphConvModel(n_tasks, mode='classification')
model.fit(train_dataset, nb_epoch=50)
# + [markdown] colab_type="text" id="kDDroutEcj1g"
# Let's try to evaluate the performance of the model we've trained. For this, we need to define a metric, a measure of model performance. `dc.metrics` holds a collection of metrics already. For this dataset, it is standard to use the ROC-AUC score, the area under the receiver operating characteristic curve (which measures the tradeoff between precision and recall). Luckily, the ROC-AUC score is already available in DeepChem.
#
# To measure the performance of the model under this metric, we can use the convenience function `model.evaluate()`.
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="MeX-9RNWcj1h" outputId="642d3f81-33de-46bb-fc7a-8b5edda99881"
metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
print('Training set score:', model.evaluate(train_dataset, [metric], transformers))
print('Test set score:', model.evaluate(test_dataset, [metric], transformers))
# + [markdown] colab_type="text" id="l-LBxrKN6CMs"
# The results are pretty good, and `GraphConvModel` is very easy to use. But what's going on under the hood? Could we build GraphConvModel ourselves? Of course! DeepChem provides Keras layers for all the calculations involved in a graph convolution. We are going to apply the following layers from DeepChem.
#
# - `GraphConv` layer: This layer implements the graph convolution. The graph convolution combines per-node feature vectors in a nonlinear fashion with the feature vectors for neighboring nodes. This "blends" information in local neighborhoods of a graph.
#
# - `GraphPool` layer: This layer does a max-pooling over the feature vectors of atoms in a neighborhood. You can think of this layer as analogous to a max-pooling layer for 2D convolutions but which operates on graphs instead.
#
# - `GraphGather`: Many graph convolutional networks manipulate feature vectors per graph-node. For a molecule for example, each node might represent an atom, and the network would manipulate atomic feature vectors that summarize the local chemistry of the atom. However, at the end of the application, we will likely want to work with a molecule level feature representation. This layer creates a graph level feature vector by combining all the node-level feature vectors.
#
# Apart from this we are going to apply standard neural network layers such as [Dense](https://keras.io/api/layers/core_layers/dense/), [BatchNormalization](https://keras.io/api/layers/normalization_layers/batch_normalization/) and [Softmax](https://keras.io/api/layers/activation_layers/softmax/) layer.
# + colab={} colab_type="code" id="71_E0CAUcj1n"
from deepchem.models.layers import GraphConv, GraphPool, GraphGather
import tensorflow as tf
import tensorflow.keras.layers as layers
batch_size = 100
class MyGraphConvModel(tf.keras.Model):
    """Graph-convolutional network for multitask classification.

    Two GraphConv/BatchNorm/GraphPool blocks, a dense layer, a GraphGather
    readout that combines node-level vectors into one vector per molecule,
    and a per-task 2-way softmax head. Relies on the module-level
    `batch_size` and `n_tasks` defined earlier in the notebook.
    """

    def __init__(self):
        super(MyGraphConvModel, self).__init__()
        # First convolution block.
        self.gc1 = GraphConv(128, activation_fn=tf.nn.tanh)
        self.batch_norm1 = layers.BatchNormalization()
        self.gp1 = GraphPool()
        # Second convolution block.
        self.gc2 = GraphConv(128, activation_fn=tf.nn.tanh)
        self.batch_norm2 = layers.BatchNormalization()
        self.gp2 = GraphPool()
        # Dense layer, then readout across all nodes of each molecule.
        self.dense1 = layers.Dense(256, activation=tf.nn.tanh)
        self.batch_norm3 = layers.BatchNormalization()
        self.readout = GraphGather(batch_size=batch_size, activation_fn=tf.nn.tanh)
        # Two logits (per-class scores) for each task.
        self.dense2 = layers.Dense(n_tasks*2)
        self.logits = layers.Reshape((n_tasks, 2))
        self.softmax = layers.Softmax()

    def call(self, inputs):
        # inputs[0] is the atom feature matrix; inputs[1:] carry the graph
        # topology (degree slices, membership, adjacency lists) and are
        # re-attached after every per-node transformation.
        gc1_output = self.gc1(inputs)
        batch_norm1_output = self.batch_norm1(gc1_output)
        gp1_output = self.gp1([batch_norm1_output] + inputs[1:])
        gc2_output = self.gc2([gp1_output] + inputs[1:])
        # BUG FIX: the second block previously reused self.batch_norm1; use
        # batch_norm2 (constructed above but never used) so each block keeps
        # its own normalization statistics.
        batch_norm2_output = self.batch_norm2(gc2_output)
        gp2_output = self.gp2([batch_norm2_output] + inputs[1:])
        dense1_output = self.dense1(gp2_output)
        batch_norm3_output = self.batch_norm3(dense1_output)
        readout_output = self.readout([batch_norm3_output] + inputs[1:])
        logits_output = self.logits(self.dense2(readout_output))
        return self.softmax(logits_output)
# + [markdown] colab_type="text" id="oC20PZiccj1p"
# We can now see more clearly what is happening. There are two convolutional blocks, each consisting of a `GraphConv`, followed by batch normalization, followed by a `GraphPool` to do max pooling. We finish up with a dense layer, another batch normalization, a `GraphGather` to combine the data from all the different nodes, and a final dense layer to produce the global output.
#
# Let's now create the DeepChem model which will be a wrapper around the Keras model that we just created. We will also specify the loss function so the model knows the objective to minimize.
# + colab={} colab_type="code" id="31Wr0t2zcj1q"
model = dc.models.KerasModel(MyGraphConvModel(), loss=dc.models.losses.CategoricalCrossEntropy())
# + [markdown] colab_type="text" id="Wz43oG9rcj1j"
# What are the inputs to this model? A graph convolution requires a complete description of each molecule, including the list of nodes (atoms) and a description of which ones are bonded to each other. In fact, if we inspect the dataset we see that the feature array contains Python objects of type `ConvMol`.
# -
test_dataset.X[0]
# Models expect arrays of numbers as their inputs, not Python objects. We must convert the `ConvMol` objects into the particular set of arrays expected by the `GraphConv`, `GraphPool`, and `GraphGather` layers. Fortunately, the `ConvMol` class includes the code to do this, as well as to combine all the molecules in a batch to create a single set of arrays.
#
# The following code creates a Python generator that given a batch of data generates the lists of inputs, labels, and weights whose values are Numpy arrays. `atom_features` holds a feature vector of length 75 for each atom. The other inputs are required to support minibatching in TensorFlow. `degree_slice` is an indexing convenience that makes it easy to locate atoms from all molecules with a given degree. `membership` determines the membership of atoms in molecules (atom `i` belongs to molecule `membership[i]`). `deg_adjs` is a list that contains adjacency lists grouped by atom degree. For more details, check out the [code](https://github.com/deepchem/deepchem/blob/master/deepchem/feat/mol_graphs.py).
# + colab={} colab_type="code" id="o-cPAG0I8Tc4"
from deepchem.metrics import to_one_hot
from deepchem.feat.mol_graphs import ConvMol
import numpy as np
def data_generator(dataset, epochs=1):
    """Yield (inputs, labels, weights) batches in the array format expected
    by the GraphConv/GraphPool/GraphGather layers.

    `inputs` is [atom_features, deg_slice, membership, *deg_adjacency_lists];
    labels are one-hot encoded per task; weights are passed through as-is.
    """
    batches = dataset.iterbatches(batch_size, epochs,
                                  deterministic=False, pad_batches=True)
    for X_b, y_b, w_b, ids_b in batches:
        # Merge all molecules of the batch into one multi-molecule graph.
        mol = ConvMol.agglomerate_mols(X_b)
        inputs = [mol.get_atom_features(), mol.deg_slice,
                  np.array(mol.membership)]
        # Adjacency lists grouped by atom degree; index 0 (degree-0 atoms)
        # is skipped, matching what the layers expect.
        inputs.extend(mol.get_deg_adjacency_lists()[1:])
        labels = [to_one_hot(y_b.flatten(), 2).reshape(-1, n_tasks, 2)]
        weights = [w_b]
        yield (inputs, labels, weights)
# + [markdown] colab_type="text" id="VSTbjm9Hcj1v"
# Now, we can train the model using `fit_generator(generator)` which will use the generator we've defined to train the model.
# + colab={"base_uri": "https://localhost:8080/", "height": 245} colab_type="code" id="59WW4rhwcj1w" outputId="660ecb20-a2f4-4ae5-e0c8-bc72e309ee72"
model.fit_generator(data_generator(train_dataset, epochs=50))
# + [markdown] colab_type="text" id="skrL9YEEcj13"
# Now that we have trained our graph convolutional method, let's evaluate its performance. We again have to use our defined generator to evaluate model performance.
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="f3prNsgGcj14" outputId="dc95fbba-f5bf-4f7b-8d56-efdc37345d80"
print('Training set score:', model.evaluate_generator(data_generator(train_dataset), [metric], transformers))
print('Test set score:', model.evaluate_generator(data_generator(test_dataset), [metric], transformers))
# + [markdown] colab_type="text" id="tvOYgj52cj16"
# Success! The model we've constructed behaves nearly identically to `GraphConvModel`. If you're looking to build your own custom models, you can follow the example we've provided here to do so. We hope to see exciting constructions from your end soon!
# + [markdown] colab_type="text" id="j1FrVn88cj17"
# # Congratulations! Time to join the Community!
#
# Congratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:
#
# ## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem)
# This helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.
#
# ## Join the DeepChem Gitter
# The DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!
| examples/tutorials/06_Introduction_to_Graph_Convolutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: exatrkx
# language: python
# name: exatrkx
# ---
import os
os.environ['TRKXINPUTDIR'] = '/global/cfs/cdirs/m3443/data/trackml-kaggle/train_all'
os.environ['TRKXOUTPUTDIR'] = '/global/cfs/projectdirs/m3443/usr/caditi97/iml2020/graph_plots'
# + tags=[]
# system import
import pkg_resources
import yaml
import pprint
import random
random.seed(1234)
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt
# %matplotlib widget
# 3rd party
import torch
from torch_geometric.data import Data
from trackml.dataset import load_event
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
# local import
# from heptrkx.dataset import event as master
from exatrkx import config_dict # for accessing predefined configuration files
from exatrkx import outdir_dict # for accessing predefined output directories
from exatrkx.src import utils_dir
# for preprocessing
from exatrkx import FeatureStore
from exatrkx.src import utils_torch
# for embedding
from exatrkx import LayerlessEmbedding
from exatrkx import EmbeddingInferenceCallback
# for filtering
from exatrkx import VanillaFilter
from exatrkx import FilterInferenceCallback
# -
embed_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/embedding/checkpoints/epoch=10.ckpt'
filter_ckpt_dir = '/global/cfs/cdirs/m3443/data/lightning_models/filtering/checkpoints/epoch=54.ckpt'
outdir = 'view_embedding'
evtid = 1000
n_pids = 10
# event_file = os.path.join(os.environ['TRKXINPUTDIR'], 'event{:09}'.format(evtid))
event_file = f'/global/cfs/cdirs/m3443/data/trackml-kaggle/train_10evts/event00000{evtid}'
# event = master.Event(os.environ['TRKXINPUTDIR'])
# event.read(evtid)
import trackml
from trackml.dataset import load_event
hits, particles, truth, cells = trackml.dataset.load_event(event_file, parts=['hits', 'particles', 'truth', 'cells'])
hits = hits.merge(truth, on='hit_id', how='left')
hits = hits.merge(particles, on='particle_id', how='left')
pids = particles[(particles.nhits) > 5]
np.random.seed(456)
rnd = np.random.randint(0, pids.shape[0], n_pids)
print("random idx: ", rnd)
sel_pids = pids.particle_id.values[rnd]
hits = hits[hits.particle_id.isin(sel_pids)]
# hits = cluster_info(utils_dir.detector_path)
# + jupyter={"outputs_hidden": true} tags=[]
hits.columns
# + jupyter={"outputs_hidden": true} tags=[]
hits.shape
# +
# Distance of each hit from its particle's production vertex (vx, vy, vz);
# used to order hits along each particle's trajectory.
hits = hits.assign(R=np.sqrt((hits.x - hits.vx)**2 + (hits.y - hits.vy)**2 + (hits.z - hits.vz)**2))
hits = hits.sort_values('R').reset_index(drop=True).reset_index(drop=False)
# For each particle, collect the R-ordered row indices of its hits.
# NOTE(review): the double groupby/agg produces nested lists per particle —
# confirm the resulting structure matches what the edge loop below assumes.
hit_list = hits.groupby(['particle_id'], sort=False)['index'].agg(lambda x: list(x)).groupby(level=0).agg(lambda x: list(x))
e = []
# Connect consecutive hit groups of the same particle: product() forms all
# (source, target) index pairs between one group and the next, i.e. the
# ground-truth edges of the track graph.
for row in hit_list.values:
    for i, j in zip(row[0:-1], row[1:]):
        e.extend(list(itertools.product(i, j)))
# Transpose to shape (2, n_edges): row 0 = source indices, row 1 = targets.
layerless_true_edges = np.array(e).T
# + tags=[]
data = Data(x=torch.from_numpy(hits[['r', 'phi', 'z']].to_numpy()/np.array([1000, np.pi, 1000])).float(),\
pid=torch.from_numpy(hits.particle_id.to_numpy()),
layers=torch.from_numpy(hits.layer.to_numpy()), hid=torch.from_numpy(hits.hit_id.to_numpy()))
# -
cell_features = ['cell_count', 'cell_val', 'leta', 'lphi', 'lx', 'ly', 'lz', 'geta', 'gphi']
data.layerless_true_edges = torch.from_numpy(layerless_true_edges)
data.cell_data = torch.from_numpy(hits[cell_features].values).float()
data
# ### Evaluating Embedding
# +
action = 'embedding'
config_file = pkg_resources.resource_filename(
"exatrkx",
os.path.join('configs', config_dict[action]))
with open(config_file) as f:
e_config = yaml.load(f, Loader=yaml.FullLoader)
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(e_config)
# -
e_config['train_split'] = [1, 0, 0]
e_config['r_val'] = 2.0
e_model = LayerlessEmbedding(e_config)
e_model = e_model.load_from_checkpoint(embed_ckpt_dir, hparams=e_config)
e_model.eval()
spatial = e_model(torch.cat([data.cell_data, data.x], axis=-1))
spatial.shape
spatial_np = spatial.detach().numpy()
data.pid
# +
fig = plt.figure(figsize=(6,6))
for pid in sel_pids:
idx = hits.particle_id == pid
plt.scatter(spatial_np[idx, 0], spatial_np[idx, 1])
plt.savefig(os.path.join(outdir, "embedding_0_1.pdf"))
# -
fig = plt.figure(figsize=(6,6))
for pid in sel_pids:
idx = hits.particle_id == pid
plt.scatter(spatial_np[idx, 2], spatial_np[idx, 3])
plt.savefig(os.path.join(outdir, "embedding_2_3.pdf"))
fig = plt.figure(figsize=(6,6))
for pid in sel_pids:
idx = hits.particle_id == pid
plt.scatter(spatial_np[idx, 4], spatial_np[idx, 5])
plt.savefig(os.path.join(outdir, "embedding_4_5.pdf"))
fig = plt.figure(figsize=(6,6))
for pid in sel_pids:
idx = hits.particle_id == pid
plt.scatter(spatial_np[idx, 6], spatial_np[idx, 7])
plt.savefig(os.path.join(outdir, "embedding_6_7.pdf"))
e_spatial = utils_torch.build_edges(spatial, e_model.hparams['r_val'], e_model.hparams['knn_val'])
e_spatial.shape
e_spatial[:, 0]
e_spatial_np = e_spatial.detach().numpy()
# Peek at two hits to sanity-check the ordering.
hits.iloc[[0, 2]].head()
# BUG FIX: `event` is never defined in this notebook (the heptrkx Event
# construction above is commented out), so the original line raised a
# NameError. Use the `particles` DataFrame loaded via trackml instead.
particles[particles.particle_id.isin(sel_pids)]
# x-coordinates of the two endpoints of the first embedding-built edge.
hits.iloc[[e_spatial[0, 0], e_spatial[0, 1]]].x.values
e_spatial_np[0, 0], e_spatial_np[1, 0]
# +
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
for pid in sel_pids:
ax.scatter(hits[hits.particle_id == pid].x.values, hits[hits.particle_id == pid].y.values, hits[hits.particle_id == pid].z.values)
# add edges
e_spatial_np_t = e_spatial_np.T
for iedge in range(e_spatial_np.shape[1]):
ax.plot(hits.iloc[e_spatial_np_t[iedge]].x.values, hits.iloc[e_spatial_np_t[iedge]].y.values, hits.iloc[e_spatial_np_t[iedge]].z.values, color='k', alpha=0.3, lw=1.)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.savefig(os.path.join(outdir, "emedding_edges_3d.pdf"))
# +
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
for pid in sel_pids:
ax.scatter(hits[hits.particle_id == pid].x.values, hits[hits.particle_id == pid].y.values)
# add edges
e_spatial_np_t = e_spatial_np.T
for iedge in range(e_spatial_np.shape[1]):
ax.plot(hits.iloc[e_spatial_np_t[iedge]].x.values, hits.iloc[e_spatial_np_t[iedge]].y.values, color='k', alpha=0.3, lw=2.)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.savefig(os.path.join(outdir, "embedding_edges_x_y.pdf"))
# +
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
for pid in sel_pids:
ax.scatter(hits[hits.particle_id == pid].z.values, hits[hits.particle_id == pid].r.values)
# add edges
e_spatial_np_t = e_spatial_np.T
for iedge in range(e_spatial_np.shape[1]):
ax.plot(hits.iloc[e_spatial_np_t[iedge]].z.values, hits.iloc[e_spatial_np_t[iedge]].r.values, color='k', alpha=0.3, lw=1.)
ax.set_xlabel('z')
ax.set_ylabel('r')
plt.savefig(os.path.join(outdir, "embedding_edges_z_r.pdf"))
# -
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.scatter(hits.x.values, hits.y.values)
plt.savefig(os.path.join(outdir, "embedding_hits_x_y.pdf"))
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.scatter(hits.z.values, hits.r.values)
plt.savefig(os.path.join(outdir, "embedding_hits_z_r.pdf"))
# +
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
for pid in sel_pids:
ax.scatter(hits[hits.particle_id == pid].x.values, hits[hits.particle_id == pid].y.values)
plt.savefig(os.path.join(outdir, "embedding_hits_truth_x_y.pdf"))
# +
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
for pid in sel_pids:
ax.scatter(hits[hits.particle_id == pid].z.values, hits[hits.particle_id == pid].r.values)
plt.savefig(os.path.join(outdir, "embedding_hits_truth_z_r.pdf"))
# +
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
for pid in sel_pids:
ax.scatter(hits[hits.particle_id == pid].x.values, hits[hits.particle_id == pid].y.values)
# add edges
for iedge in range(e_spatial_np.shape[1]):
ax.plot(hits.iloc[e_spatial_np_t[iedge]].x.values, hits.iloc[e_spatial_np_t[iedge]].y.values, color='k', alpha=0.3, lw=2.)
ax.set_axis_off()
plt.savefig(os.path.join(outdir, "embedding_front.pdf"))
| notebooks/checks/graph_plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="uFLiavR4mQxr"
# # Import necessary packages
# + id="51NOyEN8nBJD"
# %%capture
# !pip3 install seaborn
# + id="mSxhe0TkjpmT" outputId="7374558b-045a-4812-9941-a8fe3839a2fc" colab={"base_uri": "https://localhost:8080/", "height": 69}
#@title Load the Universal Sentence Encoder's TF Hub module
from absl import logging
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4" #@param ["https://tfhub.dev/google/universal-sentence-encoder/4", "https://tfhub.dev/google/universal-sentence-encoder-large/5"]
model = hub.load(module_url)
print ("module %s loaded" % module_url)
def embed(texts):
    """Return Universal Sentence Encoder embeddings for a batch of strings.

    `texts` is a sequence of strings; the TF-Hub `model` loaded above maps it
    to a float tensor with one embedding row per input string. (Parameter
    renamed from `input`, which shadowed the builtin; callers in this
    notebook pass it positionally, so the rename is backward-compatible.)
    """
    return model(texts)
# + [markdown] id="eEG7181FmEZJ"
# # Embed The Two Text messages
# + id="QKKsUgLtlHZJ"
msg1 = "Robin is Batman's sidekick"
msg2 = "Batman's sidekick is not Robin"
messages = [msg1, msg2]
# Reduce logging output.
logging.set_verbosity(logging.ERROR)
message_embeddings = embed(messages)
# + [markdown] id="lgUvnKM_mdFV"
# # Compare the encoding
# + id="8xdStDrhmBkM" outputId="f1e84427-3193-416d-fdf3-96ca30a0d715" colab={"base_uri": "https://localhost:8080/", "height": 478}
def plot_similarity(labels, features, rotation):
    """Draw a heatmap of pairwise inner products between embeddings.

    Each cell (i, j) is the inner product of features[i] and features[j];
    `labels` annotates both axes (x labels rotated by `rotation` degrees).
    Returns the similarity matrix.
    """
    similarity = np.inner(features, features)
    sns.set(font_scale=1.2)
    heatmap = sns.heatmap(
        similarity,
        xticklabels=labels,
        yticklabels=labels,
        vmin=0,
        vmax=1,
        cmap="YlOrRd")
    heatmap.set_xticklabels(labels, rotation=rotation)
    heatmap.set_title("Semantic Textual Similarity")
    return similarity
corr = plot_similarity(messages, message_embeddings, 90)
# + id="cIqks6M4MkU5" outputId="85d5126e-0169-46ae-ae8a-9b6caca27653" colab={"base_uri": "https://localhost:8080/", "height": 52}
#@title while Batman's sidekick is Robin
# NOTE(review): `corr` was computed once above from the original message pair;
# this cell and the next therefore print the *same* matrix. To actually compare
# the "not Robin" wording, the messages would need to be re-embedded and
# re-scored before the second print.
print(corr)
# + id="yny0RVT4NO2-" outputId="e52796e4-99ab-4a92-ce29-005031ae2d8a" colab={"base_uri": "https://localhost:8080/", "height": 52}
#@title while Batman's sidekick is not Robin
# NOTE(review): identical output to the previous cell — `corr` is unchanged.
print(corr)
| Sentence_similarity_hackathon_AP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: research
# language: python
# name: research
# ---
# # Set up FEAT for beta extraction
# Set up analysis and paths
# %matplotlib inline
projecttitle = 'Analogy'
import sys, os
if sys.platform == 'darwin':
homedir = os.path.join("/Users", "njchiang")
sys.path.append(os.path.join(homedir, "GitHub", "task-fmri-utils"))
sys.path.append(os.path.join(homedir, "GitHub", "tikhonov"))
elif sys.platform == "linux":
homedir = os.path.join("/home", "njchiang", "data")
sys.path.append(os.path.join(homedir, "GitHub", "task-fmri-utils"))
sys.path.append(os.path.join(homedir, "GitHub", "tikhonov"))
else:
homedir = os.path.join("D:\\")
sys.path.append(os.path.join(homedir, "GitHub", "task-fmri-utils"))
sys.path.append(os.path.join(homedir, "GitHub", "tikhonov"))
# imports
from fmri_core import analysis as pa
from fmri_core import utils as pu
from fmri_core import vis as pv
# load configuration
os.chdir(os.path.join(homedir, "CloudStation", "Grad", "Research", "montilab-ucla"))
projectSettings = pu.load_config(os.path.join('analogy', 'config', 'project.json'))
analysisSettings = pu.load_config(os.path.join('analogy', 'config', 'analyses.json'))
if sys.platform == 'darwin':
paths = projectSettings['filepaths']['osxPaths']
elif sys.platform == "linux":
paths = projectSettings["filepaths"]["linuxPaths"]
else:
paths = projectSettings['filepaths']['winPaths']
for sub, runs in projectSettings["subjects"].items():
destination_dir = "/mnt/d/fmri/Analogy/derivatives/{}/misc/regressors/LSA".format(sub)
# os.mkdir(destination_dir)
os.chdir(destination_dir)
for run in runs:
labels = pu.load_labels("/home/njchiang/data/fmri/Analogy",
'data', sub, 'func',
pu.format_bids_name(sub, 'task-analogy', run, "events.tsv"),
sep='\t')
for i, r in labels.iterrows():
# if r["AB"] == 1:
# tt = "AB"
# elif r["CD"] == 1:
# tt = "CD"
# elif r["Probe"] == 1:
# tt = "Probe"
# fname = "{}_{}.txt".format(r["TrialTag"], tt).replace(":", "-")
fname = "{}_trial_{}.txt".format(run, i)
with open(fname, "w") as f:
f.write("{} {} {}".format(r["onset"], r["duration"], r["intensity"]))
sub="sub-16"
labels = pu.load_labels("/home/njchiang/data/fmri/Analogy",
'data', sub, 'func',
pu.format_bids_name(sub, 'task-analogy', 'run-01', "events.tsv"),
sep='\t')
for i, r in labels.iterrows():
print(i)
# +
# replace "-" with ":"
# replace "###NAMEX### with the trial
def replacetext(template, replacements, output):
    """Copy `template` to `output`, substituting placeholders line by line.

    Parameters
    ----------
    template : str
        Path of the template file to read (e.g. an FSL .fsf template).
    replacements : dict
        Mapping of placeholder -> replacement text,
        e.g. {'###SUB###': 'sub-01', '###RUN###': 'run-02'}.
    output : str
        Path of the file to write.

    Notes
    -----
    Every occurrence of every key is replaced; lines without placeholders
    are copied unchanged. (Removed the redundant bare `return`.)
    """
    with open(template, 'r') as infile, open(output, 'w') as outfile:
        for line in infile:
            for src, target in replacements.items():
                line = line.replace(src, target)
            outfile.write(line)
# def LSA_replacements(labels, replacements={}):
# # template: ###NAMEX### ==> ### TrialTag ###
# for i, r in labels.iterrows():
# if r["AB"] == 1:
# tt = "AB"
# elif r["CD"] == 1:
# tt = "CD"
# elif r["Probe"] == 1:
# tt = "Probe"
# replacements["###NAME{}###".format(i+1)] = "{}_{}".format(r["TrialTag"], tt).replace(":", "-")
# return replacements
def replace_subject(sub, run):
    """Build the .fsf placeholder map for one subject/run.

    Returns a dict with the subject and run tags plus one ###NAMEi###
    entry (1-based) per trial found in the run's BIDS events file.
    """
    labels = pu.load_labels("/home/njchiang/data/fmri/Analogy",
                            'data', sub, 'func',
                            pu.format_bids_name(sub, 'task-analogy', run, "events.tsv"),
                            sep='\t')
    mapping = {"###SUB###": sub, "###RUN###": run}
    # One placeholder per trial row; names are 1-indexed to match the template.
    mapping.update({
        "###NAME{}###".format(idx + 1): "{}_trial_{}".format(run, idx)
        for idx in range(len(labels))
    })
    return mapping
# -
template = "/home/njchiang/data/fmri/Analogy/derivatives/standard/templates/LSA-template.fsf"
for sub, runs in projectSettings["subjects"].items():
for run in runs:
replacements = replace_subject(sub, run)
outfile = "/home/njchiang/data/fmri/Analogy/derivatives/{}/betas/{}".format(sub, pu.format_bids_name(sub, 'task-analogy', run, "LSA.fsf"))
replacetext(template, replacements, outfile)
# + language="bash"
# # adjust vols
# cd /home/njchiang/data/fmri/Analogy/derivatives/
# for s in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16
# do
# for r in 01 02 03 04 05 06 07 08
# do
# vol=`fslval ../data/sub-${s}/func/sub-${s}_task-analogy_run-${r}_bold.nii.gz dim4`
# sed -i -e "s/###VOL###/${vol}/g" sub-${s}/betas/sub-${s}_task-analogy_run-${r}_LSA.fsf
# done
# done
# + language="bash"
# # upload files
# cd /home/njchiang/data/fmri/Analogy/derivatives/
# for s in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16
# do
# echo sub-${s}
# scp sub-${s}/betas/*.fsf n<EMAIL>:/u/project/monti/Analysis/Analogy/derivatives/sub-${s}/betas/
# done
# + language="bash"
# # upload files
# cd /home/njchiang/data/fmri/Analogy/derivatives/
# for s in 01 02 03 04 05
# do
# echo sub-${s}
# rsync -av sub-${s}/betas n<EMAIL>:/u/project/monti/Analysis/Analogy/derivatives/sub-${s}
# rsync -av sub-${s}/misc/regressors/LSA n<EMAIL>:/u/project/monti/Analysis/Analogy/derivatives/sub-${s}/misc/regressors
# done
#
#
# + language="bash"
# cd /home/njchiang/data/fmri/Analogy/derivatives/
#
# for s in 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16
# do
# echo sub-${s}
# scp sub-${s}/betas/*.fsf <EMAIL>:/u/project/monti/Analysis/Analogy/derivatives/sub-${s}/betas
# scp -r sub-${s}/misc/regressors/LSA <EMAIL>:/u/project/monti/Analysis/Analogy/derivatives/sub-${s}/misc/regressors
# done
# -
# # SCRATCH
os.mkdir("/home/njchiang/data/fmri/Analogy/derivatives/{}/misc/LSA".format(sub))
os.system("flirt --version")
for i, r in labels.iterrows():
if r["AB"] == 1:
tt = "AB"
elif r["CD"] == 1:
tt = "CD"
elif r["Probe"] == 1:
tt = "Probe"
fname = "{}_{}.txt".format(r["TrialTag"], tt)
# destination_dir = "/tmp"
destination_dir = "/home/njchiang/data/fmri/Analogy/derivatives/{}/misc/regressors/LSA".format(sub)
with open(os.path.join(destination_dir, fname), "w") as f:
f.write("{} {} {}".format(r["onset"], r["duration"], r["intensity"]))
labels.TrialTag.unique()
# + language="bash"
# ls
# -
# # postprocess:
# 1. map each example_func to BOLD_template
# 2. concatenate copes
# 3. register copes to BOLD_template
# 4. profit!
| notebooks/draft/Setup-LSA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS-109B Advanced Data Science
# ## Lab 8: Recurrent Neural Networks
#
# **Harvard University**<br>
# **Fall 2020**<br>
# **Instructors:** <NAME>, <NAME>, and <NAME><br>
# **Lab Instructors:** <NAME> and <NAME><br>
# **Content:** <NAME>, <NAME>, <NAME>
# RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES
# Fetch the course stylesheet and render it so the exercise/discussion
# cells below are visually highlighted in the notebook.
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text
HTML(styles)
# ## Learning Goals
#
# In this lab we will look at Recurrent Neural Networks (RNNs), LSTMs and their building blocks.
#
# By the end of this lab, you should:
#
# - know how to put together the building blocks used in RNNs and its variants (GRU, LSTM) in `keras` with an example.
# - have a good understanding of how sequences -- any data that has some temporal semantics (e.g., time series, natural language, images etc.) -- fit into and benefit from a recurrent architecture
# - be familiar with preprocessing text and dynamic embeddings
# - be familiar with gradient issues on RNNs processing longer sentence lengths
# - understand different kinds of LSTM architectures, classifiers, sequence to sequence models and their far-reaching applications
#
# ## 1. IMDb Review Classification: Feedforward, CNN, RNN, LSTM
#
#
# In this task, we are going to do sentiment classification on a movie review dataset. We are going to build a feedforward net, a convolutional neural net, a recurrent net and combine one or more of them to understand performance of each of them. A sentence can be thought of as a sequence of words that collectively represent meaning. Individual words impact the meaning. Thus, the context matters; words that occur earlier in the sentence influence the sentence's structure and meaning in the latter part of the sentence (e.g., Jose asked Anqi if she were going to the library today). Likewise, words that occur later in a sentence can affect the meaning of earlier words (e.g., Apple is an interesting company). As we have seen in lecture, if we wish to make use of a full sentence's context in both directions, then we should use a bi-directional RNN (e.g., Bi-LSTM). For the purpose of this tutorial, we are going to restrict ourselves to only uni-directional RNNs.
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, SimpleRNN
from keras.layers.embeddings import Embedding
from keras.layers import Flatten
from keras.preprocessing import sequence
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
import numpy as np
# fix random seed for reproducibility
numpy.random.seed(1)
# +
# Cap the vocabulary so the embedding/word matrices don't grow arbitrarily
# large; tokens outside the top `vocabulary_size` are dropped by the loader.
vocabulary_size = 10000
# Cap review length so we don't have to process really long sentences;
# shorter reviews are padded and longer ones truncated to this length.
max_review_length = 500
# -
# Computers have no built-in knowledge of words or their meanings and cannot understand them in any rich way that humans do -- hence, the purpose of Natural Language Processing (NLP). As with any data science, computer science, machine learning task, the first crucial step is to clean (pre-process) your data so that you can soundly make use of it. Within NLP, this first step is called Tokenization and it concerns how to represent each token (a.k.a. word) of your corpus (i.e., dataset).
#
# #### TOKENIZATION
#
# A ``token`` refers to a single, atomic unit of meaning (i.e., a word). How should our computers represent each word? We could read in our corpus word by word and store each word as a String (data structure). However, Strings tend to use more computer memory than Integers and can become cumbersome. As long as we preserve the uniqueness of the tokens and are consistent, we are better off converting each distinct word to a distinct number (Integer). This is standard practice within NLP / computer science / data science, etc.
# As a simple example of tokenization, we can see a small example.
#
# If the five sentences below were our entire corpus, our conversion would look as follows:
#
# 1. i have books - [1, 4, 7]
# 2. interesting books are useful [10,2,9,8]
# 3. i have computers [1,4,6]
# 4. computers are interesting and useful [6,9,11,10,8]
# 5. books and computers are both valuable. [2,10,2,9,13,12]
# 6. Bye Bye [7,7]
#
# Create tokens for vocabulary based on frequency of occurrence. Hence, we assign the following tokens
#
# I-1, books-2, computers-3, have-4, are-5, computers-6,bye-7, useful-8, are-9, and-10,interesting-11, valuable-12, both-13
#
# Thankfully, our dataset is already represented in such a tokenized form.
#
# **NOTE:** Often times, depending on your NLP task, it is useful to also perform other pre-processing, cleaning steps, such as:
# - treating each punctuation mark as a token (e.g., , . ! ? are each separate tokens)
# - lower-casing all words (so that a given word isn't treated differently just because it starts a sentence or not)
# - separating each sentence with a unique symbol (e.g., <S> and </S>)
# - removing words that are incredibly common (e.g., function words, (in)definite articles). These are referred to as 'stopwords'). For language modelling, we DO NOT remove stopwords. A sentence's meaning needs to include all of the original words.
# #### Load data
# +
# Load the IMDb reviews, already tokenized to integer ids, keeping only the
# `vocabulary_size` most frequent tokens. Labels are binary sentiment.
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocabulary_size)
print('Number of reviews', len(X_train))
print('Length of first and fifth review before padding', len(X_train[0]) ,len(X_train[4]))
print('First review', X_train[0])
print('First label', y_train[0])
# -
# #### Preprocess data
#
# If we were training our RNN one sentence at a time, it would be okay to have sentences of varying lengths. However, as with any neural network, it can be sometimes be advantageous to train inputs in batches. When doing so with RNNs, our input tensors need to be of the same length/dimensions. Thus, let's pad our sentences.
# Pad/truncate every review to `max_review_length` tokens so batched input
# tensors are rectangular.
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
print('Length of first and fifth review after padding', len(X_train[0]) ,len(X_train[4]))
# ### MODEL 1A : FEED-FORWARD NETWORKS WITHOUT EMBEDDINGS
#
# Let us build a single-layer feed-forward net with a hidden layer of 250 nodes. Each input would be a 500-dim vector of tokens since we padded all our sequences to size 500.
#
# <br>
# <div class="exercise" style="background-color:#b3e6ff">
# <b>EXERCISE</b>: Calculate the number of parameters involved in this network and implement a feedforward net to do classification without looking at cells below.
# </div>
# +
model = Sequential()
# Single hidden layer applied to the raw (padded) integer token ids — no
# embedding, so the ids are treated as if they were continuous features.
model.add(Dense(250, activation='relu',input_dim=max_review_length))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, batch_size=128, verbose=2)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
# -
# <div class="discussion" style="background-color:#F5E4C3">
# <b>Discussion:</b> Why was the performance bad? What was wrong with tokenization?
# </div>
#
# ### MODEL 1B : FEED-FORWARD NETWORKS WITH EMBEDDINGS
#
# #### What is an embedding layer ?
#
# An embedding is a "distributed representation" (e.g., vector) of a particular atomic item (e.g., word token, object, etc). When representing items by embeddings:
# - each distinct item should be represented by its own unique embedding
# - the semantic similarity between items should correspond to the similarity between their respective embeddings (i.e., words that are more similar to one another should have embeddings that are more similar to each other).
#
# There are essentially an infinite number of ways to create such embeddings, and since these representations have such a great influence on the performance of our models, there has been an incredible amount of research dedicated to this very aspect. If you are interested in learning more, start with the astronomically impactful papers of [word2vec](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) and [GloVe](https://www.aclweb.org/anthology/D14-1162.pdf).
#
# In general, though, one can view the embedding process as a linear projection from one vector space to another (e.g., a vector space of unique words being mapped to a world of fixed-length, dense vectors filled with continuous-valued numbers. For NLP, we usually use embeddings to project the one-hot encodings of words (i.e., a vector that is the length of the entire vocabulary, and it is filled with all zeros except for a single value of 1 that corresponds to the particular word) on to a lower-dimensional continuous space (e.g., vectors of size 100) so that the input surface is dense and possibly smooth. Thus, one can view this embedding layer process as just a transformation from $\mathbb{R}^{inp}$ to $\mathbb{R}^{emb}$
# Dimensionality of the learned word vectors.
embedding_dim = 100
# +
model = Sequential()
# inputs will be converted from batch_size * sentence_length to
# batch_size * sentence_length * embedding_dim
model.add(Embedding(vocabulary_size, embedding_dim, input_length=max_review_length))
# Flatten the (sentence_length x embedding_dim) matrix into one long vector
# before the dense layers.
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# +
# fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, batch_size=128, verbose=2)
# evaluate the model on the test set
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
# -
# ### MODEL 2 : CONVOLUTIONAL NEURAL NETWORKS (CNN)
# Text can be thought of as 1-dimensional sequence (a single, long vector) and we can apply 1D Convolutions over a set of word embeddings. Let us walk through convolutions on text data with [this blog](http://debajyotidatta.github.io/nlp/deep/learning/word-embeddings/2016/11/27/Understanding-Convolutions-In-Text/). If you want to learn more, read this [published and well-cited paper](https://www.aclweb.org/anthology/I17-1026.pdf) from my friend, <NAME>.
#
# Fit a 1D convolution with 200 filters, kernel size 3, followed by a feed-forward layer of 250 nodes, and ReLU and Sigmoid activations as appropriate.
# +
# # %load sol2.py
# -
# ### MODEL 3 : Simple RNN
#
# Two great blogs that are helpful for understanding the workings of a RNN and LSTM are
#
# 1. http://karpathy.github.io/2015/05/21/rnn-effectiveness/
# 2. http://colah.github.io/posts/2015-08-Understanding-LSTMs/
#
# At a high-level, an RNN is similar to a feed-forward neural network (FFNN) in that there is an input layer, a hidden layer, and an output layer. The input layer is fully connected to the hidden layer, and the hidden layer is fully connected to the output layer. However, the crux of what makes it a **recurrent** neural network is that the hidden layer for a given time _t_ is not only based on the input layer at time _t_ but also the hidden layer from time _t-1_.
#
# Mathematically, a simpleRNN can be defined by the following recurrence relation.
#
# <center>$h_t = \sigma(W([h_{t-1},x_{t}])+b)$
#
# If we extend this recurrence relation to the length of sequences we have in hand, our RNN architecture can be viewed as follows (this is also referred to as 'unrolling' the network):
#
# <img src="files/fig/LSTM_classification.jpg" width="400">
# +
# # %load sol3.py
# -
# #### RNNs and vanishing/exploding gradients
#
# Let us use sigmoid activations as example. Derivative of a sigmoid can be written as
# <center> $\sigma'(x) = \sigma(x) \cdot \sigma(1-x)$. </center>
#
# <img src = "files/fig/vanishing_gradients.png">
# <br>
#
# Remember, an RNN is a "really deep" feed-forward-esque network (when unrolled in time). Hence, backpropagation happens from $h_t$ all the way to $h_1$. Also realize that sigmoid gradients are multiplicatively dependent on the value of sigmoid. Hence, if the non-activated output of any layer $h_l$ is < 0, then $\sigma$ tends to 0, effectively "vanishing" the gradient. Any layer that the current layer backprops to $H_{1:L-1}$ does not learn anything useful out of the gradients.
#
# #### LSTMs and GRU
# LSTM and GRU are two sophisticated implementations of RNNs that have gates (one could say that their success hinges on using gates). A gate emits probability between 0 and 1. For instance, LSTM is built on these state updates:
#
# $L$ is a linear transformation $L(x) = W*x + b.$
#
# $f_t = \sigma(L([h_{t-1},x_t))$
#
# $i_t = \sigma(L([h_{t-1},x_t))$
#
# $o_t = \sigma(L([h_{t-1},x_t))$
#
# $\hat{C}_t = \tanh(L([h_{t-1},x_t))$
#
# $C_t = f_t * C_{t-1}+i_t*\hat{C}_t$ (Using the forget gate, the neural network can learn to control how much information it has to retain or forget)
#
# $h_t = o_t * \tanh(c_t)$
#
#
# ### MODEL 4 : LSTM
#
# Now, let's use an LSTM model to do classification! To make it a fair comparison to the SimpleRNN, let's start with the same architecture hyper-parameters (e.g., number of hidden nodes, epochs, and batch size). Then, experiment with increasing the number of nodes, stacking multiple layers, applying dropouts etc. Check the number of parameters that this model entails.
# +
# # %load sol4.py
# -
# ### MODEL 5 : CNN + LSTM
#
# CNNs are good at learning spatial features, and sentences can be thought of as 1-D spatial vectors (dimensionality is determined by the number of words in the sentence). We apply an LSTM over the features learned by the CNN (after a maxpooling layer). This leverages the power of CNNs and LSTMs combined! We expect the CNN to be able to pick out invariant features across the 1-D spatial structure (i.e., sentence) that characterize good and bad sentiment. This learned spatial features may then be learned as sequences by an LSTM layer, and the final classification can be made via a feed-forward connection to a single node.
# +
model = Sequential()
model.add(Embedding(vocabulary_size, embedding_dim, input_length=max_review_length))
model.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(X_train, y_train, epochs=3, batch_size=64)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
# -
# ### CONCLUSION
#
# We saw the power of sequence models and how they are useful in text classification. They give a solid performance, low memory footprint (thanks to shared parameters) and are able to understand and leverage the temporally connected information contained in the inputs. There is still an open debate about the performance vs memory benefits of CNNs vs RNNs in the research community.
#
# As a side-note and bit of history: how amazing is it that we can construct these neural networks, train them, and evaluate them in just a few lines of code?! Imagine if we didn't have deep learning libraries like Keras and Tensorflow; we'd have to write backpropagation and gradient descent by hand. Our last network could easily require thousands of lines of code and many hours of debugging. This is what many people did just 8 years ago, since deep learning wasn't common and the community hadn't yet written nicely packaged libraries. Many libraries have come and gone, but nowadays most people use either Tensorflow/Keras (by Google) or PyTorch (by Facebook). I expect them to remain as great libraries for the foreseeable future, so if you're interested in deep learning, it's worth the investment to learn one, or both, of them well.
# ## 2. 231+432 = 665.... It's not ? Let's ask our LSTM
#
# In this exercise, we are going to teach addition to our model. Given two numbers (<999), the model outputs their sum (<9999). The input is provided as a string '231+432' and the model will provide its output as ' 663' (Here the empty space is the padding character). We are not going to use any external dataset and are going to construct our own dataset for this exercise.
#
# The exercise we attempt to do effectively "translates" a sequence of characters '231+432' to another sequence of characters ' 663' and hence, this class of models are called sequence-to-sequence models (aka seq2seq). Such architectures have profound applications in several real-life tasks such as machine translation, summarization, image captioning etc.
#
# To be clear, sequence-to-sequence (aka seq2seq) models take as input a sequence of length N and return a sequence of length M, where N and M may or may not differ, and every single observation/input may be of different values, too. For example, machine translation concerns converting text from one natural language to another (e.g., translating English to French). Google Translate is an example, and their system is a seq2seq model. The input (e.g., an English sentence) can be of any length, and the output (e.g., a French sentence) may be of any length.
#
# **Background knowledge:** The earliest and most simple seq2seq model works by having one RNN for the input, just like we've always done, and we refer to it as being an "encoder." The final hidden state of the encoder RNN is fed as input to another RNN that we refer to as the "decoder." The job of the decoder is to generate each token, one word at a time. This may seem really limiting, as it relies on the encoder encapsulating the entire input sequence with just 1 hidden layer. It seems unrealistic that we could encode an entire meaning of a sentence with just one hidden layer. Yet, results even in this simplistic manner can be quite impressive. In fact, these early results were compelling enough that these models immediately replaced the decades of earlier machine translation work.
from __future__ import print_function
from keras.models import Sequential
from keras import layers
from keras.layers import Dense, RepeatVector, TimeDistributed
import numpy as np
from six.moves import range
# #### The less interesting data generation and preprocessing
class CharacterTable(object):
    """Bidirectional mapping between characters and one-hot vectors."""

    def __init__(self, chars):
        # Deduplicate and sort so every instance orders characters identically.
        self.chars = sorted(set(chars))
        self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
        self.indices_char = dict((i, c) for i, c in enumerate(self.chars))

    def encode(self, C, num_rows):
        """One-hot encode string C into a (num_rows, vocab_size) array."""
        onehot = np.zeros((num_rows, len(self.chars)))
        for row, ch in enumerate(C):
            onehot[row, self.char_indices[ch]] = 1
        return onehot

    def decode(self, x, calc_argmax=True):
        """Turn a one-hot array (or index sequence) back into a string."""
        if calc_argmax:
            x = x.argmax(axis=-1)
        return ''.join(self.indices_char[idx] for idx in x)
# +
TRAINING_SIZE = 50000         # number of addition examples to generate
DIGITS = 3                    # each operand has at most this many digits
MAXOUTPUTLEN = DIGITS + 1     # sum of two 3-digit numbers has at most 4 digits
MAXLEN = DIGITS + 1 + DIGITS  # longest question string: "ddd+ddd"
chars = '0123456789+ '        # full vocabulary; space is the padding character
ctable = CharacterTable(chars)
# +
def return_random_digit():
    """Return one uniformly random digit as a single-character string."""
    return np.random.choice(list('0123456789'))

# generate a new number with up to `DIGITS` digits
def generate_number():
    """Draw a random non-negative integer with 1..DIGITS random digits."""
    length = np.random.randint(1, DIGITS + 1)
    digits = [return_random_digit() for _ in range(length)]
    return int(''.join(digits))
# generate `num_examples` pairs of random numbers and their padded string forms
def data_generate(num_examples):
    """Generate unique addition questions with their answers.

    Returns (questions, answers): questions are fixed-width strings like
    'ddd+ddd' space-padded to MAXLEN; answers are the space-padded sums
    (width MAXOUTPUTLEN).
    """
    questions = []
    answers = []
    seen = set()
    print('Generating data...')
    # BUGFIX: loop until `num_examples` items exist; previously the global
    # TRAINING_SIZE was used and the parameter was silently ignored.
    while len(questions) < num_examples:
        a, b = generate_number(), generate_number()
        # don't allow duplicates; this is good practice for training,
        # as we will minimize memorizing seen examples
        key = tuple(sorted((a, b)))
        if key in seen:
            continue
        seen.add(key)
        # pad the data with spaces so that the length is always MAXLEN.
        q = '{}+{}'.format(a, b)
        query = q + ' ' * (MAXLEN - len(q))
        ans = str(a + b)
        # answers can be of maximum size DIGITS + 1.
        ans += ' ' * (MAXOUTPUTLEN - len(ans))
        questions.append(query)
        answers.append(ans)
    print('Total addition questions:', len(questions))
    return questions, answers
def encode_examples(questions, answers):
    """One-hot encode question/answer strings and shuffle them in unison.

    Returns (x, y): boolean arrays of shape (N, MAXLEN, vocab) and
    (N, DIGITS + 1, vocab), shuffled with a single shared permutation so
    question/answer pairs stay aligned.
    """
    # BUGFIX: the `np.bool` alias was removed in NumPy 1.24; the builtin
    # `bool` is the equivalent dtype.
    x = np.zeros((len(questions), MAXLEN, len(chars)), dtype=bool)
    y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=bool)
    for i, sentence in enumerate(questions):
        x[i] = ctable.encode(sentence, MAXLEN)
    for i, sentence in enumerate(answers):
        y[i] = ctable.encode(sentence, DIGITS + 1)
    # shuffle x and y with the same permutation so pairs stay aligned
    indices = np.arange(len(y))
    np.random.shuffle(indices)
    return x[indices], y[indices]
# +
q,a = data_generate(TRAINING_SIZE)
x,y = encode_examples(q,a)
# divides our data into training and validation (last 10% held out)
split_at = len(x) - len(x) // 10
x_train, x_val, y_train, y_val = x[:split_at], x[split_at:],y[:split_at],y[split_at:]
print('Training Data shape:')
print('X : ', x_train.shape)
print('Y : ', y_train.shape)
print('Sample Question(in encoded form) : ', x_train[0], y_train[0])
print('Sample Question(in decoded form) : ', ctable.decode(x_train[0]),'Sample Output : ', ctable.decode(y_train[0]))
# -
# #### Let's learn two wrapper functions in Keras - TimeDistributed and RepeatVector with some dummy examples.
# **TimeDistributed** is a wrapper function call that applies an input operation on all the timesteps of an input data. For instance, if I have a feed-forward network which converts a 10-dim vector to a 5-dim vector, then wrapping this TimeDistributed layer on that feed-forward operation would convert a batch_size \* sentence_len \* vector_len(=10) to batch_size \* sentence_len \* output_len(=5)
# +
# Demo: TimeDistributed applies the same Dense layer to every timestep.
model = Sequential()
# Inputs to it will be batch_size*time_steps*input_vector_dim (to Dense)
# Output will be batch_size*time_steps*output_vector_dim
# Here, Dense() converts a 5-dim input vector to an 8-dim vector.
model.add(TimeDistributed(Dense(8), input_shape=(3, 5)))
input_array = np.random.randint(10, size=(1,3,5))
print("Shape of input : ", input_array.shape)
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
print("Shape of output : ", output_array.shape)
# -
# **RepeatVector** repeats the vector a specified number of times. Dimension changes from batch_size * number of elements to batch_size* number of repetitions * number of elements.
# +
# Demo: RepeatVector copies a vector along a new (time) axis.
model = Sequential()
# converts from 1*10 to 1*6
model.add(Dense(6, input_dim=10))
print(model.output_shape)
# converts from 1*6 to 1*3*6
model.add(RepeatVector(3))
print(model.output_shape)
input_array = np.random.randint(1000, size=(1, 10))
print("Shape of input : ", input_array.shape)
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
print("Shape of output : ", output_array.shape)
# note: `None` is the batch dimension
print('Input : ', input_array[0])
print('Output : ', output_array[0])
# -
# ### MODEL ARCHITECTURE
#
# <img src="files/fig/LSTM_addition.jpg" width="400">
# **Note:** Whenever you are initializing a LSTM in Keras, by the default the option `return_sequences = False`. This means that at the end of the step the next component will only get to see the final hidden layer's values. On the other hand, if you set `return_sequences = True`, the LSTM component will return the hidden layer at each time step. It means that the next component should be able to consume inputs in that form.
#
# Think how this statement is relevant in terms of this model architecture and the TimeDistributed module we just learned.
#
# Build an encoder and decoder both single layer 128 nodes and an appropriate dense layer as needed by the model.
# +
# Hyperparams
RNN = layers.LSTM
HIDDEN_SIZE = 128
BATCH_SIZE = 128
LAYERS = 1
print('Build model...')
model = Sequential()
# ENCODING: the encoder LSTM compresses the whole question into its final
# hidden state (return_sequences defaults to False).
model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))
# Repeat the encoding once per output character so the decoder receives it
# at every timestep.
model.add(RepeatVector(MAXOUTPUTLEN))
# DECODING
for _ in range(LAYERS):
    # return hidden layer at each time step
    model.add(RNN(HIDDEN_SIZE, return_sequences=True))
# Per-timestep softmax over the character vocabulary.
model.add(TimeDistributed(layers.Dense(len(chars), activation='softmax')))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
# -
# Let's check how well our model trained.
# Train the seq2seq model, then spot-check predictions on random validation
# examples and report a small-sample accuracy.
for iteration in range(1, 2):
    print()
    model.fit(x_train, y_train,
              batch_size=BATCH_SIZE,
              epochs=20,
              validation_data=(x_val, y_val))
    # Select `numtotal` samples from the validation set at random so
    # we can visualize errors.
    print('Finished iteration ', iteration)
    numcorrect = 0
    numtotal = 20
    for i in range(numtotal):
        ind = np.random.randint(0, len(x_val))
        rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
        # BUGFIX: Sequential.predict_classes was removed in TF 2.6; take the
        # per-timestep argmax of the softmax output instead (same indices).
        preds = np.argmax(model.predict(rowx, verbose=0), axis=-1)
        q = ctable.decode(rowx[0])
        correct = ctable.decode(rowy[0])
        guess = ctable.decode(preds[0], calc_argmax=False)
        print('Question', q, end=' ')
        print('True', correct, end=' ')
        print('Guess', guess, end=' ')
        if guess == correct :
            print('Good job')
            numcorrect += 1
        else:
            print('Fail')
    print('The model scored ', numcorrect*100/numtotal,' % in its test.')
# #### EXERCISE
#
# * Try changing the hyperparams, use other RNNs, more layers, check if increasing the number of epochs is useful.
#
# * Try reversing the data from validation set and check if commutative property of addition is learned by the model.
# * Try printing the hidden layer with two inputs that are commutative and check if the hidden representations it learned are same or similar. Do we expect it to be true? If so, why? If not why? You can access the layer using an index with model.layers and layer.output will give the output of that layer.
#
# * Try doing addition in the RNN model the same way we do by hand. Reverse the order of digits and at each time step, input two digits get an output use the hidden layer and input next two digits and so on.(units in the first time step, tens in the second time step etc.)
| docs/labs/lab08/cs109b-lab08-rnn-solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Cleaning
#
# Main cleaning steps:
# - Transform the feature `horsepower` to numeric.
# - Clean company names in the feature can names.
# - Transform `origin` column encoded as 1,2,3 to USA, EUROPE, ASIA in `region` column and drop the `origin` column.
# use black formatter
# %load_ext nb_black
# reload module in development mode
# %load_ext autoreload
# %autoreload 2
# #### Download data
# +
from src.utils import download_data
download_data()
# -
# #### Load raw data
# +
import pandas as pd
from src.clean import load_raw_data
df = load_raw_data()
df.head(5)
# -
# #### Data types and missing values
df.info()
# #### Transform horsepower column to numeric
df["horsepower"] = pd.to_numeric(df["horsepower"], errors="coerce")
df[df["horsepower"].isnull()]
# Some horsepower values are null. We will impute the missing values with the mean value once we have split the data into training and test sets.
# #### Get company names from car name column
df["name"] = df["name"].str.title()
df["company"] = df["name"].str.split(" ").str[0]
# check companies name
df["company"].value_counts().sort_index()
# #### Correct car companies names
# There are some typos in company names. For example: `Vokswagen` or `Toyouta`.
# The function `correct_company_names` will do this for us.
# +
from src.clean import correct_company_names
correct_company_names(df)
# check the output
df["company"].value_counts().sort_index()
# -
# #### Get region from origin column
# The region of origin is categorical. In the dataset it is ordinal (1 for USA, 2 for Europe and 3 for Asia). It should be nominal data.
#
# The function `get_region_names` will do this for us.
#
# For later model training, we'll use one-hot-encoding to generate the new features.
# +
from src.clean import get_region_names
get_region_names(df)
df.head()
# +
from src.utils import save_data
save_data(df, "interim", "data_cleaned.pkl")
# -
# Next, we move all the cleaning steps to create a function `clean_dataset` in the module `clean.py`.
| notebooks/01_data_cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
# from tqdm import tqdm
from tqdm import tqdm_notebook as tqdm
import matplotlib.pyplot as plt
import numba
from numba import prange
from time import perf_counter
from scipy.ndimage import convolve, sobel
from scipy import ndimage
from scipy.special import xlogy
from sklearn import preprocessing
from scipy.stats import mode
from scipy.stats import gaussian_kde
from scipy.integrate import quad
import seaborn as sns
# import statistics as statss
# %matplotlib inline
sns.set_style("ticks")
sns.set_context("poster")
# +
# Build the initial condition for the Gray-Scott reaction-diffusion system:
# U = 1 everywhere, with a small square perturbation of both fields in the
# centre, plus Gaussian noise; both fields are then min-max rescaled to [0, 1].
n=250
U, V = np.zeros((n, n), dtype=np.float64), np.zeros((n, n), dtype=np.float64)
r, m = n//40, n//2  # half-width of the perturbed square, grid centre
U[...] = 1.0
V[m-r:m+r, m-r:m+r] = 0.25
U[m-r:m+r, m-r:m+r] = 0.5
U += np.random.normal(scale=0.05, size=U.shape)
V += np.random.normal(scale=0.05, size=V.shape)
# Min-max normalise both fields into [0, 1].
U = U - np.min(U)
U = U / np.max(U)
V = V - np.min(V)
V = V / np.max(V)
# Side-by-side visual check of the two initial fields.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,15))
ax1.set_title("Initial U")
ax2.set_title("Initial V")
im1 = ax1.imshow(U, origin='lower', interpolation='bicubic', cmap='jet')
im2 = ax2.imshow(V, origin='lower', interpolation='bicubic', cmap='jet')
fig.colorbar(im1,fraction=0.046, pad=0.04)
plt.show()
# +
# 8-connectivity structuring element used by ndimage.label in calc_objects.
s = [[1,1,1],
     [1,1,1],
     [1,1,1]]
@numba.njit(fastmath=True, parallel=True)
def gray_scott(U, V, Du, Dv, f, k, dt, dx, T):
    """Integrate the Gray-Scott equations with explicit Euler for T time units.

    U, V   : (n, n) concentration fields (new arrays are returned each step)
    Du, Dv : diffusion coefficients
    f, k   : feed and kill rates
    dt, dx : time step and grid spacing
    Returns the final (U, V) fields.
    """
    n = U.shape[0]
    iters = int(T / dt)
    for i in range(iters):
        # Diffusion terms from the periodic 5-point Laplacian (see lap below).
        Lu, Lv = Du*lap(U, n)/dx/dx, Dv*lap(V, n)/dx/dx
        # Reaction terms: U + 2V -> 3V autocatalysis, feed f, removal f+k.
        U, V = U + dt*(Lu - U*V*V + f * (1-U)), V + dt*(Lv + U*V*V - (f+k) * V)
    return U, V
def gscott(n, F, k, T, Du=0.16, Dv=0.08, dt=1.0, dx=1.0, seed=5000000):
    """Build the standard noisy initial condition and run the Gray-Scott solver.

    The RNG is re-seeded on every call so that every parameter setting starts
    from identical initial conditions (the U noise is drawn before the V noise
    to keep the draw order reproducible).
    """
    np.random.seed(seed=seed)
    half_width, centre = n // 40, n // 2
    lo, hi = centre - half_width, centre + half_width
    U = np.ones((n, n), dtype=np.float64)
    V = np.zeros((n, n), dtype=np.float64)
    V[lo:hi, lo:hi] = 0.25
    U[lo:hi, lo:hi] = 0.5
    U = U + np.random.normal(scale=0.05, size=(n, n))
    V = V + np.random.normal(scale=0.05, size=(n, n))
    return gray_scott(U, V, Du, Dv, F, k, dt, dx, T)
def calc_objects(inp):
    """Return the pixel sizes of the connected "objects" in a pattern.

    The field is low-pass filtered in Fourier space, binarised about its
    mid-range value, and connected components are labelled (8-connectivity
    via the module-level structure `s`).  Returns the per-object pixel
    counts with the background bin dropped, or np.zeros(1) if no objects.
    """
    fftding = np.fft.fft2(inp)
    # NOTE(review): fourier_ellipsoid / fourier_gaussian act as smoothing
    # filters; the *100 and *10 factors only rescale the field and do not
    # change the mid-range threshold below — presumably debugging leftovers.
    outp = ndimage.fourier_ellipsoid(fftding, 1.1)*100
    outp = np.fft.ifft2(ndimage.fourier_gaussian(outp, 1.01)).real*10
    # Bright objects on a dark background.
    binarized1 = np.clip(outp-((outp.min() + outp.max())/2), 0, 1)
    labels1 = ndimage.label(binarized1, structure=s)
    # Dark objects on a bright background (inverted threshold).
    binarized2 = np.clip((outp-((outp.min() + outp.max())/2))*-1, 0, 1)
    labels2 = ndimage.label(binarized2, structure=s)
    if labels1[1] > labels2[1]:
        # Histogram of pixels per label value; drop the background bin.
        bins, edges = np.histogram(labels1[0], bins=labels1[1])
        return bins[1:]
    # Try inversed region
    if labels2[1] > 1:
        bins, edges = np.histogram(labels2[0], bins=labels2[1])
        return bins[1:]
    # No objects
    return np.zeros(1)
@numba.njit(parallel=True, fastmath=True)
def lap(u, N):
    """Return the 5-point Laplacian of the (N, N) field u with periodic
    boundary conditions.

    No dx scaling is applied here; the caller (gray_scott) divides by dx**2.
    Interior rows are parallelised with prange; edges and corners wrap to
    the opposite side of the grid.
    """
    uc = np.empty((N, N))
    # Interior points.
    for x in numba.prange(1, N-1):
        uc[x, 1:-1] = u[x+1, 1:-1] + u[x-1, 1:-1] + u[x, :-2] + u[x, 2:] - 4*u[x, 1:-1]
    # Edges (periodic wrap across the boundary).
    uc[1:-1, 0] = u[1:-1, 1] + u[:-2, 0] + u[2:, 0] + u[1:-1, -1] - 4*u[1:-1, 0]
    uc[1:-1, -1] = u[1:-1, -2] + u[:-2, -1] + u[2:, -1] + u[1:-1, 0] - 4*u[1:-1, -1]
    uc[0, 1:-1] = u[1, 1:-1] + u[0, 2:] + u[0, :-2] + u[-1, 1:-1] - 4*u[0, 1:-1]
    uc[-1, 1:-1] = u[-2, 1:-1] + u[-1, 2:] + u[-1, :-2] + u[0, 1:-1] - 4*u[-1, 1:-1]
    # Corners.
    uc[0, 0] = u[0, 1] + u[0, -1] + u[1, 0] + u[-1, 0] - 4*u[0, 0]
    # BUGFIX: the right neighbour of cell (N-1, 0) is u[-1, 1]; the original
    # summed u[-1, -2] here, which picked a far-side cell twice and broke the
    # periodic stencil at this corner.
    uc[-1, 0] = u[0, 0] + u[-2, 0] + u[-1, -1] + u[-1, 1] - 4*u[-1, 0]
    uc[0, -1] = u[0, 0] + u[0, -2] + u[-1, -1] + u[1, -1] - 4*u[0, -1]
    uc[-1, -1] = u[0, -1] + u[-1, 0] + u[-1, -2] + u[-2, -1] - 4*u[-1, -1]
    return uc
# +
def gaus_hack(inp):
    """Return a PDF estimate for the sample *inp*.

    gaussian_kde raises on degenerate (constant) samples, so a constant
    input gets a zero-density stand-in instead.
    """
    sample_is_constant = len(np.unique(inp)) == 1
    if sample_is_constant:
        return lambda x: 0
    return gaussian_kde(inp)
def fim(x, ind, pdfs, df, dk):
    """Integrand for one entry of the 2x2 Fisher information matrix at x.

    pdfs holds five density estimates: [base, k+dk, k-dk, f+df, f-df].
    Central differences of the log-densities approximate the score in each
    parameter direction; any density below 1e-8 zeroes the integrand so the
    logarithms stay finite.
    """
    threshold = 1e-8

    if np.all(ind):
        # (1, 1) entry: squared derivative with respect to k.
        vals = np.array([pdfs[0](x), pdfs[1](x), pdfs[2](x)])
        if np.any(vals < threshold):
            return 0.0
        k_score = np.log(vals[1]) - np.log(vals[2])
        return vals[0] * k_score ** 2 / (4.0 * dk * dk)

    if np.any(ind):
        # (1, 0) / (0, 1) entry: mixed k- and f-derivatives, needs all five.
        vals = np.array([p(x) for p in pdfs])
        if np.any(vals < threshold):
            return 0.0
        k_score = np.log(vals[1]) - np.log(vals[2])
        f_score = np.log(vals[3]) - np.log(vals[4])
        return vals[0] * k_score * f_score / (4.0 * df * dk)

    # (0, 0) entry: squared derivative with respect to f.
    vals = np.array([pdfs[0](x), pdfs[3](x), pdfs[4](x)])
    if np.any(vals < threshold):
        return 0.0
    f_score = np.log(vals[1]) - np.log(vals[2])
    return vals[0] * f_score ** 2 / (4.0 * df * df)
def fisher_matrix(inps, df, dk):
    """Numerically integrate the 2x2 Fisher information matrix.

    inps holds the five object-size samples (base run plus the four
    perturbed runs); each is turned into a density estimate, then every
    matrix entry is the integral of fim over the whole real line.
    """
    densities = [gaus_hack(sample) for sample in inps]
    matrix = np.zeros((2, 2))
    for entry in np.ndindex(2, 2):
        integral, _abserr = quad(fim, -np.inf, np.inf,
                                 args=(entry, densities, df, dk), limit=1000)
        matrix[entry] = integral
    return matrix
def bulk_fisher(f, k, df, dk, n=150, T=10000):
    """Fisher information matrices over a 2-D grid of (f, k) parameters.

    At every grid point, runs the base simulation plus four perturbed runs
    (k±dk, f±df), summarises each pattern by its object-size distribution,
    and assembles the 2x2 FIM.  Returns shape (len(k), len(f), 2, 2).
    Expensive: 5 full simulations per grid point.
    """
    iters = len(f) * len(k)
    f, k = np.meshgrid(f, k)
    outp = np.zeros((*f.shape, 2, 2))
    for index, _f in tqdm(np.ndenumerate(f), total=iters):
        # Base run and the four finite-difference perturbations.
        u, v = gscott(n, _f, k[index], T)
        u1,v1 = gscott(n, _f, k[index]+dk, T)
        u2,v2 = gscott(n, _f, k[index]-dk, T)
        u3,v3 = gscott(n, _f+df, k[index], T)
        u4,v4 = gscott(n, _f-df, k[index], T)
        # Summarise each final U-field by its object-size histogram.
        inputs = [calc_objects(x) for x in [u, u1, u2, u3, u4]]
        outp[index] = fisher_matrix(inputs, df, dk)
    return outp
def bulk_fisher1d(f, k, df, dk, n=250, T=20000):
    """Fisher information matrices along a 1-D path through (f, k) space.

    f and k are equal-length arrays describing the path; returns an array
    of shape (len(f), 2, 2) holding one FIM per path point.  Expensive:
    five full simulations per point.
    """
    results = np.zeros((len(f), 2, 2))
    for idx, feed in tqdm(np.ndenumerate(f), total=len(f)):
        kill = k[idx]
        # Base run plus the four finite-difference perturbations.
        runs = [
            gscott(n, feed, kill, T),
            gscott(n, feed, kill + dk, T),
            gscott(n, feed, kill - dk, T),
            gscott(n, feed + df, kill, T),
            gscott(n, feed - df, kill, T),
        ]
        # Only the U-field of each run is summarised.
        samples = [calc_objects(u_field) for u_field, _v_field in runs]
        results[idx] = fisher_matrix(samples, df, dk)
    return results
# -
# (removed: a premature duplicate of the ds2 computation at the end of this
# cell; `outp` and `dtheta` were not yet defined at this point, so the line
# raised NameError when the notebook was executed top to bottom.)
# +
# Finite-difference step sizes for the feed rate f and kill rate k.
df = 0.0001
dk = 0.0001
# Endpoints of the line scanned through (k, f) parameter space.
f_min = 0.0238191
f_max = 0.0271360
k_min = 0.05869347
k_max = 0.06141
p1 = np.array([k_min, f_min])
p2 = np.array([k_max, f_max])
dr = (p2 - p1)/np.linalg.norm(p2 - p1)  # unit vector along the scan line
dtheta = np.vdot([dk, df], dr)          # parameter step projected on the line
f = np.linspace(0.0238191, 0.0271360, 250)
k = np.linspace(0.05869347, 0.06141, 250)
# NOTE: bulk_fisher1d is declared as (f, k, df, dk, ...); the original call
# passed (dk, df).  Both steps equal 0.0001 here so the results are
# unchanged, but the arguments are now passed in the declared order.
outp = bulk_fisher1d(f, k, df, dk)
print(outp)
# Line element ds^2 along the scan direction from the off-diagonal FIM entry.
ds2 = [x[0,1]*dtheta*dtheta for x in outp]
# -
# Plot the line element along the scan and persist it for later analysis.
plt.plot(ds2)
np.savetxt("ds2", np.array(ds2))
# Interactive inspection of the f grid (notebook scratch cells).
f
for i, _f in np.ndenumerate(f):
    print(f"{i} {_f}")
f.shape[0]
f[(1,)]
# +
# Second scan through a different region of (f, k) parameter space.
df = 0.0001
dk = 0.0001
k = np.linspace(0.0652, 0.0632, 250)
f = np.linspace(0.0395, 0.04228, 250)
# NOTE(review): the endpoints below are the ones from the FIRST scan, not
# the ranges defined just above, so the dtheta computed here does not match
# this scan.  The next cell recomputes dtheta with the matching endpoints
# and overwrites "ds2v2" — confirm that cell is always run afterwards.
f1 = 0.0238191
f2 = 0.0271360
k1 = 0.05869347
k2 = 0.06141
p1 = np.array([k1, f1])
p2 = np.array([k2, f2])
dr = (p2 - p1)/np.linalg.norm(p2 - p1)
dtheta = np.vdot([dk, df], dr)
# NOTE(review): bulk_fisher1d is declared as (f, k, df, dk, ...); this call
# passes (dk, df).  Harmless while both steps equal 0.0001, but worth fixing.
outp2 = bulk_fisher1d(f, k,dk,df)
ds2v2 = [x[0,1]*dtheta*dtheta for x in outp2]
np.savetxt("ds2v2", np.array(ds2v2))
# -
# Recompute dtheta with the endpoints that actually match the second scan,
# then rebuild and re-save ds2v2 (overwriting the file written above).
f1 = 0.0395
f2 = 0.04228
k1 = 0.0652
k2 = 0.0632
p1 = np.array([k1, f1])
p2 = np.array([k2, f2])
dr = (p2 - p1)/np.linalg.norm(p2 - p1)
dtheta = np.vdot([dk, df], dr)
# abs(): only the magnitude of the line element is of interest here.
ds2v2 = np.abs([x[0,1]*dtheta*dtheta for x in outp2])
np.savetxt("ds2v2", np.array(ds2v2))
plt.plot(ds2v2)
| full_fisher_matrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: jsl240
# language: python
# name: jsl240
# ---
# # ICD-O - SNOMED Entity Resolution - version 2.4.6
#
# ## Example for ICD-O Entity Resolution Pipeline
# A common NLP problem in medical applications is to identify histology behaviour in documented cancer studies.
#
# In this example we will use Spark-NLP to identify and resolve histology behavior expressions and resolve them to an ICD-O code.
#
# Some cancer related clinical notes (taken from https://www.cancernetwork.com/case-studies):
# https://www.cancernetwork.com/case-studies/large-scrotal-mass-multifocal-intra-abdominal-retroperitoneal-and-pelvic-metastases
# https://oncology.medicinematters.com/lymphoma/chronic-lymphocytic-leukemia/case-study-small-b-cell-lymphocytic-lymphoma-and-chronic-lymphoc/12133054
# https://oncology.medicinematters.com/lymphoma/epidemiology/central-nervous-system-lymphoma/12124056
# https://oncology.medicinematters.com/lymphoma/case-study-cutaneous-t-cell-lymphoma/12129416
#
# Note 1: Desmoplastic small round cell tumor
# <div style="border:2px solid #747474; background-color: #e3e3e3; margin: 5px; padding: 10px">
# A 35-year-old African-American man was referred to our urology clinic by his primary care physician for consultation about a large left scrotal mass. The patient reported a 3-month history of left scrotal swelling that had progressively increased in size and was associated with mild left scrotal pain. He also had complaints of mild constipation, with hard stools every other day. He denied any urinary complaints. On physical examination, a hard paratesticular mass could be palpated in the left hemiscrotum extending into the left groin, separate from the left testicle, and measuring approximately 10 × 7 cm in size. A hard, lower abdominal mass in the suprapubic region could also be palpated in the midline. The patient was admitted urgently to the hospital for further evaluation with cross-sectional imaging and blood work.
#
# Laboratory results, including results of a complete blood cell count with differential, liver function tests, coagulation panel, and basic chemistry panel, were unremarkable except for a serum creatinine level of 2.6 mg/dL. Typical markers for a testicular germ cell tumor were within normal limits: the beta–human chorionic gonadotropin level was less than 1 mIU/mL and the alpha fetoprotein level was less than 2.8 ng/mL. A CT scan of the chest, abdomen, and pelvis with intravenous contrast was obtained, and it showed large multifocal intra-abdominal, retroperitoneal, and pelvic masses (Figure 1). On cross-sectional imaging, a 7.8-cm para-aortic mass was visualized compressing the proximal portion of the left ureter, creating moderate left hydroureteronephrosis. Additionally, three separate pelvic masses were present in the retrovesical space, each measuring approximately 5 to 10 cm at their largest diameter; these displaced the bladder anteriorly and the rectum posteriorly.
#
# The patient underwent ultrasound-guided needle biopsy of one of the pelvic masses on hospital day 3 for definitive diagnosis. Microscopic examination of the tissue by our pathologist revealed cellular islands with oval to elongated, irregular, and hyperchromatic nuclei; scant cytoplasm; and invading fibrous tissue—as well as three mitoses per high-powered field (Figure 2). Immunohistochemical staining demonstrated strong positivity for cytokeratin AE1/AE3, vimentin, and desmin. Further mutational analysis of the cells detected the presence of an EWS-WT1 fusion transcript consistent with a diagnosis of desmoplastic small round cell tumor.
# </div>
#
# Note 2: SLL and CLL
# <div style="border:2px solid #747474; background-color: #e3e3e3; margin: 5px; padding: 10px">
# A 72-year-old man with a history of diabetes mellitus, hypertension, and hypercholesterolemia self-palpated a left submandibular lump in 2012. Complete blood count (CBC) in his internist’s office showed solitary leukocytosis (white count 22) with predominant lymphocytes for which he was referred to a hematologist. Peripheral blood flow cytometry on 04/11/12 confirmed chronic lymphocytic leukemia (CLL)/small lymphocytic lymphoma (SLL): abnormal cell population comprising 63% of CD45 positive leukocytes, co-expressing CD5 and CD23 in CD19-positive B cells. CD38 was negative but other prognostic markers were not assessed at that time. The patient was observed regularly for the next 3 years and his white count trend was as follows: 22.8 (4/2012) --> 28.5 (07/2012) --> 32.2 (12/2012) --> 36.5 (02/2013) --> 42 (09/2013) --> 44.9 (01/2014) --> 75.8 (2/2015). His other counts stayed normal until early 2015 when he also developed anemia (hemoglobin [HGB] 10.9) although platelets remained normal at 215. He had been noticing enlargement of his cervical, submandibular, supraclavicular, and axillary lymphadenopathy for several months since 2014 and a positron emission tomography (PET)/computed tomography (CT) scan done in 12/2014 had shown extensive diffuse lymphadenopathy within the neck, chest, abdomen, and pelvis. Maximum standardized uptake value (SUV max) was similar to low baseline activity within the vasculature of the neck and chest. In the abdomen and pelvis, however, there was mild to moderately hypermetabolic adenopathy measuring up to SUV of 4. The largest right neck nodes measured up to 2.3 x 3 cm and left neck nodes measured up to 2.3 x 1.5 cm. His right axillary lymphadenopathy measured up to 5.5 x 2.6 cm and on the left measured up to 4.8 x 3.4 cm. Lymph nodes on the right abdomen and pelvis measured up to 6.7 cm and seemed to have some mass effect with compression on the urinary bladder without symptoms. 
He underwent a bone marrow biopsy on 02/03/15, which revealed hypercellular marrow (60%) with involvement by CLL (30%); flow cytometry showed CD38 and ZAP-70 positivity; fluorescence in situ hybridization (FISH) analysis showed 13q deletion/monosomy 13; IgVH was unmutated; karyotype was 46XY.
# </div>
#
# Note 3: CNS lymphoma
# <div style="border:2px solid #747474; background-color: #e3e3e3; margin: 5px; padding: 10px">
# A 56-year-old woman began to experience vertigo, headaches, and frequent falls. A computed tomography (CT) scan of the brain revealed the presence of a 1.6 x 1.6 x 2.1 cm mass involving the fourth ventricle (Figure 14.1). A gadolinium-enhanced magnetic resonance imaging (MRI) scan confirmed the presence of the mass, and a stereotactic biopsy was performed that demonstrated a primary central nervous system lymphoma (PCNSL) with a diffuse large B-cell histology. Complete blood count (CBC), lactate dehydrogenase (LDH), and beta-2-microglobulin were normal. Systemic staging with a positron emission tomography (PET)/CT scan and bone marrow biopsy showed no evidence of lymphomatous involvement outside the CNS. An eye exam and lumbar puncture showed no evidence of either ocular or leptomeningeal involvement.
# </div>
#
# Note 4: Cutaneous T-cell lymphoma
# <div style="border:2px solid #747474; background-color: #e3e3e3; margin: 5px; padding: 10px">
# An 83-year-old female presented with a progressing pruritic cutaneous rash that started 8 years ago. On clinical exam there were numerous coalescing, infiltrated, scaly, and partially crusted erythematous plaques distributed over her trunk and extremities and a large fungating ulcerated nodule on her right thigh covering 75% of her total body surface area (Figure 10.1). Lymphoma associated alopecia and a left axillary lymphadenopathy were also noted. For the past 3–4 months she reported fatigue, severe pruritus, night sweats, 20 pounds of weight loss, and loss of appetite.
# </div>
# +
import sys, os, time
import sparknlp_jsl
from sparknlp.base import *
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.pretrained import ResourceDownloader
import pyspark.sql.functions as F
from pyspark.sql.types import StructType, StructField, StringType
# -
# Start the licensed Spark NLP session ("####" is a placeholder for the
# JSL license key / secret).
spark = sparknlp_jsl.start("####")
# Let's create a dataset with all four case studies
# +
notes = []
notes.append("""A 35-year-old African-American man was referred to our urology clinic by his primary care physician for consultation about a large left scrotal mass. The patient reported a 3-month history of left scrotal swelling that had progressively increased in size and was associated with mild left scrotal pain. He also had complaints of mild constipation, with hard stools every other day. He denied any urinary complaints. On physical examination, a hard paratesticular mass could be palpated in the left hemiscrotum extending into the left groin, separate from the left testicle, and measuring approximately 10 × 7 cm in size. A hard, lower abdominal mass in the suprapubic region could also be palpated in the midline. The patient was admitted urgently to the hospital for further evaluation with cross-sectional imaging and blood work.
Laboratory results, including results of a complete blood cell count with differential, liver function tests, coagulation panel, and basic chemistry panel, were unremarkable except for a serum creatinine level of 2.6 mg/dL. Typical markers for a testicular germ cell tumor were within normal limits: the beta–human chorionic gonadotropin level was less than 1 mIU/mL and the alpha fetoprotein level was less than 2.8 ng/mL. A CT scan of the chest, abdomen, and pelvis with intravenous contrast was obtained, and it showed large multifocal intra-abdominal, retroperitoneal, and pelvic masses (Figure 1). On cross-sectional imaging, a 7.8-cm para-aortic mass was visualized compressing the proximal portion of the left ureter, creating moderate left hydroureteronephrosis. Additionally, three separate pelvic masses were present in the retrovesical space, each measuring approximately 5 to 10 cm at their largest diameter; these displaced the bladder anteriorly and the rectum posteriorly.
The patient underwent ultrasound-guided needle biopsy of one of the pelvic masses on hospital day 3 for definitive diagnosis. Microscopic examination of the tissue by our pathologist revealed cellular islands with oval to elongated, irregular, and hyperchromatic nuclei; scant cytoplasm; and invading fibrous tissue—as well as three mitoses per high-powered field (Figure 2). Immunohistochemical staining demonstrated strong positivity for cytokeratin AE1/AE3, vimentin, and desmin. Further mutational analysis of the cells detected the presence of an EWS-WT1 fusion transcript consistent with a diagnosis of desmoplastic small round cell tumor.""")
notes.append("""A 72-year-old man with a history of diabetes mellitus, hypertension, and hypercholesterolemia self-palpated a left submandibular lump in 2012. Complete blood count (CBC) in his internist’s office showed solitary leukocytosis (white count 22) with predominant lymphocytes for which he was referred to a hematologist. Peripheral blood flow cytometry on 04/11/12 confirmed chronic lymphocytic leukemia (CLL)/small lymphocytic lymphoma (SLL): abnormal cell population comprising 63% of CD45 positive leukocytes, co-expressing CD5 and CD23 in CD19-positive B cells. CD38 was negative but other prognostic markers were not assessed at that time. The patient was observed regularly for the next 3 years and his white count trend was as follows: 22.8 (4/2012) --> 28.5 (07/2012) --> 32.2 (12/2012) --> 36.5 (02/2013) --> 42 (09/2013) --> 44.9 (01/2014) --> 75.8 (2/2015). His other counts stayed normal until early 2015 when he also developed anemia (hemoglobin [HGB] 10.9) although platelets remained normal at 215. He had been noticing enlargement of his cervical, submandibular, supraclavicular, and axillary lymphadenopathy for several months since 2014 and a positron emission tomography (PET)/computed tomography (CT) scan done in 12/2014 had shown extensive diffuse lymphadenopathy within the neck, chest, abdomen, and pelvis. Maximum standardized uptake value (SUV max) was similar to low baseline activity within the vasculature of the neck and chest. In the abdomen and pelvis, however, there was mild to moderately hypermetabolic adenopathy measuring up to SUV of 4. The largest right neck nodes measured up to 2.3 x 3 cm and left neck nodes measured up to 2.3 x 1.5 cm. His right axillary lymphadenopathy measured up to 5.5 x 2.6 cm and on the left measured up to 4.8 x 3.4 cm. Lymph nodes on the right abdomen and pelvis measured up to 6.7 cm and seemed to have some mass effect with compression on the urinary bladder without symptoms. 
He underwent a bone marrow biopsy on 02/03/15, which revealed hypercellular marrow (60%) with involvement by CLL (30%); flow cytometry showed CD38 and ZAP-70 positivity; fluorescence in situ hybridization (FISH) analysis showed 13q deletion/monosomy 13; IgVH was unmutated; karyotype was 46XY.""")
notes.append("A 56-year-old woman began to experience vertigo, headaches, and frequent falls. A computed tomography (CT) scan of the brain revealed the presence of a 1.6 x 1.6 x 2.1 cm mass involving the fourth ventricle (Figure 14.1). A gadolinium-enhanced magnetic resonance imaging (MRI) scan confirmed the presence of the mass, and a stereotactic biopsy was performed that demonstrated a primary central nervous system lymphoma (PCNSL) with a diffuse large B-cell histology. Complete blood count (CBC), lactate dehydrogenase (LDH), and beta-2-microglobulin were normal. Systemic staging with a positron emission tomography (PET)/CT scan and bone marrow biopsy showed no evidence of lymphomatous involvement outside the CNS. An eye exam and lumbar puncture showed no evidence of either ocular or leptomeningeal involvement.")
notes.append("An 83-year-old female presented with a progressing pruritic cutaneous rash that started 8 years ago. On clinical exam there were numerous coalescing, infiltrated, scaly, and partially crusted erythematous plaques distributed over her trunk and extremities and a large fungating ulcerated nodule on her right thigh covering 75% of her total body surface area (Figure 10.1). Lymphoma associated alopecia and a left axillary lymphadenopathy were also noted. For the past 3–4 months she reported fatigue, severe pruritus, night sweats, 20 pounds of weight loss, and loss of appetite.")
# Wrap the four case-study notes in a single-column ("description") DataFrame.
data = spark.createDataFrame([(n,) for n in notes], StructType([StructField("description", StringType())]))
# -
# And let's build a SparkNLP pipeline with the following stages:
# - DocumentAssembler: Entry annotator for our pipelines; it creates the data structure for the Annotation Framework
# - SentenceDetector: Annotator to pragmatically separate complete sentences inside each document
# - Tokenizer: Annotator to separate sentences in tokens (generally words)
# - WordEmbeddings: Vectorization of word tokens, in this case using word embeddings trained from PubMed, ICD10 and other clinical resources.
# - EntityResolver: Annotator that performs search for the KNNs, in this case trained from ICDO Histology Behavior.
# In order to find cancer related chunks, we are going to use a pretrained Search Trie wrapped up in our TextMatcher Annotator; and to identify treatments/procedures we are going to use our good old NER.
#
# - TextMatcher: Trained with a Cancer Glossary and an augmented dataset from JSL Data Market this annotator makes sure to return just found phrases in a search Trie. In this case ICDO phrases.
#
#
# - NerDLModel: TensorFlow based Named Entity Recognizer, trained to extract PROBLEMS, TREATMENTS and TESTS
# - NerConverter: Chunk builder out of tokens tagged by the Ner Model
# +
# Entry annotator: wraps the raw text column into SparkNLP document annotations.
docAssembler = DocumentAssembler().setInputCol("description").setOutputCol("document")
# Split each document into sentences, then sentences into tokens.
sentenceDetector = SentenceDetector().setInputCols("document").setOutputCol("sentence")
tokenizer = Tokenizer().setInputCols("sentence").setOutputCol("token")
#Working on adjusting WordEmbeddingsModel to work with the subset of matched tokens
# Pretrained clinical word embeddings, one vector per token.
word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\
    .setInputCols("sentence", "token")\
    .setOutputCol("word_embeddings")
# -
# TextMatcher Strategy
# +
# Bio/cancer NER tagger used to surface ICD-O candidate chunks.
icdo_ner = NerDLModel.pretrained("ner_bionlp", "en", "clinical/models")\
    .setInputCols("sentence", "token", "word_embeddings")\
    .setOutputCol("icdo_ner")
# Assemble contiguous NER tags into entity chunks.
icdo_chunk = NerConverter().setInputCols("sentence","token","icdo_ner").setOutputCol("icdo_chunk")
# Pool token embeddings per chunk for the resolver's nearest-neighbour lookup.
icdo_chunk_embeddings = ChunkEmbeddings()\
    .setInputCols("icdo_chunk", "word_embeddings")\
    .setOutputCol("icdo_chunk_embeddings")
# Resolve each chunk embedding to an ICD-O histology/behaviour code.
icdo_chunk_resolver = ChunkEntityResolverModel.pretrained("chunkresolve_icdo_clinical", "en", "clinical/models")\
    .setInputCols("token","icdo_chunk_embeddings")\
    .setOutputCol("tm_icdo_code")
# -
# Ner Model Strategy
# +
# Clinical NER extracting PROBLEM / TREATMENT / TEST entities.
clinical_ner = NerDLModel.pretrained("ner_clinical", "en", "clinical/models") \
  .setInputCols(["sentence", "token", "word_embeddings"]) \
  .setOutputCol("ner")
# Assemble contiguous NER tags into entity chunks.
ner_converter = NerConverter() \
  .setInputCols(["sentence", "token", "ner"]) \
  .setOutputCol("ner_chunk")
# Re-tokenise inside each chunk; the SNOMED resolver consumes these tokens.
ner_chunk_tokenizer = ChunkTokenizer()\
    .setInputCols("ner_chunk")\
    .setOutputCol("ner_token")
# Pool token embeddings per chunk for the resolver lookup.
ner_chunk_embeddings = ChunkEmbeddings()\
    .setInputCols("ner_chunk", "word_embeddings")\
    .setOutputCol("ner_chunk_embeddings")
# -
#SNOMED Resolution
# Ensemble resolver mapping each clinical chunk to a SNOMED code.
ner_snomed_resolver = \
EnsembleEntityResolverModel.pretrained("ensembleresolve_snomed_clinical","en","clinical/models")\
.setInputCols("ner_token","ner_chunk_embeddings").setOutputCol("snomed_result")
# Full pipeline: shared preprocessing, then the two resolution branches
# (SNOMED via the clinical NER, ICD-O via the bionlp NER).
pipelineFull = Pipeline().setStages([
    docAssembler,
    sentenceDetector,
    tokenizer,
    word_embeddings,
    clinical_ner,
    ner_converter,
    ner_chunk_embeddings,
    ner_chunk_tokenizer,
    ner_snomed_resolver,
    icdo_ner,
    icdo_chunk,
    icdo_chunk_embeddings,
    icdo_chunk_resolver
])
# Let's train our Pipeline and make it ready to start transforming
# Fit materialises the PipelineModel (pretrained stages need no training);
# cache the transformed output since it is queried twice below.
pipelineModelFull = pipelineFull.fit(data)
output = pipelineModelFull.transform(data).cache()
# ### EntityResolver:
# Trained on an augmented ICD-O dataset from the JSL Data Market, it provides histology code resolution for the matched expressions. Besides providing the code in the "result" field, it returns more metadata about the matching process:
#
# - target_text -> Text to resolve
# - resolved_text -> Best match text
# - confidence -> Relative confidence for the top match (distance to probability)
# - confidence_ratio -> Relative confidence for the top match. TopMatchConfidence / SecondMatchConfidence
# - alternative_codes -> List of other plausible codes (in the KNN neighborhood)
# - alternative_confidence_ratios -> Rest of confidence ratios
# - all_k_results -> All resolved codes for metrics calculation purposes
# - sentence -> SentenceId
# - chunk -> ChunkId
# ICD-O view: number the notes, explode the (chunk, code, metadata) triples,
# and show one distinct row per resolved chunk ordered by confidence.
output.withColumn("note",F.monotonically_increasing_id()).select(F.col("note"),F.explode(F.arrays_zip("icdo_chunk.result","tm_icdo_code.result","tm_icdo_code.metadata")).alias("icdo_result")) \
    .select("note",
            F.expr("icdo_result['0']").alias("chunk"),
            F.expr("substring(icdo_result['2'].resolved_text,0,25)").alias("resolved_text"),
            F.expr("icdo_result['1']").alias("code"),
            #F.expr("icdo_result['2'].alternative_codes").alias("alternative_codes"),
            F.expr("round(icdo_result['2'].confidence_ratio,2)").alias("confidence")) \
    .distinct() \
    .orderBy(["note","confidence"], ascending=[True,False]) \
    .toPandas()
# SNOMED view: explode the (chunk, entity, resolution) tuples and show one
# distinct row per resolved chunk ordered by confidence ratio.
output.select(F.explode(F.arrays_zip("ner_chunk.result","ner_chunk.metadata","snomed_result.result","snomed_result.metadata")).alias("icdo_result")) \
    .select(F.expr("substring(icdo_result['0'],0,35)").alias("chunk"),
            F.expr("icdo_result['1'].entity").alias("entity"),
            #F.expr("icdo_result['3'].target_text").alias("target_text"),
            F.expr("substring(icdo_result['3'].resolved_text,0,35)").alias("resolved_text"),
            #F.expr("icdo_result['2']").alias("code"),
            #F.expr("icdo_result['2'].alternative_codes").alias("alternative_codes"),
            F.expr("round(icdo_result['3'].confidence_ratio,2)").alias("conf")
            ) \
    .distinct() \
    .orderBy("conf",ascending=False)\
    .toPandas()
| jupyter/enterprise/healthcare/ICDO-SNOMED-EntityResolution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + language="javascript"
# $('#appmode-leave').hide();
# $('#copy-binder-link').hide();
# $('#visit-repo-link').hide();
# -
# # Chemical Energetics and Kinetics Virtual Notebook
#
# Copyright **<NAME> and <NAME>**, January 2022
#
# This web page and those linked below have been created with Python using jupyter notebooks and will be used to develop important skills in data analysis, data processing and computing using simulated experimental results and computational chemistry software.
# There is also a collection of *standard* physical chemistry problems that can be readily solved numerically, or analytically.
#
# Although this may sound scary to some, most of the numerical labs of this unit can be also solved using simple Excel spreadsheets, or even by hand with some algebra and a pocket calculator.
# During the numerical labs your lab demonstrator will show you how Python notebooks can be used for solving these problems, which you should have already used in first year.
# However, all the data will be available in CSV files, which can be readily imported in Excel.
# We would however encourage you to use Python notebooks for processing the laboratory data, as this is a sought after skill by many employers.
#
# The links below will take you to a series of experiences that will be done in the corresponding week.
# We will start with two labs to refresh and familiarise ourselves with Python, then we will have two thermodynamics "labs", one kinetics "lab", one "lab" about chemical equilibrium and the last 4 labs about "computational chemistry".
# All the labs are focused on physical chemistry concepts that you have already seen in first year (_e.g._ calorimetry, equilibrium, kinetics) or will be covered in the lectures during the semester (_e.g._ quantum chemistry).
#
# Although these numerical labs cover a variety of different topics in Thermodynamics and Kinetics, the problems proposed here share some common features:
# 1. They have been designed to mimic "real" experiments, to a certain extent. This means that you often have the choice of setting the conditions of the experiment (*i.e.* the temperature) and then "perform" the measurement by clicking a button.
# 2. The results of all measurements come with some random noise, which is designed to mimic the experimental uncertainty of the instruments, and user errors. This means that if you perform the same measurement 10 times at the same conditions you will obtain 10 different values.
# 3. Often the initial conditions can be set using sliding bars, which are designed to be difficult to set to nice round numbers, and the measurements will give results with lots of decimal places. It will be left to you to decide how many digits are significant and worth reporting.
# 4. At the end of the "virtual" experiments, all the data collected can be exported as a Comma-Separated Values (CSV) file that can be directly imported into Excel, or read by Python and R.
# 5. In most cases, the data obtained during the virtual experiment should be comparable to real experimental data.
#
# In the first workshop you will do a short refresher of some basic statistics concepts; average, standard deviation, standard error, and linear regression.
# In the following two workshops we will tackle some kinetics and thermodynamics problem, which mimic the experiments that were part of the CHEM2000 wet chemistry laboratory.
# In the last workshops we will instead focus on using numerical methods to answer physical chemistry questions, such as using a minimisation procedure to solve multiple equilibrium problems, or doing energy minimisation procedures to compute the stable structure of molecules.
#
# You don't need to solve all the problems during the workshop. This web page will remain active for the entire semester and you can easily access it from home.
# ## Virtual Laboratories
#
# - [Week 01](week_01/virtual_workshop_01.ipynb): Averages and linear regression
# - [Week 02](week_01/virtual_workshop_01.ipynb):
# - [Week 03](week_03_bombCalorimetry/bombCalorimetry.ipynb): Bomb Calorimetry
# * [Launch bomb calorimetry laboratory](week_03_bombCalorimetry/virtualExperiment.ipynb)
# - [Week 04](week_04_surfaceAdsorption/langmuir.ipynb): Langmuir Isotherm
# * [Launch surface adsorption laboratory](week_04_surfaceAdsorption/virtualExperiment.ipynb)
# - Week 05: **Thermodynamics Lecture**
# - [Week 06](week_06_crystalViolet/crystalViolet.ipynb): Crystal Violet (Kinetics)
# * [Launch crystal violet laboratory](week_06_crystalViolet/virtualExperiment.ipynb)
# - Week 07: **Mid-semester Test**
# - Week 08: **Tuition Free Week**
# - [Week 09](week_09_chemicalEquilibrium/equilibrium.ipynb): Chemical Equilibrium
# - [Week 10](week_10_molecularMechanics1/MolecularMechanics1.ipynb): Molecular mechanics #1
# - [Week 11](week_11_molecularMechanics2/MolecularMechanics2.ipynb): Molecular mechanics #2
# * [Launch water density laboratory](week_11_molecularMechanics2/waterDensity.ipynb)
# * [Launch Molecular dynamics laboratory](week_11_molecularMechanics2/templateMD.ipynb)
# - [Week 12](week_12_quantumChemistry1/QuantumChemistry1.ipynb): Quantum Chemistry #1
# - [Week 13](week_13_quantumChemistry2/QuantumChemistry2.ipynb): Quantum Chemistry #2
#
# ## Sample Jupyter Notebooks
# Here below you can find some snippets of code that can help you getting started with Python.
# This Jupyter Notebooks contain piece of code that you can extract and adapt to solve the first three numerical laboratories (Bomb calorimetry, Langmuir Isotherm and Crystal Violet), they also provide the foundation for the other numerical laboratories.
#
# 0. [Basic introduction to Python can be found here](codeSnippets/0_introductionToPython.ipynb)
# 1. [Computing averages and histograms](codeSnippets/1_averageAndHistogram.ipynb)
# 2. [Average of subset of data](codeSnippets/2_averageChunkOfData.ipynb)
# 3. [Convergence of the average, standard deviation and standard error](codeSnippets/3_progressiveAverage.ipynb)
# 4. [Moving average](codeSnippets/4_movingAverage.ipynb)
# 5. [Handling multiple files](codeSnippets/5_multiFileAverage.ipynb)
# 6. [Linear fit (scipy)](codeSnippets/6_linearFit.ipynb)
# 7. [Exponential fit (scipy and lmfit)](codeSnippets/7_fittingArrhenius.ipynb)
# 8. [Making professional figures](codeSnippets/8_prettyFigure.ipynb)
# # Your working notebooks
# +
import ipywidgets as ipw
import os
from IPython.display import Javascript
import glob as glob
from pathlib import Path
import nbformat as nbf
label_layout = ipw.Layout(width='300px')
# +
##########
# Load the list of protected notebook filenames (one per line).  The file is
# looked for in the current directory first, then one level up; if both exist,
# the parent's list wins because the loop keeps overwriting `protectedFiles`.
# Fix: initialise to [] so that, when neither file exists, later membership
# tests (`x in protectedFiles`) raise no NameError.
protectedFiles = []
pfiles = ['.protectedFiles.txt', '../.protectedFiles.txt']
for fff in pfiles:
    if os.path.isfile(fff):
        with open(fff) as f:
            protectedFiles = f.read().splitlines()
def launchNotebook(filename):
    """Open `filename` in a new browser tab.

    Injects JavaScript that rebuilds the notebook server's current URL with the
    last path segment replaced by `filename`, then calls window.open() on it.
    Assumes it runs inside a Jupyter front end where `display(Javascript(...))`
    executes in the browser.
    """
    # First JS line binds the target filename; the rest reassembles the URL.
    text = " var name_of_the_notebook = '" + filename + "'"
    vv="""
    var url = window.location.href.split('/')
    var newurl = url[0] + '//'
    for (var i = 1; i < url.length - 1; i++) {
        console.log(url[i], newurl)
        newurl += url[i] + '/'
    }
    newurl += name_of_the_notebook
    window.open(newurl)
    """
    text = text + vv
    display(Javascript(text))
def openNewNotebook(btn):
    """Button callback: create a new notebook and open it in a new tab.

    The new notebook gets one markdown intro cell and one starter-imports code
    cell.  Refuses filenames that already exist on disk, are protected, or are
    already in the scanned notebook list.
    """
    if os.path.exists(notebookeName.value):
        print("Filename exists - Please select a different name")
        return
    nb = nbf.v4.new_notebook()
    text = """# Click 'Edit App' to start coding"""
    # Starter cell written verbatim into the new notebook (including the
    # original's "fittting" spelling — it is runtime content, not a comment).
    code = """\
# python packages
import pandas as pd # Dataframes and reading CSV files
import numpy as np # Numerical libraries
import matplotlib.pyplot as plt # Plotting library
from lmfit import Model # Least squares fitting library
from scipy.optimize import curve_fit # Alternative curve fittting library"""
    nb['cells'] = [nbf.v4.new_markdown_cell(text),
                   nbf.v4.new_code_cell(code)]
    if notebookeName.value in protectedFiles or notebookeName.value in listOfFiles:
        print("File already exists, select a different filename")
    else:
        with open(notebookeName.value, 'w') as f:
            nbf.write(nb, f)
        launchNotebook(notebookeName.value)
##########
# Scan the whole tree for notebooks, drop the protected ones, and keep the
# result in a stable sorted order for the dropdown.
# files = glob.glob1("./","*.ipynb")
files = glob.glob('./**/*.ipynb', recursive=True)
listOfFiles = sorted(name for name in files if name not in protectedFiles)
def dropdown_filesHandler(change):
    """Dropdown observer: remember the selected filename in the shared holder."""
    for entry in listOfFiles:
        if entry == change.new:
            oldNotebookeName[0] = entry
def createMenuFiles(data):
    """Build a dropdown widget listing `data` behind a "Choose one" placeholder."""
    choices = ["Choose one"] + list(data)
    menu = ipw.Dropdown(description="", options=choices, layout=ipw.Layout(width="300px"))
    # Route selection changes to the shared handler above.
    menu.observe(dropdown_filesHandler, names='value')
    return menu
##########
# One-element list so the dropdown callback can mutate the selection in place.
oldNotebookeName = ["None"]
def openOldNotebook(btn):
    """Button callback: open the notebook currently selected in the dropdown."""
    selected = oldNotebookeName[0]
    if selected == "None":
        print("Please select a filename")
        return
    if selected in protectedFiles:
        print("Please select a different filename")
        return
    launchNotebook(selected)
##########
# Assemble the launcher UI: one row to create a brand-new notebook, one row to
# reopen an existing notebook via the dropdown.
actions0 = []
notebookeName = ipw.Text("Empty.ipynb")  # default filename for a new notebook
btn_new = ipw.Button(description="Create a new notebook", layout=label_layout)
btn_new.on_click(openNewNotebook)
btn_old = ipw.Button(description="Open an old notebook", layout=label_layout)
btn_old.on_click(openOldNotebook)
actions0.append(ipw.HBox([btn_new, notebookeName]))
actions0.append(ipw.HBox([btn_old, createMenuFiles(listOfFiles)]))
# Last expression of the cell: renders the stacked rows in the notebook output.
ipw.VBox(actions0)
# -
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="wiN3XS_Ym8aJ"
# # Cab-Driver Agent
# + [markdown] colab_type="text" id="qu5jnYwQywLL"
# ## Imports
# + [markdown] colab_type="text" id="9EblFUaey5rk"
# ### Import and mount google drive (Required on google colab)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 1348, "status": "ok", "timestamp": 1566207070551, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="LQva1UbqnGmE" outputId="b5e9c459-c1fd-4c37-d2be-c2808bae6d35"
# from google.colab import drive
# drive.mount('/gdrive')
# # %cd /gdrive
# + [markdown] colab_type="text" id="rQjEKq7QzPxu"
# ### Import libraries
# **Note: Please use keras version 2.2.4 since the model was saved using the new version, it won't load with older version**
# + colab={} colab_type="code" id="u3jQ5Ixem8aL"
import numpy as np
import random
import math
import datetime
import itertools
import collections
import pickle
import pandas as pd
from collections import deque
# for building DQN model
from keras import layers
from keras import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from keras.models import load_model
from scipy.stats import zscore
# for plotting graphs
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display
from Env import CabDriver
# + [markdown] colab_type="text" id="6SzpjVh9m8aQ"
# ## Time Matrix
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 1210, "status": "ok", "timestamp": 1566190417174, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="KzsZxfPqm8aQ" outputId="58ab778a-2f21-4cff-ceb0-a5fbbf77a862"
# Loading the time matrix provided
# Indexed as Time_matrix[p][q][hour][day] -> travel time in hours from zone p
# to zone q at a given hour of day and day of week (assumed from the prints
# below — TODO confirm axis order against Env.py).
Time_matrix = np.load("TM.npy")
print('Time matrix shape: ', Time_matrix.shape)
print('Time taken in hours from location 1 to location 0 on 11 am on 05th day of week: ', Time_matrix[1][0][11][5])
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 1060, "status": "ok", "timestamp": 1566190419795, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="97tmh86a1t-Q" outputId="d8ed3810-d097-4ca5-a3cc-8ff01cacaa49"
# Sanity check: collect (state, action) pairs whose recorded trip time is 0
# even though pickup and drop zones differ — suspicious matrix entries.
zero_vals_state_action = []
for p in range(Time_matrix.shape[0]):
    for q in range(Time_matrix.shape[1]):
        for hour in range(Time_matrix.shape[2]):
            for day in range(Time_matrix.shape[3]):
                if Time_matrix[p][q][hour][day] == 0 and p != q:
                    # every possible current location z pairs with this zero-time trip
                    for z in range(1, Time_matrix.shape[0] + 1):
                        zero_vals_state_action.append(((z, hour, day), (p + 1, q + 1)))
# Fix: the original printed len(zero_vals_state_action[0:10]), which caps the
# reported count at 10 instead of reporting the true total.
print('Number of actions with 0 time taken: ', len(zero_vals_state_action))
# Guard: random.sample raises ValueError when the population holds fewer than
# 10 entries, so clamp the sample size.
print(random.sample(zero_vals_state_action, min(10, len(zero_vals_state_action))))
# + [markdown] colab_type="text" id="gAiYIC2Ym8aP"
# ## Test Rig for Environment
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" executionInfo={"elapsed": 856, "status": "ok", "timestamp": 1566190422545, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="NflnCKygm8aV" outputId="bf0bebab-f352-47e4-ccbe-bc4bb56fdf67"
# Debug mode makes the environment print its internals on each call (per the
# prints in the next cell) — TODO confirm against Env.py.
driver_env = CabDriver(debug=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 935} colab_type="code" executionInfo={"elapsed": 968, "status": "ok", "timestamp": 1566190424847, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="KEMIE2uBm8aY" outputId="def8c65f-0494-4fb5-cdfd-0dd2ed5a0cd8"
# Check Requests: the environment's currently available actions for the
# initial state.
actions = driver_env.requests(driver_env.state_init)
# select random action
action = random.choice(actions)
print('Random Action Selected : ', action)
print()
# Check Rewards Earned and Next state for random Action
next_state, reward, is_terminal = driver_env.step(driver_env.state_init, action)
print('Reward Value : ', reward)
print('Next State Value : ', next_state)
print('Is terminal : ', is_terminal)
print('Tracking info: ', driver_env.tracking_info())
print()
# Check Rewards Earned and Next state for Action - (0,0) - Driver didn't take any request.
next_state, reward, is_terminal = driver_env.step(driver_env.state_init, (0, 0))
print('Reward Value : ', reward)
print('Next State Value : ', next_state)
print('Is terminal : ', is_terminal)
print('Tracking info: ', driver_env.tracking_info())
print()
# Two alternative network-input encodings of a state; shapes printed as a
# sanity check (encoding details live in Env.py — confirm there).
state_encode_v1 = driver_env.encode_state_v1(driver_env.state_init)
print('encode_state_v1: ', state_encode_v1.shape)
state_encode_v2 = driver_env.encode_state_v2(driver_env.state_init)
print('encode_state_v2: ', state_encode_v2.shape)
# + colab={} colab_type="code" id="2NRLoA8Ym8aa"
# Baseline: run `episodes_max` full episodes with uniformly random action
# choices and collect per-episode totals for later comparison with the agent.
episode_num = []
accu_rewards = []
total_time = []
total_steps = []
episodes_max = 100
driver_env = CabDriver()
for i in range(1, episodes_max + 1):
    driver_env.reset()
    state = driver_env.state_init
    is_terminal = False
    while not is_terminal:
        # random action selection
        actions = driver_env.requests(state)
        action = random.choice(actions)
        next_state, reward, is_terminal = driver_env.step(state, action)
        state = next_state
    # tracking_info() is unpacked as (total reward, total time, step count)
    # per the variable names — TODO confirm against Env.py.
    tot_rewards, tot_time, steps = driver_env.tracking_info()
    episode_num.append(i)
    accu_rewards.append(tot_rewards)
    total_time.append(tot_time)
    total_steps.append(steps)
episodes_vs_rewards = pd.DataFrame(data={'Episode_Number': episode_num,
                                         'Accumulated_Rewards': accu_rewards,
                                         'Total_Time': total_time,
                                         'Total_Steps': total_steps})
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" executionInfo={"elapsed": 987, "status": "ok", "timestamp": 1566190438998, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="lQPSdvRZm8ad" outputId="65c73e0e-7f3f-49c6-de50-fd3b828e25fc"
episodes_vs_rewards.describe()  # summary statistics of the random baseline
# + colab={"base_uri": "https://localhost:8080/", "height": 835} colab_type="code" executionInfo={"elapsed": 1991, "status": "ok", "timestamp": 1566190448676, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="1LPmkNkmm8ag" outputId="742ddb26-e6dc-4b3b-af4e-051d587cce91"
# Plot rewards vs Episodes
# Quick visual check of the random baseline's per-episode totals.
episodes_vs_rewards.plot(x='Episode_Number', y='Accumulated_Rewards')
episodes_vs_rewards.plot(x='Episode_Number', y='Total_Time')
episodes_vs_rewards.plot(x='Episode_Number', y='Total_Steps')
# + [markdown] colab_type="text" id="DH42RzqCm8ai"
# ## Agent Class
#
# If you are using this framework, you need to fill the following to complete the following code block:
# 1. State and Action Size
# 2. Hyperparameters
# 3. Create a neural-network model in function 'build_model()'
# 4. Define epsilon-greedy strategy in function 'get_action()'
# 5. Complete the function 'append_sample()'. This function appends the recent experience tuple <state, action, reward, new-state> to the memory
# 6. Complete the 'train_model()' function with following logic:
# - If the memory size is greater than mini-batch size, you randomly sample experiences from memory as per the mini-batch size and do the following:
# - Initialise your input and output batch for training the model
# - Calculate the target Q value for each sample: reward + gamma*max(Q(s'a,))
# - Get Q(s', a) values from the last trained model
# - Update the input batch as your encoded state and output batch as your Q-values
# - Then fit your DQN model using the updated input and output batch.
# + colab={} colab_type="code" id="v7-1vOkxm8ai"
class DQNAgent:
    """Deep Q-Network agent for the CabDriver environment.

    Uses an online network for action selection and fitting, a separate target
    network (synchronised via `update_target_model`) for bootstrapped targets,
    a bounded experience-replay deque, and an exponentially decaying
    epsilon-greedy exploration schedule.
    """

    def __init__(self, state_size=None, action_size=None, discount_factor=0.95, learning_rate=0.01,
                 epsilon_min=0., epsilon_max=1., epsilon_decay=-0.0001, batch_size=32,
                 experience_buffer_size=2000, debug=False, env=None, state_encoding='v1'):
        # Fix: the original default was `env=CabDriver()` — a mutable default
        # argument evaluated once at definition time, so every agent built
        # without an explicit env silently shared one environment instance.
        # Defaulting to None and constructing per instance avoids that.
        if env is None:
            env = CabDriver()
        # Define size of state and action (explicit sizes override the env's).
        if state_size is not None:
            self.state_size = state_size
        else:
            if state_encoding == 'v1':
                self.state_size = env.state_size
            elif state_encoding == 'v2':
                self.state_size = len(env.state_space)
            else:
                self.state_size = env.state_size
        if action_size is not None:
            self.action_size = action_size
        else:
            self.action_size = len(env.action_space)
        self.state_encoding = state_encoding
        self.discount_factor = discount_factor
        self.learning_rate = learning_rate
        self.epsilon_min = epsilon_min
        self.epsilon_max = epsilon_max
        self.epsilon = epsilon_max
        self.epsilon_decay = epsilon_decay
        self.timestep = 0
        self.batch_size = batch_size
        # create replay memory using deque (oldest experiences evicted first)
        self.memory = deque(maxlen=experience_buffer_size)
        self.debug = debug
        self.env = env
        if self.debug:
            # Fix: this key was mislabelled 'episodes_min' in the original dump.
            print('DQNAgent initialized with following params: ', {
                'state_size': self.state_size,
                'action_size': self.action_size,
                'state_encoding': self.state_encoding,
                'discount_factor': self.discount_factor,
                'learning_rate': self.learning_rate,
                'epsilon_min': self.epsilon_min,
                'epsilon_max': self.epsilon_max,
                'epsilon': self.epsilon,
                'epsilon_decay': self.epsilon_decay,
                'batch_size': self.batch_size
            })
        # create main model and target model, then align their weights
        self.model = self.build_model()
        self.target_model = self.build_model()
        self.update_target_model()

    def build_model(self):
        """Build the Q-network: encoded state in, one Q-value per action out."""
        model = Sequential()
        model.add(Dense(32, input_dim=self.state_size, activation='relu', kernel_initializer='he_uniform'))
        model.add(Dense(32, activation='relu', kernel_initializer='he_uniform'))
        # Fix: the output layer must be linear.  Q-values in this problem can be
        # negative (rewards include costs), and the original ReLU output clamps
        # every estimate to >= 0, making such targets unlearnable.
        model.add(Dense(self.action_size, activation='linear', kernel_initializer='he_uniform'))
        if self.debug:
            model.summary()
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model

    def update_target_model(self):
        """Copy the online network's weights into the target network."""
        self.target_model.set_weights(self.model.get_weights())

    def get_action(self, state):
        """Pick an action for `state` with an epsilon-greedy policy.

        With probability epsilon, a random currently-available request is taken;
        otherwise the argmax of the online network's Q-values.
        NOTE(review): the greedy branch takes the argmax over the FULL action
        space, not just the available requests — confirm this is intended.
        """
        if np.random.rand() <= self.epsilon:
            # explore: random choice among the env's available requests
            actions = self.env.requests(state)
            action = random.choice(actions)
            if self.debug:
                print('Random action chosen: ', action)
        else:
            # exploit: choose the action with the highest q(s, a)
            if self.state_encoding == 'v1':
                encoded_state = self.env.encode_state_v1(state)
            else:
                encoded_state = self.env.encode_state_v2(state)
            # reshape to (1, state_size): the first axis is the batch dimension
            encoded_state = encoded_state.reshape(1, self.state_size)
            q_value = self.model.predict(encoded_state)
            action = self.env.action_space[np.argmax(q_value[0])]
            if self.debug:
                print('Predicted action chosen: ', action)
        return action

    def get_q_value(self, state, action):
        """Return the online network's Q estimate for one (state, action) pair."""
        if self.state_encoding == 'v1':
            encoded_state = self.env.encode_state_v1(state)
        else:
            encoded_state = self.env.encode_state_v2(state)
        encoded_state = encoded_state.reshape(1, self.state_size)
        q_value = self.model.predict(encoded_state)
        action_index = self.env.action_space.index(action)
        return q_value[0][action_index]

    def append_sample(self, state, action, reward, next_state, is_terminal):
        """Store one experience tuple in the replay memory."""
        self.memory.append((state, action, reward, next_state, is_terminal))

    def decay_epsilon(self):
        """Exponentially decay epsilon towards epsilon_min; one step per sample."""
        self.timestep = self.timestep + 1
        self.epsilon = self.epsilon_min + (self.epsilon_max - self.epsilon_min) * np.exp(self.epsilon_decay * self.timestep)
        if self.debug:
            print('new epsilon value: ', self.epsilon)

    def train_model(self):
        """Fit the online network on one random minibatch from replay memory.

        Target per sample: r for terminal transitions, otherwise
        r + gamma * max_a' Q_target(s', a').  No-op until the memory holds at
        least one full batch.
        """
        if len(self.memory) >= self.batch_size:
            # Sample batch from the memory
            mini_batch = random.sample(self.memory, self.batch_size)
            update_input = np.zeros((self.batch_size, self.state_size))   # encoded s
            update_target = np.zeros((self.batch_size, self.state_size))  # encoded s'
            action, reward, is_terminal = [], [], []
            for i in range(self.batch_size):
                if self.state_encoding == 'v1':
                    update_input[i] = self.env.encode_state_v1(mini_batch[i][0])
                    update_target[i] = self.env.encode_state_v1(mini_batch[i][3])
                else:
                    update_input[i] = self.env.encode_state_v2(mini_batch[i][0])
                    update_target[i] = self.env.encode_state_v2(mini_batch[i][3])
                action.append(mini_batch[i][1])
                reward.append(mini_batch[i][2])
                is_terminal.append(mini_batch[i][4])
            # current predictions for s: only the taken action's entry is overwritten,
            # so the other actions contribute zero loss
            target = self.model.predict(update_input)
            # bootstrap values for s' from the frozen target network
            target_qval = self.target_model.predict(update_target)
            for i in range(self.batch_size):
                action_index = self.env.action_space.index(action[i])
                if is_terminal[i]:
                    target[i][action_index] = reward[i]
                else:
                    target[i][action_index] = reward[i] + self.discount_factor * (np.amax(target_qval[i]))
            # model fit on the updated targets
            self.model.fit(update_input, target, batch_size=self.batch_size, epochs=1, verbose=0)

    def save(self, name):
        """Persist the online model to `name` (Keras HDF5 format)."""
        self.model.save(name)
# + [markdown] colab_type="text" id="zXx7xiDJm8am"
# ## DQN block
# + colab={} colab_type="code" id="HM8wV5o3m8am"
# Cross-cell result trackers; reset manually (see the training cell below)
# before starting a fresh run.
scores, timesteps, episodes = [], [], []
state_action_sample = {}
def train_agent(episodes_to_run=1000, model_name='model.hd5', debug=False, state_encoding='v1',
                episode_info_frequency=50, model_save_frequency=50, **agent_args):
    """Train a DQNAgent on the CabDriver environment.

    Side effects: appends one entry per episode to the module-level `scores`,
    `timesteps` and `episodes` lists; records Q-value traces for a random 0.1%
    sample of (state, action) pairs in `state_action_sample`; saves the model
    every `model_save_frequency` episodes and once more at the end.  Extra
    keyword arguments are forwarded to the DQNAgent constructor.
    """
    env = CabDriver(debug=debug)
    agent = DQNAgent(debug=debug, state_encoding=state_encoding, **agent_args)
    # randomly sample 0.1% of state-action pairs for tracking convergence
    state_action_to_sample = random.sample([(state, action) for state, action in itertools.product(env.state_space, env.action_space)], math.ceil(0.001 * len(env.state_space) * len(env.action_space)))
    for state, action in state_action_to_sample:
        state_action_sample.update({(state, action): []})
    for episode in range(episodes_to_run):
        is_terminal = False
        score = 0    # NOTE(review): never accumulated; kept as-is
        state = env.reset()
        rewards = []  # per-step rewards; only appended to, not read afterwards
        while not is_terminal:
            # Write your code here
            # 1. Pick epsilon-greedy action from possible actions for the current state
            # 2. Evaluate your reward and next state
            # 3. Append the experience to the memory
            # 4. Train the model by calling function agent.train_model
            # 5. Keep a track of rewards, Q-values, loss
            # get action for the current state and go one step in environment
            action = agent.get_action(state)
            next_state, reward, is_terminal = env.step(state, action)
            rewards.append(reward)
            agent.append_sample(state, action, reward, next_state, is_terminal)
            agent.decay_epsilon()
            agent.train_model()
            # record the Q-value trace for the sampled (state, action) pairs
            if (state, action) in state_action_to_sample:
                q_value = agent.get_q_value(state, action)
                state_action_sample[(state, action)].append(q_value)
            state = next_state
        # sync the target network once per episode
        agent.update_target_model()
        total_rewards, episode_length, total_timesteps = env.tracking_info()
        scores.append(total_rewards)
        timesteps.append(total_timesteps)
        episodes.append(episode)
        if episode % episode_info_frequency == 0:
            print("episode:", episode, " score:", total_rewards, " requests:", total_timesteps, " memory length:", len(agent.memory), " timesteps:", agent.timestep, " epsilon:", agent.epsilon, " last 200 std dev:", np.std(scores[-200:]))
        if episode % model_save_frequency == 0:
            agent.save(model_name)
    # final summary line and save after the last episode
    print("episode:", episode, " score:", total_rewards, " requests:", total_timesteps, " memory length:", len(agent.memory), " timesteps:", agent.timestep, " epsilon:", agent.epsilon, " last 200 std dev:", np.std(scores[-200:]))
    agent.save(model_name)
# + [markdown] colab_type="text" id="9zRlIfUT5g0J"
# ### Test dqn training block for 1 episode
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 3344, "status": "ok", "timestamp": 1566190628318, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="_nMZVb0vm8ao" outputId="d1189a2d-2f53-4adc-ea71-2d05557075cd"
# Smoke test: one fully-debugged episode exercising env + agent end to end.
train_agent(episodes_to_run=1, model_name='test.hd5', debug=True, epsilon_decay=-0.00005)
# + [markdown] colab_type="text" id="x8w_yuLx6E5C"
# ### Train dqn agent with state_encoding v1
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 81, "status": "ok", "timestamp": 1566206786414, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="ABwwYYkkm8ar" outputId="aea0335f-5257-409a-ccea-f1695200b29b"
# Reset the cross-cell trackers, then run the full 20k-episode training.
scores, timesteps, episodes = [], [], []
state_action_sample = {}
train_agent(episodes_to_run=20000, model_name='model_new.hd5', episode_info_frequency=200, epsilon_decay=-0.00005)
# + [markdown] colab_type="text" id="HN0EtJ2k7B7H"
# ### Save scores and cab requests per episode to numpy file
# + colab={} colab_type="code" id="YI14_DMZOTIp"
# Persist per-episode results as a 3xN array: rows are (episode, score, requests).
np_array = np.array([episodes, scores, timesteps])
np.save('result.npy', np_array)
# + [markdown] colab_type="text" id="F0z_Sr5O7NWj"
# ### Save state action samples to pickle file
# + colab={} colab_type="code" id="OSwg1OFyPXxE"
# Persist the sampled Q-value traces for offline convergence analysis.
with open('state_action_sample.pickle', 'wb') as handle:
    pickle.dump(state_action_sample, handle, protocol=pickle.HIGHEST_PROTOCOL)
# + [markdown] colab_type="text" id="jrAP973F6NvU"
# ### Track convergence
# + [markdown] colab_type="text" id="wmrHwYpd6SdR"
# #### Plot reward and cab requests handled per episode
# + colab={"base_uri": "https://localhost:8080/", "height": 350} colab_type="code" executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1566206786421, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="dkeZtck6m8au" outputId="49c8fae1-8350-4830-c686-c22b25ecfbe7"
# Reload the saved per-episode results: row 0 = episode index, row 1 = score,
# row 2 = requests served (layout fixed by the save cell above).
score_requests_array = np.load('result.npy')
episodes = score_requests_array[0]
scores = score_requests_array[1]
timesteps = score_requests_array[2]
plt.figure(figsize=(15, 5))
plt.subplot(121)
plt.plot(episodes, scores, 'b')
plt.xlabel('episode')
plt.ylabel('reward')
plt.title('reward vs episode')
plt.subplot(122)
plt.plot(episodes, timesteps, 'b')
plt.xlabel('episode')
plt.ylabel('number of requests')
plt.title('episode vs number of requests')
plt.show()
# + [markdown] colab_type="text" id="o7UTdutU80wU"
# #### Plot sampled q_values for state action pairs
# + colab={"base_uri": "https://localhost:8080/", "height": 550} colab_type="code" executionInfo={"elapsed": 17, "status": "ok", "timestamp": 1566206786422, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="WauVp4y7QZE_" outputId="c0d2f91d-7df1-492b-d5dc-11e3203be32c"
# Reload the Q-value traces and plot every sampled (state, action) pair that
# was visited at least 5 times, to eyeball convergence of the Q estimates.
with open('state_action_sample.pickle', 'rb') as handle:
    state_action_sample = pickle.load(handle)
plt.figure(figsize=(16, 8))
for state_action, samples in filter(lambda elem: len(elem[1]) >= 5, state_action_sample.items()):
    plt.plot(samples, label='state:{}, action:{}'.format(state_action[0], state_action[1]))
plt.xlabel('time step')
plt.ylabel('q_value')
plt.title('q_value vs time steps for state-action samples')
plt.legend()
# + [markdown] colab_type="text" id="SXP7chXEm8a1"
# ## Compare Change in Revenue for 2 Years (24 Episodes)
# + colab={} colab_type="code" id="nxBfjBgKr-IN"
# Compare 24 episodes ("2 years", one episode per month) of random action
# selection vs the trained RL model's greedy policy.
episodes_max = 24
manual_rewards = []
manual_time = []
manual_steps = []
model_rewards = []
model_time = []
model_steps = []
rl_model = load_model('model.hd5')
def test_sequence(manual=True):
    """Run `episodes_max` episodes; random policy if `manual`, else greedy RL model.

    Returns three parallel lists (accumulated rewards, total time, total steps),
    one entry per episode.
    """
    episode_num = []
    total_time = []
    total_steps = []
    accu_rewards = []
    driver_env = CabDriver()
    for i in range(1, episodes_max + 1):
        driver_env.reset()
        state = driver_env.state_init
        is_terminal = False
        while not is_terminal:
            if manual:
                # random baseline: uniform choice over available requests
                actions = driver_env.requests(state)
                action = random.choice(actions)
            else:
                encoded_state = driver_env.encode_state_v1(state)
                # 36 is presumably the v1 encoding size (5 locations + 24 hours
                # + 7 days) — TODO confirm against Env.py / agent.state_size
                encoded_state = encoded_state.reshape(1, 36)
                q_value = rl_model.predict(encoded_state)
                action = driver_env.action_space[np.argmax(q_value[0])]
            next_state, reward, is_terminal = driver_env.step(state, action)
            state = next_state
        tot_rewards, tot_time, steps = driver_env.tracking_info()
        episode_num.append(i)  # NOTE(review): built but never returned
        accu_rewards.append(tot_rewards)
        total_time.append(tot_time)
        total_steps.append(steps)
    return accu_rewards, total_time, total_steps
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1235, "status": "ok", "timestamp": 1566207104654, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="lh3mUI-5tBEy" outputId="dde23d12-a753-49c2-c32a-74921db5a049"
# Time the random-policy baseline over the 24 test episodes.
start_time = datetime.datetime.now()
manual_rewards, manual_time, manual_steps = test_sequence()
end_time = datetime.datetime.now()
print('Time Elapsed for Manual Selection : ', end_time - start_time)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 2478, "status": "ok", "timestamp": 1566207108187, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="wL260UJctXaQ" outputId="0aa7eb48-58be-4d21-8b36-21a9781ee3f6"
# Time the trained model's greedy policy over the same number of episodes.
start_time = datetime.datetime.now()
model_rewards, model_time, model_steps = test_sequence(manual=False)
end_time = datetime.datetime.now()
print('Time Elapsed for RL_Model Selection : ', end_time - start_time)
# + colab={} colab_type="code" id="8oWePiVoteek"
# Collect both runs into one tidy frame: one row per simulated month.
revenue = pd.DataFrame({
    'manual_rewards': manual_rewards,
    'manual_time': manual_time,
    'manual_steps': manual_steps,
    'model_rewards': model_rewards,
    'model_time': model_time,
    'model_steps': model_steps,
})
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" executionInfo={"elapsed": 1209, "status": "ok", "timestamp": 1566207126155, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="ortRHsaV6-Uk" outputId="22206314-d15c-4362-949e-31226d6cdbd9"
revenue[['manual_rewards','manual_steps','model_rewards','model_steps']].describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 520} colab_type="code" executionInfo={"elapsed": 1703, "status": "ok", "timestamp": 1566207137377, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="fk4ahuAZ6iu3" outputId="3e2e7c07-7e9c-4a56-8cb1-a8e7608e6879"
# Revenue (reward) per simulated month: random baseline vs trained model.
fig = plt.figure(figsize=(16, 8), dpi=70, facecolor='w', edgecolor='k')
sns.lineplot(y=revenue['manual_rewards'], x=range(1, len(revenue) + 1), label='Random Selection')
sns.lineplot(y=revenue['model_rewards'], x=range(1, len(revenue) + 1), label='RL Model Selection')
plt.xlabel(' -- Month -- ')
plt.ylabel(' -- Revenue -- ')
plt.title('Reward over 2 years')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 613} colab_type="code" executionInfo={"elapsed": 2255, "status": "ok", "timestamp": 1566207411689, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="0xQvWuu_tySd" outputId="1aa1dadc-2a21-448b-d098-14e63331dc90"
# Requests served per simulated month: random baseline vs trained model.
fig = plt.figure(figsize=(16, 10), dpi=70, facecolor='w', edgecolor='k')
sns.lineplot(y=revenue['manual_steps'], x=range(1, len(revenue) + 1), label='Random Selection')
sns.lineplot(y=revenue['model_steps'], x=range(1, len(revenue) + 1), label='RL Model Selection')
plt.xlabel(' -- Month -- ')
plt.ylabel(' -- Number of requests served per month -- ')
plt.show()
# + [markdown] colab_type="text" id="ZdDABNqCm8a9"
# ## Epsilon-decay function
# epsilon-decay function for the model.
# + colab={} colab_type="code" id="izgTaKEYm8a_"
# Visualise the exploration schedule:
#   epsilon(t) = eps_min + (eps_max - eps_min) * exp(decay * t)
# and the resulting explore(1)/exploit(0) decision at each time step.
# (`time` here is just an array name; the stdlib `time` module is not imported.)
time = np.arange(0, 100000)
epsilon_min = 0
epsilon_max = 1
epsilon_decay = -0.00005
# Vectorised: one NumPy expression replaces the 100k-iteration Python loop.
epsilons = epsilon_min + (epsilon_max - epsilon_min) * np.exp(epsilon_decay * time)
# One Bernoulli draw per step: 1 = explore (random action), 0 = exploit.
random_actions = (np.random.rand(time.size) <= epsilons).astype(int)
# + colab={"base_uri": "https://localhost:8080/", "height": 350} colab_type="code" executionInfo={"elapsed": 1658, "status": "ok", "timestamp": 1566151480359, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-ZbIcHpA6axU/AAAAAAAAAAI/AAAAAAAABfo/UEO6yap-1W0/s64/photo.jpg", "userId": "04283162121766014139"}, "user_tz": -330} id="_n1ry-13m8bB" outputId="7a50cee2-3f3e-4153-99b9-d8d57a2f9cfc"
# Left: the epsilon schedule over all 100k steps.  Right: a 1000-step window
# of the resulting explore/exploit decisions around t = 50k.
plt.figure(figsize=(15, 5))
plt.subplot(121)
plt.plot(time, epsilons)
plt.xlabel('time step')
plt.ylabel('epsilon value')
plt.title('epsilon value decay per time step')
plt.subplot(122)
plt.plot(time[50000:51000], random_actions[50000:51000])
plt.xlabel('time step')
plt.yticks([])
plt.title('random action per time step')
plt.show()
# -
| SuperCabs/Agent_Architecture2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from pprint import pprint
import torch
import torch.nn as nn
from transformers import BertForTokenClassification, BertTokenizer
from transformers import AdamW
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from sklearn.model_selection import train_test_split
import numpy as np
from tqdm.notebook import tqdm
# -
# ## 读取MSRA实体识别数据集
file = "../datasets/dh_msra.txt"
# ## 检查GPU情况
# +
# GPUcheck
print("CUDA Available: ", torch.cuda.is_available())
n_gpu = torch.cuda.device_count()
if torch.cuda.is_available():
print("GPU numbers: ", n_gpu)
print("device_name: ", torch.cuda.get_device_name(0))
device = torch.device("cuda:0") # 注意选择
torch.cuda.set_device(0)
print(f"当前设备:{torch.cuda.current_device()}")
else :
device = torch.device("cpu")
print(f"当前设备:{device}")
# -
# ## 配置参数
#
# 规范化配置参数,方便使用。
# +
class Config(object):
    """Configuration parameters for BERT token-classification (NER) fine-tuning."""
    def __init__(self):
        self.model_name = 'Bert_NER.bin'
        self.bert_path = './bert-chinese/'
        self.ner_file = '../datasets/dh_msra.txt'
        self.num_classes = 10                             # number of tag classes (incl. [CLS]/[SEP]/[PAD])
        self.hidden_size = 768                            # BERT hidden-state dimension
        self.hidden_dropout_prob = 0.1                    # dropout ratio
        self.batch_size = 128                             # mini-batch size
        self.max_len = 103                                # max padded sentence length
        self.epochs = 3                                   # number of training epochs
        self.learning_rate = 2e-5                         # NOTE(review): the optimizer cell hard-codes lr=5e-5; this value is unused — confirm
        self.save_path = './saved_model/'                 # where the fine-tuned model is saved
        # self.fp16 = False
        # self.fp16_opt_level = 'O1'
        # self.gradient_accumulation_steps = 1
        # self.warmup_ratio = 0.06
        # self.warmup_steps = 0
        # self.max_grad_norm = 1.0
        # self.adam_epsilon = 1e-8
        # self.class_list = class_list # 类别名单
        # self.require_improvement = 1000 # 若超过1000batch效果还没提升,则提前结束训练
config = Config()
# +
# Parse the CoNLL-style file: one "char\tlabel" pair per line,
# a blank line terminates a sentence.
all_sentences_separate = []
all_letter_labels = []
label_set = set()
with open(config.ner_file, encoding="utf-8") as f:
    single_sentence = []
    single_sentence_labels = []
    for s in f.readlines():
        if s == "\n":
            # blank line: close out the current sentence
            all_sentences_separate.append(single_sentence)
            all_letter_labels.append(single_sentence_labels)
            single_sentence = []
            single_sentence_labels = []
        else:
            word, label = s.split("\t")
            label = label.strip("\n")
            single_sentence.append(word)
            single_sentence_labels.append(label)
            label_set.add(label)
# +
# Sanity-check the parsed data and the observed label inventory.
print(all_sentences_separate[0:2])
print(all_letter_labels[0:2])
print(f"\n所有的标签:{label_set}")
# +
# Map each NER tag to an integer id; the special BERT tokens get their own ids
# so label sequences can mirror the tokenized inputs.
tag_to_ix = {
    "B-LOC": 0,
    "I-LOC": 1,
    "B-ORG": 2,
    "I-ORG": 3,
    "B-PER": 4,
    "I-PER": 5,
    "O": 6,
    "[CLS]": 7,
    "[SEP]": 8,
    "[PAD]": 9,
}
# Inverse mapping, derived from the forward one so the two can never drift apart.
ix_to_tag = {ix: tag for tag, ix in tag_to_ix.items()}
# -
# ## 数据示例
#
# 这里简单查看一些数据例子,其中很多都是数字6。
#
# 数字6说明是 O 类型的实体。
# +
# Re-join each per-character list into a plain sentence string.
all_sentences = ["".join(one_sentence) for one_sentence in all_sentences_separate]
print(all_sentences[0:2])
# +
# Convert per-character tag strings into integer ids via tag_to_ix.
all_labels = [[tag_to_ix[t] for t in letter_labels] for letter_labels in all_letter_labels]
print(all_labels[0:2])
print(len(all_labels[0]))
# -
print(len(all_labels))
# ### input数据准备
# +
# word2token
# Character-level tokenization with the Chinese BERT vocab.
tokenizer = BertTokenizer.from_pretrained('./bert-chinese/', do_lower_case=True)
# Newer transformers API: tokenize, pad and truncate in a single call.
encoding = tokenizer(all_sentences,
                     return_tensors='pt',     # 'pt' = PyTorch tensors, 'tf' = TensorFlow
                     padding='max_length',    # pad every sentence to max_length
                     truncation=True,         # truncate sentences longer than max_length
                     max_length=config.max_len)
input_ids = encoding['input_ids']
# -
# Inspect the first sentence before and after tokenization/padding.
print(f"Tokenize 前的第一句话:\n{all_sentences[0]}\n")
print(f"Tokenize + Padding 后的第一句话: \n{input_ids[0]}")
# Attention masks (1 = real token, 0 = padding) and segment ids from the same encoding.
attention_masks = encoding['attention_mask']
token_type_ids = encoding['token_type_ids']
# The first sentence's attention mask.
print(attention_masks[0])
# ## 准备labels
#
# 由于我们的input_ids是带有`[CLS]`和`[SEP]`的,所以在准备label的同时也要考虑这些情况。
# Align label sequences with the tokenized inputs: add [CLS]/[SEP] ids,
# truncate to max_len (mirroring the tokenizer's truncation=True above),
# then pad with [PAD] (id 9) so every sequence has length config.max_len.
for label in all_labels:
    # BUG FIX: labels longer than max_len were never truncated, which produced
    # ragged lists and would break torch.tensor(all_labels) later.
    if len(label) > config.max_len - 2:   # keep room for [CLS] and [SEP]
        del label[config.max_len - 2:]
    label.append(8)      # [SEP]
    label.insert(0, 7)   # [CLS]
    while len(label) < config.max_len:
        label.append(9)  # [PAD]
print(len(all_labels[0]))
print(all_labels[0])
# +
# Report the longest label sequence and the longest tokenized input.
max_len_label = 0
max_len_text = 0
for label in all_labels:
    # BUG FIX: the guard compared against max_len_text (always 0 here), so
    # max_len_label ended up as the length of the LAST label, not the maximum.
    if len(label) > max_len_label:
        max_len_label = len(label)
print(max_len_label)
for one_input in input_ids:
    if len(one_input) > max_len_text:
        max_len_text = len(one_input)
print(max_len_text)
# -
# ## 切分训练和测试集
# Split inputs/labels into train and validation sets. The masks are split in a
# second call with the same random_state and test_size, so the mask rows stay
# aligned with the input rows.
train_inputs, validation_inputs, train_labels, validation_labels = train_test_split(input_ids,
                                                                                    all_labels,
                                                                                    random_state=2021,
                                                                                    test_size=0.1)
train_masks, validation_masks, _, _ = train_test_split(attention_masks,
                                                       input_ids,
                                                       random_state=2021,
                                                       test_size=0.1)
# +
print(len(train_inputs))
print(len(validation_inputs))
print(train_inputs[0])
print(validation_inputs[0])
# -
# 这里把输入的labels变为tensor形式。
# Convert the Python label lists to tensors. torch.tensor() already returns a
# fresh, detached tensor, so the previous .clone().detach() chain did no work.
train_labels = torch.tensor(train_labels)
validation_labels = torch.tensor(validation_labels)
# +
# Quick size/content sanity checks on the tensorized labels.
print(train_labels[0])
print(len(train_labels))
print(len(train_inputs))
# +
# Build the PyTorch DataLoaders.
# Training set: random sampling each epoch.
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=config.batch_size)
# Validation set: sequential sampling (deterministic order).
validation_data = TensorDataset(validation_inputs, validation_masks, validation_labels)
validation_sampler = SequentialSampler(validation_data)
validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=config.batch_size)
# -
# +
# Load the pretrained Chinese BERT with a fresh token-classification head.
model = BertForTokenClassification.from_pretrained(config.bert_path, num_labels=config.num_classes)
model.cuda()
# Note: newer transformers versions warn here because the pretrained checkpoint
# does not contain weights for the final classification layer — that layer is
# randomly initialized and trained during fine-tuning, which is expected.
# +
# BERT fine-tuning parameter groups: apply weight decay to all parameters
# except biases and LayerNorm weights (standard BERT recipe).
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay': 0.01},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0}]
# -
# Optimizer.
optimizer = AdamW(optimizer_grouped_parameters,
                  lr=5e-5)  # NOTE(review): hard-coded; config.learning_rate (2e-5) is not used here
# Collected per-step losses (populated nowhere below — kept for parity).
train_loss_set = []
# BERT training loop: one pass below per epoch.
for _ in range(config.epochs):
    ## Training
    print(f"当前epoch: {_}")
    # switch to training mode (enables dropout)
    model.train()
    tr_loss = 0  # running train loss for this epoch
    nb_tr_examples, nb_tr_steps = 0, 0
    # Train the data for one epoch
    for step, batch in tqdm(enumerate(train_dataloader)):
        # move the batch tensors onto the GPU
        batch = tuple(t.to(device) for t in batch)
        # unpack the batch
        b_input_ids, b_input_mask, b_labels = batch
        # zero accumulated gradients
        optimizer.zero_grad()
        # forward pass; passing labels makes the model return the loss first
        output = model(input_ids=b_input_ids,
                       attention_mask=b_input_mask,
                       labels=b_labels)
        loss = output[0]
        # print(loss)
        # backward pass
        loss.backward()
        # Update parameters and take a step using the computed gradient
        optimizer.step()
        # Update tracking variables
        tr_loss += loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1
    print(f"当前 epoch 的 Train loss: {tr_loss/nb_tr_steps}")
# +
# Switch to evaluation mode (disables dropout).
model.eval()
# Tracking variables (declared but not updated by the loop below).
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
# Evaluate data for one epoch
# -
# 验证集的读取也要batch
# Run the validation set in batches.
for batch in tqdm(validation_dataloader):
    # move the tuple onto the GPU
    batch = tuple(t.to(device) for t in batch)
    # unpack
    b_input_ids, b_input_mask, b_labels = batch
    # inference only — no gradients
    with torch.no_grad():
        # token_type_ids=None -> treated as all zeros (single sentence)
        # position_ids=None -> default [0, seq_len-1]
        outputs = model(input_ids=b_input_ids,
                        attention_mask=b_input_mask,
                        token_type_ids=None,
                        position_ids=None)
    # print(logits[0])
    # Move logits and labels to CPU
    scores = outputs[0].detach().cpu().numpy()  # per-token class logits
    # NOTE(review): only scores[0] (the first sentence of the batch) is argmax'd,
    # and no metric is accumulated across batches — confirm this is intentional.
    pred_flat = np.argmax(scores[0], axis=1).flatten()
    label_ids = b_labels.to('cpu').numpy()  # ground-truth label ids
    # print(logits, label_ids)
# +
# Save the fine-tuned model so it can be reloaded with `from_pretrained()`.
# Create the output folder if needed.
if not os.path.exists(config.save_path):
    os.makedirs(config.save_path)
    print("文件夹不存在,创建文件夹!")
else:
    pass
output_dir = config.save_path
model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
# BUG FIX: the reload cell below calls BertForTokenClassification.from_pretrained(output_dir)
# and BertTokenizer.from_pretrained(output_dir), which require the HF `save_pretrained`
# layout (config.json + weights + vocab) — a raw state_dict file alone is not enough.
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
# Also keep the raw state_dict for compatibility with the original notebook.
torch.save(model_to_save.state_dict(), os.path.join(output_dir, config.model_name))
# -
# Reload the fine-tuned model and vocabulary.
# Load a trained model and vocabulary that you have fine-tuned
# NOTE(review): from_pretrained expects the save_pretrained layout
# (config.json + weights + vocab) in output_dir — verify the save cell produced it.
output_dir = config.save_path
model = BertForTokenClassification.from_pretrained(output_dir)
tokenizer = BertTokenizer.from_pretrained(output_dir)
model.to(device)
# +
# Single-sentence inference test (variable name 'sententce' is a typo kept for
# consistency with the rest of the notebook).
# test_sententce = "在北京市朝阳区的一家网吧,我亲眼看见卢本伟和孙笑川一起开挂。"
test_sententce = "史源源的房子租在滨江区南环路税友大厦附近。"
# +
# Rebuild the tag <-> id mappings (duplicated here so the inference cells can
# run standalone after restarting the kernel).
tag_to_ix = {
    "B-LOC": 0,
    "I-LOC": 1,
    "B-ORG": 2,
    "I-ORG": 3,
    "B-PER": 4,
    "I-PER": 5,
    "O": 6,
    "[CLS]": 7,
    "[SEP]": 8,
    "[PAD]": 9,
}
# Inverse mapping derived from the forward one.
ix_to_tag = {ix: tag for tag, ix in tag_to_ix.items()}
# +
# Tokenize the single test sentence.
encoding = tokenizer(test_sententce,
                     return_tensors='pt',  # 'pt' = PyTorch tensors, 'tf' = TensorFlow
                     padding=True,         # pad to the longest sequence in the batch
                     truncation=True,      # truncate anything longer than max_length
                     max_length=50)
test_input_ids = encoding['input_ids']
# Attention mask for the test sentence.
test_attention_masks = encoding['attention_mask']
# -
# Wrap the single test sentence in a DataLoader for uniformity with the
# training/validation pipeline.
test_data = TensorDataset(test_input_ids, test_attention_masks)
# Sequential (deterministic) sampling.
test_sampler = SequentialSampler(test_data)
# Batch iterator.
test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=config.batch_size)
# +
# Switch to evaluation mode.
model.eval()
# Tracking variables (declared but unused by the inference loop below).
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
# Evaluate data for one epoch
# -
# Run inference on the test DataLoader (single sentence here).
for batch in tqdm(test_dataloader):
    # move the tuple onto the GPU
    batch = tuple(t.to(device) for t in batch)
    # unpack
    b_input_ids, b_input_mask = batch
    # inference only — no gradients
    with torch.no_grad():
        # token_type_ids=None -> all zeros (single sentence)
        # position_ids=None -> default [0, seq_len-1]
        # NOTE(review): attention_mask=None although a mask was built above — confirm
        outputs = model(input_ids=b_input_ids,
                        attention_mask=None,
                        token_type_ids=None,
                        position_ids=None)
    # Move logits and labels to CPU
    scores = outputs[0].detach().cpu().numpy()  # per-token class logits
    pred_flat = np.argmax(scores[0], axis=1).flatten()
    # label_ids = b_labels.to('cpu').numpy() # 真实labels
print(pred_flat)  # predicted label ids
pre_labels = [ix_to_tag[n] for n in pred_flat]
print(f"测试句子: {test_sententce}")
print(len(test_sententce))
print(pre_labels)
# Keep sentence length + 2 positions ([CLS] ... [SEP]).
pre_labels_cut = pre_labels[0:len(test_sententce)+2]
pre_labels_cut
# +
# Decode the BIO tag sequence into entity index spans.
# `person` / `location` act as temporary stacks of character positions; a
# finished span is flushed into `persons` / `locations`.
person = []   # temp stack
persons = []
location = []
locations = []
for i in range(len(pre_labels_cut) - 1):
    # --- Person ---
    # BUG FIX: the original PER guards tested `location` instead of `person`
    # (copy-paste from the LOC branch), and a new B-PER while a PER span was
    # pending was merged into it instead of flushing; now mirrors the LOC logic.
    # single-character entity
    if pre_labels[i] == 'B-PER' and pre_labels[i+1] != 'I-PER' and len(person) == 0:
        person.append(i)
        persons.append(person)
        person = []  # reset the stack
        continue
    # multi-character entity
    if pre_labels[i] == 'B-PER' and pre_labels[i+1] == 'I-PER' and len(person) != 0:
        persons.append(person)  # flush the pending span first
        person = []
        person.append(i)        # start the new B-PER span
    elif pre_labels[i] == 'B-PER' and pre_labels[i+1] == 'I-PER' and len(person) == 0:
        person.append(i)        # start a new B-PER span
    elif pre_labels[i] == 'I-PER' and len(person) != 0:
        person.append(i)
    elif pre_labels[i] != 'I-PER' and len(person) != 0:
        persons.append(person)  # span ended: flush
        person = []
    else:  # rare dangling I-PER with no open span: ignore
        pass
    # --- Location ---
    # single-character entity
    if pre_labels[i] == 'B-LOC' and pre_labels[i+1] != 'I-LOC' and len(location) == 0:
        location.append(i)
        locations.append(location)
        location = []  # reset the stack
        continue
    # multi-character entity
    if pre_labels[i] == 'B-LOC' and pre_labels[i+1] == 'I-LOC' and len(location) != 0:
        locations.append(location)  # flush the pending span first
        location = []
        location.append(i)          # start the new B-LOC span
    elif pre_labels[i] == 'B-LOC' and pre_labels[i+1] == 'I-LOC' and len(location) == 0:
        location.append(i)          # start a new B-LOC span
    elif pre_labels[i] == 'I-LOC' and len(location) != 0:
        location.append(i)
    elif pre_labels[i] != 'I-LOC' and len(location) != 0:
        locations.append(location)  # span ended: flush
        location = []
    else:  # rare dangling I-LOC with no open span: ignore
        pass
print(persons)
print(locations)
# +
# Recover the entity strings from the character index spans.
# Index offset: positions come from the tag sequence which has [CLS] at 0,
# so `letter_idx - 1` maps back into the raw sentence.
# Persons
NER_PER = []
for word_idx in persons:
    ONE_PER = []
    for letter_idx in word_idx:
        ONE_PER.append(test_sententce[letter_idx - 1])
    NER_PER.append(ONE_PER)
NER_PER_COMBINE = []
for w in NER_PER:
    PER = "".join(w)
    NER_PER_COMBINE.append(PER)
# Locations
NER_LOC = []
for word_idx in locations:
    ONE_LOC = []
    for letter_idx in word_idx:
        # print(letter_idx)
        # print(test_sententce[letter_idx])
        ONE_LOC.append(test_sententce[letter_idx - 1])
    NER_LOC.append(ONE_LOC)
NER_LOC_COMBINE = []
for w in NER_LOC:
    LOC = "".join(w)
    NER_LOC_COMBINE.append(LOC)
# Organizations: not extracted (TODO)
# 组织
# -
# Final report: the sentence plus the extracted person/location entities.
print(f"当前句子:{test_sententce}\n")
print(f" 人物:{NER_PER_COMBINE}\n")
print(f" 地点:{NER_LOC_COMBINE}\n")
| 04其他模型教程/4.03 BERT-NER-MSRA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from sklearn import metrics
import warnings
import matplotlib.pyplot as plt
# Show up to 100 columns when printing frames; silence warnings for readability.
pd.set_option('display.max_columns', 100)
warnings.filterwarnings('ignore')
# +
def group_feature(df, key, target, aggs):
    """Group `df` by `key` and aggregate column `target` with each agg in `aggs`.

    Returns a frame with `key` plus one column per aggregation, named
    f'{target}_{agg}' (e.g. 'x_max', 'x_mean').
    """
    agg_dict = {}
    for ag in aggs:
        agg_dict[f'{target}_{ag}'] = ag
    print(agg_dict)
    # BUG FIX: passing a renaming dict to SeriesGroupBy.agg was deprecated in
    # pandas 0.25 and removed in 1.0 (SpecificationError); named aggregation
    # via keyword arguments yields the same output columns.
    t = df.groupby(key)[target].agg(**agg_dict).reset_index()
    return t
def extract_feature(df, train):
    """Aggregate per-ship trajectory statistics onto the one-row-per-ship frame.

    df:    point-level records with ship, x, y, v, d, time, date, hour columns.
    train: one row per ship; aggregated features are merged onto it and returned.
    """
    # positional / speed / direction summary statistics
    t = group_feature(df, 'ship','x',['max','min','mean','std','skew','sum'])
    train = pd.merge(train, t, on='ship', how='left')
    t = group_feature(df, 'ship','x',['count'])
    train = pd.merge(train, t, on='ship', how='left')
    t = group_feature(df, 'ship','y',['max','min','mean','std','skew','sum'])
    train = pd.merge(train, t, on='ship', how='left')
    t = group_feature(df, 'ship','v',['max','min','mean','std','skew','sum'])
    train = pd.merge(train, t, on='ship', how='left')
    t = group_feature(df, 'ship','d',['max','min','mean','std','skew','sum'])
    train = pd.merge(train, t, on='ship', how='left')
    # bounding-box derived features
    train['x_max_x_min'] = train['x_max'] - train['x_min']
    train['y_max_y_min'] = train['y_max'] - train['y_min']
    train['y_max_x_min'] = train['y_max'] - train['x_min']
    train['x_max_y_min'] = train['x_max'] - train['y_min']
    # replace zero extent with 0.001 to avoid division by zero
    train['slope'] = train['y_max_y_min'] / np.where(train['x_max_x_min']==0, 0.001, train['x_max_x_min'])
    train['area'] = train['x_max_x_min'] * train['y_max_y_min']
    # most frequent hour of activity per ship
    mode_hour = df.groupby('ship')['hour'].agg(lambda x:x.value_counts().index[0]).to_dict()
    train['mode_hour'] = train['ship'].map(mode_hour)
    t = group_feature(df, 'ship','hour',['max','min'])
    train = pd.merge(train, t, on='ship', how='left')
    # activity spread: distinct hours and distinct days observed
    hour_nunique = df.groupby('ship')['hour'].nunique().to_dict()
    date_nunique = df.groupby('ship')['date'].nunique().to_dict()
    train['hour_nunique'] = train['ship'].map(hour_nunique)
    train['date_nunique'] = train['ship'].map(date_nunique)
    # BUG FIX: dict-renaming via SeriesGroupBy.agg({'diff_time': func}) was
    # removed in pandas 1.0; named aggregation produces the same column.
    t = df.groupby('ship')['time'].agg(diff_time=lambda x: np.max(x) - np.min(x)).reset_index()
    t['diff_day'] = t['diff_time'].dt.days
    t['diff_second'] = t['diff_time'].dt.seconds
    train = pd.merge(train, t, on='ship', how='left')
    return train
def extract_dt(df):
    """Parse the raw 'time' strings ('%m%d %H:%M:%S') and add calendar columns.

    Mutates `df` in place, adding 'date', 'hour' and 'weekday', and returns it.
    """
    parsed = pd.to_datetime(df['time'], format='%m%d %H:%M:%S')
    df['time'] = parsed
    df['date'] = parsed.dt.date
    df['hour'] = parsed.dt.hour
    df['weekday'] = parsed.dt.weekday
    return df
# -
# Load the point-level data, derive calendar columns, collapse to one row per
# ship, encode the target, and build the feature matrix.
train = pd.read_hdf('./output/train.h5')
# train = df.drop_duplicates(['ship','type'])
test = pd.read_hdf('./output/test.h5')
train = extract_dt(train)
test = extract_dt(test)
# one representative row per ship for the label frames
train_label = train.drop_duplicates('ship')
test_label = test.drop_duplicates('ship')
train_label['type'].value_counts(1)
# map the 3 ship types to integer classes 0..2 (order of first appearance)
type_map = dict(zip(train_label['type'].unique(), np.arange(3)))
type_map_rev = {v:k for k,v in type_map.items()}
train_label['type'] = train_label['type'].map(type_map)
train_label = extract_feature(train, train_label)
test_label = extract_feature(test, test_label)
# everything except identifiers, the target and raw time columns is a feature
features = [x for x in train_label.columns if x not in ['ship','type','time','diff_time','date']]
target = 'type'
print(len(features), ','.join(features))
# LightGBM parameters: multiclass over the 3 ship types, with early stopping
# on the validation fold (n_estimators is an upper bound on boosting rounds).
params = {
    'n_estimators': 5000,
    'boosting_type': 'gbdt',
    'objective': 'multiclass',
    'num_class': 3,
    'early_stopping_rounds': 100,
}
# +
# 5-fold stratified CV: train one model per fold, collect out-of-fold
# predictions for an honest CV score, and average the test predictions.
fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
X = train_label[features].copy()
y = train_label[target]
models = []
pred = np.zeros((len(test_label),3))  # accumulated (averaged) test probabilities
oof = np.zeros((len(X), 3))           # out-of-fold probabilities
for index, (train_idx, val_idx) in enumerate(fold.split(X, y)):
    train_set = lgb.Dataset(X.iloc[train_idx], y.iloc[train_idx])
    val_set = lgb.Dataset(X.iloc[val_idx], y.iloc[val_idx])
    model = lgb.train(params, train_set, valid_sets=[train_set, val_set], verbose_eval=100)
    models.append(model)
    val_pred = model.predict(X.iloc[val_idx])
    oof[val_idx] = val_pred
    val_y = y.iloc[val_idx]
    val_pred = np.argmax(val_pred, axis=1)
    print(index, 'val f1', metrics.f1_score(val_y, val_pred, average='macro'))
    # 0.8695539641133697
    # 0.8866211724839532
    # each fold contributes 1/5 of the final test probabilities
    test_pred = model.predict(test_label[features])
    pred += test_pred/5
# -
# Overall out-of-fold macro-F1.
# NOTE(review): args are (pred, true); sklearn's convention is (y_true, y_pred).
# Per-class F1 is symmetric under the swap so macro-F1 is unaffected here,
# but swapping them would be clearer.
oof = np.argmax(oof, axis=1)
print('oof f1', metrics.f1_score(oof, y, average='macro'))
# 0.8701544575329372
# +
# Build the submission: hard labels mapped back to the original type names.
pred = np.argmax(pred, axis=1)
sub = test_label[['ship']]
sub['pred'] = pred
print(sub['pred'].value_counts(1))
sub['pred'] = sub['pred'].map(type_map_rev)
sub.to_csv('result.csv', index=None, header=None)
# +
# Feature importance: collect per-fold importances, then average per feature.
ret = []
for index, model in enumerate(models):
    df = pd.DataFrame()
    df['name'] = model.feature_name()
    df['score'] = model.feature_importance()
    df['fold'] = index
    ret.append(df)
df = pd.concat(ret)
# -
df = df.groupby('name', as_index=False)['score'].mean()
df = df.sort_values(['score'], ascending=False)
df
| working/201_train_1.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Statistical inference over human and model behavioral performance
# **The purpose of this notebook is to:**
# * Visualize human and model prediction accuracy (proportion correct)
# * Visualize average-human and model agreement (RMSE)
# * Visualize human-human and model-human agreement (Cohen's kappa)
# * Compare performance between models
#
# **This notebook depends on:**
# * Running `./generate_dataframes.py` (INTERNAL USE ONLY)
# * Running `./upload_results.py` (INTERNAL USE ONLY)
# * Running `./download_results.py` (PUBLIC USE)
# * Running `./summarize_human_model_behavior.ipynb` (PUBLIC USE)
# ### Load packages
## Load modeling/plotting packages; startup messages and warnings suppressed
## to keep the notebook output clean.
suppressMessages(suppressWarnings(suppressPackageStartupMessages({library(tidyverse)
library(ggthemes)
library(lme4)
library(lmerTest)
library(brms)
library(broom.mixed)
library(tidyboot)
require('MuMIn')
})))
# +
## Load accuracy/agreement summaries:
## A  = per-model accuracies, AH = human accuracy by scenario, K = Cohen's kappa.
A = read_csv('../results/csv/summary/model_human_accuracies.csv')
# A = read_csv('../results/csv/models/allModels_results.csv')
AH = read_csv('../results/csv/summary/human_accuracy_by_scenario.csv')
K = read_csv('../results/csv/summary/model_human_CohensK.csv')
## preprocessing: rename spaced column names to snake_case and add each
## model's average accuracy across rows.
A <- A %>%
  dplyr::rename('model_kind'='Model Kind',
                'encoder_training_dataset_type'='Encoder Training Dataset Type',
                'dynamics_training_dataset_type'='Dynamics Training Dataset Type',
                'readout_training_data_type'='Readout Train Data Type',
                'readout_type'='Readout Type',
                'visual_encoder_architecture' = 'Visual encoder architecture',
                'dynamics_model_architecture' = 'Dynamics model architecture') %>%
  mutate(dynamics_model_architecture = factor(dynamics_model_architecture)) %>%
  # left_join(AH, by='scenario') %>%
  group_by(model_kind) %>%
  mutate(avg_model_correct = mean(model_correct)) %>%
  ungroup()
# -
# ## Visualize human and model prediction accuracy (proportion correct)
# +
## human accuracy only
d = read_csv('../results/csv/summary/human_accuracy_by_scenario.csv')
## accuracy bar plot with 95% CIs
## NOTE(review): "#00000" below has 5 hex digits — likely meant "#000000".
d %>%
  ggplot(aes(x=reorder(scenario,-obs_mean), y=obs_mean, color=scenario, fill=scenario)) +
  geom_bar(stat='identity') +
  geom_errorbar(aes(ymin=ci_lb, ymax = ci_ub), width = 0, size = 1.5, color='black') +
  theme_few() +
  xlab('scenario') +
  ylab('accuracy') +
  theme(text = element_text(size=18),
        element_line(size=1),
        element_rect(size=2, color="#00000"),
        axis.text.x = element_text(angle=90)) +
  theme(legend.position = "none") +
  scale_fill_brewer(palette="Spectral") + scale_color_brewer(palette="Spectral")
ggsave('../results/plots/human_accuracy_across_scenarios.pdf', width=12, height = 18, units='cm')
# +
## human model accuracy comparison (MAIN FIGURE)
## Models (readout B only) and humans plotted on one axis, point per scenario.
A %>%
  filter(readout_type %in% c('B')) %>%
  select(Model, model_correct, dynamics_training_dataset_type, readout_type, scenario) %>%
  bind_rows(AH %>% dplyr::rename(Model=agent, model_correct = obs_mean) %>% select(Model, model_correct, scenario)) %>%
  ggplot(aes(x=reorder(Model,-model_correct), y=model_correct,
             color=Model, fill=Model, shape = factor(scenario))) +
  geom_point(stat='identity', position=position_dodge(0.3)) +
  # geom_hline(aes(yintercept = human_correct)) +
  # geom_rect(aes(ymin = ci_lb, ymax = ci_ub, xmin = -Inf, xmax = Inf), color=NA, fill = 'gray', alpha = 0.05) +
  # facet_grid(rows=vars(scenario), cols=vars(dynamics_training_dataset_type)) +
  theme_few() +
  theme(axis.text.x = element_text(angle=90)) +
  xlab('models') +
  ylab('accuracy') +
  ylim(0,1) +
  scale_fill_brewer(palette="Spectral") + scale_color_brewer(palette="Spectral")
ggsave('../results/plots/human_model_accuracy_across_scenarios.pdf', width=36, height = 36, units='cm')
# +
## human model accuracy comparison by dynamics training data
## Faceted by scenario (rows) x dynamics training dataset (columns).
A %>%
  filter(readout_type %in% c('A','B','C')) %>%
  ggplot(aes(x=reorder(Model,-model_correct), y=model_correct,
             color=Model, fill=Model, shape = factor(dynamics_training_dataset_type))) +
  geom_point(stat='identity', position=position_dodge(0.3)) +
  #geom_hline(aes(yintercept = human_correct)) +
  # geom_rect(aes(ymin = ci_lb, ymax = ci_ub, xmin = -Inf, xmax = Inf), color=NA, fill = 'gray', alpha = 0.05) +
  facet_grid(rows=vars(scenario), cols=vars(dynamics_training_dataset_type)) +
  theme_few() +
  theme(axis.text.x = element_text(angle=90)) +
  xlab('models') +
  ylab('accuracy') +
  scale_fill_brewer(palette="Spectral") + scale_color_brewer(palette="Spectral")
ggsave('../results/plots/human_model_accuracy_across_scenarios_by_dynamicsTraining.pdf', width=36, height = 36, units='cm')
# +
## A = "full sequence"
## B = "initial + predicted"
## C = "initial only"
## human model accuracy comparison by readout type
## Faceted by scenario (rows) x readout type (columns).
A %>%
  filter(readout_type %in% c('A','B','C')) %>%
  ggplot(aes(x=reorder(Model,-model_correct), y=model_correct,
             color=Model, fill=Model, shape = factor(readout_type))) +
  geom_point(stat='identity', position=position_dodge(0.3)) +
  #geom_hline(aes(yintercept = human_correct)) +
  # geom_rect(aes(ymin = ci_lb, ymax = ci_ub, xmin = -Inf, xmax = Inf), color=NA, fill = 'gray', alpha = 0.05) +
  facet_grid(rows=vars(scenario), cols=vars(readout_type)) +
  theme_few() +
  theme(axis.text.x = element_text(angle=90)) +
  xlab('models') +
  ylab('accuracy') +
  scale_fill_brewer(palette="Spectral") + scale_color_brewer(palette="Spectral")
ggsave('../results/plots/human_model_accuracy_across_scenarios_by_readoutType.pdf', width=36, height = 36, units='cm')
# +
## model accuracy by scenario & save out
## tidyboot_mean gives bootstrap means + CIs per (scenario, Model) cell.
Axs = A %>%
  select(Model, model_correct, dynamics_training_dataset_type, readout_type, scenario) %>%
  bind_rows(AH %>% dplyr::rename(Model=agent, model_correct = obs_mean) %>% select(Model, model_correct, scenario)) %>%
  group_by(scenario, Model) %>%
  tidyboot_mean(model_correct)
write_csv(Axs, '../results/csv/summary/model_human_accuracy_by_scenario.csv')
# -
# ## Visualize average-human and model agreement (RMSE)
# +
## TODO
# -
# ## Visualize human-human and model-human agreement (Cohen's kappa)
# +
## TODO
# -
# ## Comparing performance between models
#
# * Question 1: Visual encoder architecture (ConvNet [SVG/VGGFrozenLSTM] vs. transformer [DEITFrozenLSTM] … DEITFrozenMLP vs. SVG/VGGFrozenMLP)
# * Question 2: Dynamics model RNN vs. MLP (LSTM vs. MLP for above)
# * Question 3: Among unsupervised models, object-centric vs. non-object-centric
# * {CSWM, OP3} vs. {SVG}
# * Question 4: Latent vs. pixel reconstruction loss
# * CSWM vs. OP3
# * Question 5: RPIN vs. CSWM/OP3 (“supervised explicit object-centric” vs. “unsupervised implicit object-centric”)
# * Question 6: DPI vs GNS and GNS-RANSAC (DPI is object-centric, whereas GNS is not? and GNS-RANSAC is somewhere in between.)
# * Question 7: GNS vs. GNS-RANSAC
# * Question 8: Particle models (GNS, GNS-RANSAC, DPI) vs. humans
# - Estimate difference between particle model accuracy and human accuracy
# ```
# model_correct ~ particleOrHuman + (1 | scenario) + (1 | training_data) + (1 | readout_type)
# ```
# - Estimate similarity between model responses and human responses
#
# * Question 9: Particle models (GNS, GNS-RANSAC, DPI) vs. remaining vision models
# * Question 10: Among TDW-trained vision models, are supervised (RPIN) better than unsupervised (SVG, CSWM, OP3)
# * Question 11: Possibly useful to look at how well visual encoders do alone (e.g. Readout type A vs. humans)
# * Question 12: For pretrained encoder vision models (frozen), is readout B any better than readout C? If not, then none of the vision models are actually getting anything out of learning dynamics
# * Question 13: For end2end vision models (CSWM, OP3, SVG, RPIN), is readout B any better than readout C? If not, then none of the vision models are actually getting anything out of learning dynamics
# * Question 14: Impact of dynamics training data variability on accuracy
# * Question 15: is the supervised object-centric vision model (TDW training only) better than the best unsupervised? RPIN vs CSWM
# * Question 16: if possible, same comparison above but on Cohen's kappa? (nice-to-have)
# * Question 17: Is ImageNet pretraining better than object-centric TDW pretraining, assuming a CNN encoder? (VGGfrozen-MLP,LSTM ) vs (CSWM,RPIN)
# * Question 18: If the three particle models DPI, GNS, GNS-R aren't distinguishable, I would like to better understand why. Is this because GNS and GNS-R are being pulled way down by a single outlier (Dominoes)? I.e. does the result change if you exclude dominoes?
# * Question 19: Same as (4) but with Cohen's kappa (nice-to-have)
# * Question 20: Is readout type A (fully observed the movie) significantly better than readout type B or C (initial observations with/without simulation); best to do this separately for the "frozen" models (VGGfrozen, DEITfrozen, etc.) and for the TDW-only trained models (SVG, OP3, CSWM, RPIN)
#
# **Dimensions:**
# * “Visual encoder architecture” : [“ConvNet” “Transformer” “Neither”]
# * “Dynamics model architecture” : [“LSTM”, “MLP”, “Neither”]
# * “ObjectCentric”: [TRUE, FALSE, NA]
# * “Supervised”: [TRUE, FALSE]
# * “SelfSupervisedLoss”: [“latent”, “pixel”, “NA”]
# ## Q1: Visual encoder architecture (ConvNet vs. transformer)
## Comparison 1: Visual encoder architecture (ConvNet [SVG/VGGFrozenLSTM] vs. transformer [DEITFrozenLSTM] … DEITFrozenMLP vs. SVG/VGGFrozenMLP)
## Mixed-effects model with random intercepts for scenario, readout type and
## dynamics training data; M0 is the null model for the likelihood-ratio test.
Q1 <- A %>%
  filter(visual_encoder_architecture %in% c('ConvNet','Transformer')) %>%
  filter(readout_type %in% c('A','B','C'))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q1)
M1 <- lmer(model_correct ~ visual_encoder_architecture + (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q1)
summary(M1)
# Transformer architectures outperform convnet architectures.
## model comparison relative to null model
anova(M0,M1)
# Model containing `visual_encoder_architecture` as a predictor outperforms null model without it .
## explained variance
r.squaredGLMM(M1)
# Showing that your marginal R squared is 0.14 and your conditional R squared is 0.72.
# ## Q2: Dynamics model RNN vs. MLP (LSTM vs. MLP for above)
## Comparison 2: dynamics model architecture (LSTM vs. MLP), same random-effects
## structure as Q1.
Q2 <- A %>%
  filter(dynamics_model_architecture %in% c('LSTM','MLP')) %>%
  mutate(dynamics_model_architecture = factor(dynamics_model_architecture)) %>%
  filter(readout_type %in% c('A','B','C'))
Q2 %>%
  group_by(dynamics_model_architecture, Model) %>%
  summarise(mean(model_correct))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q2)
M2 <- lmer(model_correct ~ dynamics_model_architecture + (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q2)
summary(M2)
# ### Recurrence (LSTM) does not outperform MLP:
# ```
# Fixed effects:
#                                  Estimate Std. Error        df t value
# (Intercept)                     6.310e-01  4.157e-02 2.351e+00  15.181
# dynamics_model_architectureMLP -8.559e-04  4.563e-03 2.750e+02  -0.188
#                                Pr(>|t|)
# (Intercept)                     0.00212 **
# dynamics_model_architectureMLP  0.85134
# ```
anova(M0,M2)
## explained variance
r.squaredGLMM(M2)
# ## Q3: Among unsupervised models, object-centric vs. non-object-centric
# {CSWM, OP3} vs. {SVG}
## Comparison 3: restrict to unsupervised models and test the ObjectCentric flag.
Q3 <- A %>%
  filter(Supervised==FALSE) %>%
  filter(ObjectCentric %in% c(TRUE,FALSE)) %>%
  filter(readout_type %in% c('A','B','C'))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q3)
M3 <- lmer(model_correct ~ ObjectCentric + (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q3)
summary(M3)
# ### ObjectCentric representations better than non-object centric: $b=-0.0145 ,t(635)=-3.154, p=0.0017$
anova(M0,M3)
## explained variance
r.squaredGLMM(M3)
# ## Q4: Latent vs. pixel reconstruction loss
# CSWM vs. OP3
## Comparison 4: the two unsupervised object-centric models differ only in loss.
Q4 <- A %>%
  filter(Supervised==FALSE) %>%
  filter(ObjectCentric %in% c(TRUE,FALSE)) %>%
  filter(Model %in% c('CSWM','OP3')) %>%
  filter(readout_type %in% c('A','B','C'))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q4)
M4 <- lmer(model_correct ~ Model + (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q4)
summary(M4)
# ### Latent (CSWM) better than pixel reconstruction (OP3) loss: $b= -0.059 ,t(275)=-11.04, p<2e-16$
anova(M0,M4)
## explained variance
r.squaredGLMM(M4)
# ## Q5: RPIN vs. CSWM/OP3 (“supervised explicit object-centric” vs. “unsupervised implicit object-centric”)
## Comparison 5: supervision effect among object-centric models.
Q5 <- A %>%
  filter(Supervised %in% c(TRUE,FALSE)) %>%
  filter(Model %in% c('CSWM','OP3', 'RPIN')) %>%
  filter(readout_type %in% c('A','B','C'))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q5)
M5 <- lmer(model_correct ~ Supervised + (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q5)
summary(M5)
# ### Supervised better than unsupervised: $b= 6.459e-02 ,t(707)=13.99, p<2e-16$
anova(M0,M5)
## explained variance
r.squaredGLMM(M5)
# ## Question 6: DPI vs GNS and GNS-RANSAC (DPI is object-centric, whereas GNS is not? and GNS-RANSAC is somewhere in between.)
## Particle models only; isDPI contrasts DPI against the two GNS variants.
Q6 <- A %>%
  filter(Model %in% c('DPI','GNS', 'GNS-ransac')) %>%
  filter(readout_type %in% c('A','B','C')) %>%
  mutate(isDPI = if_else(Model=='DPI', TRUE, FALSE))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q6)
M6 <- lmer(model_correct ~ isDPI + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q6)
summary(M6)
# ### DPI not better than {GNS, GNS-ransac}: $b= 3.982e-04 ,t(61)=0.018, p=0.985$
anova(M0,M6)
## explained variance
r.squaredGLMM(M6)
# ## Question 7: GNS vs. GNS-RANSAC
## Question 7: GNS vs. GNS-RANSAC, within the particle models.
Q7 <- A %>%
  filter(Model %in% c('GNS', 'GNS-ransac')) %>%
  filter(readout_type %in% c('A','B','C')) %>%
  mutate(isRansac = if_else(Model=='GNS-ransac', TRUE, FALSE))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q7)
M7 <- lmer(model_correct ~ isRansac + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q7)
summary(M7)
# ### GNS not different from GNS-ransac: $b=0.007, t(37)=0.296, p=0.769$
anova(M0,M7)
## explained variance
r.squaredGLMM(M7)
## quick inspection of which readout types survived the filter
Q7$readout_type
# ## Question 8: Particle models (GNS, GNS-RANSAC, DPI) vs. humans
#
# Estimate difference between particle model accuracy and human accuracy
# ```
# model_correct ~ particleOrHuman + (1 | scenario) + (1 | training_data) + (1 | readout_type)
# ```
#
# Estimate similarity between model responses and human responses
# Question 9: Particle models (GNS, GNS-RANSAC, DPI) vs. remaining vision models
#
## Question 8: particle models (GNS, GNS-RANSAC, DPI) vs. human accuracy;
## human rows are appended and flagged with isHuman.
Q8 <- A %>%
  filter(Model %in% c('DPI','GNS', 'GNS-ransac')) %>%
  filter(readout_type %in% c('A','B','C')) %>%
  select(Model, model_correct, dynamics_training_dataset_type, readout_type, scenario) %>%
  bind_rows(AH %>% dplyr::rename(Model=agent, model_correct = obs_mean) %>% select(Model, model_correct, scenario)) %>%
  mutate(isHuman = if_else(Model=='human', TRUE, FALSE))
M0 <- lmer(model_correct ~ (1 | scenario) , data=Q8)
M8 <- lmer(model_correct ~ isHuman + (1 | scenario), data=Q8)
summary(M8)
# ### Humans not that much better than particle models: $b=0.040, t(71)=1.228, p=0.223$
anova(M0,M8)
## explained variance
r.squaredGLMM(M8)
# ## Question 9: Particle models (GNS, GNS-RANSAC, DPI) vs. remaining vision models
## Question 9: particle models vs. the remaining (vision) models.
Q9 <- A %>%
  mutate(isParticle = if_else(Model %in% c('GNS', 'GNS-ransac', 'DPI'), TRUE, FALSE)) %>%
  filter(readout_type %in% c('A','B','C'))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type) , data=Q9)
M9 <- lmer(model_correct ~ isParticle + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q9)
summary(M9)
# ### Particle models better than non-particle models:
#
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 5.977e-01 1.187e-02 5.575e+00 50.34 1.26e-08 ***
# isParticleTRUE 1.094e-01 9.251e-03 1.717e+03 11.83 < 2e-16 ***
# ```
# ## Question 10: Among TDW-trained vision models, are supervised (RPIN) better than unsupervised (SVG, CSWM, OP3)
#
#
## TDW-trained vision models; RPIN is the (only) supervised one.
Q10 <- A %>%
  filter(Model %in% c('SVG', 'CSWM','OP3','RPIN')) %>%
  mutate(isSupervised = if_else(Model %in% c('RPIN'), TRUE, FALSE)) %>%
  filter(readout_type %in% c('A','B','C'))
## Random intercepts here also include readout_type, unlike earlier questions.
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type) , data=Q10)
M10 <- lmer(model_correct ~ isSupervised + (1 | scenario) + (1 | readout_type) + (1 | dynamics_training_dataset_type), data=Q10)
summary(M10)
# ### Supervised better than unsupervised among TDW-trained models:
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 5.575e-01 1.344e-02 4.396e+00 41.48 7.17e-07 ***
# isSupervisedTRUE 5.656e-02 4.087e-03 1.069e+03 13.84 < 2e-16 ***
# ```
# ## Question 11: Possibly useful to look at how well visual encoders do alone (e.g. Readout type A vs. humans)
# +
## TODO
# -
#
# ## Question 12: For pretrained encoder vision models (frozen), is readout B any better than readout C? If not, then none of the vision models are actually getting anything out of learning dynamics
#
## Frozen pretrained-encoder models (ConvNet/Transformer backbones);
## compare readout B (with simulation) vs. C (without) directly.
Q12 <- A %>%
  filter(visual_encoder_architecture %in% c('ConvNet','Transformer')) %>%
  filter(readout_type %in% c('B','C'))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type) , data=Q12)
M12 <- lmer(model_correct ~ readout_type + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q12)
summary(M12)
# ### Readout type B not better than C:
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 5.687e-01 1.133e-02 8.972e+00 50.215 2.64e-12 ***
# readout_typeC 4.115e-03 3.524e-03 6.130e+02 1.168 0.243
# ```
# ## Question 13: For end2end vision models (CSWM, OP3, SVG, RPIN), is readout B any better than readout C? If not, then none of the vision models are actually getting anything out of learning dynamics
## Same B-vs-C contrast as Q12, but for the end-to-end TDW-trained models.
Q13 <- A %>%
  filter(Model %in% c('SVG', 'CSWM','OP3','RPIN')) %>%
  filter(readout_type %in% c('B','C'))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type) , data=Q13)
M13 <- lmer(model_correct ~ readout_type + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q13)
summary(M13)
# ### Readout B not better than readout C:
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 5.583e-01 1.060e-02 5.512e+00 52.663 1.16e-08 ***
# readout_typeC 2.664e-03 4.198e-03 7.090e+02 0.635 0.526
# ```
# ## Q14: Impact of dynamics training data variability on accuracy
## Rename the training-set column; its levels (all / same / all_but_this)
## act directly as the fixed effect.
Q14 <- A %>%
  filter(readout_type %in% c('A','B','C')) %>%
  dplyr::rename('dynamicsTrainVar'='dynamics_training_dataset_type')
M0 <- lmer(model_correct ~ (1 | scenario) , data=Q14)
M14 <- lmer(model_correct ~ dynamicsTrainVar + (1 | scenario), data=Q14)
summary(M14)
# ## Training on only one (the same) scenario yields higher prediction accuracy than training on all:
# ```
# Estimate Std. Error df t value Pr(>|t|)
# dynamicsTrainVarsame 2.053e-02 4.708e-03 1.718e+03 4.361 1.37e-05
# ```
# ## Training on all-but-this scenario yields somewhat lower prediction accuracy than training on all:
#
# ```
# Estimate Std. Error df t value Pr(>|t|)
# dynamicsTrainVarall_but_this -8.965e-03 4.708e-03 1.718e+03 -1.904 0.057
# ```
## Likelihood-ratio test: does training-set variability improve fit over M0?
anova(M0,M14)
# ## Question 15: is the supervised object-centric vision model (TDW training only) better than the best unsupervised? RPIN vs CSWM
## Supervised (RPIN) vs. best unsupervised (CSWM), TDW-trained only.
Q15 <- A %>%
  filter(readout_type %in% c('A','B','C')) %>%
  filter(Model %in% c('RPIN','CSWM')) %>%
  mutate(isRPIN = if_else(Model=='RPIN', TRUE, FALSE))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q15)
M15 <- lmer(model_correct ~ isRPIN + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q15)
summary(M15)
# ### RPIN better than CSWM
#
# ```
# Estimate Std. Error df t value Pr(>|t|)
# isRPINTRUE 0.03506 0.00654 565.00000 5.36 1.21e-07 ***
# ```
## Likelihood-ratio test for the RPIN effect.
anova(M0,M15)
# ### basic summary stats
## Mean accuracy per model and training regime. Piping a grouped data frame
## straight into `mean()` (as the previous code did) ignores the grouping and
## returns NA with a warning; `summarise()` is the correct dplyr verb here —
## matching the author's own commented hint below the tidyboot call.
Q15 %>% group_by(Model, dynamics_training_dataset_type) %>% summarise(mean_correct = mean(model_correct))
## Bootstrapped mean accuracy (with CIs) per model and training regime.
Q15 %>%
  select(Model, scenario, dynamics_training_dataset_type, readout_type, model_correct) %>%
  group_by(Model, dynamics_training_dataset_type) %>%
  tidyboot_mean(model_correct)
# summarise(mean(model_correct))
# ## Question 16: if possible, same comparison as Q15 but on Cohen's kappa? (nice-to-have)
# +
## TODO
# -
# ## Question 17: Is ImageNet pretraining better than object-centric TDW pretraining, assuming a CNN encoder? (VGGfrozen-MLP,LSTM ) vs (CSWM,RPIN)
## ImageNet-pretrained CNN encoders (VGG frozen variants) vs. object-centric
## TDW-pretrained models (CSWM, RPIN).
Q17 <- A %>%
  filter(readout_type %in% c('A','B','C')) %>%
  filter(Model %in% c('RPIN','CSWM', 'VGGFrozenLSTM','VGGFrozenMLP' )) %>%
  mutate(isImagenetPretrained = if_else(Model %in% c('VGGFrozenLSTM','VGGFrozenMLP'), TRUE, FALSE))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q17)
M17 <- lmer(model_correct ~ isImagenetPretrained + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q17)
summary(M17)
# ### VGGFrozenMLP / VGGFrozenLSTM better than CSWM/RPIN
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 6.053e-01 1.406e-02 5.070e+00 43.033 1.07e-07 ***
# isImagenetPretrainedTRUE 1.463e-02 5.036e-03 8.530e+02 2.905 0.00377 **
# ```
# ## Question 18: If the three particle models DPI, GNS, GNS-R aren't distinguishable, I would like to better understand why. Is this because GNS and GNS-R are being pulled way down by a single outlier (Dominoes)? I.e. does the result change if you exclude dominoes?
## Particle-model comparison excluding the dominoes scenario (suspected outlier).
Q18 <- A %>%
  filter(Model %in% c('GNS', 'GNS-ransac', 'DPI')) %>%
  filter(readout_type %in% c('A','B','C')) %>%
  filter(!scenario %in% c('dominoes')) ## excluding dominoes
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type) , data=Q18)
M18 <- lmer(model_correct ~ Model + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q18)
summary(M18)
## including dominoes
## Same comparison on the full data, as a control for the exclusion above.
Q18b <- A %>%
  filter(Model %in% c('GNS', 'GNS-ransac', 'DPI')) %>%
  filter(readout_type %in% c('A','B','C')) #%>%
# filter(!scenario %in% c('dominoes')) ## excluding dominoes
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type) , data=Q18b)
M18b <- lmer(model_correct ~ Model + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q18b)
summary(M18b)
# ### Lack of ability to distinguish between particle models (GNS, GNS-ransac, DPI) does not depend on dominoes outlier (we are comparing between models within scenario)
#
# With dominoes
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 0.707349 0.035477 9.054228 19.938 8.62e-09 ***
# ModelGNS -0.003923 0.025232 59.999972 -0.155 0.877
# ModelGNS-ransac 0.003126 0.025232 59.999972 0.124 0.902
#
# ```
#
# Without dominoes
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 0.70628 0.03097 8.46138 22.809 6.77e-09 ***
# ModelGNS 0.01133 0.01999 52.00024 0.567 0.573
# ModelGNS-ransac 0.02128 0.01999 52.00024 1.064 0.292
#
# ```
## Bootstrapped mean accuracy per particle model and training regime.
Q18b %>%
  select(Model, scenario, dynamics_training_dataset_type, readout_type, model_correct) %>%
  group_by(Model, dynamics_training_dataset_type) %>%
  tidyboot_mean(model_correct)
# summarise(mean(model_correct))
# +
## including dominoes, only comparing within "all" training regime
Q18c <- A %>%
  filter(Model %in% c('GNS', 'GNS-ransac', 'DPI')) %>%
  filter(dynamics_training_dataset_type %in% c('all')) ## only "all" training regimens
M0 <- lmer(model_correct ~ (1 | scenario) , data=Q18c)
M18c <- lmer(model_correct ~ Model + (1 | scenario) , data=Q18c)
summary(M18c)
# -
## Bootstrapped means within the "all" regime only.
Q18c %>%
  select(Model, scenario, dynamics_training_dataset_type, readout_type, model_correct) %>%
  group_by(Model) %>%
  tidyboot_mean(model_correct)
# ## Question 19: Same as (Q18) but with Cohen's kappa (nice-to-have)
# +
## TODO
# -
# ## Question 20: Is readout type A (fully observed the movie) significantly better than readout type B or C (initial observations with/without simulation); best to do this separately for the "frozen" models (VGGfrozen, DEITfrozen, etc.) and for the TDW-only trained models (SVG, OP3, CSWM, RPIN)
## frozen pre-trained
## Does fully observing the movie (readout A) beat partial observation (B/C)?
Q20a <- A %>%
  filter(readout_type %in% c('A','B','C')) %>%
  filter(Model %in% c('VGGFrozenLSTM','VGGFrozenMLP', 'DEITFrozenLSTM','DEITFrozenMLP')) %>%
  mutate(fullyObserved = if_else(readout_type=='A', TRUE, FALSE))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type) , data=Q20a)
M20a <- lmer(model_correct ~ fullyObserved + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q20a)
summary(M20a)
## TDW-only pre-trained
Q20b <- A %>%
  filter(readout_type %in% c('A','B','C')) %>%
  filter(Model %in% c('SVG','OP3', 'CSWM','RPIN')) %>%
  mutate(fullyObserved = if_else(readout_type=='A', TRUE, FALSE))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type) , data=Q20b)
M20b <- lmer(model_correct ~ fullyObserved + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q20b)
summary(M20b)
## all models pooled (no Model filter; the original comment repeated
## "TDW-only pre-trained" here by mistake)
Q20c <- A %>%
  filter(readout_type %in% c('A','B','C')) %>%
  mutate(fullyObserved = if_else(readout_type=='A', TRUE, FALSE))
M0 <- lmer(model_correct ~ (1 | scenario) + (1 | dynamics_training_dataset_type) , data=Q20c)
M20c <- lmer(model_correct ~ fullyObserved + (1 | scenario) + (1 | dynamics_training_dataset_type), data=Q20c)
summary(M20c)
# ### Models are more accurate when observing entire sequence than on partial sequences (w/ and w/o predicted features)
#
# Frozen pretrained only
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 5.907e-01 1.182e-02 8.006e+00 49.99 2.79e-11 ***
# fullyObservedTRUE 1.197e-01 3.378e-03 5.650e+02 35.43 < 2e-16 ***
# ---
# ```
#
# TDW-only
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 5.597e-01 1.341e-02 4.364e+00 41.72 7.59e-07 ***
# fullyObservedTRUE 6.132e-02 4.214e-03 1.069e+03 14.55 < 2e-16 ***
#
# ```
#
# ALL
# ```
# Fixed effects:
# Estimate Std. Error df t value Pr(>|t|)
# (Intercept) 5.788e-01 1.194e-02 5.708e+00 48.46 1.1e-08 ***
# fullyObservedTRUE 7.325e-02 3.725e-03 1.717e+03 19.66 < 2e-16 ***
# ```
| analysis/inference_human_model_behavior.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:pythondata] *
# language: python
# name: conda-env-pythondata-py
# ---
import os
import pandas as pd

# Load the city data, preview it, and export it as a Bootstrap-styled HTML table.
city_data = pd.read_csv('cities.csv')
city_data.head()
city_data.to_html(
    'tabledata.html',
    index=False,
    classes=['table', 'table-striped', 'table-hover'],
)
| Resources/CSVtoHTML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf_env
# language: python
# name: tf_env
# ---
# # Speech Translator
import speech_recognition as sr #Speech To Text
import sys
import os
import IPython.display as ipd #audio file in ipynb
# ## Speech To Text
# we are using speech recognizer to recognize speech of the user
# Read filename from arguments when one is given. The unguarded sys.argv[1]
# raised IndexError whenever the notebook/script was run without arguments;
# the value is overwritten with a local path further below anyway.
filename = sys.argv[1] if len(sys.argv) > 1 else None
# initialize the recognizer
r = sr.Recognizer()
# User records his voice for a few seconds
duration = 7 # seconds
# The following code snippet is being used to record the voice
# +
import sounddevice as sd
import soundfile as sf
samplerate = 16000  # 16 kHz mono recording
filename = './input/try.wav'
# Record `duration` seconds from the default microphone; blocking=True makes
# sd.rec return only after the recording completes.
print("start")
mydata = sd.rec(int(samplerate * duration), samplerate=samplerate,
                channels=1, blocking=True)
print("end")
sd.wait()
# Persist the raw recording as a WAV file for the recognizer to read back.
sf.write(filename, mydata, samplerate)
# -
os.listdir('./input')
filepath = './input'
# Try the recorded audio device
ipd.Audio('./input/try.wav')
# Speech to text conversion (of the audio file) using Speech Recognizer
# open the file
with sr.AudioFile(filepath + "/try.wav") as source:
    # listen for the data (load audio to memory)
    audio_data = r.record(source)
    # recognize (convert from speech to text)
    # NOTE(review): recognize_google calls an external web API, so this cell
    # needs network access — confirm before running offline.
    text = r.recognize_google(audio_data)
    print(text)
# ## Translation using googletrans Library
from googletrans import Translator
translator = Translator()
# detecting the language of the recognized text
# (the variable name contains a typo, "detcted"; it is kept unchanged because
# later cells reference it by this exact name)
language_detcted = translator.detect(text).lang
language_detcted
# #### The JSON of languages supported by both googletrans and GTTS
# Languages supported by both googletrans and gTTS, keyed by language code.
# The public `languages_supported` keeps the original list-of-dicts interface.
_LANGUAGE_NAMES_BY_CODE = {
    'af': 'afrikaans',
    'sq': 'albanian',
    'ar': 'arabic',
    'hy': 'armenian',
    'bn': 'bengali',
    'bs': 'bosnian',
    'ca': 'catalan',
    'zh-cn': 'chinese (simplified)',
    'zh-tw': 'chinese (traditional)',
    'hr': 'croatian',
    'cs': 'czech',
    'da': 'danish',
    'nl': 'dutch',
    'en': 'english',
    'et': 'estonian',
    'tl': 'filipino',
    'fi': 'finnish',
    'fr': 'french',
    'de': 'german',
    'el': 'greek',
    'gu': 'gujarati',
    'hi': 'hindi',
    'hu': 'hungarian',
    'is': 'icelandic',
    'id': 'indonesian',
    'it': 'italian',
    'ja': 'japanese',
    'kn': 'kannada',
    'ko': 'korean',
    'la': 'latin',
    'lv': 'latvian',
    'ml': 'malayalam',
    'mr': 'marathi',
    'my': 'myanmar (burmese)',
    'ne': 'nepali',
    'no': 'norwegian',
    'pl': 'polish',
    'pt': 'portuguese',
    'ro': 'romanian',
    'ru': 'russian',
    'sr': 'serbian',
    'sk': 'slovak',
    'es': 'spanish',
    'sv': 'swedish',
    'ta': 'tamil',
    'te': 'telugu',
    'th': 'thai',
    'tr': 'turkish',
    'uk': 'ukrainian',
    'ur': 'urdu',
    'vi': 'vietnamese',
    'cy': 'welsh',
}
languages_supported = [
    {'code': code, 'country': country}
    for code, country in _LANGUAGE_NAMES_BY_CODE.items()
]
# ## Text To Speech for all the supported languages
from gtts import gTTS
# Translate `text` into every supported language and synthesize speech for
# each one, saving one MP3 per language (named by its lowercase country
# string) into the ./output folder.
for language in languages_supported:
    lang = translator.translate(text, src=language_detcted, dest=language['code']).text
    print(lang + "\n")  # Just to show the text being translated
    tts = gTTS(lang, lang=language['code'])
    tts.save('./output/' + language['country'] + '.mp3')
# ## Output
# You can find the audio files for all languages in the output folder. The example for English (source language) and Hindi is given below.
# FIX: the loop above writes lowercase filenames ('english.mp3'); the previous
# capitalized paths ('English.mp3') would not resolve on case-sensitive
# filesystems, so the playback paths now match what was actually saved.
ipd.Audio('./output/english.mp3', autoplay=True)
ipd.Audio('./output/hindi.mp3', autoplay=True)
# # Example
# Asuming that a person is stuck in India and he cannot find someone who speaks english.
duration = 7 # seconds
# He records his voice( You can try too)
samplerate = 16000
filename = './input/example.wav'
print("start")
mydata = sd.rec(int(samplerate * duration), samplerate=samplerate,
channels=1, blocking=True)
print("end")
sd.wait()
sf.write(filename, mydata, samplerate)
ipd.Audio('./input/example.wav')
# Converted to text
with sr.AudioFile("./input/example.wav") as source:
# listen for the data (load audio to memory)
audio_data = r.record(source)
# recognize (convert from speech to text)
text = r.recognize_google(audio_data)
print(text)
# Language is detected automaically
language_detcted=translator.detect(text).lang
print(language_detcted)
# The user can now select all the languages that he wants to translate to from a dropdown
# Languages the user picked from the dropdown.
user_selection = ['urdu', 'tamil', 'telugu', 'bengali', 'gujarati', 'hindi', 'english']
# Resolve each selected language name to its translation code, preserving the
# selection order (unknown names are simply skipped, as before).
translations = [
    language['code']
    for selection in user_selection
    for language in languages_supported
    if language['country'] == selection
]
translations
# The text is translated
# Translate and synthesize speech for each selected language; files are named
# after the user's selection so they pair up index-for-index with
# `translations`.
for x in range(0, len(translations)):
    lang = translator.translate(text, src=language_detcted, dest=translations[x]).text
    print(lang + "\n")  # Just to show the text being translated
    tts = gTTS(lang, lang=translations[x])
    tts.save('./output/example/' + user_selection[x] + '.mp3')
ipd.Audio('./output/example/urdu.mp3', autoplay=True)
ipd.Audio('./output/example/tamil.mp3', autoplay=True)
ipd.Audio('./output/example/telugu.mp3', autoplay=True)
ipd.Audio('./output/example/bengali.mp3', autoplay=True)
ipd.Audio('./output/example/gujarati.mp3', autoplay=True)
ipd.Audio('./output/example/hindi.mp3', autoplay=True)
ipd.Audio('./output/example/english.mp3', autoplay=True)
# One-off transcription of a local file.
# NOTE(review): hard-coded absolute Windows path — this cell only works on the
# original author's machine.
r = sr.Recognizer()
with sr.AudioFile("C:/Users/Predator/Desktop/preeti.wav") as source:
    # listen for the data (load audio to memory)
    audio_data = r.record(source)
    # recognize (convert from speech to text)
    text = r.recognize_google(audio_data)
    print(text)
# ###### ---------------END--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
| Verzeo_Major_Projects/TextToSpeech_and_SpeechToText_Togather/LanguageTranslation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Green's function of an atomic chain
#
# This example demonstrates a computation of the retarded Green's function function of an atomic chain. The chain consists of identical atoms of an abstract chemical element, say an element "A".
import sys
import numpy as np
import matplotlib.pyplot as plt
import nanonet.tb as tb
import nanonet.negf as negf
# Single-atom unit cell for the abstract element "A" (xyz format).
xyz_file = """1
H cell
A1 0.0000000000 0.0000000000 0.0000000000
"""
a = tb.Orbitals('A')
# Let us assume each atomic site has one s-type orbital and the energy level of -0.7 eV. The coupling matrix element equals -0.5 eV.
a.add_orbital('s', -0.7)
tb.Orbitals.orbital_sets = {'A': a}
tb.set_tb_params(PARAMS_A_A={'ss_sigma': -0.5})
# With all these parameters we can create an instance of the class Hamiltonian. The distance between nearest neighbours is set to 1.1 A.
h = tb.Hamiltonian(xyz=xyz_file, nn_distance=1.1).initialize()
# Now we need to set periodic boundary conditions with a one-dimensional unit cell and lattice constant of 1 A.
h.set_periodic_bc([[0, 0, 1.0]])
# h_l / h_r couple the cell to its left/right neighbours; h_0 is the on-site block.
h_l, h_0, h_r = h.get_hamiltonians()
# Energy grid for the spectral quantities below.
energy = np.linspace(-3.5, 2.0, 500)
sgf_l = []
sgf_r = []
# Surface Green's functions (lead self-energies) at each energy point.
for E in energy:
    L, R = negf.surface_greens_function(E, h_l, h_0, h_r)
    sgf_l.append(L)
    sgf_r.append(R)
sgf_l = np.array(sgf_l)
sgf_r = np.array(sgf_r)
num_sites = h_0.shape[0]
# Retarded Green's function G(E) = [(E + i*eta) I - H0 - Sigma_L - Sigma_R]^-1;
# the small imaginary part 0.001j regularizes the inversion.
gf = np.linalg.pinv(np.multiply.outer(energy + 0.001j, np.identity(num_sites)) - h_0 - sgf_l - sgf_r)
dos = -np.trace(np.imag(gf), axis1=1, axis2=2)
# Transmission T(E) = Tr[Gamma_L G Gamma_R G^dagger] and DOS from the
# anti-Hermitian part of G. Rewritten with plain ndarrays: np.matrix is
# deprecated in NumPy and scheduled for removal; `@` replaces matrix `*`
# and `.conj().T` replaces the matrix `.H` attribute.
tr = np.zeros((energy.shape[0]), dtype=complex)
for j, E in enumerate(energy):
    gf0 = gf[j, :, :]
    gamma_l = 1j * (sgf_l[j, :, :] - sgf_l[j, :, :].conj().T)
    gamma_r = 1j * (sgf_r[j, :, :] - sgf_r[j, :, :].conj().T)
    tr[j] = np.real(np.trace(gamma_l @ gf0 @ gamma_r @ gf0.conj().T))
    dos[j] = np.real(np.trace(1j * (gf0 - gf0.conj().T)))
# +
# Side-by-side plots: density of states and transmission vs. energy.
fig, ax = plt.subplots(1, 2)
ax[0].plot(energy, dos)
ax[0].set_xlabel('Energy (eV)')
ax[0].set_ylabel('DOS')
ax[0].set_title('Density of states')
ax[1].plot(energy, tr)
ax[1].set_xlabel('Energy (eV)')
ax[1].set_ylabel('Transmission coefficient (a.u)')
ax[1].set_title('Transmission')
fig.tight_layout()
plt.show()
# -
# Real and imaginary parts of the left surface self-energy.
ax = plt.axes()
ax.set_title('Surface self-energy of the semi-infinite chain')
ax.plot(energy, np.real(np.squeeze(sgf_l)))
ax.plot(energy, np.imag(np.squeeze(sgf_l)))
ax.set_xlabel('Energy (eV)')
ax.set_ylabel('Self-energy')
plt.show()
| jupyter_notebooks/chain_greens_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1.1 Convolutional Neural Network for Regression - Transfer Learning
# In this demo code, we will:
#
# <ul>
# <li>Explore if the regression model in 1.0 can be used for transfer learning.</li>
# <li>Visualize the filter activations (intermediate representations) when the CNN is used in a "transfer" setting.</li>
# </ul>
# +
import pandas as pd
from sklearn.decomposition import PCA
#load MNIST dataset
# %matplotlib inline
import numpy as np
import tensorflow as tf
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
#normalize the images: scale pixels to [0, 1] and add a trailing channel axis
# so shapes become (N, 28, 28, 1) as expected by Conv2D layers
x_train = np.expand_dims(x_train/255.0, axis=-1)
x_test = np.expand_dims(x_test/255.0, axis=-1)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
# +
#plot function for sample images
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
def plot_tile(samples):
    """Draw up to a square grid of image samples in one tight figure.

    Parameters
    ----------
    samples : array of shape (num_samples, height, width, channels)

    Returns
    -------
    matplotlib.figure.Figure
        The created figure. (Previously the figure was created but never
        returned, so callers assigning ``fig = plot_tile(...)`` got None.)
    """
    num_samples, x_dim, y_dim, _ = samples.shape
    # Grid side length: round(sqrt(N)) — assumes N is (close to) a square number.
    axes = (np.round(np.sqrt(num_samples))).astype(int)
    fig = plt.figure(figsize=(axes, axes))
    gs = gridspec.GridSpec(axes, axes)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_aspect('equal')
        plt.imshow(sample, cmap=plt.get_cmap('viridis'), aspect='auto')
    return fig
# +
#visualize first 9 samples of input images based on class labels, within the training dataset
unique_labels, label_counts = np.unique(y_train, return_counts=True)
for label in unique_labels:
    # Boolean mask selecting all training images of this digit class.
    x_train_perlabel = x_train[np.squeeze(y_train) == label]
    fig = plot_tile(x_train_perlabel[0:9, :, :])
# +
#create (simulate) a synthetic "time series" data vector (y) for each of the input (x) such that y=Gx and G is linear
#G represents some abstract function (i.e. fluid flow simulator)
G = np.load('G.npy')
y_dim = G.shape[-1]
#simulate Y = GX, vectorized: flatten each 28x28 image to a row and apply G
#with a single matmul (replaces the original per-sample Python loops; the
#result is identical)
n_pixels = x_train.shape[1] * x_train.shape[2]
y_reg_train = x_train[:, :, :, 0].reshape(-1, n_pixels) @ G
y_reg_test = x_test[:, :, :, 0].reshape(-1, n_pixels) @ G
print(y_reg_train.shape)
print(y_reg_test.shape)
# +
#visualize the generated signals (for training dataset)
import matplotlib.cm as cm
from matplotlib.colors import Normalize
fig, ax = plt.subplots(1, 1, figsize=(16, 7))
# Color each curve by its digit label (0-9) using the jet colormap.
my_cmap = cm.get_cmap('jet')
my_norm = Normalize(vmin=0, vmax=9)
cs = my_cmap(my_norm(y_train))
# One subplot per digit; only the first 500 training samples are scanned.
for j in range(10):
    plt.subplot(2, 5, j+1)
    for i in range(500):
        if (y_train[i] == j):
            plt.plot(y_reg_train[i, :], c=cs[i], alpha=0.5)
    plt.ylim([0, 25])
    plt.title('digit ' + str(j))
# +
#split label 2 (transfer) and 8, 9 (original)
# "original" = digits 8 and 9 used for initial training; "transfer" = digit 2
# held out to test transfer learning.
x_train_original = x_train[y_train >= 8]
x_train_transfer = x_train[y_train == 2]
x_test_original = x_test[y_test >= 8]
x_test_transfer = x_test[y_test == 2]
y_reg_train_original = y_reg_train[y_train >= 8]
y_reg_train_transfer = y_reg_train[y_train == 2]
y_reg_test_original = y_reg_test[y_test >= 8]
y_reg_test_transfer = y_reg_test[y_test == 2]
print(x_train_original.shape)
print(x_test_original.shape)
print(x_train_transfer.shape)
print(x_test_transfer.shape)
print(y_reg_train_original.shape)
print(y_reg_test_original.shape)
print(y_reg_train_transfer.shape)
print(y_reg_test_transfer.shape)
# +
#define a regression model with Keras
import keras
from keras.models import Model
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Conv1D, MaxPooling1D, UpSampling1D
from keras.layers import Input, LeakyReLU, Reshape
from keras import backend as K
from keras.optimizers import Adam
from IPython.display import clear_output
from keras.callbacks import History
#function to view training and validation losses
class PlotLosses(keras.callbacks.Callback):
    """Keras callback that live-plots training and validation loss per epoch."""
    def on_train_begin(self, logs={}):
        # Reset all history buffers at the start of each `fit` call.
        # (The mutable default `logs={}` follows Keras's callback signature
        # and is never mutated here.)
        self.i = 0
        self.x = []
        self.losses = []
        self.val_losses = []
        self.fig = plt.figure()
        self.logs = []
    def on_epoch_end(self, epoch, logs={}):
        # Record this epoch's losses and redraw the chart in place.
        self.logs.append(logs)
        self.x.append(self.i)
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.i += 1
        clear_output(wait=True)
        plt.plot(self.x, self.losses, label="loss", c='green')
        plt.plot(self.x, self.val_losses, label="val_loss", c='red')
        plt.legend()
        plt.show()
# +
#define the convolutional neural network architecture:
# three Conv2D + LeakyReLU + MaxPooling stages, then a dense head that widens
# from 16 back up to the 128-dim output (matching the simulated y vectors).
input_img = Input(shape=(x_train.shape[1], x_train.shape[1], 1))
_ = Conv2D(8, (3, 3), activation='linear', padding='same')(input_img)
_ = LeakyReLU(alpha=0.3)(_)
f1 = MaxPooling2D((2, 2), padding='same')(_)
_ = Conv2D(16, (4, 4), activation='linear', padding='same')(f1)
_ = LeakyReLU(alpha=0.3)(_)
f2 = MaxPooling2D((2, 2), padding='same')(_)
_ = Conv2D(32, (5, 5), activation='linear', padding='same')(f2)
# NOTE(review): alpha=0.8 here differs from the 0.3 used elsewhere — confirm
# whether this is intentional.
_ = LeakyReLU(alpha=0.8)(_)
f3 = MaxPooling2D((2, 2), padding='same')(_)
# Flatten the 4x4x32 feature maps into a 512-long vector.
_ = Reshape((16*32,))(f3)
_ = Dense(16)(_)
f4 = LeakyReLU(alpha=0.3)(_)
_ = Dense(32)(f4)
_ = LeakyReLU(alpha=0.3)(_)
_ = Dense(64)(_)
_ = LeakyReLU(alpha=0.3)(_)
out = Dense(128)(_)
# +
#set loss function, optimizer and compile
regression = Model(input_img, out)
# `lr` is deprecated in Keras optimizers in favor of `learning_rate`.
opt = keras.optimizers.Adam(learning_rate=1e-4)
regression.compile(optimizer=opt,
                   loss="mse",
                   metrics=['mse'])
#get summary of architecture parameters and plot arch. diagram
regression.summary()
from keras.utils import plot_model
plot_model(regression, to_file='regression.png')
plot_losses = PlotLosses()
# +
#train the neural network on the "original" split (digits 8 and 9);
#20% of the training data is held out for validation
regression.fit(x_train_original, y_reg_train_original,
               epochs=100,
               batch_size=32,
               shuffle=True,
               validation_split=0.2,
               callbacks=[plot_losses])
# +
#save and load the trained model (uncomment to persist/restore weights)
#regression.save('regression.h5')
#from keras.models import load_model
#regression = load_model('regression.h5')
# +
#get the predictions for the train dataset (original split)
y_reg_train_original_hat = regression.predict(x_train_original)
#get the predictions for the test dataset
y_reg_test_original_hat = regression.predict(x_test_original)
# +
#check the prediction vs actual data (original split): points on the diagonal
#indicate perfect prediction
plt.figure(figsize=[3, 3])
plt.scatter(y_reg_test_original.flatten(), y_reg_test_original_hat.flatten(), alpha=0.4, c='blue')
plt.xlim([0, 25])
plt.ylim([0, 25])
plt.title('Test dataset')
plt.figure(figsize=[3, 3])
plt.scatter(y_reg_train_original.flatten(), y_reg_train_original_hat.flatten(), alpha=0.4, c='blue')
plt.xlim([0, 25])
plt.ylim([0, 25])
plt.title('Train dataset')
# +
#used the trained model (on original split) to predict for transfer split
#get the predictions for the test dataset (transfer split)
y_reg_test_transfer_hat = regression.predict(x_test_transfer)
#report the rmse (before any retraining on the transfer digit)
print("RMSE value is = %.3f" %np.sqrt(np.mean((y_reg_test_transfer-y_reg_test_transfer_hat)**2)))
# +
#create a transfer regressor, initialized with the trained weights, fix the weights of conv layers
#check the trainable status of the individual layers
for layer in regression.layers:
    print(layer, layer.trainable)
#freeze all the layers
for layer in regression.layers:
    layer.trainable = False
#allow the dense head to be retrained
# NOTE(review): indices 11-17 unfreeze seven layers, not "the last two" as the
# original comment said — verify against regression.summary() that this range
# covers exactly the intended Dense/LeakyReLU head.
for i in range(11, 18):
    regression.layers[i].trainable = True
#check the trainable status of the individual layers
for layer in regression.layers:
    print(layer, layer.trainable)
# +
#re-train the neural network on the transfer dataset (digit 2) with the
#convolutional layers frozen
regression.fit(x_train_transfer, y_reg_train_transfer,
               epochs=100,
               batch_size=32,
               shuffle=True,
               validation_split=0.2,
               callbacks=[plot_losses])
# +
#used the re-trained model (on transfer split) to predict for transfer split
#get the predictions for the test dataset (transfer split)
y_reg_test_transfer_hat_retrained = regression.predict(x_test_transfer)
#report the rmse (should improve on the pre-retraining RMSE above)
print("RMSE value is = %.3f" %np.sqrt(np.mean((y_reg_test_transfer-y_reg_test_transfer_hat_retrained)**2)))
# +
#functions to plot testing data and the predictions (vs actual data)
#to compare original model (trained on 8,9) vs "transferred" model (retrained with 2)
test_case = 3
plt.figure(figsize=[12, 4])
plt.subplot(1, 2, 1)
plt.imshow(x_test_transfer[test_case, :, :, 0])
plt.title('Test digit')
plt.subplot(1, 2, 2)
plt.plot(y_reg_test_transfer[test_case, :], ls=':', c='k', label='True', alpha=0.9)
plt.plot(y_reg_test_transfer_hat[test_case, :], c='r', label='Pred. (Original)', alpha=0.5)
plt.plot(y_reg_test_transfer_hat_retrained[test_case, :], c='b', label='Pred. (Transfer)', alpha=0.5)
#plt.ylim([0, 25])
plt.title('Predictions')
plt.legend()
# +
# -
| cnn_regression_transfer_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.display import display, HTML
from IPython.display import Image
# # What is machine learning?
# ## It's a method of data analysis that automates analytical model building
# ## In other words, we use algorithims that iteratively learn from data.
# ## This allows computers to find hidden insights without explicitly being programmed where to look!
# Common machine-learning use cases. Fixed malformed HTML: "Pricing Models"
# previously sat between </li> and <li>, outside any list item.
display(HTML('<ul><li>Fraud Detection</li><li>Web Search results</li><li>Recommendation Engines</li><li>Pricing Models</li><li>Network Intrusion Detection</li><li>Pattern and Image Recognition</li><li>Email Spam Filtering</li><li>Prediction of Equipment Failures</li><li>and Many More Use Cases...</li></ul>'))
# # Machine Learning Process Flow:
# Diagram of the ML workflow; expects the PNG to sit next to this notebook.
Image(filename='./Machine Learning Process Flow.png')
# # Three type of Machine Learning Algorithms:
#
# ## Supervised Learning:
# ### * Labeled data and are trying to predict a label based off of known features
# ### * Trained using labeled examples such as an input where the desired output is known
# ### * Historical data predicts likely future events
# ### * Examples: classification, regression, prediction and gradient boosting
#
# ## Unsupervised Learning:
# ### * Unlabeled data and are trying to group together similar data points based off of features
# ### * The system doesn't know the "right answers" and it tries to figure out what is being shown.
# ### * It is exploratory in nature and tries to find some structure within the data.
# ### * Examples: self-organizing maps, nearest-neighbor mapping, k-means clustering and singular value decomposition
#
# ## Reinforcement Learning:
# ### * Algorithm learns to perform an action from experience
# ### * Often used with robotics, gaming and navigation.
# ### * The algorithm discovers through trial and error which action yields the greatest rewards
| DFW_talk_intro_to_machine_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.5
# language: julia
# name: julia-1.6
# ---
# # Fraction of species without cas as a function of spacer length
# This Notebook generates Fig. 2D.
# +
using DelimitedFiles, Random, Statistics, PyPlot
# Render figures as SVG with selectable (non-outlined) text.
rc(:svg,fonttype=:none)
using NBInclude
# Pull in database helpers and analysis routines from sibling files.
@nbinclude("CRISPRCasdb.ipynb")
include("get_repertoires.jl")
include("linreg.jl")
# Count how many entries of `lens` equal each length in the range lmin:lmax.
# Returns a vector of counts, one element per candidate length.
function get_freqs_l(lens::Array{Int}; lmin, lmax)
    return [count(isequal(l), lens) for l in lmin:lmax]
end;
# -
# ## Define strains
# Note: gbs_cc/cnc are ordered at random to eliminate systematic bias when binning.
# +
# read gbs from file
gbdata_cc = readdlm("filtered/gbs_cc_filter.csv",',')
gbdata_cnc = readdlm("filtered/gbs_cnc_filter.csv",',')
# gbdata_cc = readdlm("filtered/gbs_cc.csv",',')
# gbdata_cnc = readdlm("filtered/gbs_cnc.csv",',')
gbs_cc = shuffle(gbdata_cc[:,2])
gbs_cnc = shuffle(gbdata_cnc[:,2]);
# -
# ## Define repertoires
# +
num_gbs_cc, repsizes_cc, spacerlens_cc = get_repertoires(gbs_cc)
num_gbs_cnc, repsizes_cnc, spacerlens_cnc = get_repertoires(gbs_cnc)
mspacerlens_cc = mean.(spacerlens_cc)
mspacerlens_cnc = mean.(spacerlens_cnc);
# -
# ## Length probabilities conditioned on presence/absence of cas
# +
# round mspacerlens to the nearest integer
mspacerlens_cc1 = round.(Int,mspacerlens_cc)
mspacerlens_cnc1 = round.(Int,mspacerlens_cnc)
# concatenated mean spacer lengths
mspacerlens_all1 = vcat(mspacerlens_cc1,mspacerlens_cnc1)
# define minimum and maximum length among cc & cnc
lmin = minimum(mspacerlens_all1)
lmax = maximum(mspacerlens_all1)
# get frequencies for each length
freqs_l_cc1 = get_freqs_l(mspacerlens_cc1, lmin=lmin, lmax=lmax)
freqs_l_cnc1 = get_freqs_l(mspacerlens_cnc1, lmin=lmin, lmax=lmax)
p_l_cc1 = freqs_l_cc1 /num_gbs_cc
p_l_cnc1 = freqs_l_cnc1/num_gbs_cnc;
# -
# ## Conditional probability of missing cas
# +
# P_nocas = num_gbs_cnc/(num_gbs_cc + num_gbs_cnc)
P_nocas_l = freqs_l_cnc1./(freqs_l_cc1 .+ freqs_l_cnc1)
var_l_cc1 = num_gbs_cc *p_l_cc1 .*(1 .- p_l_cc1 )
var_l_cnc1 = num_gbs_cnc*p_l_cnc1.*(1 .- p_l_cnc1)
r = freqs_l_cc1./freqs_l_cnc1
var_r = r.^2 .* (var_l_cc1./freqs_l_cc1.^2 .+ var_l_cnc1./freqs_l_cnc1.^2)
std_P_nocas_l = sqrt.(var_r)./(1 .+ r).^2;
# -
# ## Linear regression of binned data, known $\{\sigma_i^2\}$
# no. of species at that length >= 5
for i in 1:length(lmin:lmax)
if freqs_l_cnc1[i] >= 5
println(i)
end
end
# +
lrange = 8:17
println("Range where linear fit is done: ",(lmin:lmax)[lrange[1]],",",(lmin:lmax)[lrange[end]])
m, c, dm, dc = linreg(x=(lmin:lmax)[lrange], y=P_nocas_l[lrange], dy=std_P_nocas_l[lrange])
# -
round(m,digits=3)
# +
fig, ax = subplots(figsize=(6.4, 3), dpi=200)
# Binned fraction of species without cas, with propagated error bars.
errorbar((lmin:lmax)[lrange],P_nocas_l[lrange], linestyle="none",marker="o",markersize=2,
yerr=std_P_nocas_l[lrange], elinewidth=1,capsize=0)
# Weighted linear fit plotted over the full length range.
plot(lmin:lmax,m*(lmin:lmax) .+ c)
# hlines(P_nocas,27,41,color="grey")
fs = 12
xlabel("mean spacer length", fontsize=fs)
ylabel("fraction without cas", fontsize=fs)
xlim(28.8,38.2)
ylim(0,0.35)
xticks(30:2:38, fontsize=fs)
yticks(0:.1:.3, fontsize=fs)
# BUGFIX: the slope was hard-coded as "-0.010" in the legend while the fitted
# slope `m` is computed above; interpolate the actual value so the label
# stays consistent if the data or the fit range changes.
legend(["fit, slope = $(round(m,digits=3)) \$\\pm\$ $(round(dm,sigdigits=1))"], fontsize=fs)
ax.spines["top"].set_visible(false)
ax.spines["right"].set_visible(false)
# savefig("P_nocas.svg",format="svg")
# -
m_0 = m
# ### permutation test
# Note: The following code cell reuses the above variables.
# +
# num = 10^6
# m_values = Float64[]
# @time for i in 1:num
# mspacerlens_shuf = shuffle(mspacerlens_all1)
# # get frequencies for each length
# freqs_l_cc1 = get_freqs_l(mspacerlens_shuf[1:num_gbs_cc], lmin=lmin, lmax=lmax)
# freqs_l_cnc1 = get_freqs_l(mspacerlens_shuf[num_gbs_cc+1:end], lmin=lmin, lmax=lmax)
# p_l_cc1 = freqs_l_cc1 /num_gbs_cc
# p_l_cnc1 = freqs_l_cnc1/num_gbs_cnc;
# P_nocas_l = freqs_l_cnc1./(freqs_l_cc1 .+ freqs_l_cnc1)
# var_l_cc1 = num_gbs_cc *p_l_cc1 .*(1 .- p_l_cc1 )
# var_l_cnc1 = num_gbs_cnc*p_l_cnc1.*(1 .- p_l_cnc1)
# r = freqs_l_cc1./freqs_l_cnc1
# var_r = r.^2 .* (var_l_cc1./freqs_l_cc1.^2 .+ var_l_cnc1./freqs_l_cnc1.^2)
# std_P_nocas_l = sqrt.(var_r)./(1 .+ r).^2;
# m, c, dm, dc = linreg(x=(lmin:lmax)[lrange], y=P_nocas_l[lrange], dy=std_P_nocas_l[lrange])
# push!(m_values,m)
# end
# +
# count(x->x<m_0, m_values)/num
# -
| Fig2D_cas-v-nocas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ANP Row Sensitivity Graphing
import numpy as np
import pyanp.rowsens as rs
import pyanp.limitmatrix as lm
from pyanp.general import *
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams["figure.figsize"] = (12,7)
mat = np.array([
[0.2, 0.25, 0.05, 0.18],
[0.3, 0.3, 0.25, 0.07],
[0.4, 0.3, 0.5, 0.3],
[0.1, 0.15, 0.2, 0.45]
])
# # Manually Creating the graph
# Do not worry, we have a simpler function to create this graph, see the next section
# Fractions of the row's weight to sweep over: 1/50 .. 49/50.
xs = [i/50 for i in range(1,50)]
row = 0
# Alternatives (matrix indices) whose priority we track as row 0 is adjusted.
alts = [1,2,3]
for alt in alts:
    row=0
    ys = []
    for x in xs:
        # Re-weight row `row` of the supermatrix to parameter value x.
        new_mat = rs.row_adjust(mat, row, x)
        # Recompute the limit matrix and derive priorities from it.
        new_lmt = lm.calculus(new_mat)
        new_pri = lm.priority_from_limit(new_lmt)
        # Drop the adjusted row itself and renormalize the rest.
        new_pri[row]=0
        new_pri /= sum(new_pri)
        y = new_pri[alt]
        ys.append(y)
    label = "Alt "+str(alt)
    plt.plot(xs,ys, label=label)
    # Mark each curve at the alternative's original (unadjusted) value.
    # NOTE(review): assumes mat[row, alt] is the resting parameter value for
    # this alternative -- confirm against the pyanp.rowsens documentation.
    x = mat[row,alt]
    y = linear_interpolate(xs, ys, x)
    plt.scatter(x, y, label=label)
plt.legend()
plt.show()
# # Use `influence_table()` function to graph
rs.influence_table(mat, 0)
rs.influence_table(mat, row=0, p0mode=0.5)
df, p0s = rs.influence_table(mat, 0, graph=False)
#display(df)
display(p0s)
df, p0s = rs.influence_table(mat, 0, graph=False, p0mode=0)
display(p0s)
rs.influence_table_plot(df, p0s)
# # Limit calc
limit, p0s=rs.influence_limit(mat, 0, delta=1e-7)
display(limit)
display(p0s)
limit, p0s=rs.influence_limit(mat, 0, p0mode=0.15, delta=1e-7)
display(limit)
display(p0s)
# # Fixed distance influence
rs.influence_fixed(mat, 0)
rs.influence_fixed(mat, 0, delta=-0.25)
rs.influence_fixed(mat, 1)
# # Rank Influence simple
influences = [rs.influence_rank(mat, i) for i in range(len(mat))]
influences
| scrap/Examples-ANPRowSensitivityGraphing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={} colab_type="code" id="9aMFvFjcoI_v"
# Copyright 2018 The TensorFlow GAN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# + [markdown] colab_type="text" id="35cp5a7vN9V8"
# # TF-GAN Tutorial
#
# Tutorial authors: joelshor@, westbrook@
# + [markdown] colab_type="text" id="XSTQ5Flu7FMP"
# ## Colab Prelims
#
#
# ### Steps to run this notebook
#
# This notebook should be run in Colaboratory. If you are viewing this from GitHub, follow the GitHub instructions. If you are viewing this from Colaboratory, you should skip to the Colaboratory instructions.
#
# #### Steps from GitHub
#
# 1. Navigate your web browser to the main Colaboratory website: https://colab.research.google.com.
# 1. Click the `GitHub` tab.
# 1. In the field marked `Enter a GitHub URL or search by organization or user`, put in the URL of this notebook in GitHub and click the magnifying glass icon next to it.
# 1. Run the notebook in colaboratory by following the instructions below.
#
# #### Steps from Colaboratory
#
# This colab will run much faster on GPU. To use a Google Cloud
# GPU:
#
# 1. Go to `Runtime > Change runtime type`.
# 1. Click `Hardware accelerator`.
# 1. Select `GPU` and click `Save`.
# 1. Click `Connect` in the upper right corner and select `Connect to hosted runtime`.
# + colab={} colab_type="code" id="83-azWpoYsDg"
# Check that imports for the rest of the file work.
import tensorflow.compat.v1 as tf
# !pip install tensorflow-gan
import tensorflow_gan as tfgan
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
import numpy as np
# Allow matplotlib images to render immediately.
# %matplotlib inline
tf.logging.set_verbosity(tf.logging.ERROR) # Disable noisy outputs.
# + [markdown] colab_type="text" id="b2xrX4F-OEL7"
# ## Overview
#
# This colab will walk you through the basics of using [TF-GAN](https://github.com/tensorflow/gan) to define, train, and evaluate Generative Adversarial Networks (GANs). We describe the library's core features as well as some extra features. This colab assumes a familiarity with TensorFlow's Python API. For more on TensorFlow, please see [TensorFlow tutorials](https://www.tensorflow.org/tutorials/).
# + [markdown] colab_type="text" id="JMljl0ZwONgi"
# ## Learning objectives
#
# In this Colab, you will learn how to:
# * Use TF-GAN Estimators to quickly train a GAN
# + [markdown] colab_type="text" id="pI8zy5Bz65pa"
# ## Unconditional MNIST with GANEstimator
#
# This exercise uses TF-GAN's GANEstimator and the MNIST dataset to create a GAN for generating fake handwritten digits.
#
# ### MNIST
#
# The [MNIST dataset](https://wikipedia.org/wiki/MNIST_database) contains tens of thousands of images of handwritten digits. We'll use these images to train a GAN to generate fake images of handwritten digits. This task is small enough that you'll be able to train the GAN in a matter of minutes.
#
# ### GANEstimator
#
# TensorFlow's Estimator API that makes it easy to train models. TF-GAN offers `GANEstimator`, an Estimator for training GANs.
# + [markdown] colab_type="text" id="qxrYrU887Mns"
# ### Input Pipeline
#
# We set up our input pipeline by defining an `input_fn`. In the "Train and Eval Loop" section below we pass this function to our GANEstimator's `train` method to initiate training. The `input_fn`:
#
# 1. Generates the random inputs for the generator.
# 2. Uses `tensorflow_datasets` to retrieve the MNIST data.
# 3. Uses the tf.data API to format the data.
# + colab={} colab_type="code" id="Zs8kdV0w7Rtq"
import tensorflow_datasets as tfds
import tensorflow.compat.v1 as tf
def input_fn(mode, params):
  """Input pipeline for GANEstimator: pairs generator noise with MNIST images.

  Args:
    mode: a tf.estimator.ModeKeys value (TRAIN / EVAL / PREDICT).
    params: dict with required keys 'batch_size' and 'noise_dims'.

  Returns:
    In PREDICT mode, a Dataset of noise batches only; otherwise a Dataset of
    (noise, images) tuples.
  """
  assert 'batch_size' in params
  assert 'noise_dims' in params
  bs = params['batch_size']
  nd = params['noise_dims']
  split = 'train' if mode == tf.estimator.ModeKeys.TRAIN else 'test'
  shuffle = (mode == tf.estimator.ModeKeys.TRAIN)
  just_noise = (mode == tf.estimator.ModeKeys.PREDICT)
  # Infinite stream of standard-normal noise batches for the generator.
  noise_ds = (tf.data.Dataset.from_tensors(0).repeat()
              .map(lambda _: tf.random_normal([bs, nd])))
  # At prediction time the generator only needs noise input.
  if just_noise:
    return noise_ds
  def _preprocess(element):
    # Map [0, 255] to [-1, 1].
    images = (tf.cast(element['image'], tf.float32) - 127.5) / 127.5
    return images
  # Cache after preprocessing so the map only runs once per image.
  images_ds = (tfds.load('mnist:3.*.*', split=split)
               .map(_preprocess)
               .cache()
               .repeat())
  if shuffle:
    images_ds = images_ds.shuffle(
        buffer_size=10000, reshuffle_each_iteration=True)
  # drop_remainder keeps batch shapes static, as required by the networks.
  images_ds = (images_ds.batch(bs, drop_remainder=True)
               .prefetch(tf.data.experimental.AUTOTUNE))
  return tf.data.Dataset.zip((noise_ds, images_ds))
# + [markdown] colab_type="text" id="t6aboJBr8Rig"
# Download the data and sanity check the inputs.
# + colab={"height": 279} colab_type="code" executionInfo={"elapsed": 2639, "status": "ok", "timestamp": 1559656474241, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="zEhgLuGo8OGc" outputId="efd62ab6-6d5c-4ee3-f6ed-85447922b54e"
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import tensorflow_gan as tfgan
import numpy as np
params = {'batch_size': 100, 'noise_dims':64}
with tf.Graph().as_default():
ds = input_fn(tf.estimator.ModeKeys.TRAIN, params)
numpy_imgs = next(iter(tfds.as_numpy(ds)))[1]
img_grid = tfgan.eval.python_image_grid(numpy_imgs, grid_shape=(10, 10))
plt.axis('off')
plt.imshow(np.squeeze(img_grid))
plt.show()
# + [markdown] colab_type="text" id="4sAetutZ9t93"
# ### Neural Network Architecture
#
# To build our GAN we need two separate networks:
#
# * A generator that takes input noise and outputs generated MNIST digits
# * A discriminator that takes images and outputs a probability of being real or fake
#
# We define functions that build these networks. In the GANEstimator section below we pass the builder functions to the `GANEstimator` constructor. `GANEstimator` handles hooking the generator and discriminator together into the GAN.
#
# + colab={} colab_type="code" id="oZ9n-jw_MG6C"
def _dense(inputs, units, l2_weight):
  """Fully-connected layer (no activation) with L2 weight/bias regularization."""
  return tf.layers.dense(
      inputs, units, None,
      kernel_initializer=tf.keras.initializers.glorot_uniform,
      kernel_regularizer=tf.keras.regularizers.l2(l=l2_weight),
      bias_regularizer=tf.keras.regularizers.l2(l=l2_weight))
def _batch_norm(inputs, is_training):
  """Batch normalization; moving statistics update only when `is_training`."""
  return tf.layers.batch_normalization(
      inputs, momentum=0.999, epsilon=0.001, training=is_training)
def _deconv2d(inputs, filters, kernel_size, stride, l2_weight):
  """Transposed (up-sampling) convolution with ReLU and L2 regularization."""
  return tf.layers.conv2d_transpose(
      inputs, filters, [kernel_size, kernel_size], strides=[stride, stride],
      activation=tf.nn.relu, padding='same',
      kernel_initializer=tf.keras.initializers.glorot_uniform,
      kernel_regularizer=tf.keras.regularizers.l2(l=l2_weight),
      bias_regularizer=tf.keras.regularizers.l2(l=l2_weight))
def _conv2d(inputs, filters, kernel_size, stride, l2_weight):
  """Convolution layer (no activation) with L2 weight/bias regularization."""
  return tf.layers.conv2d(
      inputs, filters, [kernel_size, kernel_size], strides=[stride, stride],
      activation=None, padding='same',
      kernel_initializer=tf.keras.initializers.glorot_uniform,
      kernel_regularizer=tf.keras.regularizers.l2(l=l2_weight),
      bias_regularizer=tf.keras.regularizers.l2(l=l2_weight))
# + colab={} colab_type="code" id="NHkpn6ks90_R"
def unconditional_generator(noise, mode, weight_decay=2.5e-5):
  """Generator to produce unconditional MNIST images.

  Args:
    noise: 2D noise tensor of shape [batch, noise_dims].
    mode: a tf.estimator.ModeKeys value; controls batch-norm training mode.
    weight_decay: L2 regularization strength for the dense/deconv layers.

  Returns:
    A [batch, 28, 28, 1] tensor of generated images in [-1, 1].
  """
  is_training = (mode == tf.estimator.ModeKeys.TRAIN)
  # Project the noise up to a 7x7x256 feature map via two dense layers.
  net = _dense(noise, 1024, weight_decay)
  net = _batch_norm(net, is_training)
  net = tf.nn.relu(net)
  net = _dense(net, 7 * 7 * 256, weight_decay)
  net = _batch_norm(net, is_training)
  net = tf.nn.relu(net)
  net = tf.reshape(net, [-1, 7, 7, 256])
  # Two stride-2 deconvolutions upsample 7x7 -> 14x14 -> 28x28.
  net = _deconv2d(net, 64, 4, 2, weight_decay)
  net = _deconv2d(net, 64, 4, 2, weight_decay)
  # Make sure that generator output is in the same range as `inputs`
  # ie [-1, 1].
  net = _conv2d(net, 1, 4, 1, 0.0)
  net = tf.tanh(net)
  return net
# + colab={} colab_type="code" id="w-ZqQ4_thIrP"
# Leaky ReLU with a small negative slope, shared by the discriminator layers.
_leaky_relu = lambda net: tf.nn.leaky_relu(net, alpha=0.01)
def unconditional_discriminator(img, unused_conditioning, mode, weight_decay=2.5e-5):
  """Discriminator mapping images to real/fake logits.

  Args:
    img: [batch, 28, 28, 1] image tensor in [-1, 1].
    unused_conditioning: ignored; present only to match the signature
      TF-GAN expects of a discriminator function.
    mode: a tf.estimator.ModeKeys value; controls batch-norm training mode.
    weight_decay: L2 regularization strength.

  Returns:
    A [batch, 1] tensor of unnormalized logits (no sigmoid applied).
  """
  del unused_conditioning
  is_training = (mode == tf.estimator.ModeKeys.TRAIN)
  # Two stride-2 convolutions downsample 28x28 -> 14x14 -> 7x7.
  net = _conv2d(img, 64, 4, 2, weight_decay)
  net = _leaky_relu(net)
  net = _conv2d(net, 128, 4, 2, weight_decay)
  net = _leaky_relu(net)
  net = tf.layers.flatten(net)
  net = _dense(net, 1024, weight_decay)
  net = _batch_norm(net, is_training)
  net = _leaky_relu(net)
  net = _dense(net, 1, weight_decay)
  return net
# + [markdown] colab_type="text" id="OhTAjxnyPS5e"
# ### Evaluating Generative Models, and evaluating GANs
#
#
# TF-GAN provides some standard methods of evaluating generative models. In this example, we measure:
#
# * Inception Score: called `mnist_score` below.
# * Frechet Inception Distance
#
# We apply a pre-trained classifier to both the real data and the generated data to calculate the *Inception Score*. The Inception Score is designed to measure both quality and diversity. See [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498) by Salimans et al for more information about the Inception Score.
#
# *Frechet Inception Distance* measures how close the generated image distribution is to the real image distribution. See [GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium](https://arxiv.org/abs/1706.08500) by Heusel et al for more information about the Frechet Inception distance.
# + colab={} colab_type="code" id="1jF-FW5LPTn6"
from tensorflow_gan.examples.mnist import util as eval_util
import os
def get_eval_metric_ops_fn(gan_model):
  """Build eval metrics: mean logits, MNIST Inception scores, Frechet distance.

  Args:
    gan_model: a TF-GAN GANModel tuple exposing discriminator outputs and
      the real/generated data tensors.

  Returns:
    Dict mapping metric names to tf.metrics value/update tuples for
    Estimator evaluation.
  """
  real_data_logits = tf.reduce_mean(gan_model.discriminator_real_outputs)
  gen_data_logits = tf.reduce_mean(gan_model.discriminator_gen_outputs)
  # MNIST-specific "Inception" score computed with a pretrained classifier;
  # the real-data score serves as a reference upper bound.
  real_mnist_score = eval_util.mnist_score(gan_model.real_data)
  generated_mnist_score = eval_util.mnist_score(gan_model.generated_data)
  frechet_distance = eval_util.mnist_frechet_distance(
      gan_model.real_data, gan_model.generated_data)
  return {
      'real_data_logits': tf.metrics.mean(real_data_logits),
      'gen_data_logits': tf.metrics.mean(gen_data_logits),
      'real_mnist_score': tf.metrics.mean(real_mnist_score),
      'mnist_score': tf.metrics.mean(generated_mnist_score),
      'frechet_distance': tf.metrics.mean(frechet_distance),
  }
# + [markdown] colab_type="text" id="kxF2-gWHHaej"
# ### GANEstimator
#
# The `GANEstimator` assembles and manages the pieces of the whole GAN model. The `GANEstimator` constructor takes the following components for both the generator and discriminator:
#
# * Network builder functions: we defined these in the "Neural Network Architecture" section above.
# * Loss functions: here we use the wasserstein loss for both.
# * Optimizers: here we use `tf.train.AdamOptimizer` for both generator and discriminator training.
# + colab={} colab_type="code" id="OBd8Vg7lHit8"
train_batch_size = 32 #@param
noise_dimensions = 64 #@param
generator_lr = 0.001 #@param
discriminator_lr = 0.0002 #@param
def gen_opt():
  """Adam optimizer for the generator with a step-dependent learning rate."""
  gstep = tf.train.get_or_create_global_step()
  base_lr = generator_lr
  # Halve the learning rate at 1000 steps.
  lr = tf.cond(gstep < 1000, lambda: base_lr, lambda: base_lr / 2.0)
  return tf.train.AdamOptimizer(lr, 0.5)
gan_estimator = tfgan.estimator.GANEstimator(
generator_fn=unconditional_generator,
discriminator_fn=unconditional_discriminator,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
params={'batch_size': train_batch_size, 'noise_dims': noise_dimensions},
generator_optimizer=gen_opt,
discriminator_optimizer=tf.train.AdamOptimizer(discriminator_lr, 0.5),
get_eval_metric_ops_fn=get_eval_metric_ops_fn)
# + [markdown] colab_type="text" id="n1uldXfUfstT"
# ### Train and eval loop
#
# The `GANEstimator`'s `train()` method initiates GAN training, including the alternating generator and discriminator training phases.
#
# The loop in the code below calls `train()` repeatedly in order to periodically display generator output and evaluation results. But note that the code below does not manage the alternation between discriminator and generator: that's all handled automatically by `train()`.
# + colab={"height": 2281} colab_type="code" executionInfo={"elapsed": 221607, "status": "ok", "timestamp": 1559656706482, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="AH6gcvcwHvSn" outputId="a72e2218-95a8-4585-8a5c-7c4ec896ac0c"
# Disable noisy output.
tf.autograph.set_verbosity(0, False)
import time
steps_per_eval = 500 #@param
max_train_steps = 5000 #@param
batches_for_eval_metrics = 100 #@param
# Used to track metrics.
steps = []
real_logits, fake_logits = [], []
real_mnist_scores, mnist_scores, frechet_distances = [], [], []
cur_step = 0
start_time = time.time()
while cur_step < max_train_steps:
next_step = min(cur_step + steps_per_eval, max_train_steps)
start = time.time()
gan_estimator.train(input_fn, max_steps=next_step)
steps_taken = next_step - cur_step
time_taken = time.time() - start
print('Time since start: %.2f min' % ((time.time() - start_time) / 60.0))
print('Trained from step %i to %i in %.2f steps / sec' % (
cur_step, next_step, steps_taken / time_taken))
cur_step = next_step
# Calculate some metrics.
metrics = gan_estimator.evaluate(input_fn, steps=batches_for_eval_metrics)
steps.append(cur_step)
real_logits.append(metrics['real_data_logits'])
fake_logits.append(metrics['gen_data_logits'])
real_mnist_scores.append(metrics['real_mnist_score'])
mnist_scores.append(metrics['mnist_score'])
frechet_distances.append(metrics['frechet_distance'])
print('Average discriminator output on Real: %.2f Fake: %.2f' % (
real_logits[-1], fake_logits[-1]))
print('Inception Score: %.2f / %.2f Frechet Distance: %.2f' % (
mnist_scores[-1], real_mnist_scores[-1], frechet_distances[-1]))
# Vizualize some images.
iterator = gan_estimator.predict(
input_fn, hooks=[tf.train.StopAtStepHook(num_steps=21)])
try:
imgs = np.array([next(iterator) for _ in range(20)])
except StopIteration:
pass
tiled = tfgan.eval.python_image_grid(imgs, grid_shape=(2, 10))
plt.axis('off')
plt.imshow(np.squeeze(tiled))
plt.show()
# Plot the metrics vs step.
plt.title('MNIST Frechet distance per step')
plt.plot(steps, frechet_distances)
plt.figure()
plt.title('MNIST Score per step')
plt.plot(steps, mnist_scores)
plt.plot(steps, real_mnist_scores)
plt.show()
# + [markdown] colab_type="text" id="uy1dsvWuwJeS"
# ### Next steps
#
# Try [this colab notebook](https://github.com/tensorflow/gan) to train a GAN on Google's Cloud TPU using TF-GAN.
#
#
#
| tensorflow_gan/examples/colab_notebooks/tfgan_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Trees and Forests
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Decision Tree Classification
# ==================
#
from figures import plot_tree_interactive
plot_tree_interactive()
# ## Random Forests
from figures import plot_forest_interactive
plot_forest_interactive()
# ## Selecting the Optimal Estimator via Cross-Validation
# +
# NOTE(review): `sklearn.grid_search` and `sklearn.cross_validation` were
# deprecated in scikit-learn 0.18 and removed in 0.20; the same classes live
# in `sklearn.model_selection` with identical behavior.
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier

digits = load_digits()
X, y = digits.data, digits.target

# Hold out a test set; fix the seed so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

rf = RandomForestClassifier(n_estimators=200, n_jobs=-1)

# Hyper-parameter grid searched with cross-validation on the training set.
parameters = {'max_features': ['sqrt', 'log2'],
              'max_depth': [5, 7, 9]}

clf_grid = GridSearchCV(rf, parameters)
clf_grid.fit(X_train, y_train)
# -
clf_grid.score(X_train, y_train)
clf_grid.score(X_test, y_test)
| Chapter 2/Trees and Forests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers.recurrent import SimpleRNN, LSTM, GRU
from keras.utils import np_utils
from keras import backend as K
from distutils.version import LooseVersion as LV
from keras import __version__
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
import numpy as np
import matplotlib.pyplot as plt
print('Using Keras version:', __version__, 'backend:', K.backend())
assert(LV(__version__) >= LV("2.0.0"))
# +
from keras.datasets import mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
nb_classes = 10
img_rows, img_cols = 28, 28
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# one-hot encoding:
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print()
print('MNIST data loaded: train:',len(X_train),'test:',len(X_test))
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('Y_train:', Y_train.shape)
# +
# Number of hidden units to use:
nb_units = 50
model = Sequential()
# Recurrent layers supported: SimpleRNN, LSTM, GRU:
model.add(SimpleRNN(nb_units,
input_shape=(img_rows, img_cols)))
# To stack multiple RNN layers, all RNN layers except the last one need
# to have "return_sequences=True". An example of using two RNN layers:
#model.add(SimpleRNN(16,
# input_shape=(img_rows, img_cols),
# return_sequences=True))
#model.add(SimpleRNN(32))
model.add(Dense(units=nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.summary())
# -
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
# +
# %%time
epochs = 50
history = model.fit(X_train,
Y_train,
epochs=epochs,
batch_size=128,
verbose=2)
# +
plt.figure(figsize=(5,3))
plt.plot(history.epoch,history.history['loss'])
plt.title('loss')
plt.figure(figsize=(5,3))
plt.plot(history.epoch,history.history['acc'])
plt.title('accuracy');
# -
| test_moving_mnist/.ipynb_checkpoints/mnist_rnn-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import glob
import numpy as np
import re
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
# %matplotlib inline
from matplotlib.font_manager import FontProperties
from plotnine import *
names = glob.glob("/home/roi/GM_SG/results/exp1/*")
# +
# Load every experiment CSV and concatenate into a single dataframe, tagging
# each run with the adversary mean parsed from its file name (the first
# floating-point number in the path).
frames = []
for name in names:
    run_df = pd.read_csv(name)
    mean = re.findall("\d+\.\d+", name)
    run_df["Mean"] = float(mean[0])
    frames.append(run_df)

# Concatenate once at the end: calling pd.concat inside the loop copies the
# accumulated dataframe on every iteration (quadratic in total rows).
final = pd.concat(frames)

# Sorted list of the distinct adversary means present in the runs.
all_means = list(set(final["Mean"]))
all_means.sort()
# -
variables = final.columns
variables = variables[1:-1]
stds = final.groupby(["Mean"], as_index=False)[variables].std()
means = final.groupby(["Mean"], as_index=False)[variables].mean()
final.groupby(["Mean"])[variables].mean()
# ## RMSE vs Mean of adversary distribution
# +
means1 = means
devs1 = stds
t = "RMSE vs Wine Specific Cost"
x = means1["Mean"]
#
y1 = means1["nash_atdata"]
y2 = means1["raw_atdata"]
#
y1dev = devs1["nash_atdata"]
y2dev = devs1["raw_atdata"]
f,ax2 = plt.subplots(1,1,sharex=True, facecolor='w')
sns.set_style("whitegrid")
# Plot distributions
ax2.errorbar(x, y1, yerr=y1dev ,ecolor='black', color = sns.xkcd_rgb["black"], fmt = 'o',linestyle='-',elinewidth=0.9,capsize=5,capthick=0.5)
ax2.errorbar(x, y2, yerr=y2dev ,ecolor='black', color = sns.xkcd_rgb["black"], fmt = 'o', linestyle='--',elinewidth=0.9,capsize=5,capthick=0.5)
# Create the legend box
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Add legend and labels
ax2.legend(labels=["Nash", "Raw"],loc='upper right', bbox_to_anchor=(0,0.97,1,0.2), prop={'size': 14})
ax2.set(xlabel="Wine Specific Cost")
ax2.set_ylabel("RMSE")
# Set size of title and x,y labels
plt.gcf().autofmt_xdate()
ax2.tick_params(labelsize=14)
f.suptitle(t,x=0.5, y=0.93, fontsize=20)
ax2.xaxis.label.set_size(20)
ax2.yaxis.label.set_size(20)
plt.rcParams["figure.figsize"] = (12,9)
plt.savefig("/home/roi/GM_SG/figs/rmse_vs_cw.eps", format='eps', dpi=600)
# -
# # Time histogram
time_nash = pd.read_csv("/home/roi/GM_SG/results/time_nash.log", header=None)
time_nash.columns = ["Time Backward Solution"]
time_nash.plot.hist(bins=30)
print(time_nash.mean())
print(time_nash.std())
p = ggplot(data = time_nash) + geom_histogram(aes(x="Time Backward Solution"), bins = 30)
p = p + ylab("Count")
p
| notebooks/.ipynb_checkpoints/analysis_adv_reg-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Text Preprocessing
# :label:`sec_text_preprocessing`
#
# We have reviewed and evaluated
# statistical tools
# and prediction challenges
# for sequence data.
# Such data can take many forms.
# Specifically,
# as we will focus on
# in many chapters of the book,
# text is one of the most popular examples of sequence data.
# For example,
# an article can be simply viewed as a sequence of words, or even a sequence of characters.
# To facilitate our future experiments
# with sequence data,
# we will dedicate this section
# to explain common preprocessing steps for text.
# Usually, these steps are:
#
# 1. Load text as strings into memory.
# 1. Split strings into tokens (e.g., words and characters).
# 1. Build a table of vocabulary to map the split tokens to numerical indices.
# 1. Convert text into sequences of numerical indices so they can be manipulated by models easily.
#
# + origin_pos=2 tab=["pytorch"]
import collections
from d2l import torch as d2l
import re
# + [markdown] origin_pos=4
# ## Reading the Dataset
#
# To get started we load text from <NAME>' [*The Time Machine*](http://www.gutenberg.org/ebooks/35).
# This is a fairly small corpus of just over 30000 words, but for the purpose of what we want to illustrate this is just fine.
# More realistic document collections contain many billions of words.
# The following function reads the dataset into a list of text lines, where each line is a string.
# For simplicity, here we ignore punctuation and capitalization.
#
# + origin_pos=5 tab=["pytorch"]
#@save
d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL + 'timemachine.txt',
'090b5e7e70c295757f55df93cb0a180b9691891a')
def read_time_machine():  #@save
    """Load the time machine dataset into a list of text lines."""
    with open(d2l.download('time_machine'), 'r') as f:
        raw_lines = f.readlines()
    # Replace every run of non-letter characters with a single space,
    # then trim and lowercase each line.
    non_letters = re.compile('[^A-Za-z]+')
    return [non_letters.sub(' ', raw).strip().lower() for raw in raw_lines]
lines = read_time_machine()
print(f'# text lines: {len(lines)}')
print(lines[0])
print(lines[10])
# + [markdown] origin_pos=6
# ## Tokenization
#
# The following `tokenize` function
# takes a list (`lines`) as the input,
# where each list is a text sequence (e.g., a text line).
# Each text sequence is split into a list of tokens.
# A *token* is the basic unit in text.
# In the end,
# a list of token lists are returned,
# where each token is a string.
#
# + origin_pos=7 tab=["pytorch"]
def tokenize(lines, token='word'):  #@save
    """Split text lines into word or character tokens.

    Args:
        lines: list of strings, one per text line.
        token: 'word' to split on whitespace, 'char' to split into
            individual characters.

    Returns:
        A list containing one list of string tokens per input line.

    Raises:
        ValueError: if `token` is neither 'word' nor 'char'.
    """
    if token == 'word':
        return [line.split() for line in lines]
    if token == 'char':
        return [list(line) for line in lines]
    # The original implementation printed an error and implicitly returned
    # None; raise instead so the mistake cannot propagate silently.
    raise ValueError('unknown token type: ' + token)
tokens = tokenize(lines)
for i in range(11):
print(tokens[i])
# + [markdown] origin_pos=8
# ## Vocabulary
#
# The string type of the token is inconvenient to be used by models, which take numerical inputs.
# Now let us build a dictionary, often called *vocabulary* as well, to map string tokens into numerical indices starting from 0.
# To do so, we first count the unique tokens in all the documents from the training set,
# namely a *corpus*,
# and then assign a numerical index to each unique token according to its frequency.
# Rarely appeared tokens are often removed to reduce the complexity.
# Any token that does not exist in the corpus or has been removed is mapped into a special unknown token “<unk>”.
# We optionally add a list of reserved tokens, such as
# “<pad>” for padding,
# “<bos>” to present the beginning for a sequence, and “<eos>” for the end of a sequence.
#
# + origin_pos=9 tab=["pytorch"]
class Vocab:  #@save
    """Vocabulary for text.

    Maps string tokens to integer indices (and back). Index 0 is always the
    unknown token '<unk>', followed by any reserved tokens, then corpus
    tokens in decreasing order of frequency.
    """
    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        # Sort unique tokens by decreasing frequency so that frequent
        # tokens receive small indices.
        counter = count_corpus(tokens)
        self.token_freqs = sorted(counter.items(), key=lambda x: x[1],
                                  reverse=True)
        # The index for the unknown token is 0
        self.unk, uniq_tokens = 0, ['<unk>'] + reserved_tokens
        # PERF: the original `token not in uniq_tokens` list test made
        # construction quadratic in vocabulary size; a set keeps the same
        # result in O(1) per lookup while preserving insertion order.
        seen = set(uniq_tokens)
        for token, freq in self.token_freqs:
            if freq >= min_freq and token not in seen:
                uniq_tokens.append(token)
                seen.add(token)
        self.idx_to_token, self.token_to_idx = [], dict()
        for token in uniq_tokens:
            self.idx_to_token.append(token)
            self.token_to_idx[token] = len(self.idx_to_token) - 1

    def __len__(self):
        """Return the number of unique tokens (including '<unk>')."""
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        """Map a token (or nested list of tokens) to indices; unknown -> 0."""
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]

    def to_tokens(self, indices):
        """Map an index (or list of indices) back to the token strings."""
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]
def count_corpus(tokens):  #@save
    """Count token frequencies.

    `tokens` is either a flat list of tokens or a list of token lines
    (list of lists); the nested case is flattened before counting.
    """
    if tokens and isinstance(tokens[0], list):
        flat = [tok for line in tokens for tok in line]
    else:
        flat = tokens
    return collections.Counter(flat)
# + [markdown] origin_pos=10
# We construct a vocabulary using the time machine dataset as the corpus.
# Then we print the first few frequent tokens with their indices.
#
# + origin_pos=11 tab=["pytorch"]
# Build the vocabulary from the tokenized corpus and peek at the ten most
# frequent tokens with their assigned indices.
vocab = Vocab(tokens)
print(list(vocab.token_to_idx.items())[:10])
# + [markdown] origin_pos=12
# Now we can convert each text line into a list of numerical indices.
#
# + origin_pos=13 tab=["pytorch"]
# Show two raw token lines alongside their numerical index encodings.
for i in [0, 10]:
    print('words:', tokens[i])
    print('indices:', vocab[tokens[i]])
# + [markdown] origin_pos=14
# ## Putting All Things Together
#
# Using the above functions, we package everything into the `load_corpus_time_machine` function, which returns `corpus`, a list of token indices, and `vocab`, the vocabulary of the time machine corpus.
# The modifications we did here are:
# i) we tokenize text into characters, not words, to simplify the training in later sections;
# ii) `corpus` is a single list, not a list of token lists, since each text line in the time machine dataset is not necessarily a sentence or a paragraph.
#
# + origin_pos=15 tab=["pytorch"]
def load_corpus_time_machine(max_tokens=-1):  #@save
    """Return token indices and the vocabulary of the time machine dataset."""
    raw_lines = read_time_machine()
    char_lines = tokenize(raw_lines, 'char')
    vocabulary = Vocab(char_lines)
    # Lines in the time machine dataset are not necessarily sentences or
    # paragraphs, so flatten them into one continuous stream of indices.
    corpus = []
    for line in char_lines:
        for ch in line:
            corpus.append(vocabulary[ch])
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocabulary
# Build the character-level corpus and its vocabulary; the bare tuple on the
# last line is echoed as the notebook cell's output.
corpus, vocab = load_corpus_time_machine()
len(corpus), len(vocab)
# + [markdown] origin_pos=16
# ## Summary
#
# * Text is an important form of sequence data.
# * To preprocess text, we usually split text into tokens, build a vocabulary to map token strings into numerical indices, and convert text data into token indices for models to manipulate.
#
#
# ## Exercises
#
# 1. Tokenization is a key preprocessing step. It varies for different languages. Try to find another three commonly used methods to tokenize text.
# 1. In the experiment of this section, tokenize text into words and vary the `min_freq` arguments of the `Vocab` instance. How does this affect the vocabulary size?
#
# [Discussions](https://discuss.d2l.ai/t/115)
#
| d2l-en/pytorch/chapter_recurrent-neural-networks/text-preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:graphdta] *
# language: python
# name: conda-env-graphdta-py
# ---
import cddd
import mso
from mso.optimizer import BasePSOptimizer
from mso.objectives.scoring import ScoringFunction
from mso.objectives.mol_functions import qed_score
from cddd.inference import InferenceModel
infer_model = InferenceModel() # The CDDD inference model used to encode/decode molecular SMILES strings to/from the CDDD space. You might need to specify the path to the pretrained model (e.g. default_model)
init_smiles = "c1ccccc1" # SMILES representation of benzene
scoring_functions = [ScoringFunction(func=qed_score, name="qed", is_mol_func=True)] # wrap the drug likeness score inside a scoring function instance
# Particle-swarm optimizer seeded from the benzene query: 200 particles in a
# single swarm search CDDD space, scored by the QED drug-likeness function.
opt = BasePSOptimizer.from_query(
    init_smiles=init_smiles,
    num_part=200,
    num_swarms=1,
    inference_model=infer_model,
    scoring_functions=scoring_functions)
opt.run(20)  # 20 PSO iterations
opt.best_solutions
from rdkit.Chem import Draw
from rdkit import Chem
# Render the starting molecule and the current best solution; a bare molecule
# name on the last line of a cell displays it inline via RDKit.
mol = Chem.MolFromSmiles(init_smiles)
mol
mol = Chem.MolFromSmiles(opt.best_solutions['smiles'].values[0])
mol
opt.run(20)  # continue the same optimizer for 20 more iterations
mol = Chem.MolFromSmiles(opt.best_solutions['smiles'].values[0])
mol
opt.best_solutions
opt.best_fitness_history
# # An progesterone example
# +
from mso.objectives.mol_functions import qed_score, penalize_macrocycles, docking_score, penalize_molecular_weight
from mso.objectives.emb_functions import distance_score
from mso.optimizer import BasePSOptimizer
from mso.objectives.scoring import ScoringFunction
from mso.objectives.mol_functions import qed_score
from cddd.inference import InferenceModel
import mso
import cddd
init_smiles = "c1ccccc1"  # restart from benzene for the progesterone example
# +
infer_model = InferenceModel()
# Encode the seed SMILES into its CDDD embedding (demonstrates the encoder
# API; the embedding itself is not reused below).
smiles_embedding = infer_model.seq_to_emb([init_smiles, ])
# Multi-objective scoring: docking against the 5xq0 receptor dominates
# (weight 60), balanced by QED drug-likeness, a macrocycle penalty, and a
# molecular-weight penalty.
scoring_functions = [ScoringFunction(func=docking_score, name='docking', weight=60,
                                     is_mol_func=True,
                                     additional_args={'receptor': "/data1/zlzzheng/apps/mso/notebooks/5xq0_A_rec.pdb_mgltools.pdbqt",
                                                      'pocket': [39.929, 6.848, -49.476],
                                                      'exe': "idock",
                                                      'verbose': False,
                                                     }),
                     ScoringFunction(func=qed_score, name="qed", weight=10, is_mol_func=True),
                     ScoringFunction(func=penalize_macrocycles, weight=10, name="marcocycles", is_mol_func=True),
                     ScoringFunction(func=penalize_molecular_weight, weight=20, name="pmw", is_mol_func=True)
                    ]
# Small swarm (10 particles) because each docking evaluation is expensive.
opt = BasePSOptimizer.from_query(
    init_smiles=init_smiles,
    num_part=10,
    num_swarms=1,
    inference_model=infer_model,
    scoring_functions=scoring_functions)
# -
opt.run(100)  # 100 PSO iterations under the multi-objective score
from rdkit import Chem
mol = Chem.MolFromSmiles(init_smiles)
mol
# Pull one recorded best SMILES out of the last column of the fitness history
# and render it. NOTE(review): i = 6 assumes at least 7 entries exist —
# confirm against the history length.
i = 6
m = opt.best_fitness_history.values[:, -1][i].ravel()[0]
mol = Chem.MolFromSmiles(m)
mol
opt.best_solutions
| notebooks/sample_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + slideshow={"slide_type": "skip"}
# Configuration to reload modules and libraries on every execution
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
from IPython.core.display import HTML
# NOTE(review): the handle from open() is never closed; harmless in a
# short-lived notebook kernel, but a context manager would be cleaner.
HTML(open("style/mat281.css", "r").read())
# + [markdown] slideshow={"slide_type": "slide"}
# <header class="w3-container w3-teal">
# <img src="images/utfsm.png" alt="" height="100px" align="left"/>
# <img src="images/mat.png" alt="" height="100px" align="right"/>
# </header>
# <br/><br/><br/><br/><br/>
# # MAT281
# ## Aplicaciones de la Matemática en la Ingeniería
#
# ### <NAME>
#
# https://www.github.com/sebastiandres/mat281
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Unidad anterior: Introducción y Proyectos
#
# * Introducción y reglas
# * Proyectos 2014 y 2015
# * Minería Subterránea
#
# ## Unidad actual: Herramientas Transversales en Ingeniería
# * Conjeturas Razonables
# * Adimensionalización y Teorema Pi
# * Visualización y Metodologías de trabajo
# + [markdown] slideshow={"slide_type": "slide"}
# ## ¿Qué contenido aprenderemos?
#
# * Conjeturas razonables
# * Reglas de aproximación
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## ¿Porqué aprenderemos ese contenido?
#
# * Conjeturas razonables
# * Reglas de aproximación
# * Porque todo ingeniero necesita conocer el orden de magnitud de las soluciones que está buscando.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## MOOC
# * Soy ferviente partidario de los MOOCs.
# * Gran parte de esta clase está basada en el curso Street Fighting Mathematics de Edx (libro asociado disponible en línea).
#
# IMAGEN
# <img src="images/utfsm.png" alt="" height="50px" align="middle"/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Notación
# * $=$ : igual a
# * $\approx$ : aproximadamente (latex \approx)
# * $\sim$ : del orden de (latex \sim)
# * $\propto$ : proporcional (latex \propto)
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Notación
# * $=$ : Se usa para una igualdad.
# $$A = \pi a b$$
# * $\approx$ : Se usa para igualdad excepto por factor cercano a uno.
# $$A \approx 3 a b$$
# * $\sim$ : Se usa para igualdad excepto por factor sin dimensiones.
# $$A \sim a b$$
# * $\propto$ : Se usa para igualdad excepto por factor con dimensiones.
# $$A \propto a $$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Regla 1-$\pi$-10
#
# #### Motivación
# Multiplicar y dividir números rápidamente.
#
# #### Técnica
# * Utilizar notación científica.
# * Multiplicar factores de 10.
# * Multiplicar factores numericos utilizando la siguiente aproximacion:
# * Si $1 \leq |x| < 2$: aproximar a 1.
# * Si $2 \leq |x| < 6$: aproximar a $\pi$.
# * Si $6 \leq |x| < 10$: aproximar a 10.
# * Utilizar $\pi^2 ≈ 10$.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Regla 1-$\pi$-10
# #### Ejemplo 1
# $$ 1,312 \times 3,124 \times 542 ≈ 1.3 \times 10^3 \times 3.1 \times 10^3 · 5.4 · 10^2 $$
# $$\approx 1.3 \times 3.1 \times 5.4 \times 10^3 \times 10^3 \times 10^2$$
# $$\approx 1 \times \pi \times \pi \times 10^8$$
# $$\approx \pi^2 10^8$$
# $$\approx 10^9$$
# + [markdown] slideshow={"slide_type": "fragment"}
# La respuesta exacta es:
#
# $$1,312 \times 3,124 \times 542 = 2,221,488,896 \approx 2.2 10^9$$
# ¡Suficientemente cerca!
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Regla 1-$\pi$-10
# #### Ejemplo 2
# $$4, 675 · 0.007432 · 892 ≈ 4.6 · 10^3 · 7.4 · 10^{ −3} · 8.9 · 10^2$$
# $$\approx 4.6 · 7.4 · 8.9 · 10^3 · 10^{ −3} · 10^2$$
# $$\approx \pi · 10 · 10 · 10^2$$
# $$\approx \pi · 10^4$$
# $$\approx 3 · 10^4$$
# + [markdown] slideshow={"slide_type": "fragment"}
# La respuesta exacta es:
#
# $$ 4,675 \times 0.007432 \times 892 = 30,992.1832 \approx 3.1 · 10^4$$
#
# ¡Increíblemente cerca!
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Conjeturas razonables
#
# #### Educated Guessing
#
# *A guess that is made using judgment and some degree of knowledge.*
#
# #### Conjetura razonable
# El arte de las conjeturas razonables es mezclar adecuadamente
# formulas y valores conocidos, y completar lo desconocido con
# hipotesis razonables.
#
# #### Ejercicio
#
# Sin googlear, anoten en un papel su estimacion de la magnitud de los
# siguientes valores:
# * Litros de agua en la tierra.
# * Pañales utilizados en Chile al año.
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Conjeturas razonables
# ## Litros de agua en la tierra.
#
# #### Conocimiento relevante
# * Radio de la tierra: 6.000 km
# * Profundidad media oceano: 5 km
#
# #### Hipótesis simplificadora
# * Tierra es una esfera
# * Superficie océano es 3/4 de la superficie terrestre
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Conjeturas razonables
# ## Litros de agua en la tierra.
#
# Superficie Oceano × Profundidad Océano = 3/4 Superficie Tierra × Profundidad Oceano
# $$= (3/4) 4 \pi 6000^2 [km^2] × (5 [km])$$
# $$= 3 · 5 · \pi · 36 · 10^6 [km^3 ]$$
# $$= \pi^4 · 10^7 [km^3]$$
# $$= 10^9 [km^3]$$
#
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
# * Conjetura Razonable:
# $$10^9 [km^3]$$
#
# * Valor correcto:
# $$1.4 · 10^9 [km^3 ]$$
#
# 40 % de error para un calculo que tomó menos de 2 minutos.
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Conjeturas razonables
# ## Pañales utilizados en Chile al año
#
# #### Conocimiento relevante
# * Población en Chile: 17 millones.
# * Un bebé usa 4 pañales por día hasta los 2 años.
#
# #### Hipótesis simplificadora
# * La tasa de mortalidad es constante en el tiempo.
# * El número de bebés es proporcional al número de nacimientos anuales.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Conjeturas razonables
# ## Pañales utilizados en Chile al año
#
# Numero de Bebes en Chile × Pañales por Bebe por Día × Días en el año
#
# = Número de Bebés en Chile × 4 [pañales/día] × 365 [días/año]
#
# = Numero de Bebes en Chile × $10^3$ [pañales/año]
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Conjeturas razonables
# ## Pañales utilizados en Chile al año
#
# ¿Cómo estimar el número de bebés?
#
# * Los bebés usan pañales hasta los 2 años.
# * Por lo tanto el número de bebés en Chile es, aproximadamente, 2 veces
# el número de nacimientos anuales.
# * Si asumimos que la población vive hasta los 100 años, y la tasa de
# mortalidad es constante, podemos estimar que nacen al año
# $17 · 10^6 / 50 = 34 · 10^4$ bebés.
# * Por tanto, hay $\approx 7 · 10^5$ bebés entre 0 y 2 años.
#
# * En conclusión, en Chile se utilizan $\sim 7 · 10^8$ pañales al año.
# * Lo más correcto es decir: en Chile se utilizan entre $10^8$ y $10^9$
# pañales al año.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Conjeturas razonables
# ## Pañales utilizados en Chile al año
#
# * Conjetura Razonable: $7 · 10^8$ [pañales/año]
# * Valor correcto: No hay
#
# El INE cifra en $2.4 · 10^5$ nacimientos anuales, y nuestra estimación fue de $3.4 · 10^5$ nacimientos anuales-
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Unidades y Dimensiones
#
# Para poder comparar 2 mediciones, éstas deben tener las mismas
# dimensiones y las mismas unidades.
#
# + [markdown] slideshow={"slide_type": "fragment"}
# #### Ejemplo Trivial
# “Sebastian mide 5.7 pies y Camilo mide 2.0 metros.
# Evidentemente Sebastian es mas alto, pues 5.7 > 2.0”
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Unidades y Dimensiones
# #### Ejemplo no trivial
# "In Nigeria, a relatively economically strong country, the GDP
# [gross domestic product] is USD 99 billion. The net worth of Exxon is
# USD 119 billion. When multi- nationals have a net worth higher than
# the GDP of the country in which they operate, what kind of
# power relationship are we talking about?"
# + [markdown] slideshow={"slide_type": "fragment"}
# * GDP (PIB) se mide anualmente, por lo que la unidad correcta sería en
# USD/año.
# * El valor neto de la compañía se mide en USD. Las unidades no
# son consistentes y la comparacion es incorrecta.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Análisis dimensional de fórmulas
# Podemos aplicar análisis dimensional para obtener estimaciones de
# fórmulas complejas.
# #### Ejemplo motivador
# $$A_a = \int_{-\infty}^{+\infty} e^{-ax^2} dx$$
#
# ¿Qué relación es correcta?:
# * $A_a \sim a$
# * $A_a \sim \sqrt{a} $
# * $A_a \sim 1/\sqrt{a} $
# * $A_a \sim 1/a$
# + [markdown] slideshow={"slide_type": "slide"}
# #### Análisis dimensional de fórmulas
# ## Observación crucial
# Funciones tales como $e^x$, $\sin(x)$, $log(x)$ sólo pueden aplicarse a variables adimensionales.
#
# ¿ Porqué ?
# + [markdown] slideshow={"slide_type": "fragment"}
# Considere la expansión en Serie de Taylor de las funciones:
# $$e^x = 1 + x + \frac{1}{2!}x^2 + \frac{1}{3!} x^3 + ... $$
# $$\sin(x) = x - \frac{1}{3!}x^3 + \frac{1}{5!} x^5 + ...$$
# $$log(1+x) = x - \frac{1}{2!}x^2 + \frac{1}{3!} x^3 - \frac{1}{4!} x^4 + ... $$
# El lado derecho sólo puede tener sentido si $x$ es adimensional.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Análisis dimensional de fórmulas
# ## Análisis dimensional del ejemplo
#
# $$A_a = \int_{-\infty}^{+\infty} e^{-ax^2} dx$$
#
# * Supongamos que $x$ tiene una cierta unidad $U$.
#
# * $e^{-ax^2}$ es adimensional, por lo tanto $a$ tiene dimensiones de $1/U^2$.
# * $dx$ tiene unidades de $U$.
# * La integral representa solo suma infinitesimal, por lo que no cambia las unidades.
# * $A_a$ debe tener unidades de $1 \times U$.
#
# Por lo tanto, $A_a$ debe ser proporcional a $1/\sqrt{a}$.
#
# Por supuesto, la respuesta exacta es $\sqrt{\pi/a}$
# + [markdown] slideshow={"slide_type": "slide"}
# #### Análisis dimensional de fórmulas
# ## Ejemplo 2
#
# $$A_b = \int_{-\infty}^{+\infty} e^{-bx} dx$$
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Análisis dimensional de fórmulas
# ## Análisis dimensional del ejemplo 2
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Análisis Dimensional
#
# #### Definición
# Análisis Dimensional: simplificación de un fenómeno al reducir las
# magnitudes físicas involucradas al mínimo número posible.
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Casos
# * Si el fenómeno no tiene ecuacion(es) asociadas conocidas: utilizaremos el **Teorema Buckingham** (aka Teorema $\Pi$) que entrega las posibles variables adimensionales, es decir, el espacio dimensional mínimo a estudiar.
#
# * Si el fenómeno si tiene ecuacion(es) asociadas conocidas: realizaremos la **Adimensionalización** de las ecuaciones permite expresarla de la manera más compacta posible.
| clases/Unidad2-HerramientasTransversalesEnIngenieria/Clase01-EducatedGuessing/.ipynb_checkpoints/base-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from kafka import KafkaConsumer
import time
def log(message):
    """Print *message* prefixed with a local timestamp.

    The parameter was renamed from `str`, which shadowed the builtin.
    Note the time-of-day part uses '-' separators (filename-safe), not ':'.
    """
    t = time.strftime(r"%Y-%m-%d %H-%M-%S", time.localtime())
    print("[%s]%s" % (t, message))
log('start consumer')
# Blocks forever: iterating a KafkaConsumer waits for new messages on topic
# 'test3' and logs topic/partition/offset/key/value for each one.
consumer=KafkaConsumer('test3',bootstrap_servers=['192.168.19.137:9092'])
for msg in consumer:
    recv = "%s:%d:%d: key=%s value=%s" %(msg.topic,msg.partition,msg.offset,msg.key,msg.value)
    log(recv)
# -
| wenben_project/reco_sys/offline/full_cal/KafkaConsumer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predicting patient death in the ICU
# Team members included: <NAME>, <NAME>, <NAME>, <NAME>
# This blog post will describe the process of analyzing ICU data using machine learning methods and creating a dashboard using Streamlit. I will also describe what I learned while completing this project. The data used was obtained from the Medical Information Mart for Intensive Care (MIMIC-III) and can be found at https://mimic.mit.edu/.
#
# **View dashboard visualization: https://share.streamlit.io/delashu/bios823_project/main/scripts/dashboard/icu_dash.py**
#
# **View github repository: https://github.com/delashu/BIOS823_Project**
# Our primary aim was to predict death in the ICU. Specifically, we wanted to utilize patient demographics, prescibed medications, procedures, and admission information to predict the probability of a patient dying in the ICU. Mortality risk predictions can be useful for hospitals to determine where to allocate resources and for doctors to better understand their patients' outcomes.
#
# Source: https://link.springer.com/chapter/10.1007/978-3-319-43742-2_21#:~:text=A%20number%20of%20severity%20of,SOFA)%20score%20%5B7%5D.
# In order to obtain access to the data, we had to complete CITI training. Because the data set was so large, we began by using a subset of the data, which can be found here: https://physionet.org/content/mimiciii-demo/1.4/. We used four different data sets: admissions, ICU, procedures, and prescriptions.
# The first step was to perform feature engineering. We initialized an SQL database using sqlite3 with the necessary data frames. We queried the data using SQL as shown in the example code below. We initially tried using GoogleBit Query, but determined that a SQL database would be equally effective.
# Load each MIMIC-III table from the local SQLite database into a pandas
# DataFrame. NOTE(review): `sqlite3` and `pandas as pd` must be imported in an
# earlier (unshown) cell, and the connection is never closed here.
con = sqlite3.connect('MIMIC3_full.db')
admitdf = pd.read_sql('select * from admission', con)
icudf = pd.read_sql('select * from icu', con)
procdf = pd.read_sql('select * from procedure', con)
drugdf = pd.read_sql('select * from prescription', con)
# **Feature Engineering**
# We identified the procedures and prescriptions with the most occurences in the data. Then, we created crosstables with the top 8 procedures and top 20 prescriptions in order to limit the number of features that would be included in the model.
# The 20 most frequently prescribed drug codes in the data.
top_twenty = drugdf['formulary_drug_cd'].value_counts().head(20).index.tolist()
# Hard-coded copy of the top-20 codes that is pickled for reuse.
# NOTE(review): `top_twenty` computed above is not used afterwards — confirm
# the literal list below still matches it on the full (non-demo) data.
prescription_list = ['ACET325', 'CALG1I', 'D5W1000', 'D5W250', 'FURO40I',
       'HEPA5I', 'INSULIN', 'KCL20P', 'KCL20PM', 'KCLBASE2', 'LR1000',
       'MAG2PM', 'METO25', 'MORP2I', 'NACLFLUSH', 'NS1000', 'NS250', 'NS500',
       'VANC1F', 'VANCOBASE']
with open("../../crosstables/prescription_list.txt", "wb") as dl:
    pickle.dump(prescription_list, dl)
# Next, we created a function to perform feature engineering. This function takes the four data frames as inputs, cleans and merges the data, and returns an analytic data frame to be used for modeling. The code below shows an example how the data frames were merged and how we dealt with missingness (code is not exhaustive). Using lists of top procedures and medications, we subsetted the relevant data frames to those lists and converted the data to a long format, so that procedures and prescriptions could be used as features (columns) in the model. We converted nas to 0 (patient did not have that procedure or medication) in these columns. Similarly, for our outcome of death, we converted nas to 0 because we could not confirm that that patient had died. Using domain knowledge of diagnoses in the ICU that have a high likelihood of leading to death (https://pubmed.ncbi.nlm.nih.gov/17083735/), we categorized diagnoses into four groups and labelled the remaining diagnoses as "other". We determined the top two wards that had the most deaths and labelled the remaining wards as "other".
#
# One limitation is that we should have used a similar method for admission location and first care unit as we later realized that there were values that appeared in the full data that did not appear in the demo data, which caused us to have to reevaluate all the features when modeling.
# +
# merge icu and admissions data frames (left join on hospital admission id,
# then drop the duplicated subject id column produced by the merge)
icu_admin = pd.merge(icudf, admitdf, how='left', on='hadm_id')
icu_full = (
    icu_admin.
    drop(columns=['subject_id_y']).
    rename(columns={"subject_id_x": "subject_id"})
)
# Keep only the top procedures and rows tied to an ICU stay, then count each
# procedure per (subject, icu stay).
procdf = procdf[procdf['ordercategoryname'].isin(procedure_list)].reset_index()
procdf = procdf[procdf['icustay_id'].notna()]
myproc_counts = procdf.groupby(['subject_id', 'icustay_id', 'ordercategoryname']).size().reset_index(name='counts')
# convert data to long format with procedures as columns
# (a missing cell means the stay had none of that procedure, hence the 0 fill)
myproc_counts_long = myproc_counts.pivot(index = ['subject_id','icustay_id'],
                                        columns = 'ordercategoryname',
                                        values = 'counts').reset_index()
myproc_counts_long = myproc_counts_long.replace(np.nan,0)
# convert nas in the outcome column to 0 (death could not be confirmed)
analyticdf['hospital_expire_flag'] = analyticdf['hospital_expire_flag'].fillna(0)
# categorize diagnoses (based on domain knowledge); case-insensitive substring
# matches bucket free-text diagnoses into four groups, everything else "Other".
# First match wins in this nested np.where chain.
analyticdf['diagnosis'] = np.where(analyticdf['diagnosis'].str.contains("congestive heart failure", case=False), "CV Failure",
                          np.where(analyticdf['diagnosis'].str.contains("sepsis", case=False), "Sepsis",
                          np.where(analyticdf['diagnosis'].str.contains("seizure", case=False), "CNS Failure",
                          np.where(analyticdf['diagnosis'].str.contains("stroke", case=False), "CNS Failure",
                          np.where(analyticdf['diagnosis'].str.contains("tia", case=False), "CNS Failure",
                          np.where(analyticdf['diagnosis'].str.contains("ACUTE CHOLANGITIS", case=False), "Organ Failure",
                          np.where(analyticdf['diagnosis'].str.contains("GI BLEED", case=False), "Organ Failure",
                          np.where(analyticdf['diagnosis'].str.contains("lung failure", case=False), "Organ Failure",
                          np.where(analyticdf['diagnosis'].str.contains("liver failure", case=False), "Organ Failure",
                          np.where(analyticdf['diagnosis'].str.contains("MYOCARDIAL INFARCTION", case=False), "CV Failure", "Other"))))))))))
# -
# **Model Building**
# Now, let's discuss the modeling! The analytic dataframe from feature engineering was loaded and categorical variables were converted to dummy variables. Training and test datasets were created using scikit-learn's default 0.75/0.25 split as shown below. We ran several classification models including: dummy classifier, logistic regression, decision tree, KNN, SVC, random forest, xgboost random forest, and catboost. We assessed these models using area under the curve (AUC). Results are shown below.
# Stratified split (scikit-learn default 75/25) with a fixed seed, so the
# rare death outcome stays proportionally represented in both sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1125, stratify=y)
DummyClassifier AUC:0.497 STD: 0.15
LogisticRegression AUC:0.700 STD: 0.22
DecisionTreeClassifier AUC:0.597 STD: 0.14
KNeighborsClassifier AUC:0.640 STD: 0.21
SVC AUC:0.714 STD: 0.17
RandomForestClassifier AUC:0.762 STD: 0.14
XGBRFClassifier AUC:0.738 STD: 0.15
CatBoostClassifier AUC:0.749 STD: 0.20
# We also performed stacking of the models to explore the models and help us determine which model to use. We chose to use xgboost random forest classifier for our prediction model and peformed a grid search to determine the best hyperparameters. Finally, we created a confusion matrix, ROC curve, precision-recall curve, and learning curve to assess our model. The plots of these curves showed that our xgboost random forest model performed well. The mean prediction accuracy for our model trained on the demo data was 76%. The code below shows the grid search and resulting best parameters.
# +
# create and optimize model using xgboost random forest classifier
clf = xgboost.XGBRFClassifier(eval_metric='logloss')
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# AUC of the fitted baseline model on the held-out test set
roc_auc_score(
    y_test, clf.predict(X_test)
)
# Fresh estimator for the hyperparameter search
clf_ = xgboost.XGBRFClassifier(eval_metric='logloss')
params = {
    'min_child_weight': [1, 5, 10],
    'gamma': [0, 0.5, 1, 1.5, 2, 5],
    'colsample_bytree': [0.6, 0.8, 1.0],
    'max_depth': [4, 5, 6, 7],
}
# perform grid search to determine best parameters (n_jobs=-1: all cores)
clf = model_selection.GridSearchCV(
    clf_, params, n_jobs=-1,
).fit(X_train, y_train)
clf.score(X_test, y_test)
# Re-instantiate a classifier with the winning hyperparameters
clf_best = xgboost.XGBRFClassifier(**clf.best_params_)
# -
'min_child_weight': 1,
'gamma': 1,
'colsample_bytree': 1.0,
'max_depth': 7,
# Using the best parameters from this model, we ran the xgboost random forest classifier model on the full data and peformed a grid search. This allowed the grid search to run more efficiently as we were able to specify a smaller set of parameters. Without this information, the grid search would have taken over 24 hours to complete. The grid search produced the best parameters that we then used to make predictions. We calculated the same metrics on the model using the full dataset (confusion matrix, precision-recall curve, ROC curve, and learning curves) and saved the model as a pickle file to be used for our dashboard. The prediction accuracy was 90%, which was quite a bit higher than our baseline model.
# Grid search on the FULL dataset, with the grid narrowed around the best
# parameters found on the demo data so the search finishes in reasonable time.
clf_ = xgboost.XGBRFClassifier(eval_metric='logloss')
params = {
    'min_child_weight': [1, 5],
    'gamma': [1, 1.5],
    'colsample_bytree': [0.8, 1.0],
    'max_depth': [4, 7],
}
clf = model_selection.GridSearchCV(
    clf_, params, n_jobs=-1,
).fit(X_train, y_train)
clf.best_params_
clf.score(X_test, y_test)
# Final model configured with the best parameters from the search
clf_best = xgboost.XGBRFClassifier(**clf.best_params_)
# **Dashboard Creation**
# The final step was creating the product. We chose to visualize the data and ouput predictions from our model through a dashboard. The dashboard was created using Streamlit with intentions of taking user inputs for the different features (characteristics of a patient) to output a prediction of the probability of that patient dying. We integrated interactive exploratory plots of the continuous and categorical variables in our dataset. We also add user inputs for the different features in our model and used the user inputs to create a dataframe that we could input into our model to make a prediction. This dataframe is similar to one row in our original analytic dataframe. The dashboard would be useful for a doctor, who could input the characteristics of their patient and determine the probability their patient dying. Below is a sample of the code used in the dashboard.
# +
# Streamlit dashboard excerpt: collect patient features from user widgets,
# assemble them into a single-row frame matching the model's feature order,
# and display the predicted death probability as a 0-100 score.
# NOTE(review): `col5`-`col7`, `DW_select`, `fw_select`, `pred_df`, and `clf`
# are defined in parts of the dashboard script not shown here.
# user inputs
st.subheader(str("**II: Predictive Modeling**"))
st.markdown(str("Input patient information below to output risk of ICU death."))
los_select = col5.number_input('Length of Stay', value = 5)
ACE_select = col6.number_input('ACET325', value=0, step=1)
CAL_select = col7.number_input('CALG1I', value=0, step=1)
# make pandas dataframe from the user input with default values
predat = {'los': [los_select], 'ACET325': [ACE_select],'CALG1I': [CAL_select], 'D5W1000': [DW_select],
         'D5W250': [DW2_select]}
# data frame variables need to match features of model
# set categorical features based on user input (one-hot: flag the chosen ward)
if fw_select == "52":
    pred_df['first_wardid_52'] = 1
elif fw_select == "Other":
    pred_df['first_wardid_Other'] = 1
#make np dataframe into array
pred_array = pred_df.to_numpy()
#make prediction! ([:,1] selects the probability of the positive class: death)
prediction = clf.predict_proba(pred_array[:1])[:,1]
# output death predictions as score from 0 to 100
model_score = round(100*prediction[0],2)
#output the score
st.subheader(str("**Patient Death Prediction Score**"))
st.metric(label="Model Risk Prediction Score", value=str(model_score) + str("%"))
# -
# **Individual Contributions**
# I contributed to various components of this project. I helped with the feature engineering. I explored the data and specifically the missingness and decided how to deal with these values. For example, I relabelled nas in other in many of the columns and as 0 in the outcome column. Once the crosstables of prescriptions and procedures had been created, I helped with writing a function that would take the four dataframes of interest, merge and clean the data, and output an analytic dataframe. I helped merge the dataframes and confirm that the data looked like what we expected. I determined how to categorize diagnoses. Once we had the analytic dataframe, I ran the baseline modeling on the demo data. This involved processing the data and splitting into training and test sets before running several classification models. I converted categorical variables into dummy variables and ran the models. I compared the AUC and SD of each of the models, performed stacking, and grid search to determine the best parameters. I passed along the best parameters to be used for the full data. I created plots of the confusion matrix, ROC curve, precision-recall curve, and learning curves. I saved the best model and used that to create a skeleton of model predictions in our dashboard until the full data could be used. I matched the user inputs to the model features by creating a dataframe from the user inputs that were in the same order and had the same number of features as in the model. This was long process because we didn't want the user inputs on the dashboard to look clunky, so we had to use a lot of if, else statements to create the dataframe. I, then, deployed the best model onto the dashboard that took the user inputs and would make a prediction of the probability of a patient dying in the ICU. I helped with publishing the dashboard.
#
# Through completing this project, I learned how to perform machine learning algorithms in python and improved both my content knowledge of classification models and python proficiency with running models and making predictions. It was difficult to prioritize work on this project along with many other group projects, but I learned to set goals for each of our team meetings and slowly chip away at tasks for the project to keep it moving. I learned to utilize the skill sets of my team members as some were good at envisoning the dashboard and its different components and some were comfortable working with big data sets and to build on my own strengths of thinking through how the analytic data should look and trying different analytic methods. This project was a good culmination of the course as we got to implement different concepts that we learned and had the freedom to create a data product of our choice. I enjoyed exploring "real world" data and the different features as it felt like we were tackling a very relevant problem. I also enjoyed problem solving to figure out how to best create the analytic data frame and how to make model predictions from user inputs.
| _notebooks/2021-11-22-ICU_predictions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Predict student admission based on marks in two exams
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
#load file data
import os
path=os.getcwd()#gets current working directory
path=path+'/andrew_ng_data/exercise2'
data=pd.read_csv(path,header=None,names=['Exam1','Exam2','Admitted'])
data.head()
positive=data[data['Admitted'].isin([1])]
negative=data[data['Admitted'].isin([0])]
positive.head()
negative.head()
fig, ax=plt.subplots(figsize=(12,8))
ax.scatter(positive['Exam1'],positive['Exam2'],s=50,marker='o',c='b',label='Admitted')
ax.scatter(negative['Exam1'],negative['Exam2'],s=50,marker='x',c='r',label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam1 score')
ax.set_ylabel('Exam2 score')
def sigmoid(z):
    """Logistic activation: squash a scalar or ndarray into the open interval (0, 1)."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)
nums=np.arange(-10,10,step=1)
fig, ax=plt.subplots(figsize=(12,8))
ax.plot(nums, sigmoid(nums),'r')
#define cost function
def cost(theta, X, y):
    """Mean cross-entropy loss of logistic regression.

    Inputs are promoted to np.matrix so that `*` means matrix
    multiplication, matching the rest of this notebook.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    h = sigmoid(X * theta.T)
    pos_term = np.multiply(-y, np.log(h))
    neg_term = np.multiply(1 - y, np.log(1 - h))
    return np.sum(pos_term - neg_term) / len(X)
#add ones column
data.insert(0,'Ones',1)
#set X,y
cols=data.shape[1]
X=data.iloc[:,0:cols-1]
y=data.iloc[:,cols-1:cols]
#initialize parameter arrays
X=np.array(X.values)
y=np.array(y.values)
theta=np.zeros(3)
X.shape, y.shape, theta.shape
cost(theta,X,y)
def gradient(theta, X, y):
    """Gradient of the (unregularized) logistic-regression cost w.r.t. theta.

    Returns a flat ndarray with one partial derivative per parameter.
    """
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    n_params = int(theta.ravel().shape[1])
    error = sigmoid(X * theta.T) - y
    grad = np.zeros(n_params)
    for j in range(n_params):
        grad[j] = np.sum(np.multiply(error, X[:, j])) / len(X)
    return grad
import scipy.optimize as opt
result = opt.fmin_tnc(func=cost, x0=theta, fprime=gradient, args=(X, y))
cost(result[0], X, y)
def predict(theta, X):
    """Return a list of 0/1 labels: 1 wherever P(admitted) >= 0.5."""
    probs = sigmoid(X * theta.T)
    return [1 if p >= 0.5 else 0 for p in np.ravel(probs)]
theta_min=np.matrix(result[0])
predictions=predict(theta_min,X)
# Score the fitted model on the training data.
correct = [1 if (a == 1 and b == 1) or (a == 0 and b == 0) else 0
           for (a, b) in zip(predictions, y)]
# Accuracy is the fraction of correct predictions (x100 for a percentage).
# The original used `%` (modulo) instead of `/`, which reported a
# meaningless length-dependent number rather than the accuracy.
accuracy = 100.0 * sum(map(int, correct)) / len(correct)
print('accuracy={0}%'.format(accuracy))
| machine_learning/Regression_exmaple/LogisticRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Header cell
from __future__ import division
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
plt.ion()
# Specific things needed
import time
import math
import sys
# Add parent directory to path
sys.path.append('../code/')
sys.path.append('../sim/')
# Import deft modules
import deft_1d
import simulate_data_1d
import utils
# +
# Simulate data
N = 300
data_type = 'wide'
# Simulate data and get default deft settings
data, defaults = simulate_data_1d.run(data_type,N)
# +
# Deft parameter settings
G = 200
alpha = 3
bbox = [-10,10]
# Do density estimation
results = deft_1d.run(data, G=G, alpha=alpha, \
bbox=bbox,
periodic=False,
num_samples=0,
print_t=False,
tollerance=1E-3)
# -
# Compute true density
xs = results.bin_centers
Q_true = np.zeros(G)
for i, x in enumerate(xs):
Q_true[i] = eval(defaults['pdf_py'])
Q_true /= results.h*sum(Q_true)
# +
# Plot density estimate
# Make figure and set axis
plt.figure(figsize=[6, 6])
ax = plt.subplot(1,1,1)
# Plot histogram density
left_bin_edges = results.bin_edges[:-1]
plt.bar(left_bin_edges, results.R, \
width=results.h, linewidth=0, color='gray', zorder=0, alpha=0.5)
# Plot deft density estimate
plt.plot(xs, results.Q_star, \
color='blue', linewidth=2, alpha=1, zorder=2)
# Plot the true density
plt.plot(xs, Q_true, color='black', linewidth=2)
# Tidy up the plot
#plt.yticks([])
plt.ylim([0, 1.2*max(results.Q_star)])
plt.xlim(results.bbox)
t = results.deft_1d_compute_time
plt.title("%s, $\\alpha = %d$, t=%1.2f sec"%(data_type, alpha, t), \
fontsize=10)
# Save plot
plt.savefig('report.test_deft_1d.png')
# -
# Something is wrong. This distribution is clearly biomodal. I think the regularization I'm using isn't properly implemented when computing the evidence ratio.
# +
# Derivative operator
L = results.G*results.h
h = L/G
delta_alpha_unitless=results.Delta.get_sparse_matrix()
delta_alpha=delta_alpha_unitless/(h**(2*alpha)) # Give delta unites
Q_star = results.Q_star
phi_star = -np.log(Q_star) - np.log(L)
t_star = results.t_star
l_star = results.l_star
PHI_STD_REG = utils.PHI_STD_REG
hessian = (h*l_star**(2*alpha)/L)*delta_alpha + (h*N/L)*np.diag(np.exp(-phi_star)) + (N/G)*np.eye(G)/(N*PHI_STD_REG**2)
# Compute spectrum
Q=np.linalg.eig(hessian)
e_vals = Q[0]
e_vecs = Q[1]
# Reorder eigenvalues and eigenvectors from smallest to largest
indices = np.argsort(e_vals)
e_vals = e_vals[indices]
e_vecs = e_vecs[:,indices]
# +
# Define actions
def S(x):
    """Full DEFT action evaluated at a length-G field configuration x.

    Terms, in order: the smoothness penalty built from the alpha-th
    derivative operator delta_alpha, the data term coupling x to the raw
    histogram results.R, the normalization term sum(exp(-x)), and a weak
    Gaussian regularizer on x.  Relies on the notebook-level globals
    h, l_star, alpha, L, delta_alpha, N, results, G and PHI_STD_REG —
    assumes they are all defined before this is called.
    """
    x=np.mat(np.ravel(x))  # row vector so `*` is matrix multiplication
    return (h*l_star**(2*alpha)/(2*L))*(x*delta_alpha*x.T)[0,0] \
        + (h*N*np.mat(results.R)*x.T)[0,0] \
        + (h*N/L)*np.sum(np.exp(-x)) \
        + (N/G)*0.5*(x*x.T)[0,0]/(N*PHI_STD_REG**2)
# Define Laplacian action
def S_lap(x):
    """Laplace (quadratic) approximation of the action around phi_star.

    Evaluates 0.5 * (x - phi_star) H (x - phi_star)^T using the
    notebook-level `hessian` and `phi_star`.  The constant S(phi_star)
    offset is omitted, so compare against S(x) - S(phi_star).
    """
    x = np.mat(np.ravel(x))
    return 0.5*((x - phi_star) * hessian * (x - phi_star).T)[0,0]
# Define correction action
def S_cor(phi_star, dphi):
    """Beyond-quadratic remainder of the exp(-phi) normalization term.

    Expands exp(-(phi_star + dphi)) and subtracts its second-order Taylor
    polynomial in dphi, weighted like the normalization term of S.
    Relies on the notebook-level globals h, N and L.
    """
    base = np.ravel(phi_star)
    step = np.ravel(dphi)
    remainder = np.exp(-step) - 1 + step - 0.5 * step ** 2
    return (h * N / L) * np.sum(np.exp(-base) * remainder)
# +
# Compute list of modes to MCMC
z = 3.0
T_max = 0.1
Ts = np.zeros(G)
S0 = S(phi_star)
for g in range(G):
e_vec = np.ravel(e_vecs[:,g])
e_val = e_vals[g]
dphi = z*e_vec/np.sqrt(e_val)
T_right = S_cor(phi_star,dphi)
T_left = S_cor(phi_star,-dphi)
Ts[g] = max(abs(T_right),abs(T_left))
modes = Ts > T_max
plt.semilogy(Ts)
plt.title('Num modes to MCMC: %d'%sum(modes))
# +
xs = np.linspace(-3,3,100) # x-axis of plot
S0 = S(phi_star)
S0_lap = S_lap(phi_star)
num_modes = sum(modes)
num_cols = 5
num_rows = np.ceil(num_modes/num_cols)
gs = np.arange(G)[modes]
plt.figure(figsize=(3*num_cols,3*num_rows))
for m,g in enumerate(gs):
plt.subplot(num_rows,num_cols,m+1)
e_vec = e_vecs[:,g].T
e_val = e_vals[g]
v = e_vec / np.sqrt(e_val) # direction of perturbation
dSs_real = np.zeros(len(xs))
dSs_lap = np.zeros(len(xs))
for k,x in enumerate(xs):
phi = phi_star + x*v
dSs_real[k] = S(phi)-S0
dSs_lap[k] = S_lap(phi)-S0_lap
plt.plot(xs,dSs_lap,'b',linewidth=3,alpha=.5)
plt.plot(xs,dSs_real,'g',linewidth=3,alpha=.5)
plt.ylim([-0.5,max(dSs_lap)])
plt.title('m = %d, T=%1.1e'%(m,Ts[g]))
plt.tight_layout()
plt.show()
# +
# Draw phi from laplace approximation
def laplace_phi(phi_star, e_vals, e_vecs):
    """Draw one field sample from the Laplace (Gaussian) approximation.

    Each of the K eigendirections contributes an independent Gaussian
    fluctuation with standard deviation 1/sqrt(e_vals[k]) around phi_star.
    """
    num_gridpoints, num_modes = e_vecs.shape
    assert len(phi_star) == num_gridpoints
    assert len(e_vals) == num_modes
    # Accumulate fluctuations mode by mode (one randn draw per mode).
    sample = np.ravel(phi_star.copy())
    for mode in range(num_modes):
        direction = np.ravel(e_vecs[:, mode])
        sample += np.random.randn() * direction / np.sqrt(e_vals[mode])
    return sample
# Add Laplace approx phis to MCMC sampled phis
def add_laplace_to_mcmc(mcmc_phis, lap_e_vals, lap_e_vecs):
    """Superimpose Gaussian (Laplace) fluctuations of the non-MCMC modes
    onto every MCMC-sampled field.

    mcmc_phis holds one phi per column; each of the K Laplace modes adds
    an independent N(0, 1/e_val) fluctuation along its eigenvector.
    Returns a new array; the input is not modified.
    """
    num_gridpoints, num_samples = mcmc_phis.shape
    num_modes = len(lap_e_vals)
    assert lap_e_vecs.shape == (num_gridpoints, num_modes)
    augmented = mcmc_phis.copy()
    for col in range(num_samples):
        for mode in range(num_modes):
            fluctuation = np.random.randn() * np.ravel(lap_e_vecs[:, mode])
            augmented[:, col] += fluctuation / np.sqrt(lap_e_vals[mode])
    return augmented
# +
# Convert phis to Qs
def phis_to_Qs(phis, h):
    """Convert each field column phi into a normalized density Q.

    Q = exp(-phi) / (h * sum(exp(-phi))), so sum(h * Q) == 1 on a grid
    with bin width h.  Returns a (G, N) array, one density per column.
    """
    num_gridpoints, num_samples = phis.shape
    densities = np.zeros((num_gridpoints, num_samples))
    for col in range(num_samples):
        weights = np.exp(-np.ravel(phis[:, col]))
        densities[:, col] = weights / (h * weights.sum())
    return densities
# Compute entropies
def Qs_to_entropies(Qs, h):
    """Differential entropy -sum(h * Q * log Q) of each density column.

    The tiny 1e-10 offset inside the log guards against log(0) for empty
    bins.  Returns one entropy value per column of Qs.
    """
    num_samples = Qs.shape[1]
    entropies = np.zeros(num_samples)
    for col in range(num_samples):
        density = np.ravel(Qs[:, col].copy())
        entropies[col] = -np.sum(h * density * np.log(density + 1E-10))
    return entropies
# -
# Function to do MCMC sampling
def mcmc(num_steps, action, phi0, e_vals, e_vecs, record_every=1, tol=1E-6):
    """Metropolis sampling of exp(-action) along the given eigenmodes.

    Each step perturbs the current field along one randomly chosen
    eigenvector with step size ~ N(0, 1/e_val) and accepts via the usual
    Metropolis rule.  Every `record_every`-th field is recorded.

    Returns a (G, num_samples) array of sampled fields.

    Fixes relative to the first draft:
      * the normalization check used the outer product's [0,0] entry
        ((e_vec.T*e_vec)[0,0]), which only tests the first component;
        the inner product is used instead
      * sample_num used true division (float under
        `from __future__ import division`), which is not a valid index;
        floor division is used
      * action_values was overwritten by a scalar instead of indexed
    """
    # Get dimensions
    G = e_vecs.shape[0]
    K = e_vecs.shape[1]
    assert K <= G, 'G == %d, K == %d' % (G, K)
    # Make sure there are the proper number of eigenvalues
    assert len(e_vals) == K
    # Make sure e_vecs are unit-norm (inner product == 1)
    for g in range(K):
        e_vec = np.mat(e_vecs[:, g])
        dot = (e_vec * e_vec.T)[0, 0]
        assert abs(dot - 1.0) < tol, 'v.T*v == %f' % dot
    # Make sure phi0 matches the grid
    assert len(phi0) == G
    # Initialize phi_current
    phi_current = np.ravel(phi0.copy())
    # Reserve memory for samples
    num_samples = int(np.floor((num_steps - 1) / record_every)) + 1
    phis_sampled = np.zeros([G, num_samples])
    action_values = np.zeros(num_samples)
    # Do MCMC loop
    acceptances = 0
    for n in range(num_steps):
        # Choose a mode uniformly at random
        g = np.random.randint(0, K)
        e_vec = np.ravel(e_vecs[:, g])
        e_val = e_vals[g]
        # Choose step: Gaussian with std 1/sqrt(e_val) along the mode
        step_length = np.random.normal(0, 1.0)
        step_vector = (step_length / np.sqrt(e_val)) * e_vec
        phi_new = phi_current + step_vector
        # Accept or reject (Metropolis rule)
        u = np.random.uniform(0, 1)
        if u < np.exp(action(phi_current) - action(phi_new)):
            phi_current = phi_new
            acceptances += 1
        # Record sample if due
        if n % record_every == 0:
            sample_num = n // record_every  # integer index
            phis_sampled[:, sample_num] = phi_current
            action_values[sample_num] = action(phi_current)
    # Report acceptance (parenthesized so this runs under Python 2 and 3)
    print('Acceptance rate = %0.1f%%' % (100. * acceptances / num_steps))
    # Return sampled phis
    return phis_sampled
# +
# Track convergence of samples
num_chains=10
num_steps=1000
record_every = sum(modes)
K = sum(modes)
num_samples = int(np.floor((num_steps-1)/record_every)+1)
# Run MCMC and compute chains
mcmc_e_vals = e_vals[modes]
mcmc_e_vecs = e_vecs[:,modes]
lap_e_vals = e_vals[~modes]
lap_e_vecs = e_vecs[:,~modes]
entropies = np.zeros([num_samples,num_chains])
Ts = np.zeros([num_samples,num_chains])
mcmc_phis = np.zeros([G,num_samples])
phis = np.zeros([G,num_samples])
for m in range(num_chains):
# Use Laplace approximation to set initial phi
#phi0 = laplace_phi(phi_star,e_vals,e_vecs)
# Do MCMC sampling
mcmc_phis = mcmc(num_steps, S, phi_star, mcmc_e_vals, mcmc_e_vecs, record_every)
# Add Laplace components
phis = add_laplace_to_mcmc(mcmc_phis,lap_e_vals,lap_e_vecs)
# Compute weights
for n in range(num_samples):
dphi = np.ravel(phis[:,n])-np.ravel(phi_star)
mcmc_dphi = np.ravel(mcmc_phis[:,n])-np.ravel(phi_star)
Ts[n,m] = S_cor(phi_star,dphi) - S_cor(phi_star,mcmc_dphi)
# Comptue entropies
Qs = phis_to_Qs(phis,h)
entropies[:,m] = Qs_to_entropies(Qs,h)
# Compute weights
weights = np.exp(-Ts)
# -
plt.figure(figsize=(15,5))
plt.imshow(weights.T,interpolation='nearest',vmin=0)
plt.colorbar()
# +
entropy_star = Qs_to_entropies(np.mat(Q_star).T,h)[0]
print entropy_star
# Plot chains
sample_nums = range(num_samples)
ax = plt.plot(entropies)
plt.axhline(entropy_star,linestyle=':',color='k')
# -
print np.mean(entropies,axis=0)
print np.var(entropies,axis=0)
# +
# MCMC sampling for S
# K is no. of metropolis iterations
num_steps=1000
num_modes=10
record_every = num_modes
# MCMC sampling for S
phis = mcmc(num_steps, S, phi_star, e_vals[:num_modes], e_vecs[:,:num_modes], record_every)
Qs = phis_to_Qs(phis,h)
# MCMC sampling for S_lap
phis_lap = mcmc(num_steps, S_lap, phi_star, e_vals[:num_modes], e_vecs[:,:num_modes], record_every)
Qs_lap = phis_to_Qs(phis_lap,h)
# Plot results
plt.figure(figsize=[5,5])
# Plot Qs for true action
plt.subplot(211)
plt.imshow(Qs.T,interpolation='nearest')
plt.title('S')
# Plot Qs for Laplace action
plt.subplot(212)
vmax = max(np.ravel(Qs))
plt.imshow(Qs_lap.T,interpolation='nearest',vmax=vmax, vmin=0)
plt.title('S_lap')
# -
| ipynb/16.12.17_jbk_mcmc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import seaborn as sns
import numpy as np
import pickle
# -
# %matplotlib inline
# +
import os
import pandas as pd
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier
from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier
from yellowbrick.classifier import ClassificationReport
from sklearn.model_selection import StratifiedKFold
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from yellowbrick.datasets import load_game
from yellowbrick.model_selection import LearningCurve
# -
# Load the prepared loan-level dataset and take a first look at it.
df1 = pd.read_pickle('orig_final.pkl')
type(df1)
len(df1)
# NOTE(review): a dangling `df2=` assignment (a syntax error that broke
# this cell) was removed here; nothing below ever referenced df2.
pd.set_option('display.max_columns', 30)
df1.head()
df1.describe()
# Show floats with two decimals in all further describe()/head() output.
pd.options.display.float_format = '{:.2f}'.format
df1.describe()
# %matplotlib inline
import matplotlib.pyplot as plt
df1.plot("orig_ltv", "orig_upb", kind="scatter")
df1.hist(column=["credit_score", "msa_code", "mi_percent"])
# +
dataset=df1
features = ['credit_score', 'mi_percent','property_type','borrower_cnt','orig_upb','orig_ltv']
target = ['curr_ln_delq_stas_1yr']
X = dataset[features]
y = dataset[target]
# +
# Encode the categorical data
X = OneHotEncoder().fit_transform(X)
y = LabelEncoder().fit_transform(y)
# Create the learning curve visualizer
cv = StratifiedKFold(n_splits=12)
sizes = np.linspace(0.3, 1.0, 10)
# Instantiate the classification model and visualizer
model = MultinomialNB()
visualizer = LearningCurve(
model, cv=cv, scoring='f1_weighted', train_sizes=sizes, n_jobs=6
)
visualizer.fit(X, y) # Fit the data to the visualizer
visualizer.show() # Finalize and render the figure
# +
dataset=df1
features = ['credit_score', 'mi_percent','property_type']
target = ['borrower_cnt']
X = dataset[features]
y = dataset[target]
# +
def score_model(X, y, estimator, **kwargs):
    """Fit `estimator` on (X, y) inside a one-hot-encoding pipeline and
    print its F1 score.

    Labels are integer-encoded and features one-hot encoded before
    fitting.  The score is computed on the same data the model was fit
    on, so it measures fit quality rather than generalization.
    """
    encoded_y = LabelEncoder().fit_transform(y)
    pipeline = Pipeline([
        ('one_hot_encoder', OneHotEncoder()),
        ('estimator', estimator),
    ])
    pipeline.fit(X, encoded_y.ravel(), **kwargs)
    predicted = pipeline.predict(X)
    print("{}: {}".format(estimator.__class__.__name__,
                          f1_score(encoded_y, predicted)))
models = [
SVC(gamma='auto'), NuSVC(gamma='auto')
]
for model in models:
score_model(X, y, model)
# +
# Try them all!
models = [
SVC(gamma='auto'), NuSVC(gamma='auto')
]
for model in models:
score_model(X, y, model)
# +
# Try them all!
models = [
SVC(gamma='auto'), LinearSVC(),
SGDClassifier(max_iter=100, tol=1e-3), KNeighborsClassifier(),
LogisticRegression(solver='lbfgs'), LogisticRegressionCV(cv=3),
BaggingClassifier(), ExtraTreesClassifier(n_estimators=100),
RandomForestClassifier(n_estimators=100)
]
for model in models:
score_model(X, y, model)
# -
df1.dtypes
sns.lmplot(x = "credit_score", y = "orig_int_rate", data=df1 )
sns.lmplot(y="orig_upb", x="orig_ltv", data=df1, fit_reg=False, hue="occupancy_status")
sns.set_style("whitegrid")
sns.violinplot(x="occupancy_status", y="orig_upb", data=df1)
sns.distplot(df1["orig_ltv"])
sns.jointplot(df1["orig_upb"], df1["orig_ltv"])
sns.lmplot(x="orig_upb", y="orig_ltv", data=df1, fit_reg=False, hue="curr_ln_delq_stas_1yr")
sns.lmplot(x="orig_upb", y="orig_ltv", data=df1, fit_reg=False, hue="curr_ln_delq_stas_1yr")
| Archive/code/Notebooks/All_algo_test_manish_v1.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab as p
import matplotlib.font_manager as font_manager
import matplotlib.gridspec as gridspec
from matplotlib import colors
# Set font
plt.rc('font', family='serif', size=18)
bw_df = pd.read_csv("../csv/bandwidth.csv")
figurepath="../figures/"
# compute diverse metrics
received_fields=[]
restransmits_fields=[]
snd_cwnd_fields=[]
rt_bw_ratio_fields=[]
rt_snd_cwnd_ratio_fields=[]
xticklabels_time=[]
xticks_time=[]
loss_rates=[x/100 for x in list(range(0,21))]
received_fields_sec = []
for s in range(20):
received_fields_sec.append("sum_received_at_{}".format(str(s)))
def bandwidth_per_sec(row):
    """Aggregate one row's 0.1 s bandwidth samples into per-second totals.

    Appends one "sum_received_at_<s>" field per second, each the sum of 10
    consecutive "sum_received_at_<t>" 0.1 s samples (200 inputs -> 20
    outputs).  The first draft accumulated misaligned buckets
    ([0..8], [9..18], ...: only 9 samples in the first bucket) and
    silently dropped the final 0.1 s sample; summing aligned chunks of 10
    fixes that off-by-one.
    """
    res = [sum(row[f] for f in received_fields[s * 10:(s + 1) * 10])
           for s in range(len(received_fields_sec))]
    return row.append(pd.Series(res, index=received_fields_sec))
for t in range(201):
if t != 200:
# Save up fields in order
received="sum_received_at_"+str(t/10)
received_fields.append(received)
retransmits="retransmits_at_"+str(t/10)
restransmits_fields.append(retransmits)
snd_cwnd="snd_cwnd_at_"+str(t/10)
snd_cwnd_fields.append(snd_cwnd)
# RT Data received at
rt_bw_ratio="retransmits_bandwidth_ratio_at_"+str(t/10)
rt_bw_ratio_fields.append(rt_bw_ratio)
bw_df[rt_bw_ratio] = 0.0
selection =(bw_df[received] == 0) & (bw_df[retransmits] != 0)
bw_df.loc[selection,rt_bw_ratio] = 100.0
selection = (bw_df[received] != 0) & (bw_df[retransmits] != 0)
bw_df.loc[selection,rt_bw_ratio] = bw_df.loc[selection,retransmits].astype(float) / bw_df.loc[selection,received].astype(float)*1500*100
rt_snd_cwnd_ratio="retransmits_snd_cwnd_ratio_at_"+str(t/10)
rt_snd_cwnd_ratio_fields.append(rt_snd_cwnd_ratio)
bw_df[rt_snd_cwnd_ratio] = 0.0
selection = (bw_df[snd_cwnd] == 0) & (bw_df[retransmits] != 0)
bw_df.loc[selection,rt_snd_cwnd_ratio] = 100.0
selection = (bw_df[snd_cwnd] != 0) & (bw_df[retransmits] != 0)
bw_df.loc[selection,rt_snd_cwnd_ratio] = bw_df.loc[selection,retransmits].astype(float) / bw_df.loc[selection,snd_cwnd].astype(float)*1500*100
if t%10 == 0:
xticks_time.append(t)
if t%50 == 0:
xticklabels_time.append(str(int(t/10)))
else:
xticklabels_time.append("")
bw_df=bw_df.apply(bandwidth_per_sec, axis=1)
bw_df["total_received"] = bw_df[received_fields].sum(axis=1)
# -
# # ECN
# +
save=False
fig, ax = plt.subplots()
this_df = bw_df
data = this_df[(this_df.usecase == "forward")
& (this_df.experiment == "figure") & (this_df.configuration == "direct")
& (this_df.packet_size == "default")][received_fields].max().cumsum()
ax.plot(data,label="direct baseline",linestyle="--")
data = this_df[(this_df.usecase == "ip-ecn") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")][received_fields].median().cumsum()
ax.plot(data,label="disabled ECN", markevery=[0,50,100,150,199],
marker="8", color="forestgreen", alpha=0.7)
data = pd.Series([0 for x in range(10)]).append(this_df[(this_df.usecase == "tcp-ecn")
& (this_df.configuration == "indirect")
& (this_df.experiment == "figure")][received_fields].median()).cumsum().values
ax.plot(data,label="blocked ECN", markevery=[0,50,100,150,199],
marker="d", color="darkorange", alpha=0.7)
data = this_df[(this_df.usecase == "ip-ecn-11") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")][received_fields].median().cumsum()
ax.plot(data,label="broken ECN",markevery=[0,50,100,150,199], marker="x", color="red", alpha=0.7)
ax.set_xlim([0,200])
ax.set_xticks(xticks_time)
ax.set_xticklabels(xticklabels_time)
ax.set_xlabel("Time (sec)")
ax.set_ylim([0,90000000000])
ax.set_yticklabels(range(0,91,10))
ax.set_ylabel("Received data (GB)")
ax.legend(prop={'size': 13})
if save:
plt.savefig(figurepath+"ecn-cumsum-nocong.png", dpi=200,aspect=2.0,bbox_inches = "tight")
_=_
# +
save=False
fig, ax = plt.subplots()
this_df = bw_df
data = this_df[(this_df.usecase == "forward")
& (this_df.experiment == "wred") & (this_df.configuration == "indirect")
& (this_df.packet_size == "default")
& (this_df.loss==0)][restransmits_fields].max().cumsum()
ax.plot(data,label="disabled ECN", markevery=[0,50,100,150,199],marker="d", color="darkorange", alpha=0.7)
data = this_df[(this_df.usecase == "forward")
& (this_df.experiment == "wredecn") & (this_df.configuration == "indirect")
& (this_df.packet_size == "default")
& (this_df.loss==0)][restransmits_fields].max().cumsum()
ax.plot(data,label="ECN", markevery=[0,50,100,150,199],marker="8", color="forestgreen", alpha=0.7)
ax.set_xlim([0,200])
ax.set_xticks(xticks_time)
ax.set_xticklabels(xticklabels_time)
ax.set_xlabel("Time (sec)")
ax.set_ylabel("Retransmissions (Packets)")
ax.set_ylim([0,2000])
ax.legend(prop={'size': 14})
if save:
plt.savefig("../figures/ecn-congestion-rt.png", dpi=200,aspect=2.0,bbox_inches = "tight")
_=_
# -
# # SACK
# +
save=False
fig, ax = plt.subplots()
this_df = bw_df
data = this_df[(this_df.usecase == "forward")
& (this_df.experiment == "figure") & (this_df.configuration == "direct")
& (this_df.packet_size == "default")
& (this_df.loss==0)][received_fields].max().cumsum()
ax.plot(data,label="direct baseline",linestyle="--")
data = this_df.loc[this_df[(this_df.usecase == "forward")
& (this_df.experiment == "figure") & (this_df.configuration == "indirect")
& (this_df.packet_size == "default")
& (this_df.loss==0)]["bandwidth"].idxmax()][received_fields].cumsum()
ax.plot(data,label="SACK",markevery=[0,50,100,150,199],marker="8", color="forestgreen", alpha=0.7)
data = this_df.loc[this_df[(this_df.usecase == "loss-nosack") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")& (this_df.loss==0)
]["bandwidth"].idxmax()][received_fields].cumsum()
ax.plot(data,label="disabled SACK",markevery=[0,50,100,150,199], marker="d", color="darkorange", alpha=0.7)
data = this_df.loc[this_df[(this_df.usecase == "rand-seqnum-sack") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")
& (this_df.loss==0)]["bandwidth"].idxmax()][received_fields].cumsum()
ax.plot(data,label="broken SACK",markevery=[0,50,100,150,199], marker="x", color="red", alpha=0.7)
ax.set_xlim([0,200])
ax.set_xticks(xticks_time)
ax.set_xticklabels(xticklabels_time)
ax.set_xlabel("Time (sec)")
ax.set_ylim([0,100000000001])
ax.set_yticklabels(range(0,101,20))
ax.set_ylabel("Received data (GB)")
ax.legend(prop={'size': 14})
if save:
plt.savefig(figurepath+"sack-cumsum-noloss.png", dpi=200,aspect=2.0,bbox_inches = "tight")
_=_
# +
save=False
fig, ax = plt.subplots()
this_df = bw_df
sub_df = this_df[(this_df.usecase == "forward")
& (this_df.experiment == "figure") & (this_df.configuration == "direct")
& (this_df.packet_size == "default")
& (this_df.loss==0)]
data = sub_df[np.isclose(sub_df.bandwidth, sub_df.median()["bandwidth"], 0.01)][restransmits_fields].mean().cumsum()
ax.plot(data,label="direct baseline",linestyle="--")
sub_df = this_df[(this_df.usecase == "forward")
& (this_df.experiment == "figure") & (this_df.configuration == "indirect")
& (this_df.packet_size == "default")
& (this_df.loss==0)]
data = sub_df[np.isclose(sub_df.bandwidth, sub_df.median()["bandwidth"], 0.0001)][restransmits_fields].mean().cumsum()
ax.plot(data,label="SACK",markevery=[0,50,100,150,199], marker="8", color="forestgreen", alpha=0.7)
sub_df = this_df[(this_df.usecase == "loss-nosack")
& (this_df.experiment == "figure") & (this_df.configuration == "indirect")
& (this_df.loss==0)]
data = sub_df[np.isclose(sub_df.bandwidth, sub_df.median()["bandwidth"], 0.01)][restransmits_fields].mean().cumsum()
ax.plot(data,label="disabled SACK", markevery=[0,50,100,150,199],marker="d", color="darkorange", alpha=0.7)
sub_df = this_df[(this_df.usecase == "rand-seqnum-sack")
& (this_df.configuration == "indirect")
& (this_df.loss==0)]
data = sub_df[np.isclose(sub_df.bandwidth, sub_df.max()["bandwidth"], 0.01)][restransmits_fields].mean().cumsum()
ax.plot(data,label="broken SACK", markevery=[0,50,100,150,199],marker="x", color="red", alpha=0.7)
ax.set_xlim([0,200])
ax.set_xticks(xticks_time)
ax.set_xticklabels(xticklabels_time)
ax.set_xlabel("Time (sec)")
ax.set_yscale("log", nonposy='clip')
ax.set_ylim([1,100000])
ax.set_ylabel("Retransmissions (Packets)")
ax.legend(prop={'size': 14})
if save:
plt.savefig("../figures/sack-rt-noloss.png", dpi=200,aspect=2.0,bbox_inches = "tight")
_=_
# +
save=False
plt.rc('font', family='serif', size=14)
plt.rc('text', usetex=False)
fig, ax = plt.subplots()
this_df = bw_df
data=[]
for loss_rate in loss_rates:
if loss_rate == 0:
data.append(this_df[(this_df.usecase == "forward") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")]["bandwidth"].median())
else:
data.append(this_df[(this_df.usecase == "loss-sack") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure") & (this_df.loss == loss_rate)]["bandwidth"].median())
ax.plot(data,label="SACK", marker="8", color="forestgreen", alpha=0.7)
data=[]
for loss_rate in loss_rates:
data.append(this_df[(this_df.usecase == "loss-nosack") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure") & (this_df.loss == loss_rate)]["bandwidth"].median())
ax.plot(data,label="disabled SACK", marker="d", color="darkorange", alpha=0.7)
data=[]
for loss_rate in loss_rates:
data.append(this_df[(this_df.usecase == "rand-seqnum-sack") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")
& (this_df.loss == loss_rate)]["bandwidth"].max())
ax.plot(data,label="broken SACK", marker="x", color="red", alpha=0.7)
ax.set_xlim([0,20])
ax.set_xticks(range(0,21))
ax.set_xticklabels([str(l) if i%5==0 else "" for i,l in enumerate(loss_rates)])
ax.set_xlabel("Packet loss (%)")
ax.set_ylim([0,40000000000])
ax.set_yticks(range(5*10**9,36*10**9,10*10**9), minor=True)
ax.set_yticklabels(["0","10","20", "30", "40"])
ax.set_ylabel("Bandwidth (Gbps)")
ax.legend(prop={'size': 13})
if save:
plt.savefig("../figures/sack-bw.png", dpi=200,aspect=2.0,bbox_inches = "tight")
_=_
# +
# cdf Absolute retransmissions
save=True
fig, ax = plt.subplots()
this_df = bw_df
data=[]
for loss_rate in loss_rates:
if loss_rate == 0:
data.append(this_df[(this_df.usecase == "forward") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")
& (this_df.loss == loss_rate)]["total_retransmits"].median())
else:
data.append(this_df[(this_df.usecase == "loss-sack") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")
& (this_df.loss == loss_rate)]["total_retransmits"].median())
ax.plot(data,label="SACK", marker="8", color="forestgreen", alpha=0.7)
data=[]
for loss_rate in loss_rates:
data.append(this_df[(this_df.usecase == "loss-nosack") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")
& (this_df.loss == loss_rate)]["total_retransmits"].median())
ax.plot(data,label="disabled SACK", marker="d", color="darkorange",alpha=0.7)
data=[]
for loss_rate in loss_rates:
data.append(this_df[(this_df.usecase == "rand-seqnum-sack") & (this_df.configuration == "indirect")
& (this_df.experiment == "figure")
& (this_df.loss == loss_rate)]["total_retransmits"].median())
ax.plot(data,label="broken SACK", marker="x", color="red", alpha=0.7)
ax.set_xlim([0,20])
ax.set_xticks(range(0,21))
ax.set_xticklabels([str(l) if int(l*100)%5==0 else "" for l in loss_rates])
ax.set_xlabel("Packet loss (%)")
ax.set_ylim([0,40000])
ax.set_yticklabels(["{}K".format(l) if l!=0 else "0" for l in range(0,51,10)])
ax.set_ylabel("Total Retransmissions (Packets)")
ax.legend(prop={'size': 13})
if save:
plt.savefig("../figures/sack-rt.png", dpi=200,aspect=2.0,bbox_inches = "tight")
_=_
# -
# # WScale
# +
from matplotlib.ticker import LogFormatter
save=False
fig, axs = plt.subplots(2,2,sharex=True, sharey=True)
plt.rc('font', family='serif', size=12)
cc_algo=["reno", "cubic", "htcp", "bbr"]
these_df = [this_df for this_df in [bw_df[(bw_df.usecase == "wscale" )
& (bw_df.experiment == "figure")
& (bw_df.congestion_control == cc)] for cc in cc_algo]]
group_fields = ["delay","wscale"]
c=[]
df=(these_df[0][group_fields+["bandwidth"]].groupby(group_fields).median())
c.append(axs[0,0].pcolor(df.unstack(),cmap='RdBu',
norm=colors.LogNorm(10000000,10000000000),alpha=0.9))
df=(these_df[1][group_fields+["bandwidth"]].groupby(group_fields).median())
c.append(axs[0,1].pcolor(df.unstack(),cmap='RdBu',
norm=colors.LogNorm(10000000,10000000000),alpha=0.9))
df=(these_df[2][group_fields+["bandwidth"]].groupby(group_fields).median())
c.append(axs[1,0].pcolor(df.unstack(),cmap='RdBu',
norm=colors.LogNorm(10000000,10000000000),alpha=0.9))
df=(these_df[3][group_fields+["bandwidth"]].groupby(group_fields).median())
c.append(axs[1,1].pcolor(df.unstack(),cmap='RdBu',
norm=colors.LogNorm(10000000,10000000000),alpha=0.9))
wscale_labels=["0\n64K ", "1\n128K ","2\n256K ","3\n 512K","4\n 1M","5\n 2M",
"6\n4M","7\n8M","8\n16M","9\n32M","10\n64M","11\n128M",
"12\n256M","13\n512M","14\n1G"]
wscale_labels=["0 (64K)", "1 (128K)","2 (256K)","3 (512K)","4 (1M)","5 (2M)",
"6 (4M)","7 (8M)","8 (16M)","9 (32M)","10 (64M)","11 (128M)",
"12 (256M)","13 (512M)","14 (1G)"]
axs[0,0].set_xticks([x-0.5 for x in filter(lambda x: x, range(0,16))])
axs[0,0].set_xlim([0,15])
axs[1,1].set_xticklabels(wscale_labels,rotation=-90)
axs[1,0].set_xticklabels(wscale_labels,rotation=-90)
axs[0,0].set_yticks([x+0.5 for x in filter(lambda x: x%4 == 0, range(18))])
axs[0,0].set_yticks([x+0.5 for x in filter(lambda x: x%4 != 0, range(18))], minor=True)
axs[0,0].set_ylim([0,17])
# *2 because it's bidirectional
axs[0,0].set_yticklabels([str(d) for d in filter(lambda x: x%200 == 0, sorted(these_df[0].delay.unique()*2))])
axs[0,0].set_title("TCP Reno")
axs[0,1].set_title("TCP CUBIC")
axs[1,0].set_title("H-TCP")
axs[1,1].set_title("TCP BBR")
plt.gcf().subplots_adjust(bottom=0.25,left=0.1,right=0.90)
fig.text(0.0, 0.59, 'Delay (ms)', ha='center', va='center', rotation='vertical')
fig.text(0.5, 0., 'TCP Window Scale Factor', ha='center', va='center')
formatter = LogFormatter(10, labelOnlyBase=False)
cbaxes = fig.add_axes([0.99, 0.29, 0.015, 0.61])
cbar=fig.colorbar(c[3], ax=fig.get_axes(),cax=cbaxes, label="Throughput",
fraction=0.05, pad=0.05, anchor=(0.0, 0.5))
cbar_index=["10 Mb", "100 Mb","1 Gb","10 Gb"]
cbar.ax.set_yticklabels([cbar_index[int(i/9)] if not i%9 else "" for i in range(28)])
plt.tight_layout()
if save:
plt.savefig("../figures/wscale-hm.png", dpi=200, aspect=2.0,bbox_inches = "tight")
_=_
# -
| notebook/quantify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp repr.codebert
# -
# # Code Bert Adapatation
#
# > This module adapts codebert from Microsoft
# >
# > By @danaderp 11-12-2020
# >
#export
# Imports
import torch
from transformers import RobertaTokenizer, RobertaConfig, RobertaModel
#hide
from nbdev.showdoc import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = RobertaTokenizer.from_pretrained("microsoft/codebert-base")
model = RobertaModel.from_pretrained("microsoft/codebert-base")
model.to(device)
# Path was used here without ever being imported, so this cell raised a
# NameError; bring it in from the standard library.
from pathlib import Path
path = Path('/tf/data/models/JavaBert-v1')
#Building Library
# ! nbdev_build_docs
| nbs/2.0_repr.codebert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/golesuman/66daysofdata/blob/main/Day68/DataAnalysisonAirdata1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="QlTeudxwaqAB" outputId="021e377b-d64d-4744-dbf7-0f62b218bcb7"
# !wget 'https://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
# + colab={"base_uri": "https://localhost:8080/"} id="3Y-kV3LsasrT" outputId="497ab21d-1280-4d3e-cc61-a733c325259f"
# !unzip AirQualityUCI.zip
# + id="1nI4xaqga85H"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/", "height": 600} id="n86ijI8Hazwf" outputId="6c474ed0-bf87-4c67-e954-b493a6bcbc02"
# Load the UCI air-quality measurements and peek at a few random rows.
df = pd.read_excel('AirQualityUCI.xlsx')
df.sample(10)
# + id="i_yO-Utfhs_F"

# + colab={"base_uri": "https://localhost:8080/"} id="PwjkgZWwdB85" outputId="91aa4115-5894-409c-8c7a-46a2a2d5605b"
df.shape
# + colab={"base_uri": "https://localhost:8080/"} id="JQklBs22dQWJ" outputId="9b0bbcbd-fdaf-4c7b-e10d-c483b8ee0d05"
features = list(df.columns)
# printing the no of null values in dataframe
for col in features:
    print(f'The total no of null values in {col} is {df[col].isna().sum()}')
# + colab={"base_uri": "https://localhost:8080/"} id="VS_MLwOzdqCX" outputId="24ed7f36-a4e2-49b6-8171-da2c351fb1b6"
df.info()
# + [markdown] id="7bNim-8yd3tM"
# ## Finding numerical values
# + colab={"base_uri": "https://localhost:8080/"} id="m_XQYHEfhkfT" outputId="beff8c7a-2e27-4790-e90c-f15785c5cfbd"
len(df.columns)
# + colab={"base_uri": "https://localhost:8080/"} id="WKTjpgBhd2qg" outputId="34de88e5-2bf5-4f7a-c6c4-6b9c0d8a6112"
# columns whose underlying numpy dtype is not object ('O') are numeric
numerical_features = [col for col in df.columns if df[col].values.dtype != 'O']
numerical_features
# + colab={"base_uri": "https://localhost:8080/"} id="iWztX_23eh-S" outputId="6777bf8b-da5a-436e-f655-12a24e096e3f"
len(numerical_features)
# + colab={"base_uri": "https://localhost:8080/"} id="_-Z96tX0iAKO" outputId="fc39295a-3b8b-4831-d05b-2d9410d20926"
# quick check: extract the hour part of a time string
times = '22:00:00'
hour = times.split(":")[0]
print(hour)
# + colab={"base_uri": "https://localhost:8080/"} id="_GKSh-aIjH1P" outputId="79f761b0-d379-4c76-931d-88944a855cb3"
df.Time.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="FoeYj3fajHqX" outputId="2e5ac753-4931-4f35-bb8d-9f4ab6421376"
df.describe()
# + id="BumkrdOAnCUF"
def delete_item(frame=None):
    """Drop rows that contain negative readings, in place.

    The 'Date' and 'Time' columns are skipped since they are non-numeric.
    (In this dataset negative readings presumably encode missing values,
    e.g. -200 — verify against the dataset documentation.)

    Parameters
    ----------
    frame : pandas.DataFrame, optional
        DataFrame to clean in place; defaults to the notebook-global ``df``.

    Fixes over the original: the column filter used ``== 'Date' and
    == 'Time'`` (a condition that can never be true) and the row mask was
    written ``df[df[feature < 0]]`` instead of ``df[df[feature] < 0]``,
    so the function never dropped anything.
    """
    target = df if frame is None else frame
    for feature in target.columns:
        if feature != 'Date' and feature != 'Time':
            target.drop(target[target[feature] < 0].index, inplace=True)
delete_item()
# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="1PdvTaQSoR1N" outputId="b755c5b6-9e00-481c-9d32-28e8e0ce9c8a"
df.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="B92YFZo3oTt9" outputId="78b9482b-8c92-43e8-a5e6-04da2831b15c"
df['NMHC(GT)'].unique()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="oOPzv38_pxmz" outputId="fb5e36e0-6e8a-41b2-8863-5f48e9701a3f"
# One histogram (with KDE overlay) per numeric column.
sns.set_theme()
for column in df.columns:
    if column not in ('Date', 'Time'):
        sns.histplot(x=column, data=df, bins=50, kde=True)
        plt.show()
# + id="U1huU88XtN3p"
| Day68/DataAnalysisonAirdata1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!DOCTYPE html>
# <html>
# <body>
# <div align="center">
# <h3>Prepared by <NAME></h3>
#
# <h1>Data Visualization With Matplotlib</h1>
#
# <h3>Follow Me on - <a href="https://www.linkedin.com/in/asif-bhat/">LinkedIn</a> <a href="https://mobile.twitter.com/_asifbhat_">Twitter</a> <a href="https://www.instagram.com/datasciencescoop/?hl=en">Instagram</a> <a href="https://www.facebook.com/datasciencescoop/">Facebook</a></h3>
# </div>
#
# </div>
# </body>
# </html>
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
# # Pie Charts
# Ticket counts per severity bucket, rendered as a plain pie chart.
labels = ['Low', 'Medium', 'High', 'Critical']
colors = ['#8BC34A', '#D4E157', '#FFB300', '#FF7043']
tickets = [48, 30, 20, 15]
plt.figure(figsize=(9, 9))
plt.pie(tickets, labels=labels, colors=colors, startangle=45)
plt.show()
# #### Display percentage and actual value in Pie Chart
# Display percentage in Pie Chart using autopct='%1.1f%%'
plt.figure(figsize=(8, 8))
tickets = [48, 30, 20, 15]
labels = ['Low', 'Medium', 'High', 'Critical']
colors = ['#7CB342', '#C0CA33', '#FFB300', '#F57C00']
# shadow expects a bool; the previous string 'true' only worked because any
# non-empty string is truthy
plt.pie(tickets, labels=labels, colors=colors, startangle=45, shadow=True,
        autopct='%1.1f%%', explode=[0, 0, 0, 0])
plt.show()
# +
plt.figure(figsize=(8,8))
tickets = [48 , 30 , 20 , 15]
total = np.sum(tickets)
labels = ['Low' , 'Medium' , 'High' , 'Critical']
def val_per(x):
return '{:.2f}%\n({:.0f})'.format(x, total*x/100)
colors = ['#7CB342','#C0CA33','#FFB300','#F57C00']
plt.pie (tickets , labels= labels , colors= colors , startangle=45 , shadow='true', autopct=val_per, explode=[0,0 , 0 , 0])
plt.show()
# -
# #### Explode Slice in Pie Chart
#Explode 4th Slice
plt.figure(figsize=(8, 8))
tickets = [48, 30, 20, 15]
labels = ['Low', 'Medium', 'High', 'Critical']
colors = ['#7CB342', '#C0CA33', '#FFB300', '#F57C00']
# explode = [0,0,0,0.1] will explode the fourth slice
# shadow expects a bool; the old string 'true' merely happened to be truthy
plt.pie(tickets, labels=labels, colors=colors, startangle=45,
        autopct='%1.1f%%', shadow=True, explode=[0, 0, 0, 0.1])
plt.show()
#Explode 3rd & 4th Slice
plt.figure(figsize=(8, 8))
tickets = [48, 30, 20, 15]
label = ['Low', 'Medium', 'High', 'Critical']
color = ['#7CB342', '#C0CA33', '#FFB300', '#F57C00']
# explode = [0,0,0.1,0.1] will explode the 3rd & 4th slice
plt.pie(tickets, labels=label, colors=color, startangle=45,
        autopct='%1.1f%%', shadow=True, explode=[0, 0, 0.1, 0.1])
plt.legend()
plt.show()
# #### Display multiple pie plots in one figure
# +
fig = plt.figure(figsize=(20, 6))
tickets = [48, 30, 20, 15]
priority = ['Low', 'Medium', 'High', 'Critical']
status = ['Resolved', 'Cancelled', 'Pending', 'Assigned']
company = ['IBM', 'Microsoft', 'BMC', 'Apple']
colors = ['#8BC34A', '#D4E157', '#FFB300', '#FF7043']
# one pie per label set, laid out side by side
for pos, lbls in enumerate((priority, status, company), start=1):
    plt.subplot(1, 3, pos)
    plt.pie(tickets, labels=lbls, colors=colors, startangle=45)
plt.show()
# +
fig = plt.figure(figsize=(20, 13))
tickets = [48, 30, 20, 15]
priority = ['Low', 'Medium', 'High', 'Critical']
status = ['Resolved', 'Cancelled', 'Pending', 'Assigned']
company = ['IBM', 'Microsoft', 'BMC', 'Apple']
colors = ['#8BC34A', '#D4E157', '#FFB300', '#FF7043']
# same three pies repeated on a 2x3 grid, with percentage labels
for pos, lbls in enumerate((priority, status, company) * 2, start=1):
    plt.subplot(2, 3, pos)
    plt.pie(tickets, labels=lbls, colors=colors, startangle=45, autopct='%1.1f%%')
plt.show()
# -
# # Donut plot
# A donut is a pie chart with a circle drawn over its centre.
labels = ['Low', 'Medium', 'High', 'Critical']
colors = ['#8BC34A', '#D4E157', '#FFB300', '#FF7043']
tickets = [48, 30, 20, 15]
plt.figure(figsize=(9, 9))
plt.pie(tickets, labels=labels, colors=colors, startangle=45)
my_circle = plt.Circle((0, 0), 0.7, color='white')  # circle at the centre
p = plt.gcf()
p.gca().add_artist(my_circle)
plt.show()
# Changing background color
fig = plt.figure(figsize=(9, 9))
fig.patch.set_facecolor('#DADADA')  # background colour of the whole figure
tickets = [48, 30, 20, 15]
labels = ['Low', 'Medium', 'High', 'Critical']
colors = ['#8BC34A', '#D4E157', '#FFB300', '#FF7043']
plt.pie(tickets, labels=labels, colors=colors, startangle=45)
# the centre circle matches the background so the donut blends in
my_circle = plt.Circle((0, 0), 0.7, color='#DADADA')
p = plt.gcf()
p.gca().add_artist(my_circle)
plt.show()
# #### Explode Slice in Donut Chart
# The same donut drawn three times with different explode offsets:
# 4th slice out, 3rd & 4th out, then all slices slightly separated.
for offsets in ([0, 0, 0.0, 0.1], [0, 0, 0.1, 0.1], [0.03, 0.03, 0.03, 0.03]):
    plt.figure(figsize=(9, 9))
    tickets = [48, 30, 20, 15]
    labels = ['Low', 'Medium', 'High', 'Critical']
    colors = ['#8BC34A', '#D4E157', '#FFB300', '#FF7043']
    plt.pie(tickets, labels=labels, colors=colors, startangle=45, explode=offsets)
    my_circle = plt.Circle((0, 0), 0.7, color='white')  # circle at the centre
    p = plt.gcf()
    p.gca().add_artist(my_circle)
    plt.show()
# #### Displaying percentage and actual values in Donut Chart
plt.figure(figsize=(9,9))
tickets = [48 , 30 , 20 , 15]
labels = ['Low' , 'Medium' , 'High' , 'Critical']
colors = ['#8BC34A','#D4E157','#FFB300','#FF7043']
plt.pie (tickets , labels= labels , colors= colors , startangle=45 , autopct='%1.1f%%', explode=[0,0 , 0 , 0])
my_circle=plt.Circle( (0,0), 0.7, color='white') # Adding circle at the centre
p=plt.gcf()
p.gca().add_artist(my_circle)
plt.show()
plt.figure(figsize=(9,9))
tickets = [48 , 30 , 20 , 15]
labels = ['Low' , 'Medium' , 'High' , 'Critical']
colors = ['#8BC34A','#D4E157','#FFB300','#FF7043']
plt.pie (tickets , labels= labels , colors= colors , startangle=45 , autopct='%1.1f%%', pctdistance=0.85 ,explode=[0,0 , 0 , 0])
my_circle=plt.Circle( (0,0), 0.7, color='white') # Adding circle at the centre
p=plt.gcf()
p.gca().add_artist(my_circle)
plt.show()
# +
plt.figure(figsize=(9,9))
tickets = [48 , 30 , 20 , 15]
total = np.sum(tickets)
def val_per(x):
return '{:.2f}%\n({:.0f})'.format(x, total*x/100)
labels = ['Low' , 'Medium' , 'High' , 'Critical']
colors = ['#8BC34A','#D4E157','#FFB300','#FF7043']
plt.pie (tickets , labels= labels , colors= colors , startangle=45 , autopct=val_per, pctdistance=0.85 ,explode=[0,0 , 0 , 0])
my_circle=plt.Circle( (0,0), 0.7, color='white') # Adding circle at the centre
p=plt.gcf()
p.gca().add_artist(my_circle)
plt.show()
# -
# #### Display multiple Donut plots in one figure
# +
fig = plt.figure(figsize=(20, 6))
tickets = [48, 30, 20, 15]
priority = ['Low', 'Medium', 'High', 'Critical']
status = ['Resolved', 'Cancelled', 'Pending', 'Assigned']
company = ['IBM', 'Microsoft', 'BMC', 'Apple']
colors = ['#8BC34A', '#D4E157', '#FFB300', '#FF7043']
# three donuts side by side: draw a pie, then cover its centre with a circle
for pos, lbls in enumerate((priority, status, company), start=1):
    plt.subplot(1, 3, pos)
    plt.pie(tickets, labels=lbls, colors=colors, startangle=45)
    my_circle = plt.Circle((0, 0), 0.7, color='white')  # circle at the centre
    p = plt.gcf()
    p.gca().add_artist(my_circle)
plt.show()
# +
fig = plt.figure(figsize=(20, 13))
tickets = [48, 30, 20, 15]
priority = ['Low', 'Medium', 'High', 'Critical']
status = ['Resolved', 'Cancelled', 'Pending', 'Assigned']
company = ['IBM', 'Microsoft', 'BMC', 'Apple']
colors = ['#8BC34A', '#D4E157', '#FFB300', '#FF7043']
# the same three donuts repeated on a 2x3 grid
for pos, lbls in enumerate((priority, status, company) * 2, start=1):
    plt.subplot(2, 3, pos)
    plt.pie(tickets, labels=lbls, colors=colors, startangle=45)
    my_circle = plt.Circle((0, 0), 0.7, color='white')  # circle at the centre
    p = plt.gcf()
    p.gca().add_artist(my_circle)
plt.show()
| .ipynb_checkpoints/Pie Charts-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="oc9gNLcD9wJK"
# ### Negative sampling
# + [markdown] id="ESAjkybH-NUO"
# For implicit data with only positive labels, negative sampling is typically needed for model training. There are some special cases, such as user_cf, item_cf, BPR, YouTubeMatch, RNN4Rec with bpr loss, because these models do not need negative sampling during training. However, when evaluating these models using some metrics such as cross_entropy loss, roc_auc, pr_auc, negative labels are indeed needed. So we recommend doing negative sampling as long as the data is implicit and only contains positive labels, no matter which model you choose. Also note that train_data and test_data should use different sampling seed.
# + id="12gkNqpW_hB8"
from math import floor
from random import random, seed as set_random_seed
import numpy as np
from tqdm import tqdm
import time
from contextlib import contextmanager
# + id="uEKw3KzY_fpU"
@contextmanager
def time_block(block_name="block", verbose=1):
    """Context manager that reports the wall-clock time of the enclosed block.

    Timing is measured and printed only when ``verbose > 0``; exceptions
    always propagate unchanged.
    """
    if verbose <= 0:
        try:
            yield
        except Exception:
            raise
        return
    start = time.perf_counter()
    try:
        yield
    except Exception:
        raise
    else:
        end = time.perf_counter()
        print(f"{block_name} elapsed: {(end - start):3.3f}s")
# + id="shoGskxh_3r2"
class SamplingBase(object):
    """Base class for negative sampling on implicit-feedback data.

    Parameters
    ----------
    dataset : object
        Must expose parallel ``user_indices`` / ``item_indices`` of positive
        interactions.
    data_info : object
        Must expose ``n_items``, ``user_consumed`` and (for popularity
        sampling) ``get_indexed_interaction()``.
    num_neg : int
        Number of negative items drawn per positive interaction.
    """

    def __init__(self, dataset, data_info, num_neg=1):
        self.dataset = dataset
        self.data_info = data_info
        self.num_neg = num_neg

    def sample_items_random(self, seed=42):
        """Uniformly sample ``num_neg`` negatives per positive interaction.

        Returns an array where each positive item is immediately followed by
        its negatives, preserving the original interaction order.
        """
        set_random_seed(seed)
        n_items = self.data_info.n_items
        item_indices_sampled = list()
        # set is much faster for search contains
        user_consumed = {
            u: set(items) for u, items in self.data_info.user_consumed.items()
        }
        # sample negative items for every user
        with time_block("random neg item sampling"):
            for u, i in zip(self.dataset.user_indices,
                            self.dataset.item_indices):
                item_indices_sampled.append(i)
                for _ in range(self.num_neg):
                    item_neg = floor(n_items * random())
                    if u in user_consumed:
                        # rejection-sample until we hit an unseen item
                        while item_neg in user_consumed[u]:
                            item_neg = floor(n_items * random())
                    item_indices_sampled.append(item_neg)
        return np.asarray(item_indices_sampled)

    def sample_items_popular(self, seed=42):
        """Sample negatives proportionally to global item popularity.

        Fix: ``seed`` was previously accepted but never used; a dedicated
        ``RandomState`` now makes the sampling reproducible without touching
        NumPy's global RNG.
        """
        rng = np.random.RandomState(seed)
        data = self.data_info.get_indexed_interaction()
        item_counts = data.item.value_counts().sort_index().to_numpy()
        user_consumed = self.data_info.user_consumed
        items = np.arange(self.data_info.n_items)
        item_order = list()
        item_indices_sampled = list()
        with time_block("popularity-based neg item sampling"):
            for user, u_data in data.groupby("user", sort=False):
                item_indices = u_data.index.to_list()
                item_indices = item_indices * (self.num_neg + 1)
                item_order.extend(item_indices)
                # add positive items
                item_indices_sampled.extend(u_data.item.tolist())
                u_consumed = user_consumed[user]
                u_item_counts = item_counts.copy()
                # zero out consumed items so they can never be drawn
                u_item_counts[u_consumed] = 0
                item_prob = u_item_counts / np.sum(u_item_counts)
                neg_size = len(u_consumed) * self.num_neg
                neg_sampled = rng.choice(
                    items, size=neg_size, p=item_prob, replace=True)
                item_indices_sampled.extend(neg_sampled)
        item_indices_sampled = np.asarray(item_indices_sampled)
        # must be stable sort to keep relative order
        item_order = np.argsort(item_order, kind="mergesort")
        return item_indices_sampled[item_order]

    def _label_negative_sampling(self, size):
        """Return interleaved labels: 1.0 for each positive, 0.0 for its negatives."""
        factor = self.num_neg + 1
        total_length = size * factor
        labels = np.zeros(total_length, dtype=np.float32)
        labels[::factor] = 1.0
        return labels
# + id="xubmN20LAJAN"
class NegativeSampling(SamplingBase):
    """Point-wise negative sampling with optional sparse/dense side features.

    Produces ``num_neg`` negatives per positive interaction either all at
    once (``generate_all``) or batch-by-batch every epoch (``__call__``).

    Parameters
    ----------
    dataset : object
        Holds the parallel interaction arrays (and ``*_orig`` copies when it
        was already sampled once).
    data_info : object
        Global metadata: ``n_items``, ``user_consumed``, feature columns.
    num_neg : int
        Negatives drawn per positive.
    sparse, dense : bool, optional
        Whether sparse indices / dense values are present.
    batch_sampling : bool
        If True (and the dataset has been sampled before), fall back to the
        original un-sampled arrays so negatives are re-drawn every epoch.
    """

    def __init__(self, dataset, data_info, num_neg, sparse=None, dense=None,
                 batch_sampling=False):
        super(NegativeSampling, self).__init__(dataset, data_info, num_neg)
        if batch_sampling and dataset.has_sampled:
            self.user_indices = dataset.user_indices_orig
            self.item_indices = dataset.item_indices_orig
            self.sparse_indices = (
                dataset.sparse_indices_orig if sparse else None)
            self.dense_values = (
                dataset.dense_values_orig if dense else None)
        else:
            self.user_indices = dataset.user_indices
            self.item_indices = dataset.item_indices
            self.sparse_indices = dataset.sparse_indices if sparse else None
            self.dense_values = dataset.dense_values if dense else None
        self.data_size = len(self.user_indices)
        self.sparse = sparse
        self.dense = dense

    def generate_all(self, seed=42, item_gen_mode="random"):
        """Sample negatives for the whole dataset in one pass.

        Returns ``(users, items, labels, sparse, dense)`` where every
        positive row is followed by its ``num_neg`` negatives.
        """
        user_indices_sampled = np.repeat(
            self.user_indices, self.num_neg + 1, axis=0
        )
        if item_gen_mode not in ["random", "popular"]:
            raise ValueError(
                "sampling item_gen_mode must either be 'random' or 'popular'"
            )
        elif item_gen_mode == "random":
            item_indices_sampled = self.sample_items_random(seed=seed)
        elif item_gen_mode == "popular":
            item_indices_sampled = self.sample_items_popular(seed=seed)
        sparse_indices_sampled = self._sparse_indices_sampling(
            self.sparse_indices, item_indices_sampled
        ) if self.sparse else None
        dense_values_sampled = self._dense_values_sampling(
            self.dense_values, item_indices_sampled
        ) if self.dense else None
        label_sampled = self._label_negative_sampling(self.data_size)
        return (
            user_indices_sampled,
            item_indices_sampled,
            label_sampled,
            sparse_indices_sampled,
            dense_values_sampled
        )

    def __call__(self, shuffle=True, batch_size=None):
        """Return a generator over batches with freshly sampled negatives.

        Bug fix: the original shuffled only the sparse/dense feature arrays
        and left ``user_indices``/``item_indices`` in their old order,
        misaligning features with interactions (compare
        ``PairwiseSampling.__call__``).  All parallel arrays are now permuted
        with the same mask.
        """
        if shuffle:
            mask = np.random.permutation(range(self.data_size))
            self.user_indices = self.user_indices[mask]
            self.item_indices = self.item_indices[mask]
            self.sparse_indices = (
                self.sparse_indices[mask] if self.sparse else None)
            self.dense_values = (
                self.dense_values[mask] if self.dense else None)
        user_consumed = {
            u: set(items) for u, items in self.data_info.user_consumed.items()
        }
        n_items = self.data_info.n_items
        return self.sample_batch(user_consumed, n_items, batch_size)

    def sample_batch(self, user_consumed, n_items, batch_size):
        """Yield ``(users, items, labels, sparse, dense)`` one batch at a time."""
        for k in tqdm(range(0, self.data_size, batch_size),
                      desc="batch_sampling train"):
            batch_slice = slice(k, k + batch_size)
            batch_user_indices = self.user_indices[batch_slice]
            batch_item_indices = self.item_indices[batch_slice]
            batch_sparse_indices = (
                self.sparse_indices[batch_slice] if self.sparse else None)
            batch_dense_values = (
                self.dense_values[batch_slice] if self.dense else None)
            user_indices_sampled = np.repeat(
                batch_user_indices, self.num_neg + 1, axis=0
            )
            item_indices_sampled = list()
            for u, i in zip(batch_user_indices, batch_item_indices):
                item_indices_sampled.append(i)
                for _ in range(self.num_neg):
                    item_neg = floor(random() * n_items)
                    # rejection-sample until the item is unseen by this user
                    while item_neg in user_consumed[u]:
                        item_neg = floor(random() * n_items)
                    item_indices_sampled.append(item_neg)
            item_indices_sampled = np.array(item_indices_sampled)
            sparse_indices_sampled = self._sparse_indices_sampling(
                batch_sparse_indices, item_indices_sampled
            ) if self.sparse else None
            dense_values_sampled = self._dense_values_sampling(
                batch_dense_values, item_indices_sampled
            ) if self.dense else None
            label_sampled = self._label_negative_sampling(
                len(batch_user_indices)
            )
            yield (
                user_indices_sampled,
                item_indices_sampled,
                label_sampled,
                sparse_indices_sampled,
                dense_values_sampled
            )

    def _sparse_indices_sampling(self, sparse_indices, item_indices_sampled):
        """Repeat user sparse features per (pos + negs) row and align item
        sparse features with the sampled items, restoring column order."""
        user_sparse_col = self.data_info.user_sparse_col.index
        item_sparse_col = self.data_info.item_sparse_col.index
        if user_sparse_col and item_sparse_col:
            user_sparse_indices = np.take(
                sparse_indices, user_sparse_col, axis=1)
            user_sparse_sampled = np.repeat(
                user_sparse_indices, self.num_neg + 1, axis=0)
            item_sparse_sampled = self.data_info.item_sparse_unique[
                item_indices_sampled]
            assert len(user_sparse_sampled) == len(item_sparse_sampled), (
                "num of user sampled must equal to num of item sampled")
            # keep column names in original order
            orig_cols = user_sparse_col + item_sparse_col
            col_reindex = np.arange(len(orig_cols))[np.argsort(orig_cols)]
            return np.concatenate(
                [user_sparse_sampled, item_sparse_sampled], axis=-1
            )[:, col_reindex]
        elif user_sparse_col:
            user_sparse_indices = np.take(
                sparse_indices, user_sparse_col, axis=1)
            user_sparse_sampled = np.repeat(
                user_sparse_indices, self.num_neg + 1, axis=0)
            return user_sparse_sampled
        elif item_sparse_col:
            item_sparse_sampled = self.data_info.item_sparse_unique[
                item_indices_sampled]
            return item_sparse_sampled

    def _dense_indices_sampling(self, item_indices_sampled):
        # NOTE(review): not referenced elsewhere in this class — appears to
        # build positional column indices for dense features; verify callers.
        n_samples = len(item_indices_sampled)
        user_dense_col = self.data_info.user_dense_col.index
        item_dense_col = self.data_info.item_dense_col.index
        total_dense_cols = len(user_dense_col) + len(item_dense_col)
        return np.tile(np.arange(total_dense_cols), [n_samples, 1])

    def _dense_values_sampling(self, dense_values, item_indices_sampled):
        """Same alignment as ``_sparse_indices_sampling`` for dense values."""
        user_dense_col = self.data_info.user_dense_col.index
        item_dense_col = self.data_info.item_dense_col.index
        if user_dense_col and item_dense_col:
            user_dense_values = np.take(dense_values, user_dense_col, axis=1)
            user_dense_sampled = np.repeat(
                user_dense_values, self.num_neg + 1, axis=0)
            item_dense_sampled = self.data_info.item_dense_unique[
                item_indices_sampled]
            assert len(user_dense_sampled) == len(item_dense_sampled), (
                "num of user sampled must equal to num of item sampled")
            # keep column names in original order
            orig_cols = user_dense_col + item_dense_col
            col_reindex = np.arange(len(orig_cols))[np.argsort(orig_cols)]
            return np.concatenate(
                [user_dense_sampled, item_dense_sampled], axis=-1
            )[:, col_reindex]
        elif user_dense_col:
            user_dense_values = np.take(dense_values, user_dense_col, axis=1)
            user_dense_sampled = np.repeat(
                user_dense_values, self.num_neg + 1, axis=0)
            return user_dense_sampled
        elif item_dense_col:
            item_dense_sampled = self.data_info.item_dense_unique[
                item_indices_sampled]
            return item_dense_sampled
# + id="yAY2Pfsw9xwA"
class PairwiseSampling(SamplingBase):
    """Yield (user, positive item, negative item) triples for pairwise
    losses such as BPR."""

    def __init__(self, dataset, data_info, num_neg=1):
        super(PairwiseSampling, self).__init__(dataset, data_info, num_neg)
        # fall back to the original arrays if the dataset was already sampled
        if dataset.has_sampled:
            self.user_indices = dataset.user_indices_orig
            self.item_indices = dataset.item_indices_orig
        else:
            self.user_indices = dataset.user_indices
            self.item_indices = dataset.item_indices
        self.data_size = len(self.user_indices)

    def __call__(self, shuffle=True, batch_size=None):
        """Return a batch generator, optionally shuffling interactions first."""
        if shuffle:
            perm = np.random.permutation(range(self.data_size))
            self.user_indices = self.user_indices[perm]
            self.item_indices = self.item_indices[perm]
        # set membership is O(1); build the lookup once per epoch
        consumed_sets = {
            user: set(items)
            for user, items in self.data_info.user_consumed.items()
        }
        return self.sample_batch(
            consumed_sets, self.data_info.n_items, batch_size)

    def sample_batch(self, user_consumed_set, n_items, batch_size):
        """Yield (users, positive items, negative items) per batch."""
        for start in tqdm(range(0, self.data_size, batch_size),
                          desc="pair_sampling train"):
            window = slice(start, start + batch_size)
            batch_users = self.user_indices[window]
            batch_items_pos = self.item_indices[window]
            batch_items_neg = []
            for user in batch_users:
                # rejection-sample a uniformly random unseen item
                candidate = floor(n_items * random())
                while candidate in user_consumed_set[user]:
                    candidate = floor(n_items * random())
                batch_items_neg.append(candidate)
            yield (
                batch_users,
                batch_items_pos,
                np.asarray(batch_items_neg)
            )
class PairwiseSamplingSeq(PairwiseSampling):
    """Pairwise sampling that additionally yields each user's interaction
    sequence, for sequence-aware models (e.g. RNN-based recommenders)."""

    def __init__(self, dataset, data_info, num_neg=1, mode=None, num=None):
        super(PairwiseSamplingSeq, self).__init__(dataset, data_info, num_neg)
        # mode/num control how the interacted sequence is built — presumably
        # truncation strategy and max length; verify against user_interacted_seq
        self.seq_mode = mode
        self.seq_num = num
        self.n_items = data_info.n_items
        self.user_consumed = data_info.user_consumed

    def sample_batch(self, user_consumed_set, n_items, batch_size):
        """Yield (users, pos items, neg items, sequences, sequence lengths)."""
        # avoid circular import
        from ..data.sequence import user_interacted_seq
        for k in tqdm(range(0, self.data_size, batch_size),
                      desc="pair_sampling sequence train"):
            batch_slice = slice(k, k + batch_size)
            batch_user_indices = self.user_indices[batch_slice]
            batch_item_indices_pos = self.item_indices[batch_slice]
            # look up each user's interaction history for the batch
            (
                batch_interacted,
                batch_interacted_len
            ) = user_interacted_seq(
                batch_user_indices,
                batch_item_indices_pos,
                self.user_consumed,
                self.n_items,
                self.seq_mode,
                self.seq_num,
                user_consumed_set
            )
            batch_item_indices_neg = list()
            for u in batch_user_indices:
                # rejection-sample a uniformly random unseen item
                item_neg = floor(n_items * random())
                while item_neg in user_consumed_set[u]:
                    item_neg = floor(n_items * random())
                batch_item_indices_neg.append(item_neg)
            batch_item_indices_neg = np.asarray(batch_item_indices_neg)
            yield (
                batch_user_indices,
                batch_item_indices_pos,
                batch_item_indices_neg,
                batch_interacted,
                batch_interacted_len
            )
# + id="15TnI83cAfZp"
# Draw 1 random negative per positive for the training split (fixed seed for reproducibility).
# NOTE(review): train_data / test_data / data_info are created elsewhere in the notebook.
train_data.build_negative_samples(data_info, item_gen_mode="random", num_neg=1, seed=2020)
# + id="TmPoVU7cAdyA"
# The test split uses a different seed so its negatives don't mirror training's.
test_data.build_negative_samples(data_info, item_gen_mode="random", num_neg=1, seed=2222)
| _docs/nbs/T445626-Negative-Sampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # US Flight Delay and Cancellation Trends in 2019
# ## by <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Investigation Overview
#
# In this project, I wanted to investigate arrival delays and cancellations. In particular, I begin by looking at both, then focus only on characteristics of cancelled flights, such as location and timing.
#
# ## Dataset Overview
#
# This analysis includes almost 7.5 million flights in the US throughout the whole year of 2019. The dataset includes flight details such as time, origin/destination airports, carriers, and delay times/cancellations along with their causes.
# + slideshow={"slide_type": "skip"}
# import all packages and set plots to be embedded inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# %matplotlib inline
# suppress warnings from final output
import warnings
warnings.simplefilter("ignore")  # NOTE: silences *all* warnings globally for the slide deck
# + slideshow={"slide_type": "skip"}
# load in the dataset into a pandas dataframe
flights2019 = pd.read_csv("flights2019_cleaned.csv")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Distribution of Arrival Delay Times and Cancellations
#
# The flight arrival delays have a very large range, from about 1.5 hours *early* to almost 2 full *days* delayed. The delays take on a unimodal distribution centered around -10, or 10 minutes *early*. To see the main data better, the first plot shows only the top 95% of delays. The second plot shows the complete distribution, but binned into 4 categories including cancellations. Less than 2% of flights, or 139,000 were cancelled in 2019.
#
# The remainder of the presentation will focus on only the cancelled flights.
# + slideshow={"slide_type": "subslide"}
# Keep only delays below the 95th percentile so the bulk of the data is visible.
delay_cap = flights2019.ARR_DELAY.quantile(.95)
flights2019_top95 = flights2019[flights2019.ARR_DELAY < delay_cap]
bin_width = 1
edges = np.arange(flights2019_top95.ARR_DELAY.min(),
                  flights2019_top95.ARR_DELAY.max() + bin_width, bin_width)
plt.figure(figsize=[14.70, 8.27])
plt.hist(data=flights2019_top95, x='ARR_DELAY', bins=edges)
plt.xlabel('Arrival Delay (min)'.title(), fontsize=10, weight='bold')
plt.ylabel('Count'.title(), fontsize=10, weight='bold')
plt.title('Distribution of Arrival Delay Times (Top 95%)'.title(), fontsize=14, weight='bold')
plt.show()
# + slideshow={"slide_type": "subslide"}
# Countplot of the binned delay categories (including cancellations),
# ordered by frequency.
plt.figure(figsize=[14.70, 8.27])
default_color = sb.color_palette()[0]
delay_bin_order = flights2019.DELAY_BIN.value_counts().index
g = sb.countplot(data=flights2019, x='DELAY_BIN', color=default_color,
                 order=delay_bin_order)
plt.xlabel('Arrival Status'.title(), fontsize=10, weight='bold')
plt.ylabel('Count'.title(), fontsize=10, weight='bold')
# plt.xticks(rotation=45);
plt.title('Distribution of Arrival Delays and Cancellations'.title(), fontsize=14, weight='bold')
# + [markdown] slideshow={"slide_type": "slide"}
# ## Distribution of Cancellation Causes
#
# For the 2% of flights that were cancelled in 2019, the plot below shows the distribution of the 4 causes of cancellations, by percent of cancelled flights. Weather is by far the most common cause, at just over half. Carrier cancellations, which include circumstances within an airline's control, were the next most common cause, followed closely by National Air System (NAS) cancellations. NAS includes circumstances such as non-extreme weather, airport operations, heavy traffic, and air traffic control. Lastly, security cancellations were very rare (0.01%, or just 15 by count).
# + slideshow={"slide_type": "skip"}
# Map cancellation codes to readable cause names.
cancel_dict = {'A': 'carrier', 'B': 'weather', 'C': 'NAS', 'D': 'security'}
n_points = flights2019[flights2019.CANCELLED == 1].shape[0]
max_count = flights2019.CANCELLATION_CODE.value_counts().max()
max_prop = max_count / n_points
tick_props = np.arange(0, max_prop, .1)
# tick_names = ['{:0.1f}'.format(v) for v in tick_props]
tick_names = ['{:0.1f}'.format(v) for v in tick_props * 100]
cancel_order = flights2019.CANCELLATION_CODE.value_counts().index
# readable labels, kept in the same order as the bars
cancel_x_labels = [cancel_dict[key] for key in cancel_order]
# + slideshow={"slide_type": "subslide"}
plt.figure(figsize=[14.70, 8.27])
g = sb.countplot(data=flights2019, x='CANCELLATION_CODE', color=default_color,
                 order=cancel_order)
plt.xlabel('Cancellation Cause'.title(), fontsize=10, weight='bold')
g.set(xticklabels=cancel_x_labels)
plt.title('Distribution of Flight Cancellation Causes'.title(), fontsize=14, weight='bold')
# relabel the count axis as a percentage of cancelled flights
plt.yticks(tick_props * n_points, tick_names)
plt.ylabel('Percentage'.title(), fontsize=10, weight='bold')
# + [markdown] slideshow={"slide_type": "slide"}
# ## Cancellation Cause vs. Season
#
# Weather was the most common cause of cancellation in 2019 winter, summer, and fall flights. However, cancellations due to the carrier were slightly more common than those due to weather in the spring. Weather-related cancellations accounted for the majority of cancellations in the winter, as may be expected due to the prevalence of snowstorms.
#
# Since there were an unequal number of flights in each season, the distribution of causes is plotted as percentage within each season.
# + slideshow={"slide_type": "subslide"}
# Make a data subset for only the cancelled flights.
# .copy() avoids pandas' SettingWithCopyWarning on the in-place replace below.
flights2019_cancelled = flights2019[flights2019.CANCELLED == 1].copy()
flights2019_cancelled['CANCELLATION_CODE'] = flights2019_cancelled['CANCELLATION_CODE'].replace(
    ['A', 'B', 'C', 'D'], ['carrier', 'weather', 'NAS', 'security'])
# percentage of each cancellation cause within every season
counts = (flights2019_cancelled.groupby(['SEASON'])['CANCELLATION_CODE']
          .value_counts(normalize=True)
          .rename('percentage')
          .mul(100)
          .reset_index()
          .sort_values('SEASON'))
cancel_order = flights2019_cancelled.CANCELLATION_CODE.value_counts().index
plt.figure(figsize=[14.70, 8.27])
p = sb.barplot(x="SEASON", y="percentage", hue="CANCELLATION_CODE", data=counts,
               order=['winter', 'spring', 'summer', 'fall'], hue_order=cancel_order)
plt.xlabel('Season'.title(), fontsize=10, weight='bold')
plt.ylabel('Percentage'.title(), fontsize=10, weight='bold')
plt.title('Cancellation Cause vs. Season'.title(), fontsize=14, weight='bold')
# leg = p.axes.get_legend()
# leg.set_title('Cancellation Cause')
# fix: 'top right' is not a valid matplotlib legend location (raises on
# recent versions) — the intended spot is 'upper right'
plt.legend(loc='upper right', title="Cancellation Cause", title_fontsize=12)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Cancellation Cause vs. Season by Airport
#
# The 6 busiest airports (by number of incoming flights) in 2019 were as follows:
#
# ATL (Hartsfield-Jackson Atlanta International Airport) - Atlanta, GA <br />
# ORD (Chicago O'Hare International Airport) - Chicago, IL <br />
# DFW (Dallas/Fort Worth International Airport) - Dallas, TX <br />
# DEN (Denver International Airport) - Denver, CO <br />
# CLT (Charlotte Douglas International Airport) - Charlotte, NC <br />
# LAX (Los Angeles International Airport) - Los Angeles, CA <br />
#
# The effect of these airports was added to the analysis on the last slide. At ORD, DFW, and DEN, weather was the leading cause of cancellation for all seasons. Additionally, the only times that cancellations due to NAS were more common than carrier cancellations were at ORD in summer and winter, as well as DFW in the fall.
#
# Contrary to what we may expect, winter weather cancellations are prevalent not only in airports located in colder-climate cities. One explanation could be that weather in one part of the country affects flights to many other cities. Once again, LAX seems to be an outlier, as weather was not the most common cause of cancellation during any season at that airport.
# + slideshow={"slide_type": "skip"}
# Since the airports are encoded based on id number, make a dictionary to airport codes, which more people understand
airport_dict = {10397: 'ATL', 13930: 'ORD', 11298: 'DFW', 11292: 'DEN', 11057: 'CLT', 12892: 'LAX'}
top_airports = list(airport_dict.keys())
flights2019_top_airports = flights2019[flights2019.DEST_AIRPORT_ID.isin(top_airports)]
airport_order = flights2019_top_airports.DEST_AIRPORT_ID.value_counts().index.to_list()
# Redo for cancelled flights; .copy() avoids SettingWithCopyWarning on the
# in-place replace below.
flights2019_top_airports_cancelled = flights2019[
    (flights2019.DEST_AIRPORT_ID.isin(top_airports)) & (flights2019.CANCELLED == 1)].copy()
flights2019_top_airports_cancelled['CANCELLATION_CODE'] = flights2019_top_airports_cancelled['CANCELLATION_CODE'].replace(
    ['A', 'B', 'C', 'D'], ['carrier', 'weather', 'NAS', 'security'])
# percentage of each cancellation cause per (airport, season) pair
counts = (flights2019_top_airports_cancelled.groupby(['DEST_AIRPORT_ID', 'SEASON'])['CANCELLATION_CODE']
          .value_counts(normalize=True)
          .rename('percentage')
          .mul(100)
          .reset_index()
          .sort_values(['DEST_AIRPORT_ID', 'SEASON']))
# + slideshow={"slide_type": "subslide"}
# One facet per destination airport (ordered by flight volume); each facet
# shows cancellation-cause percentages per season.
g = sb.FacetGrid(counts, col='DEST_AIRPORT_ID', col_wrap=3, col_order = airport_order, height=8.27/2, aspect=(14.70/3)/(8.27/2));
g.map(sb.barplot,'SEASON','percentage','CANCELLATION_CODE', order = ['winter','spring','summer','fall'], hue_order=cancel_order, palette="deep").add_legend();
axes = g.axes.flatten();
# Replace the default "DEST_AIRPORT_ID = <id>" titles with airport codes
# (same order as airport_order).
axes[0].set_title("ATL",fontsize=12,weight='bold');
axes[1].set_title("ORD",fontsize=12,weight='bold');
axes[2].set_title("DFW",fontsize=12,weight='bold');
axes[3].set_title("DEN",fontsize=12,weight='bold');
axes[4].set_title("CLT",fontsize=12,weight='bold');
axes[5].set_title("LAX",fontsize=12,weight='bold');
plt.subplots_adjust(top=0.85)
g.fig.suptitle('Cancellation Cause for Busiest Airports'.title(),fontsize=14,weight='bold');
# Axis labels only on the bottom row / left column of the grid.
axes[3].set_xlabel("season".title(),fontsize=10,weight='bold');
axes[4].set_xlabel("season".title(),fontsize=10,weight='bold');
axes[5].set_xlabel("season".title(),fontsize=10,weight='bold');
axes[0].set_ylabel("Percentage".title(),fontsize=10,weight='bold');
axes[3].set_ylabel("Percentage".title(),fontsize=10,weight='bold');
# + slideshow={"slide_type": "skip"}
# !jupyter nbconvert slide_deck_flights.ipynb --to slides --post serve --template output_toggle
# -
| slide_deck_flights.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchfly.modules.transformers import CachedBertEncoder, CachedBertDecoderLM, ChineseBERTBaseConfig
from torchfly.text.tokenizers import BertTokenizer
from torchfly.utils import get_pretrained_states
from torchfly.text.decode import top_filtering
# -
# Load the Chinese BERT tokenizer and the fine-tuned encoder/decoder weights
# for the headline-editing model ("TSP-best" checkpoint) onto the GPU.
tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
model_states = torch.load("models/TSP-best.th")
encoder = CachedBertEncoder(ChineseBERTBaseConfig)
decoder = CachedBertDecoderLM(ChineseBERTBaseConfig)
# strict=False: tolerate checkpoint keys that don't exactly match the modules
encoder.load_state_dict(model_states['encoder'], strict=False)
decoder.load_state_dict(model_states['decoder'], strict=False)
device = torch.device("cuda")
encoder = encoder.to(device)
decoder = decoder.to(device)
# Example prompt: "Alibaba Group announces acquisition of Yahoo"
prompt = tokenizer.encode("阿里巴巴集团宣布收购雅虎")
batch_size = 1
tokenizer.tokenize("阿里巴巴集团宣布收购雅虎[SEP][SEP]")
| Inference/Headline Editing Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# 1) Params
# Quickbooks Integration - TOM
import sys
# The settlement date may be passed as the first CLI argument; '-f' is
# skipped because Jupyter passes "-f <kernel connection file>" when this
# script runs inside a notebook kernel.
args = sys.argv
if len(args) > 1 and args[1]!='-f':
    date=args[1]
else:
    date="2020-08-31"
#date="2020-07-28"
#date="2020-07-10"
#date="2020-07-13"
#date="2020-07-14"
#date="2020-07-21"
#date="2020-07-22"
#date="2020-07-24"
#date="2020-07-25"
# # cat QB_2020-06-30c.log| grep '+' # Daily report
# # cat QB_2020-06-30c.log| grep '*' # Monthly report
# Convert to .py file
# jupyter nbconvert --to script 'qb_daily.ipynb'
# Override print function for logging
# Daily log file: log/QB_<date>.log (directory created on demand).
filename = 'QB_'+date+'.log'
dirname = 'log'
import os
#print(str(os.path.join(dirname, filename)))
if not os.path.exists(dirname):
    os.mkdir(dirname)
def my_decorator(func):
    """Tee wrapper used to shadow print() below.

    Each call runs *func* twice: once directed at the daily log file
    (appended), then once normally; the second call's result is returned.
    """
    def wrapped_func(*args, **kwargs):
        log_path = str(os.path.join(dirname, filename))
        with open(log_path, 'a+') as log_file:
            func(*args, **kwargs, file=log_file)
            return func(*args, **kwargs)
    return wrapped_func
# Shadow the builtin print for the rest of the script so every message is
# also appended to log/QB_<date>.log.
print = my_decorator(print)
print('* +',date)
# +
# 2) Auth
from intuitlib.client import AuthClient
# OAuth2 client for the QuickBooks API (production environment).
auth_client = AuthClient(
    client_id='<KEY>',
    client_secret='<KEY>',
    environment='production',
    redirect_uri='http://localhost:8000/callback'
)
from intuitlib.exceptions import AuthClientError
from quickbooks import QuickBooks
try:
    client = QuickBooks(
        auth_client=auth_client,
        refresh_token='<KEY>',
        company_id='1428933025', #Realm ID
        minorversion=4
    )
except AuthClientError:
    # BUGFIX: bare `raise` re-raises the caught exception instance; the
    # original `raise AuthClientError` raised a fresh, argument-less class
    # and discarded the real error details. The original also had an
    # `except Error:` clause referencing an undefined name, which would have
    # masked any other failure with a NameError — other exceptions now
    # propagate naturally.
    raise
# -
# 3) Import Quickbooks
from quickbooks.objects import (Ref, Invoice, SalesReceipt,
SalesItemLineDetail,
SalesItemLine, DiscountLine, DiscountLineDetail, GroupLineDetail, Address, PaymentMethod, EmailAddress, PhoneNumber, Customer, Item, DescriptionLineDetail)
# +
# 4) QB Inventry look-up
import pprint
# Per-unit clip surcharge table indexed as price_clips[c_i][q_i], where c_i
# is the clip code parsed from the wick SKU suffix (0 = no clips) and q_i is
# the item's quantity-tier index supplied on the order line.
price_clips = [[0, 0, 0, 0, 0, 0], [0.07, 0.065, 0.06, 0.055, 0.05, 0.045], [0.15, 0.145, 0.140, 0.135, 0.130, 0.125]]
def get_sku(order):
    """Build the QuickBooks line items for one web order.

    Resolves each cart item's SKU against QuickBooks inventory (handling
    custom-wick SKU rewriting, product bundles with price corrections, bulk
    discounts and clip surcharges), appends a sales-tax line when present,
    and returns (line_items, missing_skus).
    """
    line_num = 1
    missing_skus = []
    items = []
    tax = False  # NOTE(review): never read — appears to be dead
    c_i = 0  # clip-code index into price_clips, set from the SKU suffix
    for i, item in enumerate(order['cart']['cartItems']):
        sku = item['sku']
        # Check to see if wick
        clips = False
        custom = False #new
        # Pricing/sku for custom wicks
        if 'WICK-CUST' in sku:
            sku_parts = sku.split('-')
            # Rebuild the SKU in inventory format (decimal point re-inserted)
            sku = sku_parts[0] + '-' + sku_parts[2] + '-.' + sku_parts[3] + '-' + sku_parts[4] + '-' + sku_parts[5]
            if int(item['quantity']) < 1000:
                # item['total'] = float(item['quantity'])*float(item['unit_price'])
                print('Should there be a fee?')
                #item['total'] = '%.2f' % (float(item['total']) + 30.00) # Extra fee of $30
                #item['unit_price'] = '%.2f' % (float(item['total'])/float(item['quantity']))
                custom = True
            if str(sku_parts[6]) == '1' or str(sku_parts[6]) == '2':
                clips = True
                c_i = int(sku_parts[6])
        # Pricing for wicks
        elif sku[:4] == 'WICK' and sku != 'WICK-SK-INC' and 'WICK-CUST' not in sku and 'WICK-VP' not in sku:
            sku_parts = sku.split('-')
            sku = sku_parts[0] + '-' + sku_parts[1] + '-.' + sku_parts[2] + '-' + sku_parts[3] + '-' + sku_parts[4]
            if str(sku_parts[5]) == '1' or str(sku_parts[5]) == '2':
                clips = True
                c_i = int(sku_parts[5])
        # Find SKU or create it.
        item_names = [sku]
        item_objects = Item.choose(item_names, field="Sku", qb=client)
        # If the SKU is found
        if len(item_objects) > 0:
            # If item is a bundle
            if item_objects[0].Type == 'Group':
                group_lines = []  # NOTE(review): never used
                details = item_objects[0].__dict__
                item_ref = Ref()
                item_ref.value = details['Id']
                item_ref.type = 'Group'
                item_group = GroupLineDetail()
                item_group.GroupItemRef = item_ref
                item_group.Quantity = item['quantity']
                line = SalesItemLine()
                line.LineNum = line_num
                line.Description = item['name']
                line.GroupLineDetail = item_group
                line.DetailType = 'GroupLineDetail'
                items.append(line)
                line_num += 1
                # Get Bundle Cost: sum of each component's unit price x qty
                sub_items = []
                sub_qty = {}
                for sub_item in details['ItemGroupDetail']['ItemGroupLine']:
                    sub_items.append(sub_item['ItemRef']['value'])
                    sub_qty[str(sub_item['ItemRef']['value'])] = sub_item['Qty']
                found_items = Item.choose(sub_items, field="Id", qb=client)
                unit_price = 0
                for sub_item in found_items:
                    unit_price += float("%.2f" % (float(sub_item.__dict__['UnitPrice'])*float(sub_qty[sub_item.__dict__['Id']])))
                unit_price = "%.2f" % (unit_price)
                # If price is not the same as bundle then add line item for correction
                if str(unit_price) != str(item['unit_price']):
                    #print('PRICES', str(unit_price), str(item['unit_price']))
                    com_price = float(item['unit_price']) - float(unit_price)
                    item_objects = Item.choose(['correction-sku'], field="Sku", qb=client)
                    item_ref = Ref()
                    item_ref.value = item_objects[0].Id
                    line_detail = SalesItemLineDetail()
                    line_detail.ServiceDate = None
                    line_detail.ItemRef = item_ref
                    line_detail.UnitPrice = com_price # in dollars
                    line_detail.Qty = item['quantity'] # quantity can be decimal
                    line = SalesItemLine()
                    line.LineNum = line_num
                    line.Amount = float(item['quantity'])*float(com_price) # in dollars
                    line.Description = 'Price Correction: ' + item['name']
                    line.SalesItemLineDetail = line_detail
                    items.append(line)
                    line_num += 1
            else:
                # If the item is not a bundle
                unit_price_ = item['unit_price']
                total_price_ = item['total']
                bulk_unit_ = item['unit_price']
                bulk_price_ = item['total']
                #If there is a bulk discount
                if "bulk_discount" in item.keys():
                    if item["bulk_discount"] != "":
                        # Back out the pre-discount unit price, then book the
                        # (negative) difference on the 'bulk-sku' item.
                        unit_price_ = (100*float(unit_price_))/(100 - float(item["bulk_discount"]))
                        total_price_ = float(item['quantity'])*float(unit_price_)
                        new_price = float(bulk_price_) - float(total_price_)
                        new_unit = float(bulk_unit_) - float(unit_price_)
                        bulk_item = Item.choose(['bulk-sku'], field="Sku", qb=client)
                        item_ref = Ref()
                        item_ref.value = bulk_item[0].Id
                        line_detail = SalesItemLineDetail()
                        line_detail.ServiceDate = None
                        line_detail.ItemRef = item_ref
                        line_detail.UnitPrice = new_unit # in dollars
                        line_detail.Qty = item['quantity'] # quantity can be decimal
                        line = SalesItemLine()
                        line.LineNum = line_num
                        line.Amount = new_price # in dollars
                        line.Description = 'Bulk Discount: ' + item['name']
                        line.SalesItemLineDetail = line_detail
                        items.append(line)
                        line_num += 1
                if clips:
                    # Clip surcharge is billed as its own line below, so
                    # strip it from this item's price here.
                    unit_price_ = float(item['unit_price']) - float(price_clips[c_i][item['q_i']])
                    total_price_ = float(item['total']) - int(item['quantity'])*float(price_clips[c_i][item['q_i']])
                if custom:
                    # $30 setup fee for custom wicks under 1000 units,
                    # amortized into the unit price.
                    unit_price_ = float(item['unit_price']) + 30.00/float(item['quantity'])
                    total_price_ = unit_price_*float(item['quantity'])
                item_ref = Ref()
                item_ref.value = item_objects[0].Id
                item_ref.type = 'Inventory'
                line_detail = SalesItemLineDetail()
                line_detail.ServiceDate = None
                line_detail.ItemRef = item_ref
                line_detail.UnitPrice = unit_price_ # in dollars
                line_detail.Qty = item['quantity'] # quantity can be decimal
                line = SalesItemLine()
                line.LineNum = line_num
                line.Amount = total_price_ # in dollars
                line.Description = item['name']
                line.SalesItemLineDetail = line_detail
                items.append(line)
                line_num += 1
                # If the item is a wick with clips
                if clips:
                    #print('CLIP TIME')
                    item_objects = Item.choose(['CLIP-FLAT-KTS'], field="Sku", qb=client)
                    item_ref = Ref()
                    item_ref.value = item_objects[0].Id
                    item_ref.type = 'Inventory'
                    line_detail = SalesItemLineDetail()
                    line_detail.ItemRef = item_ref
                    line_detail.ServiceDate = None
                    line_detail.UnitPrice = price_clips[c_i][item['q_i']] # in dollars
                    line_detail.Qty = item['quantity'] # quantity can be decimal
                    line = SalesItemLine()
                    line.LineNum = line_num
                    line.Amount = int(item['quantity'])*float(price_clips[c_i][item['q_i']]) # in dollars
                    line.Description = item['name']
                    line.SalesItemLineDetail = line_detail
                    items.append(line)
                    line_num += 1
        else:
            # If the sku is not found, still bill the line (no ItemRef) and
            # report the SKU back to the caller.
            missing_skus.append(sku)
            line_detail = SalesItemLineDetail()
            line_detail.ServiceDate = None
            line_detail.UnitPrice = item['unit_price'] # in dollars
            line_detail.Qty = item['quantity'] # quantity can be decimal
            line = SalesItemLine()
            line.LineNum = line_num
            line.Amount = item['total'] # in dollars
            line.Description = item['name']
            line.SalesItemLineDetail = line_detail
            items.append(line)
            line_num += 1
    # Sales tax as an explicit qty-1 line item on the 'tax-sku' product.
    if order['tax'] is not None:
        item_objects = Item.choose(['tax-sku'], field="Sku", qb=client)
        item_ref = Ref()
        #if len(item_objects)>0: # TODO: Remove
        # item_ref.value = item_objects[0].Id # TODO: Remove
        item_ref.value = item_objects[0].Id # TODO: Uncomment
        item_ref.type = 'Inventory'
        line_detail = SalesItemLineDetail()
        line_detail.ServiceDate = None
        line_detail.ItemRef = item_ref
        line_detail.UnitPrice = order['tax'] # in dollars
        line_detail.Qty = 1 # quantity can be decimal
        line = SalesItemLine()
        line.LineNum = line_num
        line.Amount = order['tax']
        line.Description = 'Sales Tax'
        line.SalesItemLineDetail = line_detail
        items.append(line)
        line_num += 1
    return items, missing_skus
# -
# 5) Create sale receipt
def create_sales_receipt(order):
    """Create and save a QuickBooks SalesReceipt for one web order.

    Builds phone/email/address records from the order dict, finds or creates
    the payment method and customer, assembles line items via get_sku(),
    appends discount and shipping lines, and saves the receipt using the
    order id as the QuickBooks DocNumber.
    """
    # Phone (order fields use the literal string 'null' for "absent")
    phone = PhoneNumber()
    if str(order['billing']['phoneNumber']) != 'null' and str(order['billing']['phoneNumber']) != '':
        phone.FreeFormNumber = order['billing']['phoneNumber'].replace('-', '').replace('.', '')
    # Email
    # BUGFIX: the original compared == 'null' (inverted), so a real address
    # was only "selected" when it was actually absent — and the selection was
    # stored in an unused local (email_add) instead of on the EmailAddress.
    email = EmailAddress()
    email_in = True
    if str(order['billing']['email']) != 'null' and str(order['billing']['email']) != '':
        email.Address = order['billing']['email']
    elif str(order['shipping']['email']) != 'null' and str(order['shipping']['email']) != '':
        email.Address = order['shipping']['email']
    else:
        email_in = False
    # Shipping
    shipping = Address()
    #shipping.Line1 = order['Shipping First Name'].iloc[0] + ' ' + order['Shipping Last Name'].iloc[0]
    shipping.Line1 = str(order['shipping']['street1'])
    if str(order['shipping']['street2']) != 'null' and order['shipping']['street2'] != '':
        shipping.Line2 = order['shipping']['street2']
    shipping.City = str(order['shipping']['city'])
    shipping.PostalCode = str(order['shipping']['zip'])
    shipping.Country = str(order['shipping']['country'])
    shipping.CountrySubDivisionCode = str(order['shipping']['state'])
    # Encoding # TODO: Allow UTF-8
    order['billing']['firstName'] = order['billing']['firstName'].encode('latin-1', "replace").decode('latin-1')
    order['billing']['lastName'] = order['billing']['lastName'].encode('latin-1', "replace").decode('latin-1')
    # Billing
    billing = Address()
    customer_name = order['billing']['firstName'] + ' ' + order['billing']['lastName']
    print('order[shipping][name]:', order['shipping']['name'])
    print('customer_name:',customer_name)
    billing.Line1 = str(order['billing']['address'])
    if str(order['billing']['addressInfo']) != 'null' and str(order['billing']['addressInfo']) != '':
        billing.Line2 = order['billing']['addressInfo']
    billing.City = str(order['billing']['city'])
    billing.PostalCode = str(order['billing']['zipcode'])
    billing.Country = str(order['billing']['country'])
    billing.CountrySubDivisionCode = str(order['billing']['state'])
    # Find Payment Method or Create It (named after the card's last four)
    if str(order['paymentMethod']['lastFour']) != 'null' and str(order['paymentMethod']['lastFour']) != '':
        payment_ref = Ref()
        cc = order['paymentMethod']['lastFour']
        names = [cc]
        payment_objects = PaymentMethod.choose(names, field="Name", qb=client)
        if len(payment_objects) > 0:
            payment_id = payment_objects[0].Id
        else:
            payment = PaymentMethod()
            payment.Type = 'CREDIT_CARD'
            payment.Name = cc
            payment_id = payment.save(qb=client).Id
        payment_ref.value = payment_id
    # Find Customer or Create It (matched on DisplayName)
    customer_ref = Ref()
    names = [customer_name]
    customer_objects = Customer.choose(names, field="DisplayName", qb=client)
    customer_ = False
    try:
        if len(customer_objects) > 0:
            customer_id = customer_objects[0].Id
            customer_ref.value = customer_id
            # NOTE(review): customer_ stays False here, so existing customers
            # are never attached to the receipt below — confirm intended.
        else:
            customer = Customer()
            customer.GivenName = order['billing']['firstName']
            customer.FamilyName = order['billing']['lastName']
            customer.DisplayName = customer_name
            customer.BillAddr = billing
            customer.ShipAddr = shipping
            customer.PrimaryPhone = phone
            customer.PrimaryEmailAddr = email
            customer_id = customer.save(qb=client).Id
            customer_ref.value = customer_id
            customer_ = True
    except Exception as ex:
        # Best-effort: a failed customer lookup/creation must not block the
        # receipt, but is now logged instead of being silently swallowed.
        print('Customer lookup/creation failed:', ex)
    # Add SKUs to line item
    items, missing_skus = get_sku(order)
    # Discounts (stored as an absolute dollar amount, not percent-based)
    if order['cart']['discountId'] != '':
        line_detail = DiscountLineDetail()
        line_detail.PercentBased = False # in dollars
        #line_detail.Discount = order['cart']['discountId'] # quantity can be decimal
        line = DiscountLine()
        line.LineNum = len(items) + 1
        line.Amount = order['cart']['discounts'] # in dollars
        line.DiscountLineDetail = line_detail
        items.append(line)
    # Add shipping as a qty-1 line on the 'shipping-sku' product
    item_objects = Item.choose(['shipping-sku'], field="Sku", qb=client)
    item_ref = Ref()
    item_ref.value = item_objects[0].Id
    #item_ref.type = 'Inventory'
    line_detail = SalesItemLineDetail()
    line_detail.ItemRef = item_ref
    line_detail.ServiceDate = None
    line_detail.UnitPrice = order['shippingCost'] # in dollars
    line_detail.Qty = 1 # quantity can be decimal
    line = SalesItemLine()
    line.LineNum = len(items) + 1
    line.Amount = order['shippingCost']
    line.Description = order['shipmentChoice']
    line.SalesItemLineDetail = line_detail
    items.append(line)
    print('Total:', order['total'], 'Tax:', order['tax'], 'discounts:', order['cart']['discounts'], 'shipping:', order['shippingCost'])
    # Assemble and save the receipt; DocNumber = web order id so duplicate
    # uploads can be detected later by DocNumber lookup.
    invoice = SalesReceipt()
    if customer_:
        invoice.CustomerRef = customer_ref
    invoice.Line = items
    invoice.ShipAddr = shipping
    invoice.BillAddr = billing
    if email_in:
        invoice.BillEmail = email
    invoice.DocNumber = order['orderId']
    invoice.TxnDate = order['date']
    invoice.save(qb=client)
# 6) Import PyMongo
import pymongo
import pandas
#db = pymongo.MongoClient('mongodb+srv://wooden_wick:mongo1234@wooden-wick-gwdsi.mongodb.net/test?retryWrites=true&w=majority')['WW']
#db = pymongo.MongoClient('mongodb+srv://tomww:<EMAIL>@<EMAIL>/?retryWrites=true')['WW']
# SECURITY NOTE(review): credentials are hard-coded in the connection URI
# (here and in the commented variants above) — move them to an environment
# variable or config file. 'WW' is the database holding the orders collection.
db = pymongo.MongoClient('mongodb+srv://tomww:newpassbecause...@cluster0-oniay.mongodb.net/?retryWrites=true')['WW']
# +
# Note: Just re-run this script from the beginning if "CursorNotFound" occurred.
# "CursorNotFound" simply means the MongoDB cursor timed out.
print('\n\n\n\n+ 7) Exporting sales from Database to QB sale receipt')
#order_id = ['W1579559730']
#orders = df[df['Order ID'].isin(order_id)].groupby('Order ID')
import numpy
import datetime
#date="2020-04-24"#date="2020-04-21"
#orders = db.orders.find({'created':{'$gte':startdate,'$lt':enddate}})
#orders = db.orders.find({"date" : {"$regex" : ".*(2020-01-[2-3][1-9]).*"}})
#li = ['W1587049373', 'W1587055746', 'W1587151264', 'W1587151362', 'W1587151663', 'W1587152083', 'W1587152168', 'W1587152412', 'W1587152535', 'W1587153031', 'W1587153085', 'W1587153602', 'W1587153657', 'W1587153974', 'W1587154151', 'W1587154316', 'W1587154635', 'W1587154665', 'W1587154678', 'W1587155178', 'W1587155644', 'W1587155721', 'W1587156141', 'W1587156756', 'W1587157180', 'W1587157293', 'W1587157569', 'W1587157708', 'W1587157781', 'W1587158230', 'W1587158328', 'W1587158408', 'W1587158519', 'W1587158725', 'W1587159077', 'W1587159099', 'W1587159228', 'W1587159545', 'W1587159689', 'W1587159774', 'W1587159898', 'W1587160431', 'W1587160515', 'W1587160660', 'W1587161086', 'W1587161308', 'W1587161451', 'W1587161522', 'W1587161721', 'W1587162006', 'W1587162172', 'W1587162224', 'W1587162263', 'W1587162932', 'W1587163513', 'W1587163878', 'W1587164622', 'W1587165125', 'W1587165983', 'W1587166972', 'W1587167275', 'W1587168189', 'W1587169059', 'W1587169675', 'W1587169834', 'W1587169886', 'W1587169890', 'W1587169972', 'W1587170334', 'W1587171348', 'W1587171537', 'W1587171659', 'W1587172500', 'W1587172730', 'W1587172936', 'W1587173048', 'W1587173091', 'W1587173703', 'W1587174880', 'W1587175106', 'W1587176803', 'W1587177121', 'W1587178160', 'W1587178174', 'W1587178453', 'W1587179126', 'W1587179160', 'W1587179832', 'W1587180419', 'W1587181000', 'W1587181382', 'W1587181531', 'W1587181551', 'W1587183015', 'W1587183018', 'W1587183291', 'W1587183939', 'W1587184959', 'W1587185242', 'W1587185424', 'W1587185581', 'W1587186561', 'W1587188811', 'W1587189142', 'W1587189362', 'W1587189393', 'W1587189705', 'W1587192326', 'W1587192564', 'W1587192645', 'W1587196379', 'W1587197026', 'W1587199527', 'W1587200588', 'W1587205738', 'W1587206770', 'W1587210313', 'W1587213958', 'W1587214092', 'W1587214461', 'W1587216497', 'W1587220077', 'W1587220854', 'W1587221467', 'W1587221490', 'W1587221527', 'W1587221867', 'W1587222241', 'W1587222531', 'W1587223191', 'W1587223856', 'W1587223932', 
'W1587224164', 'W1587225004', 'W1587225216', 'W1587225533', 'W1587226625', 'W1587226738', 'W1587226898', 'W1587227227', 'W1587227464', 'W1587227564', 'W1587227767', 'W1587228130', 'W1587228499', 'W1587228834', 'W1587228896', 'W1587229073', 'W1587230311', 'W1587230343', 'W1587230882', 'W1587230951', 'W1587231063', 'W1587231314', 'W1587231365', 'W1587231799', 'W1587232168', 'W1587232277', 'W1587232394', 'W1587232501', 'W1587232663', 'W1587232945', 'W1587233003', 'W1587233250', 'W1587233435', 'W1587233440', 'W1587234182', 'W1587234891', 'W1587234938', 'W1587234966', 'W1587235012', 'W1587235185', 'W1587235282', 'W1587235373', 'W1587235743', 'W1587236217', 'W1587236744', 'W1587237553', 'W1587238251', 'W1587238643', 'W1587238763', 'W1587238822', 'W1587238861', 'W1587239072', 'W1587239224', 'W1587239973', 'W1587239976', 'W1587240119', 'W1587240596', 'W1587240703', 'W1587241509', 'W1587241745', 'W1587242151', 'W1587243251', 'W1587244055', 'W1587244642', 'W1587244784', 'W1587245021', 'W1587246118', 'W1587246585', 'W1587246798', 'W1587247652', 'W1587247896', 'W1587249692', 'W1587249940', 'W1587250116', 'W1587250441', 'W1587250556', 'W1587251063', 'W1587251121', 'W1587251509', 'W1587251627', 'W1587251976', 'W1587252468', 'W1587254561', 'W1587254592', 'W1587255241', 'W1587257352', 'W1587258244', 'W1587258439', 'W1587259753', 'W1587260234', 'W1587260938', 'W1587261230', 'W1587261709', 'W1587261768', 'W1587262676', 'W1587262687', 'W1587264011', 'W1587266356', 'W1587267495', 'W1587268706', 'W1587269059', 'W1587269080', 'W1587269093', 'W1587269161', 'W1587269776', 'W1587270304', 'W1587270440', 'W1587270635', 'W1587271955', 'W1587272610', 'W1587273312', 'W1587273353', 'W1587273634', 'W1587273781', 'W1587274601', 'W1587276576', 'W1587276746', 'W1587278732', 'W1587299549', 'W1587301770', 'W1587302277', 'W1587302355', 'W1587303000', 'W1587304019', 'W1587304704', 'W1587305105', 'W1587305987', 'W1587306257', 'W1587306979', 'W1587309091', 'W1587309207', 'W1587310030', 'W1587310746', 
'W1587310815', 'W1587311770', 'W1587311776', 'W1587312594', 'W1587313019', 'W1587313668', 'W1587314162', 'W1587314871', 'W1587315585', 'W1587315740', 'W1587316065', 'W1587317196', 'W1587317359', 'W1587317671', 'W1587318882', 'W1587318998', 'W1587319028', 'W1587319064', 'W1587319335', 'W1587320485', 'W1587321431', 'W1587321830', 'W1587321839', 'W1587322223', 'W1587322232', 'W1587322280', 'W1587322921', 'W1587324136', 'W1587324551', 'W1587325005', 'W1587326697', 'W1587326871', 'W1587327355', 'W1587328616', 'W1587329110', 'W1587330180', 'W1587330672', 'W1587331889', 'W1587331963', 'W1587332238', 'W1587332533', 'W1587332739', 'W1587332789', 'W1587334055', 'W1587334097', 'W1587334547', 'W1587334856', 'W1587335042', 'W1587335744', 'W1587337847', 'W1587338351', 'W1587338603', 'W1587339298', 'W1587339426', 'W1587339730']
#li = ['W1587049373', 'W1587055746']
#orders = db.orders.find({"orderId" : {"$in" : li}})
# Counters for the end-of-run report.
order_ids = []   # every successfully-captured order id seen today
order_ = {}      # order id -> order total
incomplete = 0   # orders with no capture_response (user abandoned)
upload = 0       # receipts newly uploaded to QuickBooks
declined = 0     # captures that did not succeed
failed = 0       # uploads that raised an exception
total = 0
fails = []
# Pull every order whose date string contains the target day.
orders = db.orders.find({"date" : {"$regex" : ".*("+date+").*"}})
try:
    for _, order in enumerate(orders):
        if 'capture_response' in order.keys():
            if order['capture_response']['success']:
                try:
                    i = [order['orderId']] #i = ['W1234']#
                    order_ids.append(order['orderId'])
                    order_[order['orderId']] = order['total']
                    # Skip orders already uploaded (matched by DocNumber).
                    if len(SalesReceipt.choose(i, field="DocNumber", qb=client)) == 0:
                        #add sales receipt
                        print(_, order['orderId'], order['date']) #print(_, order['orderId'], order['date'])
                        create_sales_receipt(order) # 5) Create sale receipt
                        #order_[order['orderId']] = order['total']
                        #
                        upload += 1
                        print('Success')
                    else: #
                        print('Already in DB: ', order['orderId'])
                except Exception as ex:
                    print('ex:',str(ex))
                    failed += 1
                    print('Failed')
                    fails.append((str(ex), order['orderId'], order['date'] ))
            else:
                declined += 1
                print(order['capture_response']['description'])
        else:
            print('Incomplete Order')
            incomplete += 1
        total += 1
    # Daily summary (lines prefixed '+' are grepped for the daily report).
    print('+','Total :', total)
    print('+','Upload to QB :', upload)
    print('+','Failed to upload :', failed, fails)
    print('+','Declined(Should be 0) :', declined)
    print('+','Incomplete(by users action):', incomplete)
    #for order in order_ids:
    # print(order)
    print('+','len(order_ids)',len(order_ids))
    print('order_',order_)
except:
    # Broad catch so a mid-run failure (e.g. cursor timeout) still prints a
    # traceback instead of killing the notebook silently.
    import traceback
    traceback.print_exc()
# Play sound
# Audible two-tone beep to signal that the export finished.
from IPython.lib.display import Audio
import numpy as np
framerate = 4410
play_time_seconds = 3
t = np.linspace(0, play_time_seconds, framerate*play_time_seconds)
audio_data = np.sin(2*np.pi*300*t) + np.sin(2*np.pi*240*t)
Audio(audio_data, rate=framerate, autoplay=True)
# Exit
exit()
# +
# 8) Payment gateway
import logging
from authorizenet import apicontractsv1
from authorizenet.constants import constants
from authorizenet.apicontrollers import createTransactionController
from pprint import pprint
from datetime import datetime
from authorizenet.apicontrollers import getTransactionListController
# Logger
# Use getLogger so the logger participates in the logging hierarchy and
# global configuration; instantiating logging.Logger directly bypasses both.
log = logging.getLogger(__name__)
import imp
from authorizenet.apicontrollers import getTransactionListController
from datetime import datetime, timedelta
from authorizenet import apicontractsv1
from authorizenet.apicontrollers import getSettledBatchListController
class authorizeNetApi():
    """
    Class to handle all authorizations/transactions from
    authorizenet API
    """
    def __init__(self):
        """
        Initialize object with production Authorize.Net credentials.

        SECURITY NOTE(review): credentials are hard-coded (here and in the
        comments below) — move them to environment variables.
        """
        #API LOGIN ID: 65wyTnY5dD6L
        #TRANSACTION KEY: <KEY>
        # API Login 5pM8BWe6j6
        # 4t7F6Ya6Tn382vKf
        #KEY: Simon
        self.api_login_id = '5pM8BWe6j6'
        self.transaction_key = '<KEY>'
        self.ref_id = "MerchantID-0001"
        self.setting_name = "duplicateWindow"
        self.setting_value = "600"
        self.authorize_payment = "authOnlyTransaction"
        self.charge_payment = "authCaptureTransaction"
    def get_settled_batch_list(self, start, end):
        """Return the ids of settlement batches between *start* and *end*.

        Queries the production Authorize.Net getSettledBatchList endpoint
        and prints a per-batch report; returns the list of batch ids.
        """
        merchantAuth = apicontractsv1.merchantAuthenticationType()
        merchantAuth.name = self.api_login_id
        merchantAuth.transactionKey = self.transaction_key
        settledBatchListRequest = apicontractsv1.getSettledBatchListRequest()
        settledBatchListRequest.merchantAuthentication = merchantAuth
        settledBatchListRequest.refId = self.ref_id
        settledBatchListRequest.includeStatistics = False
        settledBatchListRequest.firstSettlementDate = start
        settledBatchListRequest.lastSettlementDate = end
        settledBatchListController = getSettledBatchListController(settledBatchListRequest)
        settledBatchListController.setenvironment(constants.PRODUCTION)
        settledBatchListController.execute()
        response = settledBatchListController.getresponse()
        batchz = []
        # Work on the response
        if response is not None:
            if response.messages.resultCode == apicontractsv1.messageTypeEnum.Ok:
                if hasattr(response, 'batchList'):
                    print('Successfully retrieved batch list.')
                    if response.messages is not None:
                        print('Message Code: %s' % response.messages.message[0]['code'].text)
                        print('Message Text: %s' % response.messages.message[0]['text'].text)
                        print()
                    for batchEntry in response.batchList.batch:
                        batchz.append(batchEntry.batchId)
                        print('Batch Id: %s' % batchEntry.batchId)
                        print('Settlement Time UTC: %s' % batchEntry.settlementTimeUTC)
                        print('Payment Method: %s' % batchEntry.paymentMethod)
                        if hasattr(batchEntry, 'marketType'):
                            print('Market Type: %s' % batchEntry.marketType)
                        if hasattr(batchEntry, 'product'):
                            print('Product: %s' % batchEntry.product)
                        # Statistics are only present when requested; each
                        # field is optional, hence the hasattr guards.
                        if hasattr(batchEntry, 'statistics'):
                            if hasattr(batchEntry.statistics, 'statistic'):
                                for statistic in batchEntry.statistics.statistic:
                                    if hasattr(statistic, 'accountType'):
                                        print('Account Type: %s' % statistic.accountType)
                                    if hasattr(statistic, 'chargeAmount'):
                                        print(' Charge Amount: %.2f' % statistic.chargeAmount)
                                    if hasattr(statistic, 'chargeCount'):
                                        print(' Charge Count: %s' % statistic.chargeCount)
                                    if hasattr(statistic, 'refundAmount'):
                                        print(' Refund Amount: %.2f' % statistic.refundAmount)
                                    if hasattr(statistic, 'refundCount'):
                                        print(' Refund Count: %s' % statistic.refundCount)
                                    if hasattr(statistic, 'voidCount'):
                                        print(' Void Count: %s' % statistic.voidCount)
                                    if hasattr(statistic, 'declineCount'):
                                        print(' Decline Count: %s' % statistic.declineCount)
                                    if hasattr(statistic, 'errorCount'):
                                        print(' Error Count: %s' % statistic.errorCount)
                                    if hasattr(statistic, 'returnedItemAmount'):
                                        print(' Returned Item Amount: %.2f' % statistic.returnedItemAmount)
                                    if hasattr(statistic, 'returnedItemCount'):
                                        print(' Returned Item Count: %s' % statistic.returnedItemCount)
                                    if hasattr(statistic, 'chargebackAmount'):
                                        print(' Chargeback Amount: %.2f' % statistic.chargebackAmount)
                                    if hasattr(statistic, 'chargebackCount'):
                                        print(' Chargeback Count: %s' % statistic.chargebackCount)
                                    if hasattr(statistic, 'correctionNoticeCount'):
                                        print(' Correction Notice Count: %s' % statistic.correctionNoticeCount)
                                    if hasattr(statistic, 'chargeChargeBackAmount'):
                                        print(' Charge Chargeback Amount: %.2f' % statistic.chargeChargeBackAmount)
                                    if hasattr(statistic, 'chargeChargeBackCount'):
                                        print(' Charge Chargeback Count: %s' % statistic.chargeChargeBackCount)
                                    if hasattr(statistic, 'refundChargeBackAmount'):
                                        print(' Refund Chargeback Amount: %.2f' % statistic.refundChargeBackAmount)
                                    if hasattr(statistic, 'refundChargeBackCount'):
                                        print(' Refund Chargeback Count: %s' % statistic.refundChargeBackCount)
                                    if hasattr(statistic, 'chargeReturnedItemsAmount'):
                                        print(' Charge Returned Items Amount: %.2f' % statistic.chargeReturnedItemsAmount)
                                    if hasattr(statistic, 'chargeReturnedItemsCount'):
                                        print(' Charge Returned Items Count: %s' % statistic.chargeReturnedItemsCount)
                                    if hasattr(statistic, 'refundReturnedItemsAmount'):
                                        print(' Refund Returned Items Amount: %.2f' % statistic.refundReturnedItemsAmount)
                                    if hasattr(statistic, 'refundReturnedItemsCount'):
                                        print(' Refund Returned Items Count: %s' % statistic.refundReturnedItemsCount)
                        print()
                else:
                    if response.messages is not None:
                        print('Failed to get transaction list.')
                        print('Code: %s' % (response.messages.message[0]['code'].text))
                        print('Text: %s' % (response.messages.message[0]['text'].text))
            else:
                if response.messages is not None:
                    print('Failed to get transaction list.')
                    print('Code: %s' % (response.messages.message[0]['code'].text))
                    print('Text: %s' % (response.messages.message[0]['text'].text))
        else:
            print('Error. No response received.')
        return batchz
    def get_transactions(self, batch_):
        """Return settled transactions of one batch.

        Returns (invoice_, t_web, t_auth): invoice number -> settled amount
        for transactions that carry an invoice number (website orders),
        plus the settled totals for invoiced (t_web) and non-invoiced
        (t_auth) transactions.
        """
        merchantAuth = apicontractsv1.merchantAuthenticationType()
        merchantAuth.name = self.api_login_id
        merchantAuth.transactionKey = self.transaction_key
        # set sorting parameters
        sorting = apicontractsv1.TransactionListSorting()
        sorting.orderBy = apicontractsv1.TransactionListOrderFieldEnum.id
        sorting.orderDescending = True
        # set paging and offset parameters
        paging = apicontractsv1.Paging()
        # Paging limit can be up to 1000 for this request
        paging.limit = 1000
        paging.offset = 1
        transactionListRequest = apicontractsv1.getTransactionListRequest()
        transactionListRequest.merchantAuthentication = merchantAuth
        transactionListRequest.refId = self.ref_id
        transactionListRequest.batchId = batch_
        transactionListRequest.sorting = sorting
        transactionListRequest.paging = paging
        transactionListController = getTransactionListController(transactionListRequest)
        transactionListController.setenvironment(constants.PRODUCTION)
        transactionListController.execute()
        # Work on the response
        invoice_ = {}
        t_web = 0
        t_auth = 0
        response = transactionListController.getresponse()
        if response is not None:
            if response.messages.resultCode == apicontractsv1.messageTypeEnum.Ok:
                if hasattr(response, 'transactions'):
                    print('Successfully retrieved transaction list.')
                    if response.messages is not None:
                        print('Message Code: %s' % response.messages.message[0]['code'].text)
                        print('Message Text: %s' % response.messages.message[0]['text'].text)
                        print('Total Number In Results: %s' % response.totalNumInResultSet)
                        print()
                    for transaction in response.transactions.transaction:
                        # Transactions without an invoiceNumber raise an
                        # AttributeError and are tallied as t_auth instead.
                        try:
                            if str(transaction.transactionStatus) == 'settledSuccessfully':
                                print('Invoice Id : %s' % transaction.invoiceNumber)
                                invoice_[transaction.invoiceNumber] = transaction.settleAmount
                                t_web += float(transaction.settleAmount)
                        except:
                            if str(transaction.transactionStatus) == 'settledSuccessfully':
                                t_auth += float(transaction.settleAmount)
                        print('Transaction Id: %s' % transaction.transId)
                        print('Transaction Status: %s' % transaction.transactionStatus)
                        if hasattr(transaction, 'accountType'):
                            print('Account Type: %s' % transaction.accountType)
                        print('Settle Amount: %.2f' % transaction.settleAmount)
                        if hasattr(transaction, 'profile'):
                            print('Customer Profile ID: %s' % transaction.profile.customerProfileId)
                        print()
                else:
                    if response.messages is not None:
                        print('Failed to get transaction list.')
                        print('Code: %s' % (response.messages.message[0]['code'].text))
                        print('Text: %s' % (response.messages.message[0]['text'].text))
            else:
                if response.messages is not None:
                    print('Failed to get transaction list.')
                    print('Code: %s' % (response.messages.message[0]['code'].text))
                    print('Text: %s' % (response.messages.message[0]['text'].text))
        else:
            print('Error. No response received.')
        return invoice_, t_web, t_auth
# +
print('\n\n\n\n+ 9) Gets QB receipts')
# Fetch today's receipts back out of QuickBooks, 100 DocNumbers per query.
t_ = order_ids
t_count = len(t_)
orders_ = [t_[i:i+100] for i in range(0,t_count,100)]
sales = []
for i in orders_:
    print('+','ITEMS', len(i))
    sales += SalesReceipt.choose(i, field="DocNumber", qb=client)
print('+','TOTAL', len(sales))
# +
print('\n\n\n\n10) Get all authorized data')
from datetime import datetime
api = authorizeNetApi()
#date_start = '2020-04-23'
#date_end = '2020-04-23'
date_start = date
date_end = date
start = datetime.strptime(date_start, '%Y-%m-%d')
end = datetime.strptime(date_end, '%Y-%m-%d')
batches = api.get_settled_batch_list(start, end)
# Merge every batch's settled transactions:
#   info  : invoice number -> settled amount
#   t_web : total settled for invoiced (website) transactions
#   t_auth: total settled for non-invoiced transactions
info = {}
t_web = 0
t_auth = 0
for batch in batches:
    batch_ = api.get_transactions(str(batch))
    info_ = batch_[0]
    t_web += batch_[1]
    t_auth += batch_[2]
    info = {**info, **info_}
# +
print('\n\n\n\n* + 11) Set comparison of count of order numbers')
print('* ',date)
# o_: invoice numbers settled on Authorize.Net ('W' marks website orders)
o_ = list(info.keys())
#print(len(o_))# TODO: REMOVE
#print(o_[0],o_[1])# TODO: REMOVE
o_ = [ele for ele in o_ if 'W' in str(ele)] # Only allows if starts with 'W'
#print(len(o_))# TODO
o_.sort(reverse=False)
#print(t_web, t_auth, t_web+t_auth)
print('+','Website: ',t_web,'Other/t_auth: ', t_auth, 'Total: ',t_web+t_auth)
print('+','authorize:', len(o_))
print('+','authorize:', len(set(o_)))
# x: DocNumbers of the receipts actually present in QuickBooks
x = []
for sale in sales:
    x.append(sale.__dict__['DocNumber'])
not_foun = []
for _, o in enumerate(t_):
    if o not in x:
        not_foun.append(o)
        #print(o)
#print(len(not_foun))
#print(not_foun)
#print(set(x).difference(set(t_)))
#print('modded database:', len(t_))
print('+','modded database:', len(set(t_)))
not_foun = []
# 3 parties should be same : 1) data 2) QB 3) auth
# data - qb: There is an order in the database that did not get uploaded to QB
# qb - data: This is an order in QB that is not in the database
# data - authorized: There is an order in the database that did not get charged on authorized
# authorized - data: There is an order that got charged on authorized.net but is not in the database
print('+','quickbooks:', len(set(x)))
print('* + data - qb:', set(t_).difference(set(x)))
print('* + qb - data:', set(x).difference(set(t_)))
print('* + data - authorized:', set(t_).difference(set(o_)))
print('* + authorized - data:', set(o_).difference(set(t_)))
for lol in x:
    if lol not in t_:
        not_foun.append(lol)
#print(len(not_foun))
#print(not_foun)
print('+','database:', len(order_ids))
print('+','database:', len(set(order_ids)))
import collections
# nn: DocNumbers that appear more than once in QuickBooks (duplicates)
nn = []
countz = dict(collections.Counter(x))
#print(countz)
for key in countz.keys():
    if countz[key] > 1:
        nn.append(key)
#print(nn)
#print(len(nn))
print('+','quickbooks:', len(x))
print('+','quickbooks:', len(set(x)))
#print(x)
# +
# 12) Cross-check ORDER TOTALS: for every QuickBooks receipt whose DocNumber is
# also in the Authorize.net map, compare the charged amount with the QB total
# and flag differences larger than 50 cents.
print('\n\n\n\n* + 12) Comparison of order totals')
print('* ',date)
import pprint
#print(info)
#print(order_)
bad = []           # DocNumbers whose charge and QB total disagree by > $0.50
count = 0
good = 0
total_orders = 0   # sum of Authorize.net charges for matched orders
total_qb = 0       # sum of QuickBooks totals for matched orders
#print('authorize:', len(o_))
#print('authorize:', len(set(o_)))
#print(order_)
x = []
for sale in sales:
    x.append(sale.__dict__['DocNumber'])
import collections
#print(collections.Counter(x))
print('+','quickbooks:', len(x))
print('+','quickbooks:', len(set(x)))
#print(n[])
# Authorize.net transactions with no matching QuickBooks receipt.
m = set(info.keys()).difference(set(x))
#print(m)
#print(len(set(m)))
t_j = 0
for j in set(info.keys()).difference(set(x)):
    t_j += info[j]
    #print(info[j])
#print(t_j)
#new_ = pandas.read_csv('order-export-2020-04-10 (2).csv')
#print(new_.head)
#jan_ = set([_[0] for _ in new_.iloc[:,0:1].values if ('S1' not in _[0] and 'S2' not in _[0])])
#print(len(jan_))
#print(m.intersection(jan_))
#print('missing in auth and on jan 21', len(m.intersection(jan_)))
#print(m.difference(jan_))
#print(len(m.difference(jan_)))
#for lmao in m.difference(jan_):
#    print(lmao)
#print('sales and jan', set(x).intersection(jan_))
t_j = 0
'''
for j in set(m.difference(jan_)):
    t_j += info[j]
'''
#print(info[j])
#print('value of remaining:', t_j)
#o_ = list(set(o_).difference(m.intersection(jan_)))
# Rebuild o_ as an id -> amount dict and total up the charges.
new_order = {}
t_x = 0
for h in o_:
    t_x += float(info[h])
    new_order[h] = info[h]
o_ = new_order
print('+','Database', t_x)
print('+','authorize mod:', len(o_))
print('+','authorize mod:', len(set(o_)))
#print(m.union(set(jan_)))
#print(len((set(jan_).intersection(set(info.keys()).difference(set(x))))))
#print(set(jan_).intersection(set(x)))
#print(len((set(jan_).union(set(x)))))
#if 'W1583810212' in info.keys(): print('IT INNN')
for sale in sales:
    #print('Invoice:', n, 'Backen:', order_[sale.__dict__['DocNumber']], 'Charge:', info[n], 'Quickbooks Total:', sale.__dict__['TotalAmt'] )
    #if float(info[n]) != float(sale.__dict__['TotalAmt']):
    #    print('Totals Dont equal')
    #    print('Invoice:', n, 'Backen:', order_[sale.__dict__['DocNumber']], 'Charge:', info[n], 'Quickbooks Total:', sale.__dict__['TotalAmt'] )
    #if str(sale.__dict__['DocNumber']) in order_.keys():
    #print(str(sale.__dict__['DocNumber']))
    if str(sale.__dict__['DocNumber']) in info.keys():
        n = sale.__dict__['DocNumber']
        # Difference between the charge on Authorize.net and the QB receipt total.
        d_ = float(info[n]) - float(sale.__dict__['TotalAmt'])
        total_orders += float(info[n])
        total_qb += float(sale.__dict__['TotalAmt'])
        if not (-0.50 <= d_ <= 0.50):
            bad.append(n)
            print('* + Difference', d_)
            print('* + Invoice:', n, 'Backen:', order_[sale.__dict__['DocNumber']], 'Charge:', info[n], 'Quickbooks Total:', sale.__dict__['TotalAmt'] )
            #print(n)
            #print('Invoice:', n, 'Backen:', order_[sale.__dict__['DocNumber']], 'Charge:', info[n], 'Quickbooks Total:', sale.__dict__['TotalAmt'] )
            #print('Bad')
        else:
            good += 1
            #print('Good')
            #print('GOOOOD', 'Invoice:', n, 'Backen:', order_[sale.__dict__['DocNumber']], 'Charge:', info[n], 'Quickbooks Total:', sale.__dict__['TotalAmt'] )
        #else:
            #print('pass', n)
        count +=1
    else:
        print('+','\nNot found in auth:', sale.__dict__['DocNumber'], '\n')
        #pprint.pprint(sale.__dict__)
#else:
#    print('\nNot found in backend:', n, '\n')
#print(count)
#print(good ,bad)
print('+','good:',good, 'len(bad):',len(bad))
print('+','bad:',bad)
print('* + ',date)
print('* + ', total_orders, 'total in qb:', total_qb, 'total in authorize', t_web)
print("* + QB - Auth for all orders:", total_qb - t_web)
print("* + QB - Database for orders found:", total_qb - total_orders)
# +
# 13) Removes everything of the given date
# NOTE: the whole cell body below is wrapped in a string literal on purpose —
# it is destructive (batch-deletes QB receipts) and is intentionally disabled.
"""
from quickbooks.batch import batch_delete
# Define order_ids: list
# order_ids=['W1587573851', 'W1587566260', 'W1587563932', 'W1587539041', 'W1587529510', 'W1587529290', 'W1587528855', 'W1587526296', 'W1587521655', 'W1587521129', 'W1587521115', 'W1587516147', 'W1587516175', 'W1587593340', 'W1587598225', 'W1587595595', 'W1587590935', 'W1587588532', 'W1587587917', 'W1587586584', 'W1587579956', 'W1587578043', 'W1587578424']#order_ids=['W1587443596']
'''
# Obtain order_ids: list
order_ids=[]
orders = db.orders.find({"date" : {"$regex" : ".*(2020-04-21).*"}})
for _, order in enumerate(orders):
    if 'capture_response' in order.keys():
        if order['capture_response']['success']:
            order_ids.append(order['orderId'])
#sales = SalesReceipt.choose(order_ids, field="DocNumber", qb=client)
#print('len(sales): ',len(sales))
'''
t_ = order_ids
t_count = len(t_)
orders_ = [t_[i:i+20] for i in range(0,t_count,20)]
sales = []
for i in orders_:
    print('ITEMS', len(i))
    sales = SalesReceipt.choose(i, field="DocNumber", qb=client)
print('TOTAL', len(sales))
results = batch_delete(sales, qb=client)
"""
# Play sound
# Audible "done" notification: autoplay a 3-second two-tone chime in the notebook.
from IPython.lib.display import Audio
import numpy as np
framerate = 4410          # samples per second
play_time_seconds = 3
t = np.linspace(0, play_time_seconds, framerate*play_time_seconds)
# Sum of a 300 Hz and a 240 Hz sine wave.
audio_data = np.sin(2*np.pi*300*t) + np.sin(2*np.pi*240*t)
Audio(audio_data, rate=framerate, autoplay=True)
print('a')
| .ipynb_checkpoints/qb_daily-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import music21
import pathlib
import pickle
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
# + pycharm={"name": "#%%\n"}
# Feed 100 notes in, predict the single next note.
input_notes_length = 100
output_dir = 'Bach'        # subdirectory of output/ holding the notes pickle and weights
generate_file = 'generate' # basename of the generated MIDI file
# + pycharm={"name": "#%%\n"}
def get_all_notes():
    """Load the pickled note sequence and derive its sorted vocabulary."""
    path = 'output/{}/all_notes'.format(output_dir)
    with open(path, 'rb') as fh:
        notes = pickle.load(fh)
    unique_notes = sorted(set(notes))
    print("\nvocab's length: ", len(unique_notes))
    print("#notes: ", len(notes))
    return notes, unique_notes
# + pycharm={"name": "#%%\n"}
def make_seq(all_notes, vocab):
    """One-hot encode every sliding window of `input_notes_length` notes.

    Returns an array of shape (num_windows, input_notes_length, len(vocab)).
    """
    # Map each vocabulary entry to its one-hot index.
    index_of = {note: idx for idx, note in enumerate(vocab)}
    num_training = len(all_notes) - input_notes_length
    encoded = np.zeros((num_training, input_notes_length, len(vocab)))
    for row in range(num_training):
        window = all_notes[row: row + input_notes_length]
        for col, note in enumerate(window):
            encoded[row, col, index_of[note]] = 1
        print("\r{} / {}".format(row + 1, num_training), end="")
    print()
    return encoded
# + pycharm={"name": "#%%\n"}
def choose_seq(input_notes_in_vocab):
    """Pick one encoded sequence at random and add a leading batch axis."""
    pick = np.random.randint(0, len(input_notes_in_vocab) - 1)
    return input_notes_in_vocab[pick][np.newaxis, :]
# + pycharm={"name": "#%%\n"}
def build_network(num_vocab):
    """Assemble and compile the two-layer LSTM note-prediction model."""
    model = keras.Sequential()
    model.add(keras.layers.LSTM(512, recurrent_dropout=0.3, return_sequences=True,
                                input_shape=(input_notes_length, num_vocab)))
    model.add(keras.layers.LSTM(512))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(0.3))
    model.add(keras.layers.Dense(256, activation='relu'))
    model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Dropout(0.3))
    model.add(keras.layers.Dense(num_vocab, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    return model
# + pycharm={"name": "#%%\n"}
def predict(model, sequence, num_vocab):
    """Autoregressively generate 500 one-hot notes from a seed sequence."""
    generated = []
    for step in range(500):
        probs = model.predict(sequence, verbose=0)
        one_hot = np.zeros(num_vocab)
        one_hot[np.argmax(probs)] = 1
        generated.append(one_hot)
        # Slide the window forward: drop the oldest note, append the new one.
        window = np.concatenate((sequence[0][1:], one_hot[np.newaxis, :]))
        sequence = window[np.newaxis, :]
        print("\rgenerating... {} / {}".format(step + 1, 500), end="")
    print()
    print(len(generated))
    print(generated[0])
    return generated
# + pycharm={"name": "#%%\n"}
def save_to_midi_file(sequence):
    """Decode one-hot vectors back to note names and write them out as MIDI.

    Relies on the module-level `vocab`, `output_dir` and `generate_file`.
    """
    # Map each one-hot row back to its vocabulary symbol.
    decoded = [vocab[list(row).index(1)] for row in sequence]
    stream_elements = []
    offset = 0
    for symbol in decoded:
        if ('.' in symbol) or symbol.isdigit():
            # Dot-separated pitch numbers denote a chord.
            chord_notes = []
            for pitch in symbol.split('.'):
                member = music21.note.Note(int(pitch))
                member.storedInstrument = music21.instrument.Piano()
                chord_notes.append(member)
            element = music21.chord.Chord(chord_notes)
        else:
            # A plain symbol is a single note.
            element = music21.note.Note(symbol)
            element.storedInstrument = music21.instrument.Piano()
        element.offset = offset
        stream_elements.append(element)
        # Each element is placed half a beat after the previous one.
        offset += 0.5
    midi_stream = music21.stream.Stream(stream_elements)
    midi_stream.write('midi', fp='output/{}/{}.mid'.format(output_dir, generate_file))
# + pycharm={"name": "#%%\n"}
# Full pipeline: load notes, encode windows, seed with a random window,
# restore trained weights, generate 500 notes, and write the MIDI file.
all_notes, vocab = get_all_notes()
inputs = make_seq(all_notes, vocab)
seq = choose_seq(inputs)
model = build_network(len(vocab))
model.load_weights('output/{}/weights.h5'.format(output_dir))
seq = predict(model, seq, len(vocab))
save_to_midi_file(seq)
# + pycharm={"name": "#%%\n"}
| ai_music_composer/start.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="h39mZstqmVn2"
# **Online Learning: Backpropagation applied to Perceptron Neural Network**
#
#
# + id="wjdcc-nKmeCR" executionInfo={"status": "ok", "timestamp": 1644646992579, "user_tz": 0, "elapsed": 204, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01131457097951893645"}}
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] id="tt3QjmTywa8e"
# Creating initial Data:
# + id="YCAl4chGpSGo" executionInfo={"status": "ok", "timestamp": 1644646992790, "user_tz": 0, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01131457097951893645"}}
# Target curve and input grid for the regression task.
T = np.array([-0.960, -0.577, -0.073, 0.377, 0.641, 0.660, 0.461, 0.134, -0.201, -0.434, -0.500, -0.393, -0.165, 0.099, 0.307, 0.396, 0.345, 0.182, -0.031, -0.219, -0.320])
X = np.linspace(-1,1,21)
X_bias = np.ones(21)
# Creating a relational data set: column 0 is the bias, column 1 the input.
X = np.concatenate([X_bias, X])
X = X.reshape(2,21).T
# Fixed: columns was passed as a *set* ({"Bias","X"}), whose iteration order is
# arbitrary, so the column names could attach to the wrong data. A list keeps
# the intended Bias/X order deterministic.
dataset = pd.DataFrame(X , columns=["Bias", "X"])
dataset["T"] = T
# + colab={"base_uri": "https://localhost:8080/", "height": 708} id="V7kzUy9N0ax5" executionInfo={"status": "ok", "timestamp": 1644646992791, "user_tz": 0, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01131457097951893645"}} outputId="aafcba9e-9653-4ad4-bcd7-4bf3ab515677"
dataset
# + [markdown] id="jp83WvscycAO"
# Plotting input Data:
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="hwvctbAJyexp" executionInfo={"status": "ok", "timestamp": 1644646993144, "user_tz": 0, "elapsed": 360, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01131457097951893645"}} outputId="95b08f6e-6a05-4a69-cb0c-e1a4e3c7ef7e"
# Plot bias, input and target series together.
# NOTE(review): legend labels assume the DataFrame column order is
# Bias, X, T — confirm against the DataFrame construction above.
plt.plot(dataset, label='Inline Label')
plt.legend(["Bias","X:input","Target"])
# + [markdown] id="EUFgdHnZ21yl"
# **Create parameters of the Perceptron neural network**
# + id="e4mEJgrk1uu9" executionInfo={"status": "ok", "timestamp": 1644646993466, "user_tz": 0, "elapsed": 327, "user": {"displayName": "adrian\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01131457097951893645"}} colab={"base_uri": "https://localhost:8080/"} outputId="64432bae-ec25-4455-d865-da2423d4d112"
# Network size: 1 input (+ bias), 5 hidden tanh units, 1 linear output, 21 samples.
no_inputs = 1
no_hidden_neurons = 5
no_outputs = 1
no_samples = 21
# Weights for layer 1
#V1 = np.array([ 3.6204,3.8180,3.5548,3.0169,3.6398])
#V1 = np.array([[-2.7110, 3.6204],[1.2214, 3.8180],[-0.7778, 3.5548],[2.1751, 3.0169],[2.9979, 3.6398]])
V1 = np.random.rand(5,2)  # hidden weights: 5 neurons x (bias + input)
print(V1.shape)
##Bias1 = np.array([-2.7110, 1.2214, -0.7778, 2.1751, 2.9979])
# Weights for layer 2
#W2 = np.array([-1.0295, -0.6334, -1.2985, 0.8719, 0.5937, 0.9906 ])
W2 = np.random.rand(6,1)  # output weights: bias + 5 hidden activations
##Bias2 = np.array([-1.0295])
print(W2.shape, W2)
print(X.shape,X, V1)
# + [markdown] id="V3mVTVYA5Vve"
# **1. Forward Pass**
# + id="isJ2g14hGNTy" executionInfo={"status": "ok", "timestamp": 1644646993467, "user_tz": 0, "elapsed": 5, "user": {"displayName": "adrian\u00f1 Rubio", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01131457097951893645"}}
# Model of the Neural Network (to be discussed in the tutorial)
# Model of the Neural Network (to be discussed in the tutorial)
def forward_prop(X, V1, W2):
    """Forward pass of the one-hidden-layer perceptron for a single sample.

    Parameters
    ----------
    X : 1-D array, (no_inputs + 1,) — bias term plus input value.
    V1 : 2-D array, (hidden, no_inputs + 1) — input-to-hidden weights.
    W2 : 2-D array, (hidden + 1, 1) — hidden-to-output weights (incl. bias).

    Returns
    -------
    (Y, zl) : network output and the bias-extended hidden activation vector.
    """
    # FIRST LAYER: weighted sums, then tanh activation.
    u_li = X.dot(V1.T)
    # Fixed: the explicit (e^u - e^-u)/(e^u + e^-u) form overflows to nan/inf
    # for large |u|; np.tanh is the numerically stable equivalent.
    M = np.tanh(u_li)
    # SECOND LAYER: prepend the bias unit, then a linear output.
    # NOTE(review): bias length uses the global no_outputs — this relies on
    # no_outputs == 1; confirm.
    bias_2 = np.ones(no_outputs)
    zl = np.concatenate((bias_2, M), axis=0)
    Y = zl.dot(W2)
    return Y, zl
# + [markdown] id="tknjTcqv5g7F"
# 2. Backpropagation Pass
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="SrSUQ6zHzS_S" executionInfo={"status": "ok", "timestamp": 1644646995480, "user_tz": 0, "elapsed": 2017, "user": {"displayName": "adrian\u00f1 Rubio", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01131457097951893645"}} outputId="1fe25ce6-ca6e-4e56-abd3-c4409855a2e4"
# Online (per-sample) backpropagation: 700 epochs over the 21 samples,
# updating W2 and V1 in place after every sample.
learning_rate = 0.08
error = np.zeros([21,1])
Yi = np.zeros([21,1]) # network output
# delta_l1 = np.zeros([5,1])
for epoch in range(700):
    for sample in range(0,21):
        # --- FeedForward PASS ---
        Yi[sample] , zl = forward_prop(X[sample,:], V1, W2)
        # # --- Calculation of Individual Error ---
        error[sample] = T[sample] - Yi[sample]
        # # print(error[sample], Yi[sample],X[sample,:])
        # # --- Backpropagation PASS ---
        # # Update for Output layer Weights wil (W2)
        # # (Only one output for Regression problems)
        # print(zl.shape)
        for l in range(0,no_hidden_neurons+1):
            # # is plus 1 because we consider the bias
            delta_i2 = -error[sample]*zl[l]
            W2[l] = W2[l] - learning_rate*delta_i2
            # print(zl[l], W2, delta_i2)
        # # print(l)
        # # Update Input weights vlj (V1)
        # # print(V1.shape)
        for l in range(0,no_hidden_neurons):
            # Hidden delta: dtanh/du = 1 - tanh^2, evaluated at activation zl[l+1].
            delta_l1 = -error[sample] * W2[l+1] * (1 - zl[l+1]*zl[l+1] )
            for j in range(0,no_inputs+1):
                V1[l][j] = V1[l][j] - learning_rate*delta_l1*X[sample][j]
# Compare the fitted outputs against the desired targets.
plt.plot(Yi, 'red', marker="o", label='Inline Label')
plt.plot(T, 'b', marker="x", label='Inline Label')
plt.legend(["Network Output","Desired target"])
plt.show()
print(V1, zl.shape, zl)
print(W2, Yi,X)
| Online_Learning_Perceptron.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Grove Temperature Sensor 1.2
#
# This example shows how to use the [Grove Temperature Sensor v1.2](http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor_V1.2). You will also see how to plot a graph using matplotlib. The Grove Temperature sensor produces an analog signal, and requires an ADC.
#
# A [Grove Temperature sensor](http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor_V1.2) and Pynq Grove Adapter, or Pynq Shield is required. The Grove Temperature Sensor, Pynq Grove Adapter, and Grove I2C ADC are used for this example.
#
# You can read a single value of temperature or read multiple values at regular intervals for a desired duration.
#
# At the end of this notebook, a Python only solution with single-sample read functionality is provided.
# ### 1. Load overlay
# Load the base PYNQ overlay bitstream onto the board.
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
# ### 2. Read single temperature
# This example shows on how to get a single temperature sample from the Grove TMP sensor.
#
# The Grove ADC is assumed to be attached to the GR4 connector of the StickIt. The StickIt module is assumed to be plugged in the 1st PMOD labeled JB. The Grove TMP sensor is connected to the other connector of the Grove ADC.
#
# Grove ADC provides a raw sample which is converted into resistance first and then converted into temperature.
# +
import math
from pynq.lib.pmod import Grove_TMP
from pynq.lib.pmod import PMOD_GROVE_G4
tmp = Grove_TMP(base.PMODB,PMOD_GROVE_G4)
temperature = tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
# -
# ### 3. Start logging once every 100ms for 10 seconds
# Executing the next cell will start logging the temperature sensor values every 100ms, and will run for 10s. You can try touch/hold the temperature sensor to vary the measured temperature.
#
# You can vary the logging interval and the duration by changing the values 100 and 10 in the cellbelow. The raw samples are stored in the internal memory, and converted into temperature values.
# +
import time
# %matplotlib inline
import matplotlib.pyplot as plt
# Log a sample every 100 ms for 10 s, then fetch the buffered readings.
tmp.set_log_interval_ms(100)
tmp.start_log()
# Change input during this time
time.sleep(10)
tmp_log = tmp.get_log()
# Plot the logged readings as red dots, axes scaled to the observed range.
plt.plot(range(len(tmp_log)), tmp_log, 'ro')
plt.title('Grove Temperature Plot')
min_tmp_log = min(tmp_log)
max_tmp_log = max(tmp_log)
plt.axis([0, len(tmp_log), min_tmp_log, max_tmp_log])
plt.show()
# -
# ### 4. A Pure Python class to exercise the AXI IIC Controller inheriting from PMOD_IIC
# This class is ported from http://www.seeedstudio.com/wiki/Grove_-_Temperature_Sensor.
# +
from time import sleep
from math import log
from pynq.lib.pmod import PMOD_GROVE_G3
from pynq.lib.pmod import PMOD_GROVE_G4
from pynq.lib import Pmod_IIC
class Python_Grove_TMP(Pmod_IIC):
    """This class controls the grove temperature sensor over IIC.

    This class inherits from the Pmod_IIC class.

    Attributes
    ----------
    bValue : int
        Thermistor B-constant used in the temperature conversion; depends
        on the sensor hardware revision.
    """
    def __init__(self, pmod_id, gr_pins, model = 'v1.2'):
        """Return a new instance of a Grove temperature sensor object.

        Parameters
        ----------
        pmod_id : int
            The PMOD ID (1, 2) corresponding to (PMODA, PMODB).
        gr_pins: list
            The group pins on Grove Adapter. G3 or G4 is valid.
        model : string
            Temperature sensor model (can be found on the device).
        """
        if gr_pins in [PMOD_GROVE_G3, PMOD_GROVE_G4]:
            [scl_pin,sda_pin] = gr_pins
        else:
            raise ValueError("Valid group numbers are G3 and G4.")
        # Each revision has its own B value
        if model == 'v1.2':
            # v1.2 uses thermistor NCP18WF104F03RC
            self.bValue = 4250
        elif model == 'v1.1':
            # v1.1 uses thermistor NCP18WF104F03RC
            self.bValue = 4250
        else:
            # v1.0 uses thermistor TTC3A103*39H
            self.bValue = 3975
        # 0x50 is the IIC address of the Grove ADC.
        super().__init__(pmod_id, scl_pin, sda_pin, 0x50)
        # Initialize the Grove ADC
        # NOTE(review): presumably writes the ADC configuration register
        # (register 0x2, value 0x20) — confirm against the ADC datasheet.
        self.send([0x2,0x20]);
    def read(self):
        """Read temperature in Celsius from grove temperature sensor.

        Returns
        -------
        float
            Temperature reading in Celsius.
        """
        val = self._read_grove_adc()
        # Thermistor resistance ratio derived from the 12-bit ADC reading.
        R = 4095.0/val - 1.0
        # B-parameter (Steinhart-Hart) equation, referenced to 25 C (298.15 K).
        temp = 1.0/(log(R)/self.bValue + 1/298.15)-273.15
        return temp
    def _read_grove_adc(self):
        # Select the conversion-result register, then read two bytes:
        # the 12-bit value sits in the low nibble of byte 0 plus byte 1.
        self.send([0])
        bytes = self.receive(2)
        # NOTE(review): the x2 scaling presumably maps the raw value to the
        # sensor's full-scale range — confirm against the ADC datasheet.
        return 2*(((bytes[0] & 0x0f) << 8) | bytes[1])
# +
from pynq import PL
# Flush IOP state
PL.reset()
py_tmp = Python_Grove_TMP(base.PMODB, PMOD_GROVE_G4)
temperature = py_tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
| boards/Pynq-Z1/base/notebooks/pmod/pmod_grove_tmp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <!--NOTEBOOK_HEADER-->
# *This notebook contains material from [cbe61622](https://jckantor.github.io/cbe61622);
# content is available [on Github](https://github.com/jckantor/cbe61622.git).*
#
# <!--NAVIGATION-->
# < [1.1 Configuring a Workstation](https://jckantor.github.io/cbe61622/01.01-Configuring-a-Workstation.html) | [Contents](toc.html) | [1.20 Semester Projects](https://jckantor.github.io/cbe61622/01.20-Projects.html) ><p><a href="https://colab.research.google.com/github/jckantor/cbe61622/blob/master/docs/01.10-Workstation-Learning-Exercises.ipynb"> <img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://jckantor.github.io/cbe61622/01.10-Workstation-Learning-Exercises.ipynb"> <img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
# + [markdown] nbpages={"level": 1, "link": "[1.10 Workstation Learning Exercises](https://jckantor.github.io/cbe61622/01.10-Workstation-Learning-Exercises.html#1.10-Workstation-Learning-Exercises)", "section": "1.10 Workstation Learning Exercises"}
# # 1.10 Workstation Learning Exercises
# + [markdown] nbpages={"level": 2, "link": "[1.10.1 Understanding PWM](https://jckantor.github.io/cbe61622/01.10-Workstation-Learning-Exercises.html#1.10.1-Understanding-PWM)", "section": "1.10.1 Understanding PWM"}
# ## 1.10.1 Understanding PWM
#
# 1. Using the pins on an MCU, use the oscilloscope to observe PWM signals and how they respond to Python commands.
#
# 2. Attach a servo, and observe the response of the servo to a PWM command.
# + [markdown] nbpages={"level": 2, "link": "[1.10.2 Characterizing noise in a MEMS device.](https://jckantor.github.io/cbe61622/01.10-Workstation-Learning-Exercises.html#1.10.2-Characterizing-noise-in-a-MEMS-device.)", "section": "1.10.2 Characterizing noise in a MEMS device."}
# ## 1.10.2 Characterizing noise in a MEMS device.
#
# Accelerometers are MEMS devices with inherent noise characteristics. This exercise explores the noise characteristics of this device. The learning goals are to understand typical types of noise encountered in sensors.
#
# Mount the ADXL327 3-axis accelerometer on ADS breadboard. Apply 3.3v power supply and ground. Using the datasheet, select capacitors to provide maximum bandwidth on each axis. Using the spectrum analyzer, identify the noise characteristics of the sensor.
#
# Assume for each axis,
#
# $$a_x = \bar{a_x} + e_x$$
# $$a_y = \bar{a_y} + e_y$$
# $$a_z = \bar{a_z} + e_z$$
#
# What is the frequency spectrum for $e_x$, $e_y$, and $e_z$? Are we seeing 1/f noise, broadband noise, or a combination of the two?
# + nbpages={"level": 2, "link": "[1.10.2 Characterizing noise in a MEMS device.](https://jckantor.github.io/cbe61622/01.10-Workstation-Learning-Exercises.html#1.10.2-Characterizing-noise-in-a-MEMS-device.)", "section": "1.10.2 Characterizing noise in a MEMS device."}
# -
# <!--NAVIGATION-->
# < [1.1 Configuring a Workstation](https://jckantor.github.io/cbe61622/01.01-Configuring-a-Workstation.html) | [Contents](toc.html) | [1.20 Semester Projects](https://jckantor.github.io/cbe61622/01.20-Projects.html) ><p><a href="https://colab.research.google.com/github/jckantor/cbe61622/blob/master/docs/01.10-Workstation-Learning-Exercises.ipynb"> <img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://jckantor.github.io/cbe61622/01.10-Workstation-Learning-Exercises.ipynb"> <img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
| docs/01.10-Workstation-Learning-Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: "Python (venv)\u2026"
# language: python
# name: venv
# ---
# +
#default_exp data.load
# -
# %load_ext autoreload
# %autoreload 2
#export
from unoai.imports import *
# +
#export
def _bytes_feature(value) -> tf.train.Feature:
    """Returns a bytes_list Feature from a string / byte."""
    if isinstance(value, type(tf.constant(0))):
        # EagerTensors must be unpacked to raw bytes before wrapping.
        value = value.numpy()
    payload = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=payload)
def _float_feature(value) -> tf.train.Feature:
    """Returns a float_list from a float / list of floats."""
    if not isinstance(value, list): value = [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _int64_feature(value) -> tf.train.Feature:
    """Returns an int64_list from a bool / enum / int / uint."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def get_tf_example_from_raw_img(raw_img, y=None) -> tf.train.Example:
    """Build a tf.train.Example from an encoded image and an optional int label.

    Fixed: the parameter was declared `y: None` — an *annotation* with no
    default — which made `y` required and broke label-less calls. It is now a
    true optional parameter defaulting to None (backward compatible).
    """
    if y is None:
        feat_dict = {'image': _bytes_feature(raw_img)}
    else:
        feat_dict = {'image': _bytes_feature(raw_img), 'label': _int64_feature(y)}
    return tf.train.Example(features=tf.train.Features(feature=feat_dict))
def get_tf_example_from_numpy(x, y=None) -> tf.train.Example:
    """Build a tf.train.Example from a flat numpy image and an optional label.

    Fixed: `y: None` was an annotation, not a default, so the one-argument
    call in store_np_imgs_as_tfrecord raised TypeError; `y=None` makes the
    label genuinely optional (backward compatible).
    """
    if y is None:
        feat_dict = {'image': _float_feature(x.tolist())}
    else:
        feat_dict = {'image': _float_feature(x.tolist()), 'label': _int64_feature(y)}
    return tf.train.Example(features=tf.train.Features(feature=feat_dict))
def store_np_imgs_as_tfrecord(output_path: str, x: np.ndarray, y: np.ndarray = None) -> None:
    """Flatten a batch of images and write them (with optional labels) to a TFRecord file."""
    n = x.shape[0]
    # Flatten each image to a 1-D float vector for serialization.
    x_reshape = x.reshape(n,-1)
    with tf.io.TFRecordWriter(output_path) as w:
        for i in tqdm(range(n)):
            # NOTE(review): the label-less branch calls get_tf_example_from_numpy
            # with a single argument, which assumes its `y` is optional — confirm.
            if y is None: ex = get_tf_example_from_numpy(x_reshape[i])
            else: ex = get_tf_example_from_numpy(x_reshape[i], y[i])
            w.write(ex.SerializeToString())
def parse_tf_example_img(tf_example: tf.train.Example,h: int, w: int, c: int=3,dtype=tf.float32):
    """Decode one serialized Example into an (h, w, c) image tensor and its label."""
    schema = {
        'image': tf.io.FixedLenFeature([h * w * c], dtype),
        'label': tf.io.FixedLenFeature([], tf.int64)
    }
    parsed = tf.io.parse_single_example(tf_example, features=schema)
    image = tf.reshape(parsed['image'], [h, w, c])
    return image, parsed['label']
def read_tfrecord_as_dataset(ds_path: str, parser: Callable, batch_size: int = None,
                             shuffle: bool = True, shuffle_buffer_size: int = 50000,
                             prefetch: bool = False) -> tf.data.Dataset:
    """Open a TFRecord file as a parsed, optionally shuffled/batched/prefetched dataset."""
    dataset = tf.data.TFRecordDataset(ds_path)
    if shuffle:
        dataset = dataset.shuffle(shuffle_buffer_size)
    # Parse records in parallel; AUTOTUNE lets tf.data pick the parallelism.
    dataset = dataset.map(parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    if batch_size is not None:
        dataset = dataset.batch(batch_size)
    if prefetch:
        dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return dataset
| nbs/01_data.load.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Partial Evalutation and Currying
#
# 通常编程时,我们会想半途中评估函数(debug)。 考虑下cumulative sum的以下定义
#
# ```
# def cumsum(data):
# return accumulate(add, data)
# ```
#
# 还有函数 `fib_many` 执行了 Fibonacci 函数, `fib`, 对一个数字列表操作
#
# ```
# def fib_many(data):
# return map(fib, data)
# ```
#
# 在每种情况下,我们专门研究一个具有单一特定参数的高阶函数(`accumulate`或`map`),将第二个参数打开供将来使用。
#
# +
# Obligatory set of small functions
from toolz import accumulate
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def mul(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
def fib(n):
    """Return the Fibonacci number reached after n iterations (fib(0) == 1)."""
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return curr
# +
# We commonly use a lambda expression and the equals sign
cumsum = lambda data: accumulate(add, data)
# This is perfectly equivalent to the function definition
def cumsum(data):
    """Lazily yield the running (cumulative) sums of `data`."""
    return accumulate(add, data)
# +
# Or we can use the `partial` function from functools
# Partial inserts an argument into the first place
from functools import partial
cumsum = partial(accumulate, add)
list(cumsum([1,2,3,1,2,3]))
# Semantically like the following:
# cumsum(whatever) = accumulate(add, whatever)
# -
# ### Exercise
#
# 局部执行 `mul` 函数去创建新的double函数
double = ...  # TODO(exercise): partially apply `mul` with 2, e.g. functools.partial(mul, 2)
assert double(5) == 10  # fixed: `= 10` was a SyntaxError (assignment); comparison needs `==`
# ## Curry
#
#
# 柯里化提供合成糖进行部分评估。
#
# curry 是一个更高阶的函数,它改变了函数用一组不完整的参数调用时的行为
#
# 通常Python会引发TypeError。
#
# 现在它返回一个局部函数。
#
mul(2)  # with the plain function this raises TypeError (missing argument)
from toolz import curry
mul = curry(mul)  # after currying, a one-argument call returns a partial instead
mul2 = mul(2)
mul2(8)
# Currying gives the idiomatic expression of what functools.partial does explicitly.
# NOTE(review): `accumulate` is not curried yet at this point, so this call
# presumably raises TypeError until the next cell re-binds it — confirm.
accumulate(add)
# +
accumulate = curry(accumulate)
cumsum = accumulate(add)
cumprod = accumulate(mul)
# Fixed: `curry(map(fib))` called the *builtin* map with a single argument,
# which raises TypeError (map requires an iterable). Curry map itself, then
# partially apply it to `fib`.
fibMany = curry(map)(fib)
list(fibMany(range(10)))
# -
# ## Curried namespace
#
# Toolz包含一个用于curried函数的独立命名空间
#
# The toolz.curried namespace ships pre-curried versions of the helpers.
from toolz.curried import map
fibMany = map(fib)  # map is already curried, so one argument yields a partial
# This is the same as the following:
from toolz import map, curry
map = curry(map)
| 3-curry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_regression, mutual_info_regression
from sklearn.feature_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from feature_selector import FeatureSelector
# Display options for notebook inspection of the wide student dataset.
# NOTE(review): abbreviated option names ('max_columns', 'max_row') rely on
# pandas' substring option matching — confirm they resolve on your version.
pd.set_option('max_columns',50)
pd.set_option('max_row',200)
pd.set_option('display.float_format', lambda x: '%.5f' % x)
data = pd.read_csv("student-merged.csv")
variables = data.columns
def merge_data(d1, d2):
    """Concatenate two student datasets into one, tagging each row's class.

    Adds a 'class' column *in place* on both inputs: rows of d1 are tagged
    'por' and rows of d2 'mat'; the result gets a fresh 0..n-1 index.
    """
    d1["class"] = 'por'
    d2["class"] = 'mat'
    combined = pd.concat([d1, d2])
    combined.reset_index(inplace=True, drop=True)
    return combined
def dataframe_it():
    """Load the raw student CSVs from the working directory.

    Returns:
        tuple: (math dataframe, Portuguese dataframe).

    NOTE(review): the caller passes the first returned frame (math) into
    merge_data, which labels it 'por' — confirm the intended ordering.
    """
    return pd.read_csv("student-mat.csv"), pd.read_csv("student-por.csv")
d1,d2=dataframe_it()
# NOTE(review): dataframe_it() returns (student-mat, student-por), but
# merge_data tags its first argument as 'por' — the class labels look
# swapped; confirm the intended order.
data_new = merge_data(d1, d2)
data_new
# Rebind the working name; `data` and `data_new` alias the same frame.
data=data_new
# +
# abandoned
# Integer-encode every categorical column via an explicit per-column map.
# This replaces 140 lines of copy-pasted if-chains (which also reused
# len(school) as the loop bound for every column). Values not present in a
# map are left unchanged, matching the original if-chain behaviour.
category_codes = {
    'school':     {"GP": 0, "MS": 1},
    'sex':        {"F": 0, "M": 1},
    'address':    {"U": 0, "R": 1},
    'famsize':    {"LE3": 0, "GT3": 1},
    'Pstatus':    {"T": 0, "A": 1},
    'Mjob':       {"teacher": 0, "health": 1, "services": 2, "at_home": 3, "other": 4},
    'Fjob':       {"teacher": 0, "health": 1, "services": 2, "at_home": 3, "other": 4},
    'reason':     {"home": 0, "reputation": 1, "course": 2, "other": 3},
    'guardian':   {"mother": 0, "father": 1, "other": 2},
    'schoolsup':  {"yes": 0, "no": 1},
    'famsup':     {"yes": 0, "no": 1},
    'paid':       {"yes": 0, "no": 1},
    'activities': {"yes": 0, "no": 1},
    'nursery':    {"yes": 0, "no": 1},
    'higher':     {"yes": 0, "no": 1},
    'internet':   {"yes": 0, "no": 1},
    'romantic':   {"yes": 0, "no": 1},
}
dict_var = {}
for col, mapping in category_codes.items():
    dict_var[col] = [mapping.get(v, v) for v in data[col]]
# Keep the per-column name bindings the original cell created, in case
# later (unseen) cells reference them directly.
school, sex, address, famsize, Pstatus, Mjob, Fjob, reason, guardian = (
    dict_var['school'], dict_var['sex'], dict_var['address'], dict_var['famsize'],
    dict_var['Pstatus'], dict_var['Mjob'], dict_var['Fjob'], dict_var['reason'],
    dict_var['guardian'])
schoolsup, famsup, paid, activities, nursery, higher, internet, romantic = (
    dict_var['schoolsup'], dict_var['famsup'], dict_var['paid'], dict_var['activities'],
    dict_var['nursery'], dict_var['higher'], dict_var['internet'], dict_var['romantic'])
# -
# abandoned
# NOTE: data_num is an ALIAS of data (no copy), so this loop also
# overwrites the string columns of `data` itself. Later cells (the RFECV
# fit on `data`) rely on `data` already being numeric — do not "fix"
# this with .copy() without updating them.
data_num = data
for i in data.columns:
    # Replace each column whose first value is a string with its
    # integer-encoded version from dict_var.
    if type(list(data[i])[0]) == str:
        data_num[i] = dict_var[i]
data_num.to_csv('data_merge_num.csv',index=False)
data_num
# Correlation heatmap over all (now numeric) columns.
plt.figure(figsize=(15,15))
sns.heatmap(data_num.corr(), annot=True, fmt=".2f", cbar=True)
plt.xticks(rotation=90)
plt.yticks(rotation = 0)
# Separate test results (grade targets G1-G3) from predictor variables.
# Use the axis keyword: passing axis positionally to DataFrame.drop was
# deprecated in pandas 1.x and removed in 2.0.
data_var = data_num.drop(['G1', 'G2', 'G3'], axis=1)
data_var
# Columns that were originally categorical (integer-encoded above).
pre_vars = ["school", "sex", "address", "famsize", "Pstatus", "Mjob", "Fjob", "reason", "guardian", "schoolsup", "famsup",
           "paid", "activities", "nursery", "higher", "internet", "romantic"]
data_numvar = data_var.drop(pre_vars, axis=1)
data_numvar
data_catvar = data_var[pre_vars]
data_catvar
# The last three columns are the grade targets.
data_result = data_num.iloc[:, -3:]
data_result
#Removing features with low variance
# NOTE: the result of this transform is only displayed, not stored.
pd.DataFrame(VarianceThreshold(threshold=(.8 * (1 - .8))).fit_transform(data_var))
# removed Pstatus, schoolsup, higher
# rank feature by importance using chi2 test(classification) and mutual info regression(regression), fit on G3 score only
bestfeatures = SelectKBest(score_func=chi2, k=len(data_catvar.columns))
fit = bestfeatures.fit(data_catvar, data_result['G3'])
# +
# Pair each categorical feature name with its chi2 score, sorted descending.
df_scores = pd.DataFrame(fit.scores_)
df_scores
df_columns = pd.DataFrame(data_catvar.columns)
df_columns
# concatenate 2 dfs
df_feature_scores = pd.concat([df_columns,df_scores],axis=1)
df_feature_scores.columns = ['feature_name','Score'] #naming the dataframe columns
df_feature_scores
df_feature_scores.sort_values(by="Score", ascending=False)
# +
# Same ranking as above, but for the numeric features using f_regression.
bestfeatures = SelectKBest(score_func= f_regression, k=len(data_numvar.columns))
fit = bestfeatures.fit(data_numvar, data_result['G3'])
df_scores = pd.DataFrame(fit.scores_)
df_scores
df_columns = pd.DataFrame(data_numvar.columns)
df_columns
# concatenate 2 dfs
df_feature_scores = pd.concat([df_columns,df_scores],axis=1)
df_feature_scores.columns = ['feature_name','Score'] #naming the dataframe columns
df_feature_scores
df_feature_scores.sort_values(by="Score", ascending=False)
# -
#Recursive feature elimination using RFECV
#https://scikit-learn.org/stable/auto_examples/feature_selection/plot_rfe_with_cross_validation.html#sphx-glr-auto-examples-feature-selection-plot-rfe-with-cross-validation-py
#redo with some non-numeric categorical data
min_features_to_select = 5
RFE_feature = RFECV(estimator=RandomForestClassifier(),step=1, cv=StratifiedKFold(2),
              scoring='accuracy', min_features_to_select=min_features_to_select)
# Relies on `data` having been converted to numeric in place above.
RFE_feature.fit_transform(data.iloc[:,:-3],data_result['G3'])
# +
print("Optimal number of features : %d" % RFE_feature.n_features_)
# Plot number of features VS. cross-validation scores
# NOTE(review): RFECV.grid_scores_ was removed in scikit-learn 1.2; on
# newer versions use cv_results_['mean_test_score'] — confirm the
# installed sklearn version before running.
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(min_features_to_select,
               len(RFE_feature.grid_scores_) + min_features_to_select),
         RFE_feature.grid_scores_)
plt.show()
# -
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Seaborn graphics
#
# [Seaborn](https://seaborn.pydata.org/) is a Python library with *"a high-level interface for drawing attractive statistical graphics"*. This notebook includes some examples taken from the [Seaborn example gallery](http://seaborn.pydata.org/examples/).
# +
# The imports
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
# -
# ## Example 1: scatterplot matrix
# +
# Scatterplot matrix of the iris dataset, coloured by species.
import seaborn as sns
sns.set(style="ticks")
df = sns.load_dataset("iris")
sns.pairplot(df, hue="species");
# -
# ## Example 2: Correlation matrix heatmap
# +
sns.set(context="paper", font="monospace")
# Load the dataset of correlations between cortical brain networks
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
corrmat = df.corr()
# Set up the matplotlib figure
f, ax = plt.subplots( figsize=(10, 8) )
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=.8, square=True)
# Use matplotlib directly to emphasize known networks
networks = corrmat.columns.get_level_values("network")
for i, network in enumerate(networks):
    # Draw a white separator line whenever the network label changes.
    if i and network != networks[i - 1]:
        ax.axhline(len(networks) - i, c="w")
        ax.axvline(i, c="w")
f.tight_layout()
# -
# ## Example 3: Linear regression with marginal distributions
# +
sns.set(style="darkgrid", color_codes=True)
tips = sns.load_dataset("tips")
# Pass x/y as keywords: positional data arguments to jointplot were
# deprecated in seaborn 0.11 and removed in 0.12.
g = sns.jointplot(x="total_bill", y="tip", data=tips, kind="reg",
                  xlim=(0, 60), ylim=(0, 12), color="r", height=7)
# -
# ## Interactivity
#
# We repeat the above example, but now using the _notebook_ backend to provide pan & zoom interactivity.
# Note that this may not work if graphics have already been initialized
# Seaborn + interactivity
# #%matplotlib notebook
# %matplotlib widget
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set( style="darkgrid", color_codes=True )
tips = sns.load_dataset("tips")
# Pass x/y as keywords: positional data arguments to jointplot were
# deprecated in seaborn 0.11 and removed in 0.12.
sns.jointplot( x="total_bill", y="tip", data=tips, kind="reg",
               xlim=(0, 60), ylim=(0, 12), color="r", height=9 );
# -
| vmfiles/IPNB/Examples/b Graphics/30 Seaborn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scipy import stats
import numpy as np
from matplotlib import pyplot as plt
# Number of friends vs. likes received: fit and plot a simple
# least-squares regression line.
friends = [10, 11, 21, 12, 13, 41, 15, 18, 31, 34, 26, 52, 17, 18, 5]
likes = [8, 9, 15, 9, 10, 29, 14, 16, 25, 30, 20, 45, 15, 14, 2]
slope, intercept, r_value, p_value, std_err = stats.linregress(friends, likes)
# +
# Predicted likes for each observed friend count.
fitted = [slope * x + intercept for x in friends]
plt.scatter(friends, likes)
plt.plot(friends, fitted, '-r')
plt.show()
print(r_value)
# -
| Simple Linear Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=[]
# # Naas - Emailbuilder demo
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Naas/Naas_Emailbuilder_demo.ipynb" target="_parent"><img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg=="/></a>
# + [markdown] papermill={} tags=[]
# ## Input
# + papermill={} tags=[]
# List of email addresses of the receiver(s)
email_to = [""]
# Email sender : Can only take your email account or <EMAIL>
email_from = ""
# Email subject
# NOTE(review): "My Object" looks like a typo for "My Subject" — it is a
# runtime string, so left unchanged here; confirm intent.
subject = "My Object"
# + [markdown] papermill={} tags=[]
# ## Model
# + papermill={} tags=[]
import naas_drivers
import naas
import pandas as pd
# + papermill={} tags=[]
# Demo content for each emailbuilder element type.
table = pd.DataFrame({
    "Table Header 1": ["Left element 1", "Left element 2", "Left element 3"],
    "Table Header 2": ["Right element 1", "Right element 2", "Right element 3"]
})
link = "https://www.naas.ai/"
img = "https://gblobscdn.gitbook.com/spaces%2F-MJ1rzHSMrn3m7xaPUs_%2Favatar-1602072063433.png?alt=media"
list_bullet = ["First element",
               "Second element",
               "Third element",
               # fixed: bare `emailbuilder` is undefined here (NameError);
               # the builder is accessed through the naas_drivers package.
               naas_drivers.emailbuilder.link(link, "Fourth element"),
              ]
footer_icons = [{
    "img_src": img,
    "href": link
}]
email_content = {
    'element': naas_drivers.emailbuilder.title("This is a title"),
    'heading': naas_drivers.emailbuilder.heading("This is a heading"),
    'subheading': naas_drivers.emailbuilder.subheading("This is a subheading"),
    'text': naas_drivers.emailbuilder.text("This is a text"),
    'link': naas_drivers.emailbuilder.link(link, "This is a link"),
    'button': naas_drivers.emailbuilder.button(link, "This is a button"),
    'list': naas_drivers.emailbuilder.list(list_bullet),
    'table': naas_drivers.emailbuilder.table(table, header=True, border=True),
    'image': naas_drivers.emailbuilder.image(img),
    'footer': naas_drivers.emailbuilder.footer_company(networks=footer_icons, company=["Company informations"], legal=["Legal informations"])
}
# + papermill={} tags=[]
# Render the email body (displayed inline as an iframe).
content = naas_drivers.emailbuilder.generate(display='iframe',
                                             **email_content)
# + [markdown] papermill={} tags=[]
# ## Output
# + papermill={} tags=[]
# Send the rendered email via the naas notification service (network I/O).
naas.notification.send(email_to,
                       subject,
                       content,
                       email_from)
# + papermill={} tags=[]
| Naas/Naas_Emailbuilder_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import io
import requests
url="https://raw.githubusercontent.com/cs109/2014_data/master/countries.csv"
# Fetch the raw CSV over HTTP and parse it from the in-memory bytes.
s=requests.get(url).content
c=pd.read_csv(io.StringIO(s.decode('utf-8')))
print(c)
# +
import pymysql.cursors
# Connect to the database
connection = pymysql.connect(host='172.16.0.1',
                             user='admin',
                             password='<PASSWORD>',
                             database='covid',
                             cursorclass=pymysql.cursors.DictCursor)
with connection:
    # with connection.cursor() as cursor:
    #     # Create a new record
    #     sql = "INSERT INTO `users` (`email`, `password`) VALUES <PASSWORD>, %s)"
    #     cursor.execute(sql, ('<EMAIL>', '<PASSWORD>'))
    #     # connection is not autocommit by default. So you must commit to save
    #     # your changes.
    #     connection.commit()
    with connection.cursor() as cursor:
        # Read a single record
        # fixed: the stray comma after `id` ("SELECT `id`, FROM ...") was a
        # SQL syntax error.
        sql = "SELECT `id` FROM `covid_cases_by_region`"
        cursor.execute(sql)
        result = cursor.fetchone()
        print(result)
# -
| etl/ETL Covid Big Gato.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import pandas
import pynumdiff
import scipy.fftpack
from IPython.display import display,SVG
import figurefirst
fifi = figurefirst
import run_pareto_plot
# +
# define problem
example = 'example_7'
dt = 0.01              # sample period of the simulated time series
noise = 0.25           # additive noise level passed to the simulator
timeseries_length = 4  # seconds of data (presumably — confirm units)
problem = 'lorenz_x'
read_existing = True
simdt = 0.0001         # internal simulation step
# define method
method_parent = 'linear_model'
method = 'savgoldiff'
# -
# Simulate the noisy Lorenz-x signal and its ground-truth derivative.
r = pynumdiff.utils.simulate.lorenz_x(timeseries_length=timeseries_length,
                                      noise_parameters=[0, noise],
                                      dt=dt)
x, x_truth, dxdt_truth, _ = r
t = np.arange(0, timeseries_length, dt)
print('done simulating')
gamma_range = [1e-4, 1e4]
r = run_pareto_plot.get_pareto_plot_data(x, x_truth, dxdt_truth, dt, method, method_parent, gamma_range,
                                         num_gammas=60)
rmses, errcorrs, tvs, rmses_gamma, errcorrs_gamma, tvs_gamma, params_gamma, successful_gammas = r
# +
# Relax the error-correlation threshold until at least one gamma passes.
# NOTE(review): this loop never terminates if errcorrs_gamma is empty or
# all-NaN — confirm upstream guarantees at least one finite value.
threshold = 0.25
while len(np.where(errcorrs_gamma<threshold)[0]) < 1:
    threshold += 0.05
idx_errcorr_okay = np.where(errcorrs_gamma<threshold)
# Among acceptable gammas, pick the one with minimum RMSE.
idx_opt = idx_errcorr_okay[0][np.argmin(rmses_gamma[idx_errcorr_okay])]
opt_rmse = rmses_gamma[idx_opt]
opt_errcorr = errcorrs_gamma[idx_opt]
# -
print(idx_opt)
# +
plt.plot(np.log(rmses), errcorrs, '.', color='gray', markersize=2)
plt.plot(np.log(rmses_gamma), errcorrs_gamma, color='violet')
plt.ylim(0,1)
plt.plot(np.log(opt_rmse), opt_errcorr, '*', color='red', markersize=20)
plt.xlabel('log RMSE')
plt.ylabel('Error Correlation')
#plt.xlim(0.4, 3)
plt.ylim(0,1)
# -
# Slope of the error-correlation vs log-RMSE curve.
q1 = np.diff(errcorrs_gamma) / np.diff(np.log(rmses_gamma))
plt.plot(q1, '.')
plt.plot(rmses_gamma + errcorrs_gamma)
print( np.argmin(np.log(rmses_gamma) + errcorrs_gamma) )
plt.plot( np.diff(np.log(rmses_gamma) + errcorrs_gamma) )
plt.plot( rmses_gamma + tvs_gamma )
print( np.argmin(rmses_gamma + tvs_gamma) )
plt.plot( np.diff(rmses_gamma + tvs_gamma) )
# +
# NOTE(review): the axis labels say "log ..." but the values plotted here
# are unlogged — confirm which was intended.
plt.plot((rmses), (tvs), '.', color='gray', markersize=2)
plt.plot((rmses_gamma), (tvs_gamma), color='violet')
#plt.ylim(0,1)
plt.plot((opt_rmse), (tvs_gamma[idx_opt]), '*', color='red', markersize=20)
plt.xlabel('log RMSE')
plt.ylabel('log TV(vel estimate)')
plt.xlim(1.5, 4)
plt.ylim(0,1)
# -
plt.plot(np.log(tvs), errcorrs, '.')
plt.xlabel('TV(vel estimate)')
plt.ylabel('Error Correlation')
| notebooks/paper_figures/misc/make_fig_1_explore_options.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # IMPORTS
# ## Libraries
# +
import math
import pandas as pd
import numpy as np
from google.oauth2 import service_account
from googleapiclient.discovery import build
pd.set_option('display.max_columns', 200)
# -
# # Read Google Sheets
# +
# Documentation: https://developers.google.com/sheets/api/quickstart/python
# NOTE(review): hardcoded absolute Windows path to the service-account key;
# consider an environment variable or relative path.
SERVICE_ACCOUNT_FILE = 'D:/01-DataScience/04-Projetos/00-Git/Youtube-Video-Recommendations/Credentials/keys.json'
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
credentials = None
credentials = service_account.Credentials.from_service_account_file(
        SERVICE_ACCOUNT_FILE, scopes=SCOPES)
# The ID of spreadsheet.
SAMPLE_SPREADSHEET_ID = '1uCur7jOXuLnwuwfWgoBL8mvDDvchuLf-o0X-AnOxS7s'
service = build('sheets', 'v4', credentials=credentials)
# Call the Sheets API
sheet = service.spreadsheets()
#Read Values
result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
                            range="YoutubeScrapeSample!A1:R").execute()
values = result.get('values', [])
# -
# First spreadsheet row is the header; the rest are data rows.
dfGoogleSheets = pd.DataFrame(values[1:], columns=values[0])
dfGoogleSheets['UploadDate'] = pd.to_datetime(dfGoogleSheets['UploadDate'])
# Empty strings mean "not labeled yet" — normalize to NaN.
dfGoogleSheets['WatchList'] = dfGoogleSheets['WatchList'].replace('', np.nan)
dfGoogleSheets[['DaysSincePublication', 'Duration', 'ViewCount', 'LikeCount', 'DislikeCount']] = dfGoogleSheets[['DaysSincePublication', 'Duration', 'ViewCount', 'LikeCount', 'DislikeCount']].astype(int)
dfGoogleSheets['AverageRating'] = dfGoogleSheets['AverageRating'].astype(float)
# Remaining string columns become pandas categoricals.
dfGoogleSheets[dfGoogleSheets.select_dtypes(include=['object']).columns] = dfGoogleSheets.select_dtypes(include=['object']).astype('category')
dfGoogleSheets.dtypes
# ## Drop Duplicates
dfGoogleSheets = dfGoogleSheets.drop_duplicates(subset=['Id'])
dfGoogleSheets.shape
# ## Split into Labeled and Unlabeled Dataset
# Rows with a WatchList label form the training set; unlabeled rows the test set.
dfTrain = dfGoogleSheets[dfGoogleSheets['WatchList'].notnull()].reset_index(drop=True)
dfTest = dfGoogleSheets[dfGoogleSheets['WatchList'].isnull()].reset_index(drop=True)
# +
# Train Dataset
print('\n\tTrain Dataset')
print(f'Number of Rows: {dfTrain.shape[0]}')
print(f'Number of Columns: {dfTrain.shape[1]}')
# Test Dataset
print('\n\n\tTest Dataset')
print(f'Number of Rows: {dfTest.shape[0]}')
print(f'Number of Columns: {dfTest.shape[1]}')
# -
# # Convert to .feather
dfTrain.to_feather('../Data/FeatherData/dfTrainGS.feather')
dfTest.to_feather('../Data/FeatherData/dfTestGS.feather')
| 02-YouTubeNotebooks/03-GoogleSheetsRead.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import csv
from num2words import num2words
from collections import OrderedDict
# Turn CSV into a pandas DataFrame
raw_data_table = pd.read_csv('./spreadsheets/DN-reformatted.csv', sep=',')
# Show first 10 rows of table
#raw_data_table
# -
# Map term names to FlyBase identifiers for existing ontology terms.
fbrf_table = pd.read_csv('./spreadsheets/fbrf-table.csv', sep=',')
lookup = dict(zip(fbrf_table.Keys,fbrf_table.Values))#FBrfs for existing terms
#lookup
nice_name_table = pd.read_csv('./spreadsheets/nice-names-table.csv', sep=',')
nice_names = dict(zip(nice_name_table.Keys,nice_name_table.Values))#improved names for existing terms from raw_data
#nice_names
# +
# Generate an FBbt-style ID for every descending neuron (keys are short
# names). IDs are zero-padded to 8 digits; the format spec keeps the
# padding correct even if the numeric range grows beyond five digits
# (the original "FBbt:000" + str(x) only worked for 5-digit numbers).
start = 47572
DN_fbrfs = [f"FBbt:{start + i:08d}" for i in raw_data_table.index]
DN_dict = OrderedDict(zip(raw_data_table.short, DN_fbrfs))
#DN_dict
# -
# Save the short-name -> ID mapping as a TSV file.
with open('fbrf_dnshortname.tsv', 'w') as f:
    for short_name, term_id in DN_dict.items():
        f.write("%s\t%s\n" % (short_name, term_id))
# +
#list of fbbts for DN_groups
DN_group_names = set(raw_data_table.Group)
DN_group_fbbts = list()
for i in DN_group_names:
    x = lookup[i]
    DN_group_fbbts.append(x)
#DN_group_names
#DN_group_fbbts
# -
#list of names for processes (neuropils)
# NOTE(review): these file handles are never closed; a `with` block (or
# Path.read_text) would be tidier.
process_neuropil_names = open('./spreadsheets/neuropils.txt', 'r')
process_neuropil_names = process_neuropil_names.read().splitlines()
#process_neuropil_names
# +
#list of fbbts for processes (neuropils)
processes = list()
for i in process_neuropil_names:
    x = lookup[i]
    processes.append(x)
#processes[0:5]
# -
#list of fbbts for pathways (tracts)
pathway_tract_names = open('./spreadsheets/tracts.csv', 'r')
pathway_tract_names = pathway_tract_names.read().splitlines()
#pathway_tract_names[0:5]
# +
#list of fbbts for pathways (tracts)
tracts = list()
for i in pathway_tract_names:
    x = lookup[i]
    tracts.append(x)
#tracts[0:5]
# -
def name_lister(names):
    """Join names into an English list phrase: "a", "a and b", "a, b and c".

    Returns False for an empty list (callers test for this explicitly).
    """
    if not names:
        return False
    if len(names) == 1:
        return names[0]
    # Comma-join all but the last name, then attach the last with "and".
    return ", ".join(names[:-1]) + " and " + names[-1]
# +
#similar neurons - comment#######################
# Build a per-neuron comment sentence listing morphologically similar
# neurons described by Namiki et al., 2018.
similar_neurons_df = pd.read_csv('./spreadsheets/similar-neurons.tsv', sep='\t')
similar_neurons_df = similar_neurons_df.fillna("") #must do this first - won't replace 'nan'
similar_neurons_df = similar_neurons_df.applymap(str) #convert everything to a string
sim_comment_od = OrderedDict([])
for dn in raw_data_table.short:
    if dn in list(similar_neurons_df.Name):#can't use "in" with pd series
        sim_rows = similar_neurons_df[similar_neurons_df.Name == dn]#table with row for each similar neuron for one dn
        sim_name_list = list()
        #build up a statement for each row and add to list
        for r in sim_rows.index:
            sim_text = ""
            sim_text = sim_text + sim_rows.sim_name[r]
            if len(sim_rows.fbbt[r]) > 0:
                sim_text = sim_text + " (" + sim_rows.fbbt[r] + ")"
            sim_text = sim_text + ", described by " + sim_rows.Citation[r]
            if len(sim_rows.fbrf[r]) > 0:
                sim_text = sim_text + " (" + sim_rows.fbrf[r] + ")"
            sim_name_list.append(sim_text)
        #put together list items with ',' and 'and' in a sentence
        sim_comment_od[dn] = "Namiki et al., 2018 (FBrf0239335), identify this as being morphologically similar to "\
        + name_lister(sim_name_list) + "."
        # NOTE(review): missing () — this accesses the method without calling
        # it. Harmless here since the list is re-created each iteration.
        sim_name_list.clear
#similar_neurons_df
# +
#"may be equivalent to" field
# Collect, per neuron, the OBO PURLs of possibly-equivalent existing terms.
sim_eqto_od = OrderedDict([])
for dn in raw_data_table.short:
    if dn in list(similar_neurons_df.Name):
        sim_rows = similar_neurons_df[similar_neurons_df.Name == dn]#table with row for each similar neuron for one dn
        sim_fbbt = list()
        for r in sim_rows.index:#make list of fbbts for one dn
            if len(sim_rows.fbbt[r]) > 0:
                eq_fbbt = "http://purl.obolibrary.org/obo/" + sim_rows.fbbt[r].replace(":","_")
                sim_fbbt.append(eq_fbbt)
        if len(sim_fbbt) > 0:
            sim_eqto_od[dn] = sim_fbbt
        # NOTE(review): missing () — no-op; the list is re-created each loop.
        sim_fbbt.clear
# Widest equivalence list determines how many eq_to columns the template needs.
max_eqto = 0
for i in sim_eqto_od:
    if len(sim_eqto_od[i]) > max_eqto:
        max_eqto = len(sim_eqto_od[i])
#max_eqto
#sim_eqto_od["DNg34"]
#sim_eqto_od
# +
# Make a dictionary with key - column header & value = template specification (first row of table).
# Make first two columns
template_seed = OrderedDict([ ('ID' , 'ID'), ('CLASS_TYPE' , 'CLASS_TYPE'),\
                             ('RDF_Type' , 'TYPE' )])
#label, description, short synonym:
template_seed.update([("Name" , "A rdfs:label"), ("Definition" , "A IAO:0000115"),\
                      ("Xref_def" , ">A oboInOwl:hasDbXref"), ("Comment" , "A rdfs:comment"),\
                      ("created_by" , "A oboInOwl:created_by"),\
                      ("creation_date", "A oboInOwl:creation_date")])
#short name synonyms
template_seed.update([("Synonym1" , "A oboInOwl:hasExactSynonym"),\
                      ("syn_type" , ">A oboInOwl:HasSynonymType"),\
                      ("Synonym2" , "A oboInOwl:hasExactSynonym"),\
                      ("syn_ref" , ">A oboInOwl:hasDbXref")])
#may be equivalent to annotation
#AI specifies an IRI
for i in range(max_eqto):
    template_seed.update([("eq_to" + str(i+1), "AI IAO:0006011"),\
                          ("eq_to_ref" + str(i+1), ">A oboInOwl:hasDbXref")])
# Columns for DN group:
for n in DN_group_fbbts:
    template_seed.update([(n , "C %")])
# Columns for processes:
for n in processes:
    template_seed.update([(n , "C 'has synaptic terminal in' some %")])
# Columns for tract:
for n in tracts:
    template_seed.update([(n , "C 'fasciculates with' some %")])
# Create dataFrame for template
# from_records takes a list of dicts - one for each row. We only have one row.
template = pd.DataFrame.from_records([template_seed])
template
# -
def group_text_generator(group):
    """Return the definition sentence for a DN soma group (DNa..DNx).

    Prints a warning and returns None for unrecognised group names, matching
    the behaviour of the original if/elif chain.
    """
    group_sentences = {
        'DNa': "Descending neuron belonging to the DNa group, having a cell body on the anterior dorsal surface of the brain.",
        'DNb': "Descending neuron belonging to the DNb group, having a cell body on the anterior ventral surface of the brain.",
        'DNc': "Descending neuron belonging to the DNc group, having a cell body in the pars intercerebralis.",
        'DNd': "Descending neuron belonging to the DNd group, having a cell body just lateral to the antennal lobe, on the anterior surface of the brain.",
        'DNg': "Descending neuron belonging to the DNg group, having a cell body in the cell body rind around the gnathal ganglion.",
        'DNp': "Descending neuron belonging to the DNp group, having a cell body on the posterior surface of the brain.",
        'DNx': "Descending neuron belonging to the DNx group, having a cell body outside of the brain.",
    }
    if group in group_sentences:
        return group_sentences[group]
    print("Please enter a valid group")
def cell_text_generator(cells):
    """Return a sentence describing per-hemisphere cell count.

    Returns None (implicitly) when cells < 1, as the original did.
    """
    if cells == 1:
        return " There is one of these cells per hemisphere."
    if cells > 1:
        # num2words spells out the count, e.g. 3 -> "three".
        return " There is a cluster of up to %s of these cells in each hemisphere." % num2words(cells)
def side_text_generator(cross, side):
    """Compose the midline-crossing + descending-side definition fragment.

    `cross` is "N"/"Y" (crosses the midline or not); `side` is "I"/"C"
    (ipsilateral/contralateral descent). Both phrases come from one shared
    lookup table, so any unknown code raises KeyError as before.
    """
    phrases = {
        "N": " This neuron does not cross the midline",
        "Y": " This neuron crosses the midline",
        "I": " and descends on the ipsilateral side of the cervical connective.",
        "C": " and descends on the contralateral side of the cervical connective.",
    }
    return "".join(phrases[code] for code in (cross, side))
def label_maker(short):
    """Build an rdfs:label from a DN short name like 'DNa01'.

    The third character of the short name selects the soma-region phrase.
    """
    soma_region = {
        "a": "of the anterior dorsal brain",
        "b": "of the anterior ventral brain",
        "c": "of the pars intercerebralis",
        "d": "of the anterior brain",
        "g": "of the gnathal ganglion",
        "p": "of the posterior brain",
        "x": "outside of the brain",
    }[short[2]]
    return f"descending neuron {soma_region} {short}"
# +
# Build one ROBOT-template row per descending neuron and append it to
# `template`. `count` tracks the positional row index into raw_data_table.
count = 0 #0 = DNa01
for i in raw_data_table.index:
    r = raw_data_table.short[count]
    row_od = OrderedDict([]) #new template row as an empty ordered dictionary
    for c in template.columns: #make columns and blank data for new template row
        row_od.update([(c , "")])
    #these are the same in each row
    row_od["CLASS_TYPE"] = "subclass"
    row_od["RDF_Type"] = "owl:Class"
    row_od["Xref_def"] = "FlyBase:FBrf0239335"
    row_od["syn_type"] = "http://purl.obolibrary.org/obo/fbbt#VFB_SYMBOL"
    row_od["syn_ref"] = "FlyBase:FBrf0239335"
    row_od["created_by"] = "CP"
    row_od["creation_date"] = "2018-09-20T12:00:00Z"
    #easy to generate data
    row_od["ID"] = DN_dict[r]
    row_od["Synonym1"] = r
    row_od["Synonym2"] = r
    row_od["Name"] = label_maker(r)
    if r in sim_comment_od:
        row_od["Comment"] = sim_comment_od[r]
    #is_a relationship
    is_a = lookup[r[0:3]]
    row_od[is_a] = is_a
    #may be equivalent to
    if r in sim_eqto_od:
        count_eq = 0
        # NOTE(review): this inner `i` shadows the outer loop variable;
        # harmless because the outer loop only uses `count`, but fragile.
        for i in sim_eqto_od[r]:
            count_eq += 1
            row_od["eq_to" + str(count_eq)] = i
            row_od["eq_to_ref" + str(count_eq)] = "FlyBase:FBrf0239335"
    #FOR SYNAPSING AND FASCICULATION
    #get column names where value > 0
    # NOTE(review): raw_data_table[:count].columns is the full column list
    # regardless of the row slice — the slice appears redundant.
    names = raw_data_table[:count].columns[(raw_data_table > 0).iloc[count]]
    #convert these to FBrfs
    FBrfs = list()
    for n in names:
        if n in lookup:
            FBrfs.append(lookup[n])
        else: continue
    #make these into columns in row_od
    for f in FBrfs:
        row_od[f] = f
    #STUFF FOR DEFINITION
    #DN group
    r_group = raw_data_table.Group[count]
    group_text = group_text_generator(r_group)
    #number_cells
    num_cells = raw_data_table.max_cells[count]
    number_cells_text = cell_text_generator(num_cells)
    #crossing and descending side
    cross = raw_data_table.crossing_midline[count]
    side = raw_data_table.Descending_side[count]
    side_text = side_text_generator(cross,side)
    #using x > 0 names generated for relationships
    synapsing_names = list()
    for n in names:
        if (n in nice_names) and (n in process_neuropil_names):
            synapsing_names.append(nice_names[n])
        else: continue
    synapses_in = name_lister(synapsing_names)
    if synapses_in != False:
        synapsing_text = " It has neurites in the " + synapses_in + "."
    else: synapsing_text = ""
    tract_names = list()
    for n in names:
        if (n in nice_names) and (n in pathway_tract_names):
            tract_names.append(nice_names[n])
        else: continue
    in_tracts = name_lister(tract_names)
    if in_tracts != False:
        tract_text = " It fasciculates with the " + in_tracts + " in the thoracico-abdominal ganglion."
    else: tract_text = ""
    row_od["Definition"] = group_text + side_text + synapsing_text + tract_text + number_cells_text
    #make new row into a DataFrame and add it to template
    new_row = pd.DataFrame.from_records([row_od])
    template = pd.concat([template, new_row], ignore_index=True, sort=False)
    count +=1
template
# -
# Write the finished ROBOT template as TSV.
template.to_csv("./template.tsv", sep = "\t", header=True, index=False)
| src/patterns/robot_template_projects/descending_neurons/descending_neuron_template_gen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''medibrain'': conda)'
# language: python
# name: python37464bitmedibrainconda26a62581bc9d4febb30e1b929fdf8fbe
# ---
# ## Setup
import numpy
# import matplotlib.pyplot as plt
# import seaborn as sns
import pandas
# # %matplotlib inline
# plt.rcParams['figure.figsize'] = (16.0, 4.0)
# sns.set_style("whitegrid")
# Fix the global numpy RNG so the sampling below is reproducible.
random_state = 7
numpy.random.seed(random_state)
from lib.cox_helpers import initialize_cox_store
import datetime
# ## Load Data
data = pandas.read_csv('data_k5.csv')
data.head()
from sklearn import model_selection
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, roc_auc_score, matthews_corrcoef, accuracy_score
from sklearn.metrics import classification_report
from sklearn.preprocessing import OneHotEncoder
# +
# One-hot encode the three categorical columns in a single call (each
# dummy column is prefixed with its source column name, as before), then
# move the label column READMISSION_30_DAYS to the end.
data = pd.get_dummies(data, columns=['AGE', 'OUTCOME', 'SEX'])
cols = [col for col in data if col != 'READMISSION_30_DAYS'] + ['READMISSION_30_DAYS']
data = data[cols]
data.head()
# +
# Balance the classes by randomly removing non-readmitted rows until
# both classes are equally sized.
zero_indices = data[data['READMISSION_30_DAYS'] == 0].index
sample_size_to_remove = sum(data['READMISSION_30_DAYS'] == 0) - sum(data['READMISSION_30_DAYS'] == 1)
random_indices = numpy.random.choice(zero_indices, sample_size_to_remove, replace=False)
data = data.drop(random_indices)
print(len(data))
readmission_count = data.groupby('READMISSION_30_DAYS').size().sort_values(ascending=False)
print(readmission_count)
# +
dataset = data.values
numpy.random.shuffle(dataset)
# split into input (X) and output (Y) variables
# NOTE(review): hardcoded index 100 assumes exactly 100 feature columns
# before the label — confirm this matches the encoded frame's width.
X = dataset[:,0:100].astype(float)
Y = dataset[:,100]
# X,Y
# +
# Hold out 10% as a test set, then min-max scale using training-set
# statistics only (no leakage from the test split).
test_size = 0.1
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=test_size, random_state=7)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
scaler = scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# +
from math import floor
def partition(vector, fold, k):
    """Split `vector` into (training, validation) for fold `fold` of `k`.

    The validation slice is rows [size*fold/k, size*(fold+1)/k); training
    is everything else. Works for scipy sparse matrices and numpy arrays.

    Fix: the original dispatched on
    str(type(vector)) == "<class 'scipy.sparse.csr.csr_matrix'>", which
    stopped matching when scipy moved the class to scipy.sparse._csr,
    leaving `training` unbound (UnboundLocalError). isinstance-style
    checks via sparse.issparse are robust to the module layout.
    """
    size = vector.shape[0]
    start = floor((size/k)*fold)
    end = floor((size/k)*(fold+1))
    validation = vector[start:end]
    from scipy import sparse  # local import: only needed for the sparse path
    if sparse.issparse(vector):
        # Sparse matrices don't support numpy.concatenate: select the
        # complement of the validation window with a boolean row mask.
        mask = numpy.ones(size, dtype=bool)
        mask[start:end] = False
        training = vector[mask]
    else:
        # Dense arrays: concatenate the rows before and after the window.
        training = numpy.concatenate((vector[:start], vector[end:]))
    return training, validation
def Cross_Validation(learner, k, examples, labels):
    """k-fold cross-validation of *learner*.

    Returns per-fold lists: (train AUC, validation AUC, test AUC, test MCC).
    NOTE(review): X_test / Y_test are read from module globals, and the test
    set is scored inside every fold (averaging over k refits) — confirm intent.
    """
    train_folds_score = []
    validation_folds_score = []
    test_score_auc = []
    test_score_mcc = []
    for fold in range(0, k):
        # Carve out this fold's validation rows from both examples and labels.
        training_set, validation_set = partition(examples, fold, k)
        training_labels, validation_labels = partition(labels, fold, k)
        learner.fit(training_set, training_labels)
        training_predicted = learner.predict(training_set)
        validation_predicted = learner.predict(validation_set)
        # print(training_predicted, validation_predicted)
        test_predicted = learner.predict(X_test)
        train_folds_score.append(roc_auc_score(training_labels, training_predicted))
        # print(training_labels, training_predicted)
        # print(numpy.sum(training_labels), numpy.sum(training_predicted))
        # print(classification_report(training_labels, training_predicted))
        validation_folds_score.append(roc_auc_score(validation_labels, validation_predicted))
        test_score_auc.append(roc_auc_score(Y_test, test_predicted))
        test_score_mcc.append(matthews_corrcoef(Y_test, test_predicted))
    return train_folds_score, validation_folds_score, test_score_auc, test_score_mcc
# -
def run(model, features, labels):
    """Cross-validate *model* on (features, labels) and log results to a cox store.

    NOTE(review): initialize_cox_store, random_state and datetime are not
    defined in this chunk — presumably set earlier in the notebook; verify.
    """
    cox_store = initialize_cox_store()
    cox_store['experiments'].update_row({
        # NOTE(review): 'k' is recorded as 5 but Cross_Validation below is
        # called with k=10 — one of the two looks wrong.
        'k': 5,
        'random_state': random_state,
        'start_time': datetime.datetime.now().strftime('%Y%m%d%H%M%S'),
        'classifier': model.__str__().split("(")[0],
        'classifier_full': model.__str__()
    })
    train_scores, validation_scores, test_scores_auc, test_scores_mcc = Cross_Validation(model, 10, features, labels)
    #print(train_scores, validation_scores, test_scores)
    print(model)
    print('Train AUC', float(format(numpy.mean(train_scores), '.3f')))
    print('Validation AUC',float(format(numpy.mean(validation_scores), '.3f')))
    print('Test AUC',float(format(numpy.mean(test_scores_auc), '.3f')))
    print('Test MCC',float(format(numpy.mean(test_scores_mcc), '.3f')))
    print()
    cox_store['experiments'].update_row({
        'Train AUC': float(format(numpy.mean(train_scores), '.3f')),
        'Validation AUC': float(format(numpy.mean(validation_scores), '.3f')),
        'Test AUC': float(format(numpy.mean(test_scores_auc), '.3f')),
        'Test MCC': float(format(numpy.mean(test_scores_mcc), '.3f'))
    })
    cox_store['experiments'].flush_row()
    cox_store.close()
# %%time
models = [LogisticRegression(solver='liblinear'), KNeighborsClassifier(), GaussianNB(), SVC(gamma='auto')] #LogisticRegression(solver='liblinear')
for model in models:
    # NOTE(review): cross-validating on the held-out test split (X_test/Y_test)
    # instead of the training data looks unintended — confirm.
    run(model, X_test, Y_test)
# +
# Dump every logged experiment from the cox store and export to Excel.
from cox.readers import CollectionReader
reader = CollectionReader('cox')
a = reader.df('experiments')
print(a.to_string())
a.to_excel('experimentalResults.xlsx')
# -
| archive/k_anonymity_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Examples</h1>
# <strong>Example #1: Saving A Dict</strong>
#
# Let's save an employee's information into file using pickle. Run the code below then look in the example folder for the output.
# +
import pandas as pd
import shutil
import glob
import os
import pickle

# Resolve the output path relative to the script directory (set once per session).
if 'script_dir' not in globals():
    script_dir = os.getcwd()
data_directory = 'data\\'
example_directory = 'PickleExample\\'
target_file_name = 'employee.txt'
target_path = os.path.join(script_dir, data_directory, example_directory, target_file_name)

employee = {"Name": "Sam", "Age":25, "Height": 177, "Country" : "Brazil"}

# Use a context manager so the file handle is closed even if dump() raises.
with open(target_path, 'wb') as out_file:
    pickle.dump(employee, out_file)
# +
#Here is how you will unpickle a saved file.
with open(target_path, 'rb') as in_file:
    loaded_object = pickle.load(in_file)
print(loaded_object)
# -
# <strong>Example #2: Saving A List</strong>
#
# Let's save a list into file using pickle.
# +
grades = [34,53,23,56,67,74,3,33,2,6,7,8,83,34,2,34,64,65]
target_file_name = 'grades.txt'
target_path = os.path.join(script_dir, data_directory, example_directory, target_file_name)
# Context managers guarantee the handles are closed, even on error.
with open(target_path, 'wb') as out_file:
    pickle.dump(grades, out_file)
# +
#Here is how you will unpickle a saved file.
with open(target_path, 'rb') as in_file:
    loaded_object = pickle.load(in_file)
print(loaded_object)
# -
# Copyright © 2020, Mass Street Analytics, LLC. All Rights Reserved.
| 03 Advanced/26-saving-objects-with-pickle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Deep Dive
# > Learning special concepts like closures, itertools, *args/**kwargs, decorators and property.
#
# - toc: false
# - badges: true
# - comments: false
# - categories: [jupyter, python]
# - author: <NAME>
# ### 1. Closures
#
# A closure is simply a function with an attached environment variable. Closures provide data abstraction and object-oriented coding without the use of a class. To implement a closure:
#
# 1. A function should be embedded within another function
# 2. The outer function must return the inner function
# 3. The inner function must use the outer function variable
#
# If the implementation gets extensive with multiple methods and input parameters, it might be worth creating a class. Closure is a halfway house to class - a replacement for classes with a single method class and variable.
def outer_func(power):
    """Return a closure that raises its argument to *power*."""
    def inner_func(number):
        # "power" is read from the enclosing scope — this is what makes
        # inner_func a closure.
        return pow(number, power)
    return inner_func
# +
# Call outer function with power 2.
func_2 = outer_func(2)
print(func_2(5))
# Call outer function with power 3.
func_3 = outer_func(3)
print(func_3(5))
# -
# All functions have an attribute called __closure__.
print(outer_func.__closure__) # Outer function returns none.
print(func_2.__closure__) # Inner function returns a cell array.
# cell_contents exposes the captured value (here: the power, 2).
print(func_2.__closure__[0].cell_contents) # Returns the method input variable value of the closure function.
# ### 2. Itertools
#
# Iteration module with many useful functions such as permutation, combination, filter, etc.
#
import itertools
values = [1, 2, 3, 4]
# Running totals: 1, 1+2, 1+2+3, etc.
for item in itertools.accumulate(values):
    print(item)
# Combines the data in sets of 3, no duplicates like in permutations.
for item in itertools.combinations(values, 3):
    print(item)
# Combines data in sets of 3, in all possible orderings.
for item in itertools.permutations(values, 3):
    print(item)
# Filter with a condition, drops while x < 2.
for item in itertools.dropwhile(lambda x: x<2, values):
    print(item)
# Creates 3 independent iterators over the list.
for item in itertools.tee(values, 3):
    print("Iteration")
    for it in item:
        print(it)
# This is not an itertools function. Combines the first item in the first list with the first item in the second list, second to second, etc. It stops when one of the lists runs out of values.
for item in zip(values, ['a', 'b']):
    print(item)
# This is like zip, except it continues combining with None when one of the lists runs out of values.
for item in itertools.zip_longest(values, ['a', 'b']):
    print(item)
# ### 3. *args and **kwargs
#
# *args is used to unpack a set of values to a tuple. These are often used as function arguments where you can send an arbitrary number of arguments.
#
# **kwargs is used to unpack a named set of values to a dictionary. When used as a function argument, you can send any number of named arguments to the function for it to unpack and use inside.
#
# *args - accepts any number of positional arguments.
def test_args(*args):
    """Print the sum of every positional argument (packed into a tuple)."""
    running_total = 0
    for value in args:
        running_total += value
    print(running_total)
# Test with 3 arguments.
test_args(5, 9, 10)
# Test with 5 arguments.
test_args(10, 20, 30, 40, 50)
# Create 2 lists.
list1 = [2, 3]
list2 = [4, 5]
# Print the list without unpacking - prints a list.
print(list2)
# Print the list with unpacking - prints the values in the list.
print(*list2)
# Unpack list 1 and 2 and send it to *args
test_args(*list1, *list2)
# Can be used to merge two lists.
list3 = [*list1, *list2]
print(list3)
# To send any number of named arguments we use **kwargs.
def test_kwargs(**kwargs):
    """Print each named argument as "name-value"; kwargs arrives as a dict."""
    for name, value in kwargs.items():
        print("{}-{}".format(name, value))
# Send two named parameters.
test_kwargs(a=10, b=20)
# Send three named parameters of mixed types.
test_kwargs(arg1='aaaa', arg2=3, arg3='xx')
# Chain positional args followed by named args.
def test_args_kwargs(*args, **kwargs):
    """Print every positional argument, then every named-argument value."""
    for item in (*args, *kwargs.values()):
        print(item)
# Test by sending both types of args (positional must come before named).
test_args_kwargs(2, 3, 4, b=55, c=999)
# ### 4. Decorators
#
# Decorators are functions that take another function as input and add to that function in some way. This makes use of closures and the ability to pass a function as an argument just like any other variable. A function can have any number of decorators added to it, and the same decorator can be added to multiple functions.
# +
# Takes any function
def add_border(func):
    """Decorator: print a dashed border before and after *func* runs."""
    border = '------'
    def inner_func(*args, **kwargs):
        print(border)
        # Forward every argument untouched to the wrapped function.
        func(*args, **kwargs)
        print(border)
    return inner_func
# Add the decorator to the function.
@add_border
def sum(*args, **kwargs):
    """Print the total of the positional arguments (shadows builtin sum)."""
    total = 0
    for value in args:
        total += value
    print(total)
@add_border
def power(a, b):
    """Print a raised to the power b, inside a border."""
    result = a ** b
    print(result)
# -
# Each call prints its result framed by the decorator's dashed borders.
sum(10, 20, 40)
power(3, 2)
# ### 5. Property - Inbuilt function and Decorator
#
# Inside a class, any variable defined can be accessed and set by instantiating the class and accessing the values on the object. But if there are any special validations/conditions we want to add while getting or setting, we can write methods to do these and register them as a property using this function.
#
# variable_name = property(fget, fset, fdel, doc)
#
# where variable_name: name of the property
# fget: getter function
# fset: setter function
# fdel: delete function
# doc: comment
# Class with no special getters or setters.
class student:
    """Plain record type; attributes are read and written directly."""
    def __init__(self, name, age):
        self.name, self.age = name, age
a_student = student("test1", 20)
print(a_student.name, a_student.age)
# Attributes can be reassigned freely — no validation hook exists.
a_student.name = "test2"
print(a_student.name, a_student.age)
# Class with getter and setter implemented with property function.
class student_new:
    """Wraps the name attribute in a property() built from explicit methods."""
    def __init__(self, name=None):
        # Assigning to "name" here routes through the property setter below.
        self.name = name

    def get_name(self):
        # Backed by the internal _name; returning self.name here would
        # recurse into this getter forever.
        return self._name

    def set_name(self, name):
        # None is ignored, so _name stays unset until a real name is given.
        if name is not None:
            self._name = name + ' huh'

    name = property(get_name, set_name)
b_student = student_new("test2")
print(b_student.name)
# Assignment goes through set_name, which appends ' huh'.
b_student.name = "hello"
b_student.name
# Class where the property is implemented with the @property decorator.
class student_nextgen:
    """Same contract as student_new, expressed with the @property decorator."""
    def __init__(self, name=None):
        # Routed through the name.setter below.
        self.name = name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        # Ignore None; append the ' huh' suffix to any real name.
        if name is not None:
            self._name = name + ' huh'
c_student = student_nextgen(name='what')
c_student.name
# The setter skips None, so _name stays unset until a real value is assigned.
d_student = student_nextgen()
d_student.name = 'hello'
d_student.name
| _notebooks/2021-02-10-python-deep-dive.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### New to Plotly?
# Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
# <br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
# <br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
# #### Version Check
# Note: `United States County Choropleths` are available in version <b>2.5.1+</b><br>
# Run `pip install plotly --upgrade` to update your Plotly version
import plotly
plotly.__version__  # figure-factory county choropleths require plotly >= 2.5.1
# #### Required Packages
# `geopandas`, `pyshp` and `shapely` must be installed for this figure factory.
#
# Run the following commands to install the correct versions of the following modules:
#
# ```
# pip install geopandas==0.3.0
# pip install pyshp==1.2.10
# pip install shapely==1.6.3
# ```
#
# If you are using Windows, follow this post to properly install geopandas and dependencies: http://geoffboeing.com/2014/09/using-geopandas-windows/. If you are using Anaconda, do not use PIP to install the packages above. Instead use conda to install them:
#
# ```
# conda install plotly
# conda install geopandas
# ```
# #### FIPS and Values
# Every US state and county has an assigned ID regulated by the US Federal Government under the term FIPS (Federal Information Processing Standards) codes. There are state codes and county codes: the 2016 state and county FIPS codes can be found at the [US Census Website](https://www.census.gov/geographies/reference-files/2016/demo/popest/2016-fips.html).
#
# Combine a state FIPS code (eg. `06` for California) with a county FIPS code of the state (eg. `059` for Orange county) and this new state-county FIPS code (`06059`) uniquely refers to the specified state and county.
#
# `ff.create_choropleth` only needs a list of FIPS codes and a list of values. Each FIPS code points to one county and each corresponding value in `values` determines the color of the county.
# #### Simple Example
# A simple example of this is a choropleth a few counties in California:
# +
import plotly.plotly as py
import plotly.figure_factory as ff

# One value per FIPS code; the value determines each county's colour.
fips = ['06021', '06023', '06027',
        '06029', '06033', '06059',
        '06047', '06049', '06051',
        '06055', '06061']
values = range(len(fips))
fig = ff.create_choropleth(fips=fips, values=values)
py.iplot(fig, filename='choropleth of some cali counties - full usa scope')
# -
# #### Change the Scope
# Even if your FIPS values belong to a single state, the scope defaults to the entire United States as displayed in the example above. Changing the scope of the choropleth shifts the zoom and position of the USA map. You can define the scope with a list of state names and the zoom will automatically adjust to include the state outlines of the selected states.
#
# By default `scope` is set to `['USA']` which the API treats as identical to passing a list of all 50 state names:<br>
#
# `['AK', 'AL', 'CA', ...]`
#
# State abbreviations (eg. `CA`) or the proper names (eg. `California`) as strings are accepted. If the state name is not recognized, the API will throw a Warning and indicate which FIPS values were ignored.
#
# Another param used in the example below is `binning_endpoints`. If your `values` is a list of numbers, you can bin your values into half-open intervals on the real line.
# +
import plotly.plotly as py
import plotly.figure_factory as ff
import numpy as np
import pandas as pd
df_sample = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv')
df_sample_r = df_sample[df_sample['STNAME'] == 'California']
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
colorscale = [
'rgb(193, 193, 193)',
'rgb(239,239,239)',
'rgb(195, 196, 222)',
'rgb(144,148,194)',
'rgb(101,104,168)',
'rgb(65, 53, 132)'
]
fig = ff.create_choropleth(
fips=fips, values=values, scope=['CA', 'AZ', 'Nevada', 'Oregon', ' Idaho'],
binning_endpoints=[14348, 63983, 134827, 426762, 2081313], colorscale=colorscale,
county_outline={'color': 'rgb(255,255,255)', 'width': 0.5}, round_legend_values=True,
legend_title='Population by County', title='California and Nearby States'
)
py.iplot(fig, filename='choropleth_california_and_surr_states_outlines')
# -
# #### Single State
# +
import plotly.plotly as py
import plotly.figure_factory as ff
import numpy as np
import pandas as pd

df_sample = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv')
df_sample_r = df_sample[df_sample['STNAME'] == 'Florida']
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
# Four evenly spaced bin edges between the smallest and largest county population.
endpts = list(np.mgrid[min(values):max(values):4j])
colorscale = ["#030512","#1d1d3b","#323268","#3d4b94","#3e6ab0",
              "#4989bc","#60a7c7","#85c5d3","#b7e0e4","#eafcfd"]
fig = ff.create_choropleth(
    fips=fips, values=values, scope=['Florida'], show_state_data=True,
    colorscale=colorscale, binning_endpoints=endpts, round_legend_values=True,
    plot_bgcolor='rgb(229,229,229)',
    paper_bgcolor='rgb(229,229,229)',
    legend_title='Population by County',
    county_outline={'color': 'rgb(255,255,255)', 'width': 0.5},
    exponent_format=True,
)
py.iplot(fig, filename='choropleth_florida')
# -
# #### Multiple States
# +
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd

NE_states = ['Connecticut', 'Maine', 'Massachusetts', 'New Hampshire', 'Rhode Island', 'Vermont']
df_sample = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv')
df_sample_r = df_sample[df_sample['STNAME'].isin(NE_states)]
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
# NOTE(review): this colorscale is defined but never passed to create_choropleth — confirm.
colorscale = [
    'rgb(68.0, 1.0, 84.0)',
    'rgb(66.0, 64.0, 134.0)',
    'rgb(38.0, 130.0, 142.0)',
    'rgb(63.0, 188.0, 115.0)',
    'rgb(216.0, 226.0, 25.0)'
]
fig = ff.create_choropleth(
    fips=fips, values=values,
    scope=NE_states, county_outline={'color': 'rgb(255,255,255)', 'width': 0.5},
    legend_title='Population per county'
)
# Nudge the legend and its title annotation to the left edge of the plot.
fig['layout']['legend'].update({'x': 0})
fig['layout']['annotations'][0].update({'x': -0.12, 'xanchor': 'left'})
py.iplot(fig, filename='choropleth_new_england')
# -
# #### Simplify County, State Lines
# Below is a choropleth that uses several other parameters. For a full list of all available params call `help(ff.create_choropleth)`
#
# - `simplify_county` determines the simplification factor for the counties. The larger the number, the fewer vertices and edges each polygon has. See http://toblerity.org/shapely/manual.html#object.simplify for more information.
# - `simplify_state` simplifies the state outline polygon. See the [documentation](http://toblerity.org/shapely/manual.html#object.simplify) for more information.
# The default for both `simplify_county` and `simplify_state` is 0.02
#
# Note: This choropleth uses a divergent categorical colorscale. See http://react-colorscales.getforge.io/ for other cool colorscales.
# +
import plotly.figure_factory as ff
import pandas as pd

scope = ['Oregon']
df_sample = pd.read_csv(
    'https://raw.githubusercontent.com/plotly/datasets/master/minoritymajority.csv'
)
df_sample_r = df_sample[df_sample['STNAME'].isin(scope)]
values = df_sample_r['TOT_POP'].tolist()
fips = df_sample_r['FIPS'].tolist()
# A 12-colour qualitative palette, tiled three times to cover all bins.
colorscale = ["#8dd3c7", "#ffffb3", "#bebada", "#fb8072",
              "#80b1d3", "#fdb462", "#b3de69", "#fccde5",
              "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f",
              "#8dd3c7", "#ffffb3", "#bebada", "#fb8072",
              "#80b1d3", "#fdb462", "#b3de69", "#fccde5",
              "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f",
              "#8dd3c7", "#ffffb3", "#bebada", "#fb8072",
              "#80b1d3", "#fdb462", "#b3de69", "#fccde5",
              "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"]
fig = ff.create_choropleth(
    fips=fips, values=values, scope=scope,
    colorscale=colorscale, round_legend_values=True,
    simplify_county=0, simplify_state=0,
    county_outline={'color': 'rgb(15, 15, 55)', 'width': 0.5},
    state_outline={'width': 1},
    legend_title='pop. per county',
    title='Oregon'
)
# NOTE(review): "py" is not imported in this cell — it relies on an earlier
# cell's "import plotly.plotly as py" having run first; verify ordering.
py.iplot(fig, filename='choropleth_oregon_ono_simplification_factor')
# -
# #### The Entire USA
# +
import plotly.plotly as py
import plotly.figure_factory as ff
import numpy as np
import pandas as pd

df_sample = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/laucnty16.csv')
# Zero-pad state (2 digits) and county (3 digits) codes, then concatenate
# them into the combined 5-digit FIPS code.
df_sample['State FIPS Code'] = df_sample['State FIPS Code'].apply(lambda x: str(x).zfill(2))
df_sample['County FIPS Code'] = df_sample['County FIPS Code'].apply(lambda x: str(x).zfill(3))
df_sample['FIPS'] = df_sample['State FIPS Code'] + df_sample['County FIPS Code']
colorscale = ["#f7fbff","#ebf3fb","#deebf7","#d2e3f3","#c6dbef","#b3d2e9","#9ecae1",
              "#85bcdb","#6baed6","#57a0ce","#4292c6","#3082be","#2171b5","#1361a9",
              "#08519c","#0b4083","#08306b"]
# Interior bin edges (one fewer than the number of colours), spanning 1-12%.
endpts = list(np.linspace(1, 12, len(colorscale) - 1))
fips = df_sample['FIPS'].tolist()
values = df_sample['Unemployment Rate (%)'].tolist()
fig = ff.create_choropleth(
    fips=fips, values=values,
    binning_endpoints=endpts,
    colorscale=colorscale,
    show_state_data=False,
    show_hover=True, centroid_marker={'opacity': 0},
    asp=2.9, title='USA by Unemployment %',
    legend_title='% unemployed'
)
py.iplot(fig, filename='choropleth_full_usa')
# -
# Also see Mapbox county choropleths made in Python: [https://plot.ly/python/mapbox-county-choropleth/](https://plot.ly/python/mapbox-county-choropleth/)
# #### Reference
help(ff.create_choropleth)
# +
# Publish this notebook to the Plotly documentation site (doc-build tooling).
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
# ! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
    'county_choropleth.ipynb', 'python/county-choropleth/', 'USA County Choropleth Maps',
    'How to create colormaped representations of USA counties by FIPS values in Python.',
    title = 'Python USA County Choropleth Maps | Plotly',
    has_thumbnail='true', thumbnail='thumbnail/county-choropleth-usa-greybkgd.jpg',
    language='python', page_type='example_index',
    display_as='maps', order=0,
    uses_plotly_offline=False,ipynb='~notebook_demo/212')
# -
| _posts/python-v3/maps/county-choropleth/county_choropleth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import twint
import pandas as pd
import nest_asyncio
# Patch the already-running Jupyter event loop so twint can run inside it.
nest_asyncio.apply()
import re
import matplotlib.pyplot as plt
from textblob import TextBlob
import datetime
# +
# Functions
def twint_to_pandas(columns): #Creds to <NAME>
    """Return the last twint search's results as a DataFrame with *columns*."""
    return twint.output.panda.Tweets_df[columns]

def getTweets(username): #runs a twint search and returns a pandas df
    """Scrape today's tweets (up to 2000) from *username* into a DataFrame."""
    c = twint.Config()
    # c.Search= str(st)
    c.Username = username
    c.Limit = 2000
    c.Hide_output = True
    c.Pandas = True
    # Chained assignment: both c.Since and now_time get today's date string.
    c.Since = now_time = datetime.datetime.now().strftime("%Y-%m-%d")
    twint.run.Search(c)
    df = twint_to_pandas(["date", "username", "tweet"])
    return df
# -
df = getTweets("MrZackMorris")
df
# +
# Configure a broader search for cashtags ("$...") and store results to CSV.
c = twint.Config()
c.Search = "\$"
c.Output = "tweets.csv"
c.Store_csv = True
c.Limit = 2000
c.Pandas = True
c.Pandas_clean = True
c.Pandas_au = True
#Run
twint.run.Search(c)
# -
# ## Preprocessing data
df = pd.read_csv('tweets.csv')
df.drop_duplicates(inplace=True)
df = df.reset_index(drop=True)
# Tweet count per day.
xx = df.groupby(['date']).agg({'tweet':'count'})
# ## Matplotlib graphs
plt.style.use('seaborn-darkgrid')
plt.figure(figsize=(20,10))
plt.plot(xx)
plt.xticks(rotation=50, size=13)
plt.yticks(rotation=50, size=13)
plt.show()
# ## Plotly graphs
# +
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
# plotly.graph_objects
colors = px.colors.qualitative.Plotly
fig = go.Figure()
fig.add_traces(go.Scatter(x=xx.reset_index()["date"] ,y = xx['tweet'], mode = 'lines', line=dict(color=colors[0])))
fig.show()
# -
df.tweet
df[df.tweet.str.contains('\$')]['tweet']
# Extract cashtag tickers ($XYZ) from each tweet into a list-valued column.
df["tickers"] = df.tweet.apply(lambda x: re.findall(r'\$(\w+)', x))
# NOTE(review): findall always returns a list (possibly empty), so the
# notnull() filter below never removes any rows — confirm intent.
df_date = df[df.tickers.notnull()][["tickers", "tweet","date"]]
df_date.sort_values("date", ascending=False, inplace=True)
# NOTE(review): .loc[0] selects the row *labelled* 0, which may not be the
# newest tweet after sorting (and may not exist) — verify.
df_date.tweet.loc[0]
| Tweet notifications.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: optimas
# language: python
# name: optimas
# ---
import csv
import random
from collections import defaultdict
from copy import deepcopy
from typing import List

# NOTE(review): the file handle is never closed; "with open(...)" would be safer.
out = csv.reader(open('Eldrow Wordlist - Sheet1.csv', 'r'))
wordList = [line[0] for line in out]
import matplotlib.pyplot as plt
# Frequency of each letter at each of the five positions across the word list.
freq = {i: defaultdict(int) for i in range(5)}
for i in range(5):
    for word in wordList:
        freq[i][word[i]] += 1
# Game Engine
class WordleHardMode:
"""
## four types of colour ##
0. no colour (initial phase)
1. grey - non-existing alphabet
2. yellow - correct alphabet but incorrect location
3. green - correct alphabet with correct location
"""
def __init__(self, wordListFile = 'Eldrow Wordlist - Sheet1.csv',
verbose = False):
# game
self.targetWord = None
self.colours = ["W"] * 5
self.loadWordList(wordListFile)
self.guesses = []
self.finished = False
# helper variables
self.greens = [''] * 5
self.yellows = [[],[],[],[],[]]
self.greys = []
# game settings
self.verbose = verbose
# setup
def loadWordList(self, f: str) -> None:
out = csv.reader(open(f, 'r'))
self.WORDLIST = [line[0] for line in out]
self.validWords = self.WORDLIST
def randomTarget(self) -> None:
self.targetWord = random.choice(self.WORDLIST)
def setTarget(self, targetWord: str):
assert targetWord in self.WORDLIST, "Word not in word list"
self.targetWord = targetWord
# game
def step(self, guessWord: str) -> WordleHardMode:
gs = self._guess(guessWord)
if not gs.finished:
gs._getValidWords()
return gs
def getLegalMoves(self) -> List[str]:
return self.validWords
# helper function
def _guess(self, guessWord: str) -> WordleHardMode:
if guessWord not in self.validWords:
# print("Invalid word. Try again!")
print(f"{guessWord} is not in {self.validWords}")
return
nextGameState = deepcopy(self)
nextGameState.guesses.append(guessWord)
if (guessWord == nextGameState.targetWord):
if self.verbose:
print("Game finished!")
nextGameState.finished = True
for i in range(5):
nextGameState.colours[i] = "G"
return nextGameState
for i in range(5):
# marked for green firsts
if guessWord[i] == nextGameState.targetWord[i]:
nextGameState.colours[i] = "G"
nextGameState.greens[i] = guessWord[i]
continue
# marked yellow or grey
nextGameState.colours[i] = "X"
if guessWord[i] in self.targetWord and guessWord[i] != nextGameState.targetWord[i]:
for j, targetChar in enumerate(self.targetWord):
if i == j:
continue
if guessWord[i] == targetChar and \
(self.colours[j] != "G" or guessWord[j] == nextGameState.targetWord[j]):
nextGameState.colours[i] = "Y"
break
if nextGameState.colours[i] == "Y":
nextGameState.yellows[i].append(guessWord[i])
else:
nextGameState.greys.append(guessWord[i])
if nextGameState.verbose:
print(guessWord)
print("".join(nextGameState.colours))
return nextGameState
def _getValidWords(self) -> None:
validWords = []
for word in self.validWords:
valid = True
for idx, char in enumerate(word):
# condition: green letter must be used again
if self.greens[idx] != '' and char != self.greens[idx]:
valid = False
continue
# condition: yellow cannot be played in the same position
if char in self.yellows[idx]:
valid = False
continue
# condition: grey cannot be played in any position
if char in self.greys:
valid = False
continue
# condition: check if all yellows are played
for posV in self.yellows:
for charYellow in posV:
if charYellow not in word:
valid = False
continue
if valid:
validWords.append(word)
self.validWords = validWords
def debug(self):
print(self.greens)
print(self.yellows)
print(self.greys)
def __str__(self):
if len(self.guesses ) == 0:
return
return f"Round {len(self.guesses)}:\n{self.guesses[-1]}\n{''.join(self.colours)}"
# Fresh game with a fixed target for reproducible runs.
gameState = WordleHardMode()
gameState.setTarget("APPLE")
# +
# # test
# nextGameState = gameState.guess("EXIST")
# print(nextGameState)
# print(len(nextGameState.getValidWords()))
# -
def getMaxWord(gameState):
    """Return (word, count): the legal guess that leaves the most words
    still valid afterwards — i.e. the move that prolongs the game."""
    maxWord = ''
    maxOptions = -1
    for candidate in gameState.validWords:
        # print(candidate)
        nextState = gameState.step(candidate)
        # A winning guess ends the game; there is nothing left to maximise.
        if nextState.finished:
            continue
        remaining = len(nextState.validWords)
        if remaining > maxOptions:
            maxOptions = remaining
            maxWord = candidate
    return maxWord, maxOptions
word, count = getMaxWord(gameState)
print(count)
gameState = gameState.step(word)
print(gameState)

# Restart and repeatedly play the move that keeps the most words alive —
# the adversarial "longest possible game" (Eldrow-style) line.
gameState = WordleHardMode()
gameState.setTarget("APPLE")
while True:
    word, count = getMaxWord(gameState)
    gameState = gameState.step(word)
    print(gameState)
    if len(gameState.validWords) == 1:
        # Only one word left: play it to finish the game.
        gameState = gameState.step(gameState.validWords[0])
        print(gameState)
        print("Finished")
        break

# Replay a fixed sequence of guesses against the same target.
gameState = WordleHardMode()
gameState.setTarget("APPLE")
for guess in ["FUZZY", "VIVID", "BONGO", "MAMMA", "WREAK"]:
    gameState = gameState.step(guess)
    print(gameState)
gs = gameState.step("ASSET")
gs.validWords
| old/Wordle Puzzle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.9 64-bit
# name: python3
# ---
import pyglet.gl as gl
import numpy as np
import pyrealsense2 as rs
import open3d as o3d
import matplotlib.pyplot as plt
# Python 3 renamed the low-level "thread" module to "_thread";
# the bare "import thread" raised ModuleNotFoundError.
import _thread as thread
class PointObject:
    """One captured frame: point cloud, raw depth, RGB image and timestamp."""
    def __init__(self, point_cloud, depth, rgb, timestamp):
        # Store everything exactly as handed in; no copies or conversions.
        self.point_cloud, self.depth = point_cloud, depth
        self.rgb, self.timestamp = rgb, timestamp
def depth_cam_collection():
    """Continuously capture RealSense frames, convert each to a point cloud
    via a temporary PLY file, and collect them as PointObject records.

    NOTE(review): loops until interrupted; point_cloud_list is local and
    never returned, so the collected frames are lost on exit — confirm intent.
    """
    point_cloud_list = []
    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = rs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = rs.points()
    # Declare RealSense pipeline, encapsulating the actual device and sensors
    pipe = rs.pipeline()
    config = rs.config()
    # Enable depth and colour stream
    config.enable_stream(rs.stream.depth)
    config.enable_stream(rs.stream.color)
    # Start streaming with chosen configuration
    pipe.start(config)
    # We'll use the colorizer to generate texture for our PLY
    # (alternatively, texture can be obtained from color or infrared stream)
    colorizer = rs.colorizer()
    try:
        # Skip 5 first frames to give the Auto-Exposure time to adjust
        for x in range(5):
            pipe.wait_for_frames()
        while True:
            # Wait for the next set of frames from the camera
            frames = pipe.wait_for_frames()
            colorized = colorizer.process(frames)
            # Check the colour image
            color_frame = frames.get_color_frame()
            depth_frame = frames.get_depth_frame()
            time_stamp = frames.get_timestamp()
            print(time_stamp)
            color = np.asanyarray(color_frame.get_data())
            depth = np.asanyarray(depth_frame.get_data())
            # fig, (ax1, ax2) = plt.subplots(1, 2)
            # fig.suptitle('Horizontally stacked subplots')
            # ax1.imshow(color)
            # ax2.imshow(depth)
            # Create save_to_ply object
            # NOTE(review): "1.ply" is overwritten on every iteration.
            ply = rs.save_to_ply("1.ply")
            # Set options to the desired values
            # In this example we'll generate a textual PLY with normals (mesh is already created by default)
            ply.set_option(rs.save_to_ply.option_ply_binary, False)
            ply.set_option(rs.save_to_ply.option_ply_normals, True)
            print("Saving to 1.ply...")
            # Apply the processing block to the frameset which contains the depth frame and the texture
            ply.process(colorized)
            print("Done")
            pcd = o3d.io.read_point_cloud("1.ply")  # Load saved pointcloud
            point_cloud_data = np.asarray(pcd.points)  # NOTE(review): original comment flagged a pickle error here to be checked
            point_object = PointObject(point_cloud_data, depth, color, time_stamp)
            point_cloud_list.append(point_object)
    finally:
        # Always stop streaming, even on error or interrupt.
        pipe.stop()
# NOTE(review): point_cloud_list is local to depth_cam_collection(), so this
# cell only works if that function's body was executed at top level — verify.
arr = np.asarray(point_cloud_list)  # fixed: this notebook imports numpy as "np", not "numpy"
point_cloud_1 = point_cloud_list[10].point_cloud
# NOTE(review): draw_geometries expects an open3d geometry; point_cloud here is
# a raw numpy array and likely needs wrapping in o3d.geometry.PointCloud first.
o3d.visualization.draw_geometries([point_cloud_1])
# +
pcd = o3d.io.read_point_cloud("1.ply")  # Load saved pointcloud
# downpcd = pcd.voxel_down_sample(voxel_size=0.05)
# o3d.visualization.draw_geometries([downpcd])
print(np.asarray(pcd.points))
o3d.visualization.draw_geometries([pcd])
# -
# Inspect the raw (N, 3) xyz coordinates of the loaded cloud.
frame = np.asarray(pcd.points)
frame.shape
import cv2 # state of the art computer vision algorithms library
import numpy as np # fundamental package for scientific computing
import matplotlib.pyplot as plt # 2D plotting library producing publication quality figures
from pyntcloud import PyntCloud # open source library for 3D pointcloud visualisation
import pyrealsense2 as rs # Intel RealSense cross-platform open-source API
print("Environment Ready")
# +
# Setup: start the camera with its default configuration.
pipe = rs.pipeline()
cfg = rs.config()
profile = pipe.start(cfg)
# Skip 5 first frames to give the Auto-Exposure time to adjust
for x in range(5):
    pipe.wait_for_frames()
# Store next frameset for later processing:
frameset = pipe.wait_for_frames()
color_frame = frameset.get_color_frame()
depth_frame = frameset.get_depth_frame()
# Cleanup:
pipe.stop()
print("Frames Captured")
# -
# Colour image as a NumPy array (zero-copy view when possible).
color = np.asanyarray(color_frame.get_data())
plt.rcParams["axes.grid"] = False
plt.imshow(color)
# Colorize the depth frame for display (depth mapped onto a colour ramp).
colorizer = rs.colorizer()
colorized_depth = np.asanyarray(colorizer.colorize(depth_frame).get_data())
plt.imshow(colorized_depth)
# Map the point cloud onto the colour stream and export it, textured,
# to a PLY file for external viewers.
pc = rs.pointcloud();
pc.map_to(color_frame);
pointcloud = pc.calculate(depth_frame);
pointcloud.export_to_ply("1.ply", color_frame)
# Visualise the exported cloud with pyntcloud.
cloud = PyntCloud.from_file("1.ply");
cloud.plot()
print(cloud)
cloud.points.describe()
| depth_camera_point_cloud.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (herschelhelp_internal)
# language: python
# name: helpint
# ---
# # EGS master catalogue
# ## Preparation of CANDELS-EGS data
#
# CANDELS-EGS catalogue: the catalogue comes from `dmu0_CANDELS-EGS`.
#
# In the catalogue, we keep:
#
# - The identifier (it's unique in the catalogue);
# - The position;
# - The stellarity;
# - The magnitude for each band in 2 arcsec aperture (aperture 10).
# - The kron magnitude to be used as total magnitude (no “auto” magnitude is provided).
#
# We don't know when the maps have been observed. We will use the year of the reference paper.
# Record code and execution provenance for reproducibility.
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
import datetime
print("This notebook was executed on: \n{}".format(datetime.datetime.now()))
# +
# %matplotlib inline
# #%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
from collections import OrderedDict
import os
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from herschelhelp_internal.flagging import gaia_flag_column
from herschelhelp_internal.masterlist import nb_astcor_diag_plot, remove_duplicates
from herschelhelp_internal.utils import astrometric_correction, flux_to_mag
# +
# Output directory for the generated catalogue, overridable through the
# TMP_DIR environment variable.
OUT_DIR = os.environ.get('TMP_DIR', "./data_tmp")

# `exist_ok=True` replaces the try/except FileExistsError idiom.
os.makedirs(OUT_DIR, exist_ok=True)

# Names of the position columns in the imported catalogue.
RA_COL = "candels-egs_ra"
DEC_COL = "candels-egs_dec"
# -
# ## I - Column selection
# +
# Mapping from CANDELS-EGS catalogue column names to the HELP master-list
# naming scheme (f_/ferr_ for fluxes, _ap_ for 2-arcsec aperture values).
imported_columns = OrderedDict({
    'ID': "candels-egs_id",
    'RA': "candels-egs_ra",
    'DEC': "candels-egs_dec",
    'CLASS_STAR': "candels-egs_stellarity",
    #HST data
    'FLUX_APER_10_F606W': "f_ap_acs_f606w",
    'FLUXERR_APER_10_F606W': "ferr_ap_acs_f606w",
    'FLUX_AUTO_F606W': "f_acs_f606w",
    'FLUXERR_AUTO_F606W': "ferr_acs_f606w",
    'FLUX_APER_10_F814W': "f_ap_acs_f814w",
    'FLUXERR_APER_10_F814W': "ferr_ap_acs_f814w",
    'FLUX_AUTO_F814W': "f_acs_f814w",
    'FLUXERR_AUTO_F814W': "ferr_acs_f814w",
    'FLUX_APER_10_F125W': "f_ap_wfc3_f125w",
    'FLUXERR_APER_10_F125W': "ferr_ap_wfc3_f125w",
    'FLUX_AUTO_F125W': "f_wfc3_f125w",
    'FLUXERR_AUTO_F125W': "ferr_wfc3_f125w",
    'FLUX_APER_10_F140W': "f_ap_wfc3_f140w",
    'FLUXERR_APER_10_F140W': "ferr_ap_wfc3_f140w",
    'FLUX_AUTO_F140W': "f_wfc3_f140w",
    'FLUXERR_AUTO_F140W': "ferr_wfc3_f140w",
    'FLUX_APER_10_F160W': "f_ap_wfc3_f160w",
    'FLUXERR_APER_10_F160W': "ferr_ap_wfc3_f160w",
    'FLUX_AUTO_F160W': "f_wfc3_f160w",
    'FLUXERR_AUTO_F160W': "ferr_wfc3_f160w",
    #CFHT Megacam
    'CFHT_u_FLUX': "f_candels-megacam_u", # 9 CFHT_u_FLUX Flux density (in μJy) in the u*-band (CFHT/MegaCam) (3)
    'CFHT_u_FLUXERR': "ferr_candels-megacam_u",# 10 CFHT_u_FLUXERR Flux uncertainty (in μJy) in the u*-band (CFHT/MegaCam) (3)
    'CFHT_g_FLUX': "f_candels-megacam_g",# 11 CFHT_g_FLUX Flux density (in μJy) in the g'-band (CFHT/MegaCam) (3)
    'CFHT_g_FLUXERR': "ferr_candels-megacam_g",# 12 CFHT_g_FLUXERR Flux uncertainty (in μJy) in the g'-band (CFHT/MegaCam) (3)
    'CFHT_r_FLUX': "f_candels-megacam_r",# 13 CFHT_r_FLUX Flux density (in μJy) in the r'-band (CFHT/MegaCam) (3)
    'CFHT_r_FLUXERR': "ferr_candels-megacam_r",# 14 CFHT_r_FLUXERR Flux uncertainty (in μJy) in the r'-band (CFHT/MegaCam) (3)
    'CFHT_i_FLUX': "f_candels-megacam_i",# 15 CFHT_i_FLUX Flux density (in μJy) in the i'-band (CFHT/MegaCam) (3)
    'CFHT_i_FLUXERR': "ferr_candels-megacam_i",# 16 CFHT_i_FLUXERR Flux uncertainty (in μJy) in the i'-band (CFHT/MegaCam) (3)
    'CFHT_z_FLUX': "f_candels-megacam_z",# 17 CFHT_z_FLUX Flux density (in μJy) in the z'-band (CFHT/MegaCam) (3)
    'CFHT_z_FLUXERR': "ferr_candels-megacam_z",# 18 CFHT_z_FLUXERR
    #CFHT WIRCAM
    'WIRCAM_J_FLUX': "f_candels-wircam_j",# 29 WIRCAM_J_FLUX Flux density (in μJy) in the J-band (CFHT/WIRCam) (3)
    'WIRCAM_J_FLUXERR': "ferr_candels-wircam_j",# 30 WIRCAM_J_FLUXERR Flux uncertainty (in μJy) in the J-band (CFHT/WIRCam) (3)
    'WIRCAM_H_FLUX': "f_candels-wircam_h",# 31 WIRCAM_H_FLUX Flux density (in μJy) in the H-band (CFHT/WIRCam) (3)
    'WIRCAM_H_FLUXERR': "ferr_candels-wircam_h",# 32 WIRCAM_H_FLUXERR Flux uncertainty (in μJy) in the H-band (CFHT/WIRCam) (3)
    'WIRCAM_K_FLUX': "f_candels-wircam_k",# 33 WIRCAM_K_FLUX Flux density (in μJy) in the Ks-band (CFHT/WIRCam) (3)
    'WIRCAM_K_FLUXERR': "ferr_candels-wircam_k",# 34 WIRCAM_K_FLUXERR
    #Mayall/Newfirm
    'NEWFIRM_J1_FLUX': "f_candels-newfirm_j1",# 35 NEWFIRM_J1_FLUX Flux density (in μJy) in the J1-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_J1_FLUXERR': "ferr_candels-newfirm_j1",# 36 NEWFIRM_J1_FLUXERR Flux uncertainty (in μJy) in the J1-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_J2_FLUX': "f_candels-newfirm_j2",# 37 NEWFIRM_J2_FLUX Flux density (in μJy) in the J2-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_J2_FLUXERR': "ferr_candels-newfirm_j2",# 38 NEWFIRM_J2_FLUXERR Flux uncertainty (in μJy) in the J2-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_J3_FLUX': "f_candels-newfirm_j3",# 39 NEWFIRM_J3_FLUX Flux density (in μJy) in the J3-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_J3_FLUXERR': "ferr_candels-newfirm_j3",# 40 NEWFIRM_J3_FLUXERR Flux uncertainty (in μJy) in the J3-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_H1_FLUX': "f_candels-newfirm_h1",# 41 NEWFIRM_H1_FLUX Flux density (in μJy) in the H1-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_H1_FLUXERR': "ferr_candels-newfirm_h1",# 42 NEWFIRM_H1_FLUXERR Flux uncertainty (in μJy) in the H1-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_H2_FLUX': "f_candels-newfirm_h2",# 43 NEWFIRM_H2_FLUX Flux density (in μJy) in the H2-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_H2_FLUXERR': "ferr_candels-newfirm_h2",# 44 NEWFIRM_H2_FLUXERR Flux uncertainty (in μJy) in the H2-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_K_FLUX': "f_candels-newfirm_k",# 45 NEWFIRM_K_FLUX Flux density (in μJy) in the K-band (Mayall/NEWFIRM) (3)
    'NEWFIRM_K_FLUXERR': "ferr_candels-newfirm_k",# 46 NEWFIRM_K_FLUXERR
    #Spitzer/IRAC
    'IRAC_CH1_FLUX': "f_candels-irac_i1",# 47 IRAC_CH1_FLUX Flux density (in μJy) in the 3.6μm-band (Spitzer/IRAC) (3)
    'IRAC_CH1_FLUXERR': "ferr_candels-irac_i1",# 48 IRAC_CH1_FLUXERR Flux uncertainty (in μJy) in the 3.6μm-band (Spitzer/IRAC) (3)
    'IRAC_CH2_FLUX': "f_candels-irac_i2",# 49 IRAC_CH2_FLUX Flux density (in μJy) in the 4.5μm-band (Spitzer/IRAC) (3)
    'IRAC_CH2_FLUXERR': "ferr_candels-irac_i2",# 50 IRAC_CH2_FLUXERR Flux uncertainty (in μJy) in the 4.5μm-band (Spitzer/IRAC) (3)
    'IRAC_CH3_FLUX': "f_candels-irac_i3",# 51 IRAC_CH3_FLUX Flux density (in μJy) in the 5.8μm-band (Spitzer/IRAC) (3)
    'IRAC_CH3_FLUXERR': "ferr_candels-irac_i3",# 52 IRAC_CH3_FLUXERR Flux uncertainty (in μJy) in the 5.8μm-band (Spitzer/IRAC) (3)
    'IRAC_CH4_FLUX': "f_candels-irac_i4",# 53 IRAC_CH4_FLUX Flux density (in μJy) in the 8.0μm-band (Spitzer/IRAC) (3)
    'IRAC_CH4_FLUXERR': "ferr_candels-irac_i4"# 54 IRAC_CH4_FLUXERR
    })

# Read the source catalogue keeping only the columns above, then rename
# them to the HELP convention.
catalogue = Table.read("../../dmu0/dmu0_CANDELS-EGS/data/hlsp_candels_hst_wfc3_egs-tot-multiband_f160w_v1_cat.fits")[list(imported_columns)]
for column in imported_columns:
    catalogue[column].name = imported_columns[column]

# Observation epoch is unknown; use the reference-paper year (see note above).
epoch = 2011

# Clean table metadata
catalogue.meta = None
# +
# Adding flux and band-flag columns
for col in catalogue.colnames:
    if col.startswith('f_'):
        errcol = "ferr{}".format(col[1:])

        # Some objects have a flux of 0; we assume this means a missing value
        #catalogue[col][catalogue[col] <= 0] = np.nan
        #catalogue[errcol][catalogue[errcol] <= 0] = np.nan

        # Catalogue fluxes are in µJy; the 1.e-6 factor presumably converts
        # them to Jy for flux_to_mag — TODO confirm against
        # herschelhelp_internal.utils.flux_to_mag.
        mag, error = flux_to_mag(np.array(catalogue[col])*1.e-6, np.array(catalogue[errcol])*1.e-6)

        # Fluxes are added in µJy
        catalogue.add_column(Column(mag, name="m{}".format(col[1:])))
        catalogue.add_column(Column(error, name="m{}".format(errcol[1:])))

        # Aperture photometry only exists for the HST (ACS/WFC3) bands; the
        # other bands get NaN aperture columns so every band shares the same
        # schema in the master list.
        if ('wfc' not in col) & ('acs' not in col):
            catalogue.add_column(Column(np.full(len(catalogue), np.nan), name="m_ap{}".format(col[1:])))
            catalogue.add_column(Column(np.full(len(catalogue), np.nan), name="merr_ap{}".format(col[1:])))
            catalogue.add_column(Column(np.full(len(catalogue), np.nan), name="f_ap{}".format(col[1:])))
            catalogue.add_column(Column(np.full(len(catalogue), np.nan), name="ferr_ap{}".format(col[1:])))

        # Band-flag column (one per total-flux band, all False for now)
        if "ap" not in col:
            catalogue.add_column(Column(np.zeros(len(catalogue), dtype=bool), name="flag{}".format(col[1:])))

# TODO: Set to True the flag columns for fluxes that should not be used for SED fitting.
# -
catalogue[:10].show_in_notebook()
# ## II - Removal of duplicated sources
# We remove duplicated objects from the input catalogues.
# +
# Columns used to rank duplicates — presumably the entry with the smallest
# aperture-flux errors is kept; confirm in herschelhelp_internal.masterlist.
SORT_COLS = ['ferr_ap_acs_f606w', 'ferr_ap_acs_f814w', 'ferr_ap_wfc3_f125w', 'ferr_ap_wfc3_f140w', 'ferr_ap_wfc3_f160w']
FLAG_NAME = 'candels-egs_flag_cleaned'
nb_orig_sources = len(catalogue)
catalogue = remove_duplicates(catalogue, RA_COL, DEC_COL, sort_col=SORT_COLS,flag_name=FLAG_NAME)
nb_sources = len(catalogue)
print("The initial catalogue had {} sources.".format(nb_orig_sources))
print("The cleaned catalogue has {} sources ({} removed).".format(nb_sources, nb_orig_sources - nb_sources))
print("The cleaned catalogue has {} sources flagged as having been cleaned".format(np.sum(catalogue[FLAG_NAME])))
# -
# ## III - Astrometry correction
#
# We match the astrometry to the Gaia one. We limit the Gaia catalogue to sources with a g band flux between the 30th and the 70th percentile. Some quick tests show that this gives the lowest dispersion in the results.
# Match the astrometry against Gaia and apply the mean RA/Dec offsets.
gaia = Table.read("../../dmu0/dmu0_GAIA/data/GAIA_EGS.fits")
gaia_coords = SkyCoord(gaia['ra'], gaia['dec'])

# Diagnostic plot of the separations BEFORE the correction.
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
                    gaia_coords.ra, gaia_coords.dec)

# +
delta_ra, delta_dec = astrometric_correction(
    SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]),
    gaia_coords
)
print("RA correction: {}".format(delta_ra))
print("Dec correction: {}".format(delta_dec))
# -

# Apply the offsets in degrees (units must be set before the addition).
catalogue[RA_COL].unit = u.deg
catalogue[DEC_COL].unit = u.deg
catalogue[RA_COL] = catalogue[RA_COL] + delta_ra.to(u.deg)
catalogue[DEC_COL] = catalogue[DEC_COL] + delta_dec.to(u.deg)

# Diagnostic plot AFTER the correction.
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
                    gaia_coords.ra, gaia_coords.dec)

# ## IV - Flagging Gaia objects
catalogue.add_column(
    gaia_flag_column(SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]), epoch, gaia)
)
# +
GAIA_FLAG_NAME = "candels-egs_flag_gaia"
catalogue['flag_gaia'].name = GAIA_FLAG_NAME
print("{} sources flagged.".format(np.sum(catalogue[GAIA_FLAG_NAME] > 0)))
# -
# # V - Saving to disk
catalogue.write("{}/CANDELS-EGS.fits".format(OUT_DIR), overwrite=True)
| dmu1/dmu1_ml_EGS/1.6_CANDELS-EGS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pandas import read_csv
from sklearn.model_selection import train_test_split as ttsplit
from sklearn.neighbors import KNeighborsClassifier as knn
from numpy import array
# Fit a k-nearest-neighbours classifier on the iris data set.
df = read_csv("iris.csv")
# FIX: positional `axis` for DataFrame.drop was deprecated in pandas 1.0 and
# removed in 2.0 — use the explicit `columns=` keyword instead.
x, y = array(df.drop(columns=["species"])), array(df["species"])
x_train, x_test, y_train, y_test = ttsplit(x, y, test_size=0.2)
# Default n_neighbors=5 is used; see the commented alternative.
clf = knn().fit(x_train, y_train)  # n_neighbors = len(set(df["species"]))
clf.score(x_test, y_test)
# Feature ranges, to pick a plausible query point below.
print(df["sepal_length"].max(), df["sepal_length"].min())
print(df["sepal_width"].max(), df["sepal_width"].min())
print(df["petal_length"].max(), df["petal_length"].min())
print(df["petal_width"].max(), df["petal_width"].min())
df.columns
# Predict the species of a single hand-picked sample
# (order: sepal_length, sepal_width, petal_length, petal_width).
clf.predict(array([[4.0, 2.0, 1.0, 1.0]]))
| ex_8/cluster_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pip install torch torchvision progress
# +
'''
Training script for CIFAR-10/100
Copyright (c) <NAME>, 2017
'''
from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models.cifar as models
from tqdm.notebook import tqdm, trange
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
# +
use_cuda = torch.cuda.is_available()

class Args:
    """Hyper-parameters and bookkeeping options (notebook stand-in for argparse)."""
    dataset = 'cifar100'
    workers = 12
    epochs = 5 #164
    start_epoch = 0
    train_batch = 128
    test_batch = 100
    lr = 0.1
    drop = 0
    schedule = [81, 122]        # epochs at which the LR is decayed by `gamma`
    gamma = 0.1
    momentum = 0.9
    weight_decay = 5e-4
    checkpoint = 'checkpoint'   # directory for checkpoints and logs
    resume = ''                 # path to a checkpoint to resume from ('' = fresh run)
    arch = 'alexnet'
    depth = 29
    block_name = 'BasicBlock'
    cardinality = 8
    widen_factor = 4
    growthRate = 12
    compressionRate = 2
    manualSeed = None           # None → a random seed is drawn below
    evaluate = False #** action 'store_true'; help='evaluate model on validation set'
    gpu_id = '0'

# Names of the model constructors exposed by models.cifar.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

args = Args()

# Mirror of the options as a plain dict; state['lr'] is mutated during training.
state = {
    "dataset" : args.dataset,
    "workers" : args.workers,
    "epochs" : args.epochs,
    "start_epoch" : args.start_epoch,
    "train_batch" : args.train_batch,
    "test_batch" : args.test_batch,
    "lr" : args.lr,
    "drop" : args.drop,
    "schedule" : args.schedule,
    "gamma" : args.gamma,
    "momentum" : args.momentum,
    "weight_decay" : args.weight_decay,
    "checkpoint" : args.checkpoint,
    "resume" : args.resume,
    "arch" : args.arch,
    "depth" : args.depth,
    "block_name" : args.block_name,
    "cardinality" : args.cardinality,
    "widen_factor" : args.widen_factor,
    "growthRate" : args.growthRate,
    "compressionRate" : args.compressionRate,
    # FIX: key was misspelled "anualSeed".
    "manualSeed" : args.manualSeed,
    "evaluate" : args.evaluate,
    "gpu_id" : args.gpu_id,
}

# Validate dataset
assert args.dataset == 'cifar10' or args.dataset == 'cifar100', 'Dataset can only be cifar10 or cifar100.'

# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

# Random seed (drawn once so runs are reproducible from the logged value)
if args.manualSeed is None:
    args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
    torch.cuda.manual_seed_all(args.manualSeed)

best_acc = 0  # best test accuracy
# +
def train(trainloader, model, criterion, optimizer, epoch, use_cuda):
    """Run one training epoch; return (mean loss, mean top-1 accuracy)."""
    # switch to train mode
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()

    bar = Bar('Processing', max=len(trainloader))
    for batch_idx, (inputs, targets) in tqdm(enumerate(trainloader), total=len(trainloader)):
        # measure data loading time
        data_time.update(time.time() - end)

        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
        # FIX: the deprecated torch.autograd.Variable wrappers were removed —
        # since PyTorch 0.4 tensors carry autograd state themselves, so the
        # wrapping was a no-op.

        # compute output
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_idx + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def test(testloader, model, criterion, epoch, use_cuda):
    """Evaluate on the test set; return (mean loss, mean top-1 accuracy)."""
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    # FIX: `Variable(..., volatile=True)` has been a no-op since PyTorch 0.4,
    # so gradients were still being tracked during evaluation.
    # torch.no_grad() restores the intended behaviour (no autograd bookkeeping).
    with torch.no_grad():
        for batch_idx, (inputs, targets) in tqdm(enumerate(testloader), total=len(testloader)):
            # measure data loading time
            data_time.update(time.time() - end)

            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()

            # compute output
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
                batch=batch_idx + 1,
                size=len(testloader),
                data=data_time.avg,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                top1=top1.avg,
                top5=top5.avg,
            )
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
    """Serialize `state` under `checkpoint`; mirror it to model_best.pth.tar when best."""
    destination = os.path.join(checkpoint, filename)
    torch.save(state, destination)
    if not is_best:
        return
    best_path = os.path.join(checkpoint, 'model_best.pth.tar')
    shutil.copyfile(destination, best_path)
def adjust_learning_rate(optimizer, epoch):
    """Decay the learning rate by args.gamma when `epoch` is a milestone in args.schedule."""
    global state
    if epoch not in args.schedule:
        return
    state['lr'] *= args.gamma
    new_lr = state['lr']
    for group in optimizer.param_groups:
        group['lr'] = new_lr
# -
def main():
    """Train (or evaluate) a CIFAR-10/100 model according to the global `args`/`state`."""
    global best_acc
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    if not os.path.isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # Data: standard CIFAR augmentation (random crop + flip) for training,
    # normalisation only for testing.
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100

    trainset = dataloader(root='./data', train=True, download=True, transform=transform_train)
    trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)
    testset = dataloader(root='./data', train=False, download=False, transform=transform_test)
    testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)

    # Model: dispatch on the architecture name prefix; each family takes a
    # different set of constructor arguments.
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
            cardinality=args.cardinality,
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.startswith('densenet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            growthRate=args.growthRate,
            compressionRate=args.compressionRate,
            dropRate=args.drop,
        )
    elif args.arch.startswith('wrn'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            widen_factor=args.widen_factor,
            dropRate=args.drop,
        )
    elif args.arch.endswith('resnet'):
        model = models.__dict__[args.arch](
            num_classes=num_classes,
            depth=args.depth,
            block_name=args.block_name,
        )
    else:
        model = models.__dict__[args.arch](num_classes=num_classes)

    if use_cuda:
        model = torch.nn.DataParallel(model).cuda()
    cudnn.benchmark = True
    print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Resume
    title = 'cifar-10-' + args.arch
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(testloader, model, criterion, start_epoch, use_cuda)
        print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
        return

    # Train and val: one train + one test pass per epoch; the LR schedule is
    # applied before each epoch and the best model is checkpointed.
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
        print("Training ...")
        train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        print("Validation ...")
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)

        # append logger file
        logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_acc
        best_acc = max(test_acc, best_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'acc': test_acc,
            'best_acc': best_acc,
            'optimizer' : optimizer.state_dict(),
        }, is_best, checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))

    print('Best acc:')
    print(best_acc)

main()
| cifar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="JDHI0zqg6izy" outputId="af9679f2-58d3-4050-96cf-96568651575a" active=""
# # # from google.colab import auth
# # # auth.authenticate_user()
#
# # from google.colab import drive
# # drive.mount('/content/gdrive')
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="9VuEO3Ls62Fz" outputId="a9924de8-bf09-4687-8355-2d01adbd40c0" active=""
# # # 경로 변경
# # cd /content/gdrive/My Drive/AIBM/20191030/
# -
# [혈압·혈당 데이터](https://nhiss.nhis.or.kr/bd/ab/bdabf003cv.do)
#
# [연도] 2013~2014년 일반검진 및 생애전환기 건강검진 데이터 1,000,000건
# [항목] 연령, 수축기혈압, 이완기혈압, 공복혈당, 성별, 고혈압/당뇨병 진료여부, 체질량지수
# [변수]
# - BTH_G : 연령(그룹)
# - SBP : 수축기혈압
# - DBP : 이완기혈압
# - FBS : 공복혈당
# - SEX : 성별(남성:1, 여성:2)
# - DIS : 고혈압/당뇨병 진료여부
# 고혈압/당뇨병 진료내역 있음: 1
# 고혈압 진료내역 있음: 2
# 당뇨병 진료내역 있음: 3
# 고혈압/당뇨병 진료내역 없음: 4
# - BMI : 체질량지수
# [파일형식] excel, csv file
# ※ csv file은 쉼표로 구분한 텍스트 파일로서 워드패드, 통계프로그램(SPSS, SAS 등)에서 조회 가능 합니다.
# + colab={} colab_type="code" id="RCBSGWuy6eGx"
import pandas as pd
import numpy as np
import os
#os.chdir("C://Users//inhwan//Desktop")
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="HmLJ2KRE8F6K" outputId="4948a708-1639-4c0a-b633-6c3a23ac3071"
# !ls
# + colab={} colab_type="code" id="QP85ybdF6eG2"
# Load the national health-screening blood-pressure/glucose dataset
# (CSV with a Korean filename; the python engine handles the encoding quirks).
data = pd.read_csv("국가건강검진_혈압혈당데이터.csv", engine = 'python')
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="9lGn0ch76eG4" outputId="fcc19b12-53d4-4587-cd54-a08f3fe1f020"
data.info()
# -
# # 데이터 형 변환
# Explicit dtypes: categorical codes stay object, measurements become numeric.
convert_dict = {'SEX': object,
                'BTH_G': object,
                'SBP': int,
                'DBP': int,
                'FBS': int,
                'DIS': object,
                'BMI': float
                }
data = data.astype(convert_dict)
data.info()
# + [markdown] colab_type="text" id="NFNuFa4R6eG8"
# ## 데이터 범주화 및 y라벨 설정
# + colab={} colab_type="code" id="Pv53zZNu6eHB"
# DIS (treatment history) is dropped; risk is re-derived from measurements.
data = data.drop('DIS', axis = 1)
# + colab={} colab_type="code" id="L_jhRNJsC9cZ"
data['SBP_A'] = data['SBP'].apply(lambda x : 0 if x < 120 else 1 )  # at-risk:1 normal:0
# + colab={} colab_type="code" id="t433P7tvC9-K"
data['DBP_A'] = data['DBP'].apply(lambda x : 0 if x < 80 else 1 )  # at-risk:1 normal:0
# + colab={} colab_type="code" id="uv6d4tO_DnXE"
data['FBS_A'] = data['FBS'].apply(lambda x : 0 if x < 126 else 1 )  # at-risk:1 normal:0
# + colab={} colab_type="code" id="BKl98nxLEJeE"
data['BMI_A'] = data['BMI'].apply(lambda x : 0 if x < 27 else 1 )  # at-risk:1 normal:0
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="mb-nHATk8vYH" outputId="5e4e0e4b-598e-4bd3-f130-b3772a46e872"
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="VrKM92TKEeMV" outputId="4f126596-d9c0-4ee4-9189-80054ca80622"
data.info()
# -
data.describe()
# + colab={} colab_type="code" id="FCv9O7nSb_O7"
def healthcheck1 (row):
    """Binary risk label: 1 when at least two of the four risk flags are set, else 0."""
    flag_total = row['FBS_A'] + row['SBP_A'] + row['DBP_A'] + row['BMI_A']
    return 1 if flag_total > 1 else 0
# + colab={} colab_type="code" id="2ovvltk_JVcV"
def healthcheck2 (row):
    """Ordinal label: 4 = diabetes + hypertension + overweight, 3 = diabetes +
    hypertension, 2 = hypertension, 1 = diabetes, 0 = otherwise."""
    fbs, sbp, dbp, bmi = row['FBS_A'], row['SBP_A'], row['DBP_A'], row['BMI_A']
    if fbs + sbp + dbp + bmi == 4:
        return 4
    if fbs + sbp + dbp == 3:
        return 3
    if sbp + dbp == 2:
        return 2
    if fbs == 1:
        return 1
    return 0
# + colab={} colab_type="code" id="r_uNRHDdH0UM"
# Row-wise labels derived from the risk flags above.
data['HEALTH1'] = data.apply (lambda row: healthcheck1(row), axis=1)
# + colab={} colab_type="code" id="ftARuuOvccd6"
data['HEALTH2'] = data.apply (lambda row: healthcheck2(row), axis=1)
# -
data.info()
data.describe()
# + colab={} colab_type="code" id="yuEz8eaNU16N"
#data.drop(['SBP','DBP','FBS','BMI'], axis='columns', inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="yi51-LaMLqxv" outputId="67f4bead-5218-4f88-a1d3-ee73bb36bd00"
data.head()
# + [markdown] colab_type="text" id="9koYwaQZ6eHG"
# ## Data exploration
# -
data.corr()
# + colab={} colab_type="code" id="ZkzdUPKCZCwJ"
x_columns = ['SEX','BTH_G','SBP_A','DBP_A','FBS_A','BMI_A', 'HEALTH1'] # keep only the engineered flag columns (raw SBP etc. excluded)
data_lr = data[x_columns]
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="heNbTfXtZpJk" outputId="bc04c638-eae5-42d2-94d4-ea2f461975cd"
data_lr.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 73} colab_type="code" id="wAZfhoyw6eHH" outputId="bfe0370b-d2dc-49f9-c2bf-2358195ccc75"
# Health check label
# 0: normal
# 1: abnormal
data['HEALTH1'].value_counts()  # heavily imbalanced
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="UIt1L3QDQa2R" outputId="a6fead95-a06c-4ae3-e07e-65a3c72a142c"
data_lr.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 73} colab_type="code" id="0utoZ4C66eHJ" outputId="5ed43fc5-bd24-42d7-8604-5090677b0436"
data_lr['SEX'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 537} colab_type="code" id="jKoK2hki6eHL" outputId="24999524-7892-4979-e0c8-e313814b6e8c"
data_lr['BTH_G'].value_counts()
# + [markdown] colab_type="text" id="iTTuGcIs6eHN"
# ## data sampling
# + colab={"base_uri": "https://localhost:8080/", "height": 73} colab_type="code" id="dyVc0riC6eHQ" outputId="b3a66eaa-acd8-4f57-be44-660543edb562"
data_lr['HEALTH1'].value_counts() # 약 7프로만 당뇨 환자
# + colab={} colab_type="code" id="WfURd-hN6eHO"
data_sample = data_lr.sample(n=40000, random_state = 1234)
# + colab={"base_uri": "https://localhost:8080/", "height": 73} colab_type="code" id="e6m-6xeyf31H" outputId="4df79e3f-a3d1-44a0-97e7-bb50d343aed9"
data_sample['HEALTH1'].value_counts() # 약 7프로만 당뇨 환자
# + [markdown] colab_type="text" id="CDxHXlrc6eHS"
# # Logistic regression
# + colab={} colab_type="code" id="meOoFcXl6eHT"
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# NOTE(review): with ~7% positives, plain accuracy is a misleading score and
# the split is not stratified — consider stratify=Y and a recall/AUC metric.
Y = data_sample['HEALTH1']
X = data_sample.iloc[:,0:5]
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.3)
# + colab={"base_uri": "https://localhost:8080/", "height": 93} colab_type="code" id="wLsQLLYz6eHV" outputId="a7d8f9ab-c8a3-472f-9ac8-c07d5d6ebf75"
log_clf = LogisticRegression()
log_clf.fit(X_train, Y_train)
log_clf.score(X_test, Y_test)
# + [markdown] colab_type="text" id="gmrhQqvK6eHY"
# # 군집분석
# + colab={} colab_type="code" id="kHg9UVQA6eHZ"
# Cluster analysis using age, blood pressure, BMI, fasting glucose,
# diastolic pressure and disease flags.
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
# + colab={} colab_type="code" id="eCZNxRO16eHb"
#data_cluster = data_lr
# Work on a 10k subsample — scipy's linkage() is quadratic in memory/time.
data_cluster = data.sample(n=10000, random_state = 1234)
#x_columns = ['SEX','BTH_G','DBP','FBS','DIS_1','DIS_2','DIS_3','DIS_4'] # select only the significant variables (SBP excluded)
#y_columns = 'diabetes'
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="AjGfM2G26eHd" outputId="518c6f6e-6d53-4a2e-f578-bd833314b729"
data_cluster.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="ETi_A0Xq6eHf" outputId="aa8f150f-0bcf-496b-b21d-b0735a2f5120"
# # dendrogram
# from scipy.cluster.hierarchy import linkage, dendrogram
# import matplotlib.pyplot as plt
# # Calculate the linkage: mergings
# mergings = linkage(data_cluster,method='complete')
# # Plot the dendrogram, using varieties as labels
# plt.figure(figsize=(50,30))
# dendrogram(mergings,
#            labels = data_cluster['HEALTH1'].to_numpy(),
#            leaf_rotation=90,
#            leaf_font_size=10,
# )
# plt.show()

# dendrogram labelled with the ordinal HEALTH2 classes
from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.pyplot as plt
# Calculate the linkage: mergings
mergings = linkage(data_cluster,method='complete')
# Plot the dendrogram, using varieties as labels
plt.figure(figsize=(50,30))
dendrogram(mergings,
           labels = data_cluster['HEALTH2'].to_numpy(),
           leaf_rotation=90,
           leaf_font_size=10,
)
plt.show()
# + [markdown] colab_type="text" id="8NqOHjhy6eHk"
# # Kmeans
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="pbzUKlUB6eHl" outputId="637be44d-946b-41e3-829d-22833f314596"
data.head()
# + colab={} colab_type="code" id="S0scI2b56eHp"
#feature = data_lr[['SEX', 'BTH_G', 'SBP_A', 'DBP_A', 'FBS_A', 'BMI_A']]
feature = data
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="A-wmvwaJ6eHt" outputId="0b82e7a1-b17b-423e-f0b9-0077e9cf5d54"
feature.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 36} colab_type="code" id="luUYpbjS6eHv" outputId="9f9d3f86-e148-428a-8834-263de39d1c3f"
len(feature)
# + colab={} colab_type="code" id="7uUSiYR-6eHx"
# NOTE(review): KMeans(algorithm='auto') was deprecated in scikit-learn 1.1
# and removed in 1.3 (use 'lloyd') — confirm the pinned scikit-learn version.
model = KMeans(n_clusters=5,algorithm='auto')
model.fit(feature)  # fit the model
predict = pd.DataFrame(model.predict(feature))  # predict cluster labels
predict.columns=['cluster_label']
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="MetWoAYg6eH0" outputId="38ed7023-d85c-4bb4-8a5d-5e2571179f2b"
feature.reset_index(drop=True, inplace=True)
predict.reset_index(drop=True, inplace=True)
new_data = pd.concat([feature, predict], axis = 1)
new_data.head()  # data frame with the cluster label appended
# + colab={"base_uri": "https://localhost:8080/", "height": 128} colab_type="code" id="cjPuRhPq6eH2" outputId="8472c998-b19e-405f-ae38-f1b884556b70"
new_data['cluster_label'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} colab_type="code" id="_VD9PdM76eH4" outputId="e51223cf-d4d9-4a93-8eed-11612671339b"
# Plot the cluster centres (first two feature dimensions only).
centers = model.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5);
# + colab={} colab_type="code" id="FtOJor1i6eH6"
## Characterise each cluster
cluster1 = new_data[new_data['cluster_label']==0]
cluster2 = new_data[new_data['cluster_label']==1]
cluster3 = new_data[new_data['cluster_label']==2]
cluster4 = new_data[new_data['cluster_label']==3]
cluster5 = new_data[new_data['cluster_label']==4]
# + [markdown] colab_type="text" id="3oaJOkXT6eH8"
# ### 클러스터별 통계량 추출
# + colab={} colab_type="code" id="3HurZlx96eH9"
def makestat(df):
    """Print summary statistics for one cluster's rows.

    Prints: overall describe(), per-sex row counts, per-sex means of the
    blood-pressure / glucose / BMI columns, and the non-null counts of those
    columns.

    params: df DataFrame holding one cluster; must contain the columns
               SEX, SBP_A, DBP_A, FBS_A and BMI_A.
    """
    print(df.describe())
    print(df.groupby('SEX').count())  # row count per sex
    # Select columns with a list (tuple selection after groupby is deprecated
    # in pandas) and use the *_A column names that exist in this dataset —
    # 'SBP'/'DBP'/'FBS'/'BMI' are not present.
    print(df.groupby('SEX')[['SBP_A', 'DBP_A', 'FBS_A', 'BMI_A']].mean())
    # Fixed typo: df['DBP_A]'] (stray bracket inside the column name).
    print(df['SBP_A'].count(), df['DBP_A'].count(), df['FBS_A'].count(), df['BMI_A'].count())
# + colab={"base_uri": "https://localhost:8080/", "height": 198} colab_type="code" id="SVzKa0zurTb7" outputId="55af750d-a04d-40bf-ff3c-0a81503932aa"
# Per-cluster statistics via makestat (defined above).
cluster1.head()
#feature = cluster1[['SEX', 'BTH_G', 'SBP_A', 'DBP_A', 'FBS_A', 'BMI_A']]
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="4H3doi4R6eH_" outputId="3aa03386-e2d4-43e4-f7b3-746cdbcf17eb"
makestat(cluster1)
# + colab={} colab_type="code" id="4Xwwy_QZ6eIC" outputId="bea94991-8d83-4501-f99f-a6a3a3de849e"
makestat(cluster2)
# + colab={} colab_type="code" id="Y6QJASg_6eIH" outputId="a77bf5f6-12d5-45e4-b0fe-02596cb09362"
makestat(cluster3)
# + colab={} colab_type="code" id="PxRB1Q7W6eIJ" outputId="33bbe081-2fc8-42a4-ebd6-f98b760868e3"
makestat(cluster4)
# + colab={} colab_type="code" id="AetN1dBL6eIL" outputId="9e37dfe9-8f32-4bfc-a66b-902ead7f933e"
makestat(cluster5)
# + colab={} colab_type="code" id="T2IZDTuhfJHE"
# Health check codes
# 0: normal
# 1: diabetes
# 2: hypertension,
# 3: diabetes + hypertension
# 4: diabetes + hypertension + overweight
# + colab={} colab_type="code" id="CoLU8hp8P524"
# Select only the rows with HEALTH == 1 (diabetes).
data2 = data.set_index(['HEALTH'])
data2 = data2.loc[1]
#print(df.loc[df['B'].isin(['one','three'])])
# + [markdown] colab_type="text" id="yp4yetHd6eIN"
# # 카이제곱분석
# + colab={} colab_type="code" id="BYwPRoO-6eIO"
# H0: 집단간 차이가 없다 vs H1: 집단간 차이 있다
| shared/notebooks/1_statistics/.ipynb_checkpoints/diabetes_analysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
#
# Install the citipy dependency (notebook shell command).
# NOTE(review): as plain Python this line is a SyntaxError — run
# `pip install citipy` in the environment or use `%pip install citipy`.
pip install citipy
# +
# Dependencies and Setup
import os
import csv
import time
import datetime
import requests
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from pprint import pprint
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
from api_keys import g_key
# Citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
# NOTE(review): these range constants appear unused below — the random draws
# hard-code the same bounds.
lat_range = (-90, 90)
lng_range = (-180, 180)
# -
# ## Generate Cities List
# randomly generate a list of lats and longs
# numpy.random.uniform(low=0.0, high=1.0, size=None)
lat = np.random.uniform(low=-90.00, high=90.00, size=600)
lon = np.random.uniform(low=-180.00, high=180.00, size=600)
latlong = zip(lat, lon)
# Use citipy to generate a list of the closest cities to your random coordinates.
# NOTE(review): nearest-city lookups may yield duplicates for nearby coordinates.
cities = []
for c in latlong:
    cities.append(citipy.nearest_city(c[0], c[1]))
# Collect just the city-name strings for the API queries below.
city_name=[]
for city in cities:
    name = city.city_name
    city_name.append(name)
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# +
# Use Openweather api to get the weather data needed from those cities.
url = "http://api.openweathermap.org/data/2.5/weather?"
# Parallel lists, one entry per successfully fetched city.
temps = []
humid = []
clouds = []
winds = []
lats = []
lons = []
names = []
# Build query URL
for city in city_name:
    query_url = url + "appid=" + weather_api_key + "&q=" + city + "&units=imperial"
    response = requests.get(query_url)
    # Cities the API does not know (non-200 responses) are silently skipped.
    if response.status_code == 200:
        response = response.json()
        temps.append(response["main"]["temp"])
        humid.append(response["main"]["humidity"])
        clouds.append(response["clouds"]["all"])
        winds.append(response["wind"]["speed"])
        lats.append(response["coord"]["lat"])
        lons.append(response["coord"]["lon"])
        names.append(response["name"])
# -
# Assemble the collected per-city observations into a single DataFrame.
_columns = {
    "City": names,
    "Temperature (F)": temps,
    "Humidity (%)": humid,
    "Cloud Coverage (%)": clouds,
    "Wind Speed (mph)": winds,
    "Latitude": lats,
    "Longitude": lons,
}
weather_data = pd.DataFrame(_columns)
weather_data.head()
# Set variable for output file
output_file = os.path.join('..', 'output_data', 'cities.csv')
# Write the collected data to CSV.  The previous version opened the file with
# open() and then had pandas open the same path again — the unused handle is
# dropped; DataFrame.to_csv manages the file itself.
weather_data.to_csv(output_file)
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# ## Latitude vs. Temperature Plot
# NOTE(review): these figures are saved to the working directory, unlike the
# regression plots below which go to output_data/ — confirm intended paths.
weather_data.plot.scatter(x="Latitude", y="Temperature (F)", title="Temperature per Latitude")
plt.savefig('Temp_perLat.png')
# ## Latitude vs. Humidity Plot
weather_data.plot.scatter(x="Latitude", y="Humidity (%)", title="Humidity per Latitude")
plt.savefig('HumidityperLat.png')
# ## Latitude vs. Cloudiness Plot
weather_data.plot.scatter(x="Latitude", y="Cloud Coverage (%)", title="Cloud Coverage per Latitude")
plt.savefig('CloudperLat.png')
# ## Latitude vs. Wind Speed Plot
weather_data.plot.scatter(x="Latitude", y="Wind Speed (mph)", title="Wind Speed per Latitude")
plt.savefig('WindperLat.png')
# ## Linear Regression
# +
# Split the data by hemisphere; a small band around the equator
# (|latitude| <= 0.01) is excluded from both frames.
_lat = weather_data["Latitude"]
northern_hemisphere = weather_data.loc[_lat > 0.01]
southern_hemisphere = weather_data.loc[_lat < -0.01]
southern_hemisphere.head()
# +
# Define plotting function
# Define plotting function
def plotLinearRegression(xdata, ydata, xlbl, ylbl, lblpos, ifig):
    """Scatter ydata vs xdata, overlay the least-squares fit line and save the figure.

    params: xdata, ydata  equal-length sequences to regress and plot
            xlbl, ylbl    x / y axis labels
            lblpos        (x, y) position for the fit-equation annotation
            ifig          integer suffix for the saved figure file name
    """
    (slope, intercept, rvalue, pvalue, stderr) = linregress(xdata, ydata)
    # Bug fix: the message said "r-squared" but the raw r value was printed.
    print(f"The r-squared is: {rvalue**2}")
    regress_values = xdata * slope + intercept
    line_eq = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
    plt.scatter(xdata, ydata)
    plt.plot(xdata, regress_values, "r-")
    plt.annotate(line_eq, lblpos, fontsize=15, color="red")
    plt.xlabel(xlbl)
    plt.ylabel(ylbl)
    plt.savefig(f"output_data/fig{ifig}.png")
    plt.show()
# -
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
# NOTE(review): the regression cells below duplicate plotLinearRegression but
# save under different filenames — consider consolidating.
#Create a Scatter Plot for Latitude vs Temperature of City
x_values = northern_hemisphere['Latitude']
y_values = northern_hemisphere['Temperature (F)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"red")
plt.annotate(line_eq,(5,10),fontsize=15,color="red")
plt.ylim(0,100)
plt.xlim(0, 80)
plt.ylabel("Max. Temp")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("NORTH MAX TEMP VS LATITUDE.png")
# -
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
#Create a Scatter Plot for Latitude vs Temperature of City (Southern Hemisphere)
x_values = southern_hemisphere['Latitude']
y_values = southern_hemisphere['Temperature (F)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.ylim(30, 100)
plt.xlim(-60, 0, 10)
plt.ylabel("Max. Temp")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("SOUTH MAX TEMP VS LATITUDE.png")
# -
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
# Humidity vs latitude regression, northern hemisphere.
x_values = northern_hemisphere['Latitude']
y_values = northern_hemisphere['Humidity (%)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.ylabel("Humidity")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("NORTH HUMIDITY VS LATITUDE.png")
# -
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
# Humidity vs latitude regression, southern hemisphere.
x_values = southern_hemisphere['Latitude']
y_values = southern_hemisphere['Humidity (%)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(-25,10),fontsize=15,color="red")
plt.ylim(0, 100)
plt.ylabel("Humidity")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("SOUTH HUMIDITY VS LATITUDE.png")
# -
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
# Cloud coverage vs latitude regression, northern hemisphere.
x_values = northern_hemisphere['Latitude']
y_values = northern_hemisphere['Cloud Coverage (%)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.ylabel("Cloud Coverage (%)")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("NORTH CLOUD COVERAGE VS LATTITUDE.png")
# -
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
# Cloud coverage vs latitude regression, southern hemisphere.
x_values = southern_hemisphere['Latitude']
y_values = southern_hemisphere['Cloud Coverage (%)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(-25,10),fontsize=15,color="red")
plt.ylabel("Cloud Coverage (%)")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("SOUTHERN CLOUD COVERAGE VS LATITUDE.png")
# -
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# Wind speed vs latitude regression, northern hemisphere.
x_values = northern_hemisphere['Latitude']
y_values = northern_hemisphere['Wind Speed (mph)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(45,22),fontsize=15,color="red")
plt.ylabel("Wind Speed (mph)")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("NORTHERN WIND SPEED VS LATITUDE.png")
# -
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# Wind speed vs latitude regression, southern hemisphere.
x_values = southern_hemisphere['Latitude']
y_values = southern_hemisphere['Wind Speed (mph)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(-25,25),fontsize=15,color="red")
plt.ylabel("Wind Speed (mph)")
plt.xlabel("Latitude")
# plt.show()
plt.savefig("SOUTHERN WIND SPEED VS LATITUDE.png")
# -
| WeatherPy/WeatherPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FUNCIONES
import os
import numpy as np
import pprint
import copy
from math import sqrt
from scipy.linalg import solve_triangular
# ### Creación matrices
def crea_matriz(renglones, columnas, maximo_valor, minimo_valor, entero=False):
    """
    Helper that generates a random matrix.

    params: renglones     number of rows of the matrix
            columnas      number of columns of the matrix
            maximo_valor  maximum value of the matrix entries
            minimo_valor  minimum value of the matrix entries
            entero        whether the entries are integers (True) or not

    return: M             (renglones x columnas) matrix of random numbers
    """
    # Vectorised draw (replaces the previous O(rows*cols) Python loop; the
    # distributions are the same as before).
    if entero:
        # Integer entries uniform on [minimo_valor, maximo_valor]: truncate a
        # uniform draw on [minimo_valor, maximo_valor + 1).
        M = np.floor(
            np.random.rand(renglones, columnas) * (maximo_valor + 1 - minimo_valor)
            + minimo_valor
        )
    else:
        M = np.random.rand(renglones, columnas) * (maximo_valor - minimo_valor) + minimo_valor
    return M
# ### Factorización QR
def house(x):
    """
    Compute the Householder reflection of vector x (Golub & Van Loan, Alg. 5.1.1).

    params: x    vector to reflect
    return: Beta scaling constant of the reflector H = I - Beta * v v^T
            v    Householder vector, normalised so that v[0] == 1
    """
    tail = x[1:]
    # Squared norm of the tail x[1:].
    sigma = tail.dot(np.transpose(tail))
    v = np.concatenate((1, tail), axis=None)
    if sigma == 0:
        # x is already a multiple of e1: reflect only if its sign is negative.
        Beta = 0 if x[0] >= 0 else 2
    else:
        mu = np.sqrt(pow(x[0], 2) + sigma)
        if x[0] <= 0:
            v[0] = x[0] - mu
        else:
            # Stable form that avoids cancellation when x[0] > 0.
            v[0] = -sigma / (x[0] + mu)
        Beta = 2 * pow(v[0], 2) / (sigma + pow(v[0], 2))
        v = v / v[0]
    return Beta, v
def factorizacion_QR(A):
    """
    Compact QR factorisation of A via Householder reflections.

    Returns one matrix carrying the essential information: R in the upper
    triangle, and below each diagonal entry the tail of the Householder
    vector v used at that step, e.g.::

        [r11     r12     r13     r14    ]
        [v_2_(1) r22     r23     r24    ]
        [v_3_(1) v_3_(2) r33     r34    ]
        [v_4_(1) v_4_(2) v_4_(3) r44    ]
        [v_5_(1) v_5_(2) v_5_(3) v_5_(4)]

    params: A      (mxn) matrix to factorise
    return: A_r_v  (mxn) matrix with R above the diagonal and the Householder
                   vector tails below it (used later to reconstruct Q)
    """
    m, n = A.shape
    A_r_v = copy.copy(A)
    for k in range(n):
        beta, v = house(A_r_v[k:m, k])
        # Apply the reflector to the trailing submatrix...
        A_r_v[k:m, k:n] = A_r_v[k:m, k:n] - beta * (np.outer(v, v) @ A_r_v[k:m, k:n])
        # ...then stash the essential part of v below the diagonal.
        A_r_v[(k + 1):m, k] = v[1:(m - k)]
    return A_r_v
def QT_C(A_r_v, C):
    """
    Compute the product Q^T @ C from the essential-information matrix of a QR
    factorisation (pass C = identity (mxm) to obtain Q^T; its transpose is Q).

    params: A_r_v (mxn) matrix with the essential info (R + Householder tails)
            C     (mxp) matrix
    return: M     Q^T @ C
    """
    m = A_r_v.shape[0]
    n = A_r_v.shape[1]
    # Work on a float copy so the caller's C is never modified in place.
    M = np.array(C, dtype=float)
    # Q = Q_1 Q_2 ... Q_n with each Q_j symmetric, hence
    # Q^T C = Q_n ... Q_2 Q_1 C: apply the reflectors in forward order,
    # accumulating into M.  (Bug fix: the previous version read from the
    # original C at every step, so only the last reflector took effect.)
    for j in range(n):
        v = np.concatenate((1, A_r_v[(j + 1):m, j]), axis=None)
        beta = 2 / (1 + A_r_v[(j + 1):m, j].dot(A_r_v[(j + 1):m, j]))
        M[j:m, :] = M[j:m, :] - beta * np.outer(v, v) @ M[j:m, :]
    return M
def Q_j(A_r_v, j):
    """
    Rebuild the j-th Householder transformation Q_j from the essential
    information matrix (during QR factorisation, Q = Q_1 * Q_2 * ... * Q_n).

    params: A_r_v (mxn) matrix with the essential info
            j     0-based index of the reflection to reconstruct
    return: Qj    (mxm) matrix of the j-th Householder reflection
    """
    m, n = A_r_v.shape
    tail = A_r_v[(j + 1):m, j]
    v = np.concatenate((1, tail), axis=None)
    beta = 2 / (1 + tail.dot(tail))
    # Identity outside the trailing (m-j)x(m-j) block holding the reflector.
    Qj = np.eye(m)
    Qj[j:m, j:m] = np.eye(m - j) - beta * np.outer(v, v)
    return Qj
# ### Funciones para solución de Sistemas de Ecuaciones Lineales
def Solucion_SEL_QR_nxn(A, b):
    """
    Solve the linear system Ax = b (n equations, n unknowns) via QR
    factorisation: A = QR  =>  Rx = Q^T b, solved by back substitution.

    params: A (nxn) coefficient matrix
            b (nx1) right-hand-side vector
    return: x vector satisfying Ax = b
    """
    A_r_v = factorizacion_QR(A)
    m = A_r_v.shape[0]
    Q = np.eye(m)
    R = copy.copy(A)
    # Accumulate Q = Q_1 Q_2 ... Q_m and R = Q_m ... Q_1 A, reusing each Q_j
    # (the previous version rebuilt Q_j twice per iteration and kept dead
    # variables n / b_prima).
    for j in range(m):
        Qj = Q_j(A_r_v, j)
        Q = Q @ Qj
        R = Qj @ R
    # Solve Rx = Q^T b with R upper triangular.
    x = solve_triangular(R, np.transpose(Q) @ b)
    return x
# #### Eliminación por bloques
def bloques(A, b=False, n1=False, n2=False):
    """
    Partition matrix A (and optionally vector b) into a 2x2 block structure.

    params: A  (mxn) coefficient matrix
            b  (mx1) right-hand-side vector (optional)
            n1 number of rows of the first block (optional)
            n2 number of rows of the second block (optional)
    return: A11, A12, A21, A22  blocks of the matrix
            b1, b2              slices of the vector (None when b not given)
    raises: ValueError if n1 + n2 != n, or if len(b) != m
    """
    m, n = A.shape
    # If the block sizes are not given, make them as equal as possible.
    # (Bug fix: this used `n1 & n2`, a *bitwise* AND that can be 0 even for
    # two non-zero sizes, e.g. 2 & 1 == 0.)
    if not (n1 and n2):
        n1 = n // 2
        n2 = n - n1
    # The blocks must exactly cover the matrix.
    # (Bug fix: the previous check compared n1 + n1 instead of n1 + n2; also
    # `sys` was never imported, so sys.exit raised NameError — raise
    # ValueError instead.)
    elif n1 + n2 != n:
        raise ValueError('n1 + n2 debe ser igual a n')
    # Split b when provided; removed a leftover debug print on the None path.
    if b is False:
        b1 = None
        b2 = None
    elif len(b) == m:
        b1 = b[:n1]
        b2 = b[n1:m]
    else:
        raise ValueError('los renglones de A y b deben ser del mismo tamaño')
    A11 = A[:n1, :n1]
    A12 = A[:n1, n1:n]
    A21 = A[n1:m, :n1]
    A22 = A[n1:m, n1:n]
    return A11, A12, A21, A22, b1, b2
def eliminacion_bloques(A, b):
    """
    Solve the linear system Ax = b by block elimination (Schur complement),
    using the QR solver for each subsystem.

    params: A (nxn) coefficient matrix; must be non-singular, as must its
              leading block A11
            b (nx1) right-hand-side vector
    return: solution vector x = [x1; x2]
    raises: ValueError if A or A11 is singular
    """
    # (Bug fixes: `sys` was never imported so sys.exit raised NameError, and
    # the A11 check said `ys.exit` — both replaced with ValueError.)
    if np.linalg.det(A) == 0:
        raise ValueError('A debe ser no singular')
    A11, A12, A21, A22, b1, b2 = bloques(A, b)
    if np.linalg.det(A11) == 0:
        raise ValueError('A11 debe ser no singular')
    # 1. Compute y = A11^{-1} b1 and Y = A11^{-1} A12 without forming the
    #    inverse, by solving linear systems with the QR method.
    y = Solucion_SEL_QR_nxn(A11, b1)
    Y = Solucion_SEL_QR_nxn(A11, A12)
    # 2. Schur complement of A11 in A, and the associated right-hand side.
    S = A22 - A21 @ Y
    b_h = b2 - A21 @ y
    # 3. Solve S x2 = b_hat.
    x2 = Solucion_SEL_QR_nxn(S, b_h)
    # 4. Solve A11 x1 = b1 - A12 x2.
    x1 = Solucion_SEL_QR_nxn(A11, b1 - A12 @ x2)
    return np.concatenate((x1, x2), axis=0)
# # Prueba Unitaria
# ## Eliminación por bloques con QR considerando sistemas sin solución
# ### Ejemplo 1 - Matriz 3 x 3 (Fila con ceros)
# Empezaremos por generar un sistema de ecuaciones lineales sin solución.
# Generate a 3 x 3 matrix
# NOTE(review): this example intentionally builds a singular system — the
# solve calls below are expected to raise.
A = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]], dtype='d')
b = np.array([[-2], [0], [5]], dtype='d')
print("A:")
pprint.pprint(A)
print("b:")
pprint.pprint(b)
# **Numpy**
# We use numpy's *np.linalg.solve(A,b)* to confirm that the system indeed has no solution.
np.linalg.solve(A,b)
# numpy raises an error when trying to solve a linear system with no solution.
#
# The error says that matrix A is singular.
# **Programmers' implementation - Block elimination with QR**
# We use the programmers' eliminacion_bloques function to check its behaviour on a system with no solution.
eliminacion_bloques(A,b)
# Podemos observar que la función nos arroja un error al tratar de resolver un sistema de ecuaciones lineales sin solución al igual que numpy. Sin embargo, no existe ningun mensaje que le de visibilidad al usuario del tipo de error al que se esta enfrentando.
# ### Ejemplo 2 - Matriz 10^2 x 10^2 (Fila con ceros)
# Empezamos por generar un sistema de ecuaciones lineales de 10^2 x 10^2
# Build a random 100 x 100 system, then make it singular.
m = 100
n = 100
A = np.round(crea_matriz(m, n, 10, -10,False), 2)
b = np.round(crea_matriz(m, 1, 10, -10,False), 2)
print("A:")
pprint.pprint(A)
print("b:")
pprint.pprint(b)
# Overwrite the last row with zeros so the system has no solution.
A[-1] = np.zeros(n)
print("A:")
pprint.pprint(A)
# **Numpy**
# We use numpy's *np.linalg.solve(A,b)* once more to confirm the system has no solution.
np.linalg.solve(A,b)
# numpy raises the same error as in Example 1.
# **Programmers' implementation - Block elimination with QR**
# We use eliminacion_bloques to check its behaviour on this unsolvable system.
eliminacion_bloques(A,b)
# Una vez mas, podemos observar que la función nos arroja un error al tratar de resolver un sistema de ecuaciones lineales sin solución al igual que numpy. Sin embargo, no existe ningun mensaje que le de visibilidad al usuario del tipo de error al que se esta enfrentando.
# ### Ejemplo 3 - Matriz 2 x 2 (Sistema incompatible - Rectas Paralelas)
# Empezaremos por generar un sistema de ecuaciones lineales sin solución (Rectas Paralelas).
# Generate an incompatible 2 x 2 system (parallel lines — second row is a
# multiple of the first, but the right-hand side is not).
A = np.array([[11, 4], [132, 48]], dtype='d')
b = np.array([[7], [-1]], dtype='d')
print("A:")
pprint.pprint(A)
print("b:")
pprint.pprint(b)
# **Numpy**
# We use numpy's *np.linalg.solve(A,b)* once more to confirm the system has no solution.
np.linalg.solve(A,b)
# numpy raises the same error as in Examples 1 and 2.
# **Programmers' implementation - Block elimination with QR**
# We use eliminacion_bloques to check its behaviour on this unsolvable system.
eliminacion_bloques(A,b)
# Una vez mas, podemos observar que la función nos arroja un error al tratar de resolver un sistema de ecuaciones lineales sin solución al igual que numpy. Sin embargo, no existe ningun mensaje que le de visibilidad al usuario del tipo de error al que se esta enfrentando.
# ### Ejemplo 4 - Matriz 4 x 3 no cuadrada
# Non-square 4 x 3 system to exercise the dimension checks.
m = 4
n = 3
A = np.round(crea_matriz(m, n, 10, -10,False), 2)
b = np.round(crea_matriz(m, 1, 10, -10,False), 2)
print("A:")
pprint.pprint(A)
print("b:")
pprint.pprint(b)
# **Numpy**
# We use numpy's *np.linalg.solve(A,b)* to see how it reacts to a non-square matrix.
np.linalg.solve(A,b)
# numpy raises an error saying the matrix must be square.
# **Programmers' implementation - Block elimination with QR**
# We use eliminacion_bloques to see how it reacts to a non-square matrix.
eliminacion_bloques(A,b)
# Para este ejemplo podemos apreciar que la función nos arroja el mismo error que numpy. Sería bueno agregar una validación previa a la ejecución, en la que garanticemos que el input es una matriz cuadrada.
# ## Resumen de Hallazgos
# La función eliminacion_bloques(A,b) debería de ser capaz de identificar cuando se enfrenta a un sistema de ecuaciones lineales para los cuales no existe solución. En estos casos la función debería de dar como output un mensaje "Sistema de ecuaciones sin solución" o "Matriz singular" por ejemplo.
#
# Por otra parte, tambien debería validar las dimensiones de la matriz de entrada, si las dimensiones de la matriz de entrada no son cuadradas deberíamos de recibir como output un mensaje "Las dimensiones de la matriz (A) no son cuadradas, por favor revisa" por dar algun ejemplo.
#
# La finalidad de agregar estas funcionalidades a la función es poder dar retroalimentación al usuario respecto a la naturaleza de los errores que se pudieran presentar al utilizar la función.
#
# Dados estos hallazgos levantaremos un issue con las siguientes acciones:
#
# **TO DO.**
#
# + Agregar validación para sistema de ecuaciones sin solución.
#
# + Agregar validación para matrices no cuadradas.
#
| Pruebas/Revision1/pu_epbQR_sistema_sin_solucion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Collect Tweets and Desired Attributes
print("---------------------------------------------------------")
print("Collecting Tweets")
# Bug fix: the Cursor(...) call was a SyntaxError — the `start_date` and
# `end_date` keyword arguments were not separated by commas.
# NOTE(review): start_date/end_date are not documented tweepy api.search
# parameters — confirm the intended date filtering (e.g. `until`).
for tweet in tweepy.Cursor(api.search, q="Wits -filter:retweets", count=5000,
                           lang="en",
                           start_date=datetime.datetime(2019, 6, 1, 0, 0, 0),
                           end_date=datetime.datetime(2020, 1, 1, 0, 0, 0),
                           tweet_mode='extended',
                           include_entities=True).items():
    # Flatten hashtag entities to "tag1-tag2-..." (None when the tweet has none).
    tweet_hashtags = ''
    if tweet.entities['hashtags'] == []:  # no hash tag
        tweet_hashtags = None
    else:
        tweet_hashtags = "-".join([dic['text'] for dic in tweet.entities['hashtags']])
    # Extract tweet attributes in this sequence
    location = tweet.user.location
    tweet_time = tweet.created_at
    twitter_user = tweet.user.screen_name
    tweet_url = "https://twitter.com/" + twitter_user + "/status/" + tweet.id_str
    tweet_text = tweet.full_text
    # Create a record for the collected tweet
    tweet_record = [tweet_time, twitter_user, tweet_text.strip(), tweet_url, location, tweet_hashtags]
    # Append the record to the CSV file (csvWriter is created earlier in the notebook)
    csvWriter.writerow(tweet_record)
print("Done Collecting Tweets")
print("---------------------------------------------------------")
# +
# for tweet in tweepy.Cursor(api.search,q="WITS OR Witsaccommodation OR WitsAccommodation OR WitsExam OR wits OR WitsUniversity OR WITSUniversity OR University of the Witwatersrand OR Witsie OR WitsEnrolment OR AdamHabib",count=100,\
# lang="en",\
# since_id=2014-6-12).items():
# #tweet_mode = 'extended',
# print(tweet.created_at, tweet.text)
# csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])
| data/raw/code_repo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Extending xclim
#
# xclim tries to make it easy for users to add their own indices and indicators. The following goes into details on how to create _indices_ and document them so that xclim can parse most of the metadata directly. We then explain the multiple ways new _Indicators_ can be created and, finally, how we can regroup and structure them in virtual submodules.
#
# Central to xclim are the **Indicators**, objects computing indices over climate variables, but xclim also provides other modules:
#
# 
#
# Where `subset` is a phantom module, kept for legacy code, as it only redirects the calls to `clisops.core.subset`.
#
# This introduction will focus on the Indicator/Indice part of xclim and how one can extend it by implementing new ones.
#
#
# ## Indices vs Indicators
#
# Internally and in the documentation, xclim makes a distinction between "indices" and "indicators".
#
# ### indice
#
# * A python function accepting DataArrays and other parameters (usually builtin types)
# * Returns one or several DataArrays.
# * Handles the units : checks input units and set proper CF-compliant output units. But doesn't usually prescribe specific units, the output will at minimum have the proper dimensionality.
# * Performs **no** other checks or set any (non-unit) metadata.
# * Accessible through [xclim.indices](../indices.rst).
#
# ### indicator
#
# * An instance of a subclass of `xclim.core.indicator.Indicator` that wraps around an `indice` (stored in its `compute` property).
# * Returns one or several DataArrays.
# * Handles missing values, performs input data and metadata checks (see [usage](usage.ipynb#Health-checks-and-metadata-attributes)).
# * Always outputs data in the same units.
# * Adds dynamically generated metadata to the output after computation.
# * Accessible through [xclim.indicators ](../indicators_api.rst)
#
# Most metadata stored in the Indicators is parsed from the underlying indice documentation, so defining indices with complete documentation and an appropriate signature helps the process. The two next sections go into details on the definition of both objects.
#
# #### Call sequence
#
# The following graph shows the steps done when calling an Indicator. Attributes and methods of the Indicator object relating to those steps are listed on the right side.
#
# 
# ## Defining new indices
#
# The annotated example below shows the general template to be followed when defining proper _indices_. In the comments `Ind` is the indicator instance that would be created from this function.
#
# <div class="alert alert-info">
#
# Note that it is not _needed_ to follow these standards when writing indices that will be wrapped in indicators. Problems in parsing will not raise errors at runtime, but will result in Indicators with poorer metadata than expected by most users, especially those that dynamically use indicators in other applications where the code is inaccessible, like web services.
#
# </div>
#
# 
#
# The following code is another example.
# +
import xarray as xr
import xclim as xc
from xclim.core.units import declare_units, convert_units_to
from xclim.indices.generic import threshold_count
@declare_units(tasmax="[temperature]", thresh="[temperature]")
def tx_days_compare(
    tasmax: xr.DataArray, thresh: str = "0 degC", op: str = ">", freq: str = "YS"
):
    r"""Number of days where the maximum daily temperature is above or under a threshold.

    The daily maximum temperature is compared to a threshold using a given operator and the number
    of days where the condition is true is returned.

    It assumes a daily input.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature.
    thresh : str
      Threshold temperature to compare to.
    op : {'>', '<'}
      The operator to use.
      # A fixed set of choices can be imposed. Only strings, numbers, booleans or None are accepted.
    freq : str
      Resampling frequency.

    Returns
    -------
    xarray.DataArray, [days]
      Number of days where the maximum daily temperature satisfies the condition,
      per resampling period.

    Notes
    -----
    Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Counted
    is the number of days where :math:`TX_{ij}` is above (or under) the threshold.

    References
    ----------
    Smith, John and Tremblay, Robert, A dummy citation for examples in documentation. J. RTD. (2020).
    """
    thresh = convert_units_to(thresh, tasmax)
    out = threshold_count(tasmax, op, thresh, freq)
    out.attrs["units"] = "days"
    return out
# -
# ### Naming and conventions
#
# Variable names should correspond to CMIP6 variables, whenever possible. The file `xclim/data/variables.yml` lists all variables that xclim can use when generating indicators from yaml files (see below), and new indices should try to reflect these also. For new variables, the `xclim.testing.get_all_CMIP6_variables` function downloads the official table of CMIP6 variables and puts everything in a dictionary. If possible, use variables names from this list, add them to `variables.yml` as needed.
#
# ### Generic functions for common operations
#
# The [xclim.indices.generic](../indices.rst#generic-indices-submodule) submodule contains useful functions for common computations (like `threshold_count` or `select_resample_op`) and many basic indice functions, as defined by [clix-meta](https://github.com/clix-meta/clix-meta). In order to reduce duplicate code, their use is recommended for xclim's indices. As previously said, the units handling has to be made explicitly when non trivial, [xclim.core.units](../api.rst#module-xclim.core.units) also exposes a few helpers for that (like `convert_units_to`, `to_agg_units` or `rate2amount`).
#
# ## Defining new indicators
#
# xclim's Indicators are instances of (subclasses of) `xclim.core.indicator.Indicator`. While they are the central to xclim, their construction can be somewhat tricky as a lot happens backstage. Essentially, they act as self-aware functions, taking a set of input variables (DataArrays) and parameters (usually strings, integers or floats), performing some health checks on them and returning one or multiple DataArrays, with CF-compliant (and potentially translated) metadata attributes, masked according to a given missing value set of rules.
# They define the following key attributes:
#
# * the `identifier`, as string that uniquely identifies the indicator,
# * the `realm`, one of "atmos", "land", "seaIce" or "ocean", classifying the domain of use of the indicator.
# * the `compute` function that returns one or more DataArrays, the "indice",
# * the `cfcheck` and `datacheck` methods that make sure the inputs are appropriate and valid.
# * the `missing` function that masks elements based on null values in the input.
# * all metadata attributes that will be attributed to the output and that document the indicator:
#     - Indicator-level attributes are: `title`, `abstract`, `keywords`, `references` and `notes`.
#     - Output variable attributes (respecting CF conventions) are: `var_name`, `standard_name`, `long_name`, `units`, `cell_methods`, `description` and `comment`.
#
# Output variables attributes are regrouped in `Indicator.cf_attrs` and input parameters are documented in `Indicator.parameters`.
#
# A particularity of Indicators is that each instance corresponds to a single class: when creating a new indicator, a new class is automatically created. This is done for easy construction of indicators based on others, like shown further down.
#
# See the [class documentation](../api.rst#module-xclim.core.indicator) for more info on the meaning of each attribute. The [indicators](https://github.com/Ouranosinc/xclim/tree/master/xclim/indicators) module contains over 50 examples of indicators to draw inspiration from.
#
# ### Metadata parsing vs explicit setting
#
# As explained above, most metadata can be parsed from the indice's signature and docstring. Otherwise, it can always be set when creating a new Indicator instance *or* a new subclass. When _creating_ an indicator, output metadata attributes can be given as strings, or list of strings in the case of indicator returning multiple outputs. However, they are stored in the `cf_attrs` list of dictionaries on the instance.
#
# ### Internationalization of metadata
#
# xclim offers the possibility to translate the main Indicator metadata fields and automatically add the translations to the outputs. The mechanism is explained in the [Internationalization](../internationalization.rst) page.
#
# ### Inputs and checks
#
# There are two ways that xclim uses to decide which input arguments of the indicator's call function are considered _variables_ and which are _parameters_.
#
# - The `nvar` indicator integer attribute sets the number of arguments that are sent to the `datacheck` and `cfcheck` methods (in the signature's order).
# - The annotations of the underlying indice (the `compute` method). Arguments annotated with the `xarray.DataArray` type are considered _variables_ and can be read from the dataset passed in `ds`.
#
# ### Indicator creation
#
# There are two ways of creating indicators:
#
# 1) By initializing an existing indicator (sub)class
# 2) From a dictionary
#
# The first method is best when defining indicators in scripts of external modules and are explained here. The second is best used when building virtual modules through YAML files, and is explained further down and in the [submodule doc](../api.rst#module-xclim.core.indicator).
#
# Creating a new indicator that simply modifies a few metadata output of an existing one is a simple call like:
# +
from xclim.core.indicator import registry
from xclim.core.utils import wrapped_partial
from xclim.indices import tg_mean

# An indicator based on tg_mean, but returning Celsius and fixed on annual resampling.
# Calling a registry class creates a new Indicator subclass and returns its instance.
tg_mean_c = registry["TG_MEAN"](
    identifier="tg_mean_c",
    units="degC",
    title="Mean daily mean temperature but in degC",
    compute=wrapped_partial(tg_mean, freq="YS"),  # We inject the freq arg.
)
# -
# The docstring is generated dynamically from the indicator's metadata.
print(tg_mean_c.__doc__)
# The registry is a dictionary mapping indicator identifiers (in uppercase) to their class. This way, we could subclass `tg_mean` to create our new indicator. `tg_mean_c` is the exact same as `atmos.tg_mean`, but outputs the result in Celsius instead of Kelvins, has a different title and resamples to "YS". The `identifier` keyword is here needed in order to differentiate the new indicator from `tg_mean` itself. If it wasn't given, a warning would have been raised and further subclassing of `tg_mean` would have in fact subclassed `tg_mean_c`, which is not wanted!
#
# This method of class initialization is good for the cases where only metadata and perhaps the compute function is changed. However, to modify the CF compliance and data checks, we recommend creating a class first:
# +
class TG_MAX_C(registry["TG_MAX"]):
    """Maximum daily mean temperature indicator, reported in Celsius.

    Subclasses the registered ``TG_MAX`` indicator to override its metadata,
    missing-values policy and both health-check hooks.
    """

    # Class attributes override the parent indicator's metadata.
    identifier = "tg_max_c"
    missing = "wmo"  # use the WMO missing-values policy
    title = "Maximum daily mean temperature"
    units = "degC"

    @staticmethod
    def cfcheck(tas):
        # CF-metadata checks performed on the input variable.
        xc.core.cfchecks.check_valid(tas, "standard_name", "air_temperature")
        # Add a very strict check on the long name.
        # glob-like wildcards can be used (here *)
        xc.core.cfchecks.check_valid(tas, "long_name", "Surface * daily temperature")

    @staticmethod
    def datacheck(tas):
        # Ensure the input series has a daily frequency.
        xc.core.datachecks.check_daily(tas)


# Instantiate the new indicator class.
tg_max_c = TG_MAX_C()
# +
from xclim.testing import open_dataset

# Load a sample dataset and adjust the long_name so it passes the strict cfcheck above.
ds = open_dataset("ERA5/daily_surface_cancities_1990-1993.nc")
ds.tas.attrs["long_name"] = "Surface average daily temperature"

with xc.set_options(cf_compliance="raise"):
    # The data passes the test we implemented ("average" is caught by the *)
    tmaxc = tg_max_c(tas=ds.tas)
tmaxc
# -
# A caveat of this method is that the new indicator is added to the registry with a non-trivial name. When an indicator subclass is created in a module outside `xclim.indicators`, the name of its parent module is prepended to its identifier in the registry. Here, the module is `__main__`, so:
"__main__.TG_MAX_C" in registry
# A simple way to work around this is to provide a (fake) module name. Passing one of `atmos`, `land`, `seaIce` or `ocean` will result in a normal entry in the registry. However, one could want to keep the distinction between the newly created indicators and the "official" ones by passing a custom name **upon instantiation**:
# Fake module is passed upon instantiation
tg_max_c2 = TG_MAX_C(module="example")
# The instance reports the fake module, and the registry entry uses it too.
print(tg_max_c2.__module__)
print("example.TG_MAX_C" in registry)
# One pattern to create multiple indicators is to write a standard subclass that declares all the attributes that are common to indicators, then call this subclass with the custom attributes. See for example in [xclim.indicators.atmos](https://github.com/Ouranosinc/xclim/blob/master/xclim/indicators/atmos/_temperature.py) how indicators based on daily mean temperatures are created from the `Tas` subclass of the `Daily` class.
# ## Virtual modules
#
# `xclim` gives users the ability to generate their own modules from existing indices library. These mappings can help in emulating existing libraries (such as ICCLIM), with the added benefit of CF-compliant metadata, multilingual metadata support, and optimized calculations using federated resources (using Dask). This can be used for example to tailor existing indices with predefined thresholds without having to rewrite indices.
#
# Presently, xclim is capable of approximating the indices developed in ICCLIM (https://icclim.readthedocs.io/en/latest/intro.html), ANUCLIM (https://fennerschool.anu.edu.au/files/anuclim61.pdf) and clix-meta (https://github.com/clix-meta/clix-meta) and is open to contributions of new indices and library mappings.
#
# This notebook serves as an example of how one might go about creating their own library of mapped indices. Two ways are possible:
#
# 1. From a YAML file (recommended way)
# 2. From a mapping (dictionary) of indicators
#
# ### YAML file
#
# The first method is based on the YAML syntax proposed by `clix-meta`, expanded to xclim's needs. The full documentation on that syntax is [here](../api.rst#module-xclim.core.indicator). This notebook shows an example different complexities of indicator creation. It creates a minimal python module defining a indice, creates a YAML file with the metadata for several indicators and then parses it into xclim.
# + nbsphinx="hidden"
# Workaround absence of syntax highlighting in notebooks
from pygments.formatters import Terminal256Formatter
from pygments.lexers import YamlLexer, PythonLexer, JsonLexer
from pygments import highlight
with open("example.py") as f:
pydata = f.read()
with open("example.yml") as f:
ymldata = f.read()
with open("example.fr.json") as f:
jsondata = f.read()
highlighted_py = highlight(pydata, PythonLexer(), Terminal256Formatter(style="manni"))
highlighted_yaml = highlight(ymldata, YamlLexer(), Terminal256Formatter(style="manni"))
highlighted_json = highlight(jsondata, JsonLexer(), Terminal256Formatter(style="manni"))
# -
# These variables were generated by a hidden cell above that syntax-colored them.
print("Content of example.py :")
print(highlighted_py)
print("\n\nContent of example.yml :")
print(highlighted_yaml)
print("\n\nContent of example.fr.json :")
print(highlighted_json)
# `example.yml` created a module of 4 indicators.
#
# - `RX1day` is simply the same as `registry['RX1DAY']`, but with an updated `long_name`.
# - `RX5day` is based on `registry['MAX_N_DAY_PRECIPITATION_AMOUNT']`, changed the `long_name` and injects the `window` and `freq` arguments.
# - `R75pdays` is based on `registry['DAYS_OVER_PRECIP_THRESH']`, injects the `thresh` argument and changes the description of the `per` argument. Passing "data: {per}" tells xclim the value is still to be determined by the user, but other parameter's metadata field might be changed.
# - `fd` is a more complex example. As there were no `base:` entry, the `Daily` class serves as a base. As it is pretty much empty, a lot has to be given explicitly:
# * A list of allowed resampling frequency is passed
# * Many output metadata fields are given
#     * An index_function name is given (here it refers to a function in `xclim.indices.generic`).
# * Some parameters are injected.
# * The input variable `data` is mapped to a known variable. Functions in `xclim.indices.generic` are indeed generic. Here we tell xclim that the `data` argument is minimal daily temperature. This will set the proper units check, default value and CF-compliance checks.
# - `R95p` is similar to `fd` but here the `index_function` is not defined in `xclim` but rather in `example.py`.
# - `R99p` is the same as `R95p` but changes the injected value. In order to avoid rewriting the output metadata, and allowed periods, we based it on `R95p` : as the latter was defined within the current yaml file, the identifier is prefixed by a dot (.). However, in order to _inject_ a parameter we still need to repeat the index_function name (and retrigger the indice function wrapping process under the hood).
# - `LPRatio` is a version of "liquid precip ratio" where we force the use of `tas` (instead of having it as an optional variable). We also inject a specific threshold.
#
# A few ways of prescribing _default_ or _allowed_ periods (resampling frequencies) are shown here. In `fd` and `R95p`, only the default value of `freq` is given. `R75pdays` will keep the default value in the signature of the underlying indice. `RX1day` goes in more details by prescribing a default value and a list of _allowed_ values. xclim will be relax and accept any `freq` values equivalent to those listed here. Finally, `RX5day` directly injects the `freq` argument, so that it doesn't even appear in the docstring.
#
# Additionnaly, the yaml specified a `realm` and `references` to be used on all indices and provided a submodule docstring. Creating the module is then simply:
#
# Finally, French translations for the main attributes and the new indicators are given in `example.fr.json`. Even though new indicator objects are created for each yaml entry, non-specified translations are taken from the base classes if missing in the `json` file.
#
# Note that all files are named the same way : `example.<ext>`, with the translations having an additionnal suffix giving the locale name. In the next cell, we build the module by passing only the path without extension. This absence of extension is what tells xclim to try to parse a module (`*.py`) and custom translations (`*.<locale>.json`). Those two could also be read beforehand and passed through the `indices=` and `translations=` arguments.
# +
import xclim as xc

# Build a virtual submodule from example.yml; the sibling example.py (indices)
# and example.fr.json (translations) are discovered from the same base name.
# mode="raise" aborts on any parsing error instead of warning.
example = xc.core.indicator.build_indicator_module_from_yaml("example", mode="raise")
# -
# The module and indicator docstrings are generated from the YAML metadata.
print(example.__doc__)
print("--")
print(xc.indicators.example.R99p.__doc__)
# Useful for using this technique in large projects, we can iterate over the indicators like so:
# +
# Add the `per` percentile variable required by some of the indicators.
ds2 = ds.assign(
    per=xc.core.calendar.percentile_doy(ds.pr, window=5, per=75).isel(
        percentiles=0, drop=True
    )
)

outs = []
with xc.set_options(metadata_locales="fr"):
    # Iterate over every indicator defined in the virtual module.
    for name, ind in example.iter_indicators():
        print(f"Indicator: {name}")
        print(f"\tIdentifier: {ind.identifier}")
        print(f"\tTitle: {ind.title}")
        out = ind(ds=ds2)  # Use all default arguments and variables from the dataset,
        outs.append(out)
# -
# `out` contains all the computed indices, with translated metadata.
# Note that this merge doesn't make much sense with the current list of indicators since they have different frequencies (`freq`).
out = xr.merge(outs)
out.attrs = {
    "title": "Indicators computed from the example module."
}  # Merge puts the attributes of the first variable, we don't want that.
out
# ### Mapping of indicators
#
# For more complex mappings, submodules can be constructed from Indicators directly. This is not the recommended way, but can sometimes be a workaround when the YAML version is lacking features.
# +
from xclim.core.indicator import build_indicator_module, registry
from xclim.core.utils import wrapped_partial

# Map attribute names to customized indicator instances; each entry becomes
# an attribute of the new virtual module.
mapping = dict(
    egg_cooking_season=registry["MAXIMUM_CONSECUTIVE_WARM_DAYS"](
        module="awesome",
        compute=wrapped_partial(
            xc.indices.maximum_consecutive_tx_days, thresh="35 degC"
        ),
        long_name="Season for outdoor egg cooking.",
    ),
    fish_feeling_days=registry["WETDAYS"](
        module="awesome",
        compute=wrapped_partial(xc.indices.wetdays, thresh="14.0 mm/day"),
        long_name="Days where we feel we are fishes",
    ),
    # Existing indicators can be reused unchanged.
    sweater_weather=xc.atmos.tg_min,
)

# Assemble the mapping into an importable xclim.indicators.awesome module.
awesome = build_indicator_module(
    name="awesome",
    objs=mapping,
    doc="""
=========================
My Awesome Custom indices
=========================
There are only 3 indices that really matter when you come down to brass tacks.
This mapping library exposes them to users who want to perform real deal
climate science.
""",
)
# -
print(xc.indicators.awesome.__doc__)
# Let's look at our new awesome module
print(awesome.__doc__)
for name, ind in awesome.iter_indicators():
    print(f"{name} : {ind}")
| docs/notebooks/extendxclim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linearity measure applied to fashion MNIST
#
# ## General definition
#
# The model linearity module in alibi provides metric to measure how linear an ML model is. Linearity is defined based on how much the linear superposition of the model's outputs differs from the output of the same linear superposition of the inputs.
#
# Given $N$ input vectors $v_i$, $N$ real coefficients $\alpha_i$ and a predict function $\text{M}(v_i)$, the linearity of the predict function is defined as
#
# $$L = \Big|\Big|\sum_i \alpha_i M(v_i) - M\Big(\sum_i \alpha_i v_i\Big) \Big|\Big| \quad \quad \text{If M is a regressor}$$
#
# $$L = \Big|\Big|\sum_i \alpha_i \log \circ M(v_i) - \log \circ M\Big(\sum_i \alpha_i v_i\Big)\Big|\Big| \quad \quad \text{If M is a classifier}$$
#
# Note that a lower value of $L$ means that the model $M$ is more linear.
#
#
# ## Alibi implementation
# * Based on the general definition above, alibi calculates the linearity of a model in the neighborhood of a given instance $v_0$.
#
# ## Fashion MNIST data set
#
# * We train a convolutional neural network to classify the images in the fashion MNIST dataset.
#
# * We investigate the correlation between the model's linearity associated with a certain instance and the class the instance belongs to.
#
# * We also calculate the linearity measure for each internal layer of the CNN and show how linearity propagates through the model.
# +
import pandas as pd
import numpy as np
import matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
from time import time
import tensorflow as tf
from alibi.confidence.model_linearity import linearity_measure, LinearityMeasure
from alibi.confidence.model_linearity import _infer_feature_range
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Input, Activation
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import backend as K
# -
# ## Load data fashion mnist
# The fashion MNIST data set consists of 60000 images of shape $28 \times 28$ divided in 10 categories. Each category corresponds to a different type of clothing piece, such as "boots", "t-shirts", etc
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
print('x_train shape:', x_train.shape, 'y_train shape:', y_train.shape)

# Display one sample image.
idx = 0
plt.imshow(x_train[idx])
print('Sample instance from the MNIST data set.')

# Scale pixel values to [0, 1] and append a single channel dimension.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
x_train = np.reshape(x_train, x_train.shape + (1,))
x_test = np.reshape(x_test, x_test.shape + (1,))
print('x_train shape:', x_train.shape, 'x_test shape:', x_test.shape)

# One-hot encode the 10 class labels.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('y_train shape:', y_train.shape, 'y_test shape:', y_test.shape)
# ## Convolutional neural network
# Here we define and train a 2 layer convolutional neural network on the fashion MNIST data set.
# ### Define model
def model():
    """Build and compile the two-block CNN classifier used in this example.

    Returns a compiled Keras model mapping (28, 28, 1) images to 10-class
    softmax probabilities, trained with categorical cross-entropy and Adam.
    """
    inputs = Input(shape=(28, 28, 1), name='input')

    # First convolution block: conv -> relu -> maxpool -> dropout.
    hidden = Conv2D(filters=64, kernel_size=2, padding='same', name='conv_1')(inputs)
    hidden = Activation('relu', name='relu_1')(hidden)
    hidden = MaxPooling2D(pool_size=2, name='maxp_1')(hidden)
    hidden = Dropout(0.3, name='drop_1')(hidden)

    # Second convolution block, same structure.
    hidden = Conv2D(filters=64, kernel_size=2, padding='same', name='conv_2')(hidden)
    hidden = Activation('relu', name='relu_2')(hidden)
    hidden = MaxPooling2D(pool_size=2, name='maxp_2')(hidden)
    hidden = Dropout(0.3, name='drop_2')(hidden)

    # Dense classification head.
    hidden = Flatten(name='flat')(hidden)
    hidden = Dense(256, name='dense_1')(hidden)
    hidden = Activation('relu', name='relu_3')(hidden)
    hidden = Dropout(0.5, name='drop_3')(hidden)
    logits = Dense(10, name='dense_2')(hidden)
    outputs = Activation('softmax', name='softmax')(logits)

    cnn = Model(inputs=inputs, outputs=outputs)
    cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return cnn
# Build and inspect the CNN.
cnn = model()
cnn.summary()
# ### Training
# Train briefly; the trailing semicolon suppresses the History repr in the notebook.
cnn.fit(x_train, y_train, batch_size=64, epochs=5);
# ## Linearity of each Layer
# Here we calculate the linearity of the model considering each layer as the output in turn. The values are averaged over 100 random instances sampled from the training set.
# ### Extract layers
inp = cnn.input
# Map each layer name to its symbolic output tensor.
outs = {l.name: l.output for l in cnn.layers}
# Backend functions evaluating each layer's output for a given input batch.
predict_fns = {name: K.function([inp], [out]) for name, out in outs.items()}
# ### Calculate linearity
# +
# Infering feature ranges.
features_range = _infer_feature_range(x_test)
# Selecting random instance indices. NOTE(review): these are drawn from the
# *test* set, although the surrounding text mentions the training set — confirm.
rnd = np.random.randint(len(x_test) - 101, size=100)
# -
# Linearity measure computed with each layer's output taken as the model output.
lins_layers = {}
for name, layer_fn in predict_fns.items():
    if name == 'input':
        continue  # the identity mapping is trivially linear; skip it

    # Bind the current backend function as a default argument. A plain closure
    # over the loop variable would be late-binding, so every predict_fn would
    # refer to the last layer if the functions were ever called after the loop.
    def predict_fn(x, _fn=layer_fn):
        return _fn([x])[0]

    # Only the softmax output is treated as classifier probabilities (the
    # measure inverts the softmax); every other layer is scored as a regressor.
    model_type = 'classifier' if name == 'softmax' else 'regressor'
    lins_layers[name] = linearity_measure(predict_fn, x_test[rnd], feature_range=features_range,
                                          agg='global', model_type=model_type, nb_samples=20)
# Average the linearity measure over the sampled instances for each layer.
lins_layers_mean = {k: v.mean() for k, v in lins_layers.items()}
S = pd.Series(data=lins_layers_mean)
# Gray bars for all layers, red for the final (softmax) entry.
colors = ['gray' for l in S[:-1]]
colors.append('r')
ax = S.plot(kind='bar', linewidth=3, figsize=(15,10), color=colors, width=0.7, fontsize=18)
ax.set_ylabel('L measure', fontsize=20)
ax.set_xlabel('Layer', fontsize=20)
print('Linearity measure calculated taking as output each layer of a convolutional neural network.')
# Linearity measure in the locality of a given instance calculated taking as output each layer of a convolutional neural network trained on the fashion MNIST data set.
# * The linearity measure of the first convolutional layer conv_1 is 0, as expected since convolutions are linear operations.
# * The relu activation introduces non-linearity, which is increased by maxpooling. Dropout layers and flatten layers do not change the output at inference time, so the linearity doesn't change.
# * The second convolutional layer conv_2 and the dense layers change the linearity even though they are linear operations.
# * The softmax layer in red is obtained by inverting the softmax function.
# * For more details see arxiv reference.
# ## Linearity and categories
# Here we calculate the linearity averaged over all instances belonging to the same class, for each class.
# Group the test instances by their true class label.
# The argmax over one-hot labels is identical on every pass, so compute it
# once instead of recomputing it inside the loop as the original did.
y = y_test.argmax(axis=1)
class_groups = []
for i in range(10):
    idxs_i = np.where(y == i)[0]
    class_groups.append(x_test[idxs_i])
def predict_fn(x):
    # Full-model prediction: softmax class probabilities for a batch of images.
    return cnn.predict(x)
# Linearity measure for each class, timed per class and in total.
lins_classes = []
t_0 = time()
# Iterate directly over the groups instead of indexing with range(len(...)).
for j, class_group in enumerate(class_groups):
    print(f'Calculating linearity for instances belonging to class {j}')
    # Shuffle and cap each group at 2000 instances to bound the run time.
    class_group = np.random.permutation(class_group)[:2000]
    t_i = time()
    lin = linearity_measure(predict_fn, class_group, feature_range=features_range,
                            agg='global', model_type='classifier', nb_samples=20)
    t_i_1 = time() - t_i
    print(f'Run time for class {j}: {t_i_1}')
    lins_classes.append(lin)
t_fin = time() - t_0
print(f'Total run time: {t_fin}')
# One column per class: plot the mean and the full distribution of L per class.
df = pd.DataFrame(data=lins_classes).T
ax = df.mean().plot(kind='bar', linewidth=3, figsize=(15,10), color='gray', width=0.7, fontsize=10)
ax.set_ylabel('L measure', fontsize=20)
ax.set_xlabel('Class', fontsize=20)
print("Linearity measure distribution means for each class in the fashion MNIST data set.")
# Histogram of the linearity measure for each class (one subplot per class).
ax2 = df.plot(kind='hist', subplots=True, bins=20, figsize=(10,10), sharey=True)
for a in ax2:
    a.set_xlabel('L measure', fontsize=20)
    a.set_ylabel('', rotation=True, fontsize=10)
#ax2.set_ylabel('F', fontsize=10)
print('Linearity measure distributions for each class in the fashion MNIST data set.')
| doc/source/examples/linearity_measure_fashion_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: beta_rec
# language: python
# name: beta_rec
# ---
# ### Install Beta-recsys
# ## Loading dataset
# +
import sys

sys.path.append("../")

import random

import numpy as np

from beta_rec.data.grocery_data import GroceryData
from beta_rec.datasets.instacart import Instacart_25

seed = 2021
random.seed(seed)  # Fix random seeds for reproducibility
np.random.seed(seed)

# make sure that you have already download the Instacart data from this link:
# https://www.kaggle.com/c/instacart-market-basket-analysis#
# uncompressed them and put them in this folder: ../datasets/instacart_25/raw/*.csv
dataset = Instacart_25(
    min_u_c=20, min_i_c=30, min_o_c=10
)  # Specifying the filtering conditions.

# Temporal basket split: the most recent baskets form the test/validation sets.
split_dataset = dataset.load_temporal_basket_split(test_rate=0.2, n_test=10)
data = GroceryData(split_dataset)
# -
# ### Model config
# +
# the 'config_file' key is required, that is used load a default config.
# Other keys can be specified to replace the default settings.
config = {"config_file": "../configs/vbcar_default.json"}
config["n_sample"] = 5000000  # To reduce the test running time
config["max_epoch"] = 80
config["emb_dim"] = 64
config["root_dir"] = "/home/zm324/workspace/beta-recsys/"
config["dataset"] = "instacart_25"
config["batch_size"] = 10000
config["lr"] = 0.001
# config["item_fea_type"] = "random_word2vec"
# Hyper-parameter tuning could be enabled like this:
# config["tunable"] = [
#     {"name": "lr", "type": "choice", "values": [0.5, 0.05, 0.025, 0.001, 0.005]},
# ]
# config["tune"] = True
# -
# ### Model intialization and training
# +
from beta_rec.recommenders import VBCAR

# Train and evaluate one VBCAR model per item-feature combination.
for item_fea_type in [
    # "random",
    # "cate",
    # "cate_word2vec",
    # "cate_bert",
    # "cate_one_hot",
    # "random_word2vec",
    # "random_bert",
    "random_one_hot",
    "random_bert_word2vec_one_hot",
    "random_cate_word2vec",
    "random_cate_bert",
    "random_cate_one_hot",
    "random_cate_bert_word2vec_one_hot",
]:
    config["item_fea_type"] = item_fea_type
    model = VBCAR(config)
    model.train(data)
    model.test(data.test)
# @To be discussed
# model.train(train_df)
# Case 1, without validation, stop training by loss or max_epoch
# model.train(train_df,valid_df[0])
# Case 2, with validation, stop training by performance on validation set
# model.train(train_df,valid_df[0],test_df[0])
# Case 3, same as Case 2, but also evaluate performance for each epoch on test set.
# Note that the best model will be save automatically, and record the model-save-dir.
| VBR_Instacart_25%_feature_combination-5m1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:ecpaperenv]
# language: python
# name: conda-env-ecpaperenv-py
# ---
import importlib
import sys
import xarray as xr
import numpy as np
# First and last years to process (intended to be inclusive).
ystart = 1979
yend = 1979
# open up CESM data to get the output grid.
cesmdat = xr.open_dataset("/glade/campaign/cesm/collections/cesmLE/CESM-CAM5-BGC-LE/atm/proc/tseries/monthly/PHIS/f.e11.F1850C5CNTVSST.f09_f09.002.cam.h0.PHIS.040101-050012.nc")
grid_out = xr.Dataset({'lat': (['lat'], cesmdat.lat)}, {'lon': (['lon'], cesmdat.lon)})
# Whether to reuse previously computed regridding weights stored at wgtfile.
reusewgt = False
wgtfile = "/project/cas/islas/temp/era5/wgtfile.nc"
# Regrid each year's ERA5 forecast temperature onto the CESM grid.
# Fixed: np.arange's stop is exclusive, so the original
# arange(ystart, yend, 1) was empty when ystart == yend and the loop never ran.
for iyear in np.arange(ystart, yend + 1, 1):
    print(iyear)
    dat = xr.open_dataset("/project/haggis/ERA5/download/tas_fc/06h/tas_fc_06h_step12_" + str(iyear) + ".nc")
    # NOTE(review): xarray has no `Regridder`; this presumably relies on xESMF
    # (`import xesmf as xe; xe.Regridder(...)`) — confirm the intended import,
    # as `xr.Regridder` raises AttributeError.
    regridder = xr.Regridder(dat, grid_out, 'bilinear', periodic=True, reuse_weights=reusewgt, filename=wgtfile)
    # Fixed: the original read `regridder(dat.)`, a syntax error.
    dat_rg = regridder(dat)
| DATA_SORT/analysisincrements/sortoutforecast/.ipynb_checkpoints/regridforecast-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/usm.jpg" width="480" height="240" align="left"/>
# # MAT281 - Laboratorio N°03
#
# ## Objetivos de la clase
#
# * Reforzar los conceptos básicos de pandas.
# ## Contenidos
#
# * [Problema 01](#p1)
#
# ## Problema 01
#
#
# <img src="https://imagenes.universia.net/gc/net/images/practicas-empleo/p/pr/pro/profesiones-con-el-avance-de-la-tecnologia.jpg" width="480" height="360" align="center"/>
#
#
# EL conjunto de datos se denomina `ocupation.csv`, el cual contiene información tal como: edad ,sexo, profesión, etc.
#
# Lo primero es cargar el conjunto de datos y ver las primeras filas que lo componen:
import pandas as pd
import os
# cargar datos
# Load the "|"-separated data file and index it by user_id.
df = pd.read_csv(os.path.join("data", "ocupation.csv"), sep="|").set_index('user_id')
df.head()
# El objetivo es tratar de obtener la mayor información posible de este conjunto de datos. Para cumplir este objetivo debe resolver las siguientes problemáticas:
# 1. How many observations (rows) are in the data set?
len(df.index)
# 2. How many columns are in the data set?
len(df.columns)
# 3. Print the names of all the columns.
df.columns
# 4. Print the index of the dataframe.
df.index
# 5. What is the data type of each column?
# Fixed: `type(df.index)` only reported the Python type of the index object;
# the per-column data types are given by `df.dtypes`.
df.dtypes
# 6. Summarize the data set (numeric columns only).
df.describe()
# 7. Summarize the data set including all columns.
df.describe(include='all')
# 8. Print only the **occupation** column.
df['occupation']
# 9. How many different occupations are there in this data set?
len(df['occupation'].unique())
# 10. What is the most frequent occupation?
w = df['occupation'].value_counts()
print(w.max())
w[w == w.max()]
# 11. What is the mean age of the users?
mean_df = df['age'].mean()
print(mean_df)
# 12. Which age occurs least often?
# Fixed: the original counted *occupations* again; the question asks about ages.
w = df['age'].value_counts()
print(w.min())
w[w == w.min()]
| homeworks/laboratorio_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.insert(0, '../')
from pgso.test_functions import *
from pgso.gso import GSO as PGSO, PSO_purana
from pgso.benchmark import *
import matplotlib.pyplot as plt
from tqdm import tqdm
import seaborn as sns
import numpy as np
import scipy.io
from numba import jit
# # PSO IMPLEMENTATION
# +
#dependencies
import random
import math
import copy # for array copying
import sys
class Particle:
    """A single PSO particle: a position, a velocity and its personal best."""

    def __init__(self, x0, num_dimensions):
        self.position_i = []   # particle position
        self.velocity_i = []   # particle velocity
        self.pos_best_i = []   # best position individual
        self.err_best_i = -1   # best error individual (-1 == not yet evaluated)
        self.err_i = -1        # error individual
        self.num_dimensions = num_dimensions
        for i in range(0, self.num_dimensions):
            self.velocity_i.append(random.uniform(-1, 1))
            self.position_i.append(x0[i])

    # evaluate current fitness
    def evaluate(self, costFunc):
        self.err_i = costFunc(self.position_i)
        # check to see if the current position is an individual best
        if self.err_i < self.err_best_i or self.err_best_i == -1:
            # Fixed: store a *copy* of the position. The original assigned the
            # live list itself, so the in-place mutation in update_position()
            # silently overwrote the recorded personal best as the particle moved.
            self.pos_best_i = list(self.position_i)
            self.err_best_i = self.err_i

    # update new particle velocity
    def update_velocity(self, pos_best_g):
        w = 0.5  # constant inertia weight (how much to weigh the previous velocity)
        c1 = 1   # cognitive constant
        c2 = 2   # social constant
        for i in range(0, self.num_dimensions):
            r1 = random.random()
            r2 = random.random()
            vel_cognitive = c1 * r1 * (self.pos_best_i[i] - self.position_i[i])
            vel_social = c2 * r2 * (pos_best_g[i] - self.position_i[i])
            self.velocity_i[i] = w * self.velocity_i[i] + vel_cognitive + vel_social

    # update the particle position based off new velocity updates
    def update_position(self, bounds):
        for i in range(0, self.num_dimensions):
            self.position_i[i] = self.position_i[i] + self.velocity_i[i]
            # adjust maximum position if necessary
            if self.position_i[i] > bounds[i][1]:
                self.position_i[i] = bounds[i][1]
            # adjust minimum position if necessary
            if self.position_i[i] < bounds[i][0]:
                self.position_i[i] = bounds[i][0]
def PSO(costFunc, bounds, maxiter, swarm_init, log=False, the_list=None):
    """Run particle swarm optimisation from the given starting positions.

    Returns (best_position, best_error) after `maxiter` iterations.
    When `log` is true, appends the running best error to `the_list`
    once per iteration (caller supplies the list).
    """
    dims = len(swarm_init[0])
    best_err = -1     # best error seen by the whole swarm (-1 = unset)
    best_pos = []     # position at which best_err was observed

    # build the swarm from the supplied starting positions
    swarm = [Particle(start, dims) for start in swarm_init]

    for _ in range(maxiter):
        # evaluate fitness and track the global best
        for particle in swarm:
            particle.evaluate(costFunc)
            if particle.err_i < best_err or best_err == -1:
                best_pos = list(particle.position_i)
                best_err = float(particle.err_i)
        # move every particle toward the global best
        for particle in swarm:
            particle.update_velocity(best_pos)
            particle.update_position(bounds)
        if log:
            the_list.append(best_err)
    return best_pos, best_err
# -
# # GSO IMPLEMENTATION
def GSO(M, bounds, num_particles, max_iter, costfunc, log=False, the_list=None):
    """Galactic swarm optimisation: run M independent PSO subswarms, then a
    final PSO over the subswarm winners.  Returns (best_position, best_error)."""
    dims = len(bounds)
    low, high = bounds[0][0], bounds[0][1]
    winners = []
    for _ in range(M):
        starts = [np.random.uniform(low, high, dims) for _ in range(num_particles)]
        history = list()
        champion, _ = PSO(costfunc, bounds, max_iter, swarm_init=starts, log=True, the_list=history)
        winners.append(champion)
        if log:
            the_list.append(history)
    # second phase: a PSO whose initial swarm is the set of subswarm bests
    return PSO(costfunc, bounds, max_iter, swarm_init=winners)
# # ROTATED and SHIFTED FUNCTIONS
# +
def rotated_rastrigin(x):
    """Rastrigin evaluated after rotating x by a CEC benchmark matrix.

    The rotation matrix matching len(x) is loaded from ./matlab-files.
    Raises ValueError for unsupported dimensionalities.
    """
    if len(x) == 10:
        mat = scipy.io.loadmat('./matlab-files/rastrigin_M_D10.mat')
    elif len(x) == 30:
        mat = scipy.io.loadmat('./matlab-files/rastrigin_M_D30.mat')
    elif len(x) == 50:
        mat = scipy.io.loadmat('./matlab-files/rastrigin_M_D50.mat')
    else:
        # ROBUSTNESS: the original fell through with `mat` undefined and
        # crashed with a confusing NameError for other dimensionalities.
        raise ValueError('rotated_rastrigin supports only 10, 30 or 50 dimensions')
    y = np.matmul(mat['M'], x)
    return rastrigin(y)
def rotated_griewangk(x):
    """Griewank evaluated after rotating x by a CEC benchmark matrix.

    The rotation matrix matching len(x) is loaded from ./matlab-files.
    Raises ValueError for unsupported dimensionalities.
    """
    if len(x) == 10:
        mat = scipy.io.loadmat('./matlab-files/griewank_M_D10.mat')
    elif len(x) == 30:
        mat = scipy.io.loadmat('./matlab-files/griewank_M_D30.mat')
    elif len(x) == 50:
        mat = scipy.io.loadmat('./matlab-files/griewank_M_D50.mat')
    else:
        # ROBUSTNESS: the original fell through with `mat` undefined and
        # crashed with a confusing NameError for other dimensionalities.
        raise ValueError('rotated_griewangk supports only 10, 30 or 50 dimensions')
    y = np.matmul(mat['M'], x)
    return griewank(y)
def rotated_ackley(x):
    """Ackley evaluated after rotating x by a CEC benchmark matrix.

    The rotation matrix matching len(x) is loaded from ./matlab-files.
    Raises ValueError for unsupported dimensionalities.
    """
    if len(x) == 10:
        mat = scipy.io.loadmat('./matlab-files/ackley_M_D10.mat')
    elif len(x) == 30:
        mat = scipy.io.loadmat('./matlab-files/ackley_M_D30.mat')
    elif len(x) == 50:
        mat = scipy.io.loadmat('./matlab-files/ackley_M_D50.mat')
    else:
        # ROBUSTNESS: avoid a NameError on `mat` for other dimensionalities.
        raise ValueError('rotated_ackley supports only 10, 30 or 50 dimensions')
    y = np.matmul(mat['M'], x)
    # BUG FIX: the original returned ackley(x) — the un-rotated input —
    # so the rotation matrix had no effect on this benchmark.
    return ackley(y)
def shifted_rotated_rastrigin(x):
    """Rotated Rastrigin with a fresh random shift in [-2, 2) per call."""
    shift = np.random.uniform(-2, 2, len(x))
    return rotated_rastrigin(x - shift)
def shifted_rotated_ackley(x):
    """Rotated Ackley with a fresh random shift in [-2, 2) per call."""
    shift = np.random.uniform(-2, 2, len(x))
    return rotated_ackley(x - shift)
# +
unimodal_functions = [exponential, powellsumfcn, sum_of_squares, schfewel_220, schwefel_222, griewank, zakharov, sphere]
unimodal_strings = ['exponential', ' powell sum function', ' sum_of_squares', ' schfewel 2.20', ' schwefel 2.22', ' griewank', ' zakharov', ' sphere']
unimodal_bounds = [[-1, 1], [-1, 1], [-10, 10], [-100, 100], [-100, 100], [-600, 600], [-5, 10], [-100, 100]]
multimodal_functions = [nonContinuousRastrigin, ackley, rastrigin, rosen, rotated_rastrigin, rotated_griewangk, rotated_ackley, shifted_rotated_rastrigin, shifted_rotated_ackley]
multimodal_strings = ['nonContinuousRastrigin', 'ackley', 'rastrigin', 'rosen', "rotated_rastrigin", "rotated_griewangk", "rotated_ackley", "shifted_rotated_rastrigin", "shifted_rotated_ackley"]
multimodal_bounds = [[-100, 100], [-40, 40], [-100, 100], [-30, 30], [-100, 100], [-600, 600], [-40, 40], [-5.12, 5.12], [-10, 10]]
# +
def get_GSO_results(dimensions, bounds, costfunc, algorithm, M, num_particles, max_iter, suppress=True):
    """Average the best error returned by `algorithm` over 10 independent runs.

    `algorithm` is called as algorithm(M, search_space, num_particles,
    max_iter, costfunc) and must return a (position, error) pair.
    """
    search_space = [bounds] * dimensions
    if not suppress:
        print("\n Algorithm: ", algorithm,"\n Dimensions: ", dimensions,"\n cost function: ", costfunc,"\n iterations: ", max_iter)
    errors = [algorithm(M, search_space, num_particles, max_iter, costfunc)[1]
              for _ in range(10)]
    return sum(errors, 0) / 10
def run_test(dimensions, algorithm, M, num_particles, max_iter, mode="unimodal"):
    """Benchmark `algorithm` on the unimodal or multimodal suite.

    Returns a dict mapping benchmark name -> averaged best error.
    """
    if mode == "unimodal":
        suite = zip(unimodal_functions, unimodal_bounds, unimodal_strings)
    else:
        suite = zip(multimodal_functions, multimodal_bounds, multimodal_strings)
    return {name: get_GSO_results(dimensions, box, fn, algorithm, M, num_particles, max_iter)
            for fn, box, name in suite}
# -
# # Unimodal on GSO
print(run_test(10, GSO, 7, 20, 1000))
print(run_test(30, GSO, 7, 20, 1000))
print(run_test(50, GSO, 7, 20, 1000))
# # Multimodal On GSO
print(run_test(10, GSO, 7, 20, 1000, "multimodal"))
print(run_test(30, GSO, 7, 20, 1000, "multimodal"))
print(run_test(50, GSO, 7, 20, 1000, "multimodal"))
# # Unimodal on PGSO
print(run_test(10, PGSO, 7, 20, 1000))
print(run_test(30, PGSO, 7, 20, 1000))
print(run_test(50, PGSO, 7, 20, 1000))
# # Multimodal on PGSO
print(run_test(10, PGSO, 7, 20, 1000, "multimodal"))
print(run_test(30, PGSO, 7, 20, 1000, "multimodal"))
print(run_test(50, PGSO, 7, 20, 1000, "multimodal"))
# # PSO
def True_PSO(costFunc,bounds,maxiter, num_particles, log=False, the_list=None):
    """Standalone PSO that samples its own initial swarm uniformly in bounds.

    Returns (best_position, best_error) after `maxiter` iterations.
    When `log` is True, the running global best error is appended to
    `the_list` each iteration (the caller must supply a list).
    """
    # NOTE(review): assumes all dimensions share bounds[0] — confirm callers
    lb = bounds[0][0]
    ub = bounds[0][1]
    num_dimensions=len(bounds)
    swarm_init = np.array([np.random.uniform(lb, ub, num_dimensions) for _ in range(num_particles)])
    err_best_g=-1                   # best error for group (-1 = unset sentinel)
    pos_best_g=[]                   # best position for group
    # establish the swarm
    swarm = [Particle(position, num_dimensions) for position in swarm_init]
    # begin optimization loop
    i=0
    while i < maxiter:
        #print i,err_best_g
        # cycle through particles in swarm and evaluate fitness
        for j in range(0,num_particles):
            swarm[j].evaluate(costFunc)
            # determine if current particle is the best (globally)
            if swarm[j].err_i < err_best_g or err_best_g == -1:
                pos_best_g=list(swarm[j].position_i)
                err_best_g=float(swarm[j].err_i)
        # cycle through swarm and update velocities and position
        for j in range(0,num_particles):
            swarm[j].update_velocity(pos_best_g)
            swarm[j].update_position(bounds)
        if log:
            the_list.append(err_best_g)
        i+=1
    return pos_best_g, err_best_g
def run_PSO_tests(dimensions, maxiter, num_particles, mode="unimodal"):
    """Average True_PSO over 10 runs for each benchmark in the chosen suite.

    Returns a dict mapping benchmark name -> averaged best error.
    """
    if mode == "unimodal":
        suite = zip(unimodal_functions, unimodal_bounds, unimodal_strings)
    else:
        suite = zip(multimodal_functions, multimodal_bounds, multimodal_strings)
    results = dict()
    for fn, box, name in suite:
        space = [box for _ in range(dimensions)]
        total = 0
        for _ in range(10):
            total += True_PSO(fn, space, maxiter, num_particles)[1]
        results[name] = total / 10
    return results
# # Unimodal on PSO
print(run_PSO_tests(10, 1000, 50))
print(run_PSO_tests(30, 1000, 50))
print(run_PSO_tests(50, 1000, 50))
# # Multimodal on PSO
print(run_PSO_tests(10, 1000, 50, 'multimodal'))
print(run_PSO_tests(30, 1000, 50, 'multimodal'))
print(run_PSO_tests(50, 1000, 50, 'multimodal'))
# +
graph_functions = [nonContinuousRastrigin, rotated_ackley, rotated_rastrigin, rotated_griewangk, griewank, rosen, sphere]
graph_bounds = [[-100, 100], [-40, 40], [-100, 100], [-600, 600], [-600, 600], [-30, 30], [-100, 100]]
def make_graphs(iterations, func, bounds, dims, algo="pso"):
    """Collect best-error values as the iteration budget grows.

    Runs the chosen optimiser ("pso", "gso" or anything else -> PGSO) once
    per iteration budget and returns the list of best errors.
    """
    search_space = [bounds for _ in range(dims)]
    errors = list()
    if algo == "pso":
        for i in tqdm(range(iterations)):
            errors.append(True_PSO(func, search_space, i, 20)[1])
    else:
        optimiser = GSO if algo == 'gso' else PGSO
        for i in tqdm(range(10, iterations)):
            # BUG FIX: the original appended the whole (position, error)
            # tuple here, while the PSO branch appends only the error —
            # making the two branches return incompatible data.
            errors.append(optimiser(5, search_space, 20, i, func)[1])
    return errors
# -
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.lines as mlines
# %matplotlib inline
pso_list = list()
search_space = [graph_bounds[0] for _ in range(50)]
_ = True_PSO(graph_functions[0], search_space, 1500, 20, log=True, the_list=pso_list)
len(pso_list)
# +
iterations = [i for i in range(1500)]
df = pd.DataFrame({'iterations': iterations, 'error': pso_list})
plt.figure(figsize=(16,9))
sns.set()
_ = sns.lineplot(x="iterations",y="error", data=df)
# -
# nonContinuousRastrigin
# GSO
gso_list = list()
_ = GSO(5, search_space, 20, 1500, graph_functions[0], log=True, the_list=gso_list)
gso_true_list = list()
for i,j,k,l,m in zip(gso_list[0], gso_list[1], gso_list[2], gso_list[3], gso_list[4]):
gso_true_list.append((i + j + k + l + m)/5)
len(gso_true_list)
# +
iterations = [i for i in range(1500)]
df2 = pd.DataFrame({'iterations': iterations, 'error': gso_true_list})
plt.figure(figsize=(16,9))
sns.set()
# BUG FIX: this cell is meant to plot the GSO curve (df2), but the original
# passed data=df (the PSO frame), so the GSO results were never shown.
_ = sns.lineplot(x="iterations",y="error", data=df2)
# +
sns.set()
plt.figure(figsize=(16,9))
ax = plt.gca()
blue_line = mlines.Line2D([],[],color='blue', label='PSO', markersize='20')
orange_line = mlines.Line2D([],[],color='orange', label='GSO', markersize='20')
a = sns.lineplot(x='iterations', y='error', data=df, ax=ax)
b = sns.lineplot(x='iterations', y='error', data=df2, ax=ax)
# a.legend()
_ = ax.legend(handles=[blue_line, orange_line])
# -
pgso_list = list()
search_space = [graph_bounds[0] for _ in range(50)]
_ = PGSO(5, search_space, 20, 1500, graph_functions[0], log=True, the_list=pgso_list)
def make_graph(algo, cost_func, bounds, max_iter, num_particles, M=5, dims=50):
    """Collect per-iteration best-error logs for PSO, GSO and PGSO.

    Returns (pso_log, gso_avg_log, pgso_avg_log); the GSO/PGSO logs are the
    element-wise mean over the M subswarm logs.

    BUG FIX: the original referenced an undefined global `M` (NameError),
    ignored its `bounds` argument in favour of the global `graph_bounds`,
    and never returned the logs it computed.  `M` and `dims` are now
    defaulted keyword parameters, keeping the old call signature valid.
    """
    search_space = [bounds for _ in range(dims)]
    # Run PSO first
    pso_log = list()
    True_PSO(cost_func, search_space, max_iter, num_particles, log=True, the_list=pso_log)
    # Run GSO and average its M subswarm logs
    gso_logs = list()
    _ = GSO(M, search_space, num_particles, max_iter, cost_func, log=True, the_list=gso_logs)
    gso_avg = [sum(vals) / M for vals in zip(*gso_logs[:M])]
    # Run PGSO and average its M subswarm logs
    pgso_logs = list()
    _ = PGSO(M, search_space, num_particles, max_iter, cost_func, log=True, the_list=pgso_logs)
    pgso_avg = [sum(vals) / M for vals in zip(*pgso_logs[:M])]
    return pso_log, gso_avg, pgso_avg
| experiments/.ipynb_checkpoints/Benchmarks-checkpoint.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .contextual_drl
# language: python
# name: .contextual_drl
# ---
# # Learn the domain model in PDDL using iLOCM
#
# **interactive-LOCM**
# This code combines LOCM1 and LOCM2 algorithms and is last part of the pipeline that I use in my thesis to generate PDDL models from instructional texts.
#
# - Step 0: Preprocess: Lemmatize, Coref resolve, action override rename and replacing empty parameters.
# - Step 1: Find classes and make transition graphs.
# - Step 2: Get transition sets from LOCM2 algorithm
# - Step 3: Create FSMs
# - Step 4: Perform Zero Analysis and add new FSM if necessary.
# - Step 5: Create and test hypothesis for state parameters
# - Step 6: Create and merge state parameters
# - Step 7: Remove parameter flaws
# - Step 8: Extract static preconditions
# - Step 9: Form action schemas
from collections import defaultdict
import itertools
import os
from tabulate import tabulate
from pprint import pprint
import matplotlib.pyplot as plt
# %matplotlib inline
import networkx as nx
import pandas as pd
pd.options.display.max_columns = 100
from IPython.display import display, Markdown
from ipycytoscape import *
import string
# ## Read input sequences
input_seqs = u'''
board-truck(driver1, truck2, s4), disembark-truck(driver1, truck2, s4), board-truck(driver1, truck2, s4), load-truck(package5, truck2, s4), drive-truck(truck2, s4, s1, driver1), drive-truck(truck2, s1, s3, driver1), unload-truck(package5, truck2, s3), drive-truck(truck2, s3, s5, driver1), drive-truck(truck2, s5, s1, driver1), disembark-truck(driver1, truck2, s1), board-truck(driver2, truck1, s0), load-truck(package2, truck1, s0), drive-truck(truck1, s0, s1, driver2), unload-truck(package2, truck1, s1), drive-truck(truck1, s1, s0, driver2), disembark-truck(driver2, truck1, s0), board-truck(driver1, truck2, s1), load-truck(package1, truck2, s1), drive-truck(truck2, s1, s4, driver1), drive-truck(truck2, s4, s5, driver1), unload-truck(package1, truck2, s5), drive-truck(truck2, s5, s1, driver1), disembark-truck(driver1, truck2, s1), board-truck(driver1, truck2, s1), drive-truck(truck2, s1, s4, driver1), load-truck(package4, truck2, s4), drive-truck(truck2, s4, s1, driver1), unload-truck(package4, truck2, s1), disembark-truck(driver1, truck2, s1), board-truck(driver1, truck2, s1), drive-truck(truck2, s1, s4, driver1), load-truck(package3, truck2, s4), drive-truck(truck2, s4, s5, driver1), unload-truck(package3, truck2, s5), drive-truck(truck2, s5, s1, driver1), disembark-truck(driver1, truck2, s1)
'''
# +
def read_input(input_seqs):
    '''
    Parse the raw trace text into a list of action sequences.
    Each sequence is a list of (action_name, argument_list) tuples; action
    names are lower-cased with punctuation stripped, arguments lower-cased.
    '''
    sequences = []
    for raw_line in input_seqs.split('\n'):
        # skip blank / whitespace-only / single-character lines
        if not (raw_line and not raw_line.isspace() and len(raw_line) > 1):
            continue
        names = []
        args = []
        line = raw_line.rstrip("\n\r").lstrip("\n\r").lower()
        for chunk in line.split("),"):
            head = chunk.split('(')[0].strip(")\n\r").strip()
            # drop punctuation (e.g. hyphens) from the action name
            names.append(head.translate(str.maketrans('', '', string.punctuation)))
            tail = chunk.split('(')[1].strip(")\n\r")
            #argument_list.insert(0,'zero')
            args.append([token.strip() for token in tail.split(',')])
        sequences.append(list(zip(names, args)))
    return sequences
def print_sequences(sequences):
    """Pretty-print every sequence, one indexed step per line, with a blank
    line separating sequences."""
    for sequence in sequences:
        for step, entry in enumerate(sequence):
            print(str(step) + ": " + str(entry))
        print()
# -
sequences = read_input(input_seqs)
print_sequences(sequences)
domain_name = 'driverlog' #specify domain name to be used in PDDL here.
# ## Step 1.1: Find classes
# +
# Collect the vocabulary of the input traces: action names, transition ids
# and the set of all objects appearing as arguments.
transitions = set() # A transition is denoted by action_name + argument position
arguments = set()
actions = set()
for seq in sequences:
    for actarg_tuple in seq:
        actions.add(actarg_tuple[0])
        for j, arg in enumerate(actarg_tuple[1]):
            # transition id = "<action>.<argument position>"
            transitions.add(actarg_tuple[0]+"."+str(j))
            arguments.add(arg)
print("\nActions")
print(actions)
# print("\nTransitions")
# print(transitions)
print("\nArguments/Objects")
print(arguments)
# -
def get_actarg_dictionary(sequences):
    """Group argument lists by action name across all sequences."""
    grouped = defaultdict(list)
    for sequence in sequences:
        for name, argument_list in sequence:
            grouped[name].append(argument_list)
    return grouped
d = get_actarg_dictionary(sequences)
# +
# class util functions.
def get_classes(d):
# TODO incorporate word similarity in get classes.
c = defaultdict(set)
for k,v in d.items():
for arg_list in v:
for i,object in enumerate(arg_list):
c[k,i].add(object)
sets = c.values()
classes = []
# remove duplicate classes
for s in sets:
if s not in classes:
classes.append(s)
# now do pairwise intersections of all values. If intersection, combine them; then return the final sets.
classes_copy = list(classes)
while True:
combinations = list(itertools.combinations(classes_copy,2))
intersections_count = 0
for combination in combinations:
if combination[0].intersection(combination[1]):
intersections_count +=1
if combination[0] in classes_copy:
classes_copy.remove(combination[0])
if combination[1] in classes_copy:
classes_copy.remove(combination[1])
classes_copy.append(combination[0].union(combination[1]))
if intersections_count==0:
# print("no intersections left")
break
return classes_copy
# TODO: Can use better approach here. NER might help.
def get_class_names(classes):
# Name the class to first object found ignoring the digits in it
class_names = []
for c in classes:
for object in c:
# object = ''.join([i for i in object if not i.isdigit()])
class_names.append(object)
break
return class_names
def get_class_index(arg, classes):
    """Return the index of the class containing `arg`; prints an error and
    returns None when the object belongs to no class."""
    for idx, members in enumerate(classes):
        if arg in members:
            return idx
    # reached only when arg was not found in any class
    print("Error:class index not found")
# +
classes = get_classes(d) #sorts of object
print("\nSorts/Classes")
print(classes)
class_names = get_class_names(classes)
print("\nExtracted class names")
print(class_names)
# -
# ## USER INPUT 1: Enter Correct Class names
# Editing the extracted class names to more readable object classes will make the final PDDL model more readable.
# +
############ (Optional) User Input ############
# Give user an option to change class names.
# class_names[0] = 'rocket'
#tyre
# class_names[0] = 'Jack'
# class_names[1] = 'Boot'
# class_names[2] = 'Wheel'
# class_names[3] = 'Hub'
# class_names[4] = 'Wrench'
# class_names[5] = 'Nut'
#driverlog
class_names[0] = 'Driver'
class_names[1] = 'Truck'
class_names[2] = 'Package'
class_names[3] = 'Location'
# #blocksworld
# class_names[0] = 'Block'
# class_names[1] = 'Gripper'
print("\nRenamed class names")
print(class_names)
# -
# **Assumptions of LOCM2**
# - Each object of a same class undergoes similar kind of transition.
# - Objects of same class in a same action undergo similar kind of transition.
# +
# change transitions to be more meaningful by incorporating class_names.
full_transitions = set()
for seq in sequences:
for actarg_tuple in seq:
actions.add(actarg_tuple[0])
for j, arg in enumerate(actarg_tuple[1]):
full_transitions.add(actarg_tuple[0]+"."+class_names[get_class_index(arg,classes)]+'.'+str(j))
arguments.add(arg)
print("\nActions")
print(actions)
print("\nTransitions")
print(full_transitions)
print("\nArguments/Objects")
print(arguments)
# -
print("\nNumber of Actions: {},\nNumber of unique transitions: {},\nNumber of unique objects (arguments): {},\nNumber of classes/sorts: {}".format(len(actions), len(transitions), len(arguments), len(classes)))
# ## Building Transition graphs
# ### Utils
# +
def empty_directory(folder):
    """Delete every regular file directly inside `folder`; subdirectories
    are left untouched and per-file errors are printed, not raised."""
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        try:
            if os.path.isfile(entry_path):
                os.unlink(entry_path)
            # elif os.path.isdir(entry_path): shutil.rmtree(entry_path)
        except Exception as e:
            print(e)
def findsubsets(S, m):
    """Return all size-m subsets of S as a set of tuples."""
    return {combo for combo in itertools.combinations(S, m)}
def print_table(matrix):
    """Render a dataframe/matrix as an HTML table in the notebook output."""
    display(tabulate(matrix, headers='keys', tablefmt='html'))
def printmd(string):
    """Render a markdown string in the notebook output."""
    display(Markdown(string))
# -
# ### Save graphs in graphml format (used in cytoscape)
def save(graphs, domain_name):
    """Write each class transition graph to GraphML under output/<domain>/
    and return the list of their integer adjacency matrices."""
    adjacency_matrix_list = []   # one adjacency matrix per class
    for idx, graph in enumerate(graphs):
        nx.write_graphml(graph, "output/" + domain_name + "/" + class_names[idx] + ".graphml")
        matrix = nx.to_pandas_adjacency(graph, nodelist=graph.nodes(), dtype=int)
        # print_table(matrix)
        adjacency_matrix_list.append(matrix)
    return adjacency_matrix_list
def plot_cytographs(graphs, domain_name, aml):
    """Display each class transition graph as an interactive Cytoscape widget,
    preceded by the class name and its adjacency matrix.

    `aml` is the list of adjacency matrices parallel to `graphs`.
    Returns the list of CytoscapeWidget objects (one per class).
    """
    cytoscapeobs = []
    for index, G in enumerate(graphs):
        cytoscapeobj = CytoscapeWidget()
        cytoscapeobj.graph.add_graph_from_networkx(G)
        edge_list = list()
        for source, target, data in G.edges(data=True):
            edge_instance = Edge()
            edge_instance.data['source'] = source
            edge_instance.data['target'] = target
            # route known cytoscape attributes onto the Edge object itself;
            # everything else (e.g. weight) goes into its data dict
            for k, v in data.items():
                cyto_attrs = ['group', 'removed', 'selected', 'selectable',
                'locked', 'grabbed', 'grabbable', 'classes', 'position', 'data']
                if k in cyto_attrs:
                    setattr(edge_instance, k, v)
                else:
                    edge_instance.data[k] = v
            edge_list.append(edge_instance)
        cytoscapeobj.graph.edges = edge_list
        # cytoscapeobj.graph.add_graph_from_df(aml[index],aml[index].columns.tolist())
        cytoscapeobs.append(cytoscapeobj)
        # print(cytoscapeobj)
        printmd('## class **'+class_names[index]+'**')
        print_table(aml[index])
        # print("Nodes:{}".format(G.nodes()))
        # print("Edges:{}".format(G.edges()))
        cytoscapeobj.set_style([{
                        'width':400,
                        'height':400,
                        'selector': 'node',
                        'style': {
                            'label': 'data(id)',
                            'font-family': 'helvetica',
                            'font-size': '8px',
                            'background-color': '#11479e',
                            'height':'10px',
                            'width':'10px',
                        }
                        },
                        {
                        'selector': 'node:parent',
                        'css': {
                            'background-opacity': 0.333,
                            'background-color': '#bbb'
                            }
                        },
                        {
                        'selector': '$node > node',
                        'css': {
                            'padding-top': '10px',
                            'padding-left': '10px',
                            'padding-bottom': '10px',
                            'padding-right': '10px',
                            'text-valign': 'top',
                            'text-halign': 'center',
                            'background-color': '#bbb'
                          }
                        },
                       {
                            'selector': 'edge',
                            'style': {
                                'label':'data(weight)',
                                'width': 1,
                                'line-color': '#9dbaea',
                                'target-arrow-shape': 'triangle',
                                'target-arrow-color': '#9dbaea',
                                'arrow-scale': 0.5,
                                'curve-style': 'bezier',
                                'font-family': 'helvetica',
                                'font-size': '8px',
                                'text-valign': 'top',
                                'text-halign':'center'
                            }
                        },
                        ])
        cytoscapeobj.max_zoom = 4.0
        cytoscapeobj.min_zoom = 0.5
        display(cytoscapeobj)
    return cytoscapeobs
# #### Build transitions graphs and call save function
def build_and_save_transition_graphs(classes, domain_name, class_names):
    """Build one weighted directed transition graph per object class and save
    them under output/<domain_name>/ as GraphML.

    Nodes are transitions "<action>.<arg position>"; an edge t1 -> t2 with
    weight w means t2 directly followed t1 for some object w times.
    NOTE(review): reads the module-level globals `arguments` and `sequences`.
    Returns (adjacency_matrix_list, graphs, cytoscape_widgets).
    """
    # There should be a graph for each class of objects.
    graphs = []
    # Initialize all graphs empty
    for sort in classes:
        graphs.append(nx.DiGraph())
    consecutive_transition_lists = [] #list of consecutive transitions per object instance per sequence.
    for m, arg in enumerate(arguments): # for all arguments (objects found in sequences)
        for n, seq in enumerate(sequences): # for all sequences
            consecutive_transition_list = list() # consecutive transition list for a sequence and an object (arg)
            for i, actarg_tuple in enumerate(seq):
                for j, arg_prime in enumerate(actarg_tuple[1]): # for all arguments in actarg tuples
                    if arg == arg_prime: # if argument matches arg
                        node = actarg_tuple[0] + "." + str(j)
                        # node = actarg_tuple[0] + "." + class_names[get_class_index(arg,classes)] + "." + str(j) # name the node of graph which represents a transition
                        consecutive_transition_list.append(node) # add node to the cons_transition for sequence and argument
                        # for each class append the nodes to the graph of that class
                        class_index = get_class_index(arg_prime, classes) # get index of class to which the object belongs to
                        graphs[class_index].add_node(node) # add node to the graph of that class
            consecutive_transition_lists.append([n, arg, consecutive_transition_list])
    # print(consecutive_transition_lists)
    # for all consecutive transitions add edges to the appropriate graphs.
    for cons_trans_list in consecutive_transition_lists:
        # print(cons_trans_list)
        seq_no = cons_trans_list[0] # get sequence number
        arg = cons_trans_list[1] # get argument
        class_index = get_class_index(arg, classes) # get index of class
        # add directed edges to graph of that class; weight counts repetitions
        for i in range(0, len(cons_trans_list[2]) - 1):
            if graphs[class_index].has_edge(cons_trans_list[2][i], cons_trans_list[2][i + 1]):
                graphs[class_index][cons_trans_list[2][i]][cons_trans_list[2][i + 1]]['weight'] += 1
            else:
                graphs[class_index].add_edge(cons_trans_list[2][i], cons_trans_list[2][i + 1], weight=1)
    # make directory if doesn't exist
    dirName = "output/"+ domain_name
    if not os.path.exists(dirName):
        os.makedirs(dirName)
        print("Directory ", dirName, " Created ")
    else:
        print("Directory ", dirName, " already exists")
    empty_directory(dirName)
    # save all the graphs
    adjacency_matrix_list = save(graphs, domain_name) # list of adjacency matrices per class
    # plot cytoscape interactive graphs
    cytoscapeobs = plot_cytographs(graphs,domain_name, adjacency_matrix_list)
    return adjacency_matrix_list, graphs, cytoscapeobs
# ##### Transition Graphs
#### Build weighted directed graphs for transitions.
printmd("## "+ domain_name.upper())
adjacency_matrix_list, graphs, cytoscapeobjs = build_and_save_transition_graphs(classes, domain_name, class_names)
# ## USER INPUT 2: Edit transition graphs
# For meaningful LOCM models, here one can edit the transition graphs to make them accurate. However, in the paper we don't do that in order to estimate what kind of models are learned automatically from natural language data.
# Option 1. **You can add or delete nodes/edges in transition graphs by following methods like add_node, delete_edges shown in the following library.**
# https://github.com/QuantStack/ipycytoscape/blob/master/ipycytoscape/cytoscape.py
#
# Option 2. **Alternatively you can use the saved .graphml file. Open it up in Cytoscape, edit it within the GUI and load that graph into the graphs list.**
# ## Step 2: Get Transition Sets from LOCM2
#
# **Algorithm**: LOCM2
#
# **Input** :
# - T_all = set of observed transitions for a sort/class
# - H : Set of holes - each hole is a set of two transitions.
# - P : Set of pairs <t1,t2> i.e. consecutive transitions.
# - E : Set of example sequences of actions.
#
# **Output**:
# - S : Set of transition sets.
# ### Finding holes
# Holes are transitions that LOCM1 will assume to be true due to the flaw of overgeneralizing
def get_adjacency_matrix_with_holes(adjacency_matrix_list):
    """Mark LOCM2 'holes' in each class adjacency matrix.

    For every pair of rows that share at least one positive column, any
    column where exactly one of the two rows is positive gets the string
    'hole' written into the zero cell.  Returns a new list of matrices.
    """
    marked_matrices = []
    for adjacency_matrix in adjacency_matrix_list:
        source = adjacency_matrix.copy()
        marked = adjacency_matrix.copy()
        n_rows = source.shape[0]
        # examine every unordered pair of rows
        for r1 in range(n_rows - 1):
            for r2 in range(r1 + 1, n_rows):
                row_a = source.iloc[r1, :]
                row_b = source.iloc[r2, :]
                # do these two rows have any positive column in common?
                shares_column = any(
                    row_a.iloc[col] > 0 and row_b.iloc[col] > 0
                    for col in range(row_a.shape[0]))
                # only row pairs with a shared column can produce holes
                if shares_column:
                    for col in range(row_a.shape[0]):
                        if row_a.iloc[col] > 0 and row_b.iloc[col] == 0:
                            marked.iloc[r2, col] = 'hole'
                        elif row_a.iloc[col] == 0 and row_b.iloc[col] > 0:
                            marked.iloc[r1, col] = 'hole'
        marked_matrices.append(marked)
    return marked_matrices
# +
adjacency_matrix_list_with_holes = get_adjacency_matrix_with_holes(adjacency_matrix_list)
# Printing FSM matrices with and without holes
for index,adjacency_matrix in enumerate(adjacency_matrix_list):
printmd("\n#### " + class_names[index] )
print_table(adjacency_matrix)
printmd("\n#### HOLES: " + class_names[index])
print_table(adjacency_matrix_list_with_holes[index])
# +
# Create list of set of holes per class (H)
holes_per_class = []
for index,df in enumerate(adjacency_matrix_list_with_holes):
holes = set()
for i in range(df.shape[0]):
for j in range(df.shape[1]):
if df.iloc[i,j] == 'hole':
holes.add(frozenset({df.index[i] , df.columns[j]}))
holes_per_class.append(holes)
for i, hole in enumerate(holes_per_class):
print("#holes in class " + class_names[i]+":" + str(len(hole)))
# for h in hole:
# print(list(h))
# -
# #### T_all - Set of observed transitions for a class.
# List of transitions per class (T_all). It is just a set of transitions that occur for a class.
transitions_per_class = []
for index, df in enumerate(adjacency_matrix_list_with_holes):
transitions_per_class.append(df.columns.values)
# for i, transition in enumerate(transitions_per_class):
# print('{}:{}'.format(class_names[i], transition))
# #### P - set of pairs <t1,t2> (consecutive transitions)
def get_consecutive_transitions_per_class(adjacency_matrix_list_with_holes):
    """Collect, per class, the set of ordered pairs (t1, t2) whose cell is a
    positive count (holes are skipped)."""
    per_class = []
    for df in adjacency_matrix_list_with_holes:
        pairs = set()   # observed consecutive transitions for this class
        for row in range(df.shape[0]):
            for col in range(df.shape[1]):
                cell = df.iloc[row, col]
                # 'hole' markers are not real transitions
                if cell != 'hole' and cell > 0:
                    pairs.add((df.index[row], df.columns[col]))
        per_class.append(pairs)
    return per_class
# Create list of consecutive transitions per class (P). If value is not null, ordered pair i,j would be consecutive transitions per class
consecutive_transitions_per_class = get_consecutive_transitions_per_class(adjacency_matrix_list_with_holes)
# printmd("### Consecutive transitions per class")
# for i, transition in enumerate(consecutive_transitions_per_class):
# printmd("#### "+class_names[i]+":")
# for x in list(transition):
# print(x)
# # print('{}:{}'.format(class_names[i], transition))
# print()
# ### Check Well Formed
# +
def check_well_formed(subset_df):
    """Check LOCM2 well-formedness of an adjacency-matrix subset.

    A subset is well-formed iff it is not all-zero and no pair of rows that
    share a positive column differ in their zero pattern (i.e. no holes).
    Returns True/False.
    """
    df = subset_df.copy()
    if (df == 0).all(axis=None):   # an all-zero subset is never well-formed
        return False
    # loop over all pairs of rows
    for i in range(0, df.shape[0] - 1):
        for j in range(i + 1, df.shape[0]):
            # BUG FIX: removed a leftover debug `print(i, j)` that spammed
            # the notebook output once per row pair.
            row1, row2 = df.iloc[i, :], df.iloc[j, :]
            # do these two rows share any positive column?
            common_values_flag = False
            for col in range(row1.shape[0]):
                if row1.iloc[col] > 0 and row2.iloc[col] > 0:
                    common_values_flag = True
                    break
            if common_values_flag:
                # rows with a common value must agree everywhere; a
                # one-sided positive cell is a hole -> not well-formed
                for col in range(row1.shape[0]):
                    if row1.iloc[col] > 0 and row2.iloc[col] == 0:
                        return False
                    elif row1.iloc[col] == 0 and row2.iloc[col] > 0:
                        return False
    return True
# -
# ### Check Valid Transitions
def check_valid(subset_df, consecutive_transitions_per_class):
    """Validate a well-formed subset against observed consecutive transitions.

    Note: Essentially we check validity against P instead of E.
    In the paper of LOCM2, it isn't mentioned how to check against E.
    Reasoning: If we check against all consecutive transitions per class,
    we essentially check against all example sequences.

    Returns False iff some positive cell's (row, column) ordered pair never
    appears in any class's observed consecutive transitions.
    """
    df = subset_df.copy()
    for r in range(df.shape[0]):
        for c in range(df.shape[0]):
            if df.iloc[r, c] > 0:
                ordered_pair = (df.index[r], df.columns[c])
                # search every class's observed pairs for this one
                observed = any(ordered_pair == ct
                               for ct_list in consecutive_transitions_per_class
                               for ct in ct_list)
                if not observed:
                    return False
    # all positive cells were backed by observed transitions
    return True
# ### LOCM2 transition sets
# +
def locm2_get_transition_sets_per_class(holes_per_class, transitions_per_class, consecutive_transitions_per_class):
    """LOCM 2 Algorithm in the original LOCM2 paper"""
    # NOTE(review): indentation reconstructed from flattened notebook text;
    # confirm nesting against the original iLOCM notebook.
    # Relies on notebook globals: class_names, printmd, findsubsets,
    # adjacency_matrix_list, print_table, check_well_formed, check_valid.
    # contains Solution Set S for each class.
    transition_sets_per_class = []
    # for each hole for a class/sort
    for index, holes in enumerate(holes_per_class):
        class_name = class_names[index]
        printmd("### "+ class_name)
        # S
        transition_set_list = [] #transition_sets_of_a_class, # intially it's empty
        if len(holes)==0:
            print("no holes") # S will contain just T_all
        if len(holes) > 0: # if there are any holes for a class
            print(str(len(holes)) + " holes")
            for ind, hole in enumerate(holes):
                printmd("#### Hole " + str(ind + 1) + ": " + str(set(hole)))
                is_hole_already_covered_flag = False
                if len(transition_set_list)>0:
                    for s_prime in transition_set_list:
                        if hole.issubset(s_prime):
                            printmd("Hole "+ str(set(hole)) + " is already covered.")
                            is_hole_already_covered_flag = True
                            break
                # discover a set which includes hole and is well-formed and valid against test data.
                # if hole is not covered, do BFS with sets of increasing sizes starting with s=hole
                if not is_hole_already_covered_flag:
                    h = hole.copy()
                    candidate_sets = []
                    # all subsets of T_all starting from hole's len +1 to T_all-1.
                    for i in range(len(h)+1,len(transitions_per_class[index])):
                        subsets = findsubsets(transitions_per_class[index],i) # all subsets of length i
                        for s in subsets:
                            if h.issubset(s): # if is subset of s
                                candidate_sets.append(set(s))
                        s_well_formed_and_valid = False
                        # candidate_sets accumulates across sizes; len(s)>=i keeps
                        # only candidates of the current (or larger) size
                        for s in candidate_sets:
                            if len(s)>=i:
                                printmd("Checking candidate set *" + str(s) + "* of class **" + class_name + "** for well formedness and Validity")
                                subset_df = adjacency_matrix_list[index].loc[list(s),list(s)]
                                print_table(subset_df)
                                # checking for well-formedness
                                well_formed_flag = False
                                well_formed_flag = check_well_formed(subset_df)
                                if not well_formed_flag:
                                    print("This subset is NOT well-formed")
                                elif well_formed_flag:
                                    print("This subset is well-formed.")
                                    # if well-formed validate across the data E
                                    # to remove inappropriate dead-ends
                                    valid_against_data_flag = False
                                    valid_against_data_flag = check_valid(subset_df, consecutive_transitions_per_class)
                                    if not valid_against_data_flag:
                                        print("This subset is well-formed but invalid against example data")
                                    if valid_against_data_flag:
                                        print("This subset is valid.")
                                        print("Adding this subset " + str(s) +" to the locm2 transition set.")
                                        if s not in transition_set_list: # do not allow copies.
                                            transition_set_list.append(s)
                                        print("Hole that is covered now:")
                                        print(list(h))
                                        s_well_formed_and_valid = True
                                        break
                        if s_well_formed_and_valid:
                            # stop growing subset size once a covering set is found
                            break
                    print(transition_set_list)
        #step 7 : remove redundant sets S - {s1}
        # drop any set that is a strict subset of another retained set
        ts_copy = transition_set_list.copy()
        for i in range(len(ts_copy)):
            for j in range(len(ts_copy)):
                if ts_copy[i] < ts_copy[j]: #if subset
                    if ts_copy[i] in transition_set_list:
                        transition_set_list.remove(ts_copy[i])
                elif ts_copy[i] > ts_copy[j]:
                    if ts_copy[j] in transition_set_list:
                        transition_set_list.remove(ts_copy[j])
        print("\nRemoved redundancy transition set list")
        print(transition_set_list)
        #step-8: include all-transitions machine, even if it is not well-formed.
        transition_set_list.append(set(transitions_per_class[index])) #fallback
        printmd("#### Final transition set list")
        print(transition_set_list)
        transition_sets_per_class.append(transition_set_list)
    return transition_sets_per_class
############ LOCM2 #################
#### Input ready for LOCM2, Starting LOCM2 algorithm now
#### Step 8: selecting transition sets (TS) [Main LOCM2 Algorithm]
# Run the LOCM2 selection over the per-class holes/transitions computed in
# earlier cells; result is, per class, a list of transition sets (one per FSM).
printmd("### Getting transitions sets for each class using LOCM2")
transition_sets_per_class = locm2_get_transition_sets_per_class(holes_per_class, transitions_per_class, consecutive_transitions_per_class)
# -
# ## Step 3: Algorithm For Induction of State Machines
#
# - Input: Action training sequence of length N
# - Output: Transition Set TS, Object states OS.
#
# We already have transition set TS per class.
def plot_cytographs_fsm(graph, domain_name):
    """Render a networkx FSM graph as an interactive ipycytoscape widget.

    Nodes are labelled by their id; edges are labelled by their 'weight'
    attribute (the transition name(s)). `domain_name` is accepted for
    interface compatibility with the other plotting helpers but is not used
    in the rendering itself.
    """
    # Edge attributes that ipycytoscape's Edge exposes as top-level fields;
    # everything else goes into the edge's `data` payload. Hoisted out of the
    # loops (the original rebuilt this list for every attribute of every
    # edge) and made a frozenset for O(1) membership tests.
    cyto_attrs = frozenset({'group', 'removed', 'selected', 'selectable',
                            'locked', 'grabbed', 'grabbable', 'classes',
                            'position', 'data'})
    cytoscapeobj = CytoscapeWidget()
    cytoscapeobj.graph.add_graph_from_networkx(graph)
    edge_list = list()
    for source, target, data in graph.edges(data=True):
        edge_instance = Edge()
        edge_instance.data['source'] = source
        edge_instance.data['target'] = target
        for k, v in data.items():
            if k in cyto_attrs:
                setattr(edge_instance, k, v)
            else:
                edge_instance.data[k] = v
        edge_list.append(edge_instance)
    cytoscapeobj.graph.edges = edge_list
    # print("Nodes:{}".format(graph.nodes()))
    # print("Edges:{}".format(graph.edges()))
    cytoscapeobj.set_style([{
        'width':400,
        'height':500,
        'selector': 'node',
        'style': {
            'label': 'data(id)',
            'font-family': 'helvetica',
            'font-size': '8px',
            'background-color': '#11479e',
            'height':'10px',
            'width':'10px',
        }
    },
    {
        'selector': 'node:parent',
        'css': {
            'background-opacity': 0.333,
            'background-color': '#bbb'
        }
    },
    {
        'selector': '$node > node',
        'css': {
            'padding-top': '10px',
            'padding-left': '10px',
            'padding-bottom': '10px',
            'padding-right': '10px',
            'text-valign': 'top',
            'text-halign': 'center',
            'background-color': '#bbb'
        }
    },
    {
        'selector': 'edge',
        'style': {
            'label':'data(weight)',
            'width': 1,
            'line-color': '#9dbaea',
            'target-arrow-shape': 'triangle',
            'target-arrow-color': '#9dbaea',
            'arrow-scale': 0.5,
            'curve-style': 'bezier',
            'font-family': 'helvetica',
            'font-size': '8px',
            'text-valign': 'top',
            'text-halign':'center'
        }
    },
    ])
    cytoscapeobj.max_zoom = 2.0
    cytoscapeobj.min_zoom = 0.5
    display(cytoscapeobj)
# #### First make start(t) and end(t) as state for each transition in FSM.
# +
# Build one FSM (networkx DiGraph) per transition set of every class:
# each transition t contributes an edge s(t) -> e(t); nodes are then merged
# wherever the adjacency matrix says e(t1) is the same state as s(t2).
state_machines_overall_list = []
for index, ts_class in enumerate(transition_sets_per_class):
    fsms_per_class = []
    printmd("### "+ class_names[index])
    num_fsms = len(ts_class)
    print("Number of FSMS:" + str(num_fsms))
    for fsm_no, ts in enumerate(ts_class):
        fsm_graph = nx.DiGraph()
        printmd("#### FSM " + str(fsm_no))
        for t in ts:
            source = "s(" + str(t) + ")"
            target = "e(" + str(t) + ")"
            fsm_graph.add_edge(source,target,weight=t)
        t_df = adjacency_matrix_list[index].loc[list(ts), list(ts)] #transition df for this fsm
        print_table(t_df)
        # merge end(t1) = start(t2) from transition df
        edge_t_list = [] # edge transition list
        # NOTE(review): edge_t_list is never used below — confirm dead code.
        for i in range(t_df.shape[0]):
            for j in range(t_df.shape[1]):
                if t_df.iloc[i, j] != 'hole':
                    if t_df.iloc[i, j] > 0:
                        # find the (possibly already-merged) nodes containing
                        # e(t_i) and s(t_j)
                        # NOTE(review): merge_node1/merge_node2 are unbound if no
                        # matching node exists — relies on both always being found.
                        for node in fsm_graph.nodes():
                            if "e("+t_df.index[i]+")" in node:
                                merge_node1 = node
                            if "s("+t_df.index[j]+")" in node:
                                merge_node2 = node
                        fsm_graph = nx.contracted_nodes(fsm_graph, merge_node1, merge_node2 , self_loops=True)
                        if merge_node1 != merge_node2:
                            mapping = {merge_node1: merge_node1 + "|" + merge_node2}
                            fsm_graph = nx.relabel_nodes(fsm_graph, mapping)
        # we need to complete the list of transitions
        # that can happen on self-loop nodes
        # as these have been overwritten (as graph is not MultiDiGraph)
        sl_state_list = list(nx.nodes_with_selfloops(fsm_graph)) # self looping states.
        # if state is self-looping
        t_list = []
        # NOTE(review): t_list is not reset per self-loop state, so a later
        # state's weight also carries earlier states' transitions — verify.
        if len(sl_state_list)>0:
            # if s(T1) and e(T1) are there for same node, this T1 can self-loop occur.
            for s in sl_state_list:
                for sub_s in s.split('|'):
                    if sub_s[0] == 'e':
                        if ('s' + sub_s[1:]) in s.split('|'):
                            t_list.append(sub_s[2:-1])
                fsm_graph[s][s]['weight'] = '|'.join(t_list)
        plot_cytographs_fsm(fsm_graph,domain_name)
        df = nx.to_pandas_adjacency(fsm_graph, nodelist=fsm_graph.nodes(), weight = 1)
        print_table(df)
        fsms_per_class.append(fsm_graph)
    state_machines_overall_list.append(fsms_per_class)
# -
# ## USER INPUT 3: Rename States.
#
#
# As states are shown in terms of end and start of transitions, user can rename them for easy readability later on.
#
# If states are renamed, certain hardcoded aspects of code won't work. It is advisable to create a separate state dictionary and use it after step 9: (formation of PDDL model) to replace states in PDDL code.
#
#
# This also makes it easier to specify problem statements.
# ### Automatic creation: rename states as integers 0, 1, 2 .. etc. for each fsm.
# +
# An Automatic state dictionary is added here where states are
# renamed as 0, 1, 2 etc. for a specific FSM
# Produces, parallel to state_machines_overall_list: a copy of each FSM with
# integer node labels (state_machines_overall_list_2) and the mapping
# original-label -> integer (state_mappings_class) used by the PDDL emitter.
state_mappings_class = []
state_machines_overall_list_2 = []
for index, fsm_graphs in enumerate(state_machines_overall_list):
    state_mappings_fsm = []
    fsms_per_class_2 = []
    printmd("### "+ class_names[index])
    num_fsms = len(fsm_graphs)
    print("Number of FSMS:" + str(num_fsms))
    for fsm_no, G in enumerate(fsm_graphs):
        # enumerate node order defines the integer state ids
        state_mapping = {k: v for v, k in enumerate(G.nodes())}
        G_copy = nx.relabel_nodes(G, state_mapping)
        plot_cytographs_fsm(G, domain_name)
        plot_cytographs_fsm(G_copy, domain_name)
        printmd("Fsm "+ str(fsm_no))
        fsms_per_class_2.append(G_copy)
        state_mappings_fsm.append(state_mapping)
    state_machines_overall_list_2.append(fsms_per_class_2)
    state_mappings_class.append(state_mappings_fsm)
# -
# ### Looking at the graph, User can specify states here
# +
# User can specify states here.
# assign your states in state dictionary called state_mapping
# e.g. state_mapping['e(removewheek.2)|s(putonwheel.2)'] = 'jack_free_to_use'
# -
# ## Step 5: Induction of parameterized state machines
# Create and test hypothesis for state parameters
# ### Form Hyp for HS (Hypothesis set)
# +
# Step 5 (LOCM): for every pair of consecutive transitions B.k -> C.l in each
# FSM, hypothesise that a second shared-sort argument (positions k' in B, l'
# in C) is a state parameter. Hypothesis tuple layout:
# (frozenset{e(B.k), s(C.l)}, B, k, k', C, l, l', class_name, param_class_name)
HS_list = []
ct_list = []
# for transition set of each class
for index, ts_class in enumerate(transition_sets_per_class):
    printmd("### "+ class_names[index])
    ct_per_class = []
    HS_per_class = []
    # for transition set of each fsm in a class
    for fsm_no, ts in enumerate(ts_class):
        printmd("#### FSM: " + str(fsm_no) + " Hypothesis Set")
        # transition matrix for the ts
        t_df = adjacency_matrix_list[index].loc[list(ts), list(ts)]
        ct_in_fsm = set()  # find consecutive transition set for a state machine in a class.
        for i in range(t_df.shape[0]):
            for j in range(t_df.shape[1]):
                if t_df.iloc[i, j] != 'hole':
                    if t_df.iloc[i, j] > 0:
                        ct_in_fsm.add((t_df.index[i], t_df.columns[j]))
        ct_per_class.append(ct_in_fsm)
        # add to hypothesis set
        HS = set()
        # for each pair B.k and C.l in TS s.t. e(B.k) = S = s(C.l)
        for ct in ct_in_fsm:
            B = ct[0].split('.')[0]  # action name of T1
            k = int(ct[0].split('.')[1])  # argument index of T1
            C = ct[1].split('.')[0]  # action name of T2
            l = int(ct[1].split('.')[1])  # argument index of T2
            # When both actions B and C contain another argument of the same sort G' in position k' and l' respectively,
            # we hypothesise that there may be a relation between sorts G and G'.
            for seq in sequences:
                for actarg_tuple in seq:
                    arglist1 = []
                    arglist2 = []
                    if actarg_tuple[0] == B: #if action name is same as B
                        arglist1 = actarg_tuple[1].copy()
                        # arglist1.remove(actarg_tuple[1][k]) # remove k from arglist
                        for actarg_tuple_prime in seq: #loop through seq again.
                            if actarg_tuple_prime[0] == C:
                                arglist2 = actarg_tuple_prime[1].copy()
                                # arglist2.remove(actarg_tuple_prime[1][l]) # remove l from arglist
                        # for arg lists of actions B and C, if class is same add a hypothesis set.
                        for i in range(len(arglist1)): # if len is 0, we don't go in
                            for j in range(len(arglist2)):
                                class1 = get_class_index(arglist1[i], classes)
                                class2 = get_class_index(arglist2[j], classes)
                                if class1 == class2: # if object at same position have same classes
                                    # add hypothesis to hypothesis set.
                                    if (k!=i) and (l!=j):
                                        HS.add((frozenset({"e("+B+"."+ str(k)+")", "s("+C+"."+str(l)+")"}),B,k,i,C,l,j,class_names[index],class_names[class1]))
        print(str(len(HS))+ " hypothesis created")
        # for h in HS:
        #     print(h)
        HS_per_class.append(HS)
    HS_list.append(HS_per_class)
    ct_list.append(ct_per_class)
# -
# ### Test hyp against E
# Test every hypothesis against the example sequences E: a hypothesis is
# dropped as soon as one pair of consecutive transitions for some object
# disagrees on the value of the hypothesised extra argument.
HS_list_retained = []
for index, HS_class in enumerate(HS_list):
    printmd("### "+ class_names[index])
    HS_per_class_retained = []
    for fsm_no, HS in enumerate(HS_class):
        printmd("#### FSM: " + str(fsm_no) + " Hypothesis Set")
        count=0
        HS_copy = HS.copy()   # survivors; failing hypotheses are removed from here
        HS_copy2 = HS.copy()  # frozen copy, safe to iterate while removing
        # for each object O occuring in Ou
        for O in arguments:
            # for each pair of transitions Ap.m and Aq.n consecutive for O in seq
            ct = []
            # NOTE(review): ct concatenates occurrences across all sequences,
            # so a pair (ct[i], ct[i+1]) can straddle a sequence boundary —
            # confirm this is intended.
            for seq in sequences:
                for actarg_tuple in seq:
                    act = actarg_tuple[0]
                    for j, arg in enumerate(actarg_tuple[1]):
                        if arg == O:
                            ct.append((act + '.' + str(j), actarg_tuple[1]))
            for i in range(len(ct)-1):
                A_p = ct[i][0].split('.')[0]
                m = int(ct[i][0].split('.')[1])
                A_q = ct[i+1][0].split('.')[0]
                n = int(ct[i+1][0].split('.')[1])
                # for each hypothesis H s.t. A_p = B, m = k, A_q = C, n = l
                for H in HS_copy2:
                    if A_p == H[1] and m == H[2] and A_q == H[4] and n == H[5]:
                        k_prime = H[3]
                        l_prime = H[6]
                        # if O_p,k_prime = Q_q,l_prime
                        if ct[i][1][k_prime] != ct[i+1][1][l_prime]:
                            if H in HS_copy:
                                HS_copy.remove(H)
                                count += 1
        print(str(len(HS_copy))+ " hypothesis retained")
        # state machine
        # if len(HS_copy)>0:
        #     plot_cytographs_fsm(state_machines_overall_list[index][fsm_no],domain_name)
        # for H in HS_copy:
        #     print(H)
        HS_per_class_retained.append(HS_copy)
    HS_list_retained.append(HS_per_class_retained)
# ## Step 6: Creation and merging of state parameters
# +
# Each hypothesis refers to an incoming and outgoing transition
# through a particular state of an FSM
# and matching associated transitions can be considered
# to set and read parameters of a state.
# Since there maybe multiple transitions through a give state,
# it is possible for the same parameter to have multiple
# pairwise occurences.
print("Step 6: creating and merging state params")
param_bindings_list_overall = []
for classindex, HS_per_class in enumerate(HS_list_retained):
param_bind_per_class = []
for fsm_no, HS_per_fsm in enumerate(HS_per_class):
param_binding_list = []
# fsm in consideration
G = state_machines_overall_list[classindex][fsm_no]
state_list = G.nodes()
# creation
for index,h in enumerate(HS_per_fsm):
param_binding_list.append((h,"v"+str(index)))
merge_pl = [] # parameter to merge list
if len(param_binding_list)>1:
# merging
pairs = findsubsets(param_binding_list, 2)
for pair in pairs:
h_1 = pair[0][0]
h_2 = pair[1][0]
# equate states
state_eq_flag = False
for s_index, state in enumerate(state_list):
# if both hyp states appear in single state in fsm
if list(h_1[0])[0] in state:
if list(h_1[0])[0] in state:
state_eq_flag =True
if ((state_eq_flag and h_1[1] == h_2[1] and h_1[2] == h_2[2] and h_1[3] == h_2[3]) or (state_eq_flag and h_1[4] == h_2[4] and h_1[5] == h_2[5] and h_1[6] == h_2[6])):
merge_pl.append(list([pair[0][1], pair[1][1]]))
#inner lists to sets (to list of sets)
l=[set(x) for x in merge_pl]
#cartesian product merging elements if some element in common
for a,b in itertools.product(l,l):
if a.intersection( b ):
a.update(b)
b.update(a)
#back to list of lists
l = sorted( [sorted(list(x)) for x in l])
#remove dups
merge_pl = list(l for l,_ in itertools.groupby(l))
# sort
for pos, l in enumerate(merge_pl):
merge_pl[pos] = sorted(l, key = lambda x: int(x[1:]))
print(merge_pl) # equal params appear in a list in this list.
for z,pb in enumerate(param_binding_list):
for l in merge_pl:
if pb[1] in l:
# update pb
param_binding_list[z] = (param_binding_list[z][0], l[0])
param_bind_per_class.append(param_binding_list)
print(class_names[classindex])
# set of params per class
param = set()
for pb in param_binding_list:
# print(pb)
param.add(pb[1])
# num of params per class
printmd("No. of params earlier:" + str(len(param_binding_list)))
printmd("No. of params after merging:" + str(len(param)))
param_bindings_list_overall.append(param_bind_per_class)
# -
# ## Step 7: Remove Parameter Flaws
# +
# Removing State Params.
# Flaw occurs Object can reach state S with param P having an inderminate value.
# There is transition s.t. end(B.k) = S.
# but there is no h = <S,B,k,k',C,l,l',G,G') and <h,P> is in bindings.
# For every incoming transition of every state, check that some retained
# hypothesis sets the state's parameters; otherwise all bindings touching
# that state are flagged faulty and dropped.
para_bind_overall_fault_removed = []
for classindex, fsm_per_class in enumerate(state_machines_overall_list):
    print(class_names[classindex])
    pb_per_class_fault_removed = []
    for fsm_no, G in enumerate(fsm_per_class):
        pb_per_fsm_fault_removed = []
        # G is fsm in consideration
        faulty_pb = []
        for state in G.nodes():
            inedges = G.in_edges(state, data=True)
            for ie in inedges:
                # edge weight holds '|'-joined transition names like "act.k"
                tr = ie[2]['weight']
                t_list = tr.split('|')
                for t in t_list:
                    B = t.split('.')[0]
                    k = t.split('.')[1]
                    S = 'e(' + t + ')'
                    flaw = True
                    for pb in param_bindings_list_overall[classindex][fsm_no]:
                        H = pb[0]
                        v = pb[1]
                        if (S in set(H[0])) and (B==H[1]) and (int(k)==H[2]) :
                            # this pb is okay
                            flaw=False
                    # print(flaw)
                    if flaw:
                        # flag every binding whose hypothesis mentions this state
                        for pb in param_bindings_list_overall[classindex][fsm_no]:
                            H = pb[0]
                            H_states = list(H[0])
                            for h_state in H_states:
                                if h_state in state:
                                    if pb not in faulty_pb:
                                        faulty_pb.append(pb) # no duplicates
        for pb in param_bindings_list_overall[classindex][fsm_no]:
            if pb not in faulty_pb:
                pb_per_fsm_fault_removed.append(pb)
        print(str(len(pb_per_fsm_fault_removed)) + "/" + str(len(param_bindings_list_overall[classindex][fsm_no])) + " param retained")
        for pb in pb_per_fsm_fault_removed:
            print(pb)
        pb_per_class_fault_removed.append(pb_per_fsm_fault_removed)
    para_bind_overall_fault_removed.append(pb_per_class_fault_removed)
# -
# ## Step 8: (TODO) Static Preconditions via LOP
# As further enhancement, one can add step 8: Extraction of static preconditions from the LOCM paper.
# However, LOP algorithm is better version of that step.
#
# Insert [LOP](https://www.aaai.org/ocs/index.php/ICAPS/ICAPS15/paper/viewFile/10621/10401) here for finding static preconditions
# ## Step 9: Formation of PDDL Schema
# +
# get action schema
# Emit the learned domain as PDDL: types from class names, one predicate per
# FSM state, and one action schema per observed action whose preconditions
# come from edge start-states and effects from edge end-states.
# NOTE(review): indentation reconstructed from flattened notebook text.
# NOTE(review): predicate-name parsing below splits on '_' — assumes class
# names contain no underscores; confirm.
print(";;********************Learned PDDL domain******************")
output_file = "output/"+ domain_name + "/" + domain_name + ".pddl"
write_file = open(output_file, 'w')
write_line = "(define"
write_line += " (domain "+ domain_name+")\n"
write_line += " (:requirements :typing)\n"
write_line += " (:types"
for class_name in class_names:
    write_line += " " + class_name
write_line += ")\n"
write_line += " (:predicates\n"
# one predicate to represent each object state
predicates = []
for class_index, pb_per_class in enumerate(para_bind_overall_fault_removed):
    for fsm_no, pbs_per_fsm in enumerate(pb_per_class):
        for state_index, state in enumerate(state_machines_overall_list[class_index][fsm_no].nodes()):
            state_set = set(state.split('|'))
            predicate = ""
            write_line += " (" + class_names[class_index] + "_fsm" + str(fsm_no) + "_" + state
            predicate += " (" + class_names[class_index] + "_fsm" + str(fsm_no) + "_" + state
            # attach each retained state parameter whose hypothesis states all
            # lie inside this FSM state
            for pb in pbs_per_fsm:
                if set(pb[0][0]) <= state_set:
                    if " ?"+pb[1] + " - " + str(pb[0][8]) not in predicate:
                        write_line += " ?"+pb[1] + " - " + str(pb[0][8])
                        predicate += " ?"+pb[1] + " - " + str(pb[0][8])
            write_line += ")\n"
            predicate += ")"
            predicates.append(predicate)
write_line += " )\n"
for action_index, action in enumerate(actions):
    write_line += "\n"
    write_line += " (:action"
    write_line += " " + action + " "
    write_line += " :parameters"
    write_line += " ("
    arg_already_written_flag = False
    params_per_action = []
    args_per_action = []
    # take the argument list from the first occurrence of this action in the
    # example sequences
    for seq in sequences:
        for actarg_tuple in seq:
            if not arg_already_written_flag:
                if actarg_tuple[0] == action:
                    arglist = []
                    for arg in actarg_tuple[1]:
                        write_line += "?"+arg + " - " + class_names[get_class_index(arg,classes)] + " "
                        arglist.append(arg)
                    args_per_action.append(arglist)
                    params_per_action.append(actarg_tuple[1])
                    arg_already_written_flag = True
    write_line += ")\n"
    # need to use FSMS to get preconditions and effects.
    # Start-state = precondition. End state= Effect
    preconditions = []
    effects = []
    for arglist in params_per_action:
        for arg in arglist:
            current_class_index = get_class_index(arg, classes)
            for fsm_no, G in enumerate(state_machines_overall_list[current_class_index]):
                #
                for start, end, weight in G.edges(data='weight'):
                    _actions = weight.split('|')
                    for _action in _actions:
                        if _action.split('.')[0] == action:
                            for predicate in predicates:
                                pred = predicate.split()[0].lstrip("(")
                                clss = pred.split('_')[0]
                                fsm = pred.split('_')[1]
                                state = set(pred.split('_')[2].replace('))',')').split('|'))
                                if clss == class_names[current_class_index]:
                                    if fsm == "fsm" + str(fsm_no):
                                        if state == set(start.split('|')):
                                            if predicate not in preconditions:
                                                preconditions.append(predicate)
                                        if state == set(end.split('|')):
                                            if predicate not in effects:
                                                effects.append(predicate)
                            break
    write_line += " :precondition"
    write_line += " (and\n"
    for precondition in preconditions:
        # precondition = precondition.replace(?)
        write_line += " "+precondition+"\n"
    write_line += " )\n"
    write_line += " :effect"
    write_line += " (and\n"
    for effect in effects:
        write_line += " " + effect + "\n"
    write_line += " )"
    write_line += ")\n"
write_line += ")\n" #domain ending bracket
print(write_line)
write_file.write(write_line)
write_file.close()
# -
# ### Validating PDDL -- Fixing Syntax by replacing predicates with state dictionary values
# This is required because PDDL syntax doesn't support extra paranthesis () which occur in states (transitions occuring in states as 'start(t1)' or 'end(t1)')
# +
# get action schema
# Second pass of the PDDL emitter: identical to the previous cell except FSM
# states are written via the integer state_mapping ("stateN"), so predicate
# names contain no parentheses and the emitted PDDL is syntactically valid.
# Overwrites the same output file.
print(";;********************Learned PDDL domain******************")
output_file = "output/"+ domain_name + "/" + domain_name + ".pddl"
write_file = open(output_file, 'w')
write_line = "(define"
write_line += " (domain "+ domain_name+")\n"
write_line += " (:requirements :typing)\n"
write_line += " (:types"
for class_name in class_names:
    write_line += " " + class_name
write_line += ")\n"
write_line += " (:predicates\n"
# one predicate to represent each object state
predicates = []
for class_index, pb_per_class in enumerate(para_bind_overall_fault_removed):
    for fsm_no, pbs_per_fsm in enumerate(pb_per_class):
        state_mapping = state_mappings_class[class_index][fsm_no]
        for state_index, state in enumerate(state_machines_overall_list[class_index][fsm_no].nodes()):
            state_set = set(state.split('|'))
            predicate = ""
            write_line += " (" + class_names[class_index] + "_fsm" + str(fsm_no) + "_state" + str(state_mapping[state])
            predicate += " (" + class_names[class_index] + "_fsm" + str(fsm_no) + "_state" + str(state_mapping[state])
            for pb in pbs_per_fsm:
                if set(pb[0][0]) <= state_set:
                    if " ?"+pb[1] + " - " + str(pb[0][8]) not in predicate:
                        write_line += " ?"+pb[1] + " - " + str(pb[0][8])
                        predicate += " ?"+pb[1] + " - " + str(pb[0][8])
            write_line += ")\n"
            predicate += ")"
            predicates.append(predicate)
write_line += " )\n"
for action_index, action in enumerate(actions):
    write_line += " (:action"
    write_line += " " + action + " "
    write_line += " :parameters"
    write_line += " ("
    arg_already_written_flag = False
    params_per_action = []
    args_per_action = []
    for seq in sequences:
        for actarg_tuple in seq:
            if not arg_already_written_flag:
                if actarg_tuple[0] == action:
                    arglist = []
                    for arg in actarg_tuple[1]:
                        write_line += "?"+arg + " - " + class_names[get_class_index(arg,classes)] + " "
                        arglist.append(arg)
                    args_per_action.append(arglist)
                    params_per_action.append(actarg_tuple[1])
                    arg_already_written_flag = True
    write_line += ")\n"
    # need to use FSMS to get preconditions and effects.
    # Start-state = precondition. End state= Effect
    preconditions = []
    effects = []
    for arglist in params_per_action:
        for arg in arglist:
            current_class_index = get_class_index(arg, classes)
            for fsm_no, G in enumerate(state_machines_overall_list[current_class_index]):
                G_int = state_machines_overall_list_2[current_class_index][fsm_no]
                state_mapping = state_mappings_class[current_class_index][fsm_no]
                for start, end, weight in G_int.edges(data='weight'):
                    _actions = weight.split('|')
                    for _action in _actions:
                        if _action.split('.')[0] == action:
                            for predicate in predicates:
                                pred = predicate.split()[0].lstrip("(")
                                clss = pred.split('_')[0]
                                fsm = pred.split('_')[1]
                                # NOTE(review): [-1] takes only the LAST char of
                                # "stateN" — wrong for state indices >= 10;
                                # confirm FSMs stay below 10 states.
                                state_ind = pred.split('_')[2].rstrip(")")[-1]
                                if clss == class_names[current_class_index]:
                                    if fsm == "fsm" + str(fsm_no):
                                        if int(state_ind) == int(start):
                                            if predicate not in preconditions:
                                                preconditions.append(predicate)
                                        if int(state_ind) == int(end):
                                            if predicate not in effects:
                                                effects.append(predicate)
                            break
    write_line += " :precondition"
    write_line += " (and\n"
    for precondition in preconditions:
        write_line += " "+precondition+"\n"
    write_line += " )\n"
    write_line += " :effect"
    write_line += " (and\n"
    for effect in effects:
        write_line += " " + effect + "\n"
    write_line += " )"
    write_line += ")\n\n"
write_line += ")\n" #domain ending bracket
print(write_line)
write_file.write(write_line)
write_file.close()
# -
# ### State Mapping: What are these states?
# +
# To see what these states are, look at the following graphs
# Plot each FSM twice: with integer state labels and with the original
# s(...)/e(...) labels, so the mapping can be read off visually.
for index, fsm_graphs in enumerate(state_machines_overall_list):
    printmd("## Class " + str(index))
    printmd("### "+ class_names[index])
    # NOTE(review): num_fsms is left over from an earlier cell, not
    # len(fsm_graphs) — the printed count may be stale.
    print("Number of FSMS:" + str(num_fsms))
    for fsm_no, G in enumerate(fsm_graphs):
        printmd("Fsm "+ str(fsm_no))
        plot_cytographs_fsm(state_machines_overall_list_2[index][fsm_no], domain_name)
        plot_cytographs_fsm(G, domain_name)
# -
# ### State Mappings: Text format
# Pretty-print the original-label -> integer state mapping per class and FSM.
for index, sm_fsm in enumerate(state_mappings_class):
    printmd("## Class " + str(index))
    printmd("### "+ class_names[index])
    for fsm_no, mapping in enumerate(sm_fsm):
        printmd("Fsm "+ str(fsm_no))
        pprint(mapping)
| iLOCM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="tMDG6tALmPWM" colab_type="code" colab={}
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import preprocessing
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
# + id="wKTLf7Z_maHP" colab_type="code" colab={}
df= pd.read_csv("https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter05/Datasets/online_shoppers_intention.csv")
# + id="Ax07WHy3mfBu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="49554075-c739-4a29-8ca3-a9210d25d46f"
df.head()
# + id="ArbnDQmRmtz-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="56619e91-2d35-430d-cf6a-649edaac6f63"
df.info()
# + id="xjh_AYnpmxR1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 340} outputId="6ebdd20b-7917-43de-fbda-f8586e007dd4"
df.isnull().sum()
# + [markdown] id="piR5GtCznaPx" colab_type="text"
# **Exercise 5.01**
# + id="YuYSw6SjngEJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 300} outputId="acac12b4-3d36-4270-8f4d-316b205728bc"
sns.countplot(df['Weekend'])
plt.title('Weekend Session Distribution', fontsize = 20)
plt.show()
# + id="Yh_nY_UQnhZs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="1c6e64ff-430e-4b25-ab63-166774048a3a"
print(df['Weekend'].value_counts())
print()
print(df['Weekend'].value_counts(normalize=True))
| Chapter05/Exercise5.01/Exercise5.01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#This Classifier is used for classifying whether it is a character or an non-text object
import pickle
#Load training dataset of street view character and non-text object images
# NOTE(review): pickle.load assumes these are trusted local artifacts —
# never unpickle untrusted data. X_char is loaded but the combination cell
# below uses X instead; confirm which feature set is intended.
with open("X_char.txt", "rb") as X_file_r: # Unpickling
    X_char = pickle.load(X_file_r)
with open("X_obj.txt", "rb") as X_obj_file_r: # Unpickling
    X_obj = pickle.load(X_obj_file_r)
with open("X.txt", "rb") as X_file_r: # Unpickling
    X = pickle.load(X_file_r)
# -
#Create labels for training of the model
#1 represents it is a character
#0 represents it is an non-text object
# NOTE(review): label counts (13262 / 14000) are hard-coded — confirm they
# match len(X) and len(X_obj) loaded above.
y_label_svc=[1]*13262+[0]*14000
# +
import matplotlib.pyplot as plt
import numpy as np
#Combine training dataset of street view character and non-text object
X_svc=X+X_obj
X_svc=np.array(X_svc)
#Shuffle the training dataset randomly so that the classes of 0 and 1 are distributed randomly
from sklearn.utils import shuffle
X_svc, y_label_svc = shuffle(X_svc, y_label_svc, random_state=10)
# +
#Package sklearn required
from sklearn import svm
from sklearn import linear_model
# FIX: `sklearn.cross_validation` was removed in scikit-learn 0.20;
# `train_test_split` lives in `sklearn.model_selection`, which this cell
# already imports from for `cross_val_score`.
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
#Split 2/3 of dataset for training and 1/3 of dataset for testing through cross validation in order to evaluate the model
X_svc_train, X_svc_test, y_svc_train, y_svc_test = train_test_split(X_svc, y_label_svc, test_size=0.33, random_state=5)
#Compare support vector machine classifier (SVC) and logistic regression classifier
#reference: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
svmclf = svm.SVC()
#reference: http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
logclf = linear_model.LogisticRegression()
#Choose a model to maximise recall, which is less false negative through 5-fold cross validation
scores_svm = cross_val_score(svmclf, X_svc_train, y_svc_train, cv=5,scoring='recall')
scores_log = cross_val_score(logclf, X_svc_train, y_svc_train, cv=5,scoring='recall')
print("Recall of SVC: %0.2f (+/- %0.2f)" % (scores_svm.mean(), scores_svm.std() * 2))
print("Recall of Logistic Regression: %0.2f (+/- %0.2f)" % (scores_log.mean(), scores_log.std() * 2))
#Logistic Regression and SVC perform similarly. SVC is more generalized to separate two classes
# -
# FIX: `sklearn.externals.joblib` was deprecated in scikit-learn 0.21 and
# removed in 0.23; joblib is a required scikit-learn dependency, so the
# documented migration is to import it directly.
import joblib
#Fit and save the SVC model
svmclf.fit(X_svc, y_label_svc)
joblib.dump(svmclf, 'svmmodel.pkl')
#code to load the model
#svmclf = joblib.load('svmmodel.pkl')
| NonText_Object_Classifier(SVC).ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.2
# language: julia
# name: julia-1.4
# ---
using DrWatson
@quickactivate "MEngProject"
using MEngProject
using CUDA, DifferentialEquations, PyPlot, NNlib, ImageFiltering, Images, MEngProject, MEngProject.LamKernels, MEngProject.Laminart, MEngProject.Utils, BenchmarkTools, Test
# Lift a 2-D image to 4-D by appending two singleton dims: (h, w) -> (h, w, 1, 1).
function reshape2d_4d(img::AbstractArray)
    h, w = size(img, 1), size(img, 2)
    reshape(img, h, w, 1, 1)
end
# +
# GPU pipeline setup: load the grayscale test image, lift it to a 4-D
# (H, W, 1, 1) CuArray, build the LaminartGPU kernel parameters and the
# feed-forward input r_, and stash both in the parameter named tuple p_.
img_ = convert(Array{Float32,2}, load(datadir("Iine_100_100_gs.png")));
img_ = reshape2d_4d(img_)
img_ = CuArray(img_)
r_ = similar(img_)
p_ = LaminartGPU.kernels(img_, Parameters.parameters);
LaminartGPU.I_u!(r_, img_, p_)
temp_out = (I = img_, r = r_)
p_ = merge(p_, temp_out);
# -
# +
# CPU reference pipeline: same image and parameters via the CPU Laminart path.
img = convert(Array{Float32,2}, load(datadir("Iine_100_100_gs.png")));
p = Laminart.kernels(img, Parameters.parameters);
p = Laminart.add_I_u_p(img, p);
# +
# Random initial state (5K+2 layers) and auxiliary buffers for the CPU ODE.
u0 = reshape(rand(Float32, p.dim_i, p.dim_j*(5*p.K+2)), p.dim_i, p.dim_j, 5*p.K+2);
x_lgn = reshape(rand(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K)
C = reshape(rand(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K)
H_z = reshape(rand(Float32, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, p.K)
f = Laminart.MyFunction(x_lgn, C, H_z)
# NOTE(review): `tspan` is first assigned in a LATER cell — this line only
# runs if that cell executed first; confirm intended cell order.
prob = ODEProblem(f, u0, tspan, p);
# -
# -
# Solve the same Laminart ODE on GPU (4-D CuArray state) and CPU (3-D state)
# and compare interior regions / per-layer views with @test.
tspan = (0.0f0, 100.0f0)
u0_ = CuArray(reshape(u0, p.dim_i, p.dim_j, 5*p.K+2,1));
# u0_ = CuArray(reshape(zeros(Float32, p.dim_i, p.dim_j*(5*p.K+2)), p.dim_i, p.dim_j, 5*p.K+2,1));
size(x_lgn)
# +
# GPU auxiliary buffers.
# NOTE(review): x_lgn_ takes only channel 1 of x_lgn while the CPU version
# carries all p.K channels — confirm the GPU code really expects a single
# channel here.
x_lgn_ = CuArray(reshape(x_lgn[:,:,1], p.dim_i, p.dim_j, 1,1))
# x_lgn = CuArray(reshape(Array{Float32}(undef, p.dim_i, p.dim_j * p.K), p.dim_i, p.dim_j, 1,1))
C_ = CuArray(reshape(C, p.dim_i, p.dim_j, p.K,1))
H_z_ = CuArray(reshape(H_z, p.dim_i, p.dim_j, p.K,1))
f_ = LaminartGPU.MyFunction(x_lgn_, C_, H_z_)
# -
prob_= ODEProblem(f_, u0_, tspan, p_);
sol_ = solve(prob_)
sol = solve(prob)
size(sol(100))
size(sol_(100))
# Compare interior region (avoids boundary-handling differences at the edges).
@test sol(100)[30:70,30:70,:] ≈ Array(sol_(100)[30:70,30:70,:,1])
Utils.plot_rb(sol_(100)[:,:,1,1])
Utils.plot_rb(sol(100)[:,:,1])
# NOTE(review): plain assignment aliases in Julia — u_, du_ and u0_ are all
# the SAME CuArray here (no copy); the "du" views below therefore read the
# initial state, not a separate derivative buffer.
u_ = u0_
du_ = u0_
# Slice the packed GPU state into per-quantity views (x, y, m, z, s, v⁺, v⁻).
@inbounds begin
    x_ = CuArray(@view u_[:, :, 1:p.K,:])
    y_ = CuArray(@view u_[:, :, p.K+1:2*p.K,:])
    m_ = CuArray(@view u_[:, :, 2*p.K+1:3*p.K,:])
    z_ = CuArray(@view u_[:, :, 3*p.K+1:4*p.K,:])
    s_ = CuArray(@view u_[:, :, 4*p.K+1:5*p.K,:])
    # C = @view u[:, :, 5*p.K+1:6*p.K]
    # H_z = @view u[:, :, 6*p.K+1:7*p.K]
    v_p_ = CuArray(@view u_[:, :, 5*p.K+1:5*p.K+1,:])
    v_m_ = CuArray(@view u_[:, :, 5*p.K+2:5*p.K+2,:])
    # x_lgn = @view u[:, :, 7*p.K+3]
    dx_ = CuArray(@view du_[:, :, 1:p.K,:])
    dy_ = CuArray(@view du_[:, :, p.K+1:2*p.K,:])
    dm_ = CuArray(@view du_[:, :, 2*p.K+1:3*p.K,:])
    dz_ = CuArray(@view du_[:, :, 3*p.K+1:4*p.K,:])
    ds_ = CuArray(@view du_[:, :, 4*p.K+1:5*p.K,:])
    dv_p_ = CuArray(@view du_[:, :, 5*p.K+1:5*p.K+1,:])
    dv_m_ = CuArray(@view du_[:, :, 5*p.K+2:5*p.K+2,:])
end;
u_[:, :, 5*p.K+1:5*p.K+1,:][:,:,1,1]
# NOTE(review): `u` is referenced here but only assigned on the next line —
# this works only if the cell has been run before; confirm cell order.
u[:,:,5*p.K+1]
u = u0
du = u0
# Same per-quantity views for the CPU state (3-D layout, no trailing dim).
@inbounds begin
    x = @view u[:, :, 1:p.K]
    y = @view u[:, :, p.K+1:2*p.K]
    m = @view u[:, :, 2*p.K+1:3*p.K]
    z = @view u[:, :, 3*p.K+1:4*p.K]
    s = @view u[:, :, 4*p.K+1:5*p.K]
    # C = @view u[:, :, 5*p.K+1:6*p.K]
    # H_z = @view u[:, :, 6*p.K+1:7*p.K]
    v_p = @view u[:, :, 5*p.K+1]
    v_m = @view u[:, :, 5*p.K+2]
    # x_lgn = @view u[:, :, 7*p.K+3]
    dx = @view du[:, :, 1:p.K]
    dy = @view du[:, :, p.K+1:2*p.K]
    dm = @view du[:, :, 2*p.K+1:3*p.K]
    dz = @view du[:, :, 3*p.K+1:4*p.K]
    ds = @view du[:, :, 4*p.K+1:5*p.K]
    dv_p = @view du[:, :, 5*p.K+1]
    dv_m = @view du[:, :, 5*p.K+2]
    x_lgn = @view x_lgn[:,:,1];
end;
# Check every CPU view matches the corresponding GPU view (trailing dim dropped).
@test x ≈ Array(x_)[:,:,:,1]
@test y ≈ Array(y_)[:,:,:,1]
@test m ≈ Array(m_)[:,:,:,1]
@test z ≈ Array(z_)[:,:,:,1]
@test s ≈ Array(s_)[:,:,:,1]
@test v_p ≈ Array(v_p_)[:,:,:,1]
@test v_m ≈ Array(v_m_)[:,:,:,1]
@test dx ≈ Array(dx_)[:,:,:,1]
@test dy ≈ Array(dy_)[:,:,:,1]
@test dm ≈ Array(dm_)[:,:,:,1]
@test dz ≈ Array(dz_)[:,:,:,1]
@test ds ≈ Array(ds_)[:,:,:,1]
@test dv_p ≈ Array(dv_p_)[:,:,:,1]
@test dv_m ≈ Array(dv_m_)[:,:,:,1]
@test C ≈ Array(C_)[:,:,:,1]
@test H_z ≈ Array(H_z_)[:,:,:,1]
@test x_lgn ≈ Array(x_lgn_)[:,:,:,1]
sol = solve(prob);
Utils.plot_rb(sol(100)[:,:,7], name="img", save=false)
Utils.plot_rb(Array(sol_(100)[:,:,7]), name="img", save=false)
size(sol(100))
size(sol_(100))
ii = reshape(ones(Float32, 20000), 100, 100, 2)
ii[:,:,1] .= img
ii[:,:,2] .= relu.(ii[:,:,2] .- (0.8f0 .* img))
# Utils.plot_rb(ii[:,:,2], name="i", save=false)
ii_ = CuArray(reshape(ii, 100,100,2,1));
# # Test
r_ = similar(img_)
r = similar(img)
# p_ = LaminartGPU.kernels(img_, Parameters.parameters);
r = Laminart.I_u(img,p)
LaminartGPU.I_u!(r_, img_, p_);
@test r ≈ Array(r_)[:,:,1,1]
@test p.r ≈ Array(p_.r[:,:,1,1])
# ## x_lgn
out = similar(x_lgn)
out_ = similar(x_lgn_)
out .= 0f0
out_ .= 1f0
Laminart.fun_x_lgn!(out, x, p)
LaminartGPU.fun_x_lgn!(out_, x_, p_)
@test out[30:70,30:70,1] ≈ Array(out_)[30:70,30:70,1,1]
# +
out = similar(x_lgn)
out_ = similar(x_lgn_)
out .= 0f0
out_ .= 1f0
Laminart.fun_x_lgn!(out, ii, p)
LaminartGPU.fun_x_lgn!(out_, ii_, p_)
@test out[30:70,30:70,1] ≈ Array(out_)[30:70,30:70,1,1]
# -
size(out), size(out_)
function fun_x_lgn!(x_lgn::AbstractArray, x::AbstractArray, p::NamedTuple)
    # Accumulate the K orientation slices of `x` into `x_lgn`, in place.
    fill!(x_lgn, 0f0)
    for k in 1:p.K
        x_lgn .+= view(x, :, :, k)
    end
    return nothing
end
# +
# Inlined body of the GPU fun_x_lgn! for debugging: sum over channels as a
# 1x1 convolution with the k_x_lgn kernel.
# function fun_x_lgn!(x_lgn::AbstractArray, x::AbstractArray, p::NamedTuple)
out_ = NNlib.conv(ii_, p_.k_x_lgn, pad=0,flipped=true);
# return nothing
# end
Laminart.fun_x_lgn!(out, ii, p)
# -
@test out[30:70,30:70,1] ≈ Array(out_)[30:70,30:70,1,1]
Utils.plot_rb(out_[:,:,1,1], name="", save=false)
# Check whether the two results differ by a factor of 2.
outt = out ./ 2
out_
@test outt[30:70,30:70,1] ≈ Array(out_)[30:70,30:70,1,1]
Utils.plot_rb(out_[30:70,30:70,1,1],name="img", save=false)
Utils.plot_rb(out[30:70,30:70,1],name="img", save=false, axMax=5)
out = similar(x_lgn)
out_ = similar(x_lgn_)
out .= 0f0
out_ .= 0.3f0
Laminart.fun_x_lgn!(out, ii, p)
LaminartGPU.fun_x_lgn!(out_, ii_, p_)
@test out[30:70,30:70,1] ≈ Array(out_)[30:70,30:70,1,1]
Utils.plot_rb(out_[:,:,1,1], axMax=1.21, name="", save=false)
Utils.plot_rb(out[:,:,1], axMax=1.21, name="", save=false)
Utils.plot_rb(out_[:,:,1], name="", save=false)
# ## C
# Compare CPU vs GPU computation of the C population from v_p / v_m.
@test v_p ≈ Array(v_p_)[:,:,1,1]
@test v_m ≈ Array(v_m_)[:,:,1,1]
size(v_p), size(Array(v_p_)[:,:,1,1])
# +
out = similar(C)
out_ = similar(C_)
out .= 0f0
out_ .= 1f0
Laminart.fun_v_C!(out, v_p, v_m, p)
LaminartGPU.fun_v_C!(out_, v_p_, v_m_, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
# ## H_z
# Compare CPU vs GPU computation of H_z from z.
@test z ≈ Array(z_[:,:,:,1])
out = similar(H_z)
out_ = similar(H_z_)
out .= 0f0
out_ .= 1f0;
# +
Laminart.fun_H_z!(out, z, p)
LaminartGPU.fun_H_z!(out_, z_, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
# Inlined body of fun_H_z!: threshold z at Γ, then convolve with k_H.
inn = max.(z_ .- p_.Γ, 0f0)
# function fun_H_z!(H_z::AbstractArray, z::AbstractArray, p::NamedTuple)
LaminartGPU.conv!(out_, inn, p_.k_H, p_)
# return nothing
# end
# Utils.plot_rb(out_[:,:,1,1], axMax=9.53)
Utils.plot_rb(out_[:,:,1,1], axMax=1.953)
# Utils.plot_rb(inn[:,:,1,1])
# Direct NNlib.conv with explicit "same" padding, for comparison with conv!.
out_ = NNlib.conv(max.(z_ .- p_.Γ, 0f0), p_.k_H, pad=(size(p_.k_H)[1]>>1, size(p_.k_H)[1]>>1, size(p_.k_H)[2]>>1, size(p_.k_H)[2]>>1), flipped=true);
Utils.plot_rb(out[:,:,1], axMax=1.96)
# ## v_p
# +
@test v_p ≈ Array(v_p_)[:,:,1,1]
@test v_m ≈ Array(v_m_)[:,:,1,1]
@test x_lgn ≈ Array(x_lgn_)[:,:,1,1]
# -
# +
out = similar(dv_p)
out_ = similar(dv_p_)
out .= 0f0
out_ .= 1f0
Laminart.fun_dv!(out, v_p, p.r, x_lgn, p)
LaminartGPU.fun_dv!(out_, v_p_, p_.r, x_lgn_, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
Utils.plot_rb(out, axMin=-1.38)
Utils.plot_rb(Array(out_[:,:,1,1]), axMin=-1.38)
# +
out = similar(dv_p)
out_ = similar(dv_p_)
out .= 0f0
out_ .= 1f0;
# -
# Inlined fun_dv!: Gaussian-blur x_lgn, then apply the shunting equation.
imfilter!(out, x_lgn, centered(p.k_gauss_1), p.filling)
LaminartGPU.conv!(out_, x_lgn_, p_.k_gauss_1, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# +
# Laminart.fun_dv!(out, v_p, p.r, x_lgn, p)
# LaminartGPU.fun_dv!(out_, v_p_, p_.r, x_lgn_, p_)
# -
@. out =
p.δ_v * (
-v_p + ((1f0 - v_p) * max(p.r, 0f0) * (1f0 + p.C_1 * x_lgn)) -
((1f0 + v_p) * p.C_2 * out)
);
@. out_ =
p_.δ_v * (
-v_p_ + ((1f0 - v_p_) * max(p_.r, 0f0) * (1f0 + p_.C_1 * x_lgn_)) -
((1f0 + v_p_) * p_.C_2 * out_)
)
# Term-by-term comparison of the shunting-equation inputs.
@test p.δ_v ≈ p_.δ_v
@test -v_p ≈Array(-v_p_)[:,:,:,1]
@test v_p ≈ Array(v_p_)[:,:,:,1]
@test max.(p.r, 0f0) ≈ Array(max.(p_.r, 0f0))[:,:,1,1]
# @test p.C_1 ≈ p_.C_1
# @test x_lgn ≈ Array(x_lgn_)[:,:,:,1]
# @test v_p ≈ Array(v_p_)[:,:,:,1]
# @test p.C_2 ≈ p_.C_2
# @test out ≈ Array(out_)[:,:,:,1]
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
size(p.r)
size(p_.r)
max.(p.r, 0f0)
findmax(Array(max.(p_.r, 0f0))[:,:,1,1])
findmax(Array(max.(p.r, 0f0)))
@test p.r ≈ Array(p_.r)[:,:,1,1]
# ## v_m
# Same shunting comparison for v_m (note the negated input current).
# +
out = similar(dv_m)
out_ = similar(dv_m_)
out .= 0f0
out_ .= 1f0
Laminart.fun_dv!(out, v_m, -p.r, x_lgn, p)
LaminartGPU.fun_dv!(out_, v_m_, .-p_.r, x_lgn_, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
Utils.plot_rb(out, axMin=-1.4, axMax=2.87)
Utils.plot_rb(Array(out_[:,:,1,1]), axMin=-1.4, axMax=2.87)
Utils.plot_rb(Array(out_[:,:,1,1]), axMin=-1.4)
Utils.plot_rb(p_.r[:,:,1,1])
# ## x
# +
out = similar(dx)
out_ = similar(dx_)
out .= 0f0
out_ .= 1f0
Laminart.fun_dx_v1!(out, x, C, z, p.x_V2, p)
LaminartGPU.fun_dx_v1!(out_, x_, C_, z_, p_.x_V2, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
# ## y
# +
out = similar(dy)
out_ = similar(dy_)
out .= 0f0
out_ .= 1f0
Laminart.fun_dy!(out, y, C, x, m, p)
LaminartGPU.fun_dy!(out_, y_, C_, x_, m_, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
@test y ≈ Array(y_)[:,:,:,1]
@test C ≈ Array(C_)[:,:,:,1]
@test x ≈ Array(x_)[:,:,:,1]
@test m ≈ Array(m_)[:,:,:,1]
# @test ≈ Array()[:,:,:,1]
# Isolate the W_p filtering step of fun_dy! on both backends.
Laminart.func_filter_W!(out, m, p.k_W_p, p)
# @. dy = m * dy
# Laminart.fun_f!(dy, dy, p)
# @. dy = p.δ_c * (-y + ((1 - y) * (C + (p.η_p * x))) - ((1 + y) * dy))
LaminartGPU.conv!(out_, m_, p_.k_W_p, p_)
# @. dy_ = m_ * dy_
# LaminartGPU.fun_f!(dy_, dy_, p_)
# @. dy_ = p_.δ_c * (-y_ + ((1 - y_) * (C_ + (p_.η_p * x_))) - ((1 + y_) * dy_))
@test out[30:70,30:70,1] ≈ Array(out_)[30:70,30:70,1,1]
size(out), size(out_)
@test m ≈ Array(m)[:,:,:,1]
@test p.k_W_p ≈ Array(p_.k_W_p)
# Visual inspection of the W_p kernel components on both backends.
p_.k_W_p
p_.k_W_p[:,:,:,1]
p_.k_W_p[:,:,:,2]
Utils.plot_gs(p.k_W_p[:,:,1,1])
Utils.plot_gs(p.k_W_p[:,:,2,2])
Utils.plot_gs(p.k_W_p[:,:,2,1])
Utils.plot_gs(p.k_W_p[:,:,2,2])
Utils.plot_gs(p_.k_W_p[:,:,1,1])
Utils.plot_gs(p_.k_W_p[:,:,2,2])
Utils.plot_gs(p.k_W_p[:,:,1,2])
Utils.plot_gs(p.k_W_p[:,:,2,1])
Utils.plot_gs(-p.k_W_p[:,:,2,2]+p.k_W_p[:,:,1,1])
# ## m
# +
out = similar(dm)
out_ = similar(dm_)
out .= 0f0
out_ .= 1f0
Laminart.fun_dm!(out, m, x, p)
LaminartGPU.fun_dm!(out_, m_, x_, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
# ## z
# +
out = similar(dz)
out_ = similar(dz_)
out .= 0f0
out_ .= 1f0
Laminart.fun_dz!(out, z, y, H_z, s, p)
LaminartGPU.fun_dz!(out_, z_, y_, H_z_, s_, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
@test z ≈ Array(z_)[:,:,:,1]
@test y ≈ Array(y_)[:,:,:,1]
@test H_z ≈ Array(H_z_)[:,:,:,1]
@test s ≈ Array(s_)[:,:,:,1]
# @test z ≈ Array()[:,:,:,1]
# @test z ≈ Array()[:,:,:,1]
# Isolate the T_p filtering step of fun_dz! on both backends.
imfilter!(out, s, centered(p.k_T_p), p.filling);
LaminartGPU.conv!(out_, s_, p_.k_T_p, p_)
# @. dz =
# p.δ_z * (
# -z + ((1 - z) * ((p.λ * max(y, 0)) + H_z + (p.a_23_ex * p.att))) -
# ((z + p.ψ) * dz)
# )
# +
@test out[30:70,30:70,1] ≈ Array(out_)[30:70,30:70,1,1]
# -
@test out[30:70,30:70,2] ≈ Array(out_)[30:70,30:70,2,1]
size(s_), size(p_.k_T_p)
p_.k_T_p
# Hand-built 1x1x2x2 cross-orientation kernel for comparison with k_T_p.
kk = CuArray(reshape(ones(Float32, 4), 1, 1, 2, 2))
kk[:,:,1,1]=0.87
kk[:,:,2,2]=0.87
kk[:,:,1,2]=0.13
kk[:,:,2,1]=0.13;
LaminartGPU.conv!(out_, s_, kk, p_)
# ## s
# +
out = similar(ds)
out_ = similar(ds_)
out .= 0f0
out_ .= 1f0
Laminart.fun_ds!(out, s, H_z, p)
LaminartGPU.fun_ds!(out_, s_, H_z_, p_)
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# -
@test out[30:70,30:70,:] ≈ Array(out_)[30:70,30:70,:,1]
# +
# Utils.plot_rb(t[:,:,1], axMax=1, name="img", save=false)
# -
# -
| notebooks/dev/GPUDev0718.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multi-channel slow-wave detection
#
# This notebook demonstrates how to use YASA to perform **multi-channel slow-waves detection** from a NumPy array (example 1) or a MNE Raw object (example 2). If you haven't done so already, check out the notebook on [single-channel slow-waves detection](05_sw_detection.ipynb).
#
# Please make sure to install the latest version of YASA first by typing the following line in your terminal or command prompt:
#
# `pip install --upgrade yasa`
#
# **Important**
# - The data must be a numpy array of shape *(n_channels, n_samples)*.
# - The sampling frequency `sf` must be the same for all channels.
# - A list of the channel names (`ch_names`) must be provided as well.
# - The unit of the data must be $\mu V$. Note that the default unit in [MNE](https://martinos.org/mne/dev/generated/mne.io.Raw.html) is $V$. Therefore, if you use MNE, you must multiply your data by 1e6 (1 $V$ = 1,000,000 $\mu V$).
#
# ## Example 1: Using NumPy
#
# To illustrate the multi-channel slow-waves detection, we load a full-night 3-channels dataset (Cz, Fz, Pz) sampled at 100 Hz. The data is in compressed NumPy format (*.npz*).
# +
import mne
import yasa
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

# Load the full-night, 3-channel (Cz, Fz, Pz) recording sampled at 100 Hz,
# together with its hypnogram.
f = np.load('data_full_6hrs_100Hz_Cz+Fz+Pz.npz')
data, chan = f['data'], f['chan']
hypno = np.load('data_full_6hrs_100Hz_hypno.npz').get('hypno')
sf = 100.
# BUG FIX: `data` has shape (n_channels, n_samples), so the time vector must be
# based on the samples per channel (`data.shape[1]`), not `data.size`, which
# counts samples across *all* channels and made `times` n_channels times too long.
times = np.arange(data.shape[1]) / sf
print(data.shape, chan)
# -
# To apply the multi-channel detection, we use the [sw_detect](https://raphaelvallat.com/yasa/build/html/generated/yasa.sw_detect.html#yasa.sw_detect) function. Note that we pass the hypnogram and restrict the detection to N2 or N3 sleep.
# +
# Multi-channel detection, restricted to N2/N3 sleep (stage codes 2 and 3)
# via the hypnogram.
sw = yasa.sw_detect(data, sf, ch_names=chan, hypno=hypno, include=(2, 3))
# Get the full detection dataframe
sw.summary().round(2)
# -
# Get the average per channel and stage
sw.summary(grp_chan=True, grp_stage=True, aggfunc='mean')
# Plot an average template of the detected slow-waves, centered around the negative peak
ax = sw.plot_average(center="NegPeak", time_before=0.4,
                     time_after=0.8, palette="Set1")
ax.legend(frameon=False)
sns.despine()
# Same but grouped by stage, using the `hue="Stage"` input parameter (default is "Channel")
# Note that we're also disabling the time-consuming bootstrapped confidence
# intervals with `ci=None`.
ax = sw.plot_average(center="NegPeak", hue="Stage", time_before=0.4,
                     time_after=0.8, ci=None,
                     palette=["xkcd:cerulean", "xkcd:midnight blue"])
ax.legend(frameon=False)
sns.despine()
# **Grouped by both stage and channel**
#
# While YASA does not directly support grouping by both channel and stage, it is possible with just a few lines of code.
# First, we need to get a long-format dataframe of peak-locked events.
df_sync = sw.get_sync_events(center="NegPeak", time_before=0.4, time_after=0.8)
df_sync
# +
# Then we can use seaborn.lineplot to create the desired plot:
ax = sns.lineplot(data=df_sync, x="Time", y="Amplitude", hue="Channel",
                  style="Stage", palette="Set1", ci=None, dashes=True)
sns.despine()
# -
# *************
#
# ## Example 2: Using a Raw object from MNE-Python
#
# This example demonstrates how to manipulate [MNE Raw object](https://mne-tools.github.io/stable/generated/mne.io.Raw.html#mne.io.Raw). The MNE package has several [functions](https://mne-tools.github.io/stable/python_reference.html#module-mne.io) to load the most standard EEG file formats (EDF, BrainVision, EEGLab, FieldTrip...).
#
# For the sake of this example, we'll load a PSG file encoded in the native MNE format (*.fif) using the [mne.io.read_raw_fif](https://martinos.org/mne/stable/generated/mne.io.read_raw_fif.html) function.
# Load the raw object
raw = mne.io.read_raw_fif('sub-02_mne_raw.fif', preload=True, verbose=False)
# Let's have a look at the data
print('Chan =', raw.ch_names)
print('Sampling frequency =', raw.info['sfreq'])
print('Data shape =', raw._data.shape)
# Keep only the channels of interests
raw_eeg = raw.copy().pick_types(eeg=True).drop_channels(['O1', 'O2'])
print('Chan =', raw_eeg.ch_names)
# Multi-channel slow-waves detection
# Note that since we're using a MNE Raw object, there is no need
# to manually specify the sampling frequency and channel names.
# We also use a less conservative amplitude threshold
sw = yasa.sw_detect(raw_eeg, amp_ptp=(50, 400))
print(sw.summary().shape[0], 'slow-waves detected.')
sw.summary().round(3)
# Average properties per channel
sw.summary(grp_chan=True)
# Plot an average template of the detected slow-waves, centered around the start of the slow-wave
# Definitely noisier than our previous plot since there are fewer detected slow-waves
# in our second dataset, because it's a 45 min nap and not a full-night recording!
ax = sw.plot_average(center="Start", time_before=0.2, time_after=1)
ax.legend(frameon=False)
sns.despine()
# Visual inspection of the detected slow-waves
# %matplotlib widget
sw.plot_detection();
| notebooks/06_sw_detection_multi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import os
# Seaborn styling for every figure in this notebook.
sns.set_style("whitegrid")
# IPython magic (kept unescaped by jupytext): render figures inline.
% matplotlib inline
# +
# Candidate simulation folders (P10..P25) and the CSV files they contain.
diretorios = ['P10', 'P15', 'P20', 'P25']
bufferDataFile = 'bufferData.csv'
taxaMacDataFile = 'taxaMACData.csv'
# folder = diretorios[3]
folder = "PFBuffer"
# Pipe-delimited CSV; columns include Tempo, Nó sensor, TaxaMAC, Buffer, Repeticao.
dfData = pd.read_csv(os.path.join('../Simulations',folder,taxaMacDataFile),delimiter="|") #index_col='Tempo'
dfData.head()
# -
# Scatter of MAC rate vs buffer occupancy, excluding empty-buffer rows.
df = dfData[dfData["Buffer"] != 0.0]
ax = sns.scatterplot(x="TaxaMAC",y="Buffer",hue="Nó sensor",data=df)
ax.legend(loc=(1.03,0.4))
df.head()
# +
# Wide-format table: one column per sensor node for TaxaMAC and Buffer.
dfPivot = dfData.pivot(index='Tempo',columns='Nó sensor',values=['TaxaMAC','Buffer'])
dfPivot.fillna(value=0,inplace=True)
dfPivot.head()
# ax = dfPivot['TaxaMAC'].plot(subplots=True, figsize=(25, 10),ylim=(0,100),legend=True,style='-')
# ax = dfPivot['Buffer'].plot(subplots=True, figsize=(25, 10),ylim=(0,100),legend=True,style='.',ax=ax)
# plt.savefig('saida.png')
# plt.tight_layout()
# -
# Time series for a single sensor node (node 2), samples 100..200.
df = dfData[dfData['Nó sensor'] == 2 ]
df.head()
plt.figure(figsize=(25,5))
ax = plt.subplot()
ax = df[['Tempo','TaxaMAC']].iloc[100:200].plot(x='Tempo',ax=ax,style=':',marker='o')
ax = df[['Tempo','Buffer']].iloc[100:200].plot(x='Tempo',ax=ax,style='-',marker='.')
ax.yaxis.label.set_text("Proporção (%)")
# +
# Same comparison for a chosen node/repetition window, saved to disk.
NoSensor = 3
repeticao = 5
df = dfData[(dfData['Nó sensor'] == NoSensor) & (dfData['Repeticao'] == repeticao )]
plt.figure(figsize=(25,5))
ax = plt.subplot()
T_inferior = 0
T_superior = T_inferior + 200
ax = df[['Tempo','TaxaMAC']].iloc[T_inferior:T_superior].plot(x='Tempo',ax=ax,style=':',marker='o')
ax = df[['Tempo','Buffer']].iloc[T_inferior:T_superior].plot(x='Tempo',ax=ax,style='-',marker='.')
# ax = df[['Tempo','TaxaMAC']].plot(x='Tempo',ax=ax,style=':',marker='o')
# ax = df[['Tempo','Buffer']].plot(x='Tempo',ax=ax,style='-',marker='.')
ax.get_figure().savefig('serieTemporal-TaxaBuffer-vs-TaxaMAC-N{0}-I{1}-F{2}'.format(NoSensor,T_inferior, T_superior))
# -
# Distribution of buffer occupancy and of its first difference for node 3.
df = dfData[dfData['Nó sensor'] == 3 ]
df['Buffer'].plot.hist()
# +
diff = df.diff()
diff.head()
diff['Tempo'].plot.hist()
# -
| Castalia-K/notebooks/TaxaMAC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# > **Copyright © 2021 CertifAI Sdn. Bhd.**<br>
# <br>
# This program and the accompanying materials are made available under the
# <br>terms of the Apache License, Version 2.0 which is available at
# <br>https://www.apache.org/licenses/LICENSE-2.0.
# <br>
# <br>Unless required by applicable law or agreed to in writing, software
# <br>distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# <br>WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# <br>License for the specific language governing permissions and limitations
# <br>under the License.
# <br>
# <br>SPDX-License-Identifier: Apache-2.0
# # YOLO Image Retrieval
from ipyfilechooser import FileChooser
import os
import cv2
import numpy as np
import re
import random
# # Paths Selection
# Once run, you can select files multiple times without rerun the script
#
# ### Select Image Path
# Select Image Folder
# Widget to pick the directory that holds the images to annotate.
image_path_selector = FileChooser()
image_path_selector.show_only_dirs = True
display(image_path_selector)
# ### Select Label Path
# Select Label Folder
# Widget to pick the directory that holds the YOLO .txt annotation files.
label_path_selector = FileChooser()
label_path_selector.show_only_dirs = True
display(label_path_selector)
# ### Select Label File
# Select *label.txt* file
# Widget to pick the class-name list (one class per line).
label_file_selector = FileChooser()
label_file_selector.filter_pattern = '*.txt'
display(label_file_selector)
# ### Select Output Path
# Select Output Folder
#
# *Note: Image will be written in {output_path}/{image_path name} directory*
#
# *Example: Output path = {/home/x/Output}, Image path = /home/x/{Image} => Image will be written in /home/x/Output/Image/xx.jpg*
output_path_selector = FileChooser()
output_path_selector.show_only_dirs = True
display(output_path_selector)
# # Run after selecting all paths
# +
#Create a Label class representing each bbox
class Label:
    """One YOLO-format bounding box: class id plus normalized center (x, y) and size (w, h).

    Call `setImageSize` with the image's `ndarray.shape` before using the
    pixel-coordinate helpers (`getWidth`, `getHeight`, corner methods).
    """

    def __init__(self, row):
        # row: [class_id, x_center, y_center, width, height] as strings,
        # all geometry values normalized to [0, 1].
        self.label = int(row[0])
        self.x = float(row[1])
        self.y = float(row[2])
        self.w = float(row[3])
        self.h = float(row[4])

    def setImageSize(self, size):
        # size: (height, width[, channels]) as returned by ndarray.shape.
        self.height = size[0]
        self.width = size[1]

    def getWidth(self):
        """Box width in pixels."""
        return self.w * self.width

    def getHeight(self):
        """Box height in pixels."""
        return self.h * self.height

    def _corner(self, sx, sy, offsetx, offsety):
        # Pixel coordinates of the corner at (x + sx*w/2, y + sy*h/2), shifted
        # by the given pixel offsets; truncated toward zero like the int() cast.
        px = int((self.x + sx * self.w / 2) * self.width) + offsetx
        py = int((self.y + sy * self.h / 2) * self.height) + offsety
        return (px, py)

    def topleft(self, offsetx = 0, offsety = 0):
        return self._corner(-1, -1, offsetx, offsety)

    def botleft(self, offsetx = 0, offsety = 0):
        return self._corner(-1, 1, offsetx, offsety)

    def botright(self, offsetx = 0, offsety = 0):
        return self._corner(1, 1, offsetx, offsety)
# Map image basename -> list of Label bounding boxes parsed from its .txt file.
label_dict = dict()
label_dict.clear()

# Folder containing the YOLO-format annotation files.
label_path = label_path_selector.selected

# Load every annotation file; a file is skipped entirely if any of its rows
# does not have exactly 5 whitespace-separated fields.
if not os.path.isdir(label_path):
    print("not a directory")
else:
    for filename in os.listdir(label_path):
        if not filename.endswith(".txt"):
            continue
        file = os.path.join(label_path, filename)
        with open(file, mode = 'r', newline = '') as f:
            label_list = list()
            update = True
            for row in f.read().splitlines():
                # BUG FIX: the original used `row[:-1].split(' ')`, which chopped
                # the last character off every line (corrupting the box height
                # unless the line happened to end in a space -- splitlines() has
                # already removed the newline). `str.split()` with no argument
                # handles trailing and repeated whitespace correctly either way.
                element = row.split()
                if len(element) != 5:
                    update = False
                    break
                label_list.append(Label(element))
            if update:
                label_dict[filename[:-4]] = label_list

# Class-id -> class-name lookup, one name per line of the selected label file.
class_file = label_file_selector.selected
class_list = list()
with open(class_file, mode = 'r', newline = '') as f:
    class_list = f.read().splitlines()

# One fixed random color per class.
color_list = list()


def random_color():
    """Pick a random BGR color, capped at 230 per channel so white text stays legible."""
    return tuple(random.randint(0, 230) for _ in range(3))


for i in range(len(class_list)):
    color_list.append(random_color())
##Write to Output
#check if image path is a directory
image_path = image_path_selector.selected
# Output goes to {output_path}/{last folder name of image_path}/.
image_folder = image_path.split(os.path.sep)[-2]
output_path = os.path.join(output_path_selector.selected,image_folder)
if not os.path.isdir(image_path):
    print("not a directory")
else:
    #Create Output Path
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    for filename in os.listdir(image_path):
        #Only load Images in the folder
        splitter = filename.rfind(".")
        ext = filename[splitter:]
        isImage = re.search('(.)(jpg|png|jpeg|JPG|JPEG|PNG|bmp)', ext)
        if not isImage:
            continue
        # Resize to a fixed 400px height, preserving the aspect ratio.
        mat = cv2.imread(os.path.join(image_path,filename))
        fix_h = 400
        ratio = mat.shape[1]/mat.shape[0]
        fix_w = int(fix_h * ratio)
        mat = cv2.resize(mat, (fix_w, fix_h), interpolation = cv2.INTER_AREA)
        name = filename[:splitter]
        if name in label_dict:
            # Draw every bounding box + class label for this image.
            for label in label_dict[name]:
                label.setImageSize(mat.shape)
                color = color_list[label.label]
                classes = class_list[label.label]
                #Rectangle size
                rect_width = label.getWidth()
                rect_height = label.getHeight()
                min_dim = min(rect_width, rect_height)
                # Line thickness scales with box size, minimum 1 px.
                rect_thick = max(int(min_dim // 100),1)
                #Draw rectangle
                cv2.rectangle(mat, label.topleft(), label.botright(), color, rect_thick)
                #Text box
                tf = max(rect_thick - 1, 1)
                t_size = cv2.getTextSize(classes, 0, fontScale=rect_thick / 3, thickness=tf)[0]
                # Filled background strip just above the box's top-left corner.
                textbox_topright = label.topleft()[0] + t_size[0], label.topleft()[1] - t_size[1] - 3
                cv2.rectangle(mat, label.topleft(), textbox_topright, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(mat, classes, (label.topleft()[0], label.topleft()[1] - 2), 0, rect_thick / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
        cv2.imwrite(os.path.join(output_path,filename), mat)
| YOLO/ImageRetrieval.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## MOSAIKS feature extraction
#
# This tutorial demonstrates the **MOSAIKS** method for extracting _feature vectors_ from satellite imagery patches for use in downstream modeling tasks. It will show:
# - How to extract 1km$^2$ patches of Landsat 8 multispectral imagery for a list of latitude, longitude points
# - How to extract summary features from each of these imagery patches
# - How to use the summary features in a linear model of the population density at each point
#
# ### Background
#
# Consider the case where you have a dataset of latitude and longitude points associated with some dependent variable (for example: population density, weather, housing prices, biodiversity) and, potentially, other independent variables. You would like to model the dependent variable as a function of the independent variables, but instead of including latitude and longitude directly in this model, you would like to include some high dimensional representation of what the Earth looks like at that point (that hopefully explains some of the variance in the dependent variable!). From the computer vision literature, there are various [representation learning techniques](https://en.wikipedia.org/wiki/Feature_learning) that can be used to do this, i.e. extract _features vectors_ from imagery. This notebook gives an implementation of the technique described in [Rolf et al. 2021](https://www.nature.com/articles/s41467-021-24638-z), "A generalizable and accessible approach to machine learning with global satellite imagery" called Multi-task Observation using Satellite Imagery & Kitchen Sinks (**MOSAIKS**). For more information about **MOSAIKS** see the [project's webpage](http://www.globalpolicy.science/mosaiks).
#
#
# **Notes**:
# - If you're running this on the [Planetary Computer Hub](http://planetarycomputer.microsoft.com/compute), make sure to choose the **GPU - PyTorch** profile when presented with the form to choose your environment.
# !pip install -q git+https://github.com/geopandas/dask-geopandas
# +
import warnings
import time
import os
# GDAL/rasterio environment tuning for efficiently reading cloud-optimized
# GeoTIFFs over HTTP (large caches, no directory listing, anonymous access).
RASTERIO_BEST_PRACTICES = dict(  # See https://github.com/pangeo-data/cog-best-practices
    CURL_CA_BUNDLE="/etc/ssl/certs/ca-certificates.crt",
    GDAL_DISABLE_READDIR_ON_OPEN="EMPTY_DIR",
    AWS_NO_SIGN_REQUEST="YES",
    GDAL_MAX_RAW_BLOCK_CACHE_SIZE="200000000",
    GDAL_SWATH_SIZE="200000000",
    VSI_CURL_CACHE_SIZE="200000000",
)
os.environ.update(RASTERIO_BEST_PRACTICES)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import contextily as ctx
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import rasterio
import rasterio.warp
import rasterio.mask
import shapely.geometry
import geopandas
import dask_geopandas
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from scipy.stats import spearmanr
from scipy.linalg import LinAlgWarning
from dask.distributed import Client
warnings.filterwarnings(action="ignore", category=LinAlgWarning, module="sklearn")
import pystac_client
import planetary_computer as pc
# -
# First we define the pytorch model that we will use to extract the features and a helper method. The **MOSAIKS** methodology describes several ways to do this and we use the simplest.
# +
def featurize(input_img, model, device):
    """Run a single image patch through *model* and return its feature vector.

    Args:
        input_img (np.ndarray): Image in (C x H x W) format with a dtype of uint8.
        model (torch.nn.Module): Feature extractor network.
        device (torch.device): Device on which *model* lives.
    """
    assert len(input_img.shape) == 3
    # Scale uint8 [0, 255] to float [0, 1] and move to the model's device.
    scaled = torch.from_numpy(input_img / 255.0).float().to(device)
    batch = scaled.unsqueeze(0)  # add a batch dimension of 1
    with torch.no_grad():
        feats = model(batch).cpu().numpy()
    return feats
class RCF(nn.Module):
    """A model for extracting Random Convolution Features (RCF) from input imagery.

    Each of the `num_features // 2` random filters yields two features: the
    spatially averaged positive and negative ReLU responses.
    """

    def __init__(self, num_features=16, kernel_size=3, num_input_channels=1):
        # num_input_channels defaults to 1 (single-band patches); raise it for
        # multispectral inputs.
        super(RCF, self).__init__()
        # Two features per filter, so `num_features` must be even.
        assert num_features % 2 == 0
        self.conv1 = nn.Conv2d(
            num_input_channels,
            num_features // 2,
            kernel_size=kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            bias=True,
        )
        # Fixed random filters: standard-normal weights, constant -1 bias.
        nn.init.normal_(self.conv1.weight, mean=0.0, std=1.0)
        nn.init.constant_(self.conv1.bias, -1.0)

    def forward(self, x):
        responses = self.conv1(x)
        pos = F.adaptive_avg_pool2d(F.relu(responses), (1, 1)).squeeze()
        neg = F.adaptive_avg_pool2d(F.relu(-responses), (1, 1)).squeeze()
        if pos.dim() == 1:
            # Single input: concatenate along the (only) feature axis.
            return torch.cat((pos, neg), dim=0)
        if pos.dim() == 2:
            # Batch of > 1 inputs: features live on axis 1.
            return torch.cat((pos, neg), dim=1)
# -
# Next, we initialize the model and pytorch components
# +
num_features = 2048
# NOTE(review): hard-wired to CUDA — fails on CPU-only machines; the notebook
# header recommends the "GPU - PyTorch" profile. Confirm a GPU is available.
device = torch.device("cuda")
model = RCF(num_features).eval().to(device)
# + [markdown] tags=[]
# ### Read dataset of (lat, lon) points and corresponding labels
# Zambia: 1997-2015
# Tanzania: 2003-2010
# Nigeria: 1995-2006
# +
# Year and admin level to analyse; seed fixed for reproducible point sampling.
year = 2013
adm_level = "adm1"
np.random.seed(42)
# +
# load Data
gdf_crop = geopandas.read_file("data/unified_crop_data.gpkg")
# Filter for 1 Country
gdf_crop = gdf_crop[gdf_crop.adm0 == 'zambia']
# Filter for 1 year but keep geometry without crop data
gdf_crop = gdf_crop[(gdf_crop.year == year) | (np.isnan(gdf_crop.year))]
# find the bounds of your geodataframe
x_min, y_min, x_max, y_max = gdf_crop.total_bounds
# set sample size (number of points inside bounding box)
# this will be reduced to only points inside the country
n = 2000
# generate random data within the bounds
x = np.random.uniform(x_min, x_max, n)
y = np.random.uniform(y_min, y_max, n)
# convert them to a points GeoSeries
gdf_points = geopandas.GeoSeries(geopandas.points_from_xy(x, y))
# only keep those points within polygons
gdf_points = gdf_points[gdf_points.within(gdf_crop.unary_union)]
# make points GeoSeries into GeoDataFrame
gdf_points = geopandas.GeoDataFrame(gdf_points).rename(columns={0:'geometry'}).set_geometry('geometry')
# Make blank GeoDataFrame
gdf = geopandas.GeoDataFrame()
# Extract lon, lat, and geometry values and assign to columns
gdf['lon'] = gdf_points['geometry'].x
gdf['lat'] = gdf_points['geometry'].y
gdf['geometry'] = gdf_points['geometry']
# Set CRS (WGS84 lat/lon, matching the STAC search later on)
gdf = gdf.set_crs('EPSG:4326')
# Also make a regular dataframe
points = pd.DataFrame(gdf)
# -
len(points)
# Plot the sampled points on top of the country polygons with a basemap.
fig, ax = plt.subplots(figsize=(10, 10))
gdf_crop.plot(
    ax = ax
    , color = "blue"
    , edgecolor = 'black'
    , alpha = .25
)
gdf.plot(ax = ax)
ax.grid(False)
ctx.add_basemap(ax, crs="EPSG:4326")
# Get rid of points with nodata population values
# ### Extract features from the imagery around each point
#
# We need to find a suitable Sentinel 2 scene for each point. As usual, we'll use `pystac-client` to search for items matching some conditions, but we don't just want to make a `.search()` call for each of the remaining points. Each HTTP request is relatively slow. Instead, we will *batch* our points and search *in parallel*.
#
# We need to be a bit careful with how we batch up our points though. Since a single Sentinel 2 scene will cover many points, we want to make sure that points which are spatially close together end up in the same batch. In short, we need to spatially partition the dataset. This is implemented in `dask-geopandas`.
#
# So the overall workflow will be
#
# 1. Find an appropriate STAC item for each point (in parallel, using the spatially partitioned dataset)
# 2. Feed the points and STAC items to a custom Dataset that can read imagery given a point and the URL of a overlapping S2 scene
# 3. Use a custom Dataloader, which uses our Dataset, to feed our model imagery and save the corresponding features
# +
NPARTITIONS = 250
# Sort the points by Hilbert-curve distance so that spatially close points end
# up in the same dask partition (one STAC search per partition later on).
ddf = dask_geopandas.from_geopandas(gdf, npartitions=1)
hd = ddf.hilbert_distance().compute()
gdf["hd"] = hd
gdf = gdf.sort_values("hd")
gdf = gdf.reset_index()
# sort=False preserves the Hilbert ordering across the NPARTITIONS partitions.
dgdf = dask_geopandas.from_geopandas(gdf, npartitions=NPARTITIONS, sort=False)
# -
# -
# We'll write a helper function that finds a suitable STAC item for every point in one partition of the dataset.
def query(points):
    """
    Find a STAC item for points in the `points` DataFrame

    Parameters
    ----------
    points : geopandas.GeoDataFrame
        A GeoDataFrame

    Returns
    -------
    geopandas.GeoDataFrame
        A new geopandas.GeoDataFrame with a `stac_item` column containing the STAC
        item that covers each point.
    """
    # One convex hull per partition keeps this down to a single search request.
    hull = shapely.geometry.mapping(points.unary_union.convex_hull)
    catalog = pystac_client.Client.open(
        "https://planetarycomputer.microsoft.com/api/stac/v1"
    )
    # The time frame in which we search for non-cloudy imagery
    search = catalog.search(
        collections=["landsat-8-c2-l2"],  # "landsat-8-c2-l2" "sentinel-2-l2a"
        intersects=hull,
        datetime=[f"{year}-01-01", f"{year}-12-31"],
        query={"eo:cloud_cover": {"lt": 10}},
        limit=500,
    )
    feature_collection = search.get_all_items_as_dict()
    features = feature_collection["features"]
    features_by_id = {feature["id"]: feature for feature in features}

    # Table of candidate scenes, sorted least-cloudy first so that index[0]
    # below always picks the clearest covering scene.
    items = geopandas.GeoDataFrame(
        {
            "eo:cloud_cover": [f["properties"]["eo:cloud_cover"] for f in features],
            "geometry": [shapely.geometry.shape(f["geometry"]) for f in features],
        },
        index=[f["id"] for f in features],
        geometry="geometry",
    ).sort_values("eo:cloud_cover")

    point_items = []
    for point in points.geometry.tolist():
        covering = items[items.covers(point)]
        if len(covering):
            point_items.append(features_by_id[covering.index[0]])
        else:
            # There weren't any scenes matching our conditions for this point (too cloudy)
            point_items.append(None)
    return points.assign(stac_item=point_items)
# +
# %%time
with Client(n_workers=16) as client:
print(client.dashboard_link)
meta = dgdf._meta.assign(stac_item=[])
df2 = dgdf.map_partitions(query, meta=meta).compute()
# -
df2.shape
# +
df3 = df2.dropna(subset=["stac_item"])
matching_urls =(
[pc.sign(item["assets"]["SR_B1"]["href"]) for item in df3.stac_item.tolist()] +
[pc.sign(item["assets"]["SR_B2"]["href"]) for item in df3.stac_item.tolist()] +
[pc.sign(item["assets"]["SR_B3"]["href"]) for item in df3.stac_item.tolist()]
)
points = df3[["lon", "lat"]].to_numpy()
df3.shape
# -
class CustomDataset(Dataset):
    """Dataset yielding an imagery patch around each (lon, lat) point.

    Parameters
    ----------
    points : numpy.ndarray
        Array of shape (n, 2) holding (lon, lat) pairs in EPSG:4326.
    fns : sequence
        Per-point raster URL/path, or None when no scene matched the point.
    buffer : int, optional
        Half-width of the square crop around the point, in the raster's CRS
        units (default 500).
    """

    def __init__(self, points, fns, buffer=500):
        self.points = points
        self.fns = fns
        self.buffer = buffer

    def __len__(self):
        return self.points.shape[0]

    def __getitem__(self, idx):
        """Return a float tensor patch for point `idx`, or None if unavailable."""
        lon, lat = self.points[idx]
        fn = self.fns[idx]
        if fn is None:
            # No matching scene was found for this point.
            return None
        point_geom = shapely.geometry.mapping(shapely.geometry.Point(lon, lat))
        with rasterio.Env():
            with rasterio.open(fn, "r") as f:
                # Reproject the point into the raster's CRS before cropping.
                point_geom = rasterio.warp.transform_geom(
                    "epsg:4326", f.crs.to_string(), point_geom
                )
                point_shape = shapely.geometry.shape(point_geom)
                mask_shape = point_shape.buffer(self.buffer).envelope
                mask_geom = shapely.geometry.mapping(mask_shape)
                try:
                    out_image, out_transform = rasterio.mask.mask(
                        f, [mask_geom], crop=True
                    )
                except ValueError as e:
                    if "Input shapes do not overlap raster." in str(e):
                        return None
                    # BUG FIX: previously any other ValueError fell through
                    # without re-raising and then hit an UnboundLocalError on
                    # `out_image`; propagate the real error instead.
                    raise
        # NOTE(review): /255.0 assumes 8-bit imagery; Landsat SR bands are
        # 16-bit, so this scaling likely needs revisiting -- TODO confirm.
        out_image = out_image / 255.0
        return torch.from_numpy(out_image).float()
# +
dataset = CustomDataset(points, matching_urls)
dataloader = DataLoader(
    dataset,
    batch_size=8,
    shuffle=False,
    num_workers=os.cpu_count() ,
    # Identity collate: batches may contain None entries (missing scenes),
    # which the default collate_fn cannot stack.
    collate_fn=lambda x: x,
    pin_memory=False,
)
# +
# Feature matrix: one row per point. Rows for skipped images (too small, or
# no matching scene) are left as all zeros.
x_all = np.zeros((points.shape[0], num_features), dtype=float)

tic = time.time()
i = 0
for images in dataloader:
    for image in images:
        if image is not None:
            # Edit this below to reflect landsat data
            # A full image should be ~101x101 pixels (i.e. ~1km^2 at a 30m/px spatial
            # resolution), however we can receive smaller images if an input point
            # happens to be at the edge of a landsat scene (a literal edge case). To deal
            # with these (edge) cases we crudely drop all images where the spatial
            # dimensions aren't both greater than 20 pixels.
            if image.shape[1] >= 20 and image.shape[2] >= 20:
                image = image.to(device)
                with torch.no_grad():
                    # Featurize a single image (batch dimension of 1).
                    feats = model(image.unsqueeze(0)).cpu().numpy()
                x_all[i] = feats
            else:
                # this happens if the point is close to the edge of a scene
                # (one or both of the spatial dimensions of the image are very small)
                pass
        else:
            pass  # this happens if we do not find a S2 scene for some point

        # Progress report every 1000 points; `tic` is reset so the printed
        # time covers only the most recent 1000-point stretch.
        if i % 1000 == 0:
            print(
                f"{i}/{points.shape[0]} -- {i / points.shape[0] * 100:0.2f}%"
                + f" -- {time.time()-tic:0.2f} seconds"
            )
            tic = time.time()
        i += 1
# -
x_all.shape
# Convert the raw feature matrix into a DataFrame so it can be joined onto gdf.
x_all = pd.DataFrame(x_all)
x_all
gdf
# NOTE(review): x_all is positionally indexed by the rows of df3 (points whose
# stac_item was NaN were dropped), while gdf still has every point -- confirm
# the indices align before trusting this join.
gdf_features = gdf.join(x_all)
gdf_features = gdf_features.drop(['index', 'lon', 'lat', 'hd'], axis = 1)
gdf_features
# Reshape to long format: one row per (point, feature) pair.
cols = range(0, num_features)
gdf_features_long = pd.melt(gdf_features,
                            id_vars=['geometry'],
                            value_vars=cols,
                            var_name = 'feature')
# Attach admin-boundary attributes to each point via a spatial join.
features = gdf_crop.sjoin(gdf_features_long, how = 'right', predicate = 'intersects')
features
# Mean feature value per (admin unit, year, feature) triple.
features_summary = features.groupby([adm_level, 'year', 'feature']).agg({'value': 'mean'})
features_summary = features_summary.reset_index()
features_summary
# Pivot back to wide format: one column per feature.
features_summary_wide = features_summary.pivot(index = [adm_level, "year"], columns='feature', values='value')
features_summary_wide = features_summary_wide.reset_index().rename_axis(None, axis=1)
features_summary_wide
# + tags=[]
| mosaiks_landsat_8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from lmdec import SuccessiveBatchedPowerMethod, PowerMethod
from lmdec.array.random import array_constant_partition, cumulative_partition
import dask.array as da
from time import time
import matplotlib.pyplot as plt
# Suppose you have a design matrix, $X$, with $n$ observations of $p$ samples.
#
# <center> $X \in \mathbb{R}^{n \times p}$</center>
#
#
# It is often desired to see the singular value decompositions of subsets of $X$.
#
# Let $I = \{n_1, n_2, \dots, n_{r-1}, n\}$ which defines subsets of $X$. Therefore, $X_i = X_{0:n_i, :}$
#
# We want to see how the SVD of $X_i$ evolves as $i \rightarrow r$.
N, P = 100000, 20000,
r = 10  # number of cumulative row-subsets X_i of X
k = 10  # target rank of the truncated SVDs
X = da.random.random(size=(N, P))
# Row index ranges [0:n_1], [0:n_2], ..., [0:N] in equal increments of N/r.
partitions = cumulative_partition(array_constant_partition(array_shape=(N,P), f=1/r))
# Directly computing each Truncated SVD
sub_SVDs = []
start = time()
for i, part in enumerate(partitions):
    PM = PowerMethod(tol=1e-4, scale=False, center=False, factor=None)
    sub_SVDs.append(PM.svd(X[part, :]))
print('Took: {0:.2f} seconds'.format(time() - start))
# The successive method warm-starts each subset's SVD from the previous one,
# which is why it should beat r independent PowerMethod runs.
SSPM = SuccessiveBatchedPowerMethod(tol=1e-4, scale=False, center=False, factor=None)
start = time()
SSPM.svd(X)
print('Took: {0:.2f} seconds'.format(time() - start))
# # Compare Singular Values
for i, ((_, S_PM, _), S_SSPM) in enumerate(zip(sub_SVDs, SSPM.history['iter']['S'])):
    acc = (da.linalg.norm(S_PM - S_SSPM)/da.linalg.norm(S_PM)).compute()
    print('Relative Accuracy of SVD(X_{0}): {1:.5f}'.format(i, acc))
# # Compare Singular Vectors
i = 8
# First left singular vector (first 100 entries) from both methods.
plt.plot(sub_SVDs[i][0][0:100, 0], linewidth=5, label='Power Method')
plt.plot(SSPM.sub_svds[i][0][0:100, 0], label = 'SuccessiveBatchedPowerMethod')
plt.legend()
| examples/Ex 3 (SuccessiveBatchedPowerMethod).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# A plain Python list and its NumPy-array equivalent.
my_list = [5, 4, 6, 1, 1, 7, 99]
my_array = np.array(my_list)

# A Series can be built from either; the default index is 0..n-1.
my_series = pd.Series(my_list)
print(my_series)

my_series_2 = pd.Series(my_array)
print(my_series_2)

# Lookup by the default integer index.
print(my_series[2])

# A custom string index ("labels") for the same data.
my_labels = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh']
print(my_labels)

my_series_3 = pd.Series(my_list, index=my_labels)
print(my_series_3)
print(my_series_3['fourth'])

pd.DataFrame
| notebooks/pandas_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.021363, "end_time": "2021-10-19T19:41:33.334521", "exception": false, "start_time": "2021-10-19T19:41:33.313158", "status": "completed"} tags=[]
# # Latest COVID-19 India Matplotlib Overview
# + papermill={"duration": 0.627035, "end_time": "2021-10-19T19:41:33.979259", "exception": false, "start_time": "2021-10-19T19:41:33.352224", "status": "completed"} tags=[]
#hide
import pandas as pd
import numpy as np
import requests
import json
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
from IPython.core.display import display,HTML
# %matplotlib inline
from datetime import date
# + papermill={"duration": 0.3379, "end_time": "2021-10-19T19:41:34.342777", "exception": false, "start_time": "2021-10-19T19:41:34.004877", "status": "completed"} tags=[]
#hide
df = pd.read_csv('https://api.covid19india.org/csv/latest/states.csv')
df = df[['Date','State','Confirmed','Deceased']]
df = df.rename(columns={'Confirmed':'Cases', "Deceased":'Deaths'})
df_cases = df[["Date",'State','Cases']]
df_deaths = df[['Date','State','Deaths']]
df_cases1 = df_cases.groupby('Date')
df_deaths1= df_deaths.groupby('Date')
# + papermill={"duration": 0.045663, "end_time": "2021-10-19T19:41:34.405953", "exception": false, "start_time": "2021-10-19T19:41:34.360290", "status": "completed"} tags=[]
#hide
df_snap = pd.read_csv('data/SnapshotCases-28-July.csv')
arr_states = df_snap['states'].unique()
arr_dates = df['Date'].unique()
df_snap = df_snap.set_index('states')
# + papermill={"duration": 0.027627, "end_time": "2021-10-19T19:41:34.450187", "exception": false, "start_time": "2021-10-19T19:41:34.422560", "status": "completed"} tags=[]
#hide
arr_states[-9] = 'Puducherry'
arr_states[-10] = 'Odisha'
arr_states[6] = 'Dadra and Nagar Haveli and Daman and Diu'
arr_states = np.append(arr_states,np.array(['Chhattisgarh','Ladakh','Uttarakhand']))
arr_states
# + papermill={"duration": 0.046868, "end_time": "2021-10-19T19:41:34.514657", "exception": false, "start_time": "2021-10-19T19:41:34.467789", "status": "completed"} tags=[]
#hide
dates = []
for i in arr_dates:
if i>='2020-07-01':
dates.append(i)
dict = {'states':dates}
for i in arr_states:
dict[i] = [0]*len(dates)
dft_cases = pd.DataFrame(dict)
dft_deaths = pd.DataFrame(dict)
# + papermill={"duration": 8.675437, "end_time": "2021-10-19T19:41:43.206431", "exception": false, "start_time": "2021-10-19T19:41:34.530994", "status": "completed"} tags=[]
#hide
for i in range(len(dates)):
df1_deaths = df_deaths1.get_group(dates[i])
for j in range(len(df1_deaths.index)):
if df1_deaths.iloc[j,1] in arr_states:
dft_deaths.loc[i,df1_deaths.iloc[j,1]] = df1_deaths.iloc[j,2]
dft_deaths = dft_deaths.set_index('states')
# + papermill={"duration": 8.746733, "end_time": "2021-10-19T19:41:51.972076", "exception": false, "start_time": "2021-10-19T19:41:43.225343", "status": "completed"} tags=[]
#hide
for i in range(len(dates)):
df1_cases = df_cases1.get_group(dates[i])
for j in range(len(df1_cases.index)):
if df1_cases.iloc[j,1] in arr_states:
dft_cases.loc[i,df1_cases.iloc[j,1]] = df1_cases.iloc[j,2]
dft_cases = dft_cases.set_index('states')
# + papermill={"duration": 0.02529, "end_time": "2021-10-19T19:41:52.017875", "exception": false, "start_time": "2021-10-19T19:41:51.992585", "status": "completed"} tags=[]
#hide
dft_cases = dft_cases.T
dft_deaths = dft_deaths.T
dt_today = dates[-1]
dt_yday = dates[-2]
# + papermill={"duration": 0.028725, "end_time": "2021-10-19T19:41:52.063494", "exception": false, "start_time": "2021-10-19T19:41:52.034769", "status": "completed"} tags=[]
#hide
dft_cases = dft_cases.reset_index()
dft_deaths = dft_deaths.reset_index()
dft_cases = dft_cases.rename(columns = {'index':'state'})
dft_deaths = dft_deaths.rename(columns = {'index':'state'})
# + papermill={"duration": 0.033723, "end_time": "2021-10-19T19:41:52.117050", "exception": false, "start_time": "2021-10-19T19:41:52.083327", "status": "completed"} tags=[]
#hide
dfc_cases = dft_cases.groupby('state')[dt_today].sum()
dfc_deaths = dft_deaths.groupby('state')[dt_today].sum()
dfp_cases = dft_cases.groupby('state')[dt_yday].sum()
dfp_deaths = dft_deaths.groupby('state')[dt_yday].sum()
# + papermill={"duration": 0.039094, "end_time": "2021-10-19T19:41:52.171791", "exception": false, "start_time": "2021-10-19T19:41:52.132697", "status": "completed"} tags=[]
#hide
df_table = pd.DataFrame({'states': dfc_cases.index, 'Cases': dfc_cases.values, 'Deaths': dfc_deaths.values, 'PCases': dfp_cases.values, 'PDeaths': dfp_deaths.values}).set_index('states')
df_table = df_table.sort_values(by = ['Cases','Deaths'], ascending = [False, False])
df_table = df_table.reset_index()
df_table.head()
# + papermill={"duration": 0.030434, "end_time": "2021-10-19T19:41:52.222424", "exception": false, "start_time": "2021-10-19T19:41:52.191990", "status": "completed"} tags=[]
#hide
for c in 'Cases, Deaths'.split(', '):
df_table[f'{c} (+)'] = (df_table[c] - df_table[f'P{c}']).clip(0)
df_table['Fatality Rate'] = (100* df_table['Deaths']/ df_table['Cases']).round(2)
# + papermill={"duration": 0.032696, "end_time": "2021-10-19T19:41:52.271353", "exception": false, "start_time": "2021-10-19T19:41:52.238657", "status": "completed"} tags=[]
#hide
df_table.head()
# + papermill={"duration": 0.039967, "end_time": "2021-10-19T19:41:52.334379", "exception": false, "start_time": "2021-10-19T19:41:52.294412", "status": "completed"} tags=[]
#hide
# Roll the per-state table up into national totals for the dashboard header.
summary = {'updated': dates[-1], 'since': dates[-2]}
list_names = ['Cases', 'PCases', 'Deaths', 'PDeaths', 'Cases (+)', 'Deaths (+)']
for name in list_names:
    # FIX: df_table.sum()[name] re-summed *every* column on each iteration
    # (including string concatenation of the 'states' column); summing only
    # the needed column gives the same value far more cheaply.
    summary[name] = df_table[name].sum()
summary
# + papermill={"duration": 0.028602, "end_time": "2021-10-19T19:41:52.381864", "exception": false, "start_time": "2021-10-19T19:41:52.353262", "status": "completed"} tags=[]
#hide
overview = '''
<!-- ####### HTML!! #########-->
<h1 style="color: #5e9ca0; text-align: center;">India</h1>
<p style="text-align: center;">Last update: <strong>{update}</strong></p>
<p style="text-align: center;">Confirmed cases:</p>
<p style="text-align: center;font-size:24px;">{cases} (<span style="color: #ff0000;">+{new}</span>)</p>
<p style="text-align: center;">Confirmed deaths:</p>
<p style="text-align: center;font-size:24px;">{deaths} (<span style="color: #ff0000;">+{dnew}</span>)</p>
'''
# + papermill={"duration": 0.030639, "end_time": "2021-10-19T19:41:52.435041", "exception": false, "start_time": "2021-10-19T19:41:52.404402", "status": "completed"} tags=[]
#hide_input
update = summary['updated']
cases = summary['Cases']
new = summary['Cases (+)']
deaths = summary['Deaths']
dnew = summary['Deaths (+)']
html = HTML(overview.format(update=update, cases=cases,new=new,deaths=deaths,dnew=dnew))
display(html)
# + papermill={"duration": 0.034256, "end_time": "2021-10-19T19:41:52.488633", "exception": false, "start_time": "2021-10-19T19:41:52.454377", "status": "completed"} tags=[]
#hide
dt_cols = list(dft_cases.columns[1:])
dft_ct_new_cases = dft_cases.groupby('state')[dt_cols].sum().diff(axis=1).fillna(0).astype(int)
dft_ct_new_cases.sort_values(by = dates[-1], ascending = False,inplace = True)
# + papermill={"duration": 0.04178, "end_time": "2021-10-19T19:41:52.550705", "exception": false, "start_time": "2021-10-19T19:41:52.508925", "status": "completed"} tags=[]
#hide
dft_ct_new_cases.head()
# + papermill={"duration": 25.941017, "end_time": "2021-10-19T19:42:18.513689", "exception": false, "start_time": "2021-10-19T19:41:52.572672", "status": "completed"} tags=[]
#hide_input
df = dft_ct_new_cases.copy()
df.loc['Total'] = df.sum()
n = 5
ax = []
fig = plt.figure(figsize = (18,28))
gs = fig.add_gridspec(n+2, 5)
# gs = fig.add_gridspec(2, 3)
ax1 = fig.add_subplot(gs[0, :])
ef = df.loc['Total'].rename_axis('date').reset_index()
ef['date'] = ef['date'].astype('datetime64[ns]')
ax1.bar(ef.date,ef.Total,alpha=0.3,color='#007acc')
ax1.plot(ef.date,ef.Total , marker="o", color='#007acc')
ax1.xaxis.set_major_locator(mdates.WeekdayLocator())
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax1.text(0.02, 0.5,'India daily case count', transform = ax1.transAxes, fontsize=25);
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax2 = fig.add_subplot(gs[1,0])
ef = df.loc['Maharashtra'].rename_axis('date').reset_index()
ef['date'] = ef['date'].astype('datetime64[ns]')
ax2.bar(ef.date, ef.Maharashtra,color = '#007acc',alpha=0.5)
ax2.xaxis.set_major_locator(mdates.WeekdayLocator())
ax2.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax2.set_xticks(ax2.get_xticks()[::3])
maxyval = ef.Maharashtra.max()
ax2.set_ylim([0,maxyval])
ax2.text(0.05, 0.9,'Maharashtra', transform = ax2.transAxes, fontsize=20);
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax3 = fig.add_subplot(gs[1,1])
ef = df.loc['Tamil Nadu'].rename_axis('date').reset_index()
ef['date'] = ef['date'].astype('datetime64[ns]')
ax3.bar(ef.date, ef['Tamil Nadu'],color = '#007acc',alpha=0.5,)
ax3.xaxis.set_major_locator(mdates.WeekdayLocator())
ax3.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax3.set_xticks(ax3.get_xticks()[::3])
ax3.text(0.05, 0.9,'Tamil Nadu', transform = ax3.transAxes, fontsize=20);
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax5 = fig.add_subplot(gs[1,3])
ef = df.loc['Telangana'].rename_axis('date').reset_index()
ef['date'] = ef['date'].astype('datetime64[ns]')
ax5.bar(ef.date, ef['Telangana'],color = '#007acc',alpha=0.5,)
ax5.xaxis.set_major_locator(mdates.WeekdayLocator())
ax5.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax5.set_xticks(ax5.get_xticks()[::3])
ax5.text(0.05, 0.9,'Telangana', transform = ax5.transAxes, fontsize=20);
ax5.spines['right'].set_visible(False)
ax5.spines['top'].set_visible(False)
ax6 = fig.add_subplot(gs[1,4])
ef = df.loc['Andhra Pradesh'].rename_axis('date').reset_index()
ef['date'] = ef['date'].astype('datetime64[ns]')
ax6.bar(ef.date, ef['Andhra Pradesh'],color = '#007acc',alpha=0.5,)
ax6.xaxis.set_major_locator(mdates.WeekdayLocator())
ax6.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax6.set_xticks(ax6.get_xticks()[::3])
ax6.text(0.05, 1,'Andhra Pradesh', transform = ax6.transAxes, fontsize=20);
ax6.spines['right'].set_visible(False)
ax6.spines['top'].set_visible(False)
ax4 = fig.add_subplot(gs[1,2])
ef = df.loc['Delhi'].rename_axis('date').reset_index()
ef['date'] = ef['date'].astype('datetime64[ns]')
ax4.bar(ef.date, ef.Delhi,color = '#007acc',alpha=0.5)
ax4.set_xticks([])
ax4.xaxis.set_major_locator(mdates.WeekdayLocator())
ax4.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax4.set_xticks(ax4.get_xticks()[::3])
ax4.spines['right'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.text(0.05, 1,'Delhi', transform = ax4.transAxes, fontsize=20)
for i in range(n):
ax.append(fig.add_subplot(gs[i+2,:]))
ef = df.iloc[i+3].rename_axis('date').reset_index()
ef['date'] = ef['date'].astype('datetime64[ns]')
ax[i].bar(ef.date,ef.iloc[:,-1],color = '#007acc',alpha=0.3)
ax[i].plot(ef.date,ef.iloc[:,-1],marker='o',color='#007acc')
ax[i].text(0.02,0.5,f'{ef.columns.values[-1]}',transform = ax[i].transAxes, fontsize = 20);
ax[i].xaxis.set_major_locator(mdates.WeekdayLocator())
ax[i].xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax[i].set_ylim([0,7000])
ax[i].spines['right'].set_visible(False)
ax[i].spines['top'].set_visible(False)
plt.tight_layout()
# + papermill={"duration": 0.041557, "end_time": "2021-10-19T19:42:18.580102", "exception": false, "start_time": "2021-10-19T19:42:18.538545", "status": "completed"} tags=[]
#hide_input
print(df_table.to_string(index=False))
# + papermill={"duration": 0.028776, "end_time": "2021-10-19T19:42:18.641097", "exception": false, "start_time": "2021-10-19T19:42:18.612321", "status": "completed"} tags=[]
# + papermill={"duration": 0.031823, "end_time": "2021-10-19T19:42:18.698436", "exception": false, "start_time": "2021-10-19T19:42:18.666613", "status": "completed"} tags=[]
| _notebooks/latest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# # Binary classification of RGB images of "Rock, Paper, Scissors" hand signs
# ## Initialization
# ### Install and load packages
# + tags=[]
try
using Pkg
Pkg.activate(".")
Pkg.instantiate()
using IJulia, ImageIO, ImageMagick, Plots, Images, Interact, JLD2, WebIO, Printf
using Random: randperm, seed!
catch
push!(empty!(Base.LOAD_PATH), "@stdlib", pwd())
Pkg.add("IJulia")
Pkg.add("ImageIO")
Pkg.add("ImageMagick")
Pkg.add("Plots")
Pkg.add("Images")
Pkg.add("Interact")
Pkg.add("JLD2")
Pkg.add("WebIO")
Pkg.add("Random")
Pkg.add("Printf")
Pkg.instantiate()
using IJulia, ImageIO, ImageMagick, Plots, Images, Interact, JLD2, WebIO, Printf
using Random: randperm, seed!
WebIO.install_jupyter_nbextension()
end
theme(
:wong;
label="",
markerstrokewidth=0.3,
markerstrokecolor=:white
)
if !@isdefined(gn)
include("src/MyLearn2Classify.jl")
using Main.MyLearn2Classify
end
# -
# ### Load Data
X_rock = load("data/rock.jld2")["rock_imgs"]
X_paper = load("data/paper.jld2")["paper_imgs"]
X_scissors = load("data/scissors.jld2")["scissors_imgs"];
# ## Functions
# +
"""
    img = array2rgb(X)

Given a `height` x `width` x `3` array, return an RGB image with the same
dimensions, where

    img[i, j] = RGB(X[i, j, 1], X[i, j, 2], X[i, j, 3])
"""
function array2rgb(X::Array{T, 3}) where T <: Number
    height, width = size(X, 1), size(X, 2)
    img = Array{RGB, 2}(undef, height, width)
    # The third dimension of X holds the (r, g, b) channels of each pixel.
    for i in 1:height, j in 1:width
        r, g, b = X[i, j, :]
        img[i, j] = RGB(r, g, b)
    end
    return img
end
"""
Y = array2matrix(X)
Given a four-dimensional array `X`, vectorize each slice of `X` along
its fourth dimension. For example, if `X` has size `height` x `width` x
`rgb` x `n_samples`, then the returned matrix has size
`height * width * rgb` x `n_samples`.
"""
function array2matrix(X::AbstractArray{T, 4}) where T
height, width, rgb, n_samples = size(X)
Y = zeros(height * width * rgb, n_samples)
for idx in 1:n_samples
Y[:, idx] = vec(X[:, :, :, idx])
end
return Y
end
"""
Y = array2matrix(X, slice_dim)
Given an array `X`, vectorize each slice of `X` along dimension `slice_dim`.
This function is useful for converting high-dimensional datasets with a
sample dimension into data matrices for training neural networks.
"""
function array2matrix(X::AbstractArray, slice_dim::Integer)
return hcat((vec(slice) for slice in eachslice(X; dims=slice_dim))...)
end
"""
X_train, X_test = split_train_test(X, train_fraction[, sample_dim]; seed=1)
Split an array `X` along dimension `sample_dim` into `X_train` and `X_test`, where
`X_train` contains a random fraction `train_fraction` (where `train_fraction` is
between 0 and 1) of the samples, and `X_test` contains the remaining samples.
Notes:
- `sample_dim` is set to the last dimension of `X` by default.
- The `seed` keyword argument (default: 1) may be set to keep `X_train` and `X_test` the same across multiple runs.
"""
function split_train_test(X::AbstractArray, train_fraction::Float64, sample_dim::Int64=ndims(X); seed::Integer=1)
n_samples = size(X, sample_dim)
n_train = round(Integer, n_samples * train_fraction)
train_idx = randperm(seed!(seed), n_train)
test_idx = setdiff(1:n_samples, train_idx)
X_train = selectdim(X, sample_dim, train_idx)
X_test = selectdim(X, sample_dim, test_idx)
return X_train, X_test
end
# Fraction of positions at which two equal-length vectors agree.
shared_fraction(v1, v2) = count(v1 .== v2) / length(v1)

# Classification accuracy of the network `gn` (weights `w`, bias `b`,
# activation `f_a`) on data `X` against the 0/1-encoded labels `y`.
function compute_pcorrect(X::AbstractMatrix, y::Vector, w::Vector, b::Float64, f_a::Function)
    outputs = gn(X, w, b, f_a)
    threshold = 0.5
    # Threshold the continuous network outputs into the 0/1 class encoding.
    predictions = [out < threshold ? 0 : 1 for out in outputs]
    return shared_fraction(y, predictions)
end;
# -
# ## Reshape Data
# + tags=[]
X_rock_vec = array2matrix(X_rock)
X_paper_vec = array2matrix(X_paper)
train_fraction = 0.6
X_rock_train, X_rock_test = split_train_test(X_rock_vec, train_fraction)
X_paper_train, X_paper_test = split_train_test(X_paper_vec, train_fraction)
X_train = [X_rock_train X_paper_train]
X_test = [X_rock_test X_paper_test]
y_train = [zeros(size(X_rock_train, 2)); ones(size(X_paper_train, 2))]
y_test = [zeros(size(X_rock_test, 2)); ones(size(X_paper_test, 2))];
# -
# ## Train
# +
f_a = tanh # in [sigmoid, linear, tanh]
df_a = dtanh # in [dsigmoid, dlinear, dtanh]
mu = 0.0001
iters = 1500
batch_size = 100
show_loss = true
normalize = true
seed = false
w_hat, b_hat, loss = learn2classify_asgd(
f_a,
df_a,
grad_loss,
X_train,
y_train,
mu,
iters,
batch_size,
show_loss,
normalize,
seed
);
# -
# ## Review
# + tags=[]
println("Training pcorrect: $(compute_pcorrect(X_train, y_train, w_hat, b_hat, f_a))")
println("Testing pcorrect: $(compute_pcorrect(X_test, y_test, w_hat, b_hat, f_a))")
# +
h, w, rgb, n_samples = size(X_rock)
w_hat_rgb = reshape(w_hat, h, w, rgb)
plot(
heatmap(w_hat_rgb[:, :, 1], color=:grays, title="r", aspect_ratio=1.0),
heatmap(w_hat_rgb[:, :, 2], color=:grays, title="g", aspect_ratio=1.0),
heatmap(w_hat_rgb[:, :, 3], color=:grays, title="b", aspect_ratio=1.0),
layout=(1, 3),
size=(900, 300)
)
# + tags=[]
incorrect_idx(y::Vector, y_true::Vector) = findall(y .!= y_true)
network_output = gn(X_test, w_hat, b_hat, f_a)
y_out = [x < 0.5 ? 0.0 : 1.0 for x in network_output]
output2label = Dict(0.0 => "Rock", 1.0 => "Paper")
println("Incorrectly classified")
@manipulate throttle=0.3 for idx in incorrect_idx(y_out, y_test)
X = reshape(X_test[:, idx], h, w, rgb)
array2rgb(X)
end
# +
using Printf
correct_idx(y::Vector, y_true::Vector) = findall(y .== y_true)
try
rm("classified", recursive=true)
catch
end
for idx in incorrect_idx(y_out, y_test)
X = reshape(X_test[:, idx], h, w, rgb)
img = array2rgb(X)
save(@sprintf("classified/wrong%03d.png", idx), img)
end
for idx in correct_idx(y_out, y_test)
X = reshape(X_test[:, idx], h, w, rgb)
img = array2rgb(X)
save(@sprintf("classified/right%03d.png", idx), img)
end
| BinaryClassificationRGB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.6 64-bit (''accel_phys_38'': conda)'
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import sys
import os
import time
import bisect
sys.path.append('D:/Dropbox/py_projects/customlib/')
import customlib as cl
import numpy as np
import scipy.constants as pc
import pandas as pd
import matplotlib.pyplot as plt
# -
# # damage thresholds
# ---
#
# paper by Stuart et al. 1996 "Nanosecond-to-femtosecond laser-induced breakdown in dielectrics"
#
# For pulse lengths longer than a few picoseconds, the main damage mechanism is through heating. However, for short pulses, the damage is due to electron ionization. The transition from heating to ionization results in higher damage thresholds for shorter pulses than predicted by the heating damage curve.
#
# damage fluence due to heating:
# $$ df = c_{norm} \cdot \tau_{l}^{\alpha} $$
# - $\tau_{l}$ is the pulse length
# - $\alpha$ is usually in the range of 0.3 - 0.6, with 0.5 being the standard value
# - $c_{norm}$ is the normalization constant for the curve. this is usually based on some damage threshold given a specific pulse length e.g. 20 J/cm^2 at 20 ns
def damage_fluence_heating(tlen, normconst, alpha=0.5):
    """Damage fluence threshold for heating-dominated laser damage.

    Parameters
    ----------
    tlen : float
        Pulse length (seconds).
    normconst : float
        Normalization constant anchoring the curve to a known threshold.
    alpha : float, optional
        Scaling exponent; usual values are 0.3-0.6 depending on the surface,
        with 0.5 (the default) for fused silica.

    Returns
    -------
    float
        Damage fluence threshold in J/cm^2.
    """
    return normconst * tlen ** alpha
# ### damage threshold values
# ---
#
# Thorlabs beam sampler -B coating (BSF10-B):
# - 0.246 J/cm2 (800 nm, 99 fs, 1 kHz, Ø0.166 mm)
# - 7.5 J/cm2 (810 nm, 10 ns, 10 Hz, Ø0.133 mm)
#
# +
damflu = [7.5, 20]
dampulselen = [10.0e-9, 10.0e-9]
normconst = [df / damage_fluence_heating(tlen,1) for df,tlen in zip(damflu,dampulselen)]
# +
fig, axs = plt.subplots(nrows=1,ncols=1,figsize=(16,9), sharex=True)
colorlist = ['dodgerblue', 'orange', 'tab:green']
yscale = 1
for i,nc in enumerate(normconst):
ls = dict(linewidth=1.50,linestyle='-',color=colorlist[i],marker='o',ms=0, mfc='None', alpha=1)
xvec = np.arange(-15,-6,0.5)
tlenvec = 10**(xvec)
yvec = np.log10( damage_fluence_heating(tlenvec,nc) )
axs.plot(xvec, yvec,**ls)
axs.scatter( np.log10(dampulselen[i]), np.log10(damflu[i]) )
axs.scatter( np.log10(99e-15), np.log10(0.246) )
fs = dict(color='k', fontsize=20)
plt.xlabel('pulse length ',**fs)
axs.set_ylabel('Damage Fluence Threshold (J/cm^2)', **fs)
# leglist = ['Air', 'Regen top surface', 'Front-end box air']
# leglist = ['Front-end box', 'Table top']
# axs.legend(leglist, fontsize=18, loc='best')
# axs.set_xticks(np.arange(xmin,xmax, 20*60))
# axs.xaxis.set_major_formatter(timeplotfmt )
# axs.set_yticks(np.arange(0,40,5))
axs.tick_params(labelcolor='k', labelsize=15 )
axs.grid(True)
plt.show()
# +
ENERGY_IR = 25e-3 # Joules. energy of IR pulse
TLEN_IR = 1.1e-12 # sec. pulse length of IR laser
FWHM_X_IR = (2332 - 1237.5)*1e-6 # meter. waist of IR laser out of compressor
FWHM_Y_IR = (1672 - 654.5)*1e-6 # meter. waist of IR laser out of compressor
print(f'FWHM X = {FWHM_X_IR*1e6:0.3f} um')
print(f'FWHM Y = {FWHM_Y_IR*1e6:0.3f} um')
WAIST_X_IR = FWHM_X_IR / np.sqrt( 2 * np.log(2) )
WAIST_Y_IR = FWHM_Y_IR / np.sqrt( 2 * np.log(2) )
print(f'waist X = {WAIST_X_IR*1e6:0.3f} um')
print(f'waist Y = {WAIST_Y_IR*1e6:0.3f} um')
print(f'laser fluence = {cl.laser.fluence(WAIST_X_IR, ENERGY_IR):0.3f} J/cm^2')
# +
ENERGY_IR = 0.07*0.90*3.56e-3 # Joules. energy of IR pulse
TLEN_IR = 1.1e-12 # sec. pulse length of IR laser
POWER_IR = ENERGY_IR/TLEN_IR
WAIST_X_IR = 2.0e-3
# WAIST_Y_IR = 4.0e-3
print(f'waist X = {WAIST_X_IR*1e6:0.3f} um')
# print(f'waist Y = {WAIST_Y_IR*1e6:0.3f} um')
FLUENCE_IR = cl.laser.fluence(WAIST_X_IR, ENERGY_IR)
print(f'laser fluence = {FLUENCE_IR*1e3:0.3f} mJ/cm^2')
POWERDEN_IR = FLUENCE_IR / TLEN_IR
print(f'power density = {POWERDEN_IR*1e-9:0.3f} GW/cm^2')
# -
np.exp(-2)
# # interference
# ---
#
# Superimposing two plane waves results in an interference pattern in the transverse intensity that depends on the angle of incidence of the waves and the wavelength. The peaks are separated by a distance:
#
# $$ \Lambda = \frac{\lambda}{2 \sin{\alpha/2}} $$
# +
def fringe_dist(wavelen, angle):
    """Fringe spacing of two plane waves of wavelength `wavelen` (m)
    interfering at a crossing angle `angle` (rad): lambda / (2 sin(angle/2))."""
    half_angle = angle / 2
    return wavelen / (2 * np.sin(half_angle))
ANGLE_AC = 0.330 # rad
WAVELEN_IR = 1030e-9 # meter
FRINGEDIST_AC = fringe_dist(WAVELEN_IR, ANGLE_AC)
print(f'distance between fringes @ plane of interaction = {FRINGEDIST_AC*1e6:0.2f} um')
DIST_BBO2CCD = 1e-2 # meter. distance from BBO crystal to CCD of camera
FRINGEDIST_CCD = DIST_BBO2CCD*np.sin(ANGLE_AC/2)
print(f'distance betwen fringes at CCD = L*sin(angle/2) = {FRINGEDIST_CCD*1e3:0.3f} mm')
TLEN_IR = 1.1e-12 # sec. pulse length of IR laser
WAIST_X_IR = 2.0e-3
# -
0.01*np.sin(ANGLE_AC/2)
pc.c*3e-12 / (np.sqrt(2) * np.sin(ANGLE_AC/2) )
| notebooks/3.0-laser-various-calcs-igad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Binary by any other name
#
# * https://adventofcode.com/2020/day/5
#
# Today's task is to decode binary. The seat ID, with its row and column numbers, is a typical pack-two-values-into-one-binary number scheme, with the number here consisting of 10 digits; the 3 least significant are encoding the column, the other 7 the row. Eric Wastle's description is just reading out the binary number from the most significant bit to the least.
#
# In this scheme, the 0's and 1's have been replaced by letters. `F` and `B` for the row number, and `L` and `R` for the column number. If you use the [`str.translate()` function](https://docs.python.org/3/library/stdtypes.html#str.translate) this is trivially returned to base binary notation. Don't worry about separating out the two values again, that's just a [right-shift operation](https://en.wikipedia.org/wiki/Bitwise_operation#Arithmetic_shift) and a [bit mask](https://en.wikipedia.org/wiki/Bitwise_operation#AND).
#
# As so often with AoC puzzles, I've used a dataclass for this (I'm rather fond of the module) so I can easily inspect the puzzle results and have a nice representation. It also made it trivially easy to make the objects orderable, to find the max seat id. I've given the implementation a [`__slots__` attribute](https://docs.python.org/3/reference/datamodel.html#slots) almost out of habit, it is very simple immutable data object, and we don't _need_ to store arbitrary additional attributes, so why pay the memory price? And by making the class immutable (`frozen=True`) we get hashability for free, which came in helpful in part 2.
# +
from dataclasses import dataclass
from typing import FrozenSet, Mapping, Sequence, Tuple
from itertools import product
_from_pass = str.maketrans('FBLR', '0101')
_to_row = str.maketrans('01', 'FB')
_to_col = str.maketrans('01', 'LR')


@dataclass(order=True, frozen=True)
class SeatId:
    """A boarding-pass seat id.

    The low 3 bits of ``id`` encode the column, the remaining bits the row.
    Frozen + ordered, so instances are hashable and compare by id.
    """

    __slots__ = ('id',)
    id: int

    @property
    def row(self) -> int:
        """Row number: everything above the 3 column bits."""
        return self.id // 8

    @property
    def rowid(self, _tm=_to_row) -> str:
        """Row portion of the boarding pass, rendered as F/B characters."""
        return f"{self.row:07b}".translate(_tm)

    @property
    def col(self) -> int:
        """Column number: the 3 least-significant bits."""
        return self.id % 8

    @property
    def colid(self, _tm=_to_col) -> str:
        """Column portion of the boarding pass, rendered as L/R characters."""
        return f"{self.col:03b}".translate(_tm)

    def __repr__(self) -> str:
        return f"<SeatID {self.id} {self.rowid}-{self.colid}>"

    @classmethod
    def from_boardingpass(cls, pass_: str, _tm=_from_pass) -> 'SeatId':
        """Parse a 10-character F/B/L/R boarding pass into a SeatId."""
        binary = pass_.translate(_tm)
        return cls(int(binary, 2))
# Worked examples from the puzzle description: boarding pass -> (row, col, id).
tests: Mapping[str, Tuple[int, int, int]] = {
    "FBFBBFFRLR": (44, 5, 357),
    "BFFFBBFRRR": (70, 7, 567),
    "FFFBBBFRRR": (14, 7, 119),
    "BBFFBBFRLL": (102, 4, 820),
}
# Decode each sample pass and verify all three derived values.
for pass_, (row, col, id_) in tests.items():
    seatid = SeatId.from_boardingpass(pass_)
    assert (seatid.row, seatid.col, seatid.id) == (row, col, id_)
# SeatId is ordered by id (dataclass order=True), so max() finds the highest id.
assert max(map(SeatId.from_boardingpass, tests)).id == 820
# -
import aocd
# Fetch this user's puzzle input (requires network access and an aocd
# session token) and decode every line into a SeatId.
seatids: Sequence[SeatId] = [
    SeatId.from_boardingpass(pass_)
    for pass_ in aocd.get_data(day=5, year=2020).splitlines()
]
print("Part 1:", max(seatids).id)
# ## Finding your seat
#
# Now we get to apply a little logic. From the instructions we know that not all possible row numbers _exist_; the plane is missing row numbers at the front and back, but the plane is also *full*, so we can assume that there is someone sitting in every _existing_ row. So the possible row ids are simply the range from the minimum to the maximum existing row id in the input. We also know we are not sitting in the first nor last row.
#
# This is not a large problem; even if there were no seats missing, there are _at most_ 1008 possible candidate seats (8 columns times 126 rows). Since we are looking for a numeric seat id where the ids before and after do exist (and so are occupied), only need to generate all possible seat id numbers (from `min(rowids) << 3 & 7` through to `max(rowids) << 7`), iterate over these with a *sliding window*, and find the one case where a missing seat is flanked by two occupied seats.
#
# A sliding window is an iterator from an input iterable, gives you the first `n` elements as a tuple, as the first element it produces. Then the next element it gives you is a tuple with the first element dropped, and another element from the input iterable added to the end. E.g. if your iterator starts with `'foo'`, `'bar'`, `'baz'`, `'spam'`, `'ham'`, then a sliding window with `n` set to `3`, from those inputs would first produce `('foo', 'bar', 'baz')`, then `('bar', 'baz', 'spam')`, and then `('baz', 'spam', 'ham')`. To find our own seat, a sliding window with 3 elements gives us the preceding ID, our possible boarding pass, and the subsequent ID. I built my sliding window using [`itertools.tee()`](https://docs.python.org/3/library/itertools.html#itertools.tee) and [`itertools.islice()`](https://docs.python.org/3/library/itertools.html#itertools.islice); the `tee()` object handles the buffering for us, with only 3 elements the warnings in the documentation about *significant auxiliary storage* don't apply.
#
# Note that we generate IDs for the seat just before the first possible seat, and for the one just after. Say, row 5 is the first possible row, then our seat would have, at minimum, id `(6 * 8) + 0 == 48`. But then the boarding pass with id 47 would have to exist (as well as boarding pass #49), so we generate all numbers starting at `(8 * 6) - 1`. Ditto for the last possible seat; we need not just `(max(rowids) - 1) * 8 + 7`, but want to generate `(max(rowids) * 8 + 0)` too, just so we can iterate with a sliding window that includes that those two ids at the start and end.
# +
from typing import Set
from itertools import tee, islice
def find_empty(seatids: Sequence[SeatId]) -> SeatId:
    """Return the one unoccupied seat whose two neighbouring ids are occupied.

    Candidate ids start at the last seat of the first occupied row (so the
    first possible empty seat has its predecessor inside the sliding
    window) and run through the first seat of the row after the last
    occupied row.
    """
    occupied = set(seatids)
    # not the first, and not the last row, but include the seat ids before and after
    # BUGFIX: the original used `min(...) << 3 & 7`, which is always 0
    # (shifting left clears the low 3 bits); `| 7` yields row*8+7, the seat
    # just before the first candidate, as the accompanying text describes.
    first = min(s.row for s in occupied) << 3 | 7
    # +1 so the seat just after the last candidate is generated as well.
    last = (max(s.row for s in occupied) << 3) + 1
    candidates = map(SeatId, range(first, last))
    # sliding window, every 3 seatids, the one before, the candidate seat, and the one after.
    windowed = zip(*(islice(it, start, None) for start, it in enumerate(tee(candidates, 3))))
    # b, s, a => before, seat, after. Yes, I fell for the lure of the single line expression.
    return next(s for b, s, a in windowed if s not in occupied and len({a, b} & occupied) == 2)
# -
print("Part 2:", find_empty(seatids).id)
| 2020/Day 05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import sys
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
import gzip
from torch.utils import data
import torch
import torch.optim as optim
from torch.autograd import Variable
from time import gmtime, strftime
import torch.nn as nn
import math
import h5py
import src.model as models
import src.utils as utils
plt.style.use('ggplot')
# -
# %load_ext autoreload
# %autoreload 2
#Constrain PyTorch to use the right GPU!
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"  # order devices by PCI bus id so index 0 is deterministic
os.environ["CUDA_VISIBLE_DEVICES"]="0"  # expose only the first GPU to this process
print torch.cuda.get_device_name(0)
# # Load Data
KeysSpec = ['ESC','CN'] #Keys Species
data_path = {}
data_path['ESC'] = '/home/invites/jmorlot/HDDTeam/data/Hi-C/Mouse/boyan/last_try2/ESC/'
data_path['CN'] = '/home/invites/jmorlot/HDDTeam/data/Hi-C/Mouse/boyan/last_try2/CN/'
# +
# import re
# InputFiles ={key: [data_path[key]+f for f in os.listdir(data_path[key]) if re.search('basic',f)] for key in data_path.keys()}
# OutputFiles ={key: [data_path[key]+f.replace('basic','HiCPlusBoost') for f in os.listdir(data_path[key]) if re.search('basic',f)] for key in data_path.keys()}
# -
KeysSub = ['0.1','1.0','10.0','100.0'] #Keys Subsampling
Data={}
for keySp in KeysSpec:
print keySp
Data[keySp]={}
for keySu in KeysSub:
print '\t' + keySu
# path = data_path[keySp] + 'chr16_basic_' + keySu + '.hdf5'
f = h5py.File(data_path[keySp] + 'chr16_basic_' + keySu + '.hdf5','r')
Data[keySp][keySu] = np.array(f[f.keys()[0]])
f.close()
DataBoostHiC = {}
for keySp in KeysSpec:
print keySp
DataBoostHiC[keySp]={}
for keySu in KeysSub:
print '\t' + keySu
# path = data_path[keySp] + 'chr16_basic_' + keySu + '.hdf5'
f = h5py.File(data_path[keySp] + 'chr16_boosted_' + keySu + '.hdf5','r')
DataBoostHiC[keySp][keySu] = np.array(f[f.keys()[0]])
f.close()
# # HiCPlus
# ## Internal variables
# +
use_gpu = 1  # run the model on CUDA when non-zero
# CNN architecture hyper-parameters (must match the pretrained checkpoint).
conv2d1_filters_numbers = 8
conv2d1_filters_size = 9
conv2d2_filters_numbers = 8
conv2d2_filters_size = 1
conv2d3_filters_numbers = 1
conv2d3_filters_size = 5
down_sample_ratio = 16
epochs = 10  # NOTE(review): unused below -- this notebook only predicts
HiC_max_value = 100  # contact counts are clipped to this value
max_range = 3000 # in paper=201
batch_size = 32 # in paper = size of the dataset (silly...)
# -
# ## Rewritting functions to fit with our matrix format
# +
def divide(HiCsample, subImage_size=40, step=25, max_range=3000):
    '''
    Subdivide the HiC matrix into an ensemble of square sub-images.

    Parameters:
        HiCsample: 2D numpy array, the (square) Hi-C contact matrix
        subImage_size: side length of each extracted sub-image
        step: stride between consecutive sub-image origins, on both axes
        max_range: sub-images whose origin is further than this from the
            diagonal are skipped

    Returns:
        (result, index): result is a float array of shape
        (n, 1, subImage_size, subImage_size) holding the sub-images;
        index is an (n, 2) array of their top-left (i, j) coordinates.
    '''
    result = []
    index = []
    total_loci = HiCsample.shape[0]
    for i in range(0, total_loci, step):
        # BUGFIX: the inner loop was `range(0, total_loci, )` -- a stray
        # trailing comma, i.e. stride 1 -- instead of the intended `step`,
        # producing ~25x more (overlapping) sub-images than designed.
        for j in range(0, total_loci, step):
            # Skip windows far from the diagonal or overhanging the matrix edge.
            if (abs(i-j) > max_range or i + subImage_size >= total_loci or j + subImage_size >= total_loci):
                continue
            subImage = HiCsample[i:i+subImage_size, j:j+subImage_size]
            result.append([subImage,])
            index.append((i, j))
    result = np.array(result).astype(np.double)
    index = np.array(index)
    return result, index
def HiCPlus(HiCsample):
    '''
    Enhance a (subsampled) Hi-C contact matrix with the pretrained HiCPlus CNN.

    The matrix is cut into 40x40 sub-images (via divide()), each is pushed
    through the network, and the 28x28 outputs are stitched back into a
    full-size matrix.

    Parameters:
        HiCsample: 2D numpy array, the Hi-C contact matrix to enhance

    Returns:
        2D numpy array of the same shape; border pixels not covered by any
        sub-image output remain 0.
    '''
    ## Subdivide the HiC matrix in subimages
    low_resolution_samples, index = divide(HiCsample)
    # Clip contact counts to the range the model was trained on.
    low_resolution_samples = np.minimum(HiC_max_value, low_resolution_samples)
#     batch_size = low_resolution_samples.shape[0]
    ## Reshape the high-quality Hi-C sample as the target value of the training.
    sample_size = low_resolution_samples.shape[-1]
    # Total shrinkage caused by the three unpadded convolutions.
    padding = conv2d1_filters_size + conv2d2_filters_size + conv2d3_filters_size - 3
    # NOTE(review): Python 2 integer division; under Python 3 this becomes a
    # float. half_padding is unused below, so it is currently harmless.
    half_padding = padding / 2
    output_length = sample_size - padding
#     print low_resolution_samples.shape
    ## Data Loader
    # Dummy zero targets: only predictions are needed here, no training.
    lowres_set = data.TensorDataset(torch.from_numpy(low_resolution_samples), torch.from_numpy(np.zeros(low_resolution_samples.shape[0])))
    lowres_loader = torch.utils.data.DataLoader(lowres_set, batch_size=batch_size, shuffle=False)
    hires_loader = lowres_loader
    ## Get Model
    # Net(40, 28): 40x40 inputs -> 28x28 outputs (pretrained checkpoint).
    model = models.Net(40, 28)
    model.load_state_dict(torch.load('model/pytorch_model_12000'))
    if use_gpu:
        model = model.cuda()
    _loss = nn.MSELoss()
    ## Make predictions
    running_loss = 0.0
    running_loss_validate = 0.0 # WARNING : NOT USED
    reg_loss = 0.0 # WARNING : NOT USED
    y_prediction = []
    for i, _lowRes in enumerate(lowres_loader):
        _lowRes = Variable(_lowRes[0]).float()
        # NOTE(review): .cuda() is called unconditionally -- this will fail
        # when use_gpu is 0; confirm whether a CPU path is intended.
        _lowRes = _lowRes.cuda()
        _hiRes = model(_lowRes).data.cpu()
        y_prediction.append(_hiRes)
        del _lowRes,_hiRes
    y_prediction = torch.cat(y_prediction)
    y_predict = y_prediction.numpy()
    print y_predict.shape
    ## Recombine samples
    length = int(y_predict.shape[2])
    y_predict = np.reshape(y_predict, (y_predict.shape[0], length, length))
#     print y_predict.shape
#     chrs_length = [249250621,243199373,198022430,191154276,180915260,171115067,159138663,146364022,141213431,135534747,135006516,133851895,115169878,107349540,102531392,90354753,81195210,78077248,59128983,63025520,48129895,51304566]
#     chrN = 21
    length = HiCsample.shape[0]
    prediction_1 = np.zeros((length, length))
    # Paste each 28x28 output at the centre of its 40x40 input window
    # (offset 6 = half of the 12-pixel shrinkage).
    for i in range(index.shape[0]):
        x = int(index[i][0])
        y = int(index[i][1])
        prediction_1[x+6:x+34, y+6:y+34] = y_predict[i]
        # NOTE(review): this assignment is duplicated in the original; the
        # second write is a no-op.
        prediction_1[x+6:x+34, y+6:y+34] = y_predict[i]
    return prediction_1
# -
# ## Apply HiCPlus to our datasets
# Enhance every (species, subsampling) matrix with the pretrained model.
DataBoosted={}
for keySp in KeysSpec:
    DataBoosted[keySp]={}
    print keySp
    for keySu in KeysSub:
        print '\t' + keySu
        DataBoosted[keySp][keySu] = HiCPlus(Data[keySp][keySu])
# # Display Images
# +
# keySp = 'ESC'
# keySu = '1'
figpath = 'Figures/'
if not os.path.exists(figpath):
os.makedirs(figpath)
for keySp in KeysSpec:
print keySp
for keySu in KeysSub:
print '\t' + keySu
for Wm in [100,300,3000]:
print '\t' + '\t' + str(Wm)
W = [0,Wm]
f,ax = plt.subplots(1,4,figsize=(20,5))
ax[0].imshow(np.log10(Data[keySp][keySu])[W[0]:W[1],W[0]:W[1]],cmap='jet')
ax[0].set_title('Data Subsampled')
ax[1].imshow(np.log10(DataBoosted[keySp][keySu])[W[0]:W[1],W[0]:W[1]],cmap='jet')
ax[1].set_title('Data Enhanced with HiCPlus')
ax[2].imshow(np.log10(DataBoostHiC[keySp][keySu])[W[0]:W[1],W[0]:W[1]],cmap='jet')
ax[2].set_title('Data Enhanced with BoostHiC')
ax[3].imshow(np.log10(Data[keySp]['100'])[W[0]:W[1],W[0]:W[1]],cmap='jet')
ax[3].set_title('Original Data')
plt.savefig(figpath + 'CompareBoost_W'+ str(W[1]) + '_Spec' + keySp + '_Res' + keySu + '.png',dpi=300,format='png')
plt.show()
# -
# # Saving
# Write the HiCPlus-enhanced matrices back to per-species HDF5 files.
for keySp in KeysSpec:
    path = data_path[keySp] + '/HiCPlus/'
    if not os.path.exists(path):
        os.makedirs(path)
    print keySp
    for keySu in KeysSub:
        print '\t' + keySu
        f = h5py.File(path + 'chr16_basic_' + keySu + '_HiCPlus.hdf5','w')
        f['data'] = DataBoosted[keySp][keySu]
        f.close()
# # Debugging
# +
# Step-by-step re-run of the HiCPlus() pipeline on the last (keySp, keySu)
# pair, so intermediate objects can be inspected interactively.
HiCsample = Data[keySp][keySu]
## Subdivide the HiC matrix in subimages
low_resolution_samples, index = divide(HiCsample)
low_resolution_samples = np.minimum(HiC_max_value, low_resolution_samples)
batch_size = 32
## Reshape the high-quality Hi-C sample as the target value of the training.
sample_size = low_resolution_samples.shape[-1]
padding = conv2d1_filters_size + conv2d2_filters_size + conv2d3_filters_size - 3
half_padding = padding / 2
output_length = sample_size - padding
print low_resolution_samples.shape
## Data Loader
lowres_set = data.TensorDataset(torch.from_numpy(low_resolution_samples), torch.from_numpy(np.zeros(low_resolution_samples.shape[0])))
lowres_loader = torch.utils.data.DataLoader(lowres_set, batch_size=batch_size, shuffle=False)
hires_loader = lowres_loader
## Get Model
model = models.Net(40, 28)
model.load_state_dict(torch.load('model/pytorch_model_12000'))
if use_gpu:
    model = model.cuda()
_loss = nn.MSELoss()
## Make predictions
running_loss = 0.0
running_loss_validate = 0.0 # WARNING : NOT USED
reg_loss = 0.0 # WARNING : NOT USED
# -
# Pull a few batches through the loader (round-trip to GPU and back)
# to inspect the tensors, without running the model.
ll = []
for i,l in enumerate(lowres_loader):
    l = Variable(l[0]).float()
    l = l.cuda()
    l = l.cpu()
    ll.append(l)
    if i==3:
        break
l = torch.cat(ll)
l[0]
| HiCPlus_Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Demonstrations of the different ways to add items to a list.

# append: add a single object to the end (a list argument becomes ONE element).
l = list(range(3))
print(l)
l.append(100)
print(l)
l.append('new')
print(l)
l.append([3, 4, 5])
print(l)
# extend: add every element of an iterable to the end.
l = list(range(3))
print(l)
l.extend([100, 101, 102])
print(l)
l.extend((-1, -2, -3))
print(l)
# A string is an iterable of characters, so each character is added.
l.extend('new')
print(l)
# + concatenates into a NEW list; += extends the existing list in place.
l2 = l + [5, 6, 7]
print(l2)
l += [5, 6, 7]
print(l)
# insert: add a single object at the given index (negative indices count
# from the end; -1 inserts just before the last element).
l = list(range(3))
print(l)
l.insert(0, 100)
print(l)
l.insert(-1, 200)
print(l)
l.insert(0, [-1, -2, -3])
print(l)
# Zero-length slice assignment inserts elements without replacing any...
l = list(range(3))
print(l)
l[1:1] = [100, 200, 300]
print(l)
# ...while a non-empty slice is replaced by the new elements.
l = list(range(3))
print(l)
l[1:2] = [100, 200, 300]
print(l)
| notebook/list_add_item.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
# %matplotlib inline
# +
#Read the Stack Overflow survey data and glimpse it
df = pd.read_csv('D:/blog/survey_results_public.csv')
df.head()
# -
# List all available survey columns.
df.columns
# Data Understanding
def display_bar_chart(df, column, title):
    '''
    Plot a bar chart of the ten most frequent values in a column,
    expressed as fractions of the total number of rows.
    Parameters:
    df: a dataframe
    column: the column which we want to show
    title: the title of the chart
    Returns:
    None
    '''
    counts = df[column].value_counts()
    top_fractions = counts.iloc[:10] / df.shape[0]
    top_fractions.plot(kind="bar")
    plt.title(title)
#Show the distribution of the most frequent answers for two columns of interest.
display_bar_chart(df, "Gender", "What is your gender?")
display_bar_chart(df, "WelcomeChange", "How is the company treating you?")
# Inspect the raw YearsCodePro values before converting them to numbers.
df['YearsCodePro'].unique()
def YearsCodePro(df):
    """
    Convert the YearsCodePro column to numeric values (in place) so that
    means can be computed.

    'Less than 1 year' becomes 0, 'More than 50 years' becomes a single
    random draw from [50, 60) (shared by every such row within one call,
    as in the original), missing values stay NaN, and every other entry
    is parsed as an integer.

    Parameters:
        df: a dataframe with a 'YearsCodePro' column of strings/NaN

    Returns:
        dataframe: the same dataframe, with 'YearsCodePro' numeric
    """
    # Draw the >50-years replacement once per call, as the original did.
    more_than_50 = np.random.uniform(low=50, high=60)

    def _convert(x):
        # pd.isna is the reliable NaN test; the original's membership test
        # against [np.nan] only worked for the np.nan singleton.
        if pd.isna(x):
            return np.nan
        if x == 'Less than 1 year':
            return 0
        if x == 'More than 50 years':
            return more_than_50
        return int(x)

    df['YearsCodePro'] = df['YearsCodePro'].apply(_convert)
    return df
def JobSat(df):
    """
    Convert the JobSat satisfaction labels to a 1-5 numeric scale (in place)
    so that means can be computed.

    Parameters:
        df: a dataframe with a 'JobSat' column of answer strings/NaN

    Returns:
        dataframe: the same dataframe, with 'JobSat' numeric

    Raises:
        KeyError: if an answer string is not one of the known labels.
    """
    sat_map = {'Very dissatisfied' : 1,
               'Slightly dissatisfied' : 2,
               'Neither satisfied nor dissatisfied' : 3,
               'Slightly satisfied' : 4,
               'Very satisfied' : 5,
              }
    # BUGFIX: the original guard `x == np.nan` is always False (NaN != NaN);
    # it only worked by accident when the missing value was the np.nan
    # singleton present as a dict key. pd.isna handles any NaN.
    df['JobSat'] = df['JobSat'].apply(lambda x: np.nan if pd.isna(x) else sat_map[x])
    return df
# +
# Make the survey columns numeric (in place on df) so groupby means work.
df_clean_years_coded_pro = YearsCodePro(df)
useful_df = JobSat(df_clean_years_coded_pro)
#useful_df = OrgSize(df_clean_years_coded_pro)
# -
def Gender(df):
    """
    Encode the Gender column numerically (in place):
    0 = Man, 1 = Woman, 2 = any non-binary and/or multiple selection.

    Parameters:
        df: a dataframe with a 'Gender' column of answer strings/NaN

    Returns:
        dataframe: the same dataframe, with 'Gender' numeric

    Raises:
        KeyError: if an answer string is not one of the known labels.
    """
    gender_map = {'Man' : 0,
                  'Woman' :1 ,
                  'Man;Non-binary, genderqueer, or gender non-conforming' : 2,
                  'Woman;Non-binary, genderqueer, or gender non-conforming' : 2,
                  'Woman;Man;Non-binary, genderqueer, or gender non-conforming' : 2,
                  'Woman;Man': 2,
                  'Non-binary, genderqueer, or gender non-conforming':2,
                 }
    # BUGFIX: `x == np.nan` is always False (NaN != NaN); use pd.isna so
    # missing answers are preserved instead of relying on the np.nan
    # singleton being a dict key.
    df['Gender'] = df['Gender'].apply(lambda x: np.nan if pd.isna(x) else gender_map[x])
    return df
comparison2 = Gender(useful_df)
comparison2
# Mean of every numeric column per (Gender, OrgSize) group.
comparison = comparison2.groupby(['Gender','OrgSize']).mean()
comparison
def OrgSize(df):
    """
    Encode the OrgSize answer strings as ordinal integers (1 = freelancer
    ... 9 = 10,000+ employees) and reset the index so grouped frames become
    plottable.

    Parameters:
        df: a dataframe (or groupby result) with 'OrgSize' in its columns
            or index

    Returns:
        dataframe: a copy with the index reset and 'OrgSize' numeric

    Raises:
        KeyError: if an answer string is not one of the known labels.
    """
    OrgSize_map = {'2 to 9 employees' : 2,
                   '1,000 to 4,999 employees' :7 ,
                   'Just me - I am a freelancer, sole proprietor, etc.' : 1,
                   '10,000 or more employees' : 9,
                   '10 to 19 employees' : 3,
                   '500 to 999 employees': 6,
                   '20 to 99 employees': 4,
                   '100 to 499 employees': 5,
                   '5,000 to 9,999 employees': 8,
                  }
    df_graph = df.reset_index()
    # pd.isna guard instead of relying on np.nan dict-key identity, which
    # breaks for non-singleton float NaNs (consistent with JobSat/Gender).
    df_graph['OrgSize'] = df_graph['OrgSize'].apply(lambda x: np.nan if pd.isna(x) else OrgSize_map[x])
    df_graph['OrgSize'] = pd.to_numeric(df_graph['OrgSize'])
    return df_graph
# Encode organisation size ordinally and use it as the x-axis.
comparison_graph = OrgSize(comparison)
comparison_graph = comparison_graph.sort_values(by='OrgSize')
comparison_graph.set_index('OrgSize', inplace=True)
# +
#comparison_graph.set_index('OrgSize', inplace=True)
# One line per gender code: mean job satisfaction vs organisation size.
comparison_graph.groupby('Gender')['JobSat'].plot()
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.title("Comparison between Organization size and job satisfaction based on gender")
plt.xlabel('OrgSize')
plt.ylabel('JobSat')
# -
# Mean years of professional coding experience vs organisation size.
comparison_graph.groupby('Gender')['YearsCodePro'].plot(legend=True)
plt.title("How many years at coding");
plt.xlabel('Org_size')
plt.ylabel('YearsCodePro')
# Mean weekly work hours vs organisation size.
comparison_graph.groupby('Gender')['WorkWeekHrs'].plot(legend=True)
plt.title("Is any particular gender being overworked?");
plt.xlabel('Org_size')
# NOTE(review): this y-label looks like a copy-paste leftover -- the plotted
# column is WorkWeekHrs, not YearsCodePro.
plt.ylabel('YearsCodePro')
# Mean of every numeric column per (Gender, WelcomeChange) group.
comparison3 = comparison2.groupby(['Gender','WelcomeChange']).mean()
comparison3
def WelcomeChange(df):
    """
    Encode the 'WelcomeChange' survey answers as integer codes (1-6) and
    reset the index so grouped frames become plottable.

    Parameters:
        df: a dataframe (or groupby result) with 'WelcomeChange' in its
            columns or index

    Returns:
        dataframe: a copy with the index reset and 'WelcomeChange' numeric

    Raises:
        KeyError: if an answer string is not one of the known labels.
    """
    welcome_map = {'A lot less welcome now than last year' : 1,
                   'A lot more welcome now than last year' :2,
                   'Just as welcome now as I felt last year' : 3,
                   'Not applicable - I did not use Stack Overflow last year' : 4,
                   'Somewhat less welcome now than last year' : 5,
                   'Somewhat more welcome now than last year': 6,
                  }
    df_graph = df.reset_index()
    # BUGFIX: the original applied the mapping to df_graph['OrgSize'] and
    # converted that column, leaving 'WelcomeChange' untouched (and raising
    # KeyError, since OrgSize values are not keys of this map).
    df_graph['WelcomeChange'] = df_graph['WelcomeChange'].apply(
        lambda x: np.nan if pd.isna(x) else welcome_map[x])
    df_graph['WelcomeChange'] = pd.to_numeric(df_graph['WelcomeChange'])
    return df_graph
| .ipynb_checkpoints/Blog_post-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ceus
# language: python
# name: ceus
# ---
# # Contiguity diagram
#
# Computational notebook 06 for **Morphological tessellation as a way of partitioning space: Improving consistency in urban morphology at the plot scale**.
#
#
# <NAME>., <NAME>., <NAME>. and <NAME>. (2020) _‘Morphological tessellation as a way of partitioning space: Improving consistency in urban morphology at the plot scale’_, Computers, Environment and Urban Systems, 80, p. 101441. doi: [10.1016/j.compenvurbsys.2019.101441](http://doi.org/10.1016/j.compenvurbsys.2019.101441).
#
# Archived version of this repository is stored at the University of Strathclyde KnowledgeBase at DOI [10.15129/c766db26-3fa8-45c6-8218-098d529571fc](https://doi.org/10.15129/c766db26-3fa8-45c6-8218-098d529571fc).
#
# Contact: <EMAIL>
#
# Date: 29/03/2020
#
# Note: notebook has been cleaned and released retroactively. It is likely that different versions of packages were initially used, but we made sure that the results remained unaltered.
#
# ---
# **Description**
#
# This notebook generates figure 14, the illustration of tessellation-based contiguity matrix.
#
# ---
# **Data**
#
# The source of the data used wihtin the research is the Amtliche Vermessung dataset accessible from the Zurich municipal GIS open data portal (https://maps.zh.ch). From it can be extracted the cadastral layer (`Liegenschaften_Liegenschaft_Area`) and the layer of buildings (all features named `Gebäude`). All data are licensed under CC-BY 4.0.
#
# Source data: Vektor-Übersichtsplan des Kantons Zürich, 13.03.2018, Amt für Raumentwicklung Geoinformation / GIS-Produkte, Kanton Zürich, https://opendata.swiss/de/dataset/vektor-ubersichtsplan1
#
# --
#
# Data structure:
#
# ```
# data/
# contiguity_diagram.gpkg - samples to be used in diagram
# blg_s
# tess_s
# blg_c
# tess_c
# ```
import geopandas as gpd
import libpysal
from splot.libpysal import plot_spatial_weights
import matplotlib.pyplot as plt
import pandas as pd
# +
# Load the four sample layers used for the diagram (buildings `blg_*` and
# tessellation cells `tess_*`, two sample areas: suffixes _s and _c).
path = (
    "data/contiguity_diagram.gpkg"
)
blg_s = gpd.read_file(path, layer="blg_s")
tess_s = gpd.read_file(path, layer="tess_s")
blg_c = gpd.read_file(path, layer="blg_c")
tess_c = gpd.read_file(path, layer="tess_c")
# Merge both samples and align buildings with tessellation cells via uID.
blg = pd.concat([blg_s, blg_c])
tess = pd.concat([tess_s, tess_c])
blg = blg.sort_values("uID")
blg.reset_index(inplace=True)
# Keep only cells that have a matching building, in the same uID order.
tess = tess.loc[tess["uID"].isin(blg["uID"])]
tess = tess.sort_values("uID")
tess.reset_index(inplace=True)
# Queen contiguity computed on tessellation cells, drawn over the buildings.
weights = libpysal.weights.contiguity.Queen.from_dataframe(tess)
f, ax = plt.subplots(figsize=(20, 10))
tess.plot(ax=ax)
plot_spatial_weights(weights, blg, ax=ax)
#plt.savefig(
#    "contiguity_diagram.svg",
#    dpi=300,
#    bbox_inches="tight",
#)
# -
| 06_Contiguity_diagram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Selenium web scraper using chromium or chrome - for videos
# v0.4.2-final
import os
# Add the chromedriver to the path, by default in the current folder of the notebook in a subfolder webdriver, just place the binary inside
# Download the exact version of the webdriver for your version of Chrome: https://chromedriver.chromium.org/downloads
# NOTE(review): the ";" separator and "\\" are Windows-specific.
os.environ['PATH'] += ";%swebdriver" % (os.path.dirname(os.path.realpath("__file__")) + '\\')
os.environ['PATH']
# Parameters
cookies = [{'name': 'cookie1', 'value': 'value1', 'domain': 'website.com'},
           {'name': 'cookie2', 'value': 'value2', 'domain': 'website.com'}
          ] # cookies to get authenticated, can also use password via selenium IDE but this adds more steps and is less secure
# If this does not work or you don't get how to do that, simply log manually in the window that will open.
#url = "https://www.sleep2021.org/2021/SLEEP2021/PosterTitles.asp?PosterSortOrder=num&pfp=BrowsebyPosterID"
curpath = os.path.dirname(os.path.realpath("__file__"))
rootfolder = "%s/%s" % (curpath, 'downloaded') # local base folder where to save to
# +
from lxml import etree
from pathvalidate import sanitize_filename
from html2text import html2text
import time
import requests
import random
from tqdm.auto import tqdm
# Generated by Selenium IDE
import pytest
import time
import json
#from selenium import webdriver
from seleniumwire import webdriver # this is NOT autogenerated, this allows to sniff media files
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class Scraper():
    """Selenium(-wire) based scraper for a conference website.

    Walks a listing page, opens each entry, saves the abstract page as
    HTML and markdown, and downloads the media files (poster image, audio)
    sniffed from the browser's network traffic.
    """

    def setup_method(self, method=None, cookies=None):
        """Start a Chrome driver and initialise the URL store.

        NOTE(review): the `cookies` parameter is accepted but unused here;
        cookies are loaded via preset_cookies(). Kept for signature
        compatibility with existing callers.
        """
        self.driver = webdriver.Chrome()
        self.store = {}
        self.store['base'] = "https://www.website.com/"
        self.store['dummyurl'] = "https://www.website.com/error404" # can also try to access an image instead
        self.store['dummyurl_redirect'] = "https://www.website.com/frontpage-after-login" # url to access after, because some websites require users to access the frontpage before being able to access sublinks as a protection against bots
        self.store['realbase'] = "https://www.website.com/listing-of-things-to-download"
        self.vars = {}

    def preset_cookies(self, cookies=None):
        """Load authentication cookies into the driver session."""
        # Navigate with cookies, need to open a dummy url on the same domain and then only can we open the true url we want
        # From: https://stackoverflow.com/questions/36305660/selenium-js-add-cookie-to-request
        #self.driver.delete_all_cookies()
        #self.driver.get_cookies()
        # Navigate to a dummy url on the same domain.
        self.driver.get(self.store['dummyurl'])
        # Load cookies
        if cookies:
            for c in cookies:
                # BUGFIX: was `t.driver.add_cookie(c)`, which relied on the
                # module-level instance `t`; use self so any instance works.
                self.driver.add_cookie(c)
        # url to access after and wait a bit, otherwise other requests will fail, because some websites require users to access the frontpage before being able to access sublinks as a protection against bots
        self.driver.get(self.store['dummyurl_redirect'])
        time.sleep(3)

    def count_links(self):
        """Open the listing page and store the number of entry links."""
        # Go to posters listing and scrape links
        self.driver.get(self.store['realbase'])
        all_links = self.driver.find_elements(By.XPATH, "//ul[@id=\'agenda\']//a") # need to fetch all links everytime, otherwise they will become detached
        # Count the number of links to make a loop
        self.vars['all_links_count'] = len(all_links)

    def scrape_all_abstracts(self, restart=None):
        """Scrape every abstract and its media files.

        Parameters:
            restart: if set, skip entries with an index below this value
                (resume an interrupted run).

        Returns:
            1 on completion.
        """
        # Main loop to scrape everything
        self.count_links()
        for abstract_id in tqdm(range(self.vars['all_links_count'])):
            if restart:
                if abstract_id < restart:
                    continue
            # Go to posters listing and scrape links
            self.driver.get(self.store['realbase'])
            all_links = self.driver.find_elements(By.XPATH, "//ul[@id=\'agenda\']//a") # need to fetch all links everytime, otherwise they will become detached
            try:
                all_links[abstract_id].click()
            except Exception:  # was a bare except; don't swallow KeyboardInterrupt
                continue # skip to the next link if this one has issues
            # Download files
            self.download_abstract()
            self.download_poster_mediafiles()
            # Wait a random time to avoid bot being detected
            time.sleep(random.uniform(1, 10))
        # Done
        return(1)

    def scrape_one_abstract(self, abstract_id):
        """Scrape a single entry by index (falls back to the next entry on error).

        Returns:
            1 on completion.
        """
        # Go to posters listing and scrape links
        self.driver.get(self.store['realbase'])
        all_links = self.driver.find_elements(By.XPATH, "//ul[@id=\'agenda\']//a") # need to fetch all links everytime, otherwise they will become detached
        try:
            all_links[abstract_id].click()
        except Exception:  # was a bare except
            # skip to the next link if this one has issues
            abstract_id += 1
            all_links[abstract_id].click()
        # Download files
        self.download_abstract()
        self.download_poster_mediafiles()
        # Done
        return(1)

    def download_abstract(self):
        """Save the current abstract page as HTML and markdown.

        Also creates the per-poster output folder and records it in
        self.vars['poster_folder'] for download_poster_mediafiles().
        """
        # Wait a bit because otherwise the page may not have loaded yet
        time.sleep(2) # TODO: for more robust methods, see https://stackoverflow.com/questions/5868439/wait-for-page-load-in-selenium and https://artoftesting.com/wait-for-page-to-load-selenium-webdriver-java
        # Get HTML source code
        poster_abstract = self.driver.page_source
        # Extract abstract title
        tree = etree.HTML(poster_abstract)
        r = tree.xpath('//h1')[0]
        poster_title = r.text
        # Create poster folder
        poster_folder = "%s/%s" % (rootfolder, sanitize_filename(poster_title))
        self.vars['poster_folder'] = poster_folder
        if not os.path.exists(poster_folder):
            os.makedirs(poster_folder)
        # Save HTML in the adequate folder
        with open("%s/abstract.html" % (poster_folder), "wb") as f:
            f.write(bytes(poster_abstract, encoding='utf-8'))
        # Also save the abstract as a text (markdown) file, converting and removing all superfluous HTML markups
        poster_abstract_body = tree.xpath('//div[@class=\'main-popup-content\']')
        poster_abstract_body_html = etree.tostring(poster_abstract_body[0], pretty_print=True)
        poster_abstract_body_text = html2text(str(poster_abstract_body_html)).replace('\\t', '').replace('\\n', '')[2:-1]
        with open("%s/abstract.md" % (poster_folder), "wb") as f:
            f.write(bytes(poster_abstract_body_text, encoding='utf-8'))
        # To download complete source code with CSS, JS etc:
        # https://stackoverflow.com/questions/42900214/how-to-download-a-html-webpage-using-selenium-with-python
        # (removed a stray no-op `tree.xpath('//h1')[0].text` expression here)

    def download_poster_mediafiles(self):
        """Sniff and download the poster image and audio of the current entry."""
        # TODO: In the future, use Selenium if there are securities: https://sqa.stackexchange.com/questions/2197/how-to-download-a-file-using-seleniums-webdriver
        # Clear previous requests, otherwise we will keep on redownloading the same stuff again and again
        del self.driver.requests
        # Access poster
        self.driver.find_element(By.CSS_SELECTOR, ".pull-right > .btn-lg").click()
        # Sniff media files (audio, poster)
        time.sleep(3) # wait a bit for the poster to load, TODO: see driver.wait_for_request()
        # Wait for the request/response to complete
        self.driver.wait_for_request('4000px')
        mediafiles = set([])
        for request in self.driver.requests:
            if request.response:
                # Keep the high-resolution poster image and any audio track.
                if '4000px.png' in request.url or request.response.headers['content-type'] == 'audio/mpeg':
                    mediafiles.add(request.url)
                    #print(
                    #    request.url,
                    #    request.response.status_code,
                    #    request.response.headers['Content-Type']
                    #)
        # Download media files
        poster_folder = self.vars['poster_folder']
        for file_url in mediafiles:
            file_dl = requests.get(file_url, allow_redirects=True)
            filename = file_url.rsplit('/', 1)[1]
            open('%s/%s' % (poster_folder, filename), 'wb').write(file_dl.content)

    def teardown_method(self, method=None):
        """Close the browser and release the driver."""
        self.driver.quit()
# Instantiate the scraper; the bare `t` echoes the object in the notebook.
t = Scraper()
t
# -
# Start the browser and load the authentication cookies.
t.setup_method()
t.preset_cookies(cookies=cookies)
import re
from sanitize_filename import sanitize
import os
# XPath can be tested with Chrome's developer's tool, in the Elements tab, in place of a search string
t.store['realbase'] = "https://www.website.com/listing-of-things-to-download"
t.driver.get(t.store['realbase'])
all_links = t.driver.find_elements(By.XPATH, "//ul[@id=\'agenda\']//li[contains(@class, \'list-group-item\')]/div[contains(@class, \'prestitle\')]/span[position()=1]") # need to fetch all links everytime, otherwise they will become detached
print(len(all_links))
# Outer loop: one listing entry per iteration; inner loop: one sub-talk each.
for link_idx in tqdm(range(len(all_links))):
    # Re-open the listing and re-fetch the links (they detach after navigation).
    t.driver.get(t.store['realbase'])
    all_links = t.driver.find_elements(By.XPATH, "//ul[@id=\'agenda\']//li[contains(@class, \'list-group-item\')]/div[contains(@class, \'prestitle\')]/span[position()=1]") # need to fetch all links everytime, otherwise they will become detached
    all_links[link_idx].click()
    time.sleep(3) # wait a bit for the list to show
    h1 = sanitize(t.driver.find_elements(By.XPATH, r"//h1")[0].text)
    all_sub_links = t.driver.find_elements(By.XPATH, r"//div[contains(@class, 'current-card')]//li[contains(@class, 'loadbyurl')]")
    print(len(all_sub_links))
    slink_idx = 0
    for _ in tqdm(range(len(all_sub_links))):
        # Navigate back to the listing for every sub-link (same detach issue).
        t.driver.get(t.store['realbase'])
        all_links = t.driver.find_elements(By.XPATH, "//ul[@id=\'agenda\']//li[contains(@class, \'list-group-item\')]/div[contains(@class, \'prestitle\')]/span[position()=1]") # need to fetch all links everytime, otherwise they will become detached
        all_links[link_idx].click()
        time.sleep(3) # wait a bit for the list to show
        all_sub_links = t.driver.find_elements(By.XPATH, r"//div[contains(@class, 'current-card')]//li[contains(@class, 'loadbyurl')]")
        try:
            all_sub_links[slink_idx].click()
            slink_idx += 1
        except Exception as exc:
            slink_idx += 1
            print('Not interactable')
            continue
        time.sleep(2)
        h1_2 = sanitize(t.driver.find_elements(By.XPATH, r"//h1")[0].text)
        # Compose "<entry title> - <sub-talk title>" as the base filename.
        video_title = "%s - %s" % (h1, h1_2)
        # Sanitize to make a valid filename
        video_title = sanitize(video_title) # remove forbidden characters
        video_title = video_title[:150] # limit if too long
        print(video_title)
        try:
            watch_button = t.driver.find_elements(By.XPATH, r"//li[contains(@class, 'speakerrow')]//a[text()[contains(.,'Watch Now')]]")[0]
            #watch_button[0].click()
            t.driver.get(watch_button.get_attribute('href'))
            time.sleep(3)
            subpath = "%s/%s/%s" % (rootfolder, h1, slink_idx)
            os.makedirs(subpath)
            # Download talks (split per slide)
            videofiles = re.findall(r'https:[^"]+mp4[^"]+', t.driver.page_source, re.I)[1:] # the first is the one playing, it's the same as the second item which is the first in the JS list
            for idx, file_url in enumerate(videofiles):
                file_url = file_url.replace('\\', '') # unescape (dirtily)
                file_dl = requests.get(file_url, allow_redirects=True)
                filename = "%s Slide%i.mp4" % (video_title, idx)
                open('%s/%s' % (subpath, filename), 'wb').write(file_dl.content)
            # Download slides
            pngfiles = re.findall(r'https:\\[^"]+(?:png|jpg|jpeg)[^"]+', t.driver.page_source, re.I) # the first is the one playing, it's the same as the second item which is the first in the JS list
            for idx, file_url in enumerate(pngfiles):
                #print(file_url)
                file_url = file_url.replace('\\', '') # unescape (dirtily)
                file_dl = requests.get(file_url, allow_redirects=True)
                filename = "%s Slide%i.png" % (video_title, idx)
                open('%s/%s' % (subpath, filename), 'wb').write(file_dl.content)
        except Exception as exc:
            print(exc)
            continue
print('All done!')
# ------------------------------------
# ## Tests
# Quick connectivity check: reuse session cookies captured from a logged-in
# browser to verify the poster-list page is still reachable.
import requests
# NOTE(review): '<KEY>' is a placeholder — a real AccountKey must be filled in.
cookies = {'14014': 'AccountKey=<KEY>',
           'ASPSESSIONIDCEBCRRRD': 'KEDCALBCPCELLBONDDOKBNDH',
           'ASPSESSIONIDQACCQDTR': 'NIKLFFKCKAKGEEOPGJJBIGHC',
           'ASPSESSIONIDQECCQDTR': 'MIKLFFKCCPPLDPCEIJDHIAJC'}
url = "https://www.sleep2021.org/2021/SLEEP2021/PosterTitles.asp?PosterSortOrder=num&pfp=BrowsebyPosterID"
r = requests.get(url, cookies=cookies)
# +
# Anything other than HTTP 200 means the cookies have expired.
if r.status_code != 200:
    print('Failure to connect! Update cookies. Response code:')
    print(r.status_code)
r.text
| python/my_web_scraper_selenium_chrome_videos_generic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="KhsH8YtlFPW5"
# # Data Loading
# + colab={"base_uri": "https://localhost:8080/"} id="15ocXVCEp7Ag" executionInfo={"status": "ok", "timestamp": 1627163213107, "user_tz": -120, "elapsed": 395, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="97c31fee-6b6f-4c71-a32e-6f73b2bbcaff"
from google.colab import drive
drive.mount('/content/drive')
# + id="RAChyLndFYxJ" executionInfo={"status": "ok", "timestamp": 1627163215567, "user_tz": -120, "elapsed": 1698, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import string
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer , CountVectorizer, TfidfTransformer
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
# + colab={"base_uri": "https://localhost:8080/"} id="qE3YRf0XZNM1" executionInfo={"status": "ok", "timestamp": 1627163216679, "user_tz": -120, "elapsed": 831, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="4259e16e-cd22-400b-b0a2-8848cf3ffa15"
# Fetch the NLTK resources used below: 'punkt' for word_tokenize and the
# English stopword list.
nltk.download('punkt')
nltk.download('stopwords')
stop_words = stopwords.words('english')
# + id="lFvYoDsMFW9P" executionInfo={"status": "ok", "timestamp": 1627163221571, "user_tz": -120, "elapsed": 3936, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
# Load the question-pairs training data from the mounted Google Drive.
train_df= pd.read_csv("/content/drive/MyDrive/Group_project/final_project/Dataset/train.csv")
#test_df= pd.read_csv("/content/drive/MyDrive/Group_project/final_project/Dataset/test.csv")
#test_labels_df= pd.read_csv("/content/drive/MyDrive/Group_project/final_project/Dataset/sample_submission.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="yNeu_Z2BUFin" executionInfo={"status": "ok", "timestamp": 1627163221613, "user_tz": -120, "elapsed": 363, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="c46d0d35-45a8-47b6-9ccb-bb28a98cea03"
train_df.head()
# + [markdown] id="MVoWuajK2YK9"
# # Functions
# + id="eClJT9hzX9Lm" executionInfo={"status": "ok", "timestamp": 1627163221616, "user_tz": -120, "elapsed": 223, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
def clean_text(text):
    """Normalise a question string.

    Lowercases, strips punctuation, tokenizes with NLTK, then keeps only
    alphanumeric tokens that are not English stopwords. Tabs are replaced
    with spaces in the result.
    """
    lowered = text.lower()
    no_punct = lowered.translate(str.maketrans('', '', string.punctuation))
    tokens = nltk.word_tokenize(no_punct)
    kept = []
    for tok in tokens:
        if tok.isalnum() and tok not in stop_words:
            kept.append(tok)
    return " ".join(kept).replace("\t", ' ')
# + id="xHkP1wVd2vW2" executionInfo={"status": "ok", "timestamp": 1627163221619, "user_tz": -120, "elapsed": 221, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##getting length of each question
def ques_len(train_df):
    """Return the number of space-separated words in a question string.

    Note: despite the parameter name, the argument is a single question
    string (this is used via ``Series.apply``), not a DataFrame.
    """
    return len(train_df.split(" "))
# + id="5DzQbnvG2-fC" executionInfo={"status": "ok", "timestamp": 1627163221626, "user_tz": -120, "elapsed": 225, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##getting the number of common words between the two questions
def word_common(train_df):
    """Count the distinct words shared by question1 and question2 of a row."""
    left = set(train_df['question1'].split(" "))
    right = set(train_df['question2'].split(" "))
    return len(left.intersection(right))
# + id="Omr8cWML3EJV" executionInfo={"status": "ok", "timestamp": 1627163221630, "user_tz": -120, "elapsed": 238, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##getting the difference of length between two words
def word_len_diff(train_df):
    """Absolute difference in word counts between question1 and question2."""
    n1 = len(train_df['question1'].split(" "))
    n2 = len(train_df['question2'].split(" "))
    return abs(n1 - n2)
# + [markdown] id="FnkrGbg77Ml-"
# # Data Exploration
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="GyTLj9-I9swE" executionInfo={"status": "ok", "timestamp": 1627163221647, "user_tz": -120, "elapsed": 238, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="96fabbea-b916-49b9-80ee-80bee256a9f3"
train_df.head()
# + [markdown] id="9JPqDdZTE2ep"
# **Checking for null values**
# + colab={"base_uri": "https://localhost:8080/"} id="VSZi1VdD9zGM" executionInfo={"status": "ok", "timestamp": 1627163221661, "user_tz": -120, "elapsed": 237, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="9a33c976-5617-4347-9268-3cefa416cb7f"
train_df.info()
# + [markdown] id="K5BDSUQV-zxU"
# Reviewing the number of records in each column, there are some questions that have null values
# + colab={"base_uri": "https://localhost:8080/"} id="SedvpXzD-Aaj" executionInfo={"status": "ok", "timestamp": 1627163221664, "user_tz": -120, "elapsed": 191, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="9c0fe217-78e7-4388-b3b6-334ac5be8885"
np.where(pd.isnull(train_df['question2']))
# + colab={"base_uri": "https://localhost:8080/"} id="SwaqOb6S_Uaj" executionInfo={"status": "ok", "timestamp": 1627163221674, "user_tz": -120, "elapsed": 166, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="abe492b6-2a8a-402d-a4b5-48bcc017c275"
np.where(pd.isnull(train_df['question1']))
# + colab={"base_uri": "https://localhost:8080/"} id="apJdx0R3_WZb" executionInfo={"status": "ok", "timestamp": 1627163221678, "user_tz": -120, "elapsed": 156, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="f403646a-ce34-4c28-b434-7f016aa41df8"
train_df.iloc[105780,:]
# + colab={"base_uri": "https://localhost:8080/"} id="wbIn081Z_n8M" executionInfo={"status": "ok", "timestamp": 1627163221680, "user_tz": -120, "elapsed": 144, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="5ea2b60f-751d-43d2-8491-f67f70325cff"
train_df.iloc[201841,:]
# + colab={"base_uri": "https://localhost:8080/"} id="yxb6WrB1_qxb" executionInfo={"status": "ok", "timestamp": 1627163221683, "user_tz": -120, "elapsed": 135, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="55b4808b-d889-4529-b3e7-41b00aa716aa"
train_df.iloc[363362,:]
# + [markdown] id="hoSCnifgFLB6"
#
# + [markdown] id="c7Tvje9iFftS"
# **Checking the number of unique question in the Dataset**
# + colab={"base_uri": "https://localhost:8080/"} id="h_L3TFbHEz-j" executionInfo={"status": "ok", "timestamp": 1627163222330, "user_tz": -120, "elapsed": 764, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="375d20c1-0a0d-4626-ae06-c13e5bd84f95"
print("Number of unique Questions in Column 1 :")
q1_unique=train_df['question1'].nunique()
q1_unique
# + colab={"base_uri": "https://localhost:8080/"} id="KCk2_wNiF1op" executionInfo={"status": "ok", "timestamp": 1627163222332, "user_tz": -120, "elapsed": 58, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="12292603-c167-4e3c-fda4-89e1612fe6e7"
print("Number of unique Questions in Column 2 :")
q2_unique=train_df['question2'].nunique()
q2_unique
# + colab={"base_uri": "https://localhost:8080/"} id="fk1MCC77F8ba" executionInfo={"status": "ok", "timestamp": 1627163222334, "user_tz": -120, "elapsed": 48, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="5eac348b-c525-4574-9a76-94e43efd55f7"
print("Number of unique Questions in All the dataset :")
all_questions= pd.concat([train_df['question1'] , train_df['question2']],axis=0, ignore_index=True)
all_unique=all_questions.nunique()
all_unique
# + colab={"base_uri": "https://localhost:8080/"} id="7U8jEy4LKcVp" executionInfo={"status": "ok", "timestamp": 1627163222336, "user_tz": -120, "elapsed": 36, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="a25c19c0-a3fa-483b-fbd8-b464791bad82"
train_df.shape[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 516} id="XS_tblakHwuZ" executionInfo={"status": "ok", "timestamp": 1627163222973, "user_tz": -120, "elapsed": 655, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="28102467-3a73-4ee7-f56f-c411e757092c"
###Bar plot for the number of unique questions in the dataset
##Calculating Percentages
# Share of questions (across both columns) that are unique vs repeated.
uniq_per=(all_unique/all_questions.shape[0])*100
not_per=((all_questions.shape[0]-all_unique)/all_questions.shape[0])*100
print("Percentage of unique Questions: ",np.round(uniq_per,2))
###Bar plot
fig = plt.subplots(figsize =(12, 8));
plt.bar(["Unique","Not Unique"], [uniq_per,not_per]);
plt.title("Number of unique questions in the Dataset");
plt.ylabel("Percentage");
plt.yticks(np.arange(0,110,10));
# + [markdown] id="t8APo5UiO5GA"
# We can see that the number of unique questions in the dataset is about **66**% of the dataset, and the rest are just repeated questions
# + [markdown] id="Zk8-mfcgPk2w"
# **Checking the label percentages**
# + colab={"base_uri": "https://localhost:8080/", "height": 373} id="JqzHpyhEPqaO" executionInfo={"status": "ok", "timestamp": 1627163222975, "user_tz": -120, "elapsed": 69, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="c2688b77-f85e-4fb9-c9b7-20473ce3aa10"
##plotting a pie chart for the labels
##Calculating Percentages
# Percentage of duplicate (is_duplicate == 1) vs non-duplicate pairs.
label_1 = (((train_df['is_duplicate']==1).sum())/train_df.shape[0])*100
label_2 = ((train_df['is_duplicate']==0).sum()/train_df.shape[0])*100
##Pie Chart
fig = plt.subplots(figsize =(10, 6));
plt.pie([label_1,label_2],labels=["Is duplicated", "Not Duplicated"],startangle=90,autopct='%1.0f%%', labeldistance=1.2,explode=[0.04,0]);
plt.title("Percentage of Each Label in The Dataset");
# + [markdown] id="Et4Ze7TVkBkS"
# **More of Exploration on the labeling Technique**
# + colab={"base_uri": "https://localhost:8080/"} id="rprrbiK2hBFc" executionInfo={"status": "ok", "timestamp": 1627163222981, "user_tz": -120, "elapsed": 70, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="030ca723-8b52-4ecb-ef43-c6d102c18c3b"
##Checking if there is a cases where q1= q2
df_temp=train_df[train_df.question1==train_df.question2]
print(df_temp.shape)
print(df_temp['is_duplicate'].shape)
# + id="jzVv2HfNkL8T" executionInfo={"status": "ok", "timestamp": 1627163332356, "user_tz": -120, "elapsed": 109430, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##Cleaning Text
##Removing Rows with NaN questions
# The three indices were identified as NaN rows during the exploration above.
# This works on a copy (df_temp2) so train_df itself is untouched here.
df_temp2=train_df.drop(index= [201841,105780,363362])
df_temp2['question1']=[clean_text(x) for x in df_temp2['question1']]
df_temp2['question2']=[clean_text(x) for x in df_temp2['question2']]
# + colab={"base_uri": "https://localhost:8080/", "height": 453} id="jGwlDmf1kBP6" executionInfo={"status": "ok", "timestamp": 1627163332360, "user_tz": -120, "elapsed": 109, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="70337515-6c7a-4db6-e66b-d1294a410660"
##Checking if there is a cases where q1= q2 after cleaning
df_temp2=df_temp2[df_temp2.question1==df_temp2.question2]
print(df_temp2.shape)
print(df_temp2[df_temp2['is_duplicate']==0].shape)
df_temp2[df_temp2['is_duplicate']==0]
# + [markdown] id="ZxCGfx5wnmZz"
# From the above results, we can see that after the cleaning process there are 16368 records where Q1 = Q2, and 3748 of them are labeled as different questions, so these questions need more exploration
# + colab={"base_uri": "https://localhost:8080/"} id="oAPFAKm7mOKC" executionInfo={"status": "ok", "timestamp": 1627163332362, "user_tz": -120, "elapsed": 101, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="f765f075-c947-40a9-afab-3629d5fce2a1"
##looking deeper into these questions
train_df.iloc[41,:]
# + colab={"base_uri": "https://localhost:8080/"} id="ZHggqRK1mce6" executionInfo={"status": "ok", "timestamp": 1627163332364, "user_tz": -120, "elapsed": 93, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="5dc8198d-b5b2-49ac-c749-df0e4887c375"
train_df.iloc[111,:].head()
# + colab={"base_uri": "https://localhost:8080/"} id="mZth0gyMmhdy" executionInfo={"status": "ok", "timestamp": 1627163332365, "user_tz": -120, "elapsed": 84, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="298ed218-1588-40a0-a3c3-68e31849fd91"
train_df.iloc[222,:]
# + colab={"base_uri": "https://localhost:8080/"} id="_ijjzhcumtx5" executionInfo={"status": "ok", "timestamp": 1627163332368, "user_tz": -120, "elapsed": 77, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="0a5c8b91-19f4-4d87-e3e5-8e8ba53061ba"
train_df.iloc[277,:]
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="kXzGUnwim4Xa" executionInfo={"status": "ok", "timestamp": 1627163332375, "user_tz": -120, "elapsed": 74, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="677bc267-8bf3-4a37-ccf5-c09cd405b14c"
##Taking a look into the real questions before cleaning
train_df.iloc[df_temp2[df_temp2['is_duplicate']==0].index,:]
# + [markdown] id="XWzcY-sZu9DJ"
# **Checking the length of each question**
# + id="_DtBFJWY2iRn" executionInfo={"status": "ok", "timestamp": 1627163333296, "user_tz": -120, "elapsed": 990, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##getting length of each question
##Dropping empty rows first
# Exploratory copy: add per-question word counts to spot degenerate questions.
df_temp3=train_df.drop(index= [201841,105780,363362])
df_temp3['quest1_len']=df_temp3['question1'].apply(ques_len)
df_temp3['quest2_len']=df_temp3['question2'].apply(ques_len)
# + colab={"base_uri": "https://localhost:8080/"} id="OA-qTTUJwOs_" executionInfo={"status": "ok", "timestamp": 1627163333298, "user_tz": -120, "elapsed": 24, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="fadfd926-9e9e-43aa-c15f-b79eb4713214"
(df_temp3['quest1_len'] < 2).sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="di7DkkyiyFoZ" executionInfo={"status": "ok", "timestamp": 1627163333304, "user_tz": -120, "elapsed": 23, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="02f830d0-f10f-40aa-85ed-551f85354072"
df_temp3[df_temp3['quest1_len'] < 2]
# + colab={"base_uri": "https://localhost:8080/"} id="0TXk-hYNxvIZ" executionInfo={"status": "ok", "timestamp": 1627163333692, "user_tz": -120, "elapsed": 407, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="75c70ee8-905c-44a7-e77f-eaef532759ca"
(df_temp3['quest2_len'] < 2).sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 731} id="x3VoCQ3zwb83" executionInfo={"status": "ok", "timestamp": 1627163333694, "user_tz": -120, "elapsed": 20, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="15d2f6de-1cc3-48bd-c61c-9f44860deb7d"
df_temp3[df_temp3['quest2_len'] < 2]
# + [markdown] id="pfqV5TITxXK3"
# **There are multiple questions whose length is less than 2 words; those must be meaningless sentences**
# + [markdown] id="L1CNrXChD0Q8"
# # Data & Text cleaning
# + id="Ps5pvFgMD4e5" executionInfo={"status": "ok", "timestamp": 1627163333701, "user_tz": -120, "elapsed": 24, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##Removing Rows with NaN questions
# Permanently remove the three NaN rows found during exploration.
train_df.drop(index= [201841,105780,363362],inplace=True)
# + id="JSYXxfZkzt7H" executionInfo={"status": "ok", "timestamp": 1627163334358, "user_tz": -120, "elapsed": 679, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##Getting length of each question
train_df['quest1_len']=train_df['question1'].apply(ques_len)
train_df['quest2_len']=train_df['question2'].apply(ques_len)
# + id="vXOm_LuTzJwR" executionInfo={"status": "ok", "timestamp": 1627163334755, "user_tz": -120, "elapsed": 403, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
###Removing Rows with question length is less than 2
train_df=train_df[train_df['quest1_len'] >1 ]
# + id="lYBGNJCSz80I" executionInfo={"status": "ok", "timestamp": 1627163334756, "user_tz": -120, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
###Removing Rows with question length is less than 2
train_df=train_df[train_df['quest2_len'] >1 ]
# + id="ODZOXZjxYYec" executionInfo={"status": "ok", "timestamp": 1627163444352, "user_tz": -120, "elapsed": 109604, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##Cleaning Text
# Apply the full normalisation pipeline (lowercase, punctuation/stopword
# removal) to both question columns in place.
train_df['question1']=train_df['question1'].apply(clean_text)
train_df['question2']=train_df['question2'].apply(clean_text)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="boZPCVUZX_5V" executionInfo={"status": "ok", "timestamp": 1627163444354, "user_tz": -120, "elapsed": 53, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="95683398-cc9f-4f28-b206-78d1420264a6"
train_df.head()
# + [markdown] id="Ezi0vPInaUWH"
# # Text Features
# + id="fliREM2UazOF" executionInfo={"status": "ok", "timestamp": 1627163451164, "user_tz": -120, "elapsed": 6856, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
###Getting the number of common words between the two questions
train_df['common_word_count']=train_df.apply( word_common,axis=1)
# + id="n9SyxiAPeqQk" executionInfo={"status": "ok", "timestamp": 1627163457125, "user_tz": -120, "elapsed": 5996, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
##Getting the difference between the number of words in the two questions
train_df['word_len_diff']=train_df.apply(word_len_diff,axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="M2lIPpZapYev" executionInfo={"status": "ok", "timestamp": 1627163457142, "user_tz": -120, "elapsed": 63, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="ebc10d0f-0fd0-4007-f405-229c5aff9685"
train_df.head()
# + id="lJ5vzh4epYev" executionInfo={"status": "ok", "timestamp": 1627163457145, "user_tz": -120, "elapsed": 60, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
train_df = train_df[['id','qid1','qid2', 'question1', 'question2', 'quest1_len','quest2_len','common_word_count','word_len_diff','is_duplicate']]
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="iUZ9DMD4pYew" executionInfo={"status": "ok", "timestamp": 1627163457148, "user_tz": -120, "elapsed": 62, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="76f255f2-fc14-4f86-ad55-6852436a878c"
train_df.head()
# + [markdown] id="w4YT6k81pYew"
# ### LDA Model
# + colab={"base_uri": "https://localhost:8080/"} id="3mkkDeilpYew" executionInfo={"status": "ok", "timestamp": 1627163460891, "user_tz": -120, "elapsed": 3801, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="618d4e0c-e2bc-4d98-8618-0542cf507633"
# !pip install gensim
# + id="CjW3wiRNpYex" executionInfo={"status": "ok", "timestamp": 1627163461285, "user_tz": -120, "elapsed": 402, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
import re
import gensim
from gensim import corpora
from nltk.corpus import stopwords
from nltk.stem.porter import *
from sklearn.feature_extraction.text import TfidfVectorizer
# Tokenizer regex: runs of word characters. (re.I has no effect on \w.)
words = re.compile(r"\w+",re.I)
# English stopword list used for token filtering below.
stopword = stopwords.words('english')
# + id="GwWJFdgIpYey" executionInfo={"status": "ok", "timestamp": 1627163461286, "user_tz": -120, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
def tokenize(df):
    """Add lowercase, stopword-free token lists for both question columns.

    Adds two columns, ``q1_tokens`` and ``q2_tokens``, to *df* (mutated in
    place) and returns it. Uses the module-level ``words`` regex and
    ``stopword`` list.

    Bug fix: the original checked ``i not in stopword`` on the *raw* token
    before lowercasing, so capitalised stopwords (e.g. "What", "The" —
    extremely common as question openers) were never removed. Tokens are
    now lowercased first, then filtered.
    """
    def _tokens(text):
        # Lowercase before the stopword test so "What" matches "what".
        return [tok for tok in (t.lower() for t in words.findall(text))
                if tok not in stopword]

    df["q1_tokens"] = [_tokens(q) for q in df.question1.tolist()]
    df["q2_tokens"] = [_tokens(q) for q in df.question2.tolist()]
    return df
def train_dictionary(df):
    """Build a gensim Dictionary over all q1/q2 token lists of *df*."""
    all_token_lists = df.q1_tokens.tolist() + df.q2_tokens.tolist()
    return corpora.Dictionary(all_token_lists)
# + id="xNt5p117pYey" executionInfo={"status": "ok", "timestamp": 1627163490285, "user_tz": -120, "elapsed": 29007, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
LDA_df = tokenize(train_df)
LDA_dictionary = train_dictionary(train_df)
# + id="TpkwibeWpYez" executionInfo={"status": "ok", "timestamp": 1627163490296, "user_tz": -120, "elapsed": 49, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
def get_vectors(df, dictionary):
    """Convert the token columns into sparse bag-of-words matrices.

    Returns a pair of sparse matrices (one row per question) for
    question1 and question2 respectively.
    """
    vocab_size = len(dictionary.token2id)
    bow_q1 = [dictionary.doc2bow(doc) for doc in df.q1_tokens.tolist()]
    bow_q2 = [dictionary.doc2bow(doc) for doc in df.q2_tokens.tolist()]
    mat_q1 = gensim.matutils.corpus2csc(bow_q1, num_terms=vocab_size)
    mat_q2 = gensim.matutils.corpus2csc(bow_q2, num_terms=vocab_size)
    # corpus2csc builds term-by-document; transpose to row-per-question.
    return mat_q1.transpose(), mat_q2.transpose()
# + id="a0CGeUWgpYez" executionInfo={"status": "ok", "timestamp": 1627163501017, "user_tz": -120, "elapsed": 10767, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
q1_vectors, q2_vectors = get_vectors(LDA_df,LDA_dictionary)
# + colab={"base_uri": "https://localhost:8080/", "height": 193} id="aSHyZZqYpYe0" executionInfo={"status": "ok", "timestamp": 1627163501026, "user_tz": -120, "elapsed": 49, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="d50ff67c-60bb-4d44-bc7c-48803ab77b0f"
LDA_df.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="dZYxrZZmpYe0" executionInfo={"status": "ok", "timestamp": 1627164961266, "user_tz": -120, "elapsed": 1460283, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="b9d2680b-413e-4643-9446-db98442a4b4d"
from sklearn.metrics.pairwise import cosine_similarity as cs
from sklearn.metrics.pairwise import manhattan_distances as md
from sklearn.metrics.pairwise import euclidean_distances as ed
from sklearn.neighbors import DistanceMetric
minkowski_distance = DistanceMetric.get_metric('minkowski')
def get_similarity_values(q1_vector, q2_vector):
    """Compute pairwise similarity/distance features for aligned row vectors.

    Iterates the two sparse matrices row by row (row i of each side is one
    question pair) and returns four parallel lists: cosine similarity,
    Manhattan distance, Euclidean distance and Minkowski distance.

    Bug fix: the original body iterated the module-level globals
    ``q1_vectors``/``q2_vectors`` and silently ignored its parameters;
    it now uses the arguments it is given (the call site passed the same
    globals, so results are unchanged there).
    """
    cosine_similarities = []
    manhattan_distances = []
    euclidean_distances = []
    minkowski_distances = []
    for row_a, row_b in zip(q1_vector, q2_vector):
        cosine_similarities.append(cs(row_a, row_b)[0][0])
        manhattan_distances.append(md(row_a, row_b)[0][0])
        euclidean_distances.append(ed(row_a, row_b)[0][0])
        # DistanceMetric.pairwise needs dense arrays.
        dist = minkowski_distance.pairwise(row_a.toarray(), row_b.toarray())
        minkowski_distances.append(dist[0][0])
    return (cosine_similarities, manhattan_distances,
            euclidean_distances, minkowski_distances)
# Compute the four distance features and attach them as columns.
# NOTE(review): the Euclidean distances are computed but never stored as a
# column, so they are not used as a model feature — confirm this is intended.
cosine_sim, manhattan_dis, eucledian_dis, minkowsk_dis = get_similarity_values(q1_vectors, q2_vectors)
train_df["cosine"] = cosine_sim
train_df["manhattan"] = manhattan_dis
train_df["minkowsk"] = minkowsk_dis
print(train_df.head())
# + id="h5Dh9vb4pYe1" executionInfo={"status": "ok", "timestamp": 1627164961610, "user_tz": -120, "elapsed": 417, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
# 80/20 train/test split. Features are every column from 'cosine' onward
# (cosine, manhattan, minkowsk); the target is 'is_duplicate'.
X_train, X_test = train_test_split(train_df, test_size=0.2, random_state=0)
ytrain = X_train["is_duplicate"]
ytest = X_test["is_duplicate"]
#print(df_train.head())
xtrain = X_train.loc[:,'cosine':]
xtest = X_test.loc[:,'cosine':]
# + id="3cLicYNApYe3" executionInfo={"status": "ok", "timestamp": 1627169639909, "user_tz": -120, "elapsed": 1834, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
# Baseline model: logistic regression on the raw distance features.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lr_normal=LogisticRegression()
lr_normal.fit(xtrain,ytrain)
Ypred_lr_normal_train=lr_normal.predict(xtrain)
Ypred_lr_normal_test=lr_normal.predict(xtest)
# + colab={"base_uri": "https://localhost:8080/"} id="Wp_amLbM-2vP" executionInfo={"status": "ok", "timestamp": 1627170091559, "user_tz": -120, "elapsed": 389, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="91ccfb5e-ac52-4e9a-a969-aa64a77d8bbd"
# Report train/test accuracy plus confusion matrix and per-class metrics
# for the baseline logistic regression.
from sklearn.metrics import classification_report, confusion_matrix , accuracy_score
text = " Logistic Regression"
print(text, " Train Accuracy : ", accuracy_score(ytrain,Ypred_lr_normal_train)*100)
print(text, " Test Accuracy : ", accuracy_score(ytest,Ypred_lr_normal_test)*100)
print("\n\t\tTEST DATA METRICS")
print(text, " Confusion Matrix: ",confusion_matrix(ytest, Ypred_lr_normal_test))
print(text, " Report : ")
print(classification_report(ytest,Ypred_lr_normal_test))
# + id="NlE9My5cpYe3" executionInfo={"status": "ok", "timestamp": 1627164963619, "user_tz": -120, "elapsed": 386, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
# Project the distance features onto a single linear discriminant axis.
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.preprocessing import StandardScaler
lda=LDA(n_components=1)
X_train_lda=lda.fit_transform(xtrain,ytrain)
# Bug fix: the original called fit_transform on the TEST set, refitting the
# projection using the test labels (data leakage, and an inconsistent axis).
# Only apply the projection learned on the training data.
X_test_lda = lda.transform(xtest)
# + id="0bPu583bpYe4" executionInfo={"status": "ok", "timestamp": 1627170174587, "user_tz": -120, "elapsed": 700, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}}
# Second classifier: logistic regression on the 1-D LDA projection.
lr = LogisticRegression().fit(X_train_lda, ytrain)
Ypred_lr_train = lr.predict(X_train_lda)
Ypred_lr_test = lr.predict(X_test_lda)
# + colab={"base_uri": "https://localhost:8080/"} id="1sSjpX0BpYe4" executionInfo={"status": "ok", "timestamp": 1627170234748, "user_tz": -120, "elapsed": 1119, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjSgDiriuvd3ZwzeX-j_wJc9wiB_r-7loHhfaSvvA=s64", "userId": "00907015042831639542"}} outputId="60ae1929-5674-424d-ede5-4b17bfd3ef6b"
# Same metric report as above, for the LDA-projected model.
text_1 = "Logistic Regression built on LDA"
lda_train_acc = accuracy_score(ytrain, Ypred_lr_train) * 100
lda_test_acc = accuracy_score(ytest, Ypred_lr_test) * 100
print(text_1, " Train Accuracy : ", lda_train_acc)
print(text_1, " Test Accuracy : ", lda_test_acc)
print("\n\t\tTEST DATA METRICS")
print(text_1, " Confusion Matrix: ", confusion_matrix(ytest, Ypred_lr_test))
print(text_1, " Report : ")
print(classification_report(ytest, Ypred_lr_test))
# + id="Vt7k-k-2rw_i"
| classification/Classification_with_distances.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 03a - BigQuery Machine Learning (BQML) - Machine Learning with SQL
#
# BigQuery has a number of machine learning algorithms callable directly from SQL. This gives the convenience of using the common language of SQL to "CREATE MODEL …". The library of available models is constantly growing and covers supervised, unsupervised, and time series methods as well as functions for evaluation - even anomaly detection from results, explainability and hyperparameter tuning. A great starting point for seeing the scope of available methods is [user journey for models](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-e2e-journey).
#
# In this demonstration, BigQuery ML (BQML) is used to create a logistic regression model.
#
# <p><center><a href="https://youtu.be/IcVyP_ZAXmY" target="_blank" rel="noopener noreferrer"><img src="architectures/thumbnails/playbutton/03a.png" width="50%"></a></center></p>
#
# **Prerequisites:**
#
# - 01 - BigQuery - Table Data Source
#
# **Overview:**
#
# - Train logistic regression model with BQML
# - CREATE MODEL …. model_type="LOGISTIC_REG"
# - Review training information
# - SELECT * FROM ML.TRAINING_INFO…
# - Evaluate the model's performance
# - SELECT * FROM ML.EVALUATE…
# - Review the classification errors with a confusion matrix
# - SELECT * FROM ML.CONFUSION_MATRIX…
# - Create prediction for data in BigQuery
# - SELECT * FROM ML.PREDICT
#
# **Resources:**
#
# - [BigQuery ML (BQML) Overview](https://cloud.google.com/bigquery-ml/docs/introduction)
# - [Overview of BQML methods and workflows](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-e2e-journey)
# - [BigQuery magics for jupyter notebooks](https://github.com/GoogleCloudPlatform/bigquery-notebooks/blob/main/notebooks/official/template_notebooks/bigquery_magic.ipynb)
#
# **Related Training:**
#
# - todo
# ---
# ## Vertex AI - Conceptual Flow
#
# <img src="architectures/slides/03a_arch.png">
#
# ---
# ## Vertex AI - Workflow
#
# <img src="architectures/slides/03a_console.png">
# ---
# ## Setup
# inputs:
# +
REGION = 'us-central1'         # GCP region for the resources in this notebook
PROJECT_ID = 'statmike-mlops'  # project that hosts the BigQuery dataset
DATANAME = 'fraud'             # dataset name, also used as the table/model prefix
NOTEBOOK = '03a'               # identifier for artifacts created by this notebook

# Model Training
VAR_TARGET = 'Class'           # label column the model predicts
VAR_OMIT = 'transaction_id'    # space-delimited columns to exclude from the features
# -
# packages:
from google.cloud import bigquery
# clients:
# NOTE(review): this rebinds the name `bigquery` from the imported module to a
# Client instance. Every later cell relies on `bigquery.query(...)`, so the
# module itself is no longer reachable under its own name after this line --
# consider renaming the client (e.g. `bq`) if more module attributes are needed.
bigquery = bigquery.Client()
# ---
# ## Train Model
#
# Use BigQuery ML to train multiclass logistic regression model:
# - https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create-glm
# - This uses the `splits` column that notebook `01` created
# - `data_split_method = CUSTOM` uses the column in `data_split_col` to assign training data for `FALSE` values and evaluation data for `TRUE` values.
# Train a logistic regression with BQML. The CUSTOM data split routes rows
# where custom_splits = FALSE (splits == 'TRAIN') to training and the rest to
# evaluation, while TEST rows are excluded entirely for final scoring later.
train_sql = f"""
CREATE OR REPLACE MODEL `{DATANAME}.{DATANAME}_lr`
OPTIONS
(model_type = 'LOGISTIC_REG',
auto_class_weights = TRUE,
input_label_cols = ['{VAR_TARGET}'],
data_split_col = 'custom_splits',
data_split_method = 'CUSTOM'
) AS
SELECT * EXCEPT({','.join(VAR_OMIT.split())}, splits),
CASE
WHEN splits = 'TRAIN' THEN FALSE
ELSE TRUE
END AS custom_splits
FROM `{DATANAME}.{DATANAME}_prepped`
WHERE splits != 'TEST'
"""
job = bigquery.query(query=train_sql)
job.result()  # block until the training job finishes
# Elapsed training time in seconds (displayed as the cell output).
(job.ended - job.started).total_seconds()
# Review the iterations from training:
training_info_sql = f"SELECT * FROM ML.TRAINING_INFO(MODEL `{DATANAME}.{DATANAME}_lr`) ORDER BY iteration"
bigquery.query(query=training_info_sql).to_dataframe()
# ### Check out this model in BigQuery Console:
# - Click: https://console.cloud.google.com/bigquery
# - Make sure project selected is the one from this notebook
# - Under Explore, expand this project and dataset
# - Expand Models and select the model create here
# ---
# ## Evaluate Model
# Review the model evaluation statistics on the Test/Train splits:
# Evaluation metrics computed on both the TRAIN and TEST splits, stacked
# into one result set with a SPLIT indicator column.
eval_sql = f"""
SELECT 'TRAIN' as SPLIT, * FROM ML.EVALUATE (MODEL `{DATANAME}.{DATANAME}_lr`,
(SELECT * FROM `{DATANAME}.{DATANAME}_prepped` WHERE SPLITS='TRAIN'))
UNION ALL
SELECT 'TEST' as SPLIT, * FROM ML.EVALUATE (MODEL `{DATANAME}.{DATANAME}_lr`,
(SELECT * FROM `{DATANAME}.{DATANAME}_prepped` WHERE SPLITS='TEST'))
"""
bigquery.query(query=eval_sql).to_dataframe()
# Review the confusion matrix for each split:
# Confusion matrix on the TRAIN split.
cm_train_sql = f"""
SELECT *
FROM ML.CONFUSION_MATRIX (MODEL `{DATANAME}.{DATANAME}_lr`,(
SELECT *
FROM `{DATANAME}.{DATANAME}_prepped`
WHERE splits = 'TRAIN')
)
"""
bigquery.query(query=cm_train_sql).to_dataframe()
# Confusion matrix on the VALIDATE split.
cm_validate_sql = f"""
SELECT *
FROM ML.CONFUSION_MATRIX (MODEL `{DATANAME}.{DATANAME}_lr`,(
SELECT *
FROM `{DATANAME}.{DATANAME}_prepped`
WHERE splits = 'VALIDATE')
)
"""
bigquery.query(query=cm_validate_sql).to_dataframe()
# Confusion matrix on the held-out TEST split.
cm_test_sql = f"""
SELECT *
FROM ML.CONFUSION_MATRIX (MODEL `{DATANAME}.{DATANAME}_lr`,(
SELECT *
FROM `{DATANAME}.{DATANAME}_prepped`
WHERE splits = 'TEST')
)
"""
bigquery.query(query=cm_test_sql).to_dataframe()
# ---
# ## Predictions
# Create a pandas dataframe with predictions for the test data in the table:
# Batch predictions for the TEST split, pulled into a pandas dataframe.
predict_sql = f"""
SELECT *
FROM ML.PREDICT (MODEL `{DATANAME}.{DATANAME}_lr`,(
SELECT *
FROM `{DATANAME}.{DATANAME}_prepped`
WHERE splits = 'TEST')
)
"""
pred = bigquery.query(query=predict_sql).to_dataframe()
# ML.PREDICT returns the input columns plus new columns prefixed `predicted_`.
pred.columns
# Show the actual label, predicted label, and per-class probabilities together:
pred[[VAR_TARGET, f'predicted_{VAR_TARGET}', f'predicted_{VAR_TARGET}_probs', 'splits']].head()
# Notice the nested dictionary for predicted probabilities. In BigQuery this is a Record type structure with nested fields for `label` and `prop`. This is returned to the pandas dataframe as a nested dictionary.
#
# The following code sorts the dictionary for the first record by `prob`:
# Access the probability Record of the first prediction with plain f-string
# column indexing (the original used exec(), which is an eval-style anti-pattern
# for a simple attribute lookup), and sort its entries by 'prob' as the cell
# above states -- the original keyed the sort on 'label' instead.
temp = pred[f'predicted_{VAR_TARGET}_probs'][0]
sorted(temp, key=lambda entry: entry['prob'])
# ---
# ## Explanations
# https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-explain-predict
# Per-row explanations: top 10 feature attributions for each TEST prediction.
explain_sql = f"""
SELECT *
FROM ML.EXPLAIN_PREDICT (MODEL `{DATANAME}.{DATANAME}_lr`,(
SELECT *
FROM `{DATANAME}.{DATANAME}_prepped`
WHERE splits = 'TEST'), STRUCT(10 as top_k_features)
)
"""
explain = bigquery.query(query=explain_sql).to_dataframe()
explain.head()
import matplotlib.pyplot as plt

# Unpack the top-k attribution records for the first explained row.
attribs = explain.iloc[0]['top_feature_attributions']
scores = [entry['attribution'] for entry in attribs]
features = [entry['feature'] for entry in attribs]

# Order the bars by attribution value (ascending).
features = [name for _, name in sorted(zip(scores, features))]
scores = sorted(scores)

fig, ax = plt.subplots(figsize=(9, 9))
ax.barh(features, scores)
fig.show()
# ---
# ## Remove Resources
# see notebook "99 - Cleanup"
| 03a - BigQuery Machine Learning (BQML) - Machine Learning with SQL.ipynb |